[Cluster-devel] conga/luci cluster/form-macros cluster/index_h ...
rmccabe at sourceware.org
rmccabe at sourceware.org
Tue Oct 31 17:28:08 UTC 2006
CVSROOT: /cvs/cluster
Module name: conga
Branch: RHEL5
Changes by: rmccabe at sourceware.org 2006-10-31 17:28:04
Modified files:
luci/cluster : form-macros index_html resource-form-macros
resource_form_handlers.js
luci/homebase : form-macros index_html
luci/logs : index_html
luci/site/luci/Extensions: LuciSyslog.py ModelBuilder.py
cluster_adapters.py
homebase_adapters.py ricci_bridge.py
ricci_communicator.py
Log message:
fixes (or at least improvements) for bz#s: 212021, 212632, 212006, 212022, 212440, 212991, 212584, 213057
Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/cluster/form-macros.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.90.2.1&r2=1.90.2.2
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/cluster/index_html.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.20&r2=1.20.2.1
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/cluster/resource-form-macros.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.21&r2=1.21.2.1
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/cluster/resource_form_handlers.js.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.20&r2=1.20.2.1
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/homebase/form-macros.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.44&r2=1.44.2.1
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/homebase/index_html.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.18&r2=1.18.2.1
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/logs/index_html.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.1.2.2&r2=1.1.2.3
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/LuciSyslog.py.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.2.2.1&r2=1.2.2.2
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/ModelBuilder.py.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.8.2.1&r2=1.8.2.2
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.120.2.7&r2=1.120.2.8
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/homebase_adapters.py.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.34.2.2&r2=1.34.2.3
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/ricci_bridge.py.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.30.2.4&r2=1.30.2.5
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/ricci_communicator.py.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.9.2.1&r2=1.9.2.2
--- conga/luci/cluster/form-macros 2006/10/25 01:53:33 1.90.2.1
+++ conga/luci/cluster/form-macros 2006/10/31 17:28:03 1.90.2.2
@@ -21,10 +21,10 @@
<span tal:define="global nodereports isBusy/nodereports"/>
<span tal:repeat="nodereport nodereports">
<tr><td>
- <span tal:condition="python: nodereport['isnodecreation'] == False">
+ <span tal:condition="python: not 'isnodecreation' in nodereport or nodereport['isnodecreation'] == False">
<h2><span tal:content="nodereport/desc" /></h2>
</span>
- <span tal:condition="python: nodereport['isnodecreation'] == True">
+ <span tal:condition="python: 'isnodecreation' in nodereport and nodereport['isnodecreation'] == True">
<span tal:condition="python: nodereport['iserror'] == True">
<h2><span tal:content="nodereport/desc" /></h2>
<font color="red"><span tal:content="nodereport/errormessage"/></font>
@@ -2060,6 +2060,7 @@
set_page_title('Luci — cluster — services — Configure a service');
</script>
<tal:block metal:use-macro="here/form-macros/macros/service-config-head-macro" />
+
<table class="cluster service" width="100%">
<tr class="cluster service info_top">
<td class="cluster service service_name">
@@ -2070,6 +2071,8 @@
</td>
<td class="cluster service service_action">
<form method="post" onSubmit="return dropdown(this.gourl)">
+ <input type="hidden" name="pagetype" tal:attributes="
+ value request/pagetype | request/form/pagetype | nothing" />
<select name="gourl"
tal:define="global innermap sinfo/innermap;
starturls innermap/links">
--- conga/luci/cluster/index_html 2006/10/16 20:25:33 1.20
+++ conga/luci/cluster/index_html 2006/10/31 17:28:03 1.20.2.1
@@ -212,24 +212,16 @@
<metal:main-form-content use-macro="here/form-chooser/macros/main-form">
- <h1>Future Site of Forms</h1>
</metal:main-form-content>
- </div>
-
- </div>
-
- </metal:block>
-
- <span tal:omit-tag=""
- tal:define="global ret python: request.SESSION.get('checkRet')"
- />
+ <tal:block tal:condition="python: request.SESSION.has_key('checkRet')"
+ tal:define="ret python: request.SESSION.get('checkRet')">
<div class="retmsgs" id="retmsgsdiv" tal:condition="python:(ret and 'messages' in ret and len(ret['messages']))">
<div class="hbclosebox">
<a href="javascript:hide_element('retmsgsdiv');"><img src="../homebase/x.png"></a>
</div>
<ul class="retmsgs">
- <tal:block repeat="e python:ret['messages']">
+ <tal:block tal:repeat="e python:ret['messages']">
<li class="retmsgs" tal:content="python:e" />
</tal:block>
</ul>
@@ -241,11 +233,17 @@
</div>
<p class="errmsgs">The following errors occurred:</p>
<ul class="errmsgs">
- <tal:block repeat="e python:ret['errors']">
+ <tal:block tal:repeat="e python:ret['errors']">
<li class="errmsgs" tal:content="python:e" />
</tal:block>
</ul>
</div>
+ </tal:block>
+ </div>
+
+ </div>
+
+ </metal:block>
</td>
<tal:comment replace="nothing"> End of main content block </tal:comment>
--- conga/luci/cluster/resource-form-macros 2006/10/16 04:26:19 1.21
+++ conga/luci/cluster/resource-form-macros 2006/10/31 17:28:03 1.21.2.1
@@ -199,9 +199,8 @@
src="/luci/cluster/resource_form_handlers.js">
</script>
- <tal:block
- tal:define="
- global res python: here.getResourceInfo(modelb, request);" />
+ <tal:block tal:define="
+ global res python: here.getResourceInfo(modelb, request);" />
<h2>Add a Resource</h2>
--- conga/luci/cluster/resource_form_handlers.js 2006/10/07 20:12:47 1.20
+++ conga/luci/cluster/resource_form_handlers.js 2006/10/31 17:28:03 1.20.2.1
@@ -140,7 +140,7 @@
function validate_filesystem(form) {
var errors = new Array();
- if (!form.fsTypeSelect || str_is_blank(form.fsTypeSelect.value)) {
+ if (!form.fstype || str_is_blank(form.fstype.value)) {
errors.push('No file system type was given.');
set_form_err(form.fsTypeSelect);
} else
--- conga/luci/homebase/form-macros 2006/10/16 20:46:46 1.44
+++ conga/luci/homebase/form-macros 2006/10/31 17:28:04 1.44.2.1
@@ -1,7 +1,7 @@
<html>
<tal:comment tal:replace="nothing">
- $Id: form-macros,v 1.44 2006/10/16 20:46:46 rmccabe Exp $
+ $Id: form-macros,v 1.44.2.1 2006/10/31 17:28:04 rmccabe Exp $
</tal:comment>
<head>
@@ -554,8 +554,11 @@
set_page_title('Luci — homebase — Add a running cluster to be managed by Luci');
</script>
- <tal:block tal:define="
- global sessionObj python:request.SESSION.get('checkRet')" />
+ <tal:block tal:condition="python: request.SESSION.has_key('checkRet')"
+ tal:define="global sessionObj python:request.SESSION.get('checkRet')" />
+
+ <tal:block tal:condition="python: not request.SESSION.has_key('checkRet')"
+ tal:define="global sessionObj python:{}" />
<h2 class="homebase">Add Cluster</h2>
--- conga/luci/homebase/index_html 2006/10/09 16:16:11 1.18
+++ conga/luci/homebase/index_html 2006/10/31 17:28:04 1.18.2.1
@@ -15,7 +15,7 @@
xml:lang language">
<tal:comment replace="nothing">
- $Id: index_html,v 1.18 2006/10/09 16:16:11 rmccabe Exp $
+ $Id: index_html,v 1.18.2.1 2006/10/31 17:28:04 rmccabe Exp $
</tal:comment>
<head metal:use-macro="here/header/macros/html_header">
@@ -133,16 +133,15 @@
Homebase
</metal:main_form>
- <span tal:omit-tag=""
- tal:define="global ret python: request.SESSION.get('checkRet')"
- />
+ <tal:block tal:condition="python: request.SESSION.has_key('checkRet')"
+ tal:define="ret python: request.SESSION.get('checkRet')">
<div class="retmsgs" id="retmsgsdiv" tal:condition="python:(ret and 'messages' in ret and len(ret['messages']))">
<div class="hbclosebox">
<a href="javascript:hide_element('retmsgsdiv');"><img src="x.png"></a>
</div>
<ul class="retmsgs">
- <tal:block repeat="e python:ret['messages']">
+ <tal:block tal:repeat="e python:ret['messages']">
<li class="retmsgs" tal:content="python:e" />
</tal:block>
</ul>
@@ -154,11 +153,12 @@
</div>
<p class="errmsgs">The following errors occurred:</p>
<ul class="errmsgs">
- <tal:block repeat="e python:ret['errors']">
+ <tal:block tal:repeat="e python:ret['errors']">
<li class="errmsgs" tal:content="python:e" />
</tal:block>
</ul>
</div>
+ </tal:block>
</div>
--- conga/luci/logs/index_html 2006/10/25 16:04:13 1.1.2.2
+++ conga/luci/logs/index_html 2006/10/31 17:28:04 1.1.2.3
@@ -44,18 +44,61 @@
</metal:javascriptslot>
</head>
- <script type="text/javascript">
- function delWaitBox() {
- var waitbox = document.getElementById('waitbox');
- if (!waitbox)
- return (-1);
- waitbox.parentNode.removeChild(waitbox);
- return (0);
- }
- </script>
- <body onLoad="javascript:delWaitBox()"
- tal:attributes="class here/getSectionFromURL;
+
+<script language="javascript" type="text/javascript">
+
+var xmlHttp_object = false;
+
+function initiate_async_get(url, funct) {
+ xmlHttp_object = false;
+
+ /*@cc_on @*/
+ /*@if (@_jscript_version >= 5)
+ try {
+ xmlHttp_object = new ActiveXObject("Msxml2.XMLHTTP");
+ } catch (e) {
+ try {
+ xmlHttp_object = new ActiveXObject("Microsoft.XMLHTTP");
+ } catch (e2) {
+ xmlHttp_object = false;
+ }
+ }
+ @end @*/
+
+ if (!xmlHttp_object && typeof XMLHttpRequest != 'undefined') {
+ xmlHttp_object = new XMLHttpRequest();
+ }
+
+ if (xmlHttp_object) {
+ xmlHttp_object.open("GET", url, true);
+ xmlHttp_object.onreadystatechange = funct;
+ xmlHttp_object.send(null);
+ } else {
+ alert("Unable to initiate async GET");
+ }
+}
+
+function replace_loginfo_callback() {
+ if (xmlHttp_object.readyState == 4) {
+ if (xmlHttp_object.status == 200) {
+ var response = xmlHttp_object.responseText;
+ document.getElementById('log_entries').innerHTML = response;
+ } else {
+ alert("Error retrieving data from server");
+ }
+ }
+}
+function replace_loginfo(url) {
+ initiate_async_get(url, replace_loginfo_callback);
+}
+</script>
+
+ <body tal:define="nodename request/nodename;
+ log_url context/logs/log_provider/absolute_url;
+ log_url_full python:log_url + '?nodename=' + nodename"
+ tal:attributes="onload python:'replace_loginfo(\'' + log_url_full + '\')';
+ class here/getSectionFromURL;
dir python:test(isRTL, 'rtl', 'ltr')">
<div id="visual-portal-wrapper">
@@ -69,16 +112,26 @@
<div class="visualClear"><!-- --></div>
- <div id="waitbox">
- <span>
- Log information for <span tal:replace="request/nodename | string: host"/> is being retrieved...
- </span>
- <img src="/luci/storage/100wait.gif">
- </div>
-
<div id="log_data">
<h2>Recent log information for <span tal:replace="request/nodename | string: host"/></h2>
- <pre tal:content="structure python: here.getLogsForNode(request)" />
+
+
+ <div id="log_entries">
+ <table style="width: 100%;">
+ <tr>
+ <td align="center">
+ <img src="../storage/100wait.gif" style="padding-top: 1cm;"/>
+ </td>
+ </tr>
+ <tr>
+ <td align="center">
+ <div style="padding-bottom: 4cm;">Retrieving log info</div>
+ </td>
+ </tr>
+ </table>
+ </div>
+
+
</div>
</body>
</html>
--- conga/luci/site/luci/Extensions/LuciSyslog.py 2006/10/24 16:36:23 1.2.2.1
+++ conga/luci/site/luci/Extensions/LuciSyslog.py 2006/10/31 17:28:04 1.2.2.2
@@ -50,7 +50,7 @@
try:
syslog(LOG_DEBUG, msg)
except:
- raise LuciSyslogError, 'syslog debug calle failed'
+ raise LuciSyslogError, 'syslog debug call failed'
def debug(self, msg):
if not LUCI_DEBUG_MODE or not self.__init:
@@ -58,7 +58,7 @@
try:
syslog(LOG_DEBUG, msg)
except:
- raise LuciSyslogError, 'syslog debug calle failed'
+ raise LuciSyslogError, 'syslog debug call failed'
def close(self):
try:
--- conga/luci/site/luci/Extensions/ModelBuilder.py 2006/10/24 01:42:52 1.8.2.1
+++ conga/luci/site/luci/Extensions/ModelBuilder.py 2006/10/31 17:28:04 1.8.2.2
@@ -416,9 +416,9 @@
return True
- def exportModelAsString(self, strbuf):
+ def exportModelAsString(self):
if self.perform_final_check() == False: # failed
- return False
+ return None
#check for dual power fences
self.dual_power_fence_check()
@@ -438,7 +438,7 @@
#can be used
self.purgePCDuplicates()
- return True
+ return strbuf
def has_filepath(self):
if self.filename == None:
--- conga/luci/site/luci/Extensions/cluster_adapters.py 2006/10/30 20:43:25 1.120.2.7
+++ conga/luci/site/luci/Extensions/cluster_adapters.py 2006/10/31 17:28:04 1.120.2.8
@@ -258,7 +258,6 @@
flag.manage_addProperty(FLAG_DESC,"Creating node " + key + " for cluster " + clusterName, "string")
flag.manage_addProperty(LAST_STATUS, 0, "int")
-
def validateAddClusterNode(self, request):
errors = list()
messages = list()
@@ -441,7 +440,7 @@
return (False, {'errors': ['An invalid resource type was specified: ' + res_type]})
try:
- resObj = resourceAddHandler[res_type](self, dummy_form)
+ resObj = resourceAddHandler[res_type](request, dummy_form)
except:
luci_log('res type %d is invalid' % res_type)
resObj = None
@@ -453,11 +452,32 @@
return (True, {'messages': ['This service has been updated.']})
def validateResourceAdd(self, request):
- return (True, {})
-
-def validateResourceEdit(self, request):
- return (True, {})
+ try:
+ res_type = request.form['type'].strip()
+ if not res_type:
+ raise KeyError, 'type is blank'
+ except Exception, e:
+ luci_log.debug_verbose('resourceAdd: type is blank')
+ return (False, {'errors': ['No resource type was given.']})
+
+ errors = list()
+ try:
+ res = resourceAddHandler[res_type](request)
+ if res is None or res[0] is None or res[1] is None:
+ if res and res[2]:
+ errors.extend(res[2])
+ raise Exception, 'An error occurred while adding this resource'
+ modelb = res[1]
+ newres = res[0]
+ addResource(self, request, modelb, newres)
+ except Exception, e:
+ if len(errors) < 1:
+ errors.append('An error occurred while adding this resource')
+ luci_log.debug_verbose('resource error: %s' % str(e))
+ return (False, {'errors': errors})
+ return (True, {'messages': ['Resource added successfully']})
+
## Cluster properties form validation routines
def validateMCastConfig(self, form):
@@ -705,7 +725,7 @@
21: validateServiceAdd,
24: validateServiceAdd,
31: validateResourceAdd,
- 33: validateResourceEdit,
+ 33: validateResourceAdd,
51: validateFenceAdd,
50: validateFenceEdit,
}
@@ -724,9 +744,9 @@
if request.REQUEST_METHOD == 'POST':
ret = validatePost(self, request)
try:
- request.SESSION.set('checkRet', ret[1])
+ request.SESSION.set('checkRet', ret[1])
except:
- request.SESSION.set('checkRet', {})
+ request.SESSION.set('checkRet', {})
else:
try: request.SESSION.set('checkRet', {})
except: pass
@@ -1332,19 +1352,21 @@
def getRicciAgent(self, clustername):
#Check cluster permission here! return none if false
- path = CLUSTER_FOLDER_PATH + clustername
+ path = str(CLUSTER_FOLDER_PATH + clustername)
try:
clusterfolder = self.restrictedTraverse(path)
if not clusterfolder:
- luci_log.debug('cluster folder %s for %s is missing.' \
+ luci_log.debug('GRA: cluster folder %s for %s is missing.' \
% (path, clustername))
- raise
+ raise Exception, 'no cluster folder at %s' % path
nodes = clusterfolder.objectItems('Folder')
if len(nodes) < 1:
- luci_log.debug('no cluster nodes for %s found.' % clustername)
- return None
- except:
+ luci_log.debug('GRA: no cluster nodes for %s found.' % clustername)
+ raise Exception, 'no cluster nodes were found at %s' % path
+ except Exception, e:
+ luci_log.debug('GRA: cluster folder %s for %s is missing: %s.' \
+ % (path, clustername, str(e)))
return None
cluname = lower(clustername)
@@ -1361,24 +1383,31 @@
try:
rc = RicciCommunicator(hostname)
except RicciError, e:
- luci_log.debug('ricci error: %s' % str(e))
+ luci_log.debug('GRA: ricci error: %s' % str(e))
continue
try:
clu_info = rc.cluster_info()
- if cluname != lower(clu_info[0]) and cluname != lower(clu_info[1]):
- luci_log.debug('%s reports it\'s in cluster %s:%s; we expect %s' \
+ except Exception, e:
+ luci_log.debug('GRA: cluster_info error: %s' % str(e))
+
+ if cluname != lower(clu_info[0]) and cluname != lower(clu_info[1]):
+ try:
+ luci_log.debug('GRA: %s reports it\'s in cluster %s:%s; we expect %s' \
% (hostname, clu_info[0], clu_info[1], cluname))
- # node reports it's in a different cluster
- raise
- except:
+ setNodeFlag(self, node, CLUSTER_NODE_NOT_MEMBER)
+ except:
+ pass
continue
if rc.authed():
return rc
- setNodeFlag(node[1], CLUSTER_NODE_NEED_AUTH)
+ try:
+ setNodeFlag(node[1], CLUSTER_NODE_NEED_AUTH)
+ except:
+ pass
- luci_log.debug('no ricci agent could be found for cluster %s' % cluname)
+ luci_log.debug('GRA: no ricci agent could be found for cluster %s' % cluname)
return None
def getRicciAgentForCluster(self, req):
@@ -1395,23 +1424,14 @@
return getRicciAgent(self, clustername)
def getClusterStatus(self, rc):
- clustatus_batch ='<?xml version="1.0" ?><batch><module name="cluster"><request API_version="1.0"><function_call name="status"/></request></module></batch>'
-
- try:
- clustatuscmd_xml = minidom.parseString(clustatus_batch).firstChild
- except:
- return {}
-
- try:
- ricci_xml = rc.process_batch(clustatuscmd_xml, async=False)
- except RicciError, e:
- luci_log.debug('ricci error: %s', str(e))
- except:
+ doc = getClusterStatusBatch(rc)
+ if not doc:
+ try:
+ luci_log.debug_verbose('getClusterStatusBatch returned None for %s/%s' % rc.cluster_info())
+ except:
+ pass
return {}
- doc = getPayload(ricci_xml)
- if not doc or not doc.firstChild:
- return {}
results = list()
vals = {}
@@ -1617,6 +1637,7 @@
try:
svcname = req.form['servicename']
except:
+ luci_log.debug_verbose('serviceStart error: no service name')
return None
try:
@@ -1625,91 +1646,160 @@
try:
nodename = req.form['nodename']
except:
- return None
+ nodename = None
+
+ cluname = None
try:
cluname = req['clustername']
except KeyError, e:
try:
- cluname = req.form['clusterName']
+ cluname = req.form['clustername']
except:
- return None
+ pass
+
+ if cluname is None:
+ luci_log.debug_verbose('serviceStart error: %s no service name' \
+ % svcname)
+ return None
ricci_agent = rc.hostname()
batch_number, result = startService(rc, svcname, nodename)
- #Now we need to create a DB flag for this system.
+ if batch_number is None or result is None:
+ luci_log.debug_verbose('startService %s call failed' \
+ % svcname)
+ return None
- path = CLUSTER_FOLDER_PATH + cluname
- clusterfolder = self.restrictedTraverse(path)
+ #Now we need to create a DB flag for this system.
+ path = str(CLUSTER_FOLDER_PATH + cluname)
batch_id = str(batch_number)
- objname = ricci_agent + "____flag"
- clusterfolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
- #Now we need to annotate the new DB object
- objpath = path + "/" + objname
- flag = self.restrictedTraverse(objpath)
- #flag[BATCH_ID] = batch_id
- #flag[TASKTYPE] = SERVICE_START
- #flag[FLAG_DESC] = "Starting service " + svcname
- flag.manage_addProperty(BATCH_ID,batch_id, "string")
- flag.manage_addProperty(TASKTYPE,SERVICE_START, "string")
- flag.manage_addProperty(FLAG_DESC,"Starting service \'" + svcname + "\'", "string")
+ objname = str(ricci_agent + "____flag")
+
+ try:
+ clusterfolder = self.restrictedTraverse(path)
+ clusterfolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
+ #Now we need to annotate the new DB object
+ objpath = str(path + "/" + objname)
+ flag = self.restrictedTraverse(objpath)
+ flag.manage_addProperty(BATCH_ID, batch_id, "string")
+ flag.manage_addProperty(TASKTYPE, SERVICE_START, "string")
+ flag.manage_addProperty(FLAG_DESC, "Starting service \'" + svcname + "\'", "string")
+ except Exception, e:
+ luci_log.debug_verbose('Error creating flag at %s: %s' % (objpath, str(e)))
+
response = req.RESPONSE
response.redirect(req['HTTP_REFERER'] + "&busyfirst=true")
def serviceRestart(self, rc, req):
- svcname = req['servicename']
- batch_number, result = restartService(rc, svcname)
+ try:
+ svcname = req['servicename']
+ except KeyError, e:
+ try:
+ svcname = req.form['servicename']
+ except:
+ luci_log.debug_verbose('no service name for serviceRestart')
+ return None
+ except:
+ luci_log.debug_verbose('no service name for serviceRestart')
+ return None
- ricci_agent = rc.hostname()
- #Now we need to create a DB flag for this system.
- cluname = req['clustername']
-
- path = CLUSTER_FOLDER_PATH + cluname
- clusterfolder = self.restrictedTraverse(path)
- batch_id = str(batch_number)
- objname = ricci_agent + "____flag"
- clusterfolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
- #Now we need to annotate the new DB object
- objpath = path + "/" + objname
- flag = self.restrictedTraverse(objpath)
- #flag[BATCH_ID] = batch_id
- #flag[TASKTYPE] = SERVICE_RESTART
- #flag[FLAG_DESC] = "Restarting service " + svcname
- flag.manage_addProperty(BATCH_ID,batch_id, "string")
- flag.manage_addProperty(TASKTYPE,SERVICE_RESTART, "string")
- flag.manage_addProperty(FLAG_DESC,"Restarting service " + svcname, "string")
+ #Now we need to create a DB flag for this system.
+ cluname = None
+ try:
+ cluname = req['clustername']
+ except:
+ try:
+ cluname = req.form['clustername']
+ except:
+ pass
- response = req.RESPONSE
- response.redirect(req['HTTP_REFERER'] + "&busyfirst=true")
+ if cluname is None:
+ luci_log.debug_verbose('unable to determine cluser name for serviceRestart %s' % svcname)
+ return None
+
+ batch_number, result = restartService(rc, svcname)
+ if batch_number is None or result is None:
+ luci_log.debug_verbose('restartService for %s failed' % svcname)
+ return None
+
+ ricci_agent = rc.hostname()
+
+ path = str(CLUSTER_FOLDER_PATH + cluname)
+ batch_id = str(batch_number)
+ objname = str(ricci_agent + "____flag")
+
+ try:
+ clusterfolder = self.restrictedTraverse(path)
+ clusterfolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
+
+ #Now we need to annotate the new DB object
+ objpath = str(path + "/" + objname)
+ flag = self.restrictedTraverse(objpath)
+ flag.manage_addProperty(BATCH_ID, batch_id, "string")
+ flag.manage_addProperty(TASKTYPE, SERVICE_RESTART, "string")
+ flag.manage_addProperty(FLAG_DESC, "Restarting service " + svcname, "string")
+ except Exception, e:
+ luci_log.debug_verbose('Error creating flag in restartService %s: %s' \
+ % (svcname, str(e)))
+
+ response = req.RESPONSE
+ response.redirect(req['HTTP_REFERER'] + "&busyfirst=true")
def serviceStop(self, rc, req):
- svcname = req['servicename']
- batch_number, result = stopService(rc, svcname)
+ try:
+ svcname = req['servicename']
+ except KeyError, e:
+ try:
+ svcname = req.form['servicename']
+ except:
+ luci_log.debug_verbose('no service name for serviceStop')
+ return None
+ except:
+ luci_log.debug_verbose('no service name for serviceStop')
+ return None
+
+ #Now we need to create a DB flag for this system.
+ cluname = None
+ try:
+ cluname = req['clustername']
+ except:
+ try:
+ cluname = req.form['clustername']
+ except:
+ pass
- #Now we need to create a DB flag for this system.
- cluname = req['clustername']
+ if cluname is None:
+ luci_log.debug_verbose('unable to determine cluser name for serviceStop %s' % svcname)
+ return None
- ricci_agent = rc.hostname()
+ batch_number, result = stopService(rc, svcname)
+ if batch_number is None or result is None:
+ luci_log.debug_verbose('stopService for %s failed' % svcname)
+ return None
+
+ ricci_agent = rc.hostname()
- path = CLUSTER_FOLDER_PATH + cluname
- clusterfolder = self.restrictedTraverse(path)
- batch_id = str(batch_number)
- objname = ricci_agent + "____flag"
- clusterfolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
- #Now we need to annotate the new DB object
- objpath = path + "/" + objname
- flag = self.restrictedTraverse(objpath)
- #flag[BATCH_ID] = batch_id
- #flag[TASKTYPE] = SERVICE_STOP
- #flag[FLAG_DESC] = "Stopping service " + svcname
- flag.manage_addProperty(BATCH_ID,batch_id,"string")
- flag.manage_addProperty(TASKTYPE,SERVICE_STOP, "string")
- flag.manage_addProperty(FLAG_DESC,"Stopping service " + svcname,"string")
+ path = str(CLUSTER_FOLDER_PATH + cluname)
+ batch_id = str(batch_number)
+ objname = str(ricci_agent + "____flag")
+
+ try:
+ clusterfolder = self.restrictedTraverse(path)
+ clusterfolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
+ #Now we need to annotate the new DB object
+ objpath = str(path + "/" + objname)
+ flag = self.restrictedTraverse(objpath)
- time.sleep(2)
+ flag.manage_addProperty(BATCH_ID, batch_id, "string")
+ flag.manage_addProperty(TASKTYPE, SERVICE_STOP, "string")
+ flag.manage_addProperty(FLAG_DESC, "Stopping service " + svcname, "string")
+ time.sleep(2)
+ except Exception, e:
+ luci_log.debug_verbose('Error creating flags for stopService %s: %s' \
+ % (svcname, str(e)))
- response = req.RESPONSE
- response.redirect(req['HTTP_REFERER'] + "&busyfirst=true")
+ response = req.RESPONSE
+ response.redirect(req['HTTP_REFERER'] + "&busyfirst=true")
def getFdomsInfo(self, modelb, request, clustatus):
slist = list()
@@ -2008,7 +2098,7 @@
clustername = request['clustername']
except KeyError, e:
try:
- clustername = request.form['clusterName']
+ clustername = request.form['clustername']
except:
luci_log.debug('missing cluster name for NTP')
return None
@@ -2105,16 +2195,20 @@
return None
batch_number, result = nodeLeaveCluster(rc)
- batch_id = str(batch_number)
+ if batch_number is None or result is None:
+ luci_log.debug_verbose('nodeLeaveCluster error: batch_number and/or result is None')
+ return None
+ batch_id = str(batch_number)
objpath = str(path + "/" + objname)
+
try:
nodefolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
#Now we need to annotate the new DB object
flag = self.restrictedTraverse(objpath)
flag.manage_addProperty(BATCH_ID, batch_id, "string")
- flag.manage_addProperty(TASKTYPE,NODE_LEAVE_CLUSTER, "string")
- flag.manage_addProperty(FLAG_DESC,"Node \'" + nodename + "\' leaving cluster", "string")
+ flag.manage_addProperty(TASKTYPE, NODE_LEAVE_CLUSTER, "string")
+ flag.manage_addProperty(FLAG_DESC, "Node \'" + nodename + "\' leaving cluster", "string")
except:
luci_log.debug('An error occurred while setting flag %s' % objpath)
@@ -2123,34 +2217,52 @@
response.redirect(request['URL'] + "?pagetype=" + CLUSTER_CONFIG + "&clustername=" + clustername)
elif task == NODE_JOIN_CLUSTER:
batch_number, result = nodeJoinCluster(rc)
- path = CLUSTER_FOLDER_PATH + clustername + "/" + nodename_resolved
- nodefolder = self.restrictedTraverse(path)
+ if batch_number is None or result is None:
+ luci_log.debug_verbose('nodeJoin error: batch_number and/or result is None')
+ return None
+
+ path = str(CLUSTER_FOLDER_PATH + clustername + "/" + nodename_resolved)
batch_id = str(batch_number)
- objname = nodename_resolved + "____flag"
- nodefolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
- #Now we need to annotate the new DB object
- objpath = path + "/" + objname
- flag = self.restrictedTraverse(objpath)
- flag.manage_addProperty(BATCH_ID,batch_id, "string")
- flag.manage_addProperty(TASKTYPE,NODE_JOIN_CLUSTER, "string")
- flag.manage_addProperty(FLAG_DESC,"Node \'" + nodename + "\' joining cluster", "string")
+ objname = str(nodename_resolved + "____flag")
+ objpath = str(path + "/" + objname)
+
+ try:
+ nodefolder = self.restrictedTraverse(path)
+ nodefolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
+ #Now we need to annotate the new DB object
+ flag = self.restrictedTraverse(objpath)
+ flag.manage_addProperty(BATCH_ID, batch_id, "string")
+ flag.manage_addProperty(TASKTYPE, NODE_JOIN_CLUSTER, "string")
+ flag.manage_addProperty(FLAG_DESC, "Node \'" + nodename + "\' joining cluster", "string")
+ except Exception, e:
+ luci_log.debug_verbose('nodeJoin error: creating flags at %s: %s' \
+ % (path, str(e)))
response = request.RESPONSE
#Once again, is this correct? Should we re-direct to the cluster page?
response.redirect(request['URL'] + "?pagetype=" + CLUSTER_CONFIG + "&clustername=" + clustername)
elif task == NODE_REBOOT:
batch_number, result = nodeReboot(rc)
- path = CLUSTER_FOLDER_PATH + clustername + "/" + nodename_resolved
- nodefolder = self.restrictedTraverse(path)
+ if batch_number is None or result is None:
+ luci_log.debug_verbose('nodeReboot: batch_number and/or result is None')
+ return None
+
+ path = str(CLUSTER_FOLDER_PATH + clustername + "/" + nodename_resolved)
batch_id = str(batch_number)
- objname = nodename_resolved + "____flag"
- nodefolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
- #Now we need to annotate the new DB object
- objpath = path + "/" + objname
- flag = self.restrictedTraverse(objpath)
- flag.manage_addProperty(BATCH_ID, batch_id, "string")
- flag.manage_addProperty(TASKTYPE, NODE_REBOOT, "string")
- flag.manage_addProperty(FLAG_DESC, "Node \'" + nodename + "\' is being rebooted", "string")
+ objname = str(nodename_resolved + "____flag")
+ objpath = str(path + "/" + objname)
+
+ try:
+ nodefolder = self.restrictedTraverse(path)
+ nodefolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
+ #Now we need to annotate the new DB object
+ flag = self.restrictedTraverse(objpath)
+ flag.manage_addProperty(BATCH_ID, batch_id, "string")
+ flag.manage_addProperty(TASKTYPE, NODE_REBOOT, "string")
+ flag.manage_addProperty(FLAG_DESC, "Node \'" + nodename + "\' is being rebooted", "string")
+ except Exception, e:
+ luci_log.debug_verbose('nodeReboot err: creating flags at %s: %s' \
+ % (path, str(e)))
response = request.RESPONSE
#Once again, is this correct? Should we re-direct to the cluster page?
@@ -2161,16 +2273,19 @@
try:
clusterfolder = self.restrictedTraverse(path)
if not clusterfolder:
- raise
- except:
- luci_log.debug('The cluster folder for %s could not be found.' \
- % clustername)
+ raise Exception, 'no cluster folder at %s' % path
+ except Exception, e:
+ luci_log.debug('The cluster folder for %s could not be found: %s' \
+ % (clustername, str(e)))
return None
try:
nodes = clusterfolder.objectItems('Folder')
- except:
- luci_log.debug('No cluster nodes for %s were found' % clustername)
+ if not nodes or len(nodes) < 1:
+ raise Exception, 'no cluster nodes'
+ except Exception, e:
+ luci_log.debug('No cluster nodes for %s were found: %s' \
+ % (clustername, str(e)))
return None
found_one = False
@@ -2210,17 +2325,26 @@
return None
batch_number, result = nodeFence(rc, nodename)
- path = path + "/" + nodename_resolved
- nodefolder = self.restrictedTraverse(path)
+ if batch_number is None or result is None:
+ luci_log.debug_verbose('nodeFence: batch_number and/or result is None')
+ return None
+
+ path = str(path + "/" + nodename_resolved)
batch_id = str(batch_number)
- objname = nodename_resolved + "____flag"
- nodefolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
- #Now we need to annotate the new DB object
- objpath = path + "/" + objname
- flag = self.restrictedTraverse(objpath)
- flag.manage_addProperty(BATCH_ID,batch_id, "string")
- flag.manage_addProperty(TASKTYPE,NODE_FENCE, "string")
- flag.manage_addProperty(FLAG_DESC,"Node \'" + nodename + "\' is being fenced", "string")
+ objname = str(nodename_resolved + "____flag")
+ objpath = str(path + "/" + objname)
+
+ try:
+ nodefolder = self.restrictedTraverse(path)
+ nodefolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
+ #Now we need to annotate the new DB object
+ flag = self.restrictedTraverse(objpath)
+ flag.manage_addProperty(BATCH_ID, batch_id, "string")
+ flag.manage_addProperty(TASKTYPE, NODE_FENCE, "string")
+ flag.manage_addProperty(FLAG_DESC, "Node \'" + nodename + "\' is being fenced", "string")
+ except Exception, e:
+ luci_log.debug_verbose('nodeFence err: creating flags at %s: %s' \
+ % (path, str(e)))
response = request.RESPONSE
#Once again, is this correct? Should we re-direct to the cluster page?
@@ -2231,17 +2355,25 @@
#and propogate it. We will need two ricci agents for this task.
# Make sure we can find a second node before we hose anything.
- path = CLUSTER_FOLDER_PATH + clustername
+ path = str(CLUSTER_FOLDER_PATH + clustername)
try:
clusterfolder = self.restrictedTraverse(path)
if not clusterfolder:
- raise
- except:
+ raise Exception, 'no cluster folder at %s' % path
+ except Exception, e:
+ luci_log.debug_verbose('node delete error for cluster %s: %s' \
+ % (clustername, str(e)))
return None
- nodes = clusterfolder.objectItems('Folder')
- found_one = False
+ try:
+ nodes = clusterfolder.objectItems('Folder')
+ if not nodes or len(nodes) < 1:
+ raise Exception, 'no cluster nodes in DB'
+ except Exception, e:
+ luci_log.debug_verbose('node delete error for cluster %s: %s' \
+ % (clustername, str(e)))
+ return None
+ found_one = False
for node in nodes:
if node[1].getId().find(nodename) != (-1):
continue
@@ -2250,47 +2382,75 @@
# in the cluster we believe it is.
try:
rc2 = RicciCommunicator(node[1].getId())
- if not rc2.authed():
- # set the flag
- rc2 = None
- if not rc2:
- raise
- found_one = True
- break
+ except Exception, e:
+ luci_log.info('ricci %s error: %s' % (node[0], str(e)))
+ continue
except:
continue
+ if not rc2.authed():
+ try:
+ setNodeFlag(node[1], CLUSTER_NODE_NEED_AUTH)
+ except:
+ pass
+
+ try:
+ snode = getStorageNode(self, node[0])
+ setNodeFlag(snode, CLUSTER_NODE_NEED_AUTH)
+ except:
+ pass
+
+ luci_log.debug_verbose('%s is not authed' % node[0])
+ rc2 = None
+ continue
+ else:
+ found_one = True
+ break
+
if not found_one:
+ luci_log.debug_verbose('unable to find ricci node to delete %s from %s' % (nodename, clustername))
return None
#First, delete cluster.conf from node to be deleted.
#next, have node leave cluster.
batch_number, result = nodeLeaveCluster(rc, purge=True)
+ if batch_number is None or result is None:
+ luci_log.debug_verbose('nodeDelete: batch_number and/or result is None')
+ return None
#It is not worth flagging this node in DB, as we are going
#to delete it anyway. Now, we need to delete node from model
#and send out new cluster.conf
delete_target = None
- try:
- nodelist = model.getNodes()
- find_node = lower(nodename)
- for n in nodelist:
+ nodelist = model.getNodes()
+ find_node = lower(nodename)
+ for n in nodelist:
+ try:
if lower(n.getName()) == find_node:
delete_target = n
break
- except:
- pass
+ except:
+ continue
if delete_target is None:
+ luci_log.debug_verbose('unable to find delete target for %s in %s' \
+ % (nodename, clustername))
return None
model.deleteNode(delete_target)
- str_buf = ""
- model.exportModelAsString(str_buf)
+
+ try:
+ str_buf = model.exportModelAsString()
+ if not str_buf:
+ raise Exception, 'model string is blank'
+ except Exception, e:
+ luci_log.debug_verbose('NTP exportModelAsString: %s' % str(e))
+ return None
# propagate the new cluster.conf via the second node
batch_number, result = setClusterConf(rc2, str(str_buf))
if batch_number is None:
+ luci_log.debug_verbose('batch number is None after del node in NTP')
return None
#Now we need to delete the node from the DB
@@ -2301,19 +2461,24 @@
delnode = self.restrictedTraverse(del_path)
clusterfolder = self.restrictedTraverse(path)
clusterfolder.manage_delObjects(delnode[0])
- except:
- # XXX - we need to handle this
- pass
+ except Exception, e:
+ luci_log.debug_verbose('error deleting %s: %s' % (del_path, str(e)))
batch_id = str(batch_number)
objname = str(nodename_resolved + "____flag")
- clusterfolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
- #Now we need to annotate the new DB object
objpath = str(path + "/" + objname)
- flag = self.restrictedTraverse(objpath)
- flag.manage_addProperty(BATCH_ID,batch_id, "string")
- flag.manage_addProperty(TASKTYPE,NODE_DELETE, "string")
- flag.manage_addProperty(FLAG_DESC,"Deleting node \'" + nodename + "\'", "string")
+
+ try:
+ clusterfolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
+ #Now we need to annotate the new DB object
+ flag = self.restrictedTraverse(objpath)
+ flag.manage_addProperty(BATCH_ID, batch_id, "string")
+ flag.manage_addProperty(TASKTYPE, NODE_DELETE, "string")
+ flag.manage_addProperty(FLAG_DESC, "Deleting node \'" + nodename + "\'", "string")
+ except Exception, e:
+ luci_log.debug_verbose('nodeDelete %s err setting flag at %s: %s' \
+ % (nodename, objpath, str(e)))
+
response = request.RESPONSE
response.redirect(request['HTTP_REFERER'] + "&busyfirst=true")
@@ -2670,12 +2835,28 @@
xvm.addAttribute("name", req.form['xenvmname'])
xvm.addAttribute("path", req.form['xenvmpath'])
- stringbuf = ""
- model.exportModelAsString(stringbuf)
- setClusterConf(stringbuf)
+ try:
+ stringbuf = model.exportModelAsString()
+ if not stringbuf:
+ raise Exception, 'model is blank'
+ except Exception, e:
+ luci_log.debug_verbose('exportModelAsString error: %s' % str(e))
+ return None
-
-
+ try:
+ clustername = model.getClusterName()
+ if not clustername:
+ raise Exception, 'cluster name from modelb.getClusterName() is blank'
+ except Exception, e:
+ luci_log.debug_verbose('error: getClusterName: %s' % str(e))
+ return None
+
+ rc = getRicciAgent(self, clustername)
+ if not rc:
+ luci_log.debug_verbose('Unable to find a ricci agent for the %s cluster' % clustername)
+ return None
+
+ setClusterConf(rc, stringbuf)
def getXenVMInfo(self, model, request):
try:
@@ -2717,31 +2898,35 @@
try:
cluname = req.form['clusterName']
except:
- luci_log.debug_verbose('No cluster name -- returning empty map')
+ luci_log.debug_verbose('ICB0: No cluster name -- returning empty map')
return map
- path = CLUSTER_FOLDER_PATH + cluname
+ path = str(CLUSTER_FOLDER_PATH + cluname)
try:
- clusterfolder = self.restrictedTraverse(str(path))
+ clusterfolder = self.restrictedTraverse(path)
if not clusterfolder:
raise Exception, 'clusterfolder is None'
except Exception, e:
- luci_log.debug_verbose('cluster %s [%s] folder missing: %s -- returning empty map' % (cluname, path, str(e)))
+ luci_log.debug_verbose('ICB1: cluster %s [%s] folder missing: %s -- returning empty map' % (cluname, path, str(e)))
return map
except:
- luci_log.debug_verbose('cluster %s [%s] folder missing: returning empty map' % (cluname, path))
+ luci_log.debug_verbose('ICB2: cluster %s [%s] folder missing: returning empty map' % (cluname, path))
try:
items = clusterfolder.objectItems('ManagedSystem')
if not items or len(items) < 1:
+ luci_log.debug_verbose('ICB3: no flags at %s for cluster %s' \
+ % (path, cluname))
return map #This returns an empty map, and should indicate not busy
except Exception, e:
- luci_log.debug('An error occurred while looking for cluster %s flags at path %s: %s' % (cluname, path, str(e)))
+ luci_log.debug('ICB4: An error occurred while looking for cluster %s flags at path %s: %s' % (cluname, path, str(e)))
return map
except:
- luci_log.debug('An error occurred while looking for cluster %s flags at path %s' % (cluname, path))
+ luci_log.debug('ICB5: An error occurred while looking for cluster %s flags at path %s' % (cluname, path))
return map
-
+
+ luci_log.debug_verbose('ICB6: isClusterBusy: %s is busy: %d flags' \
+ % (cluname, len(items)))
map['busy'] = "true"
#Ok, here is what is going on...if there is an item,
#we need to call the ricci_bridge and get a batch report.
@@ -2771,31 +2956,58 @@
batch_xml = None
ricci = item[0].split("____") #This removes the 'flag' suffix
+ luci_log.debug_verbose('ICB6A: using host %s for rc for item %s' \
+ % (ricci[0], item[0]))
try:
rc = RicciCommunicator(ricci[0])
+ if not rc:
+ rc = None
+ raise RicciError, 'rc is None for %s' % ricci[0]
except RicciError, e:
rc = None
- luci_log.debug_verbose('ricci returned error in iCB for %s: %s' \
+ luci_log.debug_verbose('ICB7: ricci returned error in iCB for %s: %s' \
% (cluname, str(e)))
except:
rc = None
- luci_log.info('ricci connection failed for cluster %s' % cluname)
+ luci_log.info('ICB8: ricci connection failed for cluster %s' % cluname)
+ batch_id = None
if rc is not None:
try:
- batch_xml = rc.batch_report(item[1].getProperty(BATCH_ID))
- if batch_xml != None:
- (creation_status, total) = batch_status(batch_xml)
- else:
- luci_log.debug_verbose('batch report for cluster %s, item %s is None' % (cluname, item[0]))
- except:
- creation_status = RICCI_CONNECT_FAILURE #No contact with ricci (-1000)
- batch_xml = "bloody_failure" #set to avoid next if statement
- else:
+ batch_id = item[1].getProperty(BATCH_ID)
+ luci_log.debug_verbose('ICB8A: got batch_id %s from %s' \
+ % (batch_id, item[0]))
+ except Exception, e:
+ try:
+ luci_log.debug_verbose('ICB8B: failed to get batch_id from %s: %s' \
+ % (item[0], str(e)))
+ except:
+ luci_log.debug_verbose('ICB8C: failed to get batch_id from %s' % item[0])
+
+ if batch_id is not None:
+ try:
+ batch_xml = rc.batch_report(batch_id)
+ if batch_xml is not None:
+ luci_log.debug_verbose('ICB8D: batch_xml for %s from batch_report is not None -- getting batch status' % batch_id)
+ (creation_status, total) = batch_status(batch_xml)
+ try:
+ luci_log.debug_verbose('ICB8E: batch status returned (%d,%d)' \
+ % (creation_status, total))
+ except:
+ luci_log.debug_verbose('ICB8F: error logging batch status return')
+ else:
+ luci_log.debug_verbose('ICB9: batch_xml for cluster is None')
+ except Exception, e:
+ luci_log.debug_verbose('ICB9A: error getting batch_xml from rc.batch_report: %s' % str(e))
+ creation_status = RICCI_CONNECT_FAILURE #No contact with ricci (-1000)
+ batch_xml = "bloody_failure" #set to avoid next if statement
+
+ if rc is None or batch_id is None:
+ luci_log.debug_verbose('ICB12: unable to connect to a ricci agent for cluster %s to get batch status' % cluname)
creation_status = RICCI_CONNECT_FAILURE #No contact with ricci (-1000)
- batch_xml = "bloody_failure" #set to avoid next if statement
+ batch_xml = "bloody_bloody_failure" #set to avoid next if statement
- if batch_xml == None: #The job is done and gone from queue
+ if batch_xml is None: #The job is done and gone from queue
if redirect_message == False: #We have not displayed this message yet
node_report['desc'] = REDIRECT_MSG
node_report['iserror'] = True
@@ -2803,7 +3015,7 @@
nodereports.append(node_report)
redirect_message = True
- luci_log.debug_verbose('batch job is done -- deleting %s' % item[0])
+ luci_log.debug_verbose('ICB13: batch job is done -- deleting %s' % item[0])
clusterfolder.manage_delObjects(item[0])
continue
@@ -2857,7 +3069,7 @@
try:
clusterfolder.manage_delObjects(item[0])
except Exception, e:
- luci_log.info('Unable to delete %s: %s' % (item[0], str(e)))
+ luci_log.info('ICB14: Unable to delete %s: %s' % (item[0], str(e)))
continue
else:
map['busy'] = "true"
@@ -2917,7 +3129,12 @@
map['isVirtualized'] = rc.dom0()
except:
# default to rhel5 if something crazy happened.
- luci_log.debug('An error occurred while attempting to get OS/Virt info for %s -- defaulting to rhel5/False' % rc.hostname())
+ try:
+ luci_log.debug('An error occurred while attempting to get OS/Virt info for %s -- defaulting to rhel5/False' % rc.hostname())
+ except:
+ # this can throw an exception if the original exception
+ # is caused by rc being None or stale.
+ pass
map['os'] = 'rhel5'
map['isVirtualized'] = False
return map
@@ -2949,15 +3166,30 @@
return resList
def getResourceInfo(modelb, request):
+ if not modelb:
+ luci_log.debug_verbose('no modelb obj in getResourceInfo')
+ return {}
+
+ name = None
try:
name = request['resourcename']
except KeyError, e:
try:
name = request.form['resourcename']
except:
- luci_log.debug_verbose('getResourceInfo missing res name')
- return {}
+ pass
except:
+ pass
+
+ if name is None:
+ try:
+ type = request.form['type']
+ if type == 'ip':
+ name = request.form['value'].strip()
+ except:
+ pass
+
+ if name is None:
luci_log.debug_verbose('getResourceInfo missing res name')
return {}
@@ -2998,7 +3230,7 @@
try:
modelb = request.SESSION.get('model')
except:
- luci_log.debug_verbose('delResource unable to extract model from SESSION')
+ luci_log.debug_verbose('delRes unable to extract model from SESSION')
return errstr
try:
@@ -3007,10 +3239,10 @@
try:
name = request.form['resourcename']
except:
- luci_log.debug_verbose('delResource missing resname %s' % str(e))
+ luci_log.debug_verbose('delRes missing resname %s' % str(e))
return errstr + ': ' + str(e)
except:
- luci_log.debug_verbose('delResource missing resname')
+ luci_log.debug_verbose('delRes missing resname')
return errstr + ': ' + str(e)
try:
@@ -3019,7 +3251,7 @@
try:
clustername = request.form['clustername']
except:
- luci_log.debug_verbose('delResource missing cluster name')
+ luci_log.debug_verbose('delRes missing cluster name')
return errstr + ': could not determine the cluster name.'
try:
@@ -3040,20 +3272,20 @@
break
if not found:
- luci_log.debug_verbose('delresource cant find res %s' % name)
+ luci_log.debug_verbose('delRes cant find res %s' % name)
return errstr + ': the specified resource was not found.'
try:
conf = modelb.exportModelAsString()
if not conf:
- raise
- except:
- luci_log.debug_verbose('exportModelAsString failed')
+ raise Exception, 'model string is blank'
+ except Exception, e:
+ luci_log.debug_verbose('delRes: exportModelAsString failed: %s' % str(e))
return errstr
- batch_number, result = setClusterConf(str(conf))
+ batch_number, result = setClusterConf(rc, str(conf))
if batch_number is None or result is None:
- luci_log.debug_verbose('missing batch and/or result from setClusterConf')
+ luci_log.debug_verbose('delRes: missing batch and/or result from setClusterConf')
return errstr
modelstr = ""
@@ -3071,10 +3303,10 @@
flag.manage_addProperty(TASKTYPE, RESOURCE_REMOVE, "string")
flag.manage_addProperty(FLAG_DESC, "Removing Resource \'" + request['resourcename'] + "\'", "string")
except Exception, e:
- luci_log.debug('An error occurred while setting flag %s: %s' \
+ luci_log.debug('delRes: An error occurred while setting flag %s: %s' \
% (objname, str(e)))
except:
- luci_log.debug('An error occurred while setting flag %s' % objname)
+ luci_log.debug('delRes: An error occurred while setting flag %s' % objname)
response = request.RESPONSE
response.redirect(request['HTTP_REFERER'] + "&busyfirst=true")
@@ -3083,99 +3315,142 @@
if form is None:
form = request.form
+ if not form:
+ luci_log.debug_verbose('addIp error: form is missing')
+ return None
+
modelb = request.SESSION.get('model')
- if not modelb or not form:
+ if not modelb:
+ luci_log.debug_verbose('addIp error: modelb is missing')
return None
if form.has_key('edit'):
try:
oldname = form['oldname'].strip()
if not oldname:
- raise KeyError('oldname is blank.')
+ raise KeyError, 'oldname is blank.'
res = getResourceForEdit(modelb, oldname)
- except KeyError, e:
+ except Exception, e:
+ luci_log.debug_verbose('addIp error: %s' % str(e))
return None
else:
- res = apply(Ip)
+ try:
+ res = apply(Ip)
+ if not res:
+ raise Exception, 'apply(Ip) is None'
+ except Exception, e:
+ luci_log.debug_verbose('addIp error: %s' % str(e))
+ return None
if not res:
+ luci_log.debug_verbose('addIp error: res is none')
return None
+ errors = list()
try:
addr = form['ip_address'].strip()
if not addr:
- raise KeyError('ip_address is blank')
+ raise KeyError, 'ip_address is blank'
# XXX: validate IP addr
res.attr_hash['address'] = addr
except KeyError, e:
- return None
+ err = str(e)
+ errors.append(err)
+ luci_log.debug_verbose('addIp error: %s' % err)
if 'monitorLink' in form:
res.attr_hash['monitor_link'] = '1'
else:
res.attr_hash['monitor_link'] = '0'
- modelb.getResourcesPtr().addChild(res)
- return res
+ if len(errors) > 0:
+ return [None, None, errors]
+ return [res, modelb, None]
def addFs(request, form=None):
if form is None:
form = request.form
- modelb = request.SESSION.get('model')
- if not modelb or not form:
+ if not form:
+ luci_log.debug_verbose('addFs error: form is missing')
+ return None
+
+ modelb = request.SESSION.get('model')
+ if not modelb:
+ luci_log.debug_verbose('addFs error: modelb is missing')
return None
if form.has_key('edit'):
try:
oldname = form['oldname'].strip()
if not oldname:
- raise KeyError('oldname is blank.')
+ raise KeyError, 'oldname is blank.'
res = getResourceForEdit(modelb, oldname)
- except KeyError, e:
+ except Exception, e:
+ luci_log.debug_verbose('addFs error: %s' % str(e))
return None
else:
- res = apply(Fs)
+ try:
+ res = apply(Fs)
+ if not res:
+ raise Exception, 'apply(Fs) is None'
+ except Exception, e:
+ luci_log.debug_verbose('addFs error: %s' % str(e))
+ return None
if not res:
+ luci_log.debug_verbose('addFs error: fs obj was not created')
return None
# XXX: sanity check these fields
+ errors = list()
try:
name = form['resourceName'].strip()
res.attr_hash['name'] = name
- except:
- return None
+ except Exception, e:
+ err = str(e)
+ errors.append(err)
+ luci_log.debug_verbose('addFs error: %s' % err)
try:
mountpoint = form['mountpoint'].strip()
res.attr_hash['mountpoint'] = mountpoint
- except:
- return None
+ except Exception, e:
+ err = str(e)
+ errors.append(err)
+ luci_log.debug_verbose('addFs error: %s' % err)
try:
device = form['device'].strip()
res.attr_hash['device'] = device
- except:
- return None
+ except Exception, e:
+ err = str(e)
+ errors.append(err)
+ luci_log.debug_verbose('addFs error: %s' % err)
try:
options = form['options'].strip()
res.attr_hash['options'] = options
- except:
- return None
+ except Exception, e:
+ err = str(e)
+ errors.append(err)
+ luci_log.debug_verbose('addFs error: %s' % err)
try:
fstype = form['fstype'].strip()
res.attr_hash['fstype'] = fstype
- except:
- return None
+ except Exception, e:
+ err = str(e)
+ errors.append(err)
+ luci_log.debug_verbose('addFs error: %s' % err)
try:
fsid = form['fsid'].strip()
res.attr_hash['fsid'] = fsid
- except:
- return None
+ except Exception, e:
+ err = str(e)
+ errors.append(err)
+ luci_log.debug_verbose('addFs error: %s' % err)
if form.has_key('forceunmount'):
res.attr_hash['force_unmount'] = '1'
@@ -3192,27 +3467,33 @@
else:
res.attr_hash['force_fsck'] = '0'
- modelb.getResourcesPtr().addChild(res)
- return res
+ if len(errors) > 0:
+ return [None, None, errors]
+ return [res, modelb, None]
def addGfs(request, form=None):
if form is None:
form = request.form
+ if not form:
+ luci_log.debug_verbose('addGfs error: form is missing')
+ return None
+
modelb = request.SESSION.get('model')
if not modelb:
+ luci_log.debug_verbose('addGfs error: modelb is missing')
return None
if form.has_key('edit'):
try:
oldname = form['oldname'].strip()
if not oldname:
- raise KeyError('oldname is blank.')
+ raise KeyError, 'oldname is blank.'
res = getResourceForEdit(modelb, oldname)
if not res:
luci_log.debug('resource %s was not found for editing' % oldname)
return None
- except KeyError, e:
+ except Exception, e:
luci_log.debug('resource %s was not found for editing: %s' \
% (oldname, str(e)))
return None
@@ -3220,286 +3501,387 @@
try:
res = apply(Clusterfs)
if not res:
- raise
+ raise Exception, 'apply(Clusterfs) is None'
+ except Exception, e:
+ luci_log.debug('addGfs error: %s' % str(e))
+ return None
except:
- luci_log.debug('Error creating node Clusterfs resource')
+ luci_log.debug('addGfs error')
return None
# XXX: sanity check these fields
+ errors = list()
try:
name = form['resourceName'].strip()
if not name:
- raise
+ raise KeyError, 'resourceName is blank'
res.attr_hash['name'] = name
- except:
- luci_log.debug_verbose('name is missing in clusterfs res')
- return None
+ except Exception, e:
+ err = str(e)
+ errors.append(err)
+ luci_log.debug_verbose('addGfs error: %s' % err)
try:
mountpoint = form['mountpoint'].strip()
res.attr_hash['mountpoint'] = mountpoint
- except:
- luci_log.debug_verbose('mountpoint is missing in clusterfs res')
- return None
+ except Exception, e:
+ err = str(e)
+ errors.append(err)
+ luci_log.debug_verbose('addGfs error: %s' % err)
try:
device = form['device'].strip()
res.attr_hash['device'] = device
- except:
- luci_log.debug_verbose('device is missing in clusterfs res')
- return None
+ except Exception, e:
+ err = str(e)
+ errors.append(err)
+ luci_log.debug_verbose('addGfs error: %s' % err)
try:
options = form['options'].strip()
res.attr_hash['options'] = options
- except:
- luci_log.debug_verbose('options is missing in clusterfs res')
- return None
+ except Exception, e:
+ err = str(e)
+ errors.append(err)
+ luci_log.debug_verbose('addGfs error: %s' % err)
try:
fsid = form['fsid'].strip()
res.attr_hash['fsid'] = fsid
- except:
- luci_log.debug_verbose('fsid is missing in clusterfs res')
- return None
+ except Exception, e:
+ err = str(e)
+ errors.append(err)
+ luci_log.debug_verbose('addGfs error: %s' % err)
if form.has_key('forceunmount'):
res.attr_hash['force_unmount'] = '1'
else:
res.attr_hash['force_unmount'] = '0'
- modelb.getResourcesPtr().addChild(res)
- return res
+ if len(errors) > 0:
+ return [None, None, errors]
+ return [res, modelb, None]
def addNfsm(request, form=None):
if form is None:
form = request.form
- modelb = request.SESSION.get('model')
- if not form or not modelb:
+ if not form:
+ luci_log.debug_verbose('addNfsm error: form is missing')
+ return None
+
+ modelb = request.SESSION.get('model')
+ if not modelb:
+ luci_log.debug_verbose('addNfsm error: modelb is missing')
return None
if form.has_key('edit'):
try:
oldname = form['oldname'].strip()
if not oldname:
- raise KeyError('oldname is blank.')
+ raise KeyError, 'oldname is blank.'
res = getResourceForEdit(modelb, oldname)
- except KeyError, e:
+ except Exception, e:
+ luci_log.debug_verbose('addNfsm error: %s' % str(e))
return None
else:
- res = apply(Netfs)
+ try:
+ res = apply(Netfs)
+ except Exception, e:
+ luci_log.debug_verbose('addNfsm error: %s' % str(e))
+ return None
if not res:
return None
# XXX: sanity check these fields
+ errors = list()
try:
name = form['resourceName'].strip()
if not name:
- raise
+ raise KeyError, 'resourceName is blank'
res.attr_hash['name'] = name
- except:
- return None
+ except Exception, e:
+ err = str(e)
+ errors.append(err)
+ luci_log.debug_verbose('addNfsm error: %s' % err)
try:
mountpoint = form['mountpoint'].strip()
res.attr_hash['mountpoint'] = mountpoint
- except:
- return None
-
+ except Exception, e:
+ err = str(e)
+ errors.append(err)
+ luci_log.debug_verbose('addNfsm error: %s' % err)
+
try:
host = form['host'].strip()
res.attr_hash['host'] = host
- except:
- return None
+ except Exception, e:
+ err = str(e)
+ errors.append(err)
+ luci_log.debug_verbose('addNfsm error: %s' % err)
try:
options = form['options'].strip()
res.attr_hash['options'] = options
- except:
- return None
+ except Exception, e:
+ err = str(e)
+ errors.append(err)
+ luci_log.debug_verbose('addNfsm error: %s' % err)
try:
exportpath = form['exportpath'].strip()
res.attr_hash['exportpath'] = exportpath
- except:
- return None
+ except Exception, e:
+ err = str(e)
+ errors.append(err)
+ luci_log.debug_verbose('addNfsm error: %s' % err)
try:
nfstype = form['nfstype'].strip().lower()
if nfstype != 'nfs' and nfstype != 'nfs4':
- raise
+ raise KeyError, 'invalid nfs type: %s' % nfstype
res.attr_hash['nfstype'] = nfstype
- except:
- return None
+ except Exception, e:
+ err = str(e)
+ errors.append(err)
+ luci_log.debug_verbose('addNfsm error: %s' % err)
if form.has_key('forceunmount'):
res.attr_hash['force_unmount'] = '1'
else:
res.attr_hash['force_unmount'] = '0'
- modelb.getResourcesPtr().addChild(res)
- return res
+ if len(errors) > 0:
+ return [None, None, errors]
+ return [res, modelb, None]
def addNfsc(request, form=None):
if form is None:
form = request.form
- modelb = request.SESSION.get('model')
- if not form or not modelb:
+ if not form:
+ luci_log.debug_verbose('addNfsc error: form is missing')
+ return None
+
+ modelb = request.SESSION.get('model')
+ if not modelb:
+ luci_log.debug_verbose('addNfsc error: modelb is missing')
return None
if form.has_key('edit'):
try:
oldname = form['oldname'].strip()
if not oldname:
- raise KeyError('oldname is blank.')
+ raise KeyError, 'oldname is blank.'
res = getResourceForEdit(modelb, oldname)
- except KeyError, e:
+ except Exception, e:
+ luci_log.debug_verbose('addNfsc error: %s' % str(e))
return None
else:
- res = apply(NFSClient)
+ try:
+ res = apply(NFSClient)
+ except Exception, e:
+ luci_log.debug_verbose('addNfsc error: %s' % str(e))
+ return None
if not res:
+ luci_log.debug_verbose('addNfsc error: res is none')
return None
+ errors = list()
try:
name = form['resourceName'].strip()
if not name:
- raise
+ raise KeyError, 'resourceName is blank'
res.attr_hash['name'] = name
- except:
- return None
+ except Exception, e:
+ err = str(e)
+ errors.append(err)
+ luci_log.debug_verbose('addNfsc error: %s' % err)
try:
target = form['target'].strip()
res.attr_hash['target'] = target
- except:
- return None
+ except Exception, e:
+ err = str(e)
+ errors.append(err)
+ luci_log.debug_verbose('addNfsc error: %s' % err)
try:
options = form['options'].strip()
res.attr_hash['options'] = options
- except:
- return None
-
- modelb.getResourcesPtr().addChild(res)
- return res
+ except Exception, e:
+ err = str(e)
+ errors.append(err)
+ luci_log.debug_verbose('addNfsc error: %s' % err)
+
+ if len(errors) > 0:
+ return [None, None, errors]
+ return [res, modelb, None]
def addNfsx(request, form=None):
if form is None:
form = request.form
- modelb = request.SESSION.get('model')
- if not modelb or not form:
+ if not form:
+ luci_log.debug_verbose('addNfsx error: form is missing')
+ return None
+
+ modelb = request.SESSION.get('model')
+ if not modelb:
+ luci_log.debug_verbose('addNfsx error: modelb is missing')
return None
if form.has_key('edit'):
try:
oldname = form['oldname'].strip()
if not oldname:
- raise KeyError('oldname is blank.')
+ raise KeyError, 'oldname is blank.'
res = getResourceForEdit(modelb, oldname)
- except KeyError, e:
+ except Exception, e:
+ luci_log.debug_verbose('addNfsx error: %s' % str(e))
return None
else:
- res = apply(NFSExport)
+ try:
+ res = apply(NFSExport)
+ except Exception, e:
+ luci_log.debug_verbose('addNfsx error: %s' % str(e))
+ return None
if not res:
+ luci_log.debug_verbose('addNfsx error: res is None')
return None
+ errors = list()
try:
name = form['resourceName'].strip()
if not name:
- raise
+ raise KeyError, 'resourceName is blank'
res.attr_hash['name'] = name
- except:
- return None
-
- modelb.getResourcesPtr().addChild(res)
- return res
+ except Exception, e:
+ err = str(e)
+ errors.append(err)
+ luci_log.debug_verbose('addNfsx error: %s' % err)
+
+ if len(errors) > 0:
+ return [None, None, errors]
+ return [res, modelb, None]
def addScr(request, form=None):
if form is None:
form = request.form
- modelb = request.SESSION.get('model')
- form = request.form
- if not modelb or not form:
+ if not form:
+ luci_log.debug_verbose('addScr error: form is missing')
+ return None
+
+ modelb = request.SESSION.get('model')
+ if not modelb:
+ luci_log.debug_verbose('addScr error: modelb is missing')
return None
if form.has_key('edit'):
try:
oldname = form['oldname'].strip()
if not oldname:
- raise KeyError('oldname is blank.')
+ raise KeyError, 'oldname is blank.'
res = getResourceForEdit(modelb, oldname)
- except KeyError, e:
+ except Exception, e:
+ luci_log.debug_verbose('addScr error: %s' % str(e))
return None
else:
- res = apply(Script)
+ try:
+ res = apply(Script)
+ except Exception, e:
+ luci_log.debug_verbose('addScr error: %s' % str(e))
+ return None
if not res:
+ luci_log.debug_verbose('addScr error: res is None')
return None
+ errors = list()
try:
name = form['resourceName'].strip()
if not name:
- raise
+ raise KeyError, 'resourceName is blank'
res.attr_hash['name'] = name
- except:
- return None
+ except Exception, e:
+ err = str(e)
+ errors.append(err)
+ luci_log.debug_verbose('addScr error: %s' % err)
try:
file = form['file'].strip()
if not file:
- raise
+ raise KeyError, 'file path is blank'
res.attr_hash['file'] = file
- except:
- return None
-
- modelb.getResourcesPtr().addChild(res)
- return res
+ except Exception, e:
+ err = str(e)
+ errors.append(err)
+ luci_log.debug_verbose('addScr error: %s' % err)
+
+ if len(errors) > 0:
+ return [None, None, errors]
+ return [res, modelb, None]
def addSmb(request, form=None):
if form is None:
form = request.form
- modelb = request.SESSION.get('model')
- if not modelb or not form:
+ if not form:
+ luci_log.debug_verbose('addSmb error: form is missing')
+ return None
+
+ modelb = request.SESSION.get('model')
+ if not modelb:
+ luci_log.debug_verbose('addSmb error: modelb is missing')
return None
if form.has_key('edit'):
try:
oldname = form['oldname'].strip()
if not oldname:
- raise KeyError('oldname is blank.')
+ raise KeyError, 'oldname is blank.'
res = getResourceForEdit(modelb, oldname)
- except KeyError, e:
+ except Exception, e:
+ luci_log.debug_verbose('addSmb error: %s' % str(e))
return None
else:
- res = apply(Samba)
+ try:
+ res = apply(Samba)
+ except Exception, e:
+ luci_log.debug_verbose('addSmb error: %s' % str(e))
+ return None
if not res:
+ luci_log.debug_verbose('addSmb error: res is None')
return None
+ errors = list()
try:
name = form['resourceName'].strip()
if not name:
- raise
+ raise KeyError, 'resourceName is blank'
res.attr_hash['name'] = name
- except:
- return None
+ except Exception, e:
+ err = str(e)
+ errors.append(err)
+ luci_log.debug_verbose('addSmb error: %s' % err)
try:
workgroup = form['workgroup'].strip()
res.attr_hash['workgroup'] = workgroup
- except:
- return None
-
- modelb.getResourcesPtr().addChild(res)
- return res
+ except Exception, e:
+ err = str(e)
+ errors.append(err)
+ luci_log.debug_verbose('addSmb error: %s' % err)
+
+ if len(errors) > 0:
+ return [None, None, errors]
+ return [res, modelb, None]
resourceAddHandler = {
'ip': addIp,
@@ -3582,48 +3964,37 @@
return messages
-def addResource(self, rc, request):
- if not request.form:
- return (False, {'errors': ['No form was submitted.']})
+def addResource(self, request, modelb, res):
+ clustername = modelb.getClusterName()
+ if not clustername:
+ raise Exception, 'cluster name from modelb.getClusterName() is blank'
+
+ rc = getRicciAgent(self, clustername)
+ if not rc:
+ raise Exception, 'Unable to find a ricci agent for the %s cluster' % clustername
- try:
- type = request.form['type'].strip()
- if not type or not type in resourceAddHandler:
- raise
- except:
- return (False, {'errors': ['Form type is missing.']})
-
- try:
- resname = request.form['resourceName']
- except KeyError, e:
- # For IP, the IP address itself is the name.
- if request.form['type'] != 'ip':
- return (False, {'errors': ['No resource name was given.']})
+ modelb.getResourcesPtr().addChild(res)
try:
- clustername = request['clustername']
- except KeyError, e:
- try:
- clustername = request.form['clustername']
- except:
- return 'unable to determine the current cluster\'s name'
-
- res = resourceAddHandler[type](request)
- modelb = request.SESSION.get('model')
- modelstr = ""
- conf = modelb.exportModelAsString()
+ conf = modelb.exportModelAsString()
+ if not conf:
+ raise Exception, 'model string for %s is blank' % clustername
+ except Exception, e:
+ luci_log.debug_verbose('addResource: exportModelAsString err: %s' % str(e))
+ return 'An error occurred while adding this resource'
try:
ragent = rc.hostname()
if not ragent:
- luci_log.debug('missing hostname')
- raise
- batch_number, result = setClusterConf(str(conf))
+ luci_log.debug_verbose('missing hostname')
+ raise Exception, 'unknown ricci agent hostname'
+ luci_log.debug_verbose('SENDING NEW CLUSTER CONF: %s' % conf)
+ batch_number, result = setClusterConf(rc, str(conf))
if batch_number is None or result is None:
- luci_log.debug('missing batch_number or result')
- raise
- except:
- return "Some error occured in setClusterConf\n"
+ luci_log.debug_verbose('missing batch_number or result')
+ raise Exception, 'batch_number or results is None from setClusterConf'
+ except Exception, e:
+ return 'An error occurred while propagating the new cluster.conf: %s' % str(e)
path = str(CLUSTER_FOLDER_PATH + clustername)
clusterfolder = self.restrictedTraverse(path)
@@ -3639,7 +4010,7 @@
flag.manage_addProperty(TASKTYPE, RESOURCE_ADD, "string")
if type != 'ip':
- flag.manage_addProperty(FLAG_DESC, "Creating New Resource \'" + request.form['resourceName'] + "\'", "string")
+ flag.manage_addProperty(FLAG_DESC, "Creating New Resource \'" + res.attr_hash['name'] + "\'", "string")
else:
flag.manage_addProperty(FLAG_DESC, "Creating New Resource \'" + res.attr_hash['address'] + "\'", "string")
except Exception, e:
@@ -3668,7 +4039,7 @@
request.SESSION.set('model', model)
except:
luci_log.debug_verbose('Appending model to request failed')
- return False
+ return 'An error occurred while storing the cluster model.'
def resolve_nodename(self, clustername, nodename):
path = str(CLUSTER_FOLDER_PATH + clustername)
@@ -3733,7 +4104,7 @@
try:
cluster_conf_node = getClusterConf(rc)
if not cluster_conf_node:
- raise;
+ raise
except:
luci_log.debug('unable to get cluster_conf_node in getModelBuilder')
return None
--- conga/luci/site/luci/Extensions/homebase_adapters.py 2006/10/30 20:20:04 1.34.2.2
+++ conga/luci/site/luci/Extensions/homebase_adapters.py 2006/10/31 17:28:04 1.34.2.3
@@ -14,9 +14,6 @@
from clusterOS import resolveOSType
from conga_constants import *
-class InCluster(Exception):
- pass
-
def siteIsSetup(self):
try:
if os.path.isfile(CERTS_DIR_PATH + 'privkey.pem') and os.path.isfile(CERTS_DIR_PATH + 'cacert.pem'):
@@ -661,23 +658,20 @@
except:
sessionData = None
+ try:
+ request.SESSION.delete('checkRet')
+ except:
+ pass
+
if 'ACTUAL_URL' in request:
url = request['ACTUAL_URL']
else:
url = '.'
- if 'pagetype' in request.form:
- pagetype = int(request.form['pagetype'])
- else:
- try: request.SESSION.set('checkRet', {})
- except: pass
- return homebasePortal(self, request, '.', '0')
-
try:
+ pagetype = int(request.form['pagetype'])
validatorFn = formValidators[pagetype - 1]
except:
- try: request.SESSION.set('checkRet', {})
- except: pass
return homebasePortal(self, request, '.', '0')
if validatorFn == validateAddClusterInitial or validatorFn == validateAddCluster:
@@ -705,7 +699,7 @@
return homebaseControlPost(self, request)
try:
- request.SESSION.set('checkRet', {})
+ request.SESSION.delete('checkRet')
except:
pass
--- conga/luci/site/luci/Extensions/ricci_bridge.py 2006/10/25 16:00:40 1.30.2.4
+++ conga/luci/site/luci/Extensions/ricci_bridge.py 2006/10/31 17:28:04 1.30.2.5
@@ -2,6 +2,12 @@
from time import time, ctime
from xml.dom import minidom
from ricci_communicator import RicciCommunicator
+from LuciSyslog import LuciSyslog
+
+try:
+ luci_log = LuciSyslog()
+except:
+ pass
def checkBatch(rc, batch_id):
try:
@@ -200,20 +206,29 @@
return minidom.parseString(batch).firstChild
-def batchAttemptResult(self, doc):
- docc = None
- rc_node = None
+def batchAttemptResult(doc):
+ try:
+ batch = doc.getElementsByTagName('batch')
+ if not batch or len(batch) < 1:
+ raise Exception, 'no batch tag was found'
+ except Exception, e:
+ luci_log.debug_verbose('batchAttemptResult: %s' % str(e))
- for node in doc.firstChild.childNodes:
- if node.nodeType == xml.dom.Node.ELEMENT_NODE:
- if node.nodeName == 'batch':
- #get batch number and status code
- batch_number = node.getAttribute('batch_id')
- result = node.getAttribute('status')
- return (batch_number, result)
- else:
- #print "RETURNING NONE!!!"
- return (None, None)
+ for i in batch:
+ try:
+ batch_number = i.getAttribute('batch_id')
+ result = i.getAttribute('status')
+ return (str(batch_number), str(result))
+ except Exception, e:
+ luci_log.debug_verbose('batchAttemptResult: %s' % str(e))
+
+ try:
+ luci_log.debug_verbose('no batch with batchid and status found in \"%s\"' % doc.toxml())
+ except:
+ pass
+
+ return (None, None)
+
def getPayload(bt_node):
if not bt_node:
@@ -260,6 +275,20 @@
doc.appendChild(cl_node)
return doc
+def getClusterStatusBatch(rc):
+ batch_str ='<module name="cluster"><request API_version="1.0"><function_call name="status"/></request></module>'
+ ricci_xml = rc.batch_run(batch_str, async=False)
+
+ if not ricci_xml or not ricci_xml.firstChild:
+ luci_log.debug_verbose('ricci_xml is None from batch_run')
+
+ doc = getPayload(ricci_xml.firstChild)
+ if not doc or not doc.firstChild:
+ luci_log.debug_verbose('doc is None from getPayload: %s' % ricci_xml.toxml())
+ return None
+
+ return doc
+
def setClusterConf(rc, clusterconf, propagate=True):
if propagate == True:
propg = 'true'
@@ -274,10 +303,7 @@
batch_str = '<module name="cluster"><request API_version="1.0"><function_call name="set_cluster.conf"><var type="boolean" name="propagate" mutable="false" value="' + propg + '"/><var type="xml" mutable="false" name="cluster.conf">' + conf + '</var></function_call></request></module>'
ricci_xml = rc.batch_run(batch_str)
- doc = getPayload(ricci_xml)
- if not doc or not doc.firstChild:
- return (None, None)
- return batchAttemptResult(doc)
+ return batchAttemptResult(ricci_xml)
def getNodeLogs(rc):
errstr = 'log not accessible'
@@ -334,10 +360,7 @@
batch_str = '<module name="reboot"><request sequence="111" API_version="1.0"><function_call name="reboot_now"/></request></module>'
ricci_xml = rc.batch_run(batch_str)
- doc = getPayload(ricci_xml)
- if not doc or not doc.firstChild:
- return (None, None)
- return batchAttemptResult(doc)
+ return batchAttemptResult(ricci_xml)
def nodeLeaveCluster(rc, cluster_shutdown=False, purge=False):
cshutdown = 'false'
@@ -351,19 +374,13 @@
batch_str = '<module name="cluster"><request sequence="111" API_version="1.0"><function_call name="stop_node"><var mutable="false" name="cluster_shutdown" type="boolean" value="' + cshutdown + '"/><var mutable="false" name="purge_conf" type="boolean" value="' + purge_conf + '"/></function_call></request></module>'
ricci_xml = rc.batch_run(batch_str)
- doc = getPayload(ricci_xml)
- if not doc or not doc.firstChild:
- return (None, None)
- return batchAttemptResult(doc)
+ return batchAttemptResult(ricci_xml)
def nodeFence(rc, nodename):
batch_str = '<module name="cluster"><request sequence="111" API_version="1.0"><function_call name="fence_node"><var mutable="false" name="nodename" type="string" value="' + nodename + '"/></function_call></request></module>'
ricci_xml = rc.batch_run(batch_str)
- doc = getPayload(ricci_xml)
- if not doc or not doc.firstChild:
- return (None, None)
- return batchAttemptResult(doc)
+ return batchAttemptResult(ricci_xml)
def nodeJoinCluster(rc, cluster_startup=False):
cstartup = 'false'
@@ -373,10 +390,7 @@
batch_str = '<module name="cluster"><request sequence="111" API_version="1.0"><function_call name="start_node"><var mutable="false" name="cluster_startup" type="boolean" value="' + cstartup + '"/></function_call></request></module>'
ricci_xml = rc.batch_run(batch_str)
- doc = getPayload(ricci_xml)
- if not doc or not doc.firstChild:
- return (None, None)
- return batchAttemptResult(doc)
+ return batchAttemptResult(ricci_xml)
def startService(rc, servicename, preferrednode=None):
if preferrednode != None:
@@ -385,28 +399,19 @@
batch_str = '<module name="cluster"><request sequence="1254" API_version="1.0"><function_call name="start_service"><var mutable="false" name="servicename" type="string" value=\"' + servicename + '\"/></function_call></request></module>'
ricci_xml = rc.batch_run(batch_str)
- doc = getPayload(ricci_xml)
- if not doc or not doc.firstChild:
- return (None, None)
- return batchAttemptResult(doc)
+ return batchAttemptResult(ricci_xml)
def restartService(rc, servicename):
batch_str = '<module name="cluster"><request sequence="1254" API_version="1.0"><function_call name="restart_service"><var mutable="false" name="servicename" type="string" value=\"' + servicename + '\"/></function_call></request></module>'
ricci_xml = rc.batch_run(batch_str)
- doc = getPayload(ricci_xml)
- if not doc or not doc.firstChild:
- return (None, None)
- return batchAttemptResult(doc)
+ return batchAttemptResult(ricci_xml)
def stopService(rc, servicename):
batch_str = '<module name="cluster"><request sequence="1254" API_version="1.0"><function_call name="stop_service"><var mutable="false" name="servicename" type="string" value=\"' + servicename + '\"/></function_call></request></module>'
ricci_xml = rc.batch_run(batch_str)
- doc = getPayload(ricci_xml)
- if not doc or not doc.firstChild:
- return (None, None)
- return batchAttemptResult(doc)
+ return batchAttemptResult(ricci_xml)
def getDaemonStates(rc, dlist):
batch_str = '<module name="service"><request API_version="1.0"><function_call name="query"><var mutable="false" name="search" type="list_xml">'
@@ -417,9 +422,10 @@
batch_str += '</var></function_call></request></module>'
ricci_xml = rc.batch_run(batch_str, async=False)
- if not ricci_xml:
+ if not ricci_xml or not ricci_xml.firstChild:
+ luci_log.debug_verbose('no ricci_xml in getDaemonStates')
return None
- result = extractDaemonInfo(ricci_xml)
+ result = extractDaemonInfo(ricci_xml.firstChild)
return result
def extractDaemonInfo(bt_node):
--- conga/luci/site/luci/Extensions/ricci_communicator.py 2006/10/24 16:36:23 1.9.2.1
+++ conga/luci/site/luci/Extensions/ricci_communicator.py 2006/10/31 17:28:04 1.9.2.2
@@ -216,10 +216,12 @@
luci_log.debug('An error occurred while trying to process the batch job: %s' % batch_xml_str)
return None
- return ricci_xml
+ doc = minidom.Document()
+ doc.appendChild(ricci_xml)
+ return doc
def batch_report(self, batch_id):
- luci_log.debug_verbose('[auth=%d] asking for batchid# %d for host %s' \
+ luci_log.debug_verbose('[auth=%d] asking for batchid# %s for host %s' \
% (self.__authed, batch_id, self.__hostname))
if not self.authed():
@@ -242,7 +244,7 @@
if doc.firstChild.getAttribute('success') == '12':
return None
if doc.firstChild.getAttribute('success') != '0':
- raise RicciError, 'Error while retrieving batch report for batch #%s from host %s' % (batch_id, self.__hostname)
+ raise RicciError, 'Error while retrieving batch report for batch #%d from host %s' % (batch_id, self.__hostname)
batch_node = None
for node in doc.firstChild.childNodes:
if node.nodeType == xml.dom.Node.ELEMENT_NODE:
@@ -401,10 +403,10 @@
last = last + 1
last = last - 2 * last
try:
- luci_log.debug_verbose('Returning (%s, %s) for batch_status(\"%s\")' \
+ luci_log.debug_verbose('Returning (%d, %d) for batch_status(\"%s\")' \
% (last, total, batch_xml.toxml()))
except:
- pass
+ luci_log.debug_verbose('Returning last, total')
return (last, total)
More information about the Cluster-devel
mailing list