
[Cluster-devel] conga/luci cluster/busy_wait-macro cluster/for ...



CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	rmccabe sourceware org	2008-01-25 00:36:59

Modified files:
	luci/cluster   : busy_wait-macro form-macros 
	luci/homebase  : validate_cluster_add.js 
	luci/site/luci/Extensions: LuciDB.py RicciQueries.py 
	                           cluster_adapters.py 

Log message:
	Fix a few bugs found while testing:

	- Reboot nodes during cluster creation only when the new
	  "reboot_nodes" checkbox is checked, instead of whenever any
	  package set is installed, and give the checkbox an id attribute
	  so the client-side confirmation code can find it.
	- Mention the reboot in the confirmation dialogs only when a
	  reboot was actually requested.
	- Store the first element of each node's batch_result in
	  batch_id_map rather than the whole result.
	- Keep the <hr/> separator inside each node report block on the
	  busy-wait page.

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/cluster/busy_wait-macro.diff?cvsroot=cluster&r1=1.2&r2=1.3
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/cluster/form-macros.diff?cvsroot=cluster&r1=1.220&r2=1.221
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/homebase/validate_cluster_add.js.diff?cvsroot=cluster&r1=1.13&r2=1.14
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/LuciDB.py.diff?cvsroot=cluster&r1=1.8&r2=1.9
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/RicciQueries.py.diff?cvsroot=cluster&r1=1.10&r2=1.11
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.280&r2=1.281

--- conga/luci/cluster/busy_wait-macro	2008/01/14 20:51:42	1.2
+++ conga/luci/cluster/busy_wait-macro	2008/01/25 00:36:59	1.3
@@ -65,6 +65,6 @@
 					tal:attributes="onclick python:'javascript:document.stop_waiting_form%s.submit()' % nodereport.get('report_index')">Stop waiting for this job to complete</a>
 			</form>
 		</div>
+		<hr/>
 	</div>
-	<hr/>
 </div>
--- conga/luci/cluster/form-macros	2008/01/23 04:34:09	1.220
+++ conga/luci/cluster/form-macros	2008/01/25 00:36:59	1.221
@@ -225,7 +225,7 @@
 							checked add_cluster/shared_storage |string:checked" />Enable Shared Storage Support
 				</td></tr>
 				<tr class="systemsTable"><td colspan="2" class="systemsTable">
-					<input type="checkbox" name="reboot_nodes"
+					<input type="checkbox" id="reboot_nodes" name="reboot_nodes"
 						tal:attributes="checked python:(add_cluster and add_cluster.get('reboot_nodes')) and 'checked' or ''" />Reboot nodes before joining cluster
 				</td></tr>
 				<tr class="systemsTable"><td colspan="2" class="systemsTable">
@@ -1135,7 +1135,7 @@
 							checked add_cluster/shared_storage | string:checked" />Enable Shared Storage Support
 				</td></tr>
 				<tr class="systemsTable"><td colspan="2" class="systemsTable">
-					<input type="checkbox" name="reboot_nodes"
+					<input type="checkbox" id="reboot_nodes" name="reboot_nodes"
 						tal:attributes="checked python:(add_cluster and add_cluster.get('reboot_nodes')) and 'checked' or ''" />Reboot nodes before joining cluster
 				</td></tr>
 				<tr class="systemsTable"><td colspan="2" class="systemsTable">
--- conga/luci/homebase/validate_cluster_add.js	2008/01/02 20:52:23	1.13
+++ conga/luci/homebase/validate_cluster_add.js	2008/01/25 00:36:59	1.14
@@ -48,14 +48,21 @@
 		return (-1);
 	}
 
+	var reboot_nodes = document.getElementById('reboot_nodes');
 	var view_certs = document.getElementById('view_certs');
 	if (!view_certs || !view_certs.checked) {
 		var confirm_str = '';
 		if (form.addnode) {
-			confirm_str = 'Add ' + (added_storage.length > 1 ? 'these nodes' : 'this node') + ' to the \"' + clustername + '\" cluster?\nEach node added will be rebooted during this process.';
+			confirm_str = 'Add ' + (added_storage.length > 1 ? 'these nodes' : 'this node') + ' to the \"' + clustername + '\" cluster?';
+			if (reboot_nodes && reboot_nodes.checked) {
+				confirm_str += '\nEach node added will be rebooted during this process.';
+			}
 		} else {
 			if (form.cluster_create) {
-				confirm_str = 'All nodes added to this cluster will be rebooted as part of this process.\n\nCreate cluster \"' + clustername + '\"?';
+				confirm_str = 'Create cluster \"' + clustername + '\"?\n\n';
+				if (reboot_nodes && reboot_nodes.checked) {
+					confirm_str += 'All nodes added to this cluster will be rebooted as part of this process.\n\n';
+				}
 			} else {
 				confirm_str = 'Add the cluster \"' + clustername + '\" to the Luci management interface?';
 			}
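
The new reboot_nodes lookup above works because of the id attribute added
in form-macros: document.getElementById matches the id attribute, not the
name attribute, so against the old markup the variable would have been
null and the reboot warning would never have been shown. The resulting
confirmation logic, condensed as a sketch in Python (function and
parameter names are hypothetical):

def confirm_message(addnode, cluster_create, clustername,
                    reboot_nodes_checked, num_nodes):
    # Build the confirmation string; the reboot warning appears only
    # when the reboot_nodes checkbox is checked, matching the
    # server-side need_reboot change in RicciQueries.py below.
    if addnode:
        if num_nodes > 1:
            msg = 'Add these nodes to the "%s" cluster?' % clustername
        else:
            msg = 'Add this node to the "%s" cluster?' % clustername
        if reboot_nodes_checked:
            msg += '\nEach node added will be rebooted during this process.'
    elif cluster_create:
        msg = 'Create cluster "%s"?\n\n' % clustername
        if reboot_nodes_checked:
            msg += ('All nodes added to this cluster will be rebooted '
                    'as part of this process.\n\n')
    else:
        msg = ('Add the cluster "%s" to the Luci management '
               'interface?' % clustername)
    return msg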
--- conga/luci/site/luci/Extensions/LuciDB.py	2008/01/14 20:51:42	1.8
+++ conga/luci/site/luci/Extensions/LuciDB.py	2008/01/25 00:36:59	1.9
@@ -335,14 +335,17 @@
 			objname = '%s____flag' % key
 
 			clusterfolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
+
 			# now designate this new object properly
 			objpath = str('%s/%s' % (path, objname))
 			flag = self.restrictedTraverse(objpath)
 
 			flag.manage_addProperty(BATCH_ID, batch_id, 'string')
 			flag.manage_addProperty(TASKTYPE, CLUSTER_ADD, 'string')
-			flag.manage_addProperty(FLAG_DESC, 'Creating node "%s" for cluster "%s"' % (key, clustername), 'string')
 			flag.manage_addProperty(LAST_STATUS, 0, 'int')
+			flag.manage_addProperty(FLAG_DESC,
+				'Creating node "%s" for cluster "%s"' % (key, clustername),
+				'string')
 		except Exception, e:
 			if LUCI_DEBUG_MODE is True:
 				luci_log.debug_verbose('buildCCF1: error creating flag for %s: %r %s' % (key, e, str(e)))
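
The LuciDB.py hunk is mostly a reformat, but for context: this code
("buildCCF1" in the log string, presumably buildClusterCreateFlags)
creates one Zope flag object per node so the busy-wait page can poll the
ricci batch later. A condensed sketch of the pattern, reusing only the
calls shown in the diff; the property-name constants (BATCH_ID, TASKTYPE,
CLUSTER_ADD, LAST_STATUS, FLAG_DESC) are defined elsewhere in the
Extensions modules, and the helper name is hypothetical:

def add_create_flag(self, clusterfolder, path, key, batch_id, clustername):
    # One flag object per node, named '<node>____flag'.
    objname = '%s____flag' % key
    clusterfolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)

    # Look the new object up and record the batch id, task type,
    # initial status and a human-readable description on it.
    flag = self.restrictedTraverse(str('%s/%s' % (path, objname)))
    flag.manage_addProperty(BATCH_ID, batch_id, 'string')
    flag.manage_addProperty(TASKTYPE, CLUSTER_ADD, 'string')
    flag.manage_addProperty(LAST_STATUS, 0, 'int')
    flag.manage_addProperty(FLAG_DESC,
        'Creating node "%s" for cluster "%s"' % (key, clustername),
        'string')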
--- conga/luci/site/luci/Extensions/RicciQueries.py	2008/01/23 04:34:09	1.10
+++ conga/luci/site/luci/Extensions/RicciQueries.py	2008/01/25 00:36:59	1.11
@@ -68,7 +68,7 @@
 	batch.append('</request>')
 	batch.append('</module>')
 
-	need_reboot = reboot_nodes or install_base or install_services or install_shared_storage or install_LVS
+	need_reboot = reboot_nodes
 	if need_reboot:
 		batch.append('<module name="reboot">')
 		batch.append('<request API_version="1.0">')
@@ -183,7 +183,7 @@
 	batch.append('</request>')
 	batch.append('</module>')
 
-	need_reboot = reboot_nodes or install_base or install_services or install_shared_storage or install_LVS
+	need_reboot = reboot_nodes
 	if need_reboot:
 		batch.append('<module name="reboot">')
 		batch.append('<request API_version="1.0">')
@@ -738,11 +738,12 @@
 					install_shared_storage,
 					install_LVS,
 					upgrade_rpms,
-					gulm_lockservers):
+					gulm_lockservers,
+					reboot_nodes=False):
 
 	batch_str = createClusterBatch(os_str, cluster_name, cluster_alias,
                         nodeList, install_base, install_services,
                         install_shared_storage, install_LVS, upgrade_rpms,
-                        gulm_lockservers)
+                        gulm_lockservers, reboot_nodes)
 	ricci_xml = rc.batch_run(batch_str)
 	return batchAttemptResult(ricci_xml)
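
The substantive change in RicciQueries.py: a reboot module used to be
queued whenever any of the package sets was being installed; now it is
queued only when the caller explicitly requested a reboot, and
create_cluster grows a reboot_nodes keyword argument (defaulting to
False) that it forwards to createClusterBatch. The new control flow,
sketched with the request body elided as in the hunks above (the helper
name is hypothetical):

def _append_reboot_module(batch, reboot_nodes):
    # Reboot only on explicit request; before this change, need_reboot
    # was also true when install_base, install_services,
    # install_shared_storage or install_LVS was set.
    need_reboot = reboot_nodes
    if need_reboot:
        batch.append('<module name="reboot">')
        batch.append('<request API_version="1.0">')
        # ... reboot request body, unchanged from the existing code ...
        batch.append('</request>')
        batch.append('</module>')

Callers opt in explicitly; the cluster_adapters.py hunk below passes
add_cluster['reboot_nodes'] through send_batch_to_hosts.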
--- conga/luci/site/luci/Extensions/cluster_adapters.py	2008/01/23 04:34:09	1.280
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2008/01/25 00:36:59	1.281
@@ -280,7 +280,8 @@
 	ret = send_batch_to_hosts(node_list, 10, rq.create_cluster, 
 			add_cluster['cluster_os'], clustername, clustername,
 			node_list, True, True, add_cluster['shared_storage'], False,
-			add_cluster['download_pkgs'], lockservers, add_cluster['reboot_nodes'])
+			add_cluster['download_pkgs'], lockservers,
+			add_cluster['reboot_nodes'])
 
 	batch_id_map = {}
 	for i in ret.iterkeys():
@@ -291,14 +292,13 @@
 			if LUCI_DEBUG_MODE is True:
 				luci_log.debug_verbose(msg)
 			continue
-		batch_id_map[i] = ret[i]['batch_result']
+		batch_id_map[i] = ret[i]['batch_result'][0]
 
 	if len(batch_id_map) == 0:
 		request.SESSION.set('create_cluster', add_cluster)
 		return (False, { 'errors': errors, 'messages': messages })
 
 	buildClusterCreateFlags(self, batch_id_map, clustername)
-
 	response = request.RESPONSE
 	response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
 		% (request['URL'], CLUSTER_CONFIG, clustername))
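
The batch_id_map fix implies that each per-host 'batch_result' is a
sequence rather than a bare batch id, and that only its first element,
the batch id itself, should be stored (buildClusterCreateFlags records
it as a string property on each flag object). A hypothetical
illustration of the shape being handled:

# Hypothetical result shape: batch_result as a (batch_id, result) pair.
ret = {
    'node1.example.com': {'batch_result': ('1201221419', '<batch/>')},
}
batch_id_map = {}
for i in ret.iterkeys():
    batch_id_map[i] = ret[i]['batch_result'][0]
# batch_id_map == {'node1.example.com': '1201221419'}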

