
[Cluster-devel] conga/luci cluster/form-macros site/luci/Exten ...



CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	rmccabe sourceware org	2007-01-31 05:26:45

Modified files:
	luci/cluster   : form-macros 
	luci/site/luci/Extensions: cluster_adapters.py ricci_bridge.py 

Log message:
	GULM cluster deployment: add GULM lock-server fields to the create-cluster
	form, validate that exactly 1, 3, or 5 lock servers are given, and emit a
	<gulm> section (instead of <cman>) in the generated cluster.conf.

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/cluster/form-macros.diff?cvsroot=cluster&r1=1.167&r2=1.168
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.218&r2=1.219
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/ricci_bridge.py.diff?cvsroot=cluster&r1=1.54&r2=1.55

--- conga/luci/cluster/form-macros	2007/01/30 22:26:00	1.167
+++ conga/luci/cluster/form-macros	2007/01/31 05:26:44	1.168
@@ -309,7 +309,8 @@
 									tal:attributes="checked python: add_cluster and 'lockmanager' in add_cluster and add_cluster['lockmanager'] == 'gulm'"
 								>GULM
 							</li>
-							<div id="gulm_lockservers" class="invisible">
+							<div id="gulm_lockservers"
+								tal:attributes="class python: (add_cluster and 'lockmanager' in add_cluster and add_cluster['lockmanager'] != 'gulm') and 'invisible' or ''">
 								<fieldset>
 								<legend class="rescfg">GULM lock server properties</legend>
 								<p>You must enter exactly 1, 3, or 5 GULM lock servers.</p>
@@ -322,7 +323,7 @@
 												name="__GULM__:server1"
 												tal:attributes="
 													disabled python: not add_cluster or not 'lockmanager' in add_cluster or add_cluster['lockmanager'] != 'gulm';
-													value gulm_lockservers/server1 | nothing" />
+													value add_cluster/gulm_lockservers/server1 | nothing" />
 										</td>
 									</tr>
 									<tr>
@@ -332,7 +333,7 @@
 												name="__GULM__:server2"
 												tal:attributes="
 													disabled python: not add_cluster or not 'lockmanager' in add_cluster or add_cluster['lockmanager'] != 'gulm';
-													value gulm_lockservers/server2 | nothing" />
+													value add_cluster/gulm_lockservers/server2 | nothing" />
 										</td>
 									</tr>
 									<tr>
@@ -342,7 +343,7 @@
 												name="__GULM__:server3"
 												tal:attributes="
 													disabled python: not add_cluster or not 'lockmanager' in add_cluster or add_cluster['lockmanager'] != 'gulm';
-													value gulm_lockservers/server3 | nothing" />
+													value add_cluster/gulm_lockservers/server3 | nothing" />
 										</td>
 									</tr>
 									<tr>
@@ -352,7 +353,7 @@
 												name="__GULM__:server4"
 												tal:attributes="
 													disabled python: not add_cluster or not 'lockmanager' in add_cluster or add_cluster['lockmanager'] != 'gulm';
-													value gulm_lockservers/server4 | nothing" />
+													value add_cluster/gulm_lockservers/server4 | nothing" />
 										</td>
 									</tr>
 									<tr>
@@ -362,7 +363,7 @@
 												name="__GULM__:server5"
 												tal:attributes="
 													disabled python: not add_cluster or not 'lockmanager' in add_cluster or add_cluster['lockmanager'] != 'gulm';
-													value gulm_lockservers/server5 | nothing" />
+													value add_cluster/gulm_lockservers/server5 | nothing" />
 										</td>
 									</tr>
 								</table>
@@ -3735,7 +3736,7 @@
 
 <div metal:define-macro="xenvmadd-form">
   <span tal:define="global vmforminfo python: here.getXenVMInfo(modelb, request)"/>
-  <form method="get" action="" tal:attributes="action vmforminfo/formurl">
+  <form method="get" tal:attributes="action vmforminfo/formurl">
   <h4>Name for this VM: </h4><input type="text" name="xenvmname" value=""/>
   <h4>Path to configuration file: </h4><input type="text" name="xenvmpath" value=""/>
   <input type="submit" value="Create Xen VM"/>
@@ -3745,7 +3746,7 @@
 <div metal:define-macro="xenvmconfig-form">
   <h4>Properties for Xen VM <font color="green"><span tal:content="request/servicename"/></font></h4>
   <span tal:define="global xeninfo python:here.getXenVMInfo(modelb, request)">
-  <form method="get" action="" tal:attributes="action xeninfo/formurl">
+  <form method="get" tal:attributes="action xeninfo/formurl">
   <h4>Name of VM: </h4><input type="text" name="xenvmname" value="" tal:attributes="value xeninfo/name"/>
   <h4>Path to configuration file: </h4><input type="text" name="xenvmpath" value="" tal:attributes="value xeninfo/path"/>
   <input type="button" value="Delete"/>
--- conga/luci/site/luci/Extensions/cluster_adapters.py	2007/01/30 21:41:56	1.218
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2007/01/31 05:26:45	1.219
@@ -235,6 +235,46 @@
 	if len(clusterName) > 15:
 		errors.append('A cluster\'s name must be less than 16 characters long.')
 
+	try:
+		cluster_os = add_cluster['cluster_os']
+	except:
+		pass
+
+	lockmanager = 'dlm'
+	if cluster_os == 'rhel4':
+		add_cluster['gulm_support'] = True
+		if not request.form.has_key('lockmanager'):
+			# The user hasn't been presented with the RHEL4
+			# lock manager options yet.
+			incomplete = True
+		else:
+			try:
+				lockmanager = request.form['lockmanager'].strip()
+			except:
+				lockmanager = 'dlm'
+
+	lockservers = None
+	if lockmanager == 'gulm':
+		add_cluster['lockmanager'] = 'gulm'
+		try:
+			lockservers = filter(lambda x: x.strip(), request.form['__GULM__'])
+			if not lockservers or len(lockservers) < 1:
+				raise Exception, 'blank'
+			num_lockservers = len(lockservers)
+			if not num_lockservers in (1, 3, 5):
+				errors.append('You must have exactly 1, 3, or 5 GULM lock servers. You submitted %d lock servers.' % num_lockservers)
+		except:
+			errors.append('No lock servers were given.')
+
+		if len(errors) > 0:
+			try:
+				ls_hash = {}
+				for i in xrange(num_lockservers):
+					ls_hash['server%d' % (i + 1)] = lockservers[i]
+				add_cluster['gulm_lockservers'] = ls_hash
+			except:
+				pass
+
 	if incomplete or len(errors) > 0:
 		request.SESSION.set('create_cluster', add_cluster)
 		return (False, { 'errors': errors, 'messages': messages })
@@ -248,7 +288,8 @@
 					True,
 					add_cluster['shared_storage'],
 					False,
-					add_cluster['download_pkgs'])
+					add_cluster['download_pkgs'],
+					lockservers)
 
 	if not batchNode:
 		request.SESSION.set('create_cluster', add_cluster)
@@ -1638,7 +1679,7 @@
 					# games), so it's safe to pull the existing entry from
 					# the model. All we need is the device name, and nothing
 					# else needs to be done here.
-					# 
+					#
 					# For an existing non-shared device update the device
 					# in the model, since the user could have edited it.
 					retcode, retmsg = validateFenceDevice(fence_form, model)
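
The cluster_adapters.py hunk enforces the GULM requirement of exactly 1, 3, or
5 lock servers and, when validation fails, saves the submitted servers back
into add_cluster['gulm_lockservers'] so the template above can re-fill the
fields on redisplay. A minimal standalone sketch of the same check, outside
the Zope request machinery (the helper name and return convention here are
illustrative, not from the source):

    # Illustrative sketch of the 1/3/5 lock-server rule applied above;
    # the helper name is hypothetical.
    def check_gulm_lockservers(raw_servers):
        errors = []
        servers = [s.strip() for s in raw_servers if s and s.strip()]
        if not servers:
            errors.append('No lock servers were given.')
        elif len(servers) not in (1, 3, 5):
            errors.append('You must have exactly 1, 3, or 5 GULM lock '
                          'servers. You submitted %d lock servers.'
                          % len(servers))
        return servers, errors

    # Two servers are rejected, three are accepted:
    print check_gulm_lockservers(['ls1 ', 'ls2'])        # -> error
    print check_gulm_lockservers(['ls1', 'ls2', 'ls3'])  # -> ok

For simplicity the sketch initializes everything it uses up front; in the hunk
above, cluster_os is bound only inside the try block, so the later rhel4
comparison assumes the 'cluster_os' key is always present in add_cluster.
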
--- conga/luci/site/luci/Extensions/ricci_bridge.py	2007/01/08 19:46:50	1.54
+++ conga/luci/site/luci/Extensions/ricci_bridge.py	2007/01/31 05:26:45	1.55
@@ -153,7 +153,8 @@
 		       install_services,
 		       install_shared_storage,
 		       install_LVS,
-		       upgrade_rpms):
+		       upgrade_rpms,
+		       gulm_lockservers):
 	
 	batch = '<?xml version="1.0" ?>'
 	batch += '<batch>'
@@ -228,12 +229,19 @@
 			batch += '<clusternode name="' + i + '" votes="1" nodeid="' + str(x) + '" />'
 		x = x + 1
 	batch += '</clusternodes>'
-	if len(nodeList) == 2:
-		batch += '<cman expected_votes="1" two_node="1"/>'
-	else:
-		batch += '<cman/>'
+
+	if not gulm_lockservers:
+		if len(nodeList) == 2:
+			batch += '<cman expected_votes="1" two_node="1"/>'
+		else:
+			batch += '<cman/>'
 	batch += '<fencedevices/>'
 	batch += '<rm/>'
+	if gulm_lockservers:
+		batch += '<gulm>'
+		for i in gulm_lockservers:
+			batch += '<lockserver name="%s" />' % i
+		batch += '</gulm>'
 	batch += '</cluster>'
 	batch += '</var>'
 	batch += '</function_call>'
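
In ricci_bridge.py the batch generator gains a gulm_lockservers argument: when
it is empty, the generated cluster.conf keeps the existing <cman> element
(with the two-node quorum special case); when lock servers are supplied, it
emits a <gulm> block with one <lockserver> per host and no <cman> element at
all. A sketch of that branch in isolation, with example node and server names
(the wrapper function is illustrative only):

    # Lock-manager-specific part of the generated cluster.conf, following
    # the logic in the hunk above; node/server names are examples.
    def lockmanager_fragment(nodeList, gulm_lockservers):
        frag = ''
        if not gulm_lockservers:
            # DLM/cman cluster; two-node clusters need the quorum override.
            if len(nodeList) == 2:
                frag += '<cman expected_votes="1" two_node="1"/>'
            else:
                frag += '<cman/>'
        frag += '<fencedevices/>'
        frag += '<rm/>'
        if gulm_lockservers:
            # GULM cluster: no <cman>; list the lock servers instead.
            frag += '<gulm>'
            for name in gulm_lockservers:
                frag += '<lockserver name="%s" />' % name
            frag += '</gulm>'
        return frag

    print lockmanager_fragment(['n1', 'n2'], None)
    # <cman expected_votes="1" two_node="1"/><fencedevices/><rm/>
    print lockmanager_fragment(['n1', 'n2', 'n3'], ['ls1', 'ls2', 'ls3'])
    # <fencedevices/><rm/><gulm><lockserver name="ls1" />...</gulm>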

