[Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...

rmccabe at sourceware.org
Mon Nov 6 23:55:25 UTC 2006


CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	rmccabe at sourceware.org	2006-11-06 23:55:23

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 
	                           conga_constants.py ricci_bridge.py 
	                           ricci_communicator.py 

Log message:
	cleanups and fixes for config parameter propagation

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.147&r2=1.148
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/conga_constants.py.diff?cvsroot=cluster&r1=1.22&r2=1.23
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/ricci_bridge.py.diff?cvsroot=cluster&r1=1.40&r2=1.41
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/ricci_communicator.py.diff?cvsroot=cluster&r1=1.17&r2=1.18
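
For context, the propagation path these diffs tighten looks roughly like the following. This is a simplified sketch only (error handling and the session fallbacks are omitted); getRicciAgent, getModelBuilder and setClusterConf are the real helpers touched below, but the wrapper function itself is illustrative and not part of the commit:

def propagate_cluster_conf(self, request, clustername):
	# Look up a ricci agent for the cluster; without one nothing can be pushed.
	rc = getRicciAgent(self, clustername)
	if not rc:
		return (False, {'errors': ['unable to contact a ricci agent for cluster %s' % clustername]})

	# Prefer the model cached in the session, otherwise rebuild it via ricci.
	model = request.SESSION.get('model')
	if not model:
		model = getModelBuilder(rc, rc.dom0())

	# Always bump the configuration version before exporting and propagating.
	cp = model.getClusterPtr()
	cp.setConfigVersion(str(int(cp.getConfigVersion()) + 1))
	conf_str = str(model.exportModelAsString())

	# Hand the serialized cluster.conf to ricci for propagation.
	batch_id, result = setClusterConf(rc, conf_str)
	if batch_id is None or result is None:
		return (False, {'errors': ['unable to propagate the new cluster configuration for %s' % clustername]})
	return (True, {'messages': ['cluster configuration propagated for %s' % clustername]})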

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/11/06 20:21:04	1.147
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/11/06 23:55:23	1.148
@@ -1,11 +1,10 @@
 import socket
 from ModelBuilder import ModelBuilder
 from xml.dom import minidom
-from ZPublisher import HTTPRequest
 import AccessControl
 from conga_constants import *
 from ricci_bridge import *
-from ricci_communicator import *
+from ricci_communicator import RicciCommunicator, RicciError, batch_status, extract_module_status
 from string import lower
 import time
 import Products.ManagedSystem
@@ -19,13 +18,12 @@
 from Vm import Vm
 from Script import Script
 from Samba import Samba
-from FenceHandler import FenceHandler
 from clusterOS import resolveOSType
-from FenceHandler import FENCE_OPTS
+from FenceHandler import FenceHandler, FENCE_OPTS
 from GeneralError import GeneralError
 from UnknownClusterError import UnknownClusterError
 from homebase_adapters import nodeUnauth, nodeAuth, manageCluster, createClusterSystems, havePermCreateCluster, setNodeFlag, delNodeFlag, userAuthenticated, getStorageNode, getClusterNode
-from LuciSyslog import LuciSyslogError, LuciSyslog
+from LuciSyslog import LuciSyslog
 
 #Policy for showing the cluster chooser menu:
 #1) If there are no clusters in the ManagedClusterSystems
@@ -37,7 +35,7 @@
 
 try:
 	luci_log = LuciSyslog()
-except LuciSyslogError, e:
+except:
 	pass
 
 def validateClusterNodes(request, sessionData, clusterName, numStorage):
@@ -243,8 +241,8 @@
   clusterfolder = self.restrictedTraverse(path)
   for key in batch_map.keys():
     key = str(key)
-    id = batch_map[key]
-    batch_id = str(id)
+    batch_id = batch_map[key]
+    batch_id = str(batch_id)
     objname = str(key + "____flag") #This suffix needed to avoid name collision
     clusterfolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
     #now designate this new object properly
@@ -253,9 +251,9 @@
     #flag[BATCH_ID] = batch_id
     #flag[TASKTYPE] = CLUSTER_ADD
     #flag[FLAG_DESC] = "Creating node " + key + " for cluster " + clusterName
-    flag.manage_addProperty(BATCH_ID,batch_id, "string")
-    flag.manage_addProperty(TASKTYPE,CLUSTER_ADD, "string")
-    flag.manage_addProperty(FLAG_DESC,"Creating node " + key + " for cluster " + clusterName, "string")
+    flag.manage_addProperty(BATCH_ID, batch_id, "string")
+    flag.manage_addProperty(TASKTYPE, CLUSTER_ADD, "string")
+    flag.manage_addProperty(FLAG_DESC, "Creating node " + key + " for cluster " + clusterName, "string")
     flag.manage_addProperty(LAST_STATUS, 0, "int")
 
 def validateAddClusterNode(self, request):
@@ -420,16 +418,18 @@
 			form_hash[form_parent] = {'form': None, 'kids': []}
 		form_hash[form_parent]['kids'].append(form_id)
 		dummy_form = {}
+
 		for i in ielems:
 			try:
-				type = str(i.getAttribute('type'))
+				input_type = str(i.getAttribute('type'))
 			except:
 				continue
-			if not type or type == 'button':
+			if not input_type or input_type == 'button':
 				continue
 			try:
 				dummy_form[str(i.getAttribute('name'))] = str(i.getAttribute('value'))
-			except:
+			except Exception, e:
+				luci_log.debug_verbose('Error parsing service XML: %s' % str(e))
 				pass
 
 		try:
@@ -654,7 +654,7 @@
 	try:
 		cp = model.getClusterPtr()
 		old_name = model.getClusterAlias()
-		old_ver = cp.getConfigVersion()
+		old_ver = int(cp.getConfigVersion())
 	except Exception, e:
 		luci_log.debug_verbose('getConfigVersion: %s' % str(e))
 		errors.append('unable to determine the current configuration version')
@@ -682,7 +682,7 @@
 		try:
 			if cluster_name != old_name:
 				cp.addAttribute('alias', cluster_name)
-			model.setConfigVersion(version_num)
+			cp.setConfigVersion(str(version_num))
 		except Exception, e:
 			luci_log.debug_verbose('unable to update general properties: %s' % str(e))
 			errors.append('Unable to update the cluster configuration.')
@@ -741,21 +741,46 @@
 def validateConfigCluster(self, request):
 	errors = list()
 	messages = list()
+	rc = None
 
 	try:
 		model = request.SESSION.get('model')
 		if not model:
 			raise Exception, 'model is none'
 	except Exception, e:
-		luci_log.debug_verbose('VCC0: unable to get model from session')
-		return (False, {'errors': ['No cluster model was found.']})
+		model = None
+		try:
+			cluname = request.form['clustername']
+		except:
+			try:
+				cluname = request['clustername']
+			except:
+				luci_log.debug_verbose('VCC0a: no model, no cluster name')
+				return (False, {'errors': ['No cluster model was found.']})
+
+		rc = getRicciAgent(self, cluname)
+		if not rc:
+			luci_log.debug_verbose('VCCb: no model in session, unable to find a ricci agent for the %s cluster' % cluname)
+			return (False, {'errors': ['No cluster model was found.']})
+
+		try:
+			model = getModelBuilder(rc, rc.dom0())
+			if not model:
+				raise Exception, 'model is none'
+		except Exception, e:
+			luci_log.debug_verbose('VCCc: unable to get model builder for cluster %s: %s' % (cluname, str(e)))
+			model = None
 
-	if not 'form' in request:
-		luci_log.debug_verbose('VCC1: no form passed in')
-		return (False, {'errors': ['No form was submitted.']})
+		if model is None:
+			luci_log.debug_verbose('VCC0: unable to get model from session')
+			return (False, {'errors': ['No cluster model was found.']})
 
-	if not 'configtype' in request.form:
-		luci_log.debug_verbose('VCC2: no configtype')
+	try:
+		if not 'configtype' in request.form:
+			luci_log.debug_verbose('VCC2: no configtype')
+			raise Exception, 'no config type'
+	except Exception, e:
+		luci_log.debug_verbose('VCC2a: %s' % str(e))
 		return (False, {'errors': ['No configuration type was submitted.']})
 
 	if not request.form['configtype'] in configFormValidators:
@@ -780,10 +805,10 @@
 
 	if retcode == True:
 		try:
-			old_ver = cp.getConfigVersion()
+			config_ver = int(cp.getConfigVersion()) + 1
 			# always increment the configuration version
-			model.setConfigVersion(old_ver + 1)
-			conf_str = str(model.exportModelAsString())
+			cp.setConfigVersion(config_ver)
+			conf_str = model.exportModelAsString()
 			if not conf_str:
 				raise Exception, 'conf_str is none'
 		except Exception, e:
@@ -799,13 +824,18 @@
 		luci_log.debug_verbose('VCC5: error: getClusterName: %s' % str(e))
 		errors.append('unable to determine cluster name from model') 
 
-	rc = getRicciAgent(self, clustername)
+	if len(errors) > 0:
+		return (retcode, {'errors': errors, 'messages': messages})
+
 	if not rc:
-		luci_log.debug_verbose('VCC6: unable to find a ricci agent for the %s cluster' % clustername)
-		errors.append('unable to contact a ricci agent for cluster %s' \
-			% clustername)
-	else:
-		batch_id, result = setClusterConf(rc, conf_str)
+		rc = getRicciAgent(self, clustername)
+		if not rc:
+			luci_log.debug_verbose('VCC6: unable to find a ricci agent for the %s cluster' % clustername)
+			errors.append('unable to contact a ricci agent for cluster %s' \
+				% clustername)
+
+	if rc:
+		batch_id, result = setClusterConf(rc, str(conf_str))
 		if batch_id is None or result is None:
 			luci_log.debug_verbose('VCC7: setCluserConf: batchid or result is None')
 			errors.append('unable to propagate the new cluster configuration for %s' \
@@ -858,8 +888,10 @@
     except:
       request.SESSION.set('checkRet', {})
   else:
-    try: request.SESSION.set('checkRet', {})
-    except: pass
+    try:
+      request.SESSION.set('checkRet', {})
+    except:
+      pass
 
   #First, see if a cluster is chosen, then
   #check that the current user can access that system
@@ -921,7 +953,7 @@
     clcfg['show_children'] = False
 
   #loop through all clusters
-  syslist= list()
+  syslist = list()
   for system in systems:
     clsys = {}
     clsys['Title'] = system[0]
@@ -1398,6 +1430,7 @@
   portaltabs = list()
   if not userAuthenticated(self):
     return portaltabs
+
   selectedtab = "homebase"
   try:
     baseurl = req['URL']
@@ -1410,11 +1443,6 @@
   except KeyError, e:
     pass
 
-  try:
-    base2 = req['BASE2']
-  except KeyError, e:
-    base2 = req['HTTP_HOST'] + req['SERVER_PORT']
-
   htab = { 'Title':"homebase",
            'Description':"Home base for this luci server",
            'Taburl':"/luci/homebase"}
@@ -1448,7 +1476,7 @@
 
 
 
-def check_clusters(self,clusters):
+def check_clusters(self, clusters):
   clist = list()
   for cluster in clusters:
     if cluster_permission_check(cluster[1]):
@@ -2029,12 +2057,41 @@
   else:
     return
 
-
 def getClusterInfo(self, model, req):
-  cluname = req[CLUNAME]
+  try:
+    cluname = req[CLUNAME]
+  except:
+    try:
+      cluname = req.form['clustername']
+    except:
+      try:
+        cluname = req.form['clusterName']
+      except:
+        luci_log.debug_verbose('GCI0: unable to determine cluster name')
+        return {}
+
+  if model is None:
+    rc = getRicciAgent(self, cluname)
+    if not rc:
+      luci_log.debug_verbose('GCI1: unable to find a ricci agent for the %s cluster' % cluname)
+      return {}
+    try:
+      model = getModelBuilder(rc, rc.dom0())
+      if not model:
+        raise Exception, 'model is none'
+
+      try:
+        req.SESSION.set('model', model)
+      except Exception, e2:
+        luci_log.debug_verbose('GCI2 unable to set model in session: %s' % str(e2))
+    except Exception, e:
+      luci_log.debug_verbose('GCI3: unable to get model for cluster %s: %s' % (cluname, str(e)))
+      return {}
+
   baseurl = req['URL'] + "?" + PAGETYPE + "=" + CLUSTER_PROCESS + "&" + CLUNAME + "=" + cluname + "&"
+  prop_baseurl = req['URL'] + '?' + PAGETYPE + '=' + CLUSTER_CONFIG + '&' + CLUNAME + '=' + cluname + '&'
   map = {}
-  basecluster_url = baseurl + ACTIONTYPE + "=" + BASECLUSTER
+  basecluster_url = prop_baseurl + PROPERTIES_TAB + "=" + PROP_GENERAL_TAB
   #needed:
   map['basecluster_url'] = basecluster_url
   #name field
@@ -2046,7 +2103,7 @@
   #new cluster params - if rhel5
   #-------------
   #Fence Daemon Props
-  fencedaemon_url = baseurl + ACTIONTYPE + "=" + FENCEDAEMON
+  fencedaemon_url = prop_baseurl + PROPERTIES_TAB + "=" + PROP_FENCE_TAB
   map['fencedaemon_url'] = fencedaemon_url
   fdp = model.getFenceDaemonPtr()
   pjd = fdp.getAttribute('post_join_delay')
@@ -2061,7 +2118,7 @@
   map['pfd'] = pfd
   #-------------
   #if multicast
-  multicast_url = baseurl + ACTIONTYPE + "=" + MULTICAST
+  multicast_url = prop_baseurl + PROPERTIES_TAB + "=" + PROP_MCAST_TAB
   map['multicast_url'] = multicast_url
   #mcast addr
   is_mcast = model.isMulticast()
@@ -2075,7 +2132,7 @@
 
   #-------------
   #quorum disk params
-  quorumd_url = baseurl + ACTIONTYPE + "=" + QUORUMD
+  quorumd_url = prop_baseurl + PROPERTIES_TAB + "=" + PROP_QDISK_TAB
   map['quorumd_url'] = quorumd_url
   is_quorumd = model.isQuorumd()
   map['is_quorumd'] = is_quorumd
@@ -2146,7 +2203,7 @@
 
   return map
 
-def getClustersInfo(self,status,req):
+def getClustersInfo(self, status, req):
   map = {}
   nodelist = list()
   svclist = list()
@@ -2596,6 +2653,7 @@
 
 def getNodeInfo(self, model, status, request):
   infohash = {}
+  item = None
   baseurl = request['URL']
   nodestate = NODE_ACTIVE
   svclist = list()
@@ -2692,7 +2750,7 @@
   return infohash
   #get list of faildoms for node
 
-def getNodesInfo(self, model, status,req):
+def getNodesInfo(self, model, status, req):
   resultlist = list()
   nodelist = list()
   svclist = list()
@@ -2802,7 +2860,7 @@
       map = fencedev.getAttributes()
       try:
         map['pretty_name'] = FENCE_OPTS[fencedev.getAgentType()]
-      except Exception, e:
+      except:
         map['pretty_name'] = fencedev.getAgentType()
 
       return map
@@ -2837,7 +2895,7 @@
           fencedev[kee] = attr_hash[kee] #copy attrs over
         try:
           fencedev['pretty_name'] = FENCE_OPTS[fd.getAgentType()]
-        except Exception, e:
+        except:
           fencedev['pretty_name'] = fd.getAgentType()
 
         nodes_used = list() #This section determines which nodes use the dev
@@ -3052,15 +3110,12 @@
 def getXenVMInfo(self, model, request):
 	try:
 		xenvmname = request['servicename']
-	except KeyError, e:
+	except:
 		try:
 			xenvmname = request.form['servicename']
 		except:
 			luci_log.debug_verbose('servicename is missing from request')
 			return {}
-	except:
-		luci_log.debug_verbose('servicename is missing from request')
-		return {}
 
 	try:
 		xenvm = model.retrieveXenVMsByName(xenvmname)
@@ -3271,7 +3326,7 @@
           propslist = list()
           propslist.append(LAST_STATUS)
           item[1].manage_delProperties(propslist)
-          item[1].manage_addProperty(LAST_STATUS,creation_status, "int")
+          item[1].manage_addProperty(LAST_STATUS, creation_status, "int")
           continue
           
     else:
@@ -3340,15 +3395,12 @@
 
 	try:
 		cluname = request['clustername']
-	except KeyError, e:
+	except:
 		try:
 			cluname = request.form['clustername']
 		except:
 			luci_log.debug_verbose('getResourcesInfo missing cluster name')
 			return resList
-	except:
-		luci_log.debug_verbose('getResourcesInfo missing cluster name')
-		return resList
 
 	for item in modelb.getResources():
 		itemmap = {}
@@ -3368,18 +3420,16 @@
 	name = None
 	try:
 		name = request['resourcename']
-	except KeyError, e:
+	except:
 		try:
 			name = request.form['resourcename']
 		except:
 			pass
-	except:
-		pass
 
 	if name is None:
 		try:
-			type = request.form['type']
-			if type == 'ip':
+			res_type = request.form['type']
+			if res_type == 'ip':
 				name = request.form['value'].strip()
 		except:
 			pass
@@ -3390,15 +3440,12 @@
 
 	try:
 		cluname = request['clustername']
-	except KeyError, e:
+	except:
 		try:
 			cluname = request.form['clustername']
 		except:
 			luci_log.debug_verbose('getResourceInfo missing cluster name')
 			return {}
-	except:
-		luci_log.debug_verbose('getResourceInfo missing cluster name')
-		return {}
 
 	try:
 		baseurl = request['URL']
@@ -3483,7 +3530,6 @@
 		luci_log.debug_verbose('delRes: missing batch and/or result from setClusterConf')
 		return errstr
 
-	modelstr = ""
 	path = CLUSTER_FOLDER_PATH + str(clustername)
 	clusterfolder = self.restrictedTraverse(path)
 	batch_id = str(batch_number)
@@ -3530,7 +3576,7 @@
 			return None
 	else:
 		try:
-			res = apply(Ip)
+			res = Ip()
 			if not res:
 				raise Exception, 'apply(Ip) is None'
 		except Exception, e:
@@ -3586,7 +3632,7 @@
 			return None
 	else:
 		try:
-			res = apply(Fs)
+			res = Fs()
 			if not res:
 				raise Exception, 'apply(Fs) is None'
 		except Exception, e:
@@ -3694,7 +3740,7 @@
 			return None
 	else:
 		try:
-			res = apply(Clusterfs)
+			res = Clusterfs()
 			if not res:
 				raise Exception, 'apply(Clusterfs) is None'
 		except Exception, e:
@@ -3781,7 +3827,7 @@
 			return None
 	else:
 		try:
-			res = apply(Netfs)
+			res = Netfs()
 		except Exception, e:
 			luci_log.debug_verbose('addNfsm error: %s' % str(e))
 			return None
@@ -3876,7 +3922,7 @@
 			return None
 	else:
 		try:
-			res = apply(NFSClient)
+			res = NFSClient()
 		except:
 			luci_log.debug_verbose('addNfsc error: %s' % str(e))
 			return None
@@ -3940,7 +3986,7 @@
 			return None
 	else:
 		try:
-			res = apply(NFSExport)
+			res = NFSExport()
 		except:
 			luci_log.debug_verbose('addNfsx error: %s', str(e))
 			return None
@@ -3988,7 +4034,7 @@
 			return None
 	else:
 		try:
-			res = apply(Script)
+			res = Script()
 		except Exception, e:
 			luci_log.debug_verbose('addScr error: %s' % str(e))
 			return None
@@ -4009,10 +4055,10 @@
 		luci_log.debug_verbose('addScr error: %s' % err)
 
 	try:
-		file = form['file'].strip()
-		if not file:
+		path = form['file'].strip()
+		if not path:
 			raise KeyError, 'file path is blank'
-		res.attr_hash['file'] = file
+		res.attr_hash['file'] = path
 	except Exception, e:
 		err = str(e)
 		errors.append(err)
@@ -4046,7 +4092,7 @@
 			return None
 	else:
 		try:
-			res = apply(Samba)
+			res = Samba()
 		except Exception, e:
 			luci_log.debug_verbose('addSmb error: %s' % str(e))
 			return None
@@ -4159,7 +4205,7 @@
 	
 	return messages
 
-def addResource(self, request, modelb, res, type):
+def addResource(self, request, modelb, res, res_type):
 	clustername = modelb.getClusterName()
 	if not clustername:
 		raise Exception, 'cluster name from modelb.getClusterName() is blank'
@@ -4204,7 +4250,7 @@
 		flag.manage_addProperty(BATCH_ID, batch_id, "string")
 		flag.manage_addProperty(TASKTYPE, RESOURCE_ADD, "string")
 
-		if type != 'ip':
+		if res_type != 'ip':
 			flag.manage_addProperty(FLAG_DESC, "Creating New Resource \'" + res.attr_hash['name'] + "\'", "string")
 		else:
 			flag.manage_addProperty(FLAG_DESC, "Creating New Resource \'" + res.attr_hash['address'] + "\'", "string")
@@ -4330,11 +4376,11 @@
 		clusterfolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
 		objpath = str(path + '/' + objname)
 		flag = self.restrictedTraverse(objpath)
-		flag.manage_addProperty(BATCH_ID, batchid, 'string')
+		flag.manage_addProperty(BATCH_ID, batch_id, 'string')
 		flag.manage_addProperty(TASKTYPE, task, 'string')
 		flag.manage_addProperty(FLAG_DESC, desc)
 	except Exception, e:
 		errmsg = 'Error creating flag (%s,%s,%s) at %s: %s' \
-					% (batchid, task, desc, objpath, str(e))
+					% (batch_id, task, desc, objpath, str(e))
 		luci_log.debug_verbose(errmsg)
 		raise Exception, errmsg
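
Several of the hunks above touch the same flag-object pattern: a ManagedSystem object named '<name>____flag' is created under the cluster folder and tagged with BATCH_ID, TASKTYPE and FLAG_DESC properties so the status pages can track the corresponding ricci batch. A condensed sketch of that pattern follows; the helper name and its arguments are illustrative, while the property constants come from conga_constants:

def create_batch_flag(self, path, name, batch_id, task, desc):
	# The '____flag' suffix avoids name collisions inside the cluster folder.
	objname = str(name + '____flag')
	clusterfolder = self.restrictedTraverse(path)
	clusterfolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
	flag = self.restrictedTraverse(str(path + '/' + objname))
	flag.manage_addProperty(BATCH_ID, str(batch_id), 'string')
	flag.manage_addProperty(TASKTYPE, task, 'string')
	flag.manage_addProperty(FLAG_DESC, desc, 'string')
	flag.manage_addProperty(LAST_STATUS, 0, 'int')
	return flag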
--- conga/luci/site/luci/Extensions/conga_constants.py	2006/11/03 22:48:15	1.22
+++ conga/luci/site/luci/Extensions/conga_constants.py	2006/11/06 23:55:23	1.23
@@ -55,6 +55,13 @@
 MULTICAST="203"
 QUORUMD="204"
 
+PROPERTIES_TAB = 'tab'
+
+PROP_GENERAL_TAB = '1'
+PROP_FENCE_TAB = '2'
+PROP_MCAST_TAB = '3'
+PROP_QDISK_TAB = '4'
+
 PAGETYPE="pagetype"
 ACTIONTYPE="actiontype"
 TASKTYPE="tasktype"
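
These tab constants replace the ACTIONTYPE-based links in getClusterInfo() above. A minimal illustration of how a cluster-properties URL is now assembled; PROPERTIES_TAB and the PROP_*_TAB values are the constants added here, PAGETYPE, CLUSTER_CONFIG and CLUNAME are the existing constants used the same way in the cluster_adapters.py hunk, and the helper name and its arguments are placeholders:

def cluster_prop_url(baseurl, cluname, tab):
	# tab is one of the PROP_*_TAB values above, e.g. PROP_FENCE_TAB for the
	# fence daemon properties form; baseurl and cluname are placeholders.
	prop_baseurl = baseurl + '?' + PAGETYPE + '=' + CLUSTER_CONFIG \
		+ '&' + CLUNAME + '=' + cluname + '&'
	return prop_baseurl + PROPERTIES_TAB + '=' + tab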
--- conga/luci/site/luci/Extensions/ricci_bridge.py	2006/11/01 20:34:02	1.40
+++ conga/luci/site/luci/Extensions/ricci_bridge.py	2006/11/06 23:55:23	1.41
@@ -114,13 +114,6 @@
 						install_LVS,
 						upgrade_rpms):
 
-	if os_str == 'rhel5':
-		cluster_version = '5'
-	elif os_str == 'rhel4':
-		cluster_version = '4'
-	else:
-		cluster_version = 'unknown'
-
 	batch = '<?xml version="1.0" ?>'
 	batch += '<batch>'
 	batch += '<module name="rpm">'
@@ -276,7 +269,7 @@
 	return doc
 
 def getClusterStatusBatch(rc):
-	batch_str ='<module name="cluster"><request API_version="1.0"><function_call name="status"/></request></module>'
+	batch_str = '<module name="cluster"><request API_version="1.0"><function_call name="status"/></request></module>'
 	ricci_xml = rc.batch_run(batch_str, async=False)
 
 	if not ricci_xml or not ricci_xml.firstChild:
@@ -318,8 +311,8 @@
 		if not log_entries or len(log_entries) < 1:
 			raise Exception, 'no log data is available.'
 	except Exception, e:
-		'Error retrieving log data from %s: %s' \
-			% (rc.hostname(), str(e))
+		luci_log.debug_verbose('Error retrieving log data from %s: %s' \
+			% (rc.hostname(), str(e)))
 		return None
 	time_now = time()
 	entry = ''
@@ -463,7 +456,6 @@
 		return None
 
 	resultlist = list()
-	svc_node = None
 	for node in varnode.childNodes:
 		if node.nodeName == 'service':
 			svchash = {}
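
Worth noting in the ricci_bridge.py log hunk above: the old except block contained a bare string expression, which Python evaluates and throws away, so the error message was never logged. A minimal before/after illustration, where the hostname is a placeholder and luci_log is the module-level LuciSyslog instance the file already uses:

try:
	raise Exception, 'no log data is available.'
except Exception, e:
	# before: a bare string expression, formatted and then silently discarded
	'Error retrieving log data from %s: %s' % ('node1.example.com', str(e))
	# after: the same message actually emitted through the syslog wrapper
	luci_log.debug_verbose('Error retrieving log data from %s: %s'
		% ('node1.example.com', str(e)))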
--- conga/luci/site/luci/Extensions/ricci_communicator.py	2006/11/01 20:34:02	1.17
+++ conga/luci/site/luci/Extensions/ricci_communicator.py	2006/11/06 23:55:23	1.18
@@ -1,10 +1,8 @@
-from time import *
-from socket import *
+from socket import socket, ssl, AF_INET, SOCK_STREAM
 import xml
 import xml.dom
 from xml.dom import minidom
 from LuciSyslog import LuciSyslog
-from HelperFunctions import access_to_host_allowed
 
 CERTS_DIR_PATH = '/var/lib/luci/var/certs/'
 
@@ -210,8 +208,8 @@
                 % (batch_xml_str, self.__hostname))
             batch_xml = minidom.parseString(batch_xml_str).firstChild
         except Exception, e:
-            luci_log.debug('received invalid batch XML for %s: \"%s\"' \
-                % (self.__hostname, batch_xml_str))
+            luci_log.debug('received invalid batch XML for %s: \"%s\": %s' \
+                % (self.__hostname, batch_xml_str, str(e)))
             raise RicciError, 'batch XML is malformed'
 
         try:



