[Libvirt-cim] [PATCH] [TEST] Update RAFP.01 for LXC support

yunguol at cn.ibm.com yunguol at cn.ibm.com
Wed Jun 25 05:19:49 UTC 2008


# HG changeset patch
# User Guolian Yun <yunguol at cn.ibm.com>
# Date 1214371183 -28800
# Node ID c8c27374e304a66cd71d1f0b5d0fc462e230a898
# Parent  727d97c09d77d73f3542ba49a9dd19ba119a67eb
[TEST] Update RAFP.01 for LXC support

Signed-off-by: Guolian Yun <yunguol at cn.ibm.com>

diff -r 727d97c09d77 -r c8c27374e304 suites/libvirt-cim/cimtest/ResourceAllocationFromPool/01_forward.py
--- a/suites/libvirt-cim/cimtest/ResourceAllocationFromPool/01_forward.py	Tue Jun 10 18:26:20 2008 -0700
+++ b/suites/libvirt-cim/cimtest/ResourceAllocationFromPool/01_forward.py	Wed Jun 25 13:19:43 2008 +0800
@@ -26,66 +26,165 @@
 from VirtLib import utils
 from XenKvmLib import assoc
 from XenKvmLib import enumclass
-from XenKvmLib.classes import get_typed_class 
+from XenKvmLib.classes import get_typed_class
+from XenKvmLib.test_doms import destroy_and_undefine_all
+from XenKvmLib.vxml import get_class 
 from CimTest import Globals
 from CimTest.Globals import logger, do_main
 from CimTest.ReturnCodes import PASS, FAIL, XFAIL
+from XenKvmLib.common_util import cleanup_restore, create_diskpool_conf, \
+                                   create_netpool_conf
 
-sup_types = ['Xen', 'XenFV', 'KVM']
+sup_types = ['Xen', 'XenFV', 'KVM', 'LXC']
 
+test_dom    = "RAFP_dom"
+test_vcpus  = 1
+test_mem    = 128
+test_mac    = "00:11:22:33:44:aa"
+
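+# setup_env(): clean up any leftover test domains, pick a guest disk device
+# for the virt type (none for LXC), and define the test guest -- a minimal
+# definition for LXC, a full mem/vcpu/mac/disk definition otherwise.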
+def setup_env(server, virt):
+    destroy_and_undefine_all(server)
+    vsxml = None
+    if virt == "Xen":
+        test_disk = "xvda"
+    elif virt == "XenFV" or virt == "KVM":
+        test_disk = "hda"
+    else:
+        test_disk = None
+
+    virtxml = get_class(virt)
+    if virt == 'LXC':
+        vsxml = virtxml(test_dom)
+    else:
+        vsxml = virtxml(test_dom, mem=test_mem, vcpus=test_vcpus,
+                        mac=test_mac, disk=test_disk)
+    try:
+        ret = vsxml.define(server)
+        if not ret:
+            logger.error("Failed to define the domain: %s", test_dom)
+            return FAIL, vsxml, test_disk
+
+    except Exception, details:
+        logger.error("Exception : %s", details)
+        return FAIL, vsxml, test_disk
+
+    return PASS, vsxml, test_disk
+
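+# get_instance(): thin wrapper around enumclass.getInstance() that logs a
+# failure and returns a (status, instance) pair.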
+def get_instance(server, pool, key_list, virt='Xen'):
+    inst = None
+    try:
+        inst = enumclass.getInstance(server,
+                                     pool,
+                                     key_list,
+                                     virt)
+    except Exception:
+        logger.error(Globals.CIM_ERROR_GETINSTANCE % pool)
+        return FAIL, inst
+
+    return PASS, inst
+
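+# verify_rasd(): follow the given association from a pool instance and check
+# that it returns the RASD class and InstanceID expected for the test domain.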
+def verify_rasd(server, assoc_cn, cn, virt, instid, rasd):
+    try:
+        data = assoc.AssociatorNames(server,
+                                     assoc_cn,
+                                     cn,
+                                     virt,
+                                     InstanceID=instid)
+    except Exception:
+        logger.error(Globals.CIM_ERROR_ASSOCIATORNAMES % cn)
+        return FAIL
+
+    if len(data) < 1:
+        logger.error("No association results, expect at least one instance")
+        return FAIL
+
+    # Each pool class has a corresponding RASD class; the association from
+    # the pool must return an instance of that class whose InstanceID
+    # matches the value expected for the test domain.
+    rasd_cn = { "MemoryPool"    : "MemResourceAllocationSettingData",
+                "ProcessorPool" : "ProcResourceAllocationSettingData",
+                "DiskPool"      : "DiskResourceAllocationSettingData",
+                "NetworkPool"   : "NetResourceAllocationSettingData" }
+    exp_cn = get_typed_class(virt, rasd_cn[cn])
+
+    for inst in data:
+        if inst.classname == exp_cn and inst['InstanceID'] == rasd[cn]:
+            logger.info("%s InstanceID match" % cn)
+            return PASS
+
+    logger.error("InstanceID Mismatch, expect %s for %s" % (rasd[cn], cn))
+    return FAIL
+
 @do_main(sup_types)
 def main():
     options = main.options
     status = PASS
 
-    try:
-        key_list = { 'InstanceID' : "MemoryPool/0" }
-        mempool = enumclass.getInstance(options.ip,
-                                        "MemoryPool",
-                                        key_list,
-                                        options.virt)
-    except Exception:
-        logger.error(Globals.CIM_ERROR_GETINSTANCE  % "MemoryPool")
-        return FAIL
+    status, vsxml, test_disk = setup_env(options.ip, options.virt)
+    if status != PASS:
+        return status
+
+    status, diskid = create_diskpool_conf(options.ip, options.virt)
+    if status != PASS:
+        cleanup_restore(options.ip, options.virt)
+        vsxml.undefine(options.ip)
+        return status
 
-    try:
-        key_list = { 'InstanceID' : "ProcessorPool/0" }
-        procpool = enumclass.getInstance(options.ip,
-                                         "ProcessorPool",
-                                         key_list,
-                                         options.virt)
-    except Exception:
-        logger.error(Globals.CIM_ERROR_GETINSTANCE % "ProcessorPool")  
-        return FAIL
-     
-    try:
-        memdata = assoc.AssociatorNames(options.ip, "ResourceAllocationFromPool",
-                                        "MemoryPool",
-                                        options.virt,
-                                        InstanceID = mempool.InstanceID)
-    except Exception:
-        logger.error(Globals.CIM_ERROR_ASSOCIATORNAMES % mempool.InstanceID)
-        status = FAIL
-     
-    for i in range(len(memdata)):
-        if memdata[i].classname != get_typed_class(options.virt, "MemResourceAllocationSettingData"):
-            logger.error("ERROR: Association result error")
-            status = FAIL
-                
-    try:
-        procdata = assoc.AssociatorNames(options.ip, "ResourceAllocationFromPool",
-                                         "ProcessorPool",
-                                         options.virt,
-                                         InstanceID = procpool.InstanceID)
-    except Exception:
-        logger.error(Globals.CIM_ERROR_ASSOCIATORNAMES % procpool.InstanceID)
-        status = FAIL
-      
-    for j in range(len(procdata)):
-        if procdata[j].classname != get_typed_class(options.virt, "ProcResourceAllocationSettingData"):
-	    logger.error("ERROR: Association result error")
-            status = FAIL
+    status, test_network = create_netpool_conf(options.ip, options.virt)
+    if status != PASS:
+        cleanup_restore(options.ip, options.virt)
+        vsxml.undefine(options.ip)
+        return status
+ 
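+    # For LXC only the memory pool is exercised; the other virt types check
+    # memory, processor, disk and network pools.  Expected RASD InstanceIDs
+    # use the "<domain>/<resource>" form, e.g. "RAFP_dom/mem".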
+    if options.virt == 'LXC':
+        pool = { "MemoryPool" : {'InstanceID' : "MemoryPool/0"} }
+        rasd = { "MemoryPool" :  "%s/mem" % test_dom }
+    else:
+        pool = { "MemoryPool"    : {'InstanceID' : "MemoryPool/0"},
+                 "ProcessorPool" : {'InstanceID' : "ProcessorPool/0"},
+                 "DiskPool"      : {'InstanceID' : diskid},
+                 "NetworkPool"   : {'InstanceID' : "NetworkPool/%s" \
+                                     % test_network }}
+        rasd = { "MemoryPool"    : "%s/mem" % test_dom,
+                 "ProcessorPool" : "%s/proc" % test_dom,
+                 "DiskPool"      : "%s/%s" % (test_dom, test_disk),
+                 "NetworkPool"   : "%s/%s" % (test_dom, test_mac) }
 
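+    # For each pool: fetch the pool instance, then verify the RASD returned
+    # by ResourceAllocationFromPool.  Restore the pool config and undefine
+    # the guest before returning on any failure.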
+    for k, v in pool.iteritems():
+        status, inst = get_instance(options.ip, k, v, options.virt) 
+        if status != PASS:
+            cleanup_restore(options.ip, options.virt)
+            vsxml.undefine(options.ip)
+            return status
+ 
+        status = verify_rasd(options.ip, "ResourceAllocationFromPool", 
+                             k, options.virt, inst.InstanceID,
+                             rasd)
+        if status != PASS:
+            cleanup_restore(options.ip, options.virt)
+            vsxml.undefine(options.ip)
+            return status
+
+    cleanup_restore(options.ip, options.virt)
+    vsxml.undefine(options.ip)
     return status 
         
         



