[libvirt] [PATCHv4 4/8] qemu: bulk stats: implement VCPU group

Francesco Romani fromani at redhat.com
Fri Sep 12 11:48:55 UTC 2014


This patch implements the VIR_DOMAIN_STATS_VCPU group of statistics.
To do so, it also extracts a helper out of qemuDomainGetVcpus() to
gather the vCPU information, so the new bulk stats code can share it.

Signed-off-by: Francesco Romani <fromani at redhat.com>
---
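Note for reviewers, not meant to be committed: with this series applied,
a client would fetch the new group through virConnectGetAllDomainStats()
roughly as in the minimal sketch below. The "qemu:///system" URI and the
bare error handling are illustrative only, and the loop just prints the
key names; decoding the values by their typed-parameter type is left out.

    #include <stdio.h>
    #include <libvirt/libvirt.h>

    int main(void)
    {
        virConnectPtr conn;
        virDomainStatsRecordPtr *records = NULL;
        int nrecords;
        size_t i;
        int j;

        if (!(conn = virConnectOpenReadOnly("qemu:///system")))
            return 1;

        /* request only the new VCPU stats group */
        nrecords = virConnectGetAllDomainStats(conn, VIR_DOMAIN_STATS_VCPU,
                                               &records, 0);
        if (nrecords < 0) {
            virConnectClose(conn);
            return 1;
        }

        for (i = 0; i < (size_t)nrecords; i++) {
            printf("domain: %s\n", virDomainGetName(records[i]->dom));
            for (j = 0; j < records[i]->nparams; j++) {
                /* keys: "vcpu.current", "vcpu.maximum",
                 *       "vcpu.<num>.state", "vcpu.<num>.time" */
                printf("  %s\n", records[i]->params[j].field);
            }
        }

        virDomainStatsRecordListFree(records);
        virConnectClose(conn);
        return 0;
    }
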
 include/libvirt/libvirt.h.in |   1 +
 src/libvirt.c                |  12 +++
 src/qemu/qemu_driver.c       | 200 +++++++++++++++++++++++++++++--------------
 3 files changed, 149 insertions(+), 64 deletions(-)
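
For illustration only (values made up): per the documentation added to
virConnectGetAllDomainStats() below, a guest with 2 vCPUs online out of
a maximum of 4 would be reported roughly as

    vcpu.current = 2
    vcpu.maximum = 4
    vcpu.0.state = 1              (VIR_VCPU_RUNNING)
    vcpu.0.time  = 1518230000000  (nanoseconds, as in virVcpuInfo)
    vcpu.1.state = 1
    vcpu.1.time  = 1497120000000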

diff --git a/include/libvirt/libvirt.h.in b/include/libvirt/libvirt.h.in
index c005442..00eafc6 100644
--- a/include/libvirt/libvirt.h.in
+++ b/include/libvirt/libvirt.h.in
@@ -2513,6 +2513,7 @@ typedef enum {
     VIR_DOMAIN_STATS_STATE = (1 << 0), /* return domain state */
     VIR_DOMAIN_STATS_CPU_TOTAL = (1 << 1), /* return domain CPU info */
     VIR_DOMAIN_STATS_BALLOON = (1 << 2), /* return domain balloon info */
+    VIR_DOMAIN_STATS_VCPU = (1 << 3), /* return domain virtual CPU info */
 } virDomainStatsTypes;
 
 typedef enum {
diff --git a/src/libvirt.c b/src/libvirt.c
index 3fa86ab..ba7a780 100644
--- a/src/libvirt.c
+++ b/src/libvirt.c
@@ -21576,6 +21576,18 @@ virConnectGetDomainCapabilities(virConnectPtr conn,
  * "balloon.maximum" - the maximum memory in kiB allowed
  *                     as unsigned long long.
  *
+ * VIR_DOMAIN_STATS_VCPU: Return virtual CPU statistics.
+ * Due to VCPU hotplug, the vcpu.<num>.* array could be sparse.
+ * The actual size of the array corresponds to "vcpu.current".
+ * The array size will never exceed "vcpu.maximum".
+ * The typed parameter keys are in this format:
+ * "vcpu.current" - current number of online virtual CPUs as unsigned int.
+ * "vcpu.maximum" - maximum number of online virtual CPUs as unsigned int.
+ * "vcpu.<num>.state" - state of the virtual CPU <num>, as int
+ *                      from virVcpuState enum.
+ * "vcpu.<num>.time" - virtual CPU time spent by virtual CPU <num>
+ *                     as unsigned long long.
+ *
  * Using 0 for @stats returns all stats groups supported by the given
  * hypervisor.
  *
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index f677884..12bf5e4 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -1380,6 +1380,76 @@ qemuGetProcessInfo(unsigned long long *cpuTime, int *lastCpu, long *vm_rss,
 }
 
 
+static int
+qemuDomainHelperGetVcpus(virDomainObjPtr vm, virVcpuInfoPtr info, int maxinfo,
+                         unsigned char *cpumaps, int maplen)
+{
+    int maxcpu, hostcpus;
+    size_t i, v;
+    qemuDomainObjPrivatePtr priv = vm->privateData;
+
+    if ((hostcpus = nodeGetCPUCount()) < 0)
+        return -1;
+
+    maxcpu = maplen * 8;
+    if (maxcpu > hostcpus)
+        maxcpu = hostcpus;
+
+    /* Clamp to actual number of vcpus */
+    if (maxinfo > priv->nvcpupids)
+        maxinfo = priv->nvcpupids;
+
+    if (maxinfo >= 1) {
+        if (info != NULL) {
+            memset(info, 0, sizeof(*info) * maxinfo);
+            for (i = 0; i < maxinfo; i++) {
+                info[i].number = i;
+                info[i].state = VIR_VCPU_RUNNING;
+
+                if (priv->vcpupids != NULL &&
+                    qemuGetProcessInfo(&(info[i].cpuTime),
+                                       &(info[i].cpu),
+                                       NULL,
+                                       vm->pid,
+                                       priv->vcpupids[i]) < 0) {
+                    virReportSystemError(errno, "%s",
+                                         _("cannot get vCPU placement & pCPU time"));
+                    return -1;
+                }
+            }
+        }
+
+        if (cpumaps != NULL) {
+            memset(cpumaps, 0, maplen * maxinfo);
+            if (priv->vcpupids != NULL) {
+                for (v = 0; v < maxinfo; v++) {
+                    unsigned char *cpumap = VIR_GET_CPUMAP(cpumaps, maplen, v);
+                    virBitmapPtr map = NULL;
+                    unsigned char *tmpmap = NULL;
+                    int tmpmapLen = 0;
+
+                    if (virProcessGetAffinity(priv->vcpupids[v],
+                                              &map, maxcpu) < 0)
+                        return -1;
+                    virBitmapToData(map, &tmpmap, &tmpmapLen);
+                    if (tmpmapLen > maplen)
+                        tmpmapLen = maplen;
+                    memcpy(cpumap, tmpmap, tmpmapLen);
+
+                    VIR_FREE(tmpmap);
+                    virBitmapFree(map);
+                }
+            } else {
+                virReportError(VIR_ERR_OPERATION_INVALID,
+                               "%s", _("cpu affinity is not available"));
+                return -1;
+            }
+        }
+    }
+    return maxinfo;
+}
+
+
 static virDomainPtr qemuDomainLookupByID(virConnectPtr conn,
                                          int id)
 {
@@ -5001,10 +5071,7 @@ qemuDomainGetVcpus(virDomainPtr dom,
                    int maplen)
 {
     virDomainObjPtr vm;
-    size_t i;
-    int v, maxcpu, hostcpus;
     int ret = -1;
-    qemuDomainObjPrivatePtr priv;
 
     if (!(vm = qemuDomObjFromDomain(dom)))
         goto cleanup;
@@ -5019,67 +5086,7 @@ qemuDomainGetVcpus(virDomainPtr dom,
         goto cleanup;
     }
 
-    priv = vm->privateData;
-
-    if ((hostcpus = nodeGetCPUCount()) < 0)
-        goto cleanup;
-
-    maxcpu = maplen * 8;
-    if (maxcpu > hostcpus)
-        maxcpu = hostcpus;
-
-    /* Clamp to actual number of vcpus */
-    if (maxinfo > priv->nvcpupids)
-        maxinfo = priv->nvcpupids;
-
-    if (maxinfo >= 1) {
-        if (info != NULL) {
-            memset(info, 0, sizeof(*info) * maxinfo);
-            for (i = 0; i < maxinfo; i++) {
-                info[i].number = i;
-                info[i].state = VIR_VCPU_RUNNING;
-
-                if (priv->vcpupids != NULL &&
-                    qemuGetProcessInfo(&(info[i].cpuTime),
-                                       &(info[i].cpu),
-                                       NULL,
-                                       vm->pid,
-                                       priv->vcpupids[i]) < 0) {
-                    virReportSystemError(errno, "%s",
-                                         _("cannot get vCPU placement & pCPU time"));
-                    goto cleanup;
-                }
-            }
-        }
-
-        if (cpumaps != NULL) {
-            memset(cpumaps, 0, maplen * maxinfo);
-            if (priv->vcpupids != NULL) {
-                for (v = 0; v < maxinfo; v++) {
-                    unsigned char *cpumap = VIR_GET_CPUMAP(cpumaps, maplen, v);
-                    virBitmapPtr map = NULL;
-                    unsigned char *tmpmap = NULL;
-                    int tmpmapLen = 0;
-
-                    if (virProcessGetAffinity(priv->vcpupids[v],
-                                              &map, maxcpu) < 0)
-                        goto cleanup;
-                    virBitmapToData(map, &tmpmap, &tmpmapLen);
-                    if (tmpmapLen > maplen)
-                        tmpmapLen = maplen;
-                    memcpy(cpumap, tmpmap, tmpmapLen);
-
-                    VIR_FREE(tmpmap);
-                    virBitmapFree(map);
-                }
-            } else {
-                virReportError(VIR_ERR_OPERATION_INVALID,
-                               "%s", _("cpu affinity is not available"));
-                goto cleanup;
-            }
-        }
-    }
-    ret = maxinfo;
+    ret = qemuDomainHelperGetVcpus(vm, info, maxinfo, cpumaps, maplen);
 
  cleanup:
     if (vm)
@@ -17388,6 +17395,70 @@ qemuDomainGetStatsBalloon(virQEMUDriverPtr driver,
     return 0;
 }
 
+
+static int
+qemuDomainGetStatsVcpu(virQEMUDriverPtr driver ATTRIBUTE_UNUSED,
+                       virDomainObjPtr dom,
+                       virDomainStatsRecordPtr record,
+                       int *maxparams,
+                       unsigned int privflags ATTRIBUTE_UNUSED)
+{
+    size_t i;
+    int ret = -1;
+    char param_name[VIR_TYPED_PARAM_FIELD_LENGTH];
+    virVcpuInfoPtr cpuinfo = NULL;
+
+    if (virTypedParamsAddUInt(&record->params,
+                              &record->nparams,
+                              maxparams,
+                              "vcpu.current",
+                              (unsigned) dom->def->vcpus) < 0)
+        return -1;
+
+    if (virTypedParamsAddUInt(&record->params,
+                              &record->nparams,
+                              maxparams,
+                              "vcpu.maximum",
+                              (unsigned) dom->def->maxvcpus) < 0)
+        return -1;
+
+    if (VIR_ALLOC_N(cpuinfo, dom->def->vcpus) < 0)
+        return -1;
+
+    if (qemuDomainHelperGetVcpus(dom, cpuinfo, dom->def->vcpus,
+                                 NULL, 0) < 0) {
+        ret = 0; /* it's ok to be silent and go ahead */
+        goto cleanup;
+    }
+
+    for (i = 0; i < dom->def->vcpus; i++) {
+        snprintf(param_name, VIR_TYPED_PARAM_FIELD_LENGTH,
+                 "vcpu.%u.state", cpuinfo[i].number);
+        if (virTypedParamsAddInt(&record->params,
+                                 &record->nparams,
+                                 maxparams,
+                                 param_name,
+                                 cpuinfo[i].state) < 0)
+            goto cleanup;
+
+        snprintf(param_name, VIR_TYPED_PARAM_FIELD_LENGTH,
+                 "vcpu.%u.time", cpuinfo[i].number);
+        if (virTypedParamsAddULLong(&record->params,
+                                    &record->nparams,
+                                    maxparams,
+                                    param_name,
+                                    cpuinfo[i].cpuTime) < 0)
+            goto cleanup;
+    }
+
+    ret = 0;
+
+ cleanup:
+    VIR_FREE(cpuinfo);
+    return ret;
+}
+
+
 typedef int
 (*qemuDomainGetStatsFunc)(virQEMUDriverPtr driver,
                           virDomainObjPtr dom,
@@ -17405,6 +17476,7 @@ static struct qemuDomainGetStatsWorker qemuDomainGetStatsWorkers[] = {
     { qemuDomainGetStatsState, VIR_DOMAIN_STATS_STATE, false },
     { qemuDomainGetStatsCpu, VIR_DOMAIN_STATS_CPU_TOTAL, false },
     { qemuDomainGetStatsBalloon, VIR_DOMAIN_STATS_BALLOON, true },
+    { qemuDomainGetStatsVcpu, VIR_DOMAIN_STATS_VCPU, false },
     { NULL, 0, false }
 };
 
-- 
1.9.3



