[Date Prev][Date Next]   [Thread Prev][Thread Next]   [Thread Index] [Date Index] [Author Index]

[libvirt] [PATCH 5/5] vcpupin: add query option to virsh vcpupin command



This patch adds a --query option to the "virsh vcpupin" command.
Its purpose is to show CPU affinity information in a more
reader-friendly way.

 # virsh vcpupin VM --query --config
 VCPU: CPU Affinity
 ----------------------------------
    0: 1-6,9-20
    1: 10
    2: 5,9-11,15-20
    3: 1,3,5,7,9,11,13,15

When --query is specified, cpulist is not required and the vcpu number
is optional. When a vcpu number is provided, information for only the
specified vcpu is displayed.

Signed-off-by: Taku Izumi <izumi taku jp fujitsu com>
---
 tools/virsh.c   |   97 ++++++++++++++++++++++++++++++++++++++++++++++++--------
 tools/virsh.pod |   11 +++++-
 2 files changed, 93 insertions(+), 15 deletions(-)

Index: libvirt/tools/virsh.c
===================================================================
--- libvirt.orig/tools/virsh.c
+++ libvirt/tools/virsh.c
@@ -2992,15 +2992,16 @@ cmdVcpuinfo(vshControl *ctl, const vshCm
  * "vcpupin" command
  */
 static const vshCmdInfo info_vcpupin[] = {
-    {"help", N_("control domain vcpu affinity")},
+    {"help", N_("control or query domain vcpu affinity")},
     {"desc", N_("Pin domain VCPUs to host physical CPUs.")},
     {NULL, NULL}
 };
 
 static const vshCmdOptDef opts_vcpupin[] = {
     {"domain", VSH_OT_DATA, VSH_OFLAG_REQ, N_("domain name, id or uuid")},
-    {"vcpu", VSH_OT_INT, VSH_OFLAG_REQ, N_("vcpu number")},
-    {"cpulist", VSH_OT_DATA, VSH_OFLAG_REQ, N_("host cpu number(s) (comma separated)")},
+    {"vcpu", VSH_OT_INT, 0, N_("vcpu number")},
+    {"cpulist", VSH_OT_DATA, VSH_OFLAG_EMPTY_OK, N_("host cpu number(s)")},
+    {"query", VSH_OT_BOOL, 0, N_("query CPU affinity information")},
     {"config", VSH_OT_BOOL, 0, N_("affect next boot")},
     {"live", VSH_OT_BOOL, 0, N_("affect running domain")},
     {"current", VSH_OT_BOOL, 0, N_("affect current domain")},
@@ -3013,14 +3014,18 @@ cmdVcpupin(vshControl *ctl, const vshCmd
     virDomainInfo info;
     virDomainPtr dom;
     virNodeInfo nodeinfo;
-    int vcpu;
+    int vcpu = -1;
     const char *cpulist = NULL;
     bool ret = true;
-    unsigned char *cpumap;
+    unsigned char *cpumap = NULL;
+    unsigned char *cpumaps = NULL;
     int cpumaplen;
-    int i, cpu, lastcpu, maxcpu;
+    int bit, lastbit;
+    bool isInvert;
+    int i, cpu, lastcpu, maxcpu, ncpus;
     bool unuse = false;
     const char *cur;
+    int query = vshCommandOptBool(cmd, "query");
     int config = vshCommandOptBool(cmd, "config");
     int live = vshCommandOptBool(cmd, "live");
     int current = vshCommandOptBool(cmd, "current");
@@ -3049,14 +3054,22 @@ cmdVcpupin(vshControl *ctl, const vshCmd
         return false;
 
     if (vshCommandOptInt(cmd, "vcpu", &vcpu) <= 0) {
-        vshError(ctl, "%s", _("vcpupin: Invalid or missing vCPU number."));
-        virDomainFree(dom);
-        return false;
+        /* In query mode, "vcpu" is optional */
+        if (!query) {
+            vshError(ctl, "%s",
+                     _("vcpupin: Invalid or missing vCPU number."));
+            virDomainFree(dom);
+            return false;
+        }
     }
 
     if (vshCommandOptString(cmd, "cpulist", &cpulist) <= 0) {
-        virDomainFree(dom);
-        return false;
+        /* In query mode, "cpulist" is optional */
+        if (!query) {
+            vshError(ctl, "%s", _("vcpupin: Missing cpulist."));
+            virDomainFree(dom);
+            return false;
+        }
     }
 
     if (virNodeGetInfo(ctl->conn, &nodeinfo) != 0) {
@@ -3078,8 +3091,65 @@ cmdVcpupin(vshControl *ctl, const vshCmd
 
     maxcpu = VIR_NODEINFO_MAXCPUS(nodeinfo);
     cpumaplen = VIR_CPU_MAPLEN(maxcpu);
-    cpumap = vshCalloc(ctl, 0, cpumaplen);
 
+    /* Query mode: show CPU affinity information, then exit. */
+    if (query) {
+        /* In query mode, when neither "live", "config" nor "current" is
+         * specified, use VIR_DOMAIN_AFFECT_CURRENT as flags */
+        if (flags == -1)
+            flags = VIR_DOMAIN_AFFECT_CURRENT;
+
+        cpumaps = vshMalloc(ctl, info.nrVirtCpu * cpumaplen);
+        if ((ncpus = virDomainGetVcpupinInfo(dom, info.nrVirtCpu,
+                                             cpumaps, cpumaplen, flags)) >= 0) {
+
+            vshPrint(ctl, "%s %s\n", _("VCPU:"), _("CPU Affinity"));
+            vshPrint(ctl, "----------------------------------\n");
+            for (i = 0; i < ncpus; i++) {
+
+               if (vcpu != -1 && i != vcpu)
+                   continue;
+
+               bit = lastbit = 0;
+               isInvert = false;
+               lastcpu = -1;
+
+               vshPrint(ctl, "%4d: ", i);
+               for (cpu = 0; cpu < maxcpu; cpu++) {
+
+                  if (VIR_CPU_USABLE(cpumaps, cpumaplen, i, cpu))
+                      bit = 1;
+                  else
+                      bit = 0;
+
+                  isInvert = (bit ^ lastbit) ? true : false;
+                  if (bit && isInvert) {
+                      if (lastcpu == -1)
+                          vshPrint(ctl, "%d", cpu);
+                      else
+                          vshPrint(ctl, ",%d", cpu);
+                      lastcpu = cpu;
+                  }
+                  if (!bit && isInvert && lastcpu != cpu - 1)
+                      vshPrint(ctl, "-%d", cpu - 1);
+                  lastbit = bit;
+               }
+               if (bit && !isInvert) {
+                  vshPrint(ctl, "-%d", maxcpu - 1);
+               }
+               vshPrint(ctl, "\n");
+            }
+
+        } else {
+            ret = false;
+        }
+        VIR_FREE(cpumaps);
+        goto cleanup;
+    }
+
+    /* Pin mode: pin the specified vcpu to the specified physical cpus */
+
+    cpumap = vshCalloc(ctl, 0, cpumaplen);
     /* Parse cpulist */
     cur = cpulist;
     if (*cur == 0) {
@@ -3161,7 +3231,8 @@ cmdVcpupin(vshControl *ctl, const vshCmd
     }
 
 cleanup:
-    VIR_FREE(cpumap);
+    if (cpumap)
+        VIR_FREE(cpumap);
     virDomainFree(dom);
     return ret;
 
Index: libvirt/tools/virsh.pod
===================================================================
--- libvirt.orig/tools/virsh.pod
+++ libvirt/tools/virsh.pod
@@ -838,8 +838,15 @@ vCPUs, the running time, the affinity to
 =item B<vcpupin> I<domain-id> I<vcpu> I<cpulist> optional I<--live> I<--config>
 I<--current>
 
-Pin domain VCPUs to host physical CPUs. The I<vcpu> number must be provided
-and I<cpulist> is a list of physical CPU numbers. Its syntax is a comma
+=item B<vcpupin> I<domain-id> I<--query> optional I<vcpu> I<--live> I<--config>
+I<--current>
+
+Pin domain VCPUs to host physical CPUs, or query CPU affinity information
+(specify I<--query>). When pinning a vCPU, the I<vcpu> number and I<cpulist>
+must be provided. When querying affinity information, I<cpulist> is not
+required and I<vcpu> is optional.
+
+I<cpulist> is a list of physical CPU numbers. Its syntax is a comma
 separated list and a special markup using '-' and '^' (ex. '0-4', '0-3,^2') can
 also be allowed. The '-' denotes the range and the '^' denotes exclusive.
 If you want to reset vcpupin setting, that is, to pin vcpu all physical cpus,


[Date Prev][Date Next]   [Thread Prev][Thread Next]   [Thread Index] [Date Index] [Author Index]