[Date Prev][Date Next]   [Thread Prev][Thread Next]   [Thread Index] [Date Index] [Author Index]

Re: [libvirt] [PATCHv4 07/51] snapshot: properly revert qemu to offline snapshots



On 09/01/2011 10:24 PM, Eric Blake wrote:
Commit 5e47785 broke reverts to offline system checkpoint snapshots
with older qemu, since there is no longer any code path to use
qemu -loadvm on next boot.  Meanwhile, reverts to offline system
checkpoints have been broken for newer qemu, both before and
after that commit, since -loadvm no longer works to revert to
disk state without accompanying vm state.  Fix both of these by
using qemu-img to revert disk state.

qemuDomainSnapshotRevertInactive has the same FIXMEs as
qemuDomainSnapshotCreateInactive, so algorithmic fixes to properly
handle partial loop iterations should be applied later to both
functions, but we're not making the situation any worse in
this patch.

I don't like copy-and-paste FIXMEs; it's too easy to later fix one and not the other. Just because it was already the same code twice doesn't mean I should be the third instance.

I'm squashing this in before pushing the set including this patch.

diff --git i/src/qemu/qemu_driver.c w/src/qemu/qemu_driver.c
index c8836cd..6dd6826 100644
--- i/src/qemu/qemu_driver.c
+++ w/src/qemu/qemu_driver.c
@@ -8478,14 +8478,18 @@ static int qemuDomainSnapshotIsAllowed(virDomainObjPtr vm)
     return 1;
 }

-/* The domain is expected to be locked and inactive. */
+/* The domain is expected to be locked and inactive. Return -1 on normal
+ * failure, 1 if we skipped a disk due to try_all.  */
 static int
-qemuDomainSnapshotCreateInactive(virDomainObjPtr vm,
-                                 virDomainSnapshotObjPtr snap)
+qemuDomainSnapshotForEachQcow2(virDomainObjPtr vm,
+                               virDomainSnapshotObjPtr snap,
+                               const char *op,
+                               bool try_all)
 {
-    const char *qemuimgarg[] = { NULL, "snapshot", "-c", NULL, NULL, NULL };
+    const char *qemuimgarg[] = { NULL, "snapshot", NULL, NULL, NULL, NULL };
     int ret = -1;
     int i;
+    bool skipped = false;

     qemuimgarg[0] = qemuFindQemuImgBinary();
     if (qemuimgarg[0] == NULL) {
@@ -8493,6 +8497,7 @@ qemuDomainSnapshotCreateInactive(virDomainObjPtr vm,
         goto cleanup;
     }

+    qemuimgarg[2] = op;
     qemuimgarg[3] = snap->def->name;

     for (i = 0; i < vm->def->ndisks; i++) {
@@ -8503,6 +8508,15 @@ qemuDomainSnapshotCreateInactive(virDomainObjPtr vm,
         if (vm->def->disks[i]->device == VIR_DOMAIN_DISK_DEVICE_DISK) {
             if (!vm->def->disks[i]->driverType ||
                 STRNEQ(vm->def->disks[i]->driverType, "qcow2")) {
+                if (try_all) {
+                    /* Continue on even in the face of error, since other
+                     * disks in this VM may have the same snapshot name.
+                     */
+                    VIR_WARN("skipping snapshot action on %s",
+                             vm->def->disks[i]->info.alias);
+                    skipped = true;
+                    continue;
+                }
                 qemuReportError(VIR_ERR_OPERATION_INVALID,
                                 _("Disk device '%s' does not support"
                                   " snapshotting"),
@@ -8512,18 +8526,33 @@ qemuDomainSnapshotCreateInactive(virDomainObjPtr vm,

             qemuimgarg[4] = vm->def->disks[i]->src;

-            if (virRun(qemuimgarg, NULL) < 0)
+            if (virRun(qemuimgarg, NULL) < 0) {
+                if (try_all) {
+                    VIR_WARN("skipping snapshot action on %s",
+                             vm->def->disks[i]->info.alias);
+                    skipped = true;
+                    continue;
+                }
                 goto cleanup;
+            }
         }
     }

-    ret = 0;
+    ret = skipped ? 1 : 0;

 cleanup:
     VIR_FREE(qemuimgarg[0]);
     return ret;
 }

+/* The domain is expected to be locked and inactive. */
+static int
+qemuDomainSnapshotCreateInactive(virDomainObjPtr vm,
+                                 virDomainSnapshotObjPtr snap)
+{
+    return qemuDomainSnapshotForEachQcow2(vm, snap, "-c", false);
+}
+
 /* The domain is expected to be locked and active. */
 static int
 qemuDomainSnapshotCreateActive(virConnectPtr conn,
@@ -8873,45 +8902,9 @@ static int
 qemuDomainSnapshotRevertInactive(virDomainObjPtr vm,
                                  virDomainSnapshotObjPtr snap)
 {
-    const char *qemuimgarg[] = { NULL, "snapshot", "-a", NULL, NULL, NULL };
-    int ret = -1;
-    int i;
-
-    qemuimgarg[0] = qemuFindQemuImgBinary();
-    if (qemuimgarg[0] == NULL) {
-        /* qemuFindQemuImgBinary set the error */
-        goto cleanup;
-    }
-
-    qemuimgarg[3] = snap->def->name;
-
-    for (i = 0; i < vm->def->ndisks; i++) {
-        /* FIXME: we also need to handle LVM here */
-        /* FIXME: if we fail halfway through this loop, we are in an
-         * inconsistent state.  I'm not quite sure what to do about that
-         */
-        if (vm->def->disks[i]->device == VIR_DOMAIN_DISK_DEVICE_DISK) {
-            if (!vm->def->disks[i]->driverType ||
-                STRNEQ(vm->def->disks[i]->driverType, "qcow2")) {
-                qemuReportError(VIR_ERR_OPERATION_INVALID,
-                                _("Disk device '%s' does not support"
-                                  " snapshotting"),
-                                vm->def->disks[i]->info.alias);
-                goto cleanup;
-            }
-
-            qemuimgarg[4] = vm->def->disks[i]->src;
-
-            if (virRun(qemuimgarg, NULL) < 0)
-                goto cleanup;
-        }
-    }
-
-    ret = 0;
-
-cleanup:
-    VIR_FREE(qemuimgarg[0]);
-    return ret;
+    /* Try all disks, but report failure if we skipped any.  */
+    int ret = qemuDomainSnapshotForEachQcow2(vm, snap, "-a", true);
+    return ret > 0 ? -1 : ret;
 }

 static int qemuDomainRevertToSnapshot(virDomainSnapshotPtr snapshot,
@@ -9135,42 +9128,15 @@ static int qemuDomainSnapshotDiscard(struct qemud_driver *driver,
                                      virDomainObjPtr vm,
                                      virDomainSnapshotObjPtr snap)
 {
-    const char *qemuimgarg[] = { NULL, "snapshot", "-d", NULL, NULL, NULL };
     char *snapFile = NULL;
     int ret = -1;
-    int i;
     qemuDomainObjPrivatePtr priv;
     virDomainSnapshotObjPtr parentsnap = NULL;

     if (!virDomainObjIsActive(vm)) {
-        qemuimgarg[0] = qemuFindQemuImgBinary();
-        if (qemuimgarg[0] == NULL)
-            /* qemuFindQemuImgBinary set the error */
+        /* Ignore any skipped disks */
+        if (qemuDomainSnapshotForEachQcow2(vm, snap, "-d", true) < 0)
             goto cleanup;
-
-        qemuimgarg[3] = snap->def->name;
-
-        for (i = 0; i < vm->def->ndisks; i++) {
-            /* FIXME: we also need to handle LVM here */
-            if (vm->def->disks[i]->device == VIR_DOMAIN_DISK_DEVICE_DISK) {
-                if (!vm->def->disks[i]->driverType ||
-                    STRNEQ(vm->def->disks[i]->driverType, "qcow2")) {
-                    /* we continue on even in the face of error, since other
-                     * disks in this VM may have this snapshot in place
-                     */
-                    continue;
-                }
-
-                qemuimgarg[4] = vm->def->disks[i]->src;
-
-                if (virRun(qemuimgarg, NULL) < 0) {
-                    /* we continue on even in the face of error, since other
-                     * disks in this VM may have this snapshot in place
-                     */
-                    continue;
-                }
-            }
-        }
     } else {
         priv = vm->privateData;
         qemuDomainObjEnterMonitorWithDriver(driver, vm);
@@ -9214,7 +9180,6 @@ static int qemuDomainSnapshotDiscard(struct qemud_driver *driver,

 cleanup:
     VIR_FREE(snapFile);
-    VIR_FREE(qemuimgarg[0]);

     return ret;
 }


--
Eric Blake   eblake redhat com    +1-801-349-2682
Libvirt virtualization library http://libvirt.org


[Date Prev][Date Next]   [Thread Prev][Thread Next]   [Thread Index] [Date Index] [Author Index]