[Date Prev][Date Next]   [Thread Prev][Thread Next]   [Thread Index] [Date Index] [Author Index]

Re: [libvirt] [PATCH] implement virsh dump for qemu guests



On Thu, Jul 09, 2009 at 07:24:11PM +0200, Paolo Bonzini wrote:
> This patch uses a "migrate"+"cont" combination to implement "virsh dump"
> for QEMU guests (BZ507551).
> 
> The code is mostly based on qemudDomainSave, except that the XML
> prolog is not included as it is not needed to examine the dump
> with e.g. crash.

Does crash actually understand the QEMU migrate data format? I'm
rather (pleasantly) surprised if it does...

> 2009-07-08  Paolo Bonzini  <bonzini gnu org>
> 
> 	* qemu_driver.c (qemudDomainCoreDump): New.
> 	(qemuDriver): Add core dump function.
> ---
>  src/qemu_driver.c |   85 ++++++++++++++++++++++++++++++++++++++++++++++++++++-
>  1 files changed, 84 insertions(+), 1 deletions(-)
> 
> diff --git a/src/qemu_driver.c b/src/qemu_driver.c
> index 95ea882..546a691 100644
> --- a/src/qemu_driver.c
> +++ b/src/qemu_driver.c
> @@ -2844,6 +2844,89 @@ cleanup:
>  }
>  
>  
> +static int qemudDomainCoreDump(virDomainPtr dom,
> +                               const char *path,
> +                               int flags ATTRIBUTE_UNUSED) {
> +    struct qemud_driver *driver = dom->conn->privateData;
> +    virDomainObjPtr vm;
> +    char *command = NULL;
> +    char *info = NULL;
> +    char *safe_path = NULL;
> +    int ret = -1;
> +
> +    qemuDriverLock(driver);
> +    vm = virDomainFindByUUID(&driver->domains, dom->uuid);

Since you don't touch 'driver' again after this point you can safely
unlock it right here. This avoids blocking the whole QEMU driver
while the dump is taking place

> +
> +    if (!vm) {
> +        char uuidstr[VIR_UUID_STRING_BUFLEN];
> +        virUUIDFormat(dom->uuid, uuidstr);
> +        qemudReportError(dom->conn, dom, NULL, VIR_ERR_NO_DOMAIN,
> +                         _("no domain with matching uuid '%s'"), uuidstr);
> +        goto cleanup;
> +    }
> +
> +    if (!virDomainIsActive(vm)) {
> +        qemudReportError(dom->conn, dom, NULL, VIR_ERR_OPERATION_INVALID,
> +                         "%s", _("domain is not running"));
> +        goto cleanup;
> +    }
> +
> +    /* Migrate to file */
> +    safe_path = qemudEscapeShellArg(path);
> +    if (!safe_path) {
> +        virReportOOMError(dom->conn);
> +        goto cleanup;
> +    }
> +    if (virAsprintf(&command, "migrate \"exec:"
> +                  "dd of='%s' 2>/dev/null"
> +                  "\"", safe_path) == -1) {
> +        virReportOOMError(dom->conn);
> +        command = NULL;
> +        goto cleanup;
> +    }
> +
> +    if (qemudMonitorCommand(vm, command, &info) < 0) {
> +        qemudReportError(dom->conn, dom, NULL, VIR_ERR_OPERATION_FAILED,
> +                         "%s", _("migrate operation failed"));
> +        goto cleanup;
> +    }
> +
> +    DEBUG ("%s: migrate reply: %s", vm->def->name, info);
> +
> +    /* If the command isn't supported then qemu prints:
> +     * unknown command: migrate" */
> +    if (strstr(info, "unknown command:")) {
> +        qemudReportError (dom->conn, dom, NULL, VIR_ERR_NO_SUPPORT,
> +                          "%s",
> +                          _("'migrate' not supported by this qemu"));
> +        goto cleanup;
> +    }
> +
> +    /* Migrate always stops the VM.  However, since the monitor is always
> +       attached to a pty for libvirt, it will support synchronous
> +       operations so we get here just after the end of the migration.  */
> +    if (vm->state == VIR_DOMAIN_RUNNING) {
> +        if (qemudMonitorCommand(vm, "cont", &info) < 0) {
> +            qemudReportError(dom->conn, dom, NULL, VIR_ERR_OPERATION_FAILED,
> +                             "%s", _("continue operation failed"));
> +            goto cleanup;
> +        }
> +        DEBUG("Reply %s", info);
> +        VIR_FREE(info);
> +    }
> +    ret = 0;
> +
> +cleanup:
> +    VIR_FREE(safe_path);
> +    VIR_FREE(command);
> +    VIR_FREE(info);
> +    if (vm)
> +        virDomainObjUnlock(vm);
> +    qemuDriverUnlock(driver);
> +    return ret;
> +}
> +
> +
>  static int qemudDomainSetVcpus(virDomainPtr dom, unsigned int nvcpus) {
>      struct qemud_driver *driver = dom->conn->privateData;
>      virDomainObjPtr vm;
> @@ -5310,7 +5393,7 @@ static virDriver qemuDriver = {
>      qemudDomainGetInfo, /* domainGetInfo */
>      qemudDomainSave, /* domainSave */
>      qemudDomainRestore, /* domainRestore */
> -    NULL, /* domainCoreDump */
> +    qemudDomainCoreDump, /* domainCoreDump */
>      qemudDomainSetVcpus, /* domainSetVcpus */
>  #if HAVE_SCHED_GETAFFINITY
>      qemudDomainPinVcpu, /* domainPinVcpu */

ACK, only a minor locking optimization needed


Daniel
-- 
|: Red Hat, Engineering, London   -o-   http://people.redhat.com/berrange/ :|
|: http://libvirt.org  -o-  http://virt-manager.org  -o-  http://ovirt.org :|
|: http://autobuild.org       -o-         http://search.cpan.org/~danberr/ :|
|: GnuPG: 7D3B9505  -o-  F3C9 553F A1DA 4AC2 5648 23C1 B3DF F742 7D3B 9505 :|


[Date Prev][Date Next]   [Thread Prev][Thread Next]   [Thread Index] [Date Index] [Author Index]