[libvirt] [PATCH 8/8] qemu: remove qemuDomainObjBegin/EndJobWithAgent()

Jonathon Jongsma jjongsma at redhat.com
Thu Dec 5 16:08:57 UTC 2019


This function potentially grabs both a monitor job and an agent job at
the same time. This is problematic because it means that a malicious (or
just buggy) guest agent can cause a denial of service on the host. The
presence of this function makes it easy to do the wrong thing and hold
both jobs at the same time. All existing uses have already been removed
by previous commits.

Signed-off-by: Jonathon Jongsma <jjongsma at redhat.com>
---
 src/qemu/THREADS.txt   | 58 +++++-------------------------------------
 src/qemu/qemu_domain.c | 56 ++++------------------------------------
 src/qemu/qemu_domain.h |  7 -----
 3 files changed, 11 insertions(+), 110 deletions(-)

diff --git a/src/qemu/THREADS.txt b/src/qemu/THREADS.txt
index aa428fda6a..a7d8709a43 100644
--- a/src/qemu/THREADS.txt
+++ b/src/qemu/THREADS.txt
@@ -61,11 +61,12 @@ There are a number of locks on various objects
 
     Agent job condition is then used when thread wishes to talk to qemu
     agent monitor. It is possible to acquire just agent job
-    (qemuDomainObjBeginAgentJob), or only normal job
-    (qemuDomainObjBeginJob) or both at the same time
-    (qemuDomainObjBeginJobWithAgent). Which type of job to grab depends
-    whether caller wishes to communicate only with agent socket, or only
-    with qemu monitor socket or both, respectively.
+    (qemuDomainObjBeginAgentJob), or only normal job (qemuDomainObjBeginJob)
+    but not both at the same time. Holding an agent job and a normal job would
+    allow an unresponsive or malicious agent to block normal libvirt API and
+    potentially result in a denial of service. Which type of job to grab
+    depends on whether the caller wishes to communicate only with the agent
+    socket or only with the qemu monitor socket.
 
     Immediately after acquiring the virDomainObjPtr lock, any method
     which intends to update state must acquire asynchronous, normal or
@@ -141,18 +142,6 @@ To acquire the agent job condition
 
 
 
-To acquire both normal and agent job condition
-
-  qemuDomainObjBeginJobWithAgent()
-    - Waits until there is no normal and no agent job set
-    - Sets both job.active and job.agentActive with required job types
-
-  qemuDomainObjEndJobWithAgent()
-    - Sets both job.active and job.agentActive to 0
-    - Signals on job.cond condition
-
-
-
 To acquire the asynchronous job condition
 
   qemuDomainObjBeginAsyncJob()
@@ -292,41 +281,6 @@ Design patterns
      virDomainObjEndAPI(&obj);
 
 
- * Invoking both monitor and agent commands on a virDomainObjPtr
-
-     virDomainObjPtr obj;
-     qemuAgentPtr agent;
-
-     obj = qemuDomObjFromDomain(dom);
-
-     qemuDomainObjBeginJobWithAgent(obj, QEMU_JOB_TYPE, QEMU_AGENT_JOB_TYPE);
-
-     if (!virDomainObjIsActive(dom))
-         goto cleanup;
-
-     ...do prep work...
-
-     if (!qemuDomainAgentAvailable(obj, true))
-         goto cleanup;
-
-     agent = qemuDomainObjEnterAgent(obj);
-     qemuAgentXXXX(agent, ..);
-     qemuDomainObjExitAgent(obj, agent);
-
-     ...
-
-     qemuDomainObjEnterMonitor(obj);
-     qemuMonitorXXXX(priv->mon);
-     qemuDomainObjExitMonitor(obj);
-
-     /* Alternatively, talk to the monitor first and then talk to the agent. */
-
-     ...do final work...
-
-     qemuDomainObjEndJobWithAgent(obj);
-     virDomainObjEndAPI(&obj);
-
-
  * Running asynchronous job
 
      virDomainObjPtr obj;
diff --git a/src/qemu/qemu_domain.c b/src/qemu/qemu_domain.c
index 470d342afc..97cf6b2255 100644
--- a/src/qemu/qemu_domain.c
+++ b/src/qemu/qemu_domain.c
@@ -8569,26 +8569,6 @@ qemuDomainObjBeginAgentJob(virQEMUDriverPtr driver,
                                          QEMU_ASYNC_JOB_NONE, false);
 }
 
-/**
- * qemuDomainObjBeginJobWithAgent:
- *
- * Grabs both monitor and agent types of job. Use if caller talks to
- * both monitor and guest agent. However, if @job (or @agentJob) is
- * QEMU_JOB_NONE (or QEMU_AGENT_JOB_NONE) only agent job is acquired (or
- * monitor job).
- *
- * To end job call qemuDomainObjEndJobWithAgent.
- */
-int
-qemuDomainObjBeginJobWithAgent(virQEMUDriverPtr driver,
-                               virDomainObjPtr obj,
-                               qemuDomainJob job,
-                               qemuDomainAgentJob agentJob)
-{
-    return qemuDomainObjBeginJobInternal(driver, obj, job, agentJob,
-                                         QEMU_ASYNC_JOB_NONE, false);
-}
-
 int qemuDomainObjBeginAsyncJob(virQEMUDriverPtr driver,
                                virDomainObjPtr obj,
                                qemuDomainAsyncJob asyncJob,
@@ -8703,31 +8683,6 @@ qemuDomainObjEndAgentJob(virDomainObjPtr obj)
     virCondBroadcast(&priv->job.cond);
 }
 
-void
-qemuDomainObjEndJobWithAgent(virQEMUDriverPtr driver,
-                             virDomainObjPtr obj)
-{
-    qemuDomainObjPrivatePtr priv = obj->privateData;
-    qemuDomainJob job = priv->job.active;
-    qemuDomainAgentJob agentJob = priv->job.agentActive;
-
-    priv->jobs_queued--;
-
-    VIR_DEBUG("Stopping both jobs: %s %s (async=%s vm=%p name=%s)",
-              qemuDomainJobTypeToString(job),
-              qemuDomainAgentJobTypeToString(agentJob),
-              qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
-              obj, obj->def->name);
-
-    qemuDomainObjResetJob(priv);
-    qemuDomainObjResetAgentJob(priv);
-    if (qemuDomainTrackJob(job))
-        qemuDomainObjSaveStatus(driver, obj);
-    /* We indeed need to wake up ALL threads waiting because
-     * grabbing a job requires checking more variables. */
-    virCondBroadcast(&priv->job.cond);
-}
-
 void
 qemuDomainObjEndAsyncJob(virQEMUDriverPtr driver, virDomainObjPtr obj)
 {
@@ -8761,9 +8716,9 @@ qemuDomainObjAbortAsyncJob(virDomainObjPtr obj)
  * obj must be locked before calling
  *
  * To be called immediately before any QEMU monitor API call
- * Must have already either called qemuDomainObjBeginJob() or
- * qemuDomainObjBeginJobWithAgent() and checked that the VM is
- * still active; may not be used for nested async jobs.
+ * Must have already called qemuDomainObjBeginJob() and checked
+ * that the VM is still active; may not be used for nested async
+ * jobs.
  *
  * To be followed with qemuDomainObjExitMonitor() once complete
  */
@@ -8885,9 +8840,8 @@ qemuDomainObjEnterMonitorAsync(virQEMUDriverPtr driver,
  * obj must be locked before calling
  *
  * To be called immediately before any QEMU agent API call.
- * Must have already called qemuDomainObjBeginAgentJob() or
- * qemuDomainObjBeginJobWithAgent() and checked that the VM is
- * still active.
+ * Must have already called qemuDomainObjBeginAgentJob() and
+ * checked that the VM is still active.
  *
  * To be followed with qemuDomainObjExitAgent() once complete
  */
diff --git a/src/qemu/qemu_domain.h b/src/qemu/qemu_domain.h
index f626d3a54c..fb4c9e0467 100644
--- a/src/qemu/qemu_domain.h
+++ b/src/qemu/qemu_domain.h
@@ -622,11 +622,6 @@ int qemuDomainObjBeginAgentJob(virQEMUDriverPtr driver,
                                virDomainObjPtr obj,
                                qemuDomainAgentJob agentJob)
     G_GNUC_WARN_UNUSED_RESULT;
-int qemuDomainObjBeginJobWithAgent(virQEMUDriverPtr driver,
-                                   virDomainObjPtr obj,
-                                   qemuDomainJob job,
-                                   qemuDomainAgentJob agentJob)
-    G_GNUC_WARN_UNUSED_RESULT;
 int qemuDomainObjBeginAsyncJob(virQEMUDriverPtr driver,
                                virDomainObjPtr obj,
                                qemuDomainAsyncJob asyncJob,
@@ -645,8 +640,6 @@ int qemuDomainObjBeginJobNowait(virQEMUDriverPtr driver,
 void qemuDomainObjEndJob(virQEMUDriverPtr driver,
                          virDomainObjPtr obj);
 void qemuDomainObjEndAgentJob(virDomainObjPtr obj);
-void qemuDomainObjEndJobWithAgent(virQEMUDriverPtr driver,
-                                  virDomainObjPtr obj);
 void qemuDomainObjEndAsyncJob(virQEMUDriverPtr driver,
                               virDomainObjPtr obj);
 void qemuDomainObjAbortAsyncJob(virDomainObjPtr obj);
-- 
2.21.0




More information about the libvir-list mailing list