From 7c16384f8ce4d46d6baa11db376c488ed8478744 Mon Sep 17 00:00:00 2001
From: Kevin Wolf <kwolf@redhat.com>
Date: Wed, 10 Oct 2018 20:22:06 +0100
Subject: [PATCH 40/49] blockjob: Lie better in child_job_drained_poll()

RH-Author: Kevin Wolf <kwolf@redhat.com>
Message-id: <20181010202213.7372-28-kwolf@redhat.com>
Patchwork-id: 82616
O-Subject: [RHEL-8 qemu-kvm PATCH 37/44] blockjob: Lie better in child_job_drained_poll()
Bugzilla: 1637976
RH-Acked-by: Max Reitz <mreitz@redhat.com>
RH-Acked-by: John Snow <jsnow@redhat.com>
RH-Acked-by: Thomas Huth <thuth@redhat.com>

Block jobs claim in .drained_poll() that they are in a quiescent state
as soon as job->deferred_to_main_loop is true. This is obviously wrong,
they still have a completion BH to run. We only get away with this
because commit 91af091f923 added an unconditional aio_poll(false) to the
drain functions, but this is bypassing the regular drain mechanisms.

However, just removing this and telling that the job is still active
doesn't work either: The completion callbacks themselves call drain
functions (directly, or indirectly with bdrv_reopen), so they would
deadlock then.

As a better lie, tell that the job is active as long as the BH is
pending, but falsely call it quiescent from the point in the BH when the
completion callback is called. At this point, nested drain calls won't
deadlock because they ignore the job, and outer drains will wait for the
job to really reach a quiescent state because the callback is already
running.

Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
(cherry picked from commit b5a7a0573530698ee448b063ac01d485e30446bd)
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
---
 blockjob.c         |  2 +-
 include/qemu/job.h |  3 +++
 job.c              | 11 ++++++++++-
 3 files changed, 14 insertions(+), 2 deletions(-)

diff --git a/blockjob.c b/blockjob.c
index 8d27e8e..617d86f 100644
--- a/blockjob.c
+++ b/blockjob.c
@@ -164,7 +164,7 @@ static bool child_job_drained_poll(BdrvChild *c)
     /* An inactive or completed job doesn't have any pending requests. Jobs
      * with !job->busy are either already paused or have a pause point after
      * being reentered, so no job driver code will run before they pause. */
-    if (!job->busy || job_is_completed(job) || job->deferred_to_main_loop) {
+    if (!job->busy || job_is_completed(job)) {
         return false;
     }
 
diff --git a/include/qemu/job.h b/include/qemu/job.h
index 35ac7a9..d1710f3 100644
--- a/include/qemu/job.h
+++ b/include/qemu/job.h
@@ -76,6 +76,9 @@ typedef struct Job {
      * Set to false by the job while the coroutine has yielded and may be
      * re-entered by job_enter(). There may still be I/O or event loop activity
      * pending. Accessed under block_job_mutex (in blockjob.c).
+     *
+     * When the job is deferred to the main loop, busy is true as long as the
+     * bottom half is still pending.
      */
     bool busy;
 
diff --git a/job.c b/job.c
index 47b5a11..42af9e2 100644
--- a/job.c
+++ b/job.c
@@ -852,7 +852,16 @@ static void job_exit(void *opaque)
     AioContext *ctx = job->aio_context;
 
     aio_context_acquire(ctx);
+
+    /* This is a lie, we're not quiescent, but still doing the completion
+     * callbacks. However, completion callbacks tend to involve operations that
+     * drain block nodes, and if .drained_poll still returned true, we would
+     * deadlock. */
+    job->busy = false;
+    job_event_idle(job);
+
     job_completed(job);
+
     aio_context_release(ctx);
 }
 
@@ -867,8 +876,8 @@ static void coroutine_fn job_co_entry(void *opaque)
     assert(job && job->driver && job->driver->run);
     job_pause_point(job);
     job->ret = job->driver->run(job, &job->err);
-    job_event_idle(job);
     job->deferred_to_main_loop = true;
+    job->busy = true;
     aio_bh_schedule_oneshot(qemu_get_aio_context(), job_exit, job);
 }
 
-- 
1.8.3.1
