From 4f12612c2c25fb3093d1afa45030e424477344c7 Mon Sep 17 00:00:00 2001
From: Kevin Wolf <kwolf@redhat.com>
Date: Wed, 10 Oct 2018 20:08:40 +0100
Subject: [PATCH 09/49] block: Avoid unnecessary aio_poll() in AIO_WAIT_WHILE()

RH-Author: Kevin Wolf <kwolf@redhat.com>
Message-id: <20181010200843.6710-7-kwolf@redhat.com>
Patchwork-id: 82586
O-Subject: [RHEL-8 qemu-kvm PATCH 06/44] block: Avoid unnecessary aio_poll() in AIO_WAIT_WHILE()
Bugzilla: 1637976
RH-Acked-by: Max Reitz <mreitz@redhat.com>
RH-Acked-by: John Snow <jsnow@redhat.com>
RH-Acked-by: Thomas Huth <thuth@redhat.com>

Commit 91af091f923 added an additional aio_poll() to BDRV_POLL_WHILE()
in order to make sure that all pending BHs are executed on drain. This
was the wrong place to make the fix, as it is useless overhead for all
other users of the macro and unnecessarily complicates the mechanism.

This patch effectively reverts said commit (the context has changed a
bit and the code has moved to AIO_WAIT_WHILE()) and instead polls in the
loop condition for drain.

The effect is probably hard to measure in any real-world use case
because actual I/O will dominate, but if I run only the initialisation
part of 'qemu-img convert' where it calls bdrv_block_status() for the
whole image to find out how much data there is to copy, this phase actually
needs only roughly half the time after this patch.

Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
(cherry picked from commit 1cc8e54ada97f7ac479554e15ca9e426c895b158)
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
---
 block/io.c               | 11 ++++++++++-
 include/block/aio-wait.h | 22 ++++++++--------------
 2 files changed, 18 insertions(+), 15 deletions(-)

diff --git a/block/io.c b/block/io.c
index e5fc42c..4d332c3 100644
--- a/block/io.c
+++ b/block/io.c
@@ -181,13 +181,22 @@ static void bdrv_drain_invoke(BlockDriverState *bs, bool begin)
     BDRV_POLL_WHILE(bs, !data.done);
 }
 
+/* Returns true if BDRV_POLL_WHILE() should go into a blocking aio_poll() */
+static bool bdrv_drain_poll(BlockDriverState *bs)
+{
+    /* Execute pending BHs first and check everything else only after the BHs
+     * have executed. */
+    while (aio_poll(bs->aio_context, false));
+    return atomic_read(&bs->in_flight);
+}
+
 static bool bdrv_drain_recurse(BlockDriverState *bs)
 {
     BdrvChild *child, *tmp;
     bool waited;
 
     /* Wait for drained requests to finish */
-    waited = BDRV_POLL_WHILE(bs, atomic_read(&bs->in_flight) > 0);
+    waited = BDRV_POLL_WHILE(bs, bdrv_drain_poll(bs));
 
     QLIST_FOREACH_SAFE(child, &bs->children, next, tmp) {
         BlockDriverState *bs = child->bs;
diff --git a/include/block/aio-wait.h b/include/block/aio-wait.h
index 8c90a2e..783d367 100644
--- a/include/block/aio-wait.h
+++ b/include/block/aio-wait.h
@@ -73,29 +73,23 @@ typedef struct {
  */
 #define AIO_WAIT_WHILE(wait, ctx, cond) ({ \
     bool waited_ = false; \
-    bool busy_ = true; \
     AioWait *wait_ = (wait); \
     AioContext *ctx_ = (ctx); \
     if (in_aio_context_home_thread(ctx_)) { \
-        while ((cond) || busy_) { \
-            busy_ = aio_poll(ctx_, (cond)); \
-            waited_ |= !!(cond) | busy_; \
+        while ((cond)) { \
+            aio_poll(ctx_, true); \
+            waited_ = true; \
         } \
     } else { \
         assert(qemu_get_current_aio_context() == \
                qemu_get_aio_context()); \
         /* Increment wait_->num_waiters before evaluating cond. */ \
         atomic_inc(&wait_->num_waiters); \
-        while (busy_) { \
-            if ((cond)) { \
-                waited_ = busy_ = true; \
-                aio_context_release(ctx_); \
-                aio_poll(qemu_get_aio_context(), true); \
-                aio_context_acquire(ctx_); \
-            } else { \
-                busy_ = aio_poll(ctx_, false); \
-                waited_ |= busy_; \
-            } \
+        while ((cond)) { \
+            aio_context_release(ctx_); \
+            aio_poll(qemu_get_aio_context(), true); \
+            aio_context_acquire(ctx_); \
+            waited_ = true; \
         } \
         atomic_dec(&wait_->num_waiters); \
     } \
-- 
1.8.3.1