|
|
9ae3a8 |
From 600b6421fd97881aad2471ea0a6f465a2d55e9d6 Mon Sep 17 00:00:00 2001
|
|
|
9ae3a8 |
From: Dr. David Alan Gilbert (git) <dgilbert@redhat.com>
|
|
|
9ae3a8 |
Date: Thu, 8 May 2014 10:58:39 +0200
|
|
|
9ae3a8 |
Subject: [PATCH 04/31] XBZRLE: Fix qemu crash when resize the xbzrle cache
|
|
|
9ae3a8 |
|
|
|
9ae3a8 |
RH-Author: Dr. David Alan Gilbert (git) <dgilbert@redhat.com>
|
|
|
9ae3a8 |
Message-id: <1399546722-6350-2-git-send-email-dgilbert@redhat.com>
|
|
|
9ae3a8 |
Patchwork-id: 58741
|
|
|
9ae3a8 |
O-Subject: [RHEL7.1/RHEL7.0.z qemu-kvm PATCH 1/4] XBZRLE: Fix qemu crash when resize the xbzrle cache
|
|
|
9ae3a8 |
Bugzilla: 1066338
|
|
|
9ae3a8 |
RH-Acked-by: Juan Quintela <quintela@redhat.com>
|
|
|
9ae3a8 |
RH-Acked-by: Markus Armbruster <armbru@redhat.com>
|
|
|
9ae3a8 |
RH-Acked-by: Amit Shah <amit.shah@redhat.com>
|
|
|
9ae3a8 |
|
|
|
9ae3a8 |
From: Gonglei <arei.gonglei@huawei.com>
|
|
|
9ae3a8 |
|
|
|
9ae3a8 |
Resizing the xbzrle cache during migration causes a qemu crash,
|
|
|
9ae3a8 |
because the main-thread and migration-thread modify the xbzrle
|
|
|
9ae3a8 |
cache size concurrently without lock-protection.
|
|
|
9ae3a8 |
|
|
|
9ae3a8 |
Signed-off-by: ChenLiang <chenliang88@huawei.com>
|
|
|
9ae3a8 |
Signed-off-by: Gonglei <arei.gonglei@huawei.com>
|
|
|
9ae3a8 |
Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
|
|
|
9ae3a8 |
Signed-off-by: Juan Quintela <quintela@redhat.com>
|
|
|
9ae3a8 |
(cherry picked from commit fd8cec932c2ddc687e2da954978954b46a926f90)
|
|
|
9ae3a8 |
---
|
|
|
9ae3a8 |
arch_init.c | 52 +++++++++++++++++++++++++++++++++++++++++++++++++---
|
|
|
9ae3a8 |
1 file changed, 49 insertions(+), 3 deletions(-)
|
|
|
9ae3a8 |
|
|
|
9ae3a8 |
Signed-off-by: Miroslav Rezanina <mrezanin@redhat.com>
|
|
|
9ae3a8 |
---
|
|
|
9ae3a8 |
arch_init.c | 52 +++++++++++++++++++++++++++++++++++++++++++++++++---
|
|
|
9ae3a8 |
 1 file changed, 49 insertions(+), 3 deletions(-)
|
|
|
9ae3a8 |
|
|
|
9ae3a8 |
diff --git a/arch_init.c b/arch_init.c
|
|
|
9ae3a8 |
index f5d521a..8641afa 100644
|
|
|
9ae3a8 |
--- a/arch_init.c
|
|
|
9ae3a8 |
+++ b/arch_init.c
|
|
|
9ae3a8 |
@@ -164,8 +164,9 @@ static struct {
|
|
|
9ae3a8 |
uint8_t *encoded_buf;
|
|
|
9ae3a8 |
/* buffer for storing page content */
|
|
|
9ae3a8 |
uint8_t *current_buf;
|
|
|
9ae3a8 |
- /* Cache for XBZRLE */
|
|
|
9ae3a8 |
+ /* Cache for XBZRLE, Protected by lock. */
|
|
|
9ae3a8 |
PageCache *cache;
|
|
|
9ae3a8 |
+ QemuMutex lock;
|
|
|
9ae3a8 |
} XBZRLE = {
|
|
|
9ae3a8 |
.encoded_buf = NULL,
|
|
|
9ae3a8 |
.current_buf = NULL,
|
|
|
9ae3a8 |
@@ -174,16 +175,52 @@ static struct {
|
|
|
9ae3a8 |
/* buffer used for XBZRLE decoding */
|
|
|
9ae3a8 |
static uint8_t *xbzrle_decoded_buf;
|
|
|
9ae3a8 |
|
|
|
9ae3a8 |
+static void XBZRLE_cache_lock(void)
|
|
|
9ae3a8 |
+{
|
|
|
9ae3a8 |
+ if (migrate_use_xbzrle())
|
|
|
9ae3a8 |
+ qemu_mutex_lock(&XBZRLE.lock);
|
|
|
9ae3a8 |
+}
|
|
|
9ae3a8 |
+
|
|
|
9ae3a8 |
+static void XBZRLE_cache_unlock(void)
|
|
|
9ae3a8 |
+{
|
|
|
9ae3a8 |
+ if (migrate_use_xbzrle())
|
|
|
9ae3a8 |
+ qemu_mutex_unlock(&XBZRLE.lock);
|
|
|
9ae3a8 |
+}
|
|
|
9ae3a8 |
+
|
|
|
9ae3a8 |
int64_t xbzrle_cache_resize(int64_t new_size)
|
|
|
9ae3a8 |
{
|
|
|
9ae3a8 |
+ PageCache *new_cache, *cache_to_free;
|
|
|
9ae3a8 |
+
|
|
|
9ae3a8 |
if (new_size < TARGET_PAGE_SIZE) {
|
|
|
9ae3a8 |
return -1;
|
|
|
9ae3a8 |
}
|
|
|
9ae3a8 |
|
|
|
9ae3a8 |
+ /* no need to lock, the current thread holds qemu big lock */
|
|
|
9ae3a8 |
if (XBZRLE.cache != NULL) {
|
|
|
9ae3a8 |
- return cache_resize(XBZRLE.cache, new_size / TARGET_PAGE_SIZE) *
|
|
|
9ae3a8 |
- TARGET_PAGE_SIZE;
|
|
|
9ae3a8 |
+ /* check XBZRLE.cache again later */
|
|
|
9ae3a8 |
+ if (pow2floor(new_size) == migrate_xbzrle_cache_size()) {
|
|
|
9ae3a8 |
+ return pow2floor(new_size);
|
|
|
9ae3a8 |
+ }
|
|
|
9ae3a8 |
+ new_cache = cache_init(new_size / TARGET_PAGE_SIZE,
|
|
|
9ae3a8 |
+ TARGET_PAGE_SIZE);
|
|
|
9ae3a8 |
+ if (!new_cache) {
|
|
|
9ae3a8 |
+ DPRINTF("Error creating cache\n");
|
|
|
9ae3a8 |
+ return -1;
|
|
|
9ae3a8 |
+ }
|
|
|
9ae3a8 |
+
|
|
|
9ae3a8 |
+ XBZRLE_cache_lock();
|
|
|
9ae3a8 |
+    /* the XBZRLE.cache may have been destroyed, check it again */
|
|
|
9ae3a8 |
+ if (XBZRLE.cache != NULL) {
|
|
|
9ae3a8 |
+ cache_to_free = XBZRLE.cache;
|
|
|
9ae3a8 |
+ XBZRLE.cache = new_cache;
|
|
|
9ae3a8 |
+ } else {
|
|
|
9ae3a8 |
+ cache_to_free = new_cache;
|
|
|
9ae3a8 |
+ }
|
|
|
9ae3a8 |
+ XBZRLE_cache_unlock();
|
|
|
9ae3a8 |
+
|
|
|
9ae3a8 |
+ cache_fini(cache_to_free);
|
|
|
9ae3a8 |
}
|
|
|
9ae3a8 |
+
|
|
|
9ae3a8 |
return pow2floor(new_size);
|
|
|
9ae3a8 |
}
|
|
|
9ae3a8 |
|
|
|
9ae3a8 |
@@ -539,6 +576,8 @@ static int ram_save_block(QEMUFile *f, bool last_stage)
|
|
|
9ae3a8 |
ret = ram_control_save_page(f, block->offset,
|
|
|
9ae3a8 |
offset, TARGET_PAGE_SIZE, &bytes_sent);
|
|
|
9ae3a8 |
|
|
|
9ae3a8 |
+ XBZRLE_cache_lock();
|
|
|
9ae3a8 |
+
|
|
|
9ae3a8 |
current_addr = block->offset + offset;
|
|
|
9ae3a8 |
if (ret != RAM_SAVE_CONTROL_NOT_SUPP) {
|
|
|
9ae3a8 |
if (ret != RAM_SAVE_CONTROL_DELAYED) {
|
|
|
9ae3a8 |
@@ -587,6 +626,7 @@ static int ram_save_block(QEMUFile *f, bool last_stage)
|
|
|
9ae3a8 |
acct_info.norm_pages++;
|
|
|
9ae3a8 |
}
|
|
|
9ae3a8 |
|
|
|
9ae3a8 |
+ XBZRLE_cache_unlock();
|
|
|
9ae3a8 |
/* if page is unmodified, continue to the next */
|
|
|
9ae3a8 |
if (bytes_sent > 0) {
|
|
|
9ae3a8 |
last_sent_block = block;
|
|
|
9ae3a8 |
@@ -654,6 +694,7 @@ static void migration_end(void)
|
|
|
9ae3a8 |
migration_bitmap = NULL;
|
|
|
9ae3a8 |
}
|
|
|
9ae3a8 |
|
|
|
9ae3a8 |
+ XBZRLE_cache_lock();
|
|
|
9ae3a8 |
if (XBZRLE.cache) {
|
|
|
9ae3a8 |
cache_fini(XBZRLE.cache);
|
|
|
9ae3a8 |
g_free(XBZRLE.cache);
|
|
|
9ae3a8 |
@@ -663,6 +704,7 @@ static void migration_end(void)
|
|
|
9ae3a8 |
XBZRLE.encoded_buf = NULL;
|
|
|
9ae3a8 |
XBZRLE.current_buf = NULL;
|
|
|
9ae3a8 |
}
|
|
|
9ae3a8 |
+ XBZRLE_cache_unlock();
|
|
|
9ae3a8 |
}
|
|
|
9ae3a8 |
|
|
|
9ae3a8 |
static void ram_migration_cancel(void *opaque)
|
|
|
9ae3a8 |
@@ -693,13 +735,17 @@ static int ram_save_setup(QEMUFile *f, void *opaque)
|
|
|
9ae3a8 |
dirty_rate_high_cnt = 0;
|
|
|
9ae3a8 |
|
|
|
9ae3a8 |
if (migrate_use_xbzrle()) {
|
|
|
9ae3a8 |
+ qemu_mutex_lock_iothread();
|
|
|
9ae3a8 |
XBZRLE.cache = cache_init(migrate_xbzrle_cache_size() /
|
|
|
9ae3a8 |
TARGET_PAGE_SIZE,
|
|
|
9ae3a8 |
TARGET_PAGE_SIZE);
|
|
|
9ae3a8 |
if (!XBZRLE.cache) {
|
|
|
9ae3a8 |
+ qemu_mutex_unlock_iothread();
|
|
|
9ae3a8 |
DPRINTF("Error creating cache\n");
|
|
|
9ae3a8 |
return -1;
|
|
|
9ae3a8 |
}
|
|
|
9ae3a8 |
+ qemu_mutex_init(&XBZRLE.lock);
|
|
|
9ae3a8 |
+ qemu_mutex_unlock_iothread();
|
|
|
9ae3a8 |
|
|
|
9ae3a8 |
/* We prefer not to abort if there is no memory */
|
|
|
9ae3a8 |
XBZRLE.encoded_buf = g_try_malloc0(TARGET_PAGE_SIZE);
|
|
|
9ae3a8 |
--
|
|
|
9ae3a8 |
1.7.1
|
|
|
9ae3a8 |
|