From c66cd34696f1f7f04b367a5f8b4d79802cddafb4 Mon Sep 17 00:00:00 2001
From: Dr. David Alan Gilbert (git) <dgilbert@redhat.com>
Date: Thu, 8 May 2014 10:58:42 +0200
Subject: [PATCH 07/31] XBZRLE: Fix one XBZRLE corruption issue

RH-Author: Dr. David Alan Gilbert (git) <dgilbert@redhat.com>
Message-id: <1399546722-6350-5-git-send-email-dgilbert@redhat.com>
Patchwork-id: 58744
O-Subject: [RHEL7.1/RHEL7.0.z qemu-kvm PATCH 4/4] XBZRLE: Fix one XBZRLE corruption issue
Bugzilla: 1066338
RH-Acked-by: Juan Quintela <quintela@redhat.com>
RH-Acked-by: Markus Armbruster <armbru@redhat.com>
RH-Acked-by: Amit Shah <amit.shah@redhat.com>

From: ChenLiang <chenliang88@huawei.com>

The page may not be inserted into the cache after save_xbzrle_page has
run. If the insertion fails, the original page should be sent rather
than the page in the cache.

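To make the failure mode concrete, the standalone sketch below (not part of
the patch; the toy_* names, TOY_PAGE_SIZE, and the simulated failure flag are
invented for illustration) shows the contract the change establishes: the
caller's pointer is redirected to the cached copy only after a successful
insert, so when cache_insert() fails the caller keeps sending the original
page rather than stale cache contents.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define TOY_PAGE_SIZE 16

    static uint8_t toy_cache[TOY_PAGE_SIZE];
    static int toy_cache_valid;

    /* Stand-in for cache_insert(); may fail (e.g. allocation failure). */
    static int toy_cache_insert(const uint8_t *data, int simulate_failure)
    {
        if (simulate_failure) {
            return -1;
        }
        memcpy(toy_cache, data, TOY_PAGE_SIZE);
        toy_cache_valid = 1;
        return 0;
    }

    /* Loosely mirrors the cache-miss path of the patched save_xbzrle_page():
     * it takes uint8_t ** and re-points the caller at the cached copy only
     * after a successful insert. */
    static int toy_save_page(uint8_t **current_data, int simulate_failure)
    {
        if (!toy_cache_valid) {
            if (toy_cache_insert(*current_data, simulate_failure) == 0) {
                *current_data = toy_cache;  /* cache now holds this page */
            }
            return -1;                      /* page not XBZRLE-encoded */
        }
        return 0;  /* cache hit: the real code would XBZRLE-encode here */
    }

    int main(void)
    {
        uint8_t page[TOY_PAGE_SIZE] = "original page!!";
        uint8_t *p = page;

        toy_save_page(&p, 1 /* force the insert to fail */);
        /* p still points at the original page, not at unset cache data;
         * sending stale cache contents is the corruption being fixed. */
        printf("sending: %s\n", (char *)p);
        return 0;
    }

Passing uint8_t ** lets save_xbzrle_page decide, at each exit path, whether
the caller should send the original buffer or the cached copy.
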
Signed-off-by: ChenLiang <chenliang88@huawei.com>
Signed-off-by: Gonglei <arei.gonglei@huawei.com>
Reviewed-by: Juan Quintela <quintela@redhat.com>
Signed-off-by: Juan Quintela <quintela@redhat.com>
(cherry picked from commit 1534ee93cc6be992c05577886b24bd44c37ecff6)
---
 arch_init.c | 25 +++++++++++++------------
 1 file changed, 13 insertions(+), 12 deletions(-)

Signed-off-by: Miroslav Rezanina <mrezanin@redhat.com>
---
 arch_init.c | 25 +++++++++++++------------
 1 files changed, 13 insertions(+), 12 deletions(-)

diff --git a/arch_init.c b/arch_init.c
index 80e48f2..22f7def 100644
--- a/arch_init.c
+++ b/arch_init.c
@@ -341,7 +341,7 @@ static void xbzrle_cache_zero_page(ram_addr_t current_addr)
 
 #define ENCODING_FLAG_XBZRLE 0x1
 
-static int save_xbzrle_page(QEMUFile *f, uint8_t *current_data,
+static int save_xbzrle_page(QEMUFile *f, uint8_t **current_data,
                             ram_addr_t current_addr, RAMBlock *block,
                             ram_addr_t offset, int cont, bool last_stage)
 {
@@ -349,19 +349,23 @@ static int save_xbzrle_page(QEMUFile *f, uint8_t *current_data,
     uint8_t *prev_cached_page;
 
     if (!cache_is_cached(XBZRLE.cache, current_addr)) {
+        acct_info.xbzrle_cache_miss++;
         if (!last_stage) {
-            if (cache_insert(XBZRLE.cache, current_addr, current_data) == -1) {
+            if (cache_insert(XBZRLE.cache, current_addr, *current_data) == -1) {
                 return -1;
+            } else {
+                /* update *current_data when the page has been
+                   inserted into cache */
+                *current_data = get_cached_data(XBZRLE.cache, current_addr);
             }
         }
-        acct_info.xbzrle_cache_miss++;
         return -1;
     }
 
     prev_cached_page = get_cached_data(XBZRLE.cache, current_addr);
 
     /* save current buffer into memory */
-    memcpy(XBZRLE.current_buf, current_data, TARGET_PAGE_SIZE);
+    memcpy(XBZRLE.current_buf, *current_data, TARGET_PAGE_SIZE);
 
     /* XBZRLE encoding (if there is no overflow) */
     encoded_len = xbzrle_encode_buffer(prev_cached_page, XBZRLE.current_buf,
@@ -374,7 +378,10 @@ static int save_xbzrle_page(QEMUFile *f, uint8_t *current_data,
         DPRINTF("Overflow\n");
         acct_info.xbzrle_overflows++;
         /* update data in the cache */
-        memcpy(prev_cached_page, current_data, TARGET_PAGE_SIZE);
+        if (!last_stage) {
+            memcpy(prev_cached_page, *current_data, TARGET_PAGE_SIZE);
+            *current_data = prev_cached_page;
+        }
         return -1;
     }
 
@@ -599,15 +606,9 @@ static int ram_save_block(QEMUFile *f, bool last_stage)
                  */
                 xbzrle_cache_zero_page(current_addr);
             } else if (!ram_bulk_stage && migrate_use_xbzrle()) {
-                bytes_sent = save_xbzrle_page(f, p, current_addr, block,
+                bytes_sent = save_xbzrle_page(f, &p, current_addr, block,
                                               offset, cont, last_stage);
                 if (!last_stage) {
-                    /* We must send exactly what's in the xbzrle cache
-                     * even if the page wasn't xbzrle compressed, so that
-                     * it's right next time.
-                     */
-                    p = get_cached_data(XBZRLE.cache, current_addr);
-
                     /* Can't send this cached data async, since the cache page
                      * might get updated before it gets to the wire
                      */
-- 
1.7.1