From 69c6a829f8136a8c95ccdf480f2fd0173d64b6ec Mon Sep 17 00:00:00 2001
From: "Dr. David Alan Gilbert" <dgilbert@redhat.com>
Date: Mon, 27 Jan 2020 19:02:05 +0100
Subject: [PATCH 094/116] virtiofsd: prevent fv_queue_thread() vs virtio_loop()
 races
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

RH-Author: Dr. David Alan Gilbert <dgilbert@redhat.com>
Message-id: <20200127190227.40942-91-dgilbert@redhat.com>
Patchwork-id: 93544
O-Subject: [RHEL-AV-8.2 qemu-kvm PATCH 090/112] virtiofsd: prevent fv_queue_thread() vs virtio_loop() races
Bugzilla: 1694164
RH-Acked-by: Philippe Mathieu-Daudé <philmd@redhat.com>
RH-Acked-by: Stefan Hajnoczi <stefanha@redhat.com>
RH-Acked-by: Sergio Lopez Pascual <slp@redhat.com>

From: Stefan Hajnoczi <stefanha@redhat.com>

We call into libvhost-user from the virtqueue handler thread and the
vhost-user message processing thread without a lock. There is nothing
protecting the virtqueue handler thread if the vhost-user message
processing thread changes the virtqueue or memory table while it is
running.

This patch introduces a read-write lock. Virtqueue handler threads are
readers. The vhost-user message processing thread is a writer. This
will allow concurrency for multiqueue in the future while protecting
against fv_queue_thread() vs virtio_loop() races.

Note that the critical sections could be made smaller but it would be
more invasive and require libvhost-user changes. Let's start simple and
improve performance later, if necessary. Another option would be an
RCU-style approach with lighter-weight primitives.

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Daniel P. Berrangé <berrange@redhat.com>
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
(cherry picked from commit e7b337326d594b71b07cd6dbb332c49c122c80a4)
Signed-off-by: Miroslav Rezanina <mrezanin@redhat.com>
---
 tools/virtiofsd/fuse_virtio.c | 34 +++++++++++++++++++++++++++++++++-
 1 file changed, 33 insertions(+), 1 deletion(-)

diff --git a/tools/virtiofsd/fuse_virtio.c b/tools/virtiofsd/fuse_virtio.c
index fb8d6d1..f6242f9 100644
--- a/tools/virtiofsd/fuse_virtio.c
+++ b/tools/virtiofsd/fuse_virtio.c
@@ -59,6 +59,18 @@ struct fv_VuDev {
     struct fuse_session *se;
 
     /*
+     * Either handle virtqueues or vhost-user protocol messages. Don't do
+     * both at the same time since that could lead to race conditions if
+     * virtqueues or memory tables change while another thread is accessing
+     * them.
+     *
+     * The assumptions are:
+     * 1. fv_queue_thread() reads/writes to virtqueues and only reads VuDev.
+     * 2. virtio_loop() reads/writes virtqueues and VuDev.
+     */
+    pthread_rwlock_t vu_dispatch_rwlock;
+
+    /*
      * The following pair of fields are only accessed in the main
      * virtio_loop
      */
@@ -415,6 +427,8 @@ static void *fv_queue_thread(void *opaque)
              qi->qidx, qi->kick_fd);
     while (1) {
         struct pollfd pf[2];
+        int ret;
+
         pf[0].fd = qi->kick_fd;
         pf[0].events = POLLIN;
         pf[0].revents = 0;
@@ -461,6 +475,9 @@ static void *fv_queue_thread(void *opaque)
             fuse_log(FUSE_LOG_ERR, "Eventfd_read for queue: %m\n");
             break;
         }
+        /* Mutual exclusion with virtio_loop() */
+        ret = pthread_rwlock_rdlock(&qi->virtio_dev->vu_dispatch_rwlock);
+        assert(ret == 0); /* there is no possible error case */
         /* out is from guest, in is too guest */
         unsigned int in_bytes, out_bytes;
         vu_queue_get_avail_bytes(dev, q, &in_bytes, &out_bytes, ~0, ~0);
@@ -469,6 +486,7 @@ static void *fv_queue_thread(void *opaque)
                  "%s: Queue %d gave evalue: %zx available: in: %u out: %u\n",
                  __func__, qi->qidx, (size_t)evalue, in_bytes, out_bytes);
 
+
         while (1) {
             bool allocated_bufv = false;
             struct fuse_bufvec bufv;
@@ -597,6 +615,8 @@ static void *fv_queue_thread(void *opaque)
             free(elem);
             elem = NULL;
         }
+
+        pthread_rwlock_unlock(&qi->virtio_dev->vu_dispatch_rwlock);
     }
 out:
     pthread_mutex_destroy(&ch.lock);
@@ -711,6 +731,8 @@ int virtio_loop(struct fuse_session *se)
 
     while (!fuse_session_exited(se)) {
         struct pollfd pf[1];
+        bool ok;
+        int ret;
         pf[0].fd = se->vu_socketfd;
         pf[0].events = POLLIN;
         pf[0].revents = 0;
@@ -735,7 +757,15 @@ int virtio_loop(struct fuse_session *se)
         }
         assert(pf[0].revents & POLLIN);
         fuse_log(FUSE_LOG_DEBUG, "%s: Got VU event\n", __func__);
-        if (!vu_dispatch(&se->virtio_dev->dev)) {
+        /* Mutual exclusion with fv_queue_thread() */
+        ret = pthread_rwlock_wrlock(&se->virtio_dev->vu_dispatch_rwlock);
+        assert(ret == 0); /* there is no possible error case */
+
+        ok = vu_dispatch(&se->virtio_dev->dev);
+
+        pthread_rwlock_unlock(&se->virtio_dev->vu_dispatch_rwlock);
+
+        if (!ok) {
             fuse_log(FUSE_LOG_ERR, "%s: vu_dispatch failed\n", __func__);
             break;
         }
@@ -877,6 +907,7 @@ int virtio_session_mount(struct fuse_session *se)
 
     se->vu_socketfd = data_sock;
     se->virtio_dev->se = se;
+    pthread_rwlock_init(&se->virtio_dev->vu_dispatch_rwlock, NULL);
     vu_init(&se->virtio_dev->dev, 2, se->vu_socketfd, fv_panic, fv_set_watch,
             fv_remove_watch, &fv_iface);
 
@@ -892,6 +923,7 @@ void virtio_session_close(struct fuse_session *se)
     }
 
     free(se->virtio_dev->qi);
+    pthread_rwlock_destroy(&se->virtio_dev->vu_dispatch_rwlock);
     free(se->virtio_dev);
     se->virtio_dev = NULL;
 }
-- 
1.8.3.1
