diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..b895746
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,2 @@
+SOURCES/virtiofsd-1.1.0-vendor.tar.gz
+SOURCES/virtiofsd-1.1.0.crate
diff --git a/.virtiofsd.metadata b/.virtiofsd.metadata
new file mode 100644
index 0000000..34f7ce1
--- /dev/null
+++ b/.virtiofsd.metadata
@@ -0,0 +1,2 @@
+1c55f5d419b01ec49c1cd2a5b2dfd915fd9481a2 SOURCES/virtiofsd-1.1.0-vendor.tar.gz
+ea25daee7b35d007c3786cae60ac178a276a54bd SOURCES/virtiofsd-1.1.0.crate
diff --git a/SOURCES/Clean-up-flags-in-opendir-downstream.patch b/SOURCES/Clean-up-flags-in-opendir-downstream.patch
new file mode 100644
index 0000000..18166ee
--- /dev/null
+++ b/SOURCES/Clean-up-flags-in-opendir-downstream.patch
@@ -0,0 +1,33 @@
+From 1bb43a5cdcb48dc9a8add0d1c94e627cd76f80f6 Mon Sep 17 00:00:00 2001
+From: Sergio Lopez
+Date: Tue, 22 Mar 2022 10:22:01 +0100
+Subject: [PATCH] Clean up flags in opendir (downstream)
+
+Clean up O_RDWR and O_WRONLY flags in opendir to work around a bug in
+the Windows virtio-fs guest driver.
+
+Resolves: rhbz#2057252
+Signed-off-by: Sergio Lopez
+---
+ src/passthrough/mod.rs | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/src/passthrough/mod.rs b/src/passthrough/mod.rs
+index b2b265c..6e4b236 100644
+--- a/src/passthrough/mod.rs
++++ b/src/passthrough/mod.rs
+@@ -1133,7 +1133,10 @@ impl FileSystem for PassthroughFs {
+         inode: Inode,
+         flags: u32,
+     ) -> io::Result<(Option, OpenOptions)> {
+-        self.do_open(inode, false, flags | (libc::O_DIRECTORY as u32))
++        // Clean up O_RDWR and O_WRONLY from flags to work around a bug in the Windows
++        // virtio-fs guest driver. BZ#2057252
++        let clean_flags: u32 = flags & !((libc::O_RDWR | libc::O_WRONLY) as u32);
++        self.do_open(inode, false, clean_flags | (libc::O_DIRECTORY as u32))
+     }
+ 
+     fn releasedir(
+-- 
+2.35.1
+
diff --git a/SOURCES/Set-the-number-of-written-bytes-for-used-descs.patch b/SOURCES/Set-the-number-of-written-bytes-for-used-descs.patch
new file mode 100644
index 0000000..f5a9217
--- /dev/null
+++ b/SOURCES/Set-the-number-of-written-bytes-for-used-descs.patch
@@ -0,0 +1,88 @@
+From 80034cacde8f9c4d7cd4ae73316eeec6cc2fd67a Mon Sep 17 00:00:00 2001
+From: Sergio Lopez
+Date: Thu, 10 Mar 2022 12:03:13 +0100
+Subject: [PATCH 2/3] Set the number of written bytes for used descs
+
+As the Linux driver ignores the "len" field of used descriptors, we
+didn't bother to set it properly when returning those descriptors to the
+queue.
+
+The problem is that other implementations (at least, the Windows one)
+do care about it, so let's do the right thing and set it properly.
+
+Resolves: rhbz#2057252
+Signed-off-by: Sergio Lopez
+(cherry picked from commit 2bc7c2102d18f47b309fd5f767b5349d9e08d2b8)
+Signed-off-by: Sergio Lopez
+---
+ src/main.rs | 25 ++++++++++++++++++-------
+ 1 file changed, 18 insertions(+), 7 deletions(-)
+
+diff --git a/src/main.rs b/src/main.rs
+index 5a9914f..ca183e8 100644
+--- a/src/main.rs
++++ b/src/main.rs
+@@ -6,7 +6,7 @@ use futures::executor::{ThreadPool, ThreadPoolBuilder};
+ use libc::EFD_NONBLOCK;
+ use log::*;
+ use passthrough::xattrmap::XattrMap;
+-use std::convert::{self, TryFrom};
++use std::convert::{self, TryFrom, TryInto};
+ use std::ffi::CString;
+ use std::os::unix::io::{FromRawFd, RawFd};
+ use std::sync::{Arc, Mutex, RwLock};
+@@ -128,8 +128,18 @@ impl VhostUserFsThread {
+         })
+     }
+ 
+-    fn return_descriptor(vring_state: &mut VringState, head_index: u16, event_idx: bool) {
+-        if vring_state.add_used(head_index, 0).is_err() {
++    fn return_descriptor(
++        vring_state: &mut VringState,
++        head_index: u16,
++        event_idx: bool,
++        len: usize,
++    ) {
++        let used_len: u32 = match len.try_into() {
++            Ok(l) => l,
++            Err(_) => panic!("Invalid used length, can't return used descriptors to the ring"),
++        };
++
++        if vring_state.add_used(head_index, used_len).is_err() {
+             warn!("Couldn't return used descriptors to the ring");
+         }
+ 
+@@ -185,12 +195,12 @@ impl VhostUserFsThread {
+                     .map_err(Error::QueueWriter)
+                     .unwrap();
+ 
+-                server
++                let len = server
+                     .handle_message(reader, writer, vu_req.as_mut())
+                     .map_err(Error::ProcessQueue)
+                     .unwrap();
+ 
+-                Self::return_descriptor(&mut worker_vring.get_mut(), head_index, event_idx);
++                Self::return_descriptor(&mut worker_vring.get_mut(), head_index, event_idx, len);
+             });
+         }
+ 
+@@ -222,12 +232,13 @@ impl VhostUserFsThread {
+                 .map_err(Error::QueueWriter)
+                 .unwrap();
+ 
+-            self.server
++            let len = self
++                .server
+                 .handle_message(reader, writer, self.vu_req.as_mut())
+                 .map_err(Error::ProcessQueue)
+                 .unwrap();
+ 
+-            Self::return_descriptor(vring_state, head_index, self.event_idx);
++            Self::return_descriptor(vring_state, head_index, self.event_idx, len);
+         }
+ 
+         Ok(used_any)
+-- 
+2.35.1
+
diff --git a/SOURCES/process_queue_pool-Only-acquire-the-VringMutex-lock-.patch b/SOURCES/process_queue_pool-Only-acquire-the-VringMutex-lock-.patch
new file mode 100644
index 0000000..d3e0d93
--- /dev/null
+++ b/SOURCES/process_queue_pool-Only-acquire-the-VringMutex-lock-.patch
@@ -0,0 +1,130 @@
+From 9e55bb375937f8cc93666d9998f09d23a31185f1 Mon Sep 17 00:00:00 2001
+From: Sebastian Hasler
+Date: Wed, 2 Feb 2022 17:50:34 +0100
+Subject: [PATCH 1/3] process_queue_pool: Only acquire the VringMutex lock once
+
+Previously, the worker task in `process_queue_pool()` called
+up to 3 functions on `worker_vring`, where each function is a
+wrapper that first locks the mutex. This is unnecessary
+congestion. We fix this by locking the mutex once.
+
+Signed-off-by: Sebastian Hasler
+(cherry picked from commit c904bd8dbd9557d1a59fa0934e092443c7264d43)
+Signed-off-by: Sergio Lopez
+---
+ src/main.rs | 72 +++++++++++++++++++----------------------------------
+ 1 file changed, 26 insertions(+), 46 deletions(-)
+
+diff --git a/src/main.rs b/src/main.rs
+index 2049eda..5a9914f 100644
+--- a/src/main.rs
++++ b/src/main.rs
+@@ -9,7 +9,7 @@ use passthrough::xattrmap::XattrMap;
+ use std::convert::{self, TryFrom};
+ use std::ffi::CString;
+ use std::os::unix::io::{FromRawFd, RawFd};
+-use std::sync::{Arc, Mutex, MutexGuard, RwLock};
++use std::sync::{Arc, Mutex, RwLock};
+ use std::{env, error, fmt, io, process};
+ 
+ use structopt::StructOpt;
+@@ -128,6 +128,28 @@ impl VhostUserFsThread {
+         })
+     }
+ 
++    fn return_descriptor(vring_state: &mut VringState, head_index: u16, event_idx: bool) {
++        if vring_state.add_used(head_index, 0).is_err() {
++            warn!("Couldn't return used descriptors to the ring");
++        }
++
++        if event_idx {
++            match vring_state.needs_notification() {
++                Err(_) => {
++                    warn!("Couldn't check if queue needs to be notified");
++                    vring_state.signal_used_queue().unwrap();
++                }
++                Ok(needs_notification) => {
++                    if needs_notification {
++                        vring_state.signal_used_queue().unwrap();
++                    }
++                }
++            }
++        } else {
++            vring_state.signal_used_queue().unwrap();
++        }
++    }
++
+     fn process_queue_pool(&mut self, vring: VringMutex) -> Result {
+         let mut used_any = false;
+         let atomic_mem = match &self.mem {
+@@ -168,35 +190,14 @@ impl VhostUserFsThread {
+                     .map_err(Error::ProcessQueue)
+                     .unwrap();
+ 
+-                if event_idx {
+-                    if worker_vring.add_used(head_index, 0).is_err() {
+-                        warn!("Couldn't return used descriptors to the ring");
+-                    }
+-
+-                    match worker_vring.needs_notification() {
+-                        Err(_) => {
+-                            warn!("Couldn't check if queue needs to be notified");
+-                            worker_vring.signal_used_queue().unwrap();
+-                        }
+-                        Ok(needs_notification) => {
+-                            if needs_notification {
+-                                worker_vring.signal_used_queue().unwrap();
+-                            }
+-                        }
+-                    }
+-                } else {
+-                    if worker_vring.add_used(head_index, 0).is_err() {
+-                        warn!("Couldn't return used descriptors to the ring");
+-                    }
+-                    worker_vring.signal_used_queue().unwrap();
+-                }
++                Self::return_descriptor(&mut worker_vring.get_mut(), head_index, event_idx);
+             });
+         }
+ 
+         Ok(used_any)
+     }
+ 
+-    fn process_queue_serial(&mut self, vring_state: &mut MutexGuard) -> Result {
++    fn process_queue_serial(&mut self, vring_state: &mut VringState) -> Result {
+         let mut used_any = false;
+         let mem = match &self.mem {
+             Some(m) => m.memory(),
+@@ -226,28 +227,7 @@ impl VhostUserFsThread {
+                 .map_err(Error::ProcessQueue)
+                 .unwrap();
+ 
+-            if self.event_idx {
+-                if vring_state.add_used(head_index, 0).is_err() {
+-                    warn!("Couldn't return used descriptors to the ring");
+-                }
+-
+-                match vring_state.needs_notification() {
+-                    Err(_) => {
+-                        warn!("Couldn't check if queue needs to be notified");
+-                        vring_state.signal_used_queue().unwrap();
+-                    }
+-                    Ok(needs_notification) => {
+-                        if needs_notification {
+-                            vring_state.signal_used_queue().unwrap();
+-                        }
+-                    }
+-                }
+-            } else {
+-                if vring_state.add_used(head_index, 0).is_err() {
+-                    warn!("Couldn't return used descriptors to the ring");
+-                }
+-                vring_state.signal_used_queue().unwrap();
+-            }
++            Self::return_descriptor(vring_state, head_index, self.event_idx);
+         }
+ 
+         Ok(used_any)
+-- 
+2.35.1
+
diff --git a/SPECS/virtiofsd.spec b/SPECS/virtiofsd.spec
new file mode 100644
index 0000000..0bde5c3
--- /dev/null
+++ b/SPECS/virtiofsd.spec
@@ -0,0 +1,71 @@
+Name:           virtiofsd
+Version:        1.1.0
+Release:        4%{?dist}
+Summary:        Virtio-fs vhost-user device daemon (Rust version)
+
+# Upstream license specification: Apache-2.0 AND BSD-3-Clause
+License:        ASL 2.0 and BSD
+URL:            https://gitlab.com/virtio-fs/virtiofsd
+Source0:        %{crates_source}
+Source1:        %{name}-%{version}-vendor.tar.gz
+
+# For bz#2057252 - process_queue_pool: Only acquire the VringMutex lock once
+Patch1:         process_queue_pool-Only-acquire-the-VringMutex-lock-.patch
+# For bz#2057252 - Set the number of written bytes for used descs
+Patch2:         Set-the-number-of-written-bytes-for-used-descs.patch
+# For bz#2057252 - Clean up flags in opendir (downstream)
+Patch3:         Clean-up-flags-in-opendir-downstream.patch
+
+ExclusiveArch:  x86_64 aarch64 s390x
+BuildRequires:  rust-toolset
+BuildRequires:  libcap-ng-devel
+BuildRequires:  libseccomp-devel
+Provides:       virtiofsd
+Obsoletes:      qemu-virtiofsd = 17:6.2.0
+# Both qemu-virtiofsd and virtiofsd ship the same binary
+Conflicts:      qemu-virtiofsd = 17:6.2.0
+
+%description
+%{summary}.
+
+%prep
+%setup -q -n %{name}-%{version}
+%autopatch -p1
+
+%cargo_prep -V 1
+
+%build
+%cargo_build
+
+%install
+mkdir -p %{buildroot}%{_libexecdir}
+install -D -p -m 0755 target/release/virtiofsd %{buildroot}%{_libexecdir}/virtiofsd
+install -D -p -m 0644 50-qemu-virtiofsd.json %{buildroot}%{_datadir}/qemu/vhost-user/50-qemu-virtiofsd.json
+
+%files
+%license LICENSE-APACHE LICENSE-BSD-3-Clause
+%doc README.md
+%{_libexecdir}/virtiofsd
+%{_datadir}/qemu/vhost-user/50-qemu-virtiofsd.json
+
+%changelog
+* Tue Mar 22 2022 Sergio Lopez - 1.1.0-4
+- process_queue_pool-Only-acquire-the-VringMutex-lock-.patch [bz#2057252]
+- Set-the-number-of-written-bytes-for-used-descs.patch [bz#2057252]
+- Clean-up-flags-in-opendir-downstream.patch [bz#2057252]
+- Resolves: bz#2057252
+  ([virtiofsd]Can't access to the shared directory on windows guest with the new virtiofsd(rust))
+
+* Fri Feb 18 2022 Sergio Lopez - 1.1.0-3
+- Restore "Provides: virtiofsd", despite rpmdeplint complaints, to
+  satisfy qemu-kvm dependencies
+
+* Fri Jan 28 2022 Sergio Lopez - 1.1.0-2
+- Explicitly declare the conflict with qemu-virtiofsd
+- Remove explicit library dependencies
+- Remove useless "Provides: virtiosfd"
+- Remove Windows binaries from vendor tarball
+
+* Thu Jan 27 2022 Sergio Lopez - 1.1.0-1
+- Initial package
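The heart of SOURCES/Clean-up-flags-in-opendir-downstream.patch above is a single bit-masking expression. Below is a minimal, self-contained Rust sketch of that masking, assuming only the libc crate (which the patched code already uses); the helper name clean_opendir_flags and the main() driver are illustrative and are not part of the packaged sources.

// Illustrative sketch of the flag cleanup applied by
// SOURCES/Clean-up-flags-in-opendir-downstream.patch: the Windows virtio-fs
// guest driver may send O_RDWR or O_WRONLY on opendir, so the access-mode
// bits are stripped and O_DIRECTORY is forced before the directory is opened.
fn clean_opendir_flags(flags: u32) -> u32 {
    let access_bits = (libc::O_RDWR | libc::O_WRONLY) as u32;
    (flags & !access_bits) | (libc::O_DIRECTORY as u32)
}

fn main() {
    // Simulate a directory-open request that wrongly carries O_RDWR.
    let guest_flags = libc::O_RDWR as u32;
    let cleaned = clean_opendir_flags(guest_flags);
    assert_eq!(cleaned & ((libc::O_RDWR | libc::O_WRONLY) as u32), 0);
    println!("guest flags {:#x} -> cleaned flags {:#x}", guest_flags, cleaned);
}

With libc declared as a dependency in Cargo.toml, this sketch builds and runs with a plain cargo run.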