From 731417bbf4d2641a42fedd729950918806246649 Mon Sep 17 00:00:00 2001 From: CentOS Sources Date: Feb 22 2022 08:41:00 +0000 Subject: import kpatch-patch-3_10_0-1160_41_1-1-4.el7 --- diff --git a/SOURCES/CVE-2020-0466.patch b/SOURCES/CVE-2020-0466.patch new file mode 100644 index 0000000..209bded --- /dev/null +++ b/SOURCES/CVE-2020-0466.patch @@ -0,0 +1,148 @@ +From c17b58ebffbfa862b3f1815e208db340bd1664eb Mon Sep 17 00:00:00 2001 +From: Yannick Cote +Date: Tue, 1 Feb 2022 14:14:41 -0500 +Subject: [KPATCH CVE-2020-0466] epoll: kpatch fixes for CVE-2020-0466 + +Kernels: +3.10.0-1160.15.2.el7 +3.10.0-1160.21.1.el7 +3.10.0-1160.24.1.el7 +3.10.0-1160.25.1.el7 +3.10.0-1160.31.1.el7 +3.10.0-1160.36.2.el7 +3.10.0-1160.41.1.el7 +3.10.0-1160.42.2.el7 +3.10.0-1160.45.1.el7 +3.10.0-1160.49.1.el7 +3.10.0-1160.53.1.el7 + +Changes since last build: +[x86_64]: +eventpoll.o: changed function: SyS_epoll_ctl +eventpoll.o: changed function: clear_tfile_check_list +eventpoll.o: changed function: ep_loop_check_proc + +[ppc64le]: +eventpoll.o: changed function: SyS_epoll_ctl +eventpoll.o: changed function: ep_loop_check_proc + +--------------------------- + +Kpatch-MR: https://gitlab.com/redhat/prdsc/rhel/src/kpatch/rhel-7/-/merge_requests/22 +Approved-by: Artem Savkov (@artem.savkov) +Kernels: +3.10.0-1160.21.1.el7 +3.10.0-1160.24.1.el7 +3.10.0-1160.25.1.el7 +3.10.0-1160.31.1.el7 +3.10.0-1160.36.2.el7 +3.10.0-1160.41.1.el7 +3.10.0-1160.42.2.el7 +3.10.0-1160.45.1.el7 +3.10.0-1160.49.1.el7 +3.10.0-1160.53.1.el7 + +Modifications: none + +commit f771ed0537c55c506dc846cb8f3da60f6383a2b3 +Author: Carlos Maiolino +Date: Sat Dec 18 09:23:31 2021 +0100 + + epoll: Keep a reference on files added to the check list + + Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=2042760 + Tested: Sanity check only + CVE: CVE-2020-0466 + + Conflicts: + - RHEL7 has no support for non-blocking do_epoll_ctl(), so the + original patch got this part stripped. + + When adding a new fd to an epoll, and that this new fd is an + epoll fd itself, we recursively scan the fds attached to it + to detect cycles, and add non-epool files to a "check list" + that gets subsequently parsed. + + However, this check list isn't completely safe when deletions + can happen concurrently. To sidestep the issue, make sure that + a struct file placed on the check list sees its f_count increased, + ensuring that a concurrent deletion won't result in the file + disapearing from under our feet. + + Cc: stable@vger.kernel.org + Signed-off-by: Marc Zyngier + Signed-off-by: Al Viro + (cherry picked from commit a9ed4a6560b8562b7e2e2bed9527e88001f7b682) + + Signed-off-by: Carlos Maiolino + +commit 0875a380011a7ff7f4504b72890c29fec420d1cd +Author: Carlos Maiolino +Date: Sat Dec 18 09:23:47 2021 +0100 + + fix regression in "epoll: Keep a reference on files added to the check list" + + Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=2042760 + Tested: Sanity check only + CVE: CVE-2020-0466 + + epoll_loop_check_proc() can run into a file already committed to destruction; + we can't grab a reference on those and don't need to add them to the set for + reverse path check anyway. 
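+
+    As an illustration only (a simplified sketch, not one of the applied
+    hunks below), the resulting pin/unpin pattern looks like this:
+
+        struct file *f = epi->ffd.file;
+
+        if (list_empty(&f->f_tfile_llink)) {
+            /* get_file_rcu() only succeeds while f_count is non-zero, so a
+             * file already committed to destruction is simply skipped. */
+            if (get_file_rcu(f))
+                list_add(&f->f_tfile_llink, &tfile_check_list);
+        }
+
+        /* ...and clear_tfile_check_list() drops each pin with fput(file). */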
+ + Tested-by: Marc Zyngier + Fixes: a9ed4a6560b8 ("epoll: Keep a reference on files added to the check list") + Signed-off-by: Al Viro + (cherry picked from commit 77f4689de17c0887775bb77896f4cc11a39bf848) + + Signed-off-by: Carlos Maiolino + +Signed-off-by: Yannick Cote +--- + fs/eventpoll.c | 13 +++++++++---- + 1 file changed, 9 insertions(+), 4 deletions(-) + +diff --git a/fs/eventpoll.c b/fs/eventpoll.c +index 6731b99a481f..ca0eb701eeb4 100644 +--- a/fs/eventpoll.c ++++ b/fs/eventpoll.c +@@ -1750,9 +1750,11 @@ static int ep_loop_check_proc(void *priv, void *cookie, int call_nests) + * not already there, and calling reverse_path_check() + * during ep_insert(). + */ +- if (list_empty(&epi->ffd.file->f_tfile_llink)) +- list_add(&epi->ffd.file->f_tfile_llink, +- &tfile_check_list); ++ if (list_empty(&epi->ffd.file->f_tfile_llink)) { ++ if (get_file_rcu(epi->ffd.file)) ++ list_add(&epi->ffd.file->f_tfile_llink, ++ &tfile_check_list); ++ } + } + } + mutex_unlock(&ep->mtx); +@@ -1796,6 +1798,7 @@ static void clear_tfile_check_list(void) + file = list_first_entry(&tfile_check_list, struct file, + f_tfile_llink); + list_del_init(&file->f_tfile_llink); ++ fput(file); + } + INIT_LIST_HEAD(&tfile_check_list); + } +@@ -1951,9 +1954,11 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd, + clear_tfile_check_list(); + goto error_tgt_fput; + } +- } else ++ } else { ++ get_file(tf.file); + list_add(&tf.file->f_tfile_llink, + &tfile_check_list); ++ } + mutex_lock_nested(&ep->mtx, 0); + if (is_file_epoll(tf.file)) { + tep = tf.file->private_data; +-- +2.26.3 + + diff --git a/SOURCES/CVE-2021-0920.patch b/SOURCES/CVE-2021-0920.patch new file mode 100644 index 0000000..b43d0ff --- /dev/null +++ b/SOURCES/CVE-2021-0920.patch @@ -0,0 +1,195 @@ +From 10bde99768d3c92f5fde1ec51f74e926fc4cf779 Mon Sep 17 00:00:00 2001 +From: Artem Savkov +Date: Thu, 27 Jan 2022 11:44:06 +0100 +Subject: [KPATCH CVE-2021-0920] af_unix: fix garbage collect vs MSG_PEEK + +Kernels: +- 3.10.0-1160.15.2.el7 +- 3.10.0-1160.21.1.el7 +- 3.10.0-1160.24.1.el7 +- 3.10.0-1160.25.1.el7 +- 3.10.0-1160.31.1.el7 +- 3.10.0-1160.36.2.el7 +- 3.10.0-1160.41.1.el7 +- 3.10.0-1160.42.2.el7 +- 3.10.0-1160.45.1.el7 +- 3.10.0-1160.49.1.el7 +- 3.10.0-1160.53.1.el7 + +Changes since last build: + +arches: x86_64 ppc64le +- af_unix.o: changed function: unix_dgram_recvmsg +- af_unix.o: changed function: unix_stream_read_generic +- garbage.o: new function: unix_peek_fds +---- + +Kpatch-MR: https://gitlab.com/redhat/prdsc/rhel/src/kpatch/rhel-7/-/merge_requests/18 +Approved-by: Joe Lawrence (@joe.lawrence) +Approved-by: Yannick Cote (@ycote1) +Kernels: +3.10.0-1160.21.1.el7 +3.10.0-1160.24.1.el7 +3.10.0-1160.25.1.el7 +3.10.0-1160.31.1.el7 +3.10.0-1160.36.2.el7 +3.10.0-1160.41.1.el7 +3.10.0-1160.42.2.el7 +3.10.0-1160.45.1.el7 +3.10.0-1160.49.1.el7 +3.10.0-1160.53.1.el7 + +Modifications: moved unix_peek_fds() to net/unix/garbage.c to avoid +changing unix_gc_lock scope. 
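+
+As an illustration of that modification (a simplified sketch, assuming the
+lock should stay private to garbage.c; the real hunks are below): the helper
+is defined next to the lock it needs, and af_unix.c only sees a forward
+declaration.
+
+    /* net/unix/garbage.c -- unix_gc_lock is already visible here */
+    void unix_peek_fds(struct scm_cookie *scm, struct sk_buff *skb)
+    {
+        scm->fp = scm_fp_dup(UNIXCB(skb).fp);  /* bump the file counts      */
+        spin_lock(&unix_gc_lock);              /* barrier against unix_gc() */
+        spin_unlock(&unix_gc_lock);
+    }
+
+    /* net/unix/af_unix.c -- a plain prototype instead of an extern'd lock */
+    void unix_peek_fds(struct scm_cookie *scm, struct sk_buff *skb);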
+ +Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=2031986 + +Z-MR: https://gitlab.com/redhat/rhel/src/kernel/rhel-7/-/merge_requests/379 + +commit ab0fd1713f1efeb2c859d32721625ea98cd9e663 +Author: William Zhao +Date: Wed Jan 19 09:29:17 2022 -0500 + + af_unix: fix garbage collect vs MSG_PEEK + + Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=2031970 + Upstream Status: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git + CVE: CVE-2021-0920 + Conflicts: The code still uses the old "sock_iocb" structure since rhel-7 + does not have the commit 7cc05662682da ("net: remove sock_iocb"). Thus + passing the "scm_cookie" pointer to the "unix_peek_fds" was slightly + modified to use "siocb->scm" instead of just "&scm". Additionally + the "unix_gc_lock" is not exposed to "af_unix.c" via a header file since + rhel-7 does not have the commit f4e65870e5ce ("net: split out functions + related to registering inflight socket files"). Bringing in the commit + will add a new UNIX_SCM kernel config; this can be avoided by adding + extern-ing the "unix_gc_lock". + + commit cbcf01128d0a92e131bd09f1688fe032480b65ca + Author: Miklos Szeredi + Date: Wed Jul 28 14:47:20 2021 +0200 + + af_unix: fix garbage collect vs MSG_PEEK + + unix_gc() assumes that candidate sockets can never gain an external + reference (i.e. be installed into an fd) while the unix_gc_lock is + held. Except for MSG_PEEK this is guaranteed by modifying inflight + count under the unix_gc_lock. + + MSG_PEEK does not touch any variable protected by unix_gc_lock (file + count is not), yet it needs to be serialized with garbage collection. + Do this by locking/unlocking unix_gc_lock: + + 1) increment file count + + 2) lock/unlock barrier to make sure incremented file count is visible + to garbage collection + + 3) install file into fd + + This is a lock barrier (unlike smp_mb()) that ensures that garbage + collection is run completely before or completely after the barrier. + + Cc: + Signed-off-by: Greg Kroah-Hartman + Signed-off-by: Miklos Szeredi + Signed-off-by: Linus Torvalds + Signed-off-by: William Zhao + +Signed-off-by: Artem Savkov +--- + net/unix/af_unix.c | 7 +++++-- + net/unix/garbage.c | 47 ++++++++++++++++++++++++++++++++++++++++++++++ + 2 files changed, 52 insertions(+), 2 deletions(-) + +diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c +index bcb0ad54b9b3..a264b4598872 100644 +--- a/net/unix/af_unix.c ++++ b/net/unix/af_unix.c +@@ -1468,6 +1468,8 @@ out: + return err; + } + ++void unix_peek_fds(struct scm_cookie *scm, struct sk_buff *skb); ++ + static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb) + { + int i; +@@ -2182,7 +2184,8 @@ static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock, + sk_peek_offset_fwd(sk, size); + + if (UNIXCB(skb).fp) +- siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp); ++ unix_peek_fds(siocb->scm, skb); ++ + } + err = (flags & MSG_TRUNC) ? skb->len - skip : size; + +@@ -2432,7 +2435,7 @@ unlock: + /* It is questionable, see note in unix_dgram_recvmsg. 
+ */ + if (UNIXCB(skb).fp) +- siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp); ++ unix_peek_fds(siocb->scm, skb); + + sk_peek_offset_fwd(sk, chunk); + +diff --git a/net/unix/garbage.c b/net/unix/garbage.c +index c36757e72844..f242268477ba 100644 +--- a/net/unix/garbage.c ++++ b/net/unix/garbage.c +@@ -374,3 +374,50 @@ void unix_gc(void) + out: + spin_unlock(&unix_gc_lock); + } ++ ++void unix_peek_fds(struct scm_cookie *scm, struct sk_buff *skb) ++{ ++ scm->fp = scm_fp_dup(UNIXCB(skb).fp); ++ ++ /* ++ * Garbage collection of unix sockets starts by selecting a set of ++ * candidate sockets which have reference only from being in flight ++ * (total_refs == inflight_refs). This condition is checked once during ++ * the candidate collection phase, and candidates are marked as such, so ++ * that non-candidates can later be ignored. While inflight_refs is ++ * protected by unix_gc_lock, total_refs (file count) is not, hence this ++ * is an instantaneous decision. ++ * ++ * Once a candidate, however, the socket must not be reinstalled into a ++ * file descriptor while the garbage collection is in progress. ++ * ++ * If the above conditions are met, then the directed graph of ++ * candidates (*) does not change while unix_gc_lock is held. ++ * ++ * Any operations that changes the file count through file descriptors ++ * (dup, close, sendmsg) does not change the graph since candidates are ++ * not installed in fds. ++ * ++ * Dequeing a candidate via recvmsg would install it into an fd, but ++ * that takes unix_gc_lock to decrement the inflight count, so it's ++ * serialized with garbage collection. ++ * ++ * MSG_PEEK is special in that it does not change the inflight count, ++ * yet does install the socket into an fd. The following lock/unlock ++ * pair is to ensure serialization with garbage collection. It must be ++ * done between incrementing the file count and installing the file into ++ * an fd. ++ * ++ * If garbage collection starts after the barrier provided by the ++ * lock/unlock, then it will see the elevated refcount and not mark this ++ * as a candidate. If a garbage collection is already in progress ++ * before the file count was incremented, then the lock/unlock pair will ++ * ensure that garbage collection is finished before progressing to ++ * installing the fd. ++ * ++ * (*) A -> B where B is on the queue of A or B is on the queue of C ++ * which is on the queue of listening socket A. 
++ */ ++ spin_lock(&unix_gc_lock); ++ spin_unlock(&unix_gc_lock); ++} +-- +2.26.3 + + diff --git a/SOURCES/CVE-2021-4155.patch b/SOURCES/CVE-2021-4155.patch new file mode 100644 index 0000000..60c59ce --- /dev/null +++ b/SOURCES/CVE-2021-4155.patch @@ -0,0 +1,97 @@ +From 01df9f93baafe0243264cf24f42d84e124c9ee0a Mon Sep 17 00:00:00 2001 +From: Joe Lawrence +Date: Tue, 4 Jan 2022 13:53:44 -0500 +Subject: [KPATCH CVE-2021-4155] xfs: kpatch fixes for CVE-2021-4155 + +Kernels: +3.10.0-1160.15.2.el7 +3.10.0-1160.21.1.el7 +3.10.0-1160.24.1.el7 +3.10.0-1160.25.1.el7 +3.10.0-1160.31.1.el7 +3.10.0-1160.36.2.el7 +3.10.0-1160.41.1.el7 +3.10.0-1160.42.2.el7 +3.10.0-1160.45.1.el7 +3.10.0-1160.49.1.el7 +3.10.0-1160.53.1.el7 + +Changes since last build: +arches: x86_64 ppc64le +xfs_ioctl.o: changed function: xfs_ioc_space +--------------------------- + +Kpatch-MR: https://gitlab.com/redhat/prdsc/rhel/src/kpatch/rhel-7/-/merge_requests/15 +Approved-by: Yannick Cote (@ycote1) +Approved-by: Artem Savkov (@artem.savkov) +Kernels: +3.10.0-1160.21.1.el7 +3.10.0-1160.24.1.el7 +3.10.0-1160.25.1.el7 +3.10.0-1160.31.1.el7 +3.10.0-1160.36.2.el7 +3.10.0-1160.41.1.el7 +3.10.0-1160.42.2.el7 +3.10.0-1160.45.1.el7 +3.10.0-1160.49.1.el7 +3.10.0-1160.53.1.el7 + +Modifications: none + +Z-MR: https://gitlab.com/redhat/prdsc/rhel/src/kernel-private/rhel-7/-/merge_requests/18 + +KT0 test PASS: https://beaker.engineering.redhat.com/jobs/6164756 +for kpatch-patch-3_10_0-1160_15_2-1-11.el7 scratch build: +https://brewweb.engineering.redhat.com/brew/taskinfo?taskID=42181339 + +commit 65cb42abca9a5a600cbbdbbef8ddbafd028b7b5d +Author: Carlos Maiolino +Date: Tue Jan 4 08:29:12 2022 +0100 + + xfs: map unwritten blocks in XFS_IOC_{ALLOC,FREE}SP just like fallocate + + Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=2034857 + CVE: CVE-2021-4155 + Tested: xfstests and specific reproducer + Upstream status: Posted privately due to embargo + + Conflicts: + - el7 required small adjustment to the patch, to fit the old + code. + + The old ALLOCSP/FREESP ioctls in XFS can be used to preallocate space at + the end of files, just like fallocate and RESVSP. Make the behavior + consistent with the other ioctls. + + Reported-by: Kirill Tkhai + Signed-off-by: Darrick J. Wong + Signed-off-by: Darrick J. 
Wong + Reviewed-by: Dave Chinner + Reviewed-by: Eric Sandeen + (cherry picked from commit 983d8e60f50806f90534cc5373d0ce867e5aaf79) + + Signed-off-by: Carlos Maiolino + +Signed-off-by: Joe Lawrence +--- + fs/xfs/xfs_ioctl.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c +index 722396680482..8f1c795c8765 100644 +--- a/fs/xfs/xfs_ioctl.c ++++ b/fs/xfs/xfs_ioctl.c +@@ -773,7 +773,8 @@ xfs_ioc_space( + flags |= XFS_PREALLOC_CLEAR; + if (bf->l_start > XFS_ISIZE(ip)) { + error = xfs_alloc_file_space(ip, XFS_ISIZE(ip), +- bf->l_start - XFS_ISIZE(ip), 0); ++ bf->l_start - XFS_ISIZE(ip), ++ XFS_BMAPI_PREALLOC); + if (error) + goto out_unlock; + } +-- +2.26.3 + + diff --git a/SOURCES/CVE-2022-0330.patch b/SOURCES/CVE-2022-0330.patch new file mode 100644 index 0000000..bb1a90a --- /dev/null +++ b/SOURCES/CVE-2022-0330.patch @@ -0,0 +1,271 @@ +From c2dd834b3e366fff19a868fa446643f7f30201c7 Mon Sep 17 00:00:00 2001 +From: Yannick Cote +Date: Tue, 8 Feb 2022 17:10:45 -0500 +Subject: [KPATCH CVE-2022-0330] drm/i915: kpatch fixes for CVE-2022-0330 + +Kernels: +3.10.0-1160.21.1.el7 +3.10.0-1160.24.1.el7 +3.10.0-1160.25.1.el7 +3.10.0-1160.31.1.el7 +3.10.0-1160.36.2.el7 +3.10.0-1160.41.1.el7 +3.10.0-1160.42.2.el7 +3.10.0-1160.45.1.el7 +3.10.0-1160.49.1.el7 +3.10.0-1160.53.1.el7 + +Changes since last build: +arches: x86_64 +i915_drv.o: changed function: i915_driver_destroy +i915_gem.o: changed function: __i915_gem_object_unset_pages +i915_gem.o: changed function: i915_gem_fault +i915_gem.o: new function: assert_rpm_wakelock_held.part.56 +i915_gem.o: new function: tlb_invalidate_lock_ctor +i915_vma.o: changed function: i915_vma_bind +--------------------------- + +Kpatch-MR: https://gitlab.com/redhat/prdsc/rhel/src/kpatch/rhel-7/-/merge_requests/24 +Kernels: +3.10.0-1160.21.1.el7 +3.10.0-1160.24.1.el7 +3.10.0-1160.25.1.el7 +3.10.0-1160.31.1.el7 +3.10.0-1160.36.2.el7 +3.10.0-1160.41.1.el7 +3.10.0-1160.42.2.el7 +3.10.0-1160.45.1.el7 +3.10.0-1160.49.1.el7 +3.10.0-1160.53.1.el7 + +Modifications: +- Move new bit definition to .c files avoiding changes to .h files. +- Redefine tlb_invalidate_lock as a klp shadow variable and avoid +changes to global structure definition (struct drm_i915_private). + +commit c96aee1f92b3a81d8a36efd91cfc5ff33ca3ac80 +Author: Dave Airlie +Date: Tue Jan 25 18:19:06 2022 -0500 + + drm/i915: Flush TLBs before releasing backing store + + Bugzilla: http://bugzilla.redhat.com/2044319 + CVE: CVE-2022-0330 + + commit 7938d61591d33394a21bdd7797a245b65428f44c + Author: Tvrtko Ursulin + Date: Tue Oct 19 13:27:10 2021 +0100 + + drm/i915: Flush TLBs before releasing backing store + + We need to flush TLBs before releasing backing store otherwise userspace + is able to encounter stale entries if a) it is not declaring access to + certain buffers and b) it races with the backing store release from a + such undeclared execution already executing on the GPU in parallel. + + The approach taken is to mark any buffer objects which were ever bound + to the GPU and to trigger a serialized TLB flush when their backing + store is released. 
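+
+    As an illustration only (a simplified sketch condensed from the hunks
+    below, not an applied hunk itself), the mark-and-flush pattern is:
+
+        /* i915_vma_bind(): remember the object once had a GPU mapping */
+        if (vma->obj)
+            set_bit(I915_BO_WAS_BOUND_BIT, &vma->obj->flags);
+
+        /* __i915_gem_object_unset_pages(): flush TLBs before pages go away */
+        if (test_and_clear_bit(I915_BO_WAS_BOUND_BIT, &obj->flags)) {
+            struct drm_i915_private *i915 = to_i915(obj->base.dev);
+
+            if (intel_runtime_pm_get_if_in_use(i915)) {
+                invalidate_tlbs(i915);   /* serialized on a per-device mutex */
+                intel_runtime_pm_put(i915);
+            }
+        }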
+ + Alternatively the flushing could be done on VMA unbind, at which point + we would be able to ascertain whether there is potential a parallel GPU + execution (which could race), but essentially it boils down to paying + the cost of TLB flushes potentially needlessly at VMA unbind time (when + the backing store is not known to be going away so not needed for + safety), versus potentially needlessly at backing store relase time + (since we at that point cannot tell whether there is anything executing + on the GPU which uses that object). + + Thereforce simplicity of implementation has been chosen for now with + scope to benchmark and refine later as required. + + Signed-off-by: Tvrtko Ursulin + Reported-by: Sushma Venkatesh Reddy + Reviewed-by: Daniel Vetter + Acked-by: Dave Airlie + Cc: Daniel Vetter + Cc: Jon Bloomfield + Cc: Joonas Lahtinen + Cc: Jani Nikula + Cc: stable@vger.kernel.org + Signed-off-by: Linus Torvalds + + Signed-off-by: Dave Airlie + +Signed-off-by: Yannick Cote +--- + drivers/gpu/drm/i915/i915_drv.c | 4 ++ + drivers/gpu/drm/i915/i915_gem.c | 104 ++++++++++++++++++++++++++++++++ + drivers/gpu/drm/i915/i915_vma.c | 6 ++ + 3 files changed, 114 insertions(+) + +diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c +index db8a0e6d2f2f..9c12def30f4b 100644 +--- a/drivers/gpu/drm/i915/i915_drv.c ++++ b/drivers/gpu/drm/i915/i915_drv.c +@@ -1683,11 +1683,15 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent) + return i915; + } + ++#include ++#define KLP_CVE_2022_0330_MUTEX 0x2022033000000001 ++ + static void i915_driver_destroy(struct drm_i915_private *i915) + { + struct pci_dev *pdev = i915->drm.pdev; + + drm_dev_fini(&i915->drm); ++ klp_shadow_free(i915, KLP_CVE_2022_0330_MUTEX, NULL); + kfree(i915); + + /* And make sure we never chase our dangling pointer from pci_dev */ +diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c +index c96ccd9001bf..b882a08b32f9 100644 +--- a/drivers/gpu/drm/i915/i915_gem.c ++++ b/drivers/gpu/drm/i915/i915_gem.c +@@ -2464,6 +2464,101 @@ static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj) + rcu_read_unlock(); + } + ++struct reg_and_bit { ++ i915_reg_t reg; ++ u32 bit; ++}; ++ ++static struct reg_and_bit ++get_reg_and_bit(const struct intel_engine_cs *engine, ++ const i915_reg_t *regs, const unsigned int num) ++{ ++ const unsigned int class = engine->class; ++ struct reg_and_bit rb = { .bit = 1 }; ++ ++ if (WARN_ON_ONCE(class >= num || !regs[class].reg)) ++ return rb; ++ ++ rb.reg = regs[class]; ++ if (class == VIDEO_DECODE_CLASS) ++ rb.reg.reg += 4 * engine->instance; /* GEN8_M2TCR */ ++ ++ return rb; ++} ++ ++#include ++#define KLP_CVE_2022_0330_MUTEX 0x2022033000000001 ++#define I915_BO_WAS_BOUND_BIT 1 ++#define GEN8_RTCR _MMIO(0x4260) ++#define GEN8_M1TCR _MMIO(0x4264) ++#define GEN8_M2TCR _MMIO(0x4268) ++#define GEN8_BTCR _MMIO(0x426c) ++#define GEN8_VTCR _MMIO(0x4270) ++ ++static int tlb_invalidate_lock_ctor(void *obj, void *shadow_data, void *ctor_data) ++{ ++ struct mutex *m = shadow_data; ++ mutex_init(m); ++ ++ return 0; ++} ++ ++static void invalidate_tlbs(struct drm_i915_private *dev_priv) ++{ ++ static const i915_reg_t gen8_regs[] = { ++ [RENDER_CLASS] = GEN8_RTCR, ++ [VIDEO_DECODE_CLASS] = GEN8_M1TCR, /* , GEN8_M2TCR */ ++ [VIDEO_ENHANCEMENT_CLASS] = GEN8_VTCR, ++ [COPY_ENGINE_CLASS] = GEN8_BTCR, ++ }; ++ const unsigned int num = ARRAY_SIZE(gen8_regs); ++ const i915_reg_t *regs = gen8_regs; ++ struct intel_engine_cs *engine; ++ enum 
intel_engine_id id; ++ struct mutex *tlb_invalidate_lock; ++ ++ if (INTEL_GEN(dev_priv) < 8) ++ return; ++ ++ GEM_TRACE("\n"); ++ ++ assert_rpm_wakelock_held(dev_priv); ++ ++ tlb_invalidate_lock = klp_shadow_get_or_alloc(dev_priv, KLP_CVE_2022_0330_MUTEX, ++ sizeof(*tlb_invalidate_lock), GFP_KERNEL, ++ tlb_invalidate_lock_ctor, NULL); ++ if (tlb_invalidate_lock) { ++ mutex_lock(tlb_invalidate_lock); ++ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); ++ ++ for_each_engine(engine, dev_priv, id) { ++ /* ++ * HW architecture suggest typical invalidation time at 40us, ++ * with pessimistic cases up to 100us and a recommendation to ++ * cap at 1ms. We go a bit higher just in case. ++ */ ++ const unsigned int timeout_us = 100; ++ const unsigned int timeout_ms = 4; ++ struct reg_and_bit rb; ++ ++ rb = get_reg_and_bit(engine, regs, num); ++ if (!i915_mmio_reg_offset(rb.reg)) ++ continue; ++ ++ I915_WRITE_FW(rb.reg, rb.bit); ++ if (__intel_wait_for_register_fw(dev_priv, ++ rb.reg, rb.bit, 0, ++ timeout_us, timeout_ms, ++ NULL)) ++ DRM_ERROR_RATELIMITED("%s TLB invalidation did not complete in %ums!\n", ++ engine->name, timeout_ms); ++ } ++ ++ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); ++ mutex_unlock(tlb_invalidate_lock); ++ } ++} ++ + static struct sg_table * + __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj) + { +@@ -2493,6 +2588,15 @@ __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj) + __i915_gem_object_reset_page_iter(obj); + obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0; + ++ if (test_and_clear_bit(I915_BO_WAS_BOUND_BIT, &obj->flags)) { ++ struct drm_i915_private *i915 = to_i915(obj->base.dev); ++ ++ if (intel_runtime_pm_get_if_in_use(i915)) { ++ invalidate_tlbs(i915); ++ intel_runtime_pm_put(i915); ++ } ++ } ++ + return pages; + } + +diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c +index 5b4d78cdb4ca..906e6321ad77 100644 +--- a/drivers/gpu/drm/i915/i915_vma.c ++++ b/drivers/gpu/drm/i915/i915_vma.c +@@ -285,6 +285,8 @@ i915_vma_instance(struct drm_i915_gem_object *obj, + return vma; + } + ++#define I915_BO_WAS_BOUND_BIT 1 ++ + /** + * i915_vma_bind - Sets up PTEs for an VMA in it's corresponding address space. 
+ * @vma: VMA to map +@@ -335,6 +337,10 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level, + return ret; + + vma->flags |= bind_flags; ++ ++ if (vma->obj) ++ set_bit(I915_BO_WAS_BOUND_BIT, &vma->obj->flags); ++ + return 0; + } + +-- +2.26.3 + + diff --git a/SOURCES/CVE-2022-22942.patch b/SOURCES/CVE-2022-22942.patch new file mode 100644 index 0000000..94ddd4b --- /dev/null +++ b/SOURCES/CVE-2022-22942.patch @@ -0,0 +1,222 @@ +From 05639043d88c1ed4c13670721d222b8621d2fbca Mon Sep 17 00:00:00 2001 +From: Joe Lawrence +Date: Wed, 9 Feb 2022 13:27:43 -0500 +Subject: [KPATCH CVE-2022-22942] drm/vmwgfx: kpatch fixes for CVE-2022-22942 + +Kernels: +3.10.0-1160.21.1.el7 +3.10.0-1160.24.1.el7 +3.10.0-1160.25.1.el7 +3.10.0-1160.31.1.el7 +3.10.0-1160.36.2.el7 +3.10.0-1160.41.1.el7 +3.10.0-1160.42.2.el7 +3.10.0-1160.45.1.el7 +3.10.0-1160.49.1.el7 +3.10.0-1160.53.1.el7 + +Changes since last build: +arches: x86_64 +vmwgfx_execbuf.o: changed function: vmw_execbuf_copy_fence_user +vmwgfx_execbuf.o: changed function: vmw_execbuf_process +vmwgfx_fence.o: changed function: vmw_fence_event_ioctl +vmwgfx_kms.o: changed function: vmw_kms_helper_validation_finish +--------------------------- + +Kpatch-MR: https://gitlab.com/redhat/prdsc/rhel/src/kpatch/rhel-7/-/merge_requests/23 +Approved-by: Yannick Cote (@ycote1) +Approved-by: Artem Savkov (@artem.savkov) +Kernels: +3.10.0-1160.21.1.el7 +3.10.0-1160.24.1.el7 +3.10.0-1160.25.1.el7 +3.10.0-1160.31.1.el7 +3.10.0-1160.36.2.el7 +3.10.0-1160.41.1.el7 +3.10.0-1160.42.2.el7 +3.10.0-1160.45.1.el7 +3.10.0-1160.49.1.el7 +3.10.0-1160.53.1.el7 + +Modifications: none + +commit 26695bc7aeaf0bae32d7ab7fcf8950143acb5020 +Author: Dave Airlie +Date: Sat Jan 29 02:36:46 2022 -0500 + + drm/vmwgfx: Fix stale file descriptors on failed usercopy + + Bugzilla: http://bugzilla.redhat.com/2047597 + CVE: CVE-2022-22942 + + commit a0f90c8815706981c483a652a6aefca51a5e191c + Author: Mathias Krause + Date: Thu Jan 27 18:34:19 2022 +1000 + + drm/vmwgfx: Fix stale file descriptors on failed usercopy + + A failing usercopy of the fence_rep object will lead to a stale entry in + the file descriptor table as put_unused_fd() won't release it. This + enables userland to refer to a dangling 'file' object through that still + valid file descriptor, leading to all kinds of use-after-free + exploitation scenarios. + + Fix this by deferring the call to fd_install() until after the usercopy + has succeeded. 
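+
+    As an illustration only (a simplified sketch condensed from the hunks
+    below, not an applied hunk itself), the resulting ordering is:
+
+        /* copy the fence reply to userspace first... */
+        ret = vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
+                                          user_fence_rep, fence, handle,
+                                          out_fence_fd);
+        if (sync_file) {
+            if (ret) {
+                /* usercopy failed: the fd was never published, so both the
+                 * fd number and the file object can still be taken back */
+                fput(sync_file->file);
+                put_unused_fd(out_fence_fd);
+            } else {
+                /* ...and only then make the fd visible to userspace */
+                fd_install(out_fence_fd, sync_file->file);
+            }
+        }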
+ + Fixes: c906965dee22 ("drm/vmwgfx: Add export fence to file descriptor support") + Signed-off-by: Mathias Krause + Signed-off-by: Zack Rusin + Signed-off-by: Dave Airlie + Signed-off-by: Linus Torvalds + + Signed-off-by: Dave Airlie + +Signed-off-by: Joe Lawrence +--- + drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 5 ++-- + drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 34 ++++++++++++------------- + drivers/gpu/drm/vmwgfx/vmwgfx_fence.c | 2 +- + drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 2 +- + 4 files changed, 21 insertions(+), 22 deletions(-) + +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +index cd607ba9c2fe..efd540f02227 100644 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +@@ -924,15 +924,14 @@ extern int vmw_execbuf_fence_commands(struct drm_file *file_priv, + struct vmw_private *dev_priv, + struct vmw_fence_obj **p_fence, + uint32_t *p_handle); +-extern void vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv, ++extern int vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv, + struct vmw_fpriv *vmw_fp, + int ret, + struct drm_vmw_fence_rep __user + *user_fence_rep, + struct vmw_fence_obj *fence, + uint32_t fence_handle, +- int32_t out_fence_fd, +- struct sync_file *sync_file); ++ int32_t out_fence_fd); + bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd); + + /** +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +index 88b8178d4687..4f792fb89bb8 100644 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +@@ -3595,20 +3595,19 @@ int vmw_execbuf_fence_commands(struct drm_file *file_priv, + * object so we wait for it immediately, and then unreference the + * user-space reference. + */ +-void ++int + vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv, + struct vmw_fpriv *vmw_fp, + int ret, + struct drm_vmw_fence_rep __user *user_fence_rep, + struct vmw_fence_obj *fence, + uint32_t fence_handle, +- int32_t out_fence_fd, +- struct sync_file *sync_file) ++ int32_t out_fence_fd) + { + struct drm_vmw_fence_rep fence_rep; + + if (user_fence_rep == NULL) +- return; ++ return 0; + + memset(&fence_rep, 0, sizeof(fence_rep)); + +@@ -3636,20 +3635,14 @@ vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv, + * and unreference the handle. + */ + if (unlikely(ret != 0) && (fence_rep.error == 0)) { +- if (sync_file) +- fput(sync_file->file); +- +- if (fence_rep.fd != -1) { +- put_unused_fd(fence_rep.fd); +- fence_rep.fd = -1; +- } +- + ttm_ref_object_base_unref(vmw_fp->tfile, + fence_handle, TTM_REF_USAGE); + DRM_ERROR("Fence copy error. Syncing.\n"); + (void) vmw_fence_obj_wait(fence, false, false, + VMW_FENCE_WAIT_TIMEOUT); + } ++ ++ return ret ? 
-EFAULT : 0; + } + + /** +@@ -3997,16 +3990,23 @@ int vmw_execbuf_process(struct drm_file *file_priv, + + (void) vmw_fence_obj_wait(fence, false, false, + VMW_FENCE_WAIT_TIMEOUT); ++ } ++ } ++ ++ ret = vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret, ++ user_fence_rep, fence, handle, out_fence_fd); ++ ++ if (sync_file) { ++ if (ret) { ++ /* usercopy of fence failed, put the file object */ ++ fput(sync_file->file); ++ put_unused_fd(out_fence_fd); + } else { + /* Link the fence with the FD created earlier */ + fd_install(out_fence_fd, sync_file->file); + } + } + +- vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret, +- user_fence_rep, fence, handle, +- out_fence_fd, sync_file); +- + /* Don't unreference when handing fence out */ + if (unlikely(out_fence != NULL)) { + *out_fence = fence; +@@ -4024,7 +4024,7 @@ int vmw_execbuf_process(struct drm_file *file_priv, + */ + vmw_validation_unref_lists(&val_ctx); + +- return 0; ++ return ret; + + out_unlock_binding: + mutex_unlock(&dev_priv->binding_mutex); +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c +index 301260e23e52..624754cc4fc1 100644 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c +@@ -1167,7 +1167,7 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data, + } + + vmw_execbuf_copy_fence_user(dev_priv, vmw_fp, 0, user_fence_rep, fence, +- handle, -1, NULL); ++ handle, -1); + vmw_fence_obj_unreference(&fence); + return 0; + out_no_create: +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +index ed2f67822f45..fdf58090aa4a 100644 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +@@ -2565,7 +2565,7 @@ void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv, + if (file_priv) + vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), + ret, user_fence_rep, fence, +- handle, -1, NULL); ++ handle, -1); + if (out_fence) + *out_fence = fence; + else +-- +2.26.3 + + diff --git a/SPECS/kpatch-patch.spec b/SPECS/kpatch-patch.spec index f42c2bd..f321808 100644 --- a/SPECS/kpatch-patch.spec +++ b/SPECS/kpatch-patch.spec @@ -6,7 +6,7 @@ %define kernel_ver 3.10.0-1160.41.1.el7 %define kpatch_ver 0.9.2 %define rpm_ver 1 -%define rpm_rel 3 +%define rpm_rel 4 %if !%{empty_package} # Patch sources below. DO NOT REMOVE THIS LINE. @@ -22,6 +22,21 @@ Source102: CVE-2021-37576.patch # # https://bugzilla.redhat.com/1981703 Source103: CVE-2020-36385.patch +# +# https://bugzilla.redhat.com/2034878 +Source104: CVE-2021-4155.patch +# +# https://bugzilla.redhat.com/2031986 +Source105: CVE-2021-0920.patch +# +# https://bugzilla.redhat.com/2042766 +Source106: CVE-2020-0466.patch +# +# https://bugzilla.redhat.com/2044373 +Source107: CVE-2022-0330.patch +# +# https://bugzilla.redhat.com/2047616 +Source108: CVE-2022-22942.patch # End of patch sources. DO NOT REMOVE THIS LINE. %endif @@ -154,6 +169,13 @@ It is only a method to subscribe to the kpatch stream for kernel-%{kernel_ver}. 
%endif %changelog +* Fri Feb 11 2022 Joe Lawrence [1-4.el7] +- kernel: failing usercopy allows for use-after-free exploitation [2047616] {CVE-2022-22942} +- kernel: possible privileges escalation due to missing TLB flush [2044373] {CVE-2022-0330} +- kernel: use after free in eventpoll.c may lead to escalation of privilege [2042766] {CVE-2020-0466} +- kernel: Use After Free in unix_gc() which could result in a local privilege escalation [2031986] {CVE-2021-0920} +- kernel: xfs: raw block device data leak in XFS_IOC_ALLOCSP IOCTL [2034878] {CVE-2021-4155} + * Fri Nov 12 2021 Joe Lawrence [1-3.el7] - kernel: use-after-free in drivers/infiniband/core/ucma.c ctx use-after-free [1981703] {CVE-2020-36385}