From e314ba42cb4dccd4d9edb2ffcb2295b4c4e2d00d Mon Sep 17 00:00:00 2001
From: Yannick Cote <ycote@redhat.com>
Date: Tue, 1 Mar 2022 19:54:52 -0500
Subject: [KPATCH CVE-2022-0330] drm/i915: kpatch fixes for CVE-2022-0330

Kernels:
4.18.0-348.el8
4.18.0-348.2.1.el8_5
4.18.0-348.7.1.el8_5
4.18.0-348.12.2.el8_5

Changes since last build:
arches: x86_64
i915_drv.o: changed function: i915_driver_release
i915_vma.o: changed function: i915_vma_bind
intel_gt.o: new function: intel_gt_invalidate_tlbs
intel_gt.o: new function: tlb_invalidate_lock_ctor
intel_uncore.o: changed function: __intel_uncore_forcewake_put
intel_uncore.o: changed function: __intel_wait_for_register
intel_uncore.o: changed function: i915_pmic_bus_access_notifier
intel_uncore.o: changed function: intel_uncore_forcewake_put
intel_uncore.o: changed function: intel_uncore_forcewake_put__locked
intel_uncore.o: changed function: intel_uncore_forcewake_user_put
intel_uncore.o: new function: intel_uncore_forcewake_put_delayed
---------------------------

Kpatch-MR: https://gitlab.com/redhat/prdsc/rhel/src/kpatch/rhel-8/-/merge_requests/33
Approved-by: Joe Lawrence (@joe.lawrence)
Kernels:
4.18.0-348.el8
4.18.0-348.2.1.el8_5
4.18.0-348.7.1.el8_5
4.18.0-348.12.2.el8_5

Modifications:
- Move the new bit definition into the .c files, avoiding changes to .h files.
- Redefine tlb_invalidate_lock as a klp shadow variable, avoiding changes
  to the global structure definition (struct intel_gt); see the sketch below.

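For reference, a minimal standalone sketch of the klp shadow-variable
pattern used here (API from <linux/livepatch.h>; example_use() and its
obj argument are illustrative only, the ID and ctor mirror the hunks below):

    #include <linux/gfp.h>
    #include <linux/livepatch.h>
    #include <linux/mutex.h>

    #define KLP_CVE_2022_0330_MUTEX 0x2022033000000001

    /* ctor runs once, when the shadow variable is first allocated */
    static int tlb_invalidate_lock_ctor(void *obj, void *shadow_data,
                                        void *ctor_data)
    {
        mutex_init((struct mutex *)shadow_data);
        return 0;
    }

    static void example_use(void *obj)
    {
        struct mutex *lock;

        /* attach a mutex to obj without changing any global struct layout */
        lock = klp_shadow_get_or_alloc(obj, KLP_CVE_2022_0330_MUTEX,
                                       sizeof(*lock), GFP_KERNEL,
                                       tlb_invalidate_lock_ctor, NULL);
        if (!lock)
            return; /* allocation failed; skip, as the patch does */

        mutex_lock(lock);
        /* ... work serialized across all users of the shadow mutex ... */
        mutex_unlock(lock);
    }

    /* paired teardown, as done in i915_driver_release() below: */
    /* klp_shadow_free(obj, KLP_CVE_2022_0330_MUTEX, NULL); */
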
commit 01dfa79afb751b4fec242c7d05ee2e0f78fe9a78
Author: Patrick Talbert <ptalbert@redhat.com>
Date:   Mon Jan 31 10:33:24 2022 +0100

    drm/i915: Flush TLBs before releasing backing store

    Bugzilla: https://bugzilla.redhat.com/2044328
    CVE: CVE-2022-0330
    Y-Commit: 5dfb7de610e0b38a03d4d71bdc6cb23a8af0161d

    commit 7938d61591d33394a21bdd7797a245b65428f44c
    Author: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
    Date:   Tue Oct 19 13:27:10 2021 +0100

        drm/i915: Flush TLBs before releasing backing store

        We need to flush TLBs before releasing the backing store, otherwise
        userspace can encounter stale entries if a) it is not declaring GPU access
        to certain buffers and b) this GPU execution then races with the backing
        store release getting triggered asynchronously.

        The approach taken is to mark any buffer object which was ever bound to
        the GPU and to trigger a serialized TLB flush when its backing store is
        released.

        Alternatively the flushing could be done on VMA unbind, at which point we
        would be able to ascertain whether there is potential parallel GPU
        execution (which could race), but the choice essentially boils down to
        paying the cost of TLB flushes maybe needlessly at VMA unbind time (when
        the backing store is not known to be definitely going away, so flushing is
        not always required for safety), versus potentially needlessly at backing
        store release time, since at that point we cannot tell whether there is
        parallel GPU execution happening.

        Therefore simplicity of implementation has been chosen for now, with scope
        to benchmark and refine later as required.

        Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
        Reported-by: Sushma Venkatesh Reddy <sushma.venkatesh.reddy@intel.com>
        Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
        Cc: Jon Bloomfield <jon.bloomfield@intel.com>
        Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
        Cc: Jani Nikula <jani.nikula@intel.com>
        Cc: stable@vger.kernel.org

    Signed-off-by: Patrick Talbert <ptalbert@redhat.com>

Signed-off-by: Yannick Cote <ycote@redhat.com>
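
In outline, the mark-and-flush scheme is (condensed verbatim from the
i915_vma.c and i915_gem_pages.c hunks below; no new logic):

    /* i915_vma_bind(): remember that the object was made GPU-visible */
    if (vma->obj)
        set_bit(I915_BO_WAS_BOUND_BIT, &vma->obj->flags);

    /* __i915_gem_object_unset_pages(): flush once, only if ever bound */
    if (test_and_clear_bit(I915_BO_WAS_BOUND_BIT, &obj->flags)) {
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
        intel_wakeref_t wakeref;

        with_intel_runtime_pm_if_active(&i915->runtime_pm, wakeref)
            intel_gt_invalidate_tlbs(&i915->gt);
    }
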
---
 drivers/gpu/drm/i915/gem/i915_gem_pages.c |  13 +++
 drivers/gpu/drm/i915/gt/intel_gt.c        | 130 ++++++++++++++++++++++
 drivers/gpu/drm/i915/i915_drv.c           |   5 +
 drivers/gpu/drm/i915/i915_vma.c           |   6 +
 drivers/gpu/drm/i915/intel_uncore.c       |  26 ++++-
 5 files changed, 176 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index 76574e245916..ba7fce675ee7 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -173,6 +173,11 @@ static void unmap_object(struct drm_i915_gem_object *obj, void *ptr)
 		vunmap(ptr);
 }
 
+/* CVE-2022-0330 - kpatch gathered definitions */
+#define I915_BO_WAS_BOUND_BIT	4
+
+void intel_gt_invalidate_tlbs(struct intel_gt *gt);
+
 struct sg_table *
 __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
 {
@@ -195,6 +200,14 @@ __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
 	__i915_gem_object_reset_page_iter(obj);
 	obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;
 
+	if (test_and_clear_bit(I915_BO_WAS_BOUND_BIT, &obj->flags)) {
+		struct drm_i915_private *i915 = to_i915(obj->base.dev);
+		intel_wakeref_t wakeref;
+
+		with_intel_runtime_pm_if_active(&i915->runtime_pm, wakeref)
+			intel_gt_invalidate_tlbs(&i915->gt);
+	}
+
 	return pages;
 }
 
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
index d8e1ab412634..da0b144ea418 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -662,3 +662,133 @@ void intel_gt_info_print(const struct intel_gt_info *info,
 
 	intel_sseu_dump(&info->sseu, p);
 }
+
+struct reg_and_bit {
+	i915_reg_t reg;
+	u32 bit;
+};
+
+static struct reg_and_bit
+get_reg_and_bit(const struct intel_engine_cs *engine, const bool gen8,
+		const i915_reg_t *regs, const unsigned int num)
+{
+	const unsigned int class = engine->class;
+	struct reg_and_bit rb = { };
+
+	if (drm_WARN_ON_ONCE(&engine->i915->drm,
+			     class >= num || !regs[class].reg))
+		return rb;
+
+	rb.reg = regs[class];
+	if (gen8 && class == VIDEO_DECODE_CLASS)
+		rb.reg.reg += 4 * engine->instance; /* GEN8_M2TCR */
+	else
+		rb.bit = engine->instance;
+
+	rb.bit = BIT(rb.bit);
+
+	return rb;
+}
+
+/* CVE-2022-0330 - kpatch gathered definitions */
+#include <linux/livepatch.h>
+#define KLP_CVE_2022_0330_MUTEX	0x2022033000000001
+#define GEN8_RTCR		_MMIO(0x4260)
+#define GEN8_M1TCR		_MMIO(0x4264)
+#define GEN8_M2TCR		_MMIO(0x4268)
+#define GEN8_BTCR		_MMIO(0x426c)
+#define GEN8_VTCR		_MMIO(0x4270)
+#define GEN12_GFX_TLB_INV_CR	_MMIO(0xced8)
+#define GEN12_VD_TLB_INV_CR	_MMIO(0xcedc)
+#define GEN12_VE_TLB_INV_CR	_MMIO(0xcee0)
+#define GEN12_BLT_TLB_INV_CR	_MMIO(0xcee4)
+
+void intel_uncore_forcewake_put_delayed(struct intel_uncore *uncore,
+					enum forcewake_domains domains);
+
+static int tlb_invalidate_lock_ctor(void *obj, void *shadow_data, void *ctor_data)
+{
+	struct mutex *m = shadow_data;
+	mutex_init(m);
+
+	return 0;
+}
+
+void intel_gt_invalidate_tlbs(struct intel_gt *gt)
+{
+	static const i915_reg_t gen8_regs[] = {
+		[RENDER_CLASS]			= GEN8_RTCR,
+		[VIDEO_DECODE_CLASS]		= GEN8_M1TCR, /* , GEN8_M2TCR */
+		[VIDEO_ENHANCEMENT_CLASS]	= GEN8_VTCR,
+		[COPY_ENGINE_CLASS]		= GEN8_BTCR,
+	};
+	static const i915_reg_t gen12_regs[] = {
+		[RENDER_CLASS]			= GEN12_GFX_TLB_INV_CR,
+		[VIDEO_DECODE_CLASS]		= GEN12_VD_TLB_INV_CR,
+		[VIDEO_ENHANCEMENT_CLASS]	= GEN12_VE_TLB_INV_CR,
+		[COPY_ENGINE_CLASS]		= GEN12_BLT_TLB_INV_CR,
+	};
+	struct drm_i915_private *i915 = gt->i915;
+	struct intel_uncore *uncore = gt->uncore;
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+	const i915_reg_t *regs;
+	unsigned int num = 0;
+	struct mutex *tlb_invalidate_lock;
+
+	if (I915_SELFTEST_ONLY(gt->awake == -ENODEV))
+		return;
+
+	if (INTEL_GEN(i915) == 12) {
+		regs = gen12_regs;
+		num = ARRAY_SIZE(gen12_regs);
+	} else if (INTEL_GEN(i915) >= 8 && INTEL_GEN(i915) <= 11) {
+		regs = gen8_regs;
+		num = ARRAY_SIZE(gen8_regs);
+	} else if (INTEL_GEN(i915) < 8) {
+		return;
+	}
+
+	if (drm_WARN_ONCE(&i915->drm, !num,
+			  "Platform does not implement TLB invalidation!"))
+		return;
+
+	GEM_TRACE("\n");
+
+	assert_rpm_wakelock_held(&i915->runtime_pm);
+
+	tlb_invalidate_lock = klp_shadow_get_or_alloc(i915, KLP_CVE_2022_0330_MUTEX,
+						      sizeof(*tlb_invalidate_lock), GFP_KERNEL,
+						      tlb_invalidate_lock_ctor, NULL);
+	if (tlb_invalidate_lock) {
+		mutex_lock(tlb_invalidate_lock);
+		intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
+
+		for_each_engine(engine, gt, id) {
+			/*
+			 * HW architecture suggest typical invalidation time at 40us,
+			 * with pessimistic cases up to 100us and a recommendation to
+			 * cap at 1ms. We go a bit higher just in case.
+			 */
+			const unsigned int timeout_us = 100;
+			const unsigned int timeout_ms = 4;
+			struct reg_and_bit rb;
+
+			rb = get_reg_and_bit(engine, regs == gen8_regs, regs, num);
+			if (!i915_mmio_reg_offset(rb.reg))
+				continue;
+
+			intel_uncore_write_fw(uncore, rb.reg, rb.bit);
+			if (__intel_wait_for_register_fw(uncore,
+							 rb.reg, rb.bit, 0,
+							 timeout_us, timeout_ms,
+							 NULL))
+				drm_err_ratelimited(&gt->i915->drm,
+						    "%s TLB invalidation did not complete in %ums!\n",
+						    engine->name, timeout_ms);
+		}
+
+		intel_uncore_forcewake_put_delayed(uncore, FORCEWAKE_ALL);
+		mutex_unlock(tlb_invalidate_lock);
+	}
+}
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 92668bcbece0..31b298618e7a 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -957,6 +957,10 @@ void i915_driver_remove(struct drm_i915_private *i915)
 	enable_rpm_wakeref_asserts(&i915->runtime_pm);
 }
 
+/* CVE-2022-0330 - kpatch gathered definitions */
+#include <linux/livepatch.h>
+#define KLP_CVE_2022_0330_MUTEX 0x2022033000000001
+
 static void i915_driver_release(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
@@ -979,6 +983,7 @@ static void i915_driver_release(struct drm_device *dev)
 	intel_runtime_pm_driver_release(rpm);
 
 	i915_driver_late_release(dev_priv);
+	klp_shadow_free(dev_priv, KLP_CVE_2022_0330_MUTEX, NULL);
 }
 
 static int i915_driver_open(struct drm_device *dev, struct drm_file *file)
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index caa9b041616b..8b2f1c8b2170 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -362,6 +362,9 @@ int i915_vma_wait_for_bind(struct i915_vma *vma)
 	return err;
 }
 
+/* CVE-2022-0330 - kpatch gathered definitions */
+#define I915_BO_WAS_BOUND_BIT	4
+
 /**
  * i915_vma_bind - Sets up PTEs for an VMA in it's corresponding address space.
  * @vma: VMA to map
@@ -439,6 +442,9 @@ int i915_vma_bind(struct i915_vma *vma,
 		vma->ops->bind_vma(vma->vm, NULL, vma, cache_level, bind_flags);
 	}
 
+	if (vma->obj)
+		set_bit(I915_BO_WAS_BOUND_BIT, &vma->obj->flags);
+
 	atomic_or(bind_flags, &vma->flags);
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 9ac501bcfdad..9eb5d9e8e5a8 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -694,7 +694,8 @@ void intel_uncore_forcewake_get__locked(struct intel_uncore *uncore,
 }
 
 static void __intel_uncore_forcewake_put(struct intel_uncore *uncore,
-					 enum forcewake_domains fw_domains)
+					 enum forcewake_domains fw_domains,
+					 bool delayed)
 {
 	struct intel_uncore_forcewake_domain *domain;
 	unsigned int tmp;
@@ -709,7 +710,11 @@ static void __intel_uncore_forcewake_put(struct intel_uncore *uncore,
 			continue;
 		}
 
-		uncore->funcs.force_wake_put(uncore, domain->mask);
+		if (delayed &&
+		    !(domain->uncore->fw_domains_timer & domain->mask))
+			fw_domain_arm_timer(domain);
+		else
+			uncore->funcs.force_wake_put(uncore, domain->mask);
 	}
 }
 
@@ -730,7 +735,20 @@ void intel_uncore_forcewake_put(struct intel_uncore *uncore,
 		return;
 
 	spin_lock_irqsave(&uncore->lock, irqflags);
-	__intel_uncore_forcewake_put(uncore, fw_domains);
+	__intel_uncore_forcewake_put(uncore, fw_domains, false);
+	spin_unlock_irqrestore(&uncore->lock, irqflags);
+}
+
+void intel_uncore_forcewake_put_delayed(struct intel_uncore *uncore,
+					enum forcewake_domains fw_domains)
+{
+	unsigned long irqflags;
+
+	if (!uncore->funcs.force_wake_put)
+		return;
+
+	spin_lock_irqsave(&uncore->lock, irqflags);
+	__intel_uncore_forcewake_put(uncore, fw_domains, true);
 	spin_unlock_irqrestore(&uncore->lock, irqflags);
 }
 
@@ -772,7 +790,7 @@ void intel_uncore_forcewake_put__locked(struct intel_uncore *uncore,
 	if (!uncore->funcs.force_wake_put)
 		return;
 
-	__intel_uncore_forcewake_put(uncore, fw_domains);
+	__intel_uncore_forcewake_put(uncore, fw_domains, false);
 }
 
 void assert_forcewakes_inactive(struct intel_uncore *uncore)
-- 
2.34.1