From f659bd462a6b605c36a89fa205b41bf8f55c41be Mon Sep 17 00:00:00 2001
From: David Teigland <teigland@redhat.com>
Date: Fri, 17 Dec 2021 12:14:51 -0600
Subject: [PATCH] fix(lvm): restore setting LVM_MD_PV_ACTIVATED

The 69-dm-lvm-metad.rules udev rule has been removed from the
initrd because it has been dropped by recent upstream lvm
versions and never performed any primary function within the
initrd. However, it did have the job of setting
LVM_MD_PV_ACTIVATED=1 for active md devices used by PVs.
That step needs to be restored, and is now included in
64-lvm.rules.

(cherry picked from commit 164e5ebb1199ea3e3d641ce402d8257f0055a529)
Resolves: #2037955
---
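A quick way to check that the property gets set after rebuilding the
initrd is to query the udev database for the md device backing the PV;
/dev/md0 below is only an example device name:

    # assumes the PV sits on /dev/md0; adjust the device name as needed
    udevadm info --query=property --name=/dev/md0 | grep LVM_MD_PV_ACTIVATED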
modules.d/90lvm/64-lvm.rules | 8 ++++++++
1 file changed, 8 insertions(+)

diff --git a/modules.d/90lvm/64-lvm.rules b/modules.d/90lvm/64-lvm.rules
index ca718ce0..1ad49111 100644
--- a/modules.d/90lvm/64-lvm.rules
+++ b/modules.d/90lvm/64-lvm.rules
@@ -6,6 +6,14 @@
SUBSYSTEM!="block", GOTO="lvm_end"
ACTION!="add|change", GOTO="lvm_end"
+
+# If the md device is active (indicated by array_state), then set the flag
+# LVM_MD_PV_ACTIVATED=1 indicating that the md device for the PV is ready
+# to be used. The lvm udev rule running in root will check that this flag
+# is set before it will process the md device (it wants to avoid
+# processing an md device that exists but is not yet ready to be used.)
+KERNEL=="md[0-9]*", ACTION=="change", ENV{ID_FS_TYPE}=="LVM2_member", ENV{LVM_MD_PV_ACTIVATED}!="1", TEST=="md/array_state", ENV{LVM_MD_PV_ACTIVATED}="1"
+
# Also don't process disks that are slated to be a multipath device
ENV{DM_MULTIPATH_DEVICE_PATH}=="1", GOTO="lvm_end"
KERNEL=="dm-[0-9]*", ACTION=="add", GOTO="lvm_end"