neil / rpms / python-blivet

Forked from rpms/python-blivet a year ago
Clone

Blame SOURCES/0022-Add-support-for-creating-LVM-cache-pools.patch

d891b6
From 08f0e12c74e4c2ba25629fe92108283dd5ae3ff3 Mon Sep 17 00:00:00 2001
d891b6
From: Vojtech Trefny <vtrefny@redhat.com>
d891b6
Date: Thu, 30 Dec 2021 16:08:43 +0100
d891b6
Subject: [PATCH 1/4] Add support for creating LVM cache pools
d891b6
d891b6
Resolves: rhbz#2055200
d891b6
---
d891b6
 blivet/blivet.py               |   9 +-
d891b6
 blivet/devicelibs/lvm.py       |   9 ++
d891b6
 blivet/devices/lvm.py          | 160 +++++++++++++++++++++++++++++++--
d891b6
 tests/devices_test/lvm_test.py |  26 ++++++
d891b6
 4 files changed, 196 insertions(+), 8 deletions(-)
d891b6
d891b6
diff --git a/blivet/blivet.py b/blivet/blivet.py
d891b6
index c6908eb0..d29fadd0 100644
d891b6
--- a/blivet/blivet.py
d891b6
+++ b/blivet/blivet.py
d891b6
@@ -576,6 +576,8 @@ class Blivet(object):
d891b6
             :type vdo_pool: bool
d891b6
             :keyword vdo_lv: whether to create a vdo lv
d891b6
             :type vdo_lv: bool
d891b6
+            :keyword cache_pool: whether to create a cache pool
d891b6
+            :type cache_pool: bool
d891b6
             :returns: the new device
d891b6
             :rtype: :class:`~.devices.LVMLogicalVolumeDevice`
d891b6
 
d891b6
@@ -594,6 +596,7 @@ class Blivet(object):
d891b6
         thin_pool = kwargs.pop("thin_pool", False)
d891b6
         vdo_pool = kwargs.pop("vdo_pool", False)
d891b6
         vdo_lv = kwargs.pop("vdo_lv", False)
d891b6
+        cache_pool = kwargs.pop("cache_pool", False)
d891b6
         parent = kwargs.get("parents", [None])[0]
d891b6
         if (thin_volume or vdo_lv) and parent:
d891b6
             # kwargs["parents"] will contain the pool device, so...
d891b6
@@ -609,6 +612,8 @@ class Blivet(object):
d891b6
             kwargs["seg_type"] = "vdo-pool"
d891b6
         if vdo_lv:
d891b6
             kwargs["seg_type"] = "vdo"
d891b6
+        if cache_pool:
d891b6
+            kwargs["seg_type"] = "cache-pool"
d891b6
 
d891b6
         mountpoint = kwargs.pop("mountpoint", None)
d891b6
         if 'fmt_type' in kwargs:
d891b6
@@ -640,7 +645,7 @@ class Blivet(object):
d891b6
                 swap = False
d891b6
 
d891b6
             prefix = ""
d891b6
-            if thin_pool or vdo_pool:
d891b6
+            if thin_pool or vdo_pool or cache_pool:
d891b6
                 prefix = "pool"
d891b6
 
d891b6
             name = self.suggest_device_name(parent=vg,
d891b6
@@ -651,7 +656,7 @@ class Blivet(object):
d891b6
         if "%s-%s" % (vg.name, name) in self.names:
d891b6
             raise ValueError("name '%s' is already in use" % name)
d891b6
 
d891b6
-        if thin_pool or thin_volume or vdo_pool or vdo_lv:
d891b6
+        if thin_pool or thin_volume or vdo_pool or vdo_lv or cache_pool:
d891b6
             cache_req = kwargs.pop("cache_request", None)
d891b6
             if cache_req:
d891b6
                 raise ValueError("Creating cached thin and VDO volumes and pools is not supported")
d891b6
diff --git a/blivet/devicelibs/lvm.py b/blivet/devicelibs/lvm.py
d891b6
index bbde6303..23935009 100644
d891b6
--- a/blivet/devicelibs/lvm.py
d891b6
+++ b/blivet/devicelibs/lvm.py
d891b6
@@ -54,6 +54,11 @@ LVM_THINP_MIN_CHUNK_SIZE = Size("64 KiB")
d891b6
 LVM_THINP_MAX_CHUNK_SIZE = Size("1 GiB")
d891b6
 LVM_THINP_ADDRESSABLE_CHUNK_SIZE = Size("17455015526400 B")  # 15.88 TiB
d891b6
 
d891b6
+# cache constants
d891b6
+LVM_CACHE_MIN_METADATA_SIZE = Size("8 MiB")
d891b6
+LVM_CACHE_MAX_METADATA_SIZE = Size("16 GiB")
d891b6
+LVM_CACHE_DEFAULT_MODE = blockdev.LVMCacheMode.WRITETHROUGH
d891b6
+
d891b6
 raid_levels = raid.RAIDLevels(["linear", "striped", "raid1", "raid4", "raid5", "raid6", "raid10"])
d891b6
 raid_seg_types = list(itertools.chain.from_iterable([level.names for level in raid_levels if level.name != "linear"]))
d891b6
 
d891b6
@@ -248,3 +253,7 @@ def recommend_thpool_chunk_size(thpool_size):
d891b6
     # for every ~15.88 TiB of thinpool data size
d891b6
     return min(math.ceil(thpool_size / LVM_THINP_ADDRESSABLE_CHUNK_SIZE) * LVM_THINP_MIN_CHUNK_SIZE,
d891b6
                LVM_THINP_MAX_CHUNK_SIZE)
d891b6
+
d891b6
+
d891b6
+def is_valid_cache_md_size(md_size):
d891b6
+    return md_size >= LVM_CACHE_MIN_METADATA_SIZE and md_size <= LVM_CACHE_MAX_METADATA_SIZE
d891b6
diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
d891b6
index a971da8e..7cb482ab 100644
d891b6
--- a/blivet/devices/lvm.py
d891b6
+++ b/blivet/devices/lvm.py
d891b6
@@ -43,6 +43,7 @@ from .. import util
d891b6
 from ..storage_log import log_method_call
d891b6
 from .. import udev
d891b6
 from ..size import Size, KiB, MiB, ROUND_UP, ROUND_DOWN
d891b6
+from ..static_data.lvm_info import lvs_info
d891b6
 from ..tasks import availability
d891b6
 
d891b6
 import logging
d891b6
@@ -646,7 +647,7 @@ class LVMLogicalVolumeBase(DMDevice, RaidDevice):
d891b6
                  percent=None, cache_request=None, pvs=None, from_lvs=None):
d891b6
 
d891b6
         if not exists:
d891b6
-            if seg_type not in [None, "linear", "thin", "thin-pool", "cache", "vdo-pool", "vdo"] + lvm.raid_seg_types:
d891b6
+            if seg_type not in [None, "linear", "thin", "thin-pool", "cache", "vdo-pool", "vdo", "cache-pool"] + lvm.raid_seg_types:
d891b6
                 raise ValueError("Invalid or unsupported segment type: %s" % seg_type)
d891b6
             if seg_type and seg_type in lvm.raid_seg_types and not pvs:
d891b6
                 raise ValueError("List of PVs has to be given for every non-linear LV")
d891b6
@@ -690,8 +691,8 @@ class LVMLogicalVolumeBase(DMDevice, RaidDevice):
d891b6
             # we reserve space for it
d891b6
             self._metadata_size = self.vg.pe_size
d891b6
             self._size -= self._metadata_size
d891b6
-        elif self.seg_type == "thin-pool":
d891b6
-            # LVMThinPoolMixin sets self._metadata_size on its own
d891b6
+        elif self.seg_type in ("thin-pool", "cache-pool"):
d891b6
+            # LVMThinPoolMixin and LVMCachePoolMixin set self._metadata_size on their own
d891b6
             if not self.exists and not from_lvs and not grow:
d891b6
                 # a thin pool we are not going to grow -> lets calculate metadata
d891b6
                 # size now if not given explicitly
d891b6
@@ -1619,7 +1620,6 @@ class LVMThinPoolMixin(object):
d891b6
         """ A list of this pool's LVs """
d891b6
         return self._lvs[:]     # we don't want folks changing our list
d891b6
 
d891b6
-    @util.requires_property("is_thin_pool")
d891b6
     def autoset_md_size(self, enforced=False):
d891b6
         """ If self._metadata_size not set already, it calculates the recommended value
d891b6
         and sets it while subtracting the size from self.size.
d891b6
@@ -2032,9 +2032,142 @@ class LVMVDOLogicalVolumeMixin(object):
d891b6
             self.pool._add_log_vol(self)
d891b6
 
d891b6
 
d891b6
+class LVMCachePoolMixin(object):
d891b6
+    def __init__(self, metadata_size, cache_mode=None):
d891b6
+        self._metadata_size = metadata_size or Size(0)
d891b6
+        self._cache_mode = cache_mode
d891b6
+
d891b6
+    def _init_check(self):
d891b6
+        if not self.is_cache_pool:
d891b6
+            return
d891b6
+
d891b6
+        if self._metadata_size and not lvm.is_valid_cache_md_size(self._metadata_size):
d891b6
+            raise ValueError("invalid metadatasize value")
d891b6
+
d891b6
+        if not self.exists and not self._pv_specs:
d891b6
+            raise ValueError("at least one fast PV must be specified to create a cache pool")
d891b6
+
d891b6
+    def _check_from_lvs(self):
d891b6
+        if self._from_lvs:
d891b6
+            if len(self._from_lvs) != 2:
d891b6
+                raise errors.DeviceError("two LVs required to create a cache pool")
d891b6
+
d891b6
+    def _convert_from_lvs(self):
d891b6
+        data_lv, metadata_lv = self._from_lvs
d891b6
+
d891b6
+        data_lv.parent_lv = self  # also adds the LV to self._internal_lvs
d891b6
+        data_lv.int_lv_type = LVMInternalLVtype.data
d891b6
+        metadata_lv.parent_lv = self
d891b6
+        metadata_lv.int_lv_type = LVMInternalLVtype.meta
d891b6
+
d891b6
+        self.size = data_lv.size
d891b6
+
d891b6
+    @property
d891b6
+    def is_cache_pool(self):
d891b6
+        return self.seg_type == "cache-pool"
d891b6
+
d891b6
+    @property
d891b6
+    def profile(self):
d891b6
+        return self._profile
d891b6
+
d891b6
+    @property
d891b6
+    def type(self):
d891b6
+        return "lvmcachepool"
d891b6
+
d891b6
+    @property
d891b6
+    def resizable(self):
d891b6
+        return False
d891b6
+
d891b6
+    def read_current_size(self):
d891b6
+        log_method_call(self, exists=self.exists, path=self.path,
d891b6
+                        sysfs_path=self.sysfs_path)
d891b6
+        if self.size != Size(0):
d891b6
+            return self.size
d891b6
+
d891b6
+        if self.exists:
d891b6
+            # cache pools are not active and don't have the device mapper mapping
d891b6
+            # so we can't get this from sysfs
d891b6
+            lv_info = lvs_info.cache.get(self.name)
d891b6
+            if lv_info is None:
d891b6
+                log.error("Failed to get size for existing cache pool '%s'", self.name)
d891b6
+                return Size(0)
d891b6
+            else:
d891b6
+                return Size(lv_info.size)
d891b6
+
d891b6
+        return Size(0)
d891b6
+
d891b6
+    def autoset_md_size(self, enforced=False):
d891b6
+        """ If self._metadata_size not set already, it calculates the recommended value
d891b6
+        and sets it while subtracting the size from self.size.
d891b6
+
d891b6
+        """
d891b6
+
d891b6
+        log.debug("Auto-setting cache pool metadata size")
d891b6
+
d891b6
+        if self._size <= Size(0):
d891b6
+            log.debug("Cache pool size not bigger than 0, just setting metadata size to 0")
d891b6
+            self._metadata_size = 0
d891b6
+            return
d891b6
+
d891b6
+        old_md_size = self._metadata_size
d891b6
+        if self._metadata_size == 0 or enforced:
d891b6
+            self._metadata_size = blockdev.lvm.cache_get_default_md_size(self._size)
d891b6
+            log.debug("Using recommended metadata size: %s", self._metadata_size)
d891b6
+
d891b6
+        self._metadata_size = self.vg.align(self._metadata_size, roundup=True)
d891b6
+        log.debug("Rounded metadata size to extents: %s MiB", self._metadata_size.convert_to("MiB"))
d891b6
+
d891b6
+        if self._metadata_size == old_md_size:
d891b6
+            log.debug("Rounded metadata size unchanged")
d891b6
+        else:
d891b6
+            new_size = self.size - (self._metadata_size - old_md_size)
d891b6
+            log.debug("Adjusting size from %s MiB to %s MiB",
d891b6
+                      self.size.convert_to("MiB"), new_size.convert_to("MiB"))
d891b6
+            self.size = new_size
d891b6
+
d891b6
+    def _pre_create(self):
d891b6
+        # make sure all the LVs this LV should be created from exist (if any)
d891b6
+        if self._from_lvs and any(not lv.exists for lv in self._from_lvs):
d891b6
+            raise errors.DeviceError("Component LVs need to be created first")
d891b6
+
d891b6
+    def _create(self):
d891b6
+        """ Create the device. """
d891b6
+        log_method_call(self, self.name, status=self.status)
d891b6
+        if self._cache_mode:
d891b6
+            try:
d891b6
+                cache_mode = blockdev.lvm.cache_get_mode_from_str(self._cache_mode)
d891b6
+            except blockdev.LVMError as e:
d891b6
+                raise errors.DeviceError from e
d891b6
+        else:
d891b6
+            cache_mode = lvm.LVM_CACHE_DEFAULT_MODE
d891b6
+
d891b6
+        if self._from_lvs:
d891b6
+            extra = dict()
d891b6
+            if self.mode:
d891b6
+                # we need the string here, it will be passed directly to the lvm command
d891b6
+                extra["cachemode"] = self._cache_mode
d891b6
+            data_lv = six.next(lv for lv in self._internal_lvs if lv.int_lv_type == LVMInternalLVtype.data)
d891b6
+            meta_lv = six.next(lv for lv in self._internal_lvs if lv.int_lv_type == LVMInternalLVtype.meta)
d891b6
+            blockdev.lvm.cache_pool_convert(self.vg.name, data_lv.lvname, meta_lv.lvname, self.lvname, **extra)
d891b6
+        else:
d891b6
+            blockdev.lvm.cache_create_pool(self.vg.name, self.lvname, self.size,
d891b6
+                                           self.metadata_size,
d891b6
+                                           cache_mode,
d891b6
+                                           0,
d891b6
+                                           [spec.pv.path for spec in self._pv_specs])
d891b6
+
d891b6
+    def dracut_setup_args(self):
d891b6
+        return set()
d891b6
+
d891b6
+    @property
d891b6
+    def direct(self):
d891b6
+        """ Is this device directly accessible? """
d891b6
+        return False
d891b6
+
d891b6
+
d891b6
 class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin, LVMSnapshotMixin,
d891b6
                              LVMThinPoolMixin, LVMThinLogicalVolumeMixin, LVMVDOPoolMixin,
d891b6
-                             LVMVDOLogicalVolumeMixin):
d891b6
+                             LVMVDOLogicalVolumeMixin, LVMCachePoolMixin):
d891b6
     """ An LVM Logical Volume """
d891b6
 
d891b6
     # generally resizable, see :property:`resizable` for details
d891b6
@@ -2046,7 +2179,7 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
d891b6
                  parent_lv=None, int_type=None, origin=None, vorigin=False,
d891b6
                  metadata_size=None, chunk_size=None, profile=None, from_lvs=None,
d891b6
                  compression=False, deduplication=False, index_memory=0,
d891b6
-                 write_policy=None):
d891b6
+                 write_policy=None, cache_mode=None):
d891b6
         """
d891b6
             :param name: the device name (generally a device node's basename)
d891b6
             :type name: str
d891b6
@@ -2116,6 +2249,13 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
d891b6
             :keyword write_policy: write policy for the volume or None for default
d891b6
             :type write_policy: str
d891b6
 
d891b6
+            For cache pools only:
d891b6
+
d891b6
+            :keyword metadata_size: the size of the metadata LV
d891b6
+            :type metadata_size: :class:`~.size.Size`
d891b6
+            :keyword cache_mode: mode for the cache or None for default (writethrough)
d891b6
+            :type cache_mode: str
d891b6
+
d891b6
         """
d891b6
 
d891b6
         if isinstance(parents, (list, ParentList)):
d891b6
@@ -2133,6 +2273,7 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
d891b6
         LVMSnapshotMixin.__init__(self, origin, vorigin)
d891b6
         LVMThinPoolMixin.__init__(self, metadata_size, chunk_size, profile)
d891b6
         LVMThinLogicalVolumeMixin.__init__(self)
d891b6
+        LVMCachePoolMixin.__init__(self, metadata_size, cache_mode)
d891b6
         LVMLogicalVolumeBase.__init__(self, name, parents, size, uuid, seg_type,
d891b6
                                       fmt, exists, sysfs_path, grow, maxsize,
d891b6
                                       percent, cache_request, pvs, from_lvs)
d891b6
@@ -2144,6 +2285,7 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
d891b6
         LVMSnapshotMixin._init_check(self)
d891b6
         LVMThinPoolMixin._init_check(self)
d891b6
         LVMThinLogicalVolumeMixin._init_check(self)
d891b6
+        LVMCachePoolMixin._init_check(self)
d891b6
 
d891b6
         if self._from_lvs:
d891b6
             self._check_from_lvs()
d891b6
@@ -2169,6 +2311,8 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
d891b6
             ret.append(LVMVDOPoolMixin)
d891b6
         if self.is_vdo_lv:
d891b6
             ret.append(LVMVDOLogicalVolumeMixin)
d891b6
+        if self.is_cache_pool:
d891b6
+            ret.append(LVMCachePoolMixin)
d891b6
         return ret
d891b6
 
d891b6
     def _try_specific_call(self, name, *args, **kwargs):
d891b6
@@ -2552,6 +2696,10 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
d891b6
 
d891b6
         return True
d891b6
 
d891b6
+    @type_specific
d891b6
+    def autoset_md_size(self, enforced=False):
d891b6
+        pass
d891b6
+
d891b6
     def attach_cache(self, cache_pool_lv):
d891b6
         if self.is_thin_lv or self.is_snapshot_lv or self.is_internal_lv:
d891b6
             raise errors.DeviceError("Cannot attach a cache pool to the '%s' LV" % self.name)
d891b6
diff --git a/tests/devices_test/lvm_test.py b/tests/devices_test/lvm_test.py
d891b6
index 59c027da..0105bcae 100644
d891b6
--- a/tests/devices_test/lvm_test.py
d891b6
+++ b/tests/devices_test/lvm_test.py
d891b6
@@ -868,3 +868,29 @@ class BlivetLVMVDODependenciesTest(unittest.TestCase):
d891b6
 
d891b6
                 vdo_supported = devicefactory.is_supported_device_type(devicefactory.DEVICE_TYPE_LVM_VDO)
d891b6
                 self.assertFalse(vdo_supported)
d891b6
+
d891b6
+
d891b6
+@unittest.skipUnless(not any(x.unavailable_type_dependencies() for x in DEVICE_CLASSES), "some unsupported device classes required for this test")
d891b6
+class BlivetNewLVMCachePoolDeviceTest(unittest.TestCase):
d891b6
+
d891b6
+    def test_new_cache_pool(self):
d891b6
+        b = blivet.Blivet()
d891b6
+        pv = StorageDevice("pv1", fmt=blivet.formats.get_format("lvmpv"),
d891b6
+                           size=Size("10 GiB"), exists=True)
d891b6
+        vg = LVMVolumeGroupDevice("testvg", parents=[pv], exists=True)
d891b6
+
d891b6
+        for dev in (pv, vg):
d891b6
+            b.devicetree._add_device(dev)
d891b6
+
d891b6
+        # check that all the above devices are in the expected places
d891b6
+        self.assertEqual(set(b.devices), {pv, vg})
d891b6
+        self.assertEqual(set(b.vgs), {vg})
d891b6
+
d891b6
+        self.assertEqual(vg.size, Size("10236 MiB"))
d891b6
+
d891b6
+        cachepool = b.new_lv(name="cachepool", cache_pool=True,
d891b6
+                             parents=[vg], pvs=[pv])
d891b6
+
d891b6
+        b.create_device(cachepool)
d891b6
+
d891b6
+        self.assertEqual(cachepool.type, "lvmcachepool")
d891b6
-- 
d891b6
2.34.3
d891b6
d891b6
d891b6
From bfb0e71a92f46baae098370207640962c97d8e77 Mon Sep 17 00:00:00 2001
d891b6
From: Vojtech Trefny <vtrefny@redhat.com>
d891b6
Date: Thu, 30 Dec 2021 16:09:04 +0100
d891b6
Subject: [PATCH 2/4] examples: Add LVM cache pool example
d891b6
d891b6
Related: rhbz#2055200
d891b6
---
d891b6
 examples/lvm_cachepool.py | 59 +++++++++++++++++++++++++++++++++++++++
d891b6
 1 file changed, 59 insertions(+)
d891b6
 create mode 100644 examples/lvm_cachepool.py
d891b6
d891b6
diff --git a/examples/lvm_cachepool.py b/examples/lvm_cachepool.py
d891b6
new file mode 100644
d891b6
index 00000000..ab2e8a72
d891b6
--- /dev/null
d891b6
+++ b/examples/lvm_cachepool.py
d891b6
@@ -0,0 +1,59 @@
d891b6
+import os
d891b6
+
d891b6
+import blivet
d891b6
+from blivet.size import Size
d891b6
+from blivet.util import set_up_logging, create_sparse_tempfile
d891b6
+
d891b6
+
d891b6
+set_up_logging()
d891b6
+b = blivet.Blivet()   # create an instance of Blivet (don't add system devices)
d891b6
+
d891b6
+# create a disk image file on which to create new devices
d891b6
+disk1_file = create_sparse_tempfile("disk1", Size("100GiB"))
d891b6
+b.disk_images["disk1"] = disk1_file
d891b6
+disk2_file = create_sparse_tempfile("disk2", Size("100GiB"))
d891b6
+b.disk_images["disk2"] = disk2_file
d891b6
+
d891b6
+b.reset()
d891b6
+
d891b6
+try:
d891b6
+    disk1 = b.devicetree.get_device_by_name("disk1")
d891b6
+    disk2 = b.devicetree.get_device_by_name("disk2")
d891b6
+
d891b6
+    b.initialize_disk(disk1)
d891b6
+    b.initialize_disk(disk2)
d891b6
+
d891b6
+    pv = b.new_partition(size=Size("50GiB"), fmt_type="lvmpv", parents=[disk1])
d891b6
+    b.create_device(pv)
d891b6
+    pv2 = b.new_partition(size=Size("50GiB"), fmt_type="lvmpv", parents=[disk2])
d891b6
+    b.create_device(pv2)
d891b6
+
d891b6
+    # allocate the partitions (decide where and on which disks they'll reside)
d891b6
+    blivet.partitioning.do_partitioning(b)
d891b6
+
d891b6
+    vg = b.new_vg(parents=[pv, pv2])
d891b6
+    b.create_device(vg)
d891b6
+
d891b6
+    # new lv with base size 5GiB and growth up to 15GiB and an ext4 filesystem
d891b6
+    lv = b.new_lv(fmt_type="ext4", size=Size("5GiB"), parents=[vg], name="cached")
d891b6
+    b.create_device(lv)
d891b6
+
d891b6
+    # new cache pool
d891b6
+    cpool = b.new_lv(size=Size("1 GiB"), parents=[vg], pvs=[pv2], cache_pool=True, name="fastlv")
d891b6
+    b.create_device(cpool)
d891b6
+
d891b6
+    # write the new partitions to disk and format them as specified
d891b6
+    b.do_it()
d891b6
+    print(b.devicetree)
d891b6
+
d891b6
+    # attach the newly created cache pool to the "slow" LV
d891b6
+    lv.attach_cache(cpool)
d891b6
+
d891b6
+    b.reset()
d891b6
+    print(b.devicetree)
d891b6
+
d891b6
+    input("Check the state and hit ENTER to trigger cleanup")
d891b6
+finally:
d891b6
+    b.devicetree.teardown_disk_images()
d891b6
+    os.unlink(disk1_file)
d891b6
+    os.unlink(disk2_file)
d891b6
-- 
d891b6
2.34.3
d891b6
d891b6
d891b6
From 1fece0e7f15f7b0d457d3db876d23c3272df09bd Mon Sep 17 00:00:00 2001
d891b6
From: Vojtech Trefny <vtrefny@redhat.com>
d891b6
Date: Thu, 30 Dec 2021 16:13:33 +0100
d891b6
Subject: [PATCH 3/4] lvm: Use blivet static data when checking if the VG is
d891b6
 active
d891b6
d891b6
Instead of calling 'lvs' again in LVMVolumeGroupDevice.status
d891b6
d891b6
Related: rhbz#2055200
d891b6
---
d891b6
 blivet/devices/lvm.py | 9 ++-------
d891b6
 1 file changed, 2 insertions(+), 7 deletions(-)
d891b6
d891b6
diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
d891b6
index 7cb482ab..12d3d073 100644
d891b6
--- a/blivet/devices/lvm.py
d891b6
+++ b/blivet/devices/lvm.py
d891b6
@@ -220,13 +220,8 @@ class LVMVolumeGroupDevice(ContainerDevice):
d891b6
 
d891b6
         # special handling for incomplete VGs
d891b6
         if not self.complete:
d891b6
-            try:
d891b6
-                lvs_info = blockdev.lvm.lvs(vg_name=self.name)
d891b6
-            except blockdev.LVMError:
d891b6
-                lvs_info = []
d891b6
-
d891b6
-            for lv_info in lvs_info:
d891b6
-                if lv_info.attr and lv_info.attr[4] == 'a':
d891b6
+            for lv_info in lvs_info.cache.values():
d891b6
+                if lv_info.vg_name == self.name and lv_info.attr and lv_info.attr[4] == 'a':
d891b6
                     return True
d891b6
 
d891b6
             return False
d891b6
-- 
d891b6
2.34.3
d891b6
d891b6
d891b6
From 8d957f04c2d5f56386b978d1bf890450f38ad108 Mon Sep 17 00:00:00 2001
d891b6
From: Vojtech Trefny <vtrefny@redhat.com>
d891b6
Date: Mon, 30 May 2022 17:02:43 +0200
d891b6
Subject: [PATCH 4/4] Add option to attach a newly created cache pool to
d891b6
 existing LV
d891b6
d891b6
Because we do not have action for attaching the cache pool, we
d891b6
cannot schedule both adding the fast PV to the VG and attaching
d891b6
the cache pool to existing LV. This hack allows to schedule the
d891b6
attach to happen after the cache pool is created.
d891b6
d891b6
Related: rhbz#2055200
d891b6
---
d891b6
 blivet/devices/lvm.py | 38 +++++++++++++++++++++++++++++++++++---
d891b6
 1 file changed, 35 insertions(+), 3 deletions(-)
d891b6
d891b6
diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
d891b6
index 12d3d073..feb92f2e 100644
d891b6
--- a/blivet/devices/lvm.py
d891b6
+++ b/blivet/devices/lvm.py
d891b6
@@ -2028,9 +2028,10 @@ class LVMVDOLogicalVolumeMixin(object):
d891b6
 
d891b6
 
d891b6
 class LVMCachePoolMixin(object):
d891b6
-    def __init__(self, metadata_size, cache_mode=None):
d891b6
+    def __init__(self, metadata_size, cache_mode=None, attach_to=None):
d891b6
         self._metadata_size = metadata_size or Size(0)
d891b6
         self._cache_mode = cache_mode
d891b6
+        self._attach_to = attach_to
d891b6
 
d891b6
     def _init_check(self):
d891b6
         if not self.is_cache_pool:
d891b6
@@ -2042,6 +2043,9 @@ class LVMCachePoolMixin(object):
d891b6
         if not self.exists and not self._pv_specs:
d891b6
             raise ValueError("at least one fast PV must be specified to create a cache pool")
d891b6
 
d891b6
+        if self._attach_to and not self._attach_to.exists:
d891b6
+            raise ValueError("cache pool can be attached only to an existing LV")
d891b6
+
d891b6
     def _check_from_lvs(self):
d891b6
         if self._from_lvs:
d891b6
             if len(self._from_lvs) != 2:
d891b6
@@ -2150,6 +2154,31 @@ class LVMCachePoolMixin(object):
d891b6
                                            cache_mode,
d891b6
                                            0,
d891b6
                                            [spec.pv.path for spec in self._pv_specs])
d891b6
+        if self._attach_to:
d891b6
+            self._attach_to.attach_cache(self)
d891b6
+
d891b6
+    def _post_create(self):
d891b6
+        if self._attach_to:
d891b6
+            # post_create tries to activate the LV and after attaching it no longer exists
d891b6
+            return
d891b6
+
d891b6
+        # pylint: disable=bad-super-call
d891b6
+        super(LVMLogicalVolumeBase, self)._post_create()
d891b6
+
d891b6
+    def add_hook(self, new=True):
d891b6
+        if self._attach_to:
d891b6
+            self._attach_to._cache = LVMCache(self._attach_to, size=self.size, exists=False,
d891b6
+                                              pvs=self._pv_specs, mode=self._cache_mode)
d891b6
+
d891b6
+        # pylint: disable=bad-super-call
d891b6
+        super(LVMLogicalVolumeBase, self).add_hook(new=new)
d891b6
+
d891b6
+    def remove_hook(self, modparent=True):
d891b6
+        if self._attach_to:
d891b6
+            self._attach_to._cache = None
d891b6
+
d891b6
+        # pylint: disable=bad-super-call
d891b6
+        super(LVMLogicalVolumeBase, self).remove_hook(modparent=modparent)
d891b6
 
d891b6
     def dracut_setup_args(self):
d891b6
         return set()
d891b6
@@ -2174,7 +2203,7 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
d891b6
                  parent_lv=None, int_type=None, origin=None, vorigin=False,
d891b6
                  metadata_size=None, chunk_size=None, profile=None, from_lvs=None,
d891b6
                  compression=False, deduplication=False, index_memory=0,
d891b6
-                 write_policy=None, cache_mode=None):
d891b6
+                 write_policy=None, cache_mode=None, attach_to=None):
d891b6
         """
d891b6
             :param name: the device name (generally a device node's basename)
d891b6
             :type name: str
d891b6
@@ -2250,6 +2279,9 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
d891b6
             :type metadata_size: :class:`~.size.Size`
d891b6
             :keyword cache_mode: mode for the cache or None for default (writethrough)
d891b6
             :type cache_mode: str
d891b6
+            :keyword attach_to: for non-existing cache pools a logical volume the pool should
d891b6
+                                be attached to when created
d891b6
+            :type attach_to: :class:`LVMLogicalVolumeDevice`
d891b6
 
d891b6
         """
d891b6
 
d891b6
@@ -2268,7 +2300,7 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
d891b6
         LVMSnapshotMixin.__init__(self, origin, vorigin)
d891b6
         LVMThinPoolMixin.__init__(self, metadata_size, chunk_size, profile)
d891b6
         LVMThinLogicalVolumeMixin.__init__(self)
d891b6
-        LVMCachePoolMixin.__init__(self, metadata_size, cache_mode)
d891b6
+        LVMCachePoolMixin.__init__(self, metadata_size, cache_mode, attach_to)
d891b6
         LVMLogicalVolumeBase.__init__(self, name, parents, size, uuid, seg_type,
d891b6
                                       fmt, exists, sysfs_path, grow, maxsize,
d891b6
                                       percent, cache_request, pvs, from_lvs)
d891b6
-- 
d891b6
2.34.3
d891b6