1c4b60
From 08f0e12c74e4c2ba25629fe92108283dd5ae3ff3 Mon Sep 17 00:00:00 2001
1c4b60
From: Vojtech Trefny <vtrefny@redhat.com>
1c4b60
Date: Thu, 30 Dec 2021 16:08:43 +0100
1c4b60
Subject: [PATCH 1/4] Add support for creating LVM cache pools
1c4b60
1c4b60
Resolves: rhbz#2055200
1c4b60
---
1c4b60
 blivet/blivet.py               |   9 +-
1c4b60
 blivet/devicelibs/lvm.py       |   9 ++
1c4b60
 blivet/devices/lvm.py          | 160 +++++++++++++++++++++++++++++++--
1c4b60
 tests/devices_test/lvm_test.py |  26 ++++++
1c4b60
 4 files changed, 196 insertions(+), 8 deletions(-)
1c4b60
1c4b60
diff --git a/blivet/blivet.py b/blivet/blivet.py
1c4b60
index c6908eb0..d29fadd0 100644
1c4b60
--- a/blivet/blivet.py
1c4b60
+++ b/blivet/blivet.py
1c4b60
@@ -576,6 +576,8 @@ class Blivet(object):
1c4b60
             :type vdo_pool: bool
1c4b60
             :keyword vdo_lv: whether to create a vdo lv
1c4b60
             :type vdo_lv: bool
1c4b60
+            :keyword cache_pool: whether to create a cache pool
1c4b60
+            :type cache_pool: bool
1c4b60
             :returns: the new device
1c4b60
             :rtype: :class:`~.devices.LVMLogicalVolumeDevice`
1c4b60
 
1c4b60
@@ -594,6 +596,7 @@ class Blivet(object):
1c4b60
         thin_pool = kwargs.pop("thin_pool", False)
1c4b60
         vdo_pool = kwargs.pop("vdo_pool", False)
1c4b60
         vdo_lv = kwargs.pop("vdo_lv", False)
1c4b60
+        cache_pool = kwargs.pop("cache_pool", False)
1c4b60
         parent = kwargs.get("parents", [None])[0]
1c4b60
         if (thin_volume or vdo_lv) and parent:
1c4b60
             # kwargs["parents"] will contain the pool device, so...
1c4b60
@@ -609,6 +612,8 @@ class Blivet(object):
1c4b60
             kwargs["seg_type"] = "vdo-pool"
1c4b60
         if vdo_lv:
1c4b60
             kwargs["seg_type"] = "vdo"
1c4b60
+        if cache_pool:
1c4b60
+            kwargs["seg_type"] = "cache-pool"
1c4b60
 
1c4b60
         mountpoint = kwargs.pop("mountpoint", None)
1c4b60
         if 'fmt_type' in kwargs:
1c4b60
@@ -640,7 +645,7 @@ class Blivet(object):
1c4b60
                 swap = False
1c4b60
 
1c4b60
             prefix = ""
1c4b60
-            if thin_pool or vdo_pool:
1c4b60
+            if thin_pool or vdo_pool or cache_pool:
1c4b60
                 prefix = "pool"
1c4b60
 
1c4b60
             name = self.suggest_device_name(parent=vg,
1c4b60
@@ -651,7 +656,7 @@ class Blivet(object):
1c4b60
         if "%s-%s" % (vg.name, name) in self.names:
1c4b60
             raise ValueError("name '%s' is already in use" % name)
1c4b60
 
1c4b60
-        if thin_pool or thin_volume or vdo_pool or vdo_lv:
1c4b60
+        if thin_pool or thin_volume or vdo_pool or vdo_lv or cache_pool:
1c4b60
             cache_req = kwargs.pop("cache_request", None)
1c4b60
             if cache_req:
1c4b60
                 raise ValueError("Creating cached thin and VDO volumes and pools is not supported")
1c4b60
diff --git a/blivet/devicelibs/lvm.py b/blivet/devicelibs/lvm.py
1c4b60
index bbde6303..23935009 100644
1c4b60
--- a/blivet/devicelibs/lvm.py
1c4b60
+++ b/blivet/devicelibs/lvm.py
1c4b60
@@ -54,6 +54,11 @@ LVM_THINP_MIN_CHUNK_SIZE = Size("64 KiB")
1c4b60
 LVM_THINP_MAX_CHUNK_SIZE = Size("1 GiB")
1c4b60
 LVM_THINP_ADDRESSABLE_CHUNK_SIZE = Size("17455015526400 B")  # 15.88 TiB
1c4b60
 
1c4b60
+# cache constants
1c4b60
+LVM_CACHE_MIN_METADATA_SIZE = Size("8 MiB")
1c4b60
+LVM_CACHE_MAX_METADATA_SIZE = Size("16 GiB")
1c4b60
+LVM_CACHE_DEFAULT_MODE = blockdev.LVMCacheMode.WRITETHROUGH
1c4b60
+
1c4b60
 raid_levels = raid.RAIDLevels(["linear", "striped", "raid1", "raid4", "raid5", "raid6", "raid10"])
1c4b60
 raid_seg_types = list(itertools.chain.from_iterable([level.names for level in raid_levels if level.name != "linear"]))
1c4b60
 
1c4b60
@@ -248,3 +253,7 @@ def recommend_thpool_chunk_size(thpool_size):
1c4b60
     # for every ~15.88 TiB of thinpool data size
1c4b60
     return min(math.ceil(thpool_size / LVM_THINP_ADDRESSABLE_CHUNK_SIZE) * LVM_THINP_MIN_CHUNK_SIZE,
1c4b60
                LVM_THINP_MAX_CHUNK_SIZE)
1c4b60
+
1c4b60
+
1c4b60
+def is_valid_cache_md_size(md_size):
1c4b60
+    return md_size >= LVM_CACHE_MIN_METADATA_SIZE and md_size <= LVM_CACHE_MAX_METADATA_SIZE
1c4b60
diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
1c4b60
index a971da8e..7cb482ab 100644
1c4b60
--- a/blivet/devices/lvm.py
1c4b60
+++ b/blivet/devices/lvm.py
1c4b60
@@ -43,6 +43,7 @@ from .. import util
1c4b60
 from ..storage_log import log_method_call
1c4b60
 from .. import udev
1c4b60
 from ..size import Size, KiB, MiB, ROUND_UP, ROUND_DOWN
1c4b60
+from ..static_data.lvm_info import lvs_info
1c4b60
 from ..tasks import availability
1c4b60
 
1c4b60
 import logging
1c4b60
@@ -646,7 +647,7 @@ class LVMLogicalVolumeBase(DMDevice, RaidDevice):
1c4b60
                  percent=None, cache_request=None, pvs=None, from_lvs=None):
1c4b60
 
1c4b60
         if not exists:
1c4b60
-            if seg_type not in [None, "linear", "thin", "thin-pool", "cache", "vdo-pool", "vdo"] + lvm.raid_seg_types:
1c4b60
+            if seg_type not in [None, "linear", "thin", "thin-pool", "cache", "vdo-pool", "vdo", "cache-pool"] + lvm.raid_seg_types:
1c4b60
                 raise ValueError("Invalid or unsupported segment type: %s" % seg_type)
1c4b60
             if seg_type and seg_type in lvm.raid_seg_types and not pvs:
1c4b60
                 raise ValueError("List of PVs has to be given for every non-linear LV")
1c4b60
@@ -690,8 +691,8 @@ class LVMLogicalVolumeBase(DMDevice, RaidDevice):
1c4b60
             # we reserve space for it
1c4b60
             self._metadata_size = self.vg.pe_size
1c4b60
             self._size -= self._metadata_size
1c4b60
-        elif self.seg_type == "thin-pool":
1c4b60
-            # LVMThinPoolMixin sets self._metadata_size on its own
1c4b60
+        elif self.seg_type in ("thin-pool", "cache-pool"):
1c4b60
+            # LVMThinPoolMixin and LVMCachePoolMixin set self._metadata_size on their own
1c4b60
             if not self.exists and not from_lvs and not grow:
1c4b60
                 # a thin pool we are not going to grow -> lets calculate metadata
1c4b60
                 # size now if not given explicitly
1c4b60
@@ -1619,7 +1620,6 @@ class LVMThinPoolMixin(object):
1c4b60
         """ A list of this pool's LVs """
1c4b60
         return self._lvs[:]     # we don't want folks changing our list
1c4b60
 
1c4b60
-    @util.requires_property("is_thin_pool")
1c4b60
     def autoset_md_size(self, enforced=False):
1c4b60
         """ If self._metadata_size not set already, it calculates the recommended value
1c4b60
         and sets it while subtracting the size from self.size.
1c4b60
@@ -2032,9 +2032,142 @@ class LVMVDOLogicalVolumeMixin(object):
1c4b60
             self.pool._add_log_vol(self)
1c4b60
 
1c4b60
 
1c4b60
+class LVMCachePoolMixin(object):
1c4b60
+    def __init__(self, metadata_size, cache_mode=None):
1c4b60
+        self._metadata_size = metadata_size or Size(0)
1c4b60
+        self._cache_mode = cache_mode
1c4b60
+
1c4b60
+    def _init_check(self):
1c4b60
+        if not self.is_cache_pool:
1c4b60
+            return
1c4b60
+
1c4b60
+        if self._metadata_size and not lvm.is_valid_cache_md_size(self._metadata_size):
1c4b60
+            raise ValueError("invalid metadatasize value")
1c4b60
+
1c4b60
+        if not self.exists and not self._pv_specs:
1c4b60
+            raise ValueError("at least one fast PV must be specified to create a cache pool")
1c4b60
+
1c4b60
+    def _check_from_lvs(self):
1c4b60
+        if self._from_lvs:
1c4b60
+            if len(self._from_lvs) != 2:
1c4b60
+                raise errors.DeviceError("two LVs required to create a cache pool")
1c4b60
+
1c4b60
+    def _convert_from_lvs(self):
1c4b60
+        data_lv, metadata_lv = self._from_lvs
1c4b60
+
1c4b60
+        data_lv.parent_lv = self  # also adds the LV to self._internal_lvs
1c4b60
+        data_lv.int_lv_type = LVMInternalLVtype.data
1c4b60
+        metadata_lv.parent_lv = self
1c4b60
+        metadata_lv.int_lv_type = LVMInternalLVtype.meta
1c4b60
+
1c4b60
+        self.size = data_lv.size
1c4b60
+
1c4b60
+    @property
1c4b60
+    def is_cache_pool(self):
1c4b60
+        return self.seg_type == "cache-pool"
1c4b60
+
1c4b60
+    @property
1c4b60
+    def profile(self):
1c4b60
+        return self._profile
1c4b60
+
1c4b60
+    @property
1c4b60
+    def type(self):
1c4b60
+        return "lvmcachepool"
1c4b60
+
1c4b60
+    @property
1c4b60
+    def resizable(self):
1c4b60
+        return False
1c4b60
+
1c4b60
+    def read_current_size(self):
1c4b60
+        log_method_call(self, exists=self.exists, path=self.path,
1c4b60
+                        sysfs_path=self.sysfs_path)
1c4b60
+        if self.size != Size(0):
1c4b60
+            return self.size
1c4b60
+
1c4b60
+        if self.exists:
1c4b60
+            # cache pools are not active and don't have the device mapper mapping
1c4b60
+            # so we can't get this from sysfs
1c4b60
+            lv_info = lvs_info.cache.get(self.name)
1c4b60
+            if lv_info is None:
1c4b60
+                log.error("Failed to get size for existing cache pool '%s'", self.name)
1c4b60
+                return Size(0)
1c4b60
+            else:
1c4b60
+                return Size(lv_info.size)
1c4b60
+
1c4b60
+        return Size(0)
1c4b60
+
1c4b60
+    def autoset_md_size(self, enforced=False):
1c4b60
+        """ If self._metadata_size not set already, it calculates the recommended value
1c4b60
+        and sets it while subtracting the size from self.size.
1c4b60
+
1c4b60
+        """
1c4b60
+
1c4b60
+        log.debug("Auto-setting cache pool metadata size")
1c4b60
+
1c4b60
+        if self._size <= Size(0):
1c4b60
+            log.debug("Cache pool size not bigger than 0, just setting metadata size to 0")
1c4b60
+            self._metadata_size = 0
1c4b60
+            return
1c4b60
+
1c4b60
+        old_md_size = self._metadata_size
1c4b60
+        if self._metadata_size == 0 or enforced:
1c4b60
+            self._metadata_size = blockdev.lvm.cache_get_default_md_size(self._size)
1c4b60
+            log.debug("Using recommended metadata size: %s", self._metadata_size)
1c4b60
+
1c4b60
+        self._metadata_size = self.vg.align(self._metadata_size, roundup=True)
1c4b60
+        log.debug("Rounded metadata size to extents: %s MiB", self._metadata_size.convert_to("MiB"))
1c4b60
+
1c4b60
+        if self._metadata_size == old_md_size:
1c4b60
+            log.debug("Rounded metadata size unchanged")
1c4b60
+        else:
1c4b60
+            new_size = self.size - (self._metadata_size - old_md_size)
1c4b60
+            log.debug("Adjusting size from %s MiB to %s MiB",
1c4b60
+                      self.size.convert_to("MiB"), new_size.convert_to("MiB"))
1c4b60
+            self.size = new_size
1c4b60
+
1c4b60
+    def _pre_create(self):
1c4b60
+        # make sure all the LVs this LV should be created from exist (if any)
1c4b60
+        if self._from_lvs and any(not lv.exists for lv in self._from_lvs):
1c4b60
+            raise errors.DeviceError("Component LVs need to be created first")
1c4b60
+
1c4b60
+    def _create(self):
1c4b60
+        """ Create the device. """
1c4b60
+        log_method_call(self, self.name, status=self.status)
1c4b60
+        if self._cache_mode:
1c4b60
+            try:
1c4b60
+                cache_mode = blockdev.lvm.cache_get_mode_from_str(self._cache_mode)
1c4b60
+            except blockdev.LVMError as e:
1c4b60
+                raise errors.DeviceError from e
1c4b60
+        else:
1c4b60
+            cache_mode = lvm.LVM_CACHE_DEFAULT_MODE
1c4b60
+
1c4b60
+        if self._from_lvs:
1c4b60
+            extra = dict()
1c4b60
+            if self.mode:
1c4b60
+                # we need the string here, it will be passed directly to the lvm command
1c4b60
+                extra["cachemode"] = self._cache_mode
1c4b60
+            data_lv = six.next(lv for lv in self._internal_lvs if lv.int_lv_type == LVMInternalLVtype.data)
1c4b60
+            meta_lv = six.next(lv for lv in self._internal_lvs if lv.int_lv_type == LVMInternalLVtype.meta)
1c4b60
+            blockdev.lvm.cache_pool_convert(self.vg.name, data_lv.lvname, meta_lv.lvname, self.lvname, **extra)
1c4b60
+        else:
1c4b60
+            blockdev.lvm.cache_create_pool(self.vg.name, self.lvname, self.size,
1c4b60
+                                           self.metadata_size,
1c4b60
+                                           cache_mode,
1c4b60
+                                           0,
1c4b60
+                                           [spec.pv.path for spec in self._pv_specs])
1c4b60
+
1c4b60
+    def dracut_setup_args(self):
1c4b60
+        return set()
1c4b60
+
1c4b60
+    @property
1c4b60
+    def direct(self):
1c4b60
+        """ Is this device directly accessible? """
1c4b60
+        return False
1c4b60
+
1c4b60
+
1c4b60
 class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin, LVMSnapshotMixin,
1c4b60
                              LVMThinPoolMixin, LVMThinLogicalVolumeMixin, LVMVDOPoolMixin,
1c4b60
-                             LVMVDOLogicalVolumeMixin):
1c4b60
+                             LVMVDOLogicalVolumeMixin, LVMCachePoolMixin):
1c4b60
     """ An LVM Logical Volume """
1c4b60
 
1c4b60
     # generally resizable, see :property:`resizable` for details
1c4b60
@@ -2046,7 +2179,7 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
1c4b60
                  parent_lv=None, int_type=None, origin=None, vorigin=False,
1c4b60
                  metadata_size=None, chunk_size=None, profile=None, from_lvs=None,
1c4b60
                  compression=False, deduplication=False, index_memory=0,
1c4b60
-                 write_policy=None):
1c4b60
+                 write_policy=None, cache_mode=None):
1c4b60
         """
1c4b60
             :param name: the device name (generally a device node's basename)
1c4b60
             :type name: str
1c4b60
@@ -2116,6 +2249,13 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
1c4b60
             :keyword write_policy: write policy for the volume or None for default
1c4b60
             :type write_policy: str
1c4b60
 
1c4b60
+            For cache pools only:
1c4b60
+
1c4b60
+            :keyword metadata_size: the size of the metadata LV
1c4b60
+            :type metadata_size: :class:`~.size.Size`
1c4b60
+            :keyword cache_mode: mode for the cache or None for default (writethrough)
1c4b60
+            :type cache_mode: str
1c4b60
+
1c4b60
         """
1c4b60
 
1c4b60
         if isinstance(parents, (list, ParentList)):
1c4b60
@@ -2133,6 +2273,7 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
1c4b60
         LVMSnapshotMixin.__init__(self, origin, vorigin)
1c4b60
         LVMThinPoolMixin.__init__(self, metadata_size, chunk_size, profile)
1c4b60
         LVMThinLogicalVolumeMixin.__init__(self)
1c4b60
+        LVMCachePoolMixin.__init__(self, metadata_size, cache_mode)
1c4b60
         LVMLogicalVolumeBase.__init__(self, name, parents, size, uuid, seg_type,
1c4b60
                                       fmt, exists, sysfs_path, grow, maxsize,
1c4b60
                                       percent, cache_request, pvs, from_lvs)
1c4b60
@@ -2144,6 +2285,7 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
1c4b60
         LVMSnapshotMixin._init_check(self)
1c4b60
         LVMThinPoolMixin._init_check(self)
1c4b60
         LVMThinLogicalVolumeMixin._init_check(self)
1c4b60
+        LVMCachePoolMixin._init_check(self)
1c4b60
 
1c4b60
         if self._from_lvs:
1c4b60
             self._check_from_lvs()
1c4b60
@@ -2169,6 +2311,8 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
1c4b60
             ret.append(LVMVDOPoolMixin)
1c4b60
         if self.is_vdo_lv:
1c4b60
             ret.append(LVMVDOLogicalVolumeMixin)
1c4b60
+        if self.is_cache_pool:
1c4b60
+            ret.append(LVMCachePoolMixin)
1c4b60
         return ret
1c4b60
 
1c4b60
     def _try_specific_call(self, name, *args, **kwargs):
1c4b60
@@ -2552,6 +2696,10 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
1c4b60
 
1c4b60
         return True
1c4b60
 
1c4b60
+    @type_specific
1c4b60
+    def autoset_md_size(self, enforced=False):
1c4b60
+        pass
1c4b60
+
1c4b60
     def attach_cache(self, cache_pool_lv):
1c4b60
         if self.is_thin_lv or self.is_snapshot_lv or self.is_internal_lv:
1c4b60
             raise errors.DeviceError("Cannot attach a cache pool to the '%s' LV" % self.name)
1c4b60
diff --git a/tests/devices_test/lvm_test.py b/tests/devices_test/lvm_test.py
1c4b60
index 59c027da..0105bcae 100644
1c4b60
--- a/tests/devices_test/lvm_test.py
1c4b60
+++ b/tests/devices_test/lvm_test.py
1c4b60
@@ -868,3 +868,29 @@ class BlivetLVMVDODependenciesTest(unittest.TestCase):
1c4b60
 
1c4b60
                 vdo_supported = devicefactory.is_supported_device_type(devicefactory.DEVICE_TYPE_LVM_VDO)
1c4b60
                 self.assertFalse(vdo_supported)
1c4b60
+
1c4b60
+
1c4b60
+@unittest.skipUnless(not any(x.unavailable_type_dependencies() for x in DEVICE_CLASSES), "some unsupported device classes required for this test")
1c4b60
+class BlivetNewLVMCachePoolDeviceTest(unittest.TestCase):
1c4b60
+
1c4b60
+    def test_new_cache_pool(self):
1c4b60
+        b = blivet.Blivet()
1c4b60
+        pv = StorageDevice("pv1", fmt=blivet.formats.get_format("lvmpv"),
1c4b60
+                           size=Size("10 GiB"), exists=True)
1c4b60
+        vg = LVMVolumeGroupDevice("testvg", parents=[pv], exists=True)
1c4b60
+
1c4b60
+        for dev in (pv, vg):
1c4b60
+            b.devicetree._add_device(dev)
1c4b60
+
1c4b60
+        # check that all the above devices are in the expected places
1c4b60
+        self.assertEqual(set(b.devices), {pv, vg})
1c4b60
+        self.assertEqual(set(b.vgs), {vg})
1c4b60
+
1c4b60
+        self.assertEqual(vg.size, Size("10236 MiB"))
1c4b60
+
1c4b60
+        cachepool = b.new_lv(name="cachepool", cache_pool=True,
1c4b60
+                             parents=[vg], pvs=[pv])
1c4b60
+
1c4b60
+        b.create_device(cachepool)
1c4b60
+
1c4b60
+        self.assertEqual(cachepool.type, "lvmcachepool")
1c4b60
-- 
1c4b60
2.34.3
1c4b60
1c4b60
1c4b60
From bfb0e71a92f46baae098370207640962c97d8e77 Mon Sep 17 00:00:00 2001
1c4b60
From: Vojtech Trefny <vtrefny@redhat.com>
1c4b60
Date: Thu, 30 Dec 2021 16:09:04 +0100
1c4b60
Subject: [PATCH 2/4] examples: Add LVM cache pool example
1c4b60
1c4b60
Related: rhbz#2055200
1c4b60
---
1c4b60
 examples/lvm_cachepool.py | 59 +++++++++++++++++++++++++++++++++++++++
1c4b60
 1 file changed, 59 insertions(+)
1c4b60
 create mode 100644 examples/lvm_cachepool.py
1c4b60
1c4b60
diff --git a/examples/lvm_cachepool.py b/examples/lvm_cachepool.py
1c4b60
new file mode 100644
1c4b60
index 00000000..ab2e8a72
1c4b60
--- /dev/null
1c4b60
+++ b/examples/lvm_cachepool.py
1c4b60
@@ -0,0 +1,59 @@
1c4b60
+import os
1c4b60
+
1c4b60
+import blivet
1c4b60
+from blivet.size import Size
1c4b60
+from blivet.util import set_up_logging, create_sparse_tempfile
1c4b60
+
1c4b60
+
1c4b60
+set_up_logging()
1c4b60
+b = blivet.Blivet()   # create an instance of Blivet (don't add system devices)
1c4b60
+
1c4b60
+# create a disk image file on which to create new devices
1c4b60
+disk1_file = create_sparse_tempfile("disk1", Size("100GiB"))
1c4b60
+b.disk_images["disk1"] = disk1_file
1c4b60
+disk2_file = create_sparse_tempfile("disk2", Size("100GiB"))
1c4b60
+b.disk_images["disk2"] = disk2_file
1c4b60
+
1c4b60
+b.reset()
1c4b60
+
1c4b60
+try:
1c4b60
+    disk1 = b.devicetree.get_device_by_name("disk1")
1c4b60
+    disk2 = b.devicetree.get_device_by_name("disk2")
1c4b60
+
1c4b60
+    b.initialize_disk(disk1)
1c4b60
+    b.initialize_disk(disk2)
1c4b60
+
1c4b60
+    pv = b.new_partition(size=Size("50GiB"), fmt_type="lvmpv", parents=[disk1])
1c4b60
+    b.create_device(pv)
1c4b60
+    pv2 = b.new_partition(size=Size("50GiB"), fmt_type="lvmpv", parents=[disk2])
1c4b60
+    b.create_device(pv2)
1c4b60
+
1c4b60
+    # allocate the partitions (decide where and on which disks they'll reside)
1c4b60
+    blivet.partitioning.do_partitioning(b)
1c4b60
+
1c4b60
+    vg = b.new_vg(parents=[pv, pv2])
1c4b60
+    b.create_device(vg)
1c4b60
+
1c4b60
+    # new lv with base size 5GiB and growth up to 15GiB and an ext4 filesystem
1c4b60
+    lv = b.new_lv(fmt_type="ext4", size=Size("5GiB"), parents=[vg], name="cached")
1c4b60
+    b.create_device(lv)
1c4b60
+
1c4b60
+    # new cache pool
1c4b60
+    cpool = b.new_lv(size=Size("1 GiB"), parents=[vg], pvs=[pv2], cache_pool=True, name="fastlv")
1c4b60
+    b.create_device(cpool)
1c4b60
+
1c4b60
+    # write the new partitions to disk and format them as specified
1c4b60
+    b.do_it()
1c4b60
+    print(b.devicetree)
1c4b60
+
1c4b60
+    # attach the newly created cache pool to the "slow" LV
1c4b60
+    lv.attach_cache(cpool)
1c4b60
+
1c4b60
+    b.reset()
1c4b60
+    print(b.devicetree)
1c4b60
+
1c4b60
+    input("Check the state and hit ENTER to trigger cleanup")
1c4b60
+finally:
1c4b60
+    b.devicetree.teardown_disk_images()
1c4b60
+    os.unlink(disk1_file)
1c4b60
+    os.unlink(disk2_file)
1c4b60
-- 
1c4b60
2.34.3
1c4b60
1c4b60
1c4b60
From 1fece0e7f15f7b0d457d3db876d23c3272df09bd Mon Sep 17 00:00:00 2001
1c4b60
From: Vojtech Trefny <vtrefny@redhat.com>
1c4b60
Date: Thu, 30 Dec 2021 16:13:33 +0100
1c4b60
Subject: [PATCH 3/4] lvm: Use blivet static data when checking if the VG is
1c4b60
 active
1c4b60
1c4b60
Instead of calling 'lvs' again in LVMVolumeGroupDevice.status
1c4b60
1c4b60
Related: rhbz#2055200
1c4b60
---
1c4b60
 blivet/devices/lvm.py | 9 ++-------
1c4b60
 1 file changed, 2 insertions(+), 7 deletions(-)
1c4b60
1c4b60
diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
1c4b60
index 7cb482ab..12d3d073 100644
1c4b60
--- a/blivet/devices/lvm.py
1c4b60
+++ b/blivet/devices/lvm.py
1c4b60
@@ -220,13 +220,8 @@ class LVMVolumeGroupDevice(ContainerDevice):
1c4b60
 
1c4b60
         # special handling for incomplete VGs
1c4b60
         if not self.complete:
1c4b60
-            try:
1c4b60
-                lvs_info = blockdev.lvm.lvs(vg_name=self.name)
1c4b60
-            except blockdev.LVMError:
1c4b60
-                lvs_info = []
1c4b60
-
1c4b60
-            for lv_info in lvs_info:
1c4b60
-                if lv_info.attr and lv_info.attr[4] == 'a':
1c4b60
+            for lv_info in lvs_info.cache.values():
1c4b60
+                if lv_info.vg_name == self.name and lv_info.attr and lv_info.attr[4] == 'a':
1c4b60
                     return True
1c4b60
 
1c4b60
             return False
1c4b60
-- 
1c4b60
2.34.3
1c4b60
1c4b60
1c4b60
From 8d957f04c2d5f56386b978d1bf890450f38ad108 Mon Sep 17 00:00:00 2001
1c4b60
From: Vojtech Trefny <vtrefny@redhat.com>
1c4b60
Date: Mon, 30 May 2022 17:02:43 +0200
1c4b60
Subject: [PATCH 4/4] Add option to attach a newly created cache pool to
1c4b60
 existing LV
1c4b60
1c4b60
Because we do not have action for attaching the cache pool, we
1c4b60
cannot schedule both adding the fast PV to the VG and attaching
1c4b60
the cache pool to existing LV. This hack allows to schedule the
1c4b60
attach to happen after the cache pool is created.
1c4b60
1c4b60
Related: rhbz#2055200
1c4b60
---
1c4b60
 blivet/devices/lvm.py | 38 +++++++++++++++++++++++++++++++++++---
1c4b60
 1 file changed, 35 insertions(+), 3 deletions(-)
1c4b60
1c4b60
diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
1c4b60
index 12d3d073..feb92f2e 100644
1c4b60
--- a/blivet/devices/lvm.py
1c4b60
+++ b/blivet/devices/lvm.py
1c4b60
@@ -2028,9 +2028,10 @@ class LVMVDOLogicalVolumeMixin(object):
1c4b60
 
1c4b60
 
1c4b60
 class LVMCachePoolMixin(object):
1c4b60
-    def __init__(self, metadata_size, cache_mode=None):
1c4b60
+    def __init__(self, metadata_size, cache_mode=None, attach_to=None):
1c4b60
         self._metadata_size = metadata_size or Size(0)
1c4b60
         self._cache_mode = cache_mode
1c4b60
+        self._attach_to = attach_to
1c4b60
 
1c4b60
     def _init_check(self):
1c4b60
         if not self.is_cache_pool:
1c4b60
@@ -2042,6 +2043,9 @@ class LVMCachePoolMixin(object):
1c4b60
         if not self.exists and not self._pv_specs:
1c4b60
             raise ValueError("at least one fast PV must be specified to create a cache pool")
1c4b60
 
1c4b60
+        if self._attach_to and not self._attach_to.exists:
1c4b60
+            raise ValueError("cache pool can be attached only to an existing LV")
1c4b60
+
1c4b60
     def _check_from_lvs(self):
1c4b60
         if self._from_lvs:
1c4b60
             if len(self._from_lvs) != 2:
1c4b60
@@ -2150,6 +2154,31 @@ class LVMCachePoolMixin(object):
1c4b60
                                            cache_mode,
1c4b60
                                            0,
1c4b60
                                            [spec.pv.path for spec in self._pv_specs])
1c4b60
+        if self._attach_to:
1c4b60
+            self._attach_to.attach_cache(self)
1c4b60
+
1c4b60
+    def _post_create(self):
1c4b60
+        if self._attach_to:
1c4b60
+            # post_create tries to activate the LV and after attaching it no longer exists
1c4b60
+            return
1c4b60
+
1c4b60
+        # pylint: disable=bad-super-call
1c4b60
+        super(LVMLogicalVolumeBase, self)._post_create()
1c4b60
+
1c4b60
+    def add_hook(self, new=True):
1c4b60
+        if self._attach_to:
1c4b60
+            self._attach_to._cache = LVMCache(self._attach_to, size=self.size, exists=False,
1c4b60
+                                              pvs=self._pv_specs, mode=self._cache_mode)
1c4b60
+
1c4b60
+        # pylint: disable=bad-super-call
1c4b60
+        super(LVMLogicalVolumeBase, self).add_hook(new=new)
1c4b60
+
1c4b60
+    def remove_hook(self, modparent=True):
1c4b60
+        if self._attach_to:
1c4b60
+            self._attach_to._cache = None
1c4b60
+
1c4b60
+        # pylint: disable=bad-super-call
1c4b60
+        super(LVMLogicalVolumeBase, self).remove_hook(modparent=modparent)
1c4b60
 
1c4b60
     def dracut_setup_args(self):
1c4b60
         return set()
1c4b60
@@ -2174,7 +2203,7 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
1c4b60
                  parent_lv=None, int_type=None, origin=None, vorigin=False,
1c4b60
                  metadata_size=None, chunk_size=None, profile=None, from_lvs=None,
1c4b60
                  compression=False, deduplication=False, index_memory=0,
1c4b60
-                 write_policy=None, cache_mode=None):
1c4b60
+                 write_policy=None, cache_mode=None, attach_to=None):
1c4b60
         """
1c4b60
             :param name: the device name (generally a device node's basename)
1c4b60
             :type name: str
1c4b60
@@ -2250,6 +2279,9 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
1c4b60
             :type metadata_size: :class:`~.size.Size`
1c4b60
             :keyword cache_mode: mode for the cache or None for default (writethrough)
1c4b60
             :type cache_mode: str
1c4b60
+            :keyword attach_to: for non-existing cache pools a logical volume the pool should
1c4b60
+                                be attached to when created
1c4b60
+            :type attach_to: :class:`LVMLogicalVolumeDevice`
1c4b60
 
1c4b60
         """
1c4b60
 
1c4b60
@@ -2268,7 +2300,7 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
1c4b60
         LVMSnapshotMixin.__init__(self, origin, vorigin)
1c4b60
         LVMThinPoolMixin.__init__(self, metadata_size, chunk_size, profile)
1c4b60
         LVMThinLogicalVolumeMixin.__init__(self)
1c4b60
-        LVMCachePoolMixin.__init__(self, metadata_size, cache_mode)
1c4b60
+        LVMCachePoolMixin.__init__(self, metadata_size, cache_mode, attach_to)
1c4b60
         LVMLogicalVolumeBase.__init__(self, name, parents, size, uuid, seg_type,
1c4b60
                                       fmt, exists, sysfs_path, grow, maxsize,
1c4b60
                                       percent, cache_request, pvs, from_lvs)
1c4b60
-- 
1c4b60
2.34.3
1c4b60