neil / rpms / python-blivet

Forked from rpms/python-blivet a year ago
Clone

Blame SOURCES/0018-Add-support-for-creating-LVM-cache-pools.patch

5b80eb
From 91e443af7b9f6b8d7f845f353a3897e3c91015b3 Mon Sep 17 00:00:00 2001
5b80eb
From: Vojtech Trefny <vtrefny@redhat.com>
5b80eb
Date: Thu, 30 Dec 2021 16:08:43 +0100
5b80eb
Subject: [PATCH 1/4] Add support for creating LVM cache pools
5b80eb
5b80eb
Resolves: rhbz#2055198
5b80eb
---
5b80eb
 blivet/blivet.py               |   9 +-
5b80eb
 blivet/devicelibs/lvm.py       |   9 ++
5b80eb
 blivet/devices/lvm.py          | 160 +++++++++++++++++++++++++++++++--
5b80eb
 tests/devices_test/lvm_test.py |  26 ++++++
5b80eb
 4 files changed, 196 insertions(+), 8 deletions(-)
5b80eb
5b80eb
diff --git a/blivet/blivet.py b/blivet/blivet.py
5b80eb
index c6908eb0..d29fadd0 100644
5b80eb
--- a/blivet/blivet.py
5b80eb
+++ b/blivet/blivet.py
5b80eb
@@ -576,6 +576,8 @@ class Blivet(object):
5b80eb
             :type vdo_pool: bool
5b80eb
             :keyword vdo_lv: whether to create a vdo lv
5b80eb
             :type vdo_lv: bool
5b80eb
+            :keyword cache_pool: whether to create a cache pool
5b80eb
+            :type cache_pool: bool
5b80eb
             :returns: the new device
5b80eb
             :rtype: :class:`~.devices.LVMLogicalVolumeDevice`
5b80eb
 
5b80eb
@@ -594,6 +596,7 @@ class Blivet(object):
5b80eb
         thin_pool = kwargs.pop("thin_pool", False)
5b80eb
         vdo_pool = kwargs.pop("vdo_pool", False)
5b80eb
         vdo_lv = kwargs.pop("vdo_lv", False)
5b80eb
+        cache_pool = kwargs.pop("cache_pool", False)
5b80eb
         parent = kwargs.get("parents", [None])[0]
5b80eb
         if (thin_volume or vdo_lv) and parent:
5b80eb
             # kwargs["parents"] will contain the pool device, so...
5b80eb
@@ -609,6 +612,8 @@ class Blivet(object):
5b80eb
             kwargs["seg_type"] = "vdo-pool"
5b80eb
         if vdo_lv:
5b80eb
             kwargs["seg_type"] = "vdo"
5b80eb
+        if cache_pool:
5b80eb
+            kwargs["seg_type"] = "cache-pool"
5b80eb
 
5b80eb
         mountpoint = kwargs.pop("mountpoint", None)
5b80eb
         if 'fmt_type' in kwargs:
5b80eb
@@ -640,7 +645,7 @@ class Blivet(object):
5b80eb
                 swap = False
5b80eb
 
5b80eb
             prefix = ""
5b80eb
-            if thin_pool or vdo_pool:
5b80eb
+            if thin_pool or vdo_pool or cache_pool:
5b80eb
                 prefix = "pool"
5b80eb
 
5b80eb
             name = self.suggest_device_name(parent=vg,
5b80eb
@@ -651,7 +656,7 @@ class Blivet(object):
5b80eb
         if "%s-%s" % (vg.name, name) in self.names:
5b80eb
             raise ValueError("name '%s' is already in use" % name)
5b80eb
 
5b80eb
-        if thin_pool or thin_volume or vdo_pool or vdo_lv:
5b80eb
+        if thin_pool or thin_volume or vdo_pool or vdo_lv or cache_pool:
5b80eb
             cache_req = kwargs.pop("cache_request", None)
5b80eb
             if cache_req:
5b80eb
                 raise ValueError("Creating cached thin and VDO volumes and pools is not supported")
5b80eb
diff --git a/blivet/devicelibs/lvm.py b/blivet/devicelibs/lvm.py
5b80eb
index cb6f655e..724aaff4 100644
5b80eb
--- a/blivet/devicelibs/lvm.py
5b80eb
+++ b/blivet/devicelibs/lvm.py
5b80eb
@@ -54,6 +54,11 @@ LVM_THINP_MIN_CHUNK_SIZE = Size("64 KiB")
5b80eb
 LVM_THINP_MAX_CHUNK_SIZE = Size("1 GiB")
5b80eb
 LVM_THINP_ADDRESSABLE_CHUNK_SIZE = Size("17455015526400 B")  # 15.88 TiB
5b80eb
 
5b80eb
+# cache constants
5b80eb
+LVM_CACHE_MIN_METADATA_SIZE = Size("8 MiB")
5b80eb
+LVM_CACHE_MAX_METADATA_SIZE = Size("16 GiB")
5b80eb
+LVM_CACHE_DEFAULT_MODE = blockdev.LVMCacheMode.WRITETHROUGH
5b80eb
+
5b80eb
 raid_levels = raid.RAIDLevels(["linear", "striped", "raid1", "raid4", "raid5", "raid6", "raid10"])
5b80eb
 raid_seg_types = list(itertools.chain.from_iterable([level.names for level in raid_levels if level.name != "linear"]))
5b80eb
 
5b80eb
@@ -236,3 +241,7 @@ def recommend_thpool_chunk_size(thpool_size):
5b80eb
     # for every ~15.88 TiB of thinpool data size
5b80eb
     return min(math.ceil(thpool_size / LVM_THINP_ADDRESSABLE_CHUNK_SIZE) * LVM_THINP_MIN_CHUNK_SIZE,
5b80eb
                LVM_THINP_MAX_CHUNK_SIZE)
5b80eb
+
5b80eb
+
5b80eb
+def is_valid_cache_md_size(md_size):
5b80eb
+    return md_size >= LVM_CACHE_MIN_METADATA_SIZE and md_size <= LVM_CACHE_MAX_METADATA_SIZE
5b80eb
diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
5b80eb
index 4700d141..7d374c3b 100644
5b80eb
--- a/blivet/devices/lvm.py
5b80eb
+++ b/blivet/devices/lvm.py
5b80eb
@@ -43,6 +43,7 @@ from .. import util
5b80eb
 from ..storage_log import log_method_call
5b80eb
 from .. import udev
5b80eb
 from ..size import Size, KiB, MiB, ROUND_UP, ROUND_DOWN
5b80eb
+from ..static_data.lvm_info import lvs_info
5b80eb
 from ..tasks import availability
5b80eb
 
5b80eb
 import logging
5b80eb
@@ -646,7 +647,7 @@ class LVMLogicalVolumeBase(DMDevice, RaidDevice):
5b80eb
                  percent=None, cache_request=None, pvs=None, from_lvs=None):
5b80eb
 
5b80eb
         if not exists:
5b80eb
-            if seg_type not in [None, "linear", "thin", "thin-pool", "cache", "vdo-pool", "vdo"] + lvm.raid_seg_types:
5b80eb
+            if seg_type not in [None, "linear", "thin", "thin-pool", "cache", "vdo-pool", "vdo", "cache-pool"] + lvm.raid_seg_types:
5b80eb
                 raise ValueError("Invalid or unsupported segment type: %s" % seg_type)
5b80eb
             if seg_type and seg_type in lvm.raid_seg_types and not pvs:
5b80eb
                 raise ValueError("List of PVs has to be given for every non-linear LV")
5b80eb
@@ -690,8 +691,8 @@ class LVMLogicalVolumeBase(DMDevice, RaidDevice):
5b80eb
             # we reserve space for it
5b80eb
             self._metadata_size = self.vg.pe_size
5b80eb
             self._size -= self._metadata_size
5b80eb
-        elif self.seg_type == "thin-pool":
5b80eb
-            # LVMThinPoolMixin sets self._metadata_size on its own
5b80eb
+        elif self.seg_type in ("thin-pool", "cache-pool"):
5b80eb
+            # LVMThinPoolMixin and LVMCachePoolMixin set self._metadata_size on their own
5b80eb
             if not self.exists and not from_lvs and not grow:
5b80eb
                 # a thin pool we are not going to grow -> lets calculate metadata
5b80eb
                 # size now if not given explicitly
5b80eb
@@ -1619,7 +1620,6 @@ class LVMThinPoolMixin(object):
5b80eb
         """ A list of this pool's LVs """
5b80eb
         return self._lvs[:]     # we don't want folks changing our list
5b80eb
 
5b80eb
-    @util.requires_property("is_thin_pool")
5b80eb
     def autoset_md_size(self, enforced=False):
5b80eb
         """ If self._metadata_size not set already, it calculates the recommended value
5b80eb
         and sets it while subtracting the size from self.size.
5b80eb
@@ -2032,9 +2032,142 @@ class LVMVDOLogicalVolumeMixin(object):
5b80eb
             self.pool._add_log_vol(self)
5b80eb
 
5b80eb
 
5b80eb
+class LVMCachePoolMixin(object):
5b80eb
+    def __init__(self, metadata_size, cache_mode=None):
5b80eb
+        self._metadata_size = metadata_size or Size(0)
5b80eb
+        self._cache_mode = cache_mode
5b80eb
+
5b80eb
+    def _init_check(self):
5b80eb
+        if not self.is_cache_pool:
5b80eb
+            return
5b80eb
+
5b80eb
+        if self._metadata_size and not lvm.is_valid_cache_md_size(self._metadata_size):
5b80eb
+            raise ValueError("invalid metadatasize value")
5b80eb
+
5b80eb
+        if not self.exists and not self._pv_specs:
5b80eb
+            raise ValueError("at least one fast PV must be specified to create a cache pool")
5b80eb
+
5b80eb
+    def _check_from_lvs(self):
5b80eb
+        if self._from_lvs:
5b80eb
+            if len(self._from_lvs) != 2:
5b80eb
+                raise errors.DeviceError("two LVs required to create a cache pool")
5b80eb
+
5b80eb
+    def _convert_from_lvs(self):
5b80eb
+        data_lv, metadata_lv = self._from_lvs
5b80eb
+
5b80eb
+        data_lv.parent_lv = self  # also adds the LV to self._internal_lvs
5b80eb
+        data_lv.int_lv_type = LVMInternalLVtype.data
5b80eb
+        metadata_lv.parent_lv = self
5b80eb
+        metadata_lv.int_lv_type = LVMInternalLVtype.meta
5b80eb
+
5b80eb
+        self.size = data_lv.size
5b80eb
+
5b80eb
+    @property
5b80eb
+    def is_cache_pool(self):
5b80eb
+        return self.seg_type == "cache-pool"
5b80eb
+
5b80eb
+    @property
5b80eb
+    def profile(self):
5b80eb
+        return self._profile
5b80eb
+
5b80eb
+    @property
5b80eb
+    def type(self):
5b80eb
+        return "lvmcachepool"
5b80eb
+
5b80eb
+    @property
5b80eb
+    def resizable(self):
5b80eb
+        return False
5b80eb
+
5b80eb
+    def read_current_size(self):
5b80eb
+        log_method_call(self, exists=self.exists, path=self.path,
5b80eb
+                        sysfs_path=self.sysfs_path)
5b80eb
+        if self.size != Size(0):
5b80eb
+            return self.size
5b80eb
+
5b80eb
+        if self.exists:
5b80eb
+            # cache pools are not active and don't have the device mapper mapping
5b80eb
+            # so we can't get this from sysfs
5b80eb
+            lv_info = lvs_info.cache.get(self.name)
5b80eb
+            if lv_info is None:
5b80eb
+                log.error("Failed to get size for existing cache pool '%s'", self.name)
5b80eb
+                return Size(0)
5b80eb
+            else:
5b80eb
+                return Size(lv_info.size)
5b80eb
+
5b80eb
+        return Size(0)
5b80eb
+
5b80eb
+    def autoset_md_size(self, enforced=False):
5b80eb
+        """ If self._metadata_size not set already, it calculates the recommended value
5b80eb
+        and sets it while subtracting the size from self.size.
5b80eb
+
5b80eb
+        """
5b80eb
+
5b80eb
+        log.debug("Auto-setting cache pool metadata size")
5b80eb
+
5b80eb
+        if self._size <= Size(0):
5b80eb
+            log.debug("Cache pool size not bigger than 0, just setting metadata size to 0")
5b80eb
+            self._metadata_size = 0
5b80eb
+            return
5b80eb
+
5b80eb
+        old_md_size = self._metadata_size
5b80eb
+        if self._metadata_size == 0 or enforced:
5b80eb
+            self._metadata_size = blockdev.lvm.cache_get_default_md_size(self._size)
5b80eb
+            log.debug("Using recommended metadata size: %s", self._metadata_size)
5b80eb
+
5b80eb
+        self._metadata_size = self.vg.align(self._metadata_size, roundup=True)
5b80eb
+        log.debug("Rounded metadata size to extents: %s MiB", self._metadata_size.convert_to("MiB"))
5b80eb
+
5b80eb
+        if self._metadata_size == old_md_size:
5b80eb
+            log.debug("Rounded metadata size unchanged")
5b80eb
+        else:
5b80eb
+            new_size = self.size - (self._metadata_size - old_md_size)
5b80eb
+            log.debug("Adjusting size from %s MiB to %s MiB",
5b80eb
+                      self.size.convert_to("MiB"), new_size.convert_to("MiB"))
5b80eb
+            self.size = new_size
5b80eb
+
5b80eb
+    def _pre_create(self):
5b80eb
+        # make sure all the LVs this LV should be created from exist (if any)
5b80eb
+        if self._from_lvs and any(not lv.exists for lv in self._from_lvs):
5b80eb
+            raise errors.DeviceError("Component LVs need to be created first")
5b80eb
+
5b80eb
+    def _create(self):
5b80eb
+        """ Create the device. """
5b80eb
+        log_method_call(self, self.name, status=self.status)
5b80eb
+        if self._cache_mode:
5b80eb
+            try:
5b80eb
+                cache_mode = blockdev.lvm.cache_get_mode_from_str(self._cache_mode)
5b80eb
+            except blockdev.LVMError as e:
5b80eb
+                raise errors.DeviceError from e
5b80eb
+        else:
5b80eb
+            cache_mode = lvm.LVM_CACHE_DEFAULT_MODE
5b80eb
+
5b80eb
+        if self._from_lvs:
5b80eb
+            extra = dict()
5b80eb
+            if self._cache_mode:
5b80eb
+                # we need the string here, it will be passed directly to the lvm command
5b80eb
+                extra["cachemode"] = self._cache_mode
5b80eb
+            data_lv = six.next(lv for lv in self._internal_lvs if lv.int_lv_type == LVMInternalLVtype.data)
5b80eb
+            meta_lv = six.next(lv for lv in self._internal_lvs if lv.int_lv_type == LVMInternalLVtype.meta)
5b80eb
+            blockdev.lvm.cache_pool_convert(self.vg.name, data_lv.lvname, meta_lv.lvname, self.lvname, **extra)
5b80eb
+        else:
5b80eb
+            blockdev.lvm.cache_create_pool(self.vg.name, self.lvname, self.size,
5b80eb
+                                           self.metadata_size,
5b80eb
+                                           cache_mode,
5b80eb
+                                           0,
5b80eb
+                                           [spec.pv.path for spec in self._pv_specs])
5b80eb
+
5b80eb
+    def dracut_setup_args(self):
5b80eb
+        return set()
5b80eb
+
5b80eb
+    @property
5b80eb
+    def direct(self):
5b80eb
+        """ Is this device directly accessible? """
5b80eb
+        return False
5b80eb
+
5b80eb
+
5b80eb
 class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin, LVMSnapshotMixin,
5b80eb
                              LVMThinPoolMixin, LVMThinLogicalVolumeMixin, LVMVDOPoolMixin,
5b80eb
-                             LVMVDOLogicalVolumeMixin):
5b80eb
+                             LVMVDOLogicalVolumeMixin, LVMCachePoolMixin):
5b80eb
     """ An LVM Logical Volume """
5b80eb
 
5b80eb
     # generally resizable, see :property:`resizable` for details
5b80eb
@@ -2046,7 +2179,7 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
5b80eb
                  parent_lv=None, int_type=None, origin=None, vorigin=False,
5b80eb
                  metadata_size=None, chunk_size=None, profile=None, from_lvs=None,
5b80eb
                  compression=False, deduplication=False, index_memory=0,
5b80eb
-                 write_policy=None):
5b80eb
+                 write_policy=None, cache_mode=None):
5b80eb
         """
5b80eb
             :param name: the device name (generally a device node's basename)
5b80eb
             :type name: str
5b80eb
@@ -2116,6 +2249,13 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
5b80eb
             :keyword write_policy: write policy for the volume or None for default
5b80eb
             :type write_policy: str
5b80eb
 
5b80eb
+            For cache pools only:
5b80eb
+
5b80eb
+            :keyword metadata_size: the size of the metadata LV
5b80eb
+            :type metadata_size: :class:`~.size.Size`
5b80eb
+            :keyword cache_mode: mode for the cache or None for default (writethrough)
5b80eb
+            :type cache_mode: str
5b80eb
+
5b80eb
         """
5b80eb
 
5b80eb
         if isinstance(parents, (list, ParentList)):
5b80eb
@@ -2133,6 +2273,7 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
5b80eb
         LVMSnapshotMixin.__init__(self, origin, vorigin)
5b80eb
         LVMThinPoolMixin.__init__(self, metadata_size, chunk_size, profile)
5b80eb
         LVMThinLogicalVolumeMixin.__init__(self)
5b80eb
+        LVMCachePoolMixin.__init__(self, metadata_size, cache_mode)
5b80eb
         LVMLogicalVolumeBase.__init__(self, name, parents, size, uuid, seg_type,
5b80eb
                                       fmt, exists, sysfs_path, grow, maxsize,
5b80eb
                                       percent, cache_request, pvs, from_lvs)
5b80eb
@@ -2144,6 +2285,7 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
5b80eb
         LVMSnapshotMixin._init_check(self)
5b80eb
         LVMThinPoolMixin._init_check(self)
5b80eb
         LVMThinLogicalVolumeMixin._init_check(self)
5b80eb
+        LVMCachePoolMixin._init_check(self)
5b80eb
 
5b80eb
         if self._from_lvs:
5b80eb
             self._check_from_lvs()
5b80eb
@@ -2169,6 +2311,8 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
5b80eb
             ret.append(LVMVDOPoolMixin)
5b80eb
         if self.is_vdo_lv:
5b80eb
             ret.append(LVMVDOLogicalVolumeMixin)
5b80eb
+        if self.is_cache_pool:
5b80eb
+            ret.append(LVMCachePoolMixin)
5b80eb
         return ret
5b80eb
 
5b80eb
     def _try_specific_call(self, name, *args, **kwargs):
5b80eb
@@ -2552,6 +2696,10 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
5b80eb
 
5b80eb
         return True
5b80eb
 
5b80eb
+    @type_specific
5b80eb
+    def autoset_md_size(self, enforced=False):
5b80eb
+        pass
5b80eb
+
5b80eb
     def attach_cache(self, cache_pool_lv):
5b80eb
         if self.is_thin_lv or self.is_snapshot_lv or self.is_internal_lv:
5b80eb
             raise errors.DeviceError("Cannot attach a cache pool to the '%s' LV" % self.name)
5b80eb
diff --git a/tests/devices_test/lvm_test.py b/tests/devices_test/lvm_test.py
5b80eb
index c349f003..a1ddaf2d 100644
5b80eb
--- a/tests/devices_test/lvm_test.py
5b80eb
+++ b/tests/devices_test/lvm_test.py
5b80eb
@@ -867,3 +867,29 @@ class BlivetLVMVDODependenciesTest(unittest.TestCase):
5b80eb
 
5b80eb
                 vdo_supported = devicefactory.is_supported_device_type(devicefactory.DEVICE_TYPE_LVM_VDO)
5b80eb
                 self.assertFalse(vdo_supported)
5b80eb
+
5b80eb
+
5b80eb
+@unittest.skipUnless(not any(x.unavailable_type_dependencies() for x in DEVICE_CLASSES), "some unsupported device classes required for this test")
5b80eb
+class BlivetNewLVMCachePoolDeviceTest(unittest.TestCase):
5b80eb
+
5b80eb
+    def test_new_cache_pool(self):
5b80eb
+        b = blivet.Blivet()
5b80eb
+        pv = StorageDevice("pv1", fmt=blivet.formats.get_format("lvmpv"),
5b80eb
+                           size=Size("10 GiB"), exists=True)
5b80eb
+        vg = LVMVolumeGroupDevice("testvg", parents=[pv], exists=True)
5b80eb
+
5b80eb
+        for dev in (pv, vg):
5b80eb
+            b.devicetree._add_device(dev)
5b80eb
+
5b80eb
+        # check that all the above devices are in the expected places
5b80eb
+        self.assertEqual(set(b.devices), {pv, vg})
5b80eb
+        self.assertEqual(set(b.vgs), {vg})
5b80eb
+
5b80eb
+        self.assertEqual(vg.size, Size("10236 MiB"))
5b80eb
+
5b80eb
+        cachepool = b.new_lv(name="cachepool", cache_pool=True,
5b80eb
+                             parents=[vg], pvs=[pv])
5b80eb
+
5b80eb
+        b.create_device(cachepool)
5b80eb
+
5b80eb
+        self.assertEqual(cachepool.type, "lvmcachepool")
5b80eb
-- 
5b80eb
2.35.3
5b80eb
5b80eb
5b80eb
From d25d52e146559d226369afdb4b102e516bd9e332 Mon Sep 17 00:00:00 2001
5b80eb
From: Vojtech Trefny <vtrefny@redhat.com>
5b80eb
Date: Thu, 30 Dec 2021 16:09:04 +0100
5b80eb
Subject: [PATCH 2/4] examples: Add LVM cache pool example
5b80eb
5b80eb
Related: rhbz#2055198
5b80eb
---
5b80eb
 examples/lvm_cachepool.py | 59 +++++++++++++++++++++++++++++++++++++++
5b80eb
 1 file changed, 59 insertions(+)
5b80eb
 create mode 100644 examples/lvm_cachepool.py
5b80eb
5b80eb
diff --git a/examples/lvm_cachepool.py b/examples/lvm_cachepool.py
5b80eb
new file mode 100644
5b80eb
index 00000000..ab2e8a72
5b80eb
--- /dev/null
5b80eb
+++ b/examples/lvm_cachepool.py
5b80eb
@@ -0,0 +1,59 @@
5b80eb
+import os
5b80eb
+
5b80eb
+import blivet
5b80eb
+from blivet.size import Size
5b80eb
+from blivet.util import set_up_logging, create_sparse_tempfile
5b80eb
+
5b80eb
+
5b80eb
+set_up_logging()
5b80eb
+b = blivet.Blivet()   # create an instance of Blivet (don't add system devices)
5b80eb
+
5b80eb
+# create a disk image file on which to create new devices
5b80eb
+disk1_file = create_sparse_tempfile("disk1", Size("100GiB"))
5b80eb
+b.disk_images["disk1"] = disk1_file
5b80eb
+disk2_file = create_sparse_tempfile("disk2", Size("100GiB"))
5b80eb
+b.disk_images["disk2"] = disk2_file
5b80eb
+
5b80eb
+b.reset()
5b80eb
+
5b80eb
+try:
5b80eb
+    disk1 = b.devicetree.get_device_by_name("disk1")
5b80eb
+    disk2 = b.devicetree.get_device_by_name("disk2")
5b80eb
+
5b80eb
+    b.initialize_disk(disk1)
5b80eb
+    b.initialize_disk(disk2)
5b80eb
+
5b80eb
+    pv = b.new_partition(size=Size("50GiB"), fmt_type="lvmpv", parents=[disk1])
5b80eb
+    b.create_device(pv)
5b80eb
+    pv2 = b.new_partition(size=Size("50GiB"), fmt_type="lvmpv", parents=[disk2])
5b80eb
+    b.create_device(pv2)
5b80eb
+
5b80eb
+    # allocate the partitions (decide where and on which disks they'll reside)
5b80eb
+    blivet.partitioning.do_partitioning(b)
5b80eb
+
5b80eb
+    vg = b.new_vg(parents=[pv, pv2])
5b80eb
+    b.create_device(vg)
5b80eb
+
5b80eb
+    # new lv with base size 5GiB and growth up to 15GiB and an ext4 filesystem
5b80eb
+    lv = b.new_lv(fmt_type="ext4", size=Size("5GiB"), parents=[vg], name="cached")
5b80eb
+    b.create_device(lv)
5b80eb
+
5b80eb
+    # new cache pool
5b80eb
+    cpool = b.new_lv(size=Size("1 GiB"), parents=[vg], pvs=[pv2], cache_pool=True, name="fastlv")
5b80eb
+    b.create_device(cpool)
5b80eb
+
5b80eb
+    # write the new partitions to disk and format them as specified
5b80eb
+    b.do_it()
5b80eb
+    print(b.devicetree)
5b80eb
+
5b80eb
+    # attach the newly created cache pool to the "slow" LV
5b80eb
+    lv.attach_cache(cpool)
5b80eb
+
5b80eb
+    b.reset()
5b80eb
+    print(b.devicetree)
5b80eb
+
5b80eb
+    input("Check the state and hit ENTER to trigger cleanup")
5b80eb
+finally:
5b80eb
+    b.devicetree.teardown_disk_images()
5b80eb
+    os.unlink(disk1_file)
5b80eb
+    os.unlink(disk2_file)
5b80eb
-- 
5b80eb
2.35.3
5b80eb
5b80eb
5b80eb
From 2411d8aa082f6baf46f25d5f97455da983c0ee5f Mon Sep 17 00:00:00 2001
5b80eb
From: Vojtech Trefny <vtrefny@redhat.com>
5b80eb
Date: Thu, 30 Dec 2021 16:13:33 +0100
5b80eb
Subject: [PATCH 3/4] lvm: Use blivet static data when checking if the VG is
5b80eb
 active
5b80eb
5b80eb
Instead of calling 'lvs' again in LVMVolumeGroupDevice.status
5b80eb
5b80eb
Related: rhbz#2055198
5b80eb
---
5b80eb
 blivet/devices/lvm.py | 9 ++-------
5b80eb
 1 file changed, 2 insertions(+), 7 deletions(-)
5b80eb
5b80eb
diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
5b80eb
index 7d374c3b..9f875e4e 100644
5b80eb
--- a/blivet/devices/lvm.py
5b80eb
+++ b/blivet/devices/lvm.py
5b80eb
@@ -220,13 +220,8 @@ class LVMVolumeGroupDevice(ContainerDevice):
5b80eb
 
5b80eb
         # special handling for incomplete VGs
5b80eb
         if not self.complete:
5b80eb
-            try:
5b80eb
-                lvs_info = blockdev.lvm.lvs(vg_name=self.name)
5b80eb
-            except blockdev.LVMError:
5b80eb
-                lvs_info = []
5b80eb
-
5b80eb
-            for lv_info in lvs_info:
5b80eb
-                if lv_info.attr and lv_info.attr[4] == 'a':
5b80eb
+            for lv_info in lvs_info.cache.values():
5b80eb
+                if lv_info.vg_name == self.name and lv_info.attr and lv_info.attr[4] == 'a':
5b80eb
                     return True
5b80eb
 
5b80eb
             return False
5b80eb
-- 
5b80eb
2.35.3
5b80eb
5b80eb
5b80eb
From c8fda78915f31f3d5011ada3c7463f85e181983b Mon Sep 17 00:00:00 2001
5b80eb
From: Vojtech Trefny <vtrefny@redhat.com>
5b80eb
Date: Mon, 30 May 2022 17:02:43 +0200
5b80eb
Subject: [PATCH 4/4] Add option to attach a newly created cache pool to
5b80eb
 existing LV
5b80eb
5b80eb
Because we do not have action for attaching the cache pool, we
5b80eb
cannot schedule both adding the fast PV to the VG and attaching
5b80eb
the cache pool to existing LV. This hack allows to schedule the
5b80eb
attach to happen after the cache pool is created.
5b80eb
5b80eb
Related: rhbz#2055198
5b80eb
---
5b80eb
 blivet/devices/lvm.py | 38 +++++++++++++++++++++++++++++++++++---
5b80eb
 1 file changed, 35 insertions(+), 3 deletions(-)
5b80eb
5b80eb
diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
5b80eb
index 9f875e4e..7e4fcf53 100644
5b80eb
--- a/blivet/devices/lvm.py
5b80eb
+++ b/blivet/devices/lvm.py
5b80eb
@@ -2028,9 +2028,10 @@ class LVMVDOLogicalVolumeMixin(object):
5b80eb
 
5b80eb
 
5b80eb
 class LVMCachePoolMixin(object):
5b80eb
-    def __init__(self, metadata_size, cache_mode=None):
5b80eb
+    def __init__(self, metadata_size, cache_mode=None, attach_to=None):
5b80eb
         self._metadata_size = metadata_size or Size(0)
5b80eb
         self._cache_mode = cache_mode
5b80eb
+        self._attach_to = attach_to
5b80eb
 
5b80eb
     def _init_check(self):
5b80eb
         if not self.is_cache_pool:
5b80eb
@@ -2042,6 +2043,9 @@ class LVMCachePoolMixin(object):
5b80eb
         if not self.exists and not self._pv_specs:
5b80eb
             raise ValueError("at least one fast PV must be specified to create a cache pool")
5b80eb
 
5b80eb
+        if self._attach_to and not self._attach_to.exists:
5b80eb
+            raise ValueError("cache pool can be attached only to an existing LV")
5b80eb
+
5b80eb
     def _check_from_lvs(self):
5b80eb
         if self._from_lvs:
5b80eb
             if len(self._from_lvs) != 2:
5b80eb
@@ -2150,6 +2154,31 @@ class LVMCachePoolMixin(object):
5b80eb
                                            cache_mode,
5b80eb
                                            0,
5b80eb
                                            [spec.pv.path for spec in self._pv_specs])
5b80eb
+        if self._attach_to:
5b80eb
+            self._attach_to.attach_cache(self)
5b80eb
+
5b80eb
+    def _post_create(self):
5b80eb
+        if self._attach_to:
5b80eb
+            # post_create tries to activate the LV and after attaching it no longer exists
5b80eb
+            return
5b80eb
+
5b80eb
+        # pylint: disable=bad-super-call
5b80eb
+        super(LVMLogicalVolumeBase, self)._post_create()
5b80eb
+
5b80eb
+    def add_hook(self, new=True):
5b80eb
+        if self._attach_to:
5b80eb
+            self._attach_to._cache = LVMCache(self._attach_to, size=self.size, exists=False,
5b80eb
+                                              pvs=self._pv_specs, mode=self._cache_mode)
5b80eb
+
5b80eb
+        # pylint: disable=bad-super-call
5b80eb
+        super(LVMLogicalVolumeBase, self).add_hook(new=new)
5b80eb
+
5b80eb
+    def remove_hook(self, modparent=True):
5b80eb
+        if self._attach_to:
5b80eb
+            self._attach_to._cache = None
5b80eb
+
5b80eb
+        # pylint: disable=bad-super-call
5b80eb
+        super(LVMLogicalVolumeBase, self).remove_hook(modparent=modparent)
5b80eb
 
5b80eb
     def dracut_setup_args(self):
5b80eb
         return set()
5b80eb
@@ -2174,7 +2203,7 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
5b80eb
                  parent_lv=None, int_type=None, origin=None, vorigin=False,
5b80eb
                  metadata_size=None, chunk_size=None, profile=None, from_lvs=None,
5b80eb
                  compression=False, deduplication=False, index_memory=0,
5b80eb
-                 write_policy=None, cache_mode=None):
5b80eb
+                 write_policy=None, cache_mode=None, attach_to=None):
5b80eb
         """
5b80eb
             :param name: the device name (generally a device node's basename)
5b80eb
             :type name: str
5b80eb
@@ -2250,6 +2279,9 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
5b80eb
             :type metadata_size: :class:`~.size.Size`
5b80eb
             :keyword cache_mode: mode for the cache or None for default (writethrough)
5b80eb
             :type cache_mode: str
5b80eb
+            :keyword attach_to: for non-existing cache pools a logical volume the pool should
5b80eb
+                                be attached to when created
5b80eb
+            :type attach_to: :class:`LVMLogicalVolumeDevice`
5b80eb
 
5b80eb
         """
5b80eb
 
5b80eb
@@ -2268,7 +2300,7 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
5b80eb
         LVMSnapshotMixin.__init__(self, origin, vorigin)
5b80eb
         LVMThinPoolMixin.__init__(self, metadata_size, chunk_size, profile)
5b80eb
         LVMThinLogicalVolumeMixin.__init__(self)
5b80eb
-        LVMCachePoolMixin.__init__(self, metadata_size, cache_mode)
5b80eb
+        LVMCachePoolMixin.__init__(self, metadata_size, cache_mode, attach_to)
5b80eb
         LVMLogicalVolumeBase.__init__(self, name, parents, size, uuid, seg_type,
5b80eb
                                       fmt, exists, sysfs_path, grow, maxsize,
5b80eb
                                       percent, cache_request, pvs, from_lvs)
5b80eb
-- 
5b80eb
2.35.3
5b80eb