SOURCES/0018-Add-support-for-creating-LVM-cache-pools.patch

From 91e443af7b9f6b8d7f845f353a3897e3c91015b3 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 30 Dec 2021 16:08:43 +0100
Subject: [PATCH 1/4] Add support for creating LVM cache pools

Resolves: rhbz#2055198
---
 blivet/blivet.py               |   9 +-
 blivet/devicelibs/lvm.py       |   9 ++
 blivet/devices/lvm.py          | 160 +++++++++++++++++++++++++++++++--
 tests/devices_test/lvm_test.py |  26 ++++++
 4 files changed, 196 insertions(+), 8 deletions(-)

diff --git a/blivet/blivet.py b/blivet/blivet.py
index c6908eb0..d29fadd0 100644
--- a/blivet/blivet.py
+++ b/blivet/blivet.py
@@ -576,6 +576,8 @@ class Blivet(object):
             :type vdo_pool: bool
             :keyword vdo_lv: whether to create a vdo lv
             :type vdo_lv: bool
+            :keyword cache_pool: whether to create a cache pool
+            :type cache_pool: bool
             :returns: the new device
             :rtype: :class:`~.devices.LVMLogicalVolumeDevice`
 
@@ -594,6 +596,7 @@ class Blivet(object):
         thin_pool = kwargs.pop("thin_pool", False)
         vdo_pool = kwargs.pop("vdo_pool", False)
         vdo_lv = kwargs.pop("vdo_lv", False)
+        cache_pool = kwargs.pop("cache_pool", False)
         parent = kwargs.get("parents", [None])[0]
         if (thin_volume or vdo_lv) and parent:
             # kwargs["parents"] will contain the pool device, so...
@@ -609,6 +612,8 @@ class Blivet(object):
             kwargs["seg_type"] = "vdo-pool"
         if vdo_lv:
             kwargs["seg_type"] = "vdo"
+        if cache_pool:
+            kwargs["seg_type"] = "cache-pool"
 
         mountpoint = kwargs.pop("mountpoint", None)
         if 'fmt_type' in kwargs:
@@ -640,7 +645,7 @@ class Blivet(object):
                 swap = False
 
             prefix = ""
-            if thin_pool or vdo_pool:
+            if thin_pool or vdo_pool or cache_pool:
                 prefix = "pool"
 
             name = self.suggest_device_name(parent=vg,
@@ -651,7 +656,7 @@ class Blivet(object):
         if "%s-%s" % (vg.name, name) in self.names:
             raise ValueError("name '%s' is already in use" % name)
 
-        if thin_pool or thin_volume or vdo_pool or vdo_lv:
+        if thin_pool or thin_volume or vdo_pool or vdo_lv or cache_pool:
             cache_req = kwargs.pop("cache_request", None)
             if cache_req:
                 raise ValueError("Creating cached thin and VDO volumes and pools is not supported")
diff --git a/blivet/devicelibs/lvm.py b/blivet/devicelibs/lvm.py
index cb6f655e..724aaff4 100644
--- a/blivet/devicelibs/lvm.py
+++ b/blivet/devicelibs/lvm.py
@@ -54,6 +54,11 @@ LVM_THINP_MIN_CHUNK_SIZE = Size("64 KiB")
 LVM_THINP_MAX_CHUNK_SIZE = Size("1 GiB")
 LVM_THINP_ADDRESSABLE_CHUNK_SIZE = Size("17455015526400 B")  # 15.88 TiB
 
+# cache constants
+LVM_CACHE_MIN_METADATA_SIZE = Size("8 MiB")
+LVM_CACHE_MAX_METADATA_SIZE = Size("16 GiB")
+LVM_CACHE_DEFAULT_MODE = blockdev.LVMCacheMode.WRITETHROUGH
+
 raid_levels = raid.RAIDLevels(["linear", "striped", "raid1", "raid4", "raid5", "raid6", "raid10"])
 raid_seg_types = list(itertools.chain.from_iterable([level.names for level in raid_levels if level.name != "linear"]))
 
@@ -236,3 +241,7 @@ def recommend_thpool_chunk_size(thpool_size):
     # for every ~15.88 TiB of thinpool data size
     return min(math.ceil(thpool_size / LVM_THINP_ADDRESSABLE_CHUNK_SIZE) * LVM_THINP_MIN_CHUNK_SIZE,
                LVM_THINP_MAX_CHUNK_SIZE)
+
+
+def is_valid_cache_md_size(md_size):
+    return md_size >= LVM_CACHE_MIN_METADATA_SIZE and md_size <= LVM_CACHE_MAX_METADATA_SIZE
diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
index 4700d141..7d374c3b 100644
--- a/blivet/devices/lvm.py
+++ b/blivet/devices/lvm.py
@@ -43,6 +43,7 @@ from .. import util
 from ..storage_log import log_method_call
 from .. import udev
 from ..size import Size, KiB, MiB, ROUND_UP, ROUND_DOWN
+from ..static_data.lvm_info import lvs_info
 from ..tasks import availability
 
 import logging
@@ -646,7 +647,7 @@ class LVMLogicalVolumeBase(DMDevice, RaidDevice):
                  percent=None, cache_request=None, pvs=None, from_lvs=None):
 
         if not exists:
-            if seg_type not in [None, "linear", "thin", "thin-pool", "cache", "vdo-pool", "vdo"] + lvm.raid_seg_types:
+            if seg_type not in [None, "linear", "thin", "thin-pool", "cache", "vdo-pool", "vdo", "cache-pool"] + lvm.raid_seg_types:
                 raise ValueError("Invalid or unsupported segment type: %s" % seg_type)
             if seg_type and seg_type in lvm.raid_seg_types and not pvs:
                 raise ValueError("List of PVs has to be given for every non-linear LV")
@@ -690,8 +691,8 @@ class LVMLogicalVolumeBase(DMDevice, RaidDevice):
             # we reserve space for it
             self._metadata_size = self.vg.pe_size
             self._size -= self._metadata_size
-        elif self.seg_type == "thin-pool":
-            # LVMThinPoolMixin sets self._metadata_size on its own
+        elif self.seg_type in ("thin-pool", "cache-pool"):
+            # LVMThinPoolMixin and LVMCachePoolMixin set self._metadata_size on their own
             if not self.exists and not from_lvs and not grow:
                 # a thin pool we are not going to grow -> lets calculate metadata
                 # size now if not given explicitly
@@ -1619,7 +1620,6 @@ class LVMThinPoolMixin(object):
         """ A list of this pool's LVs """
         return self._lvs[:]     # we don't want folks changing our list
 
-    @util.requires_property("is_thin_pool")
     def autoset_md_size(self, enforced=False):
         """ If self._metadata_size not set already, it calculates the recommended value
         and sets it while subtracting the size from self.size.
@@ -2032,9 +2032,142 @@ class LVMVDOLogicalVolumeMixin(object):
             self.pool._add_log_vol(self)
 
 
+class LVMCachePoolMixin(object):
+    def __init__(self, metadata_size, cache_mode=None):
+        self._metadata_size = metadata_size or Size(0)
+        self._cache_mode = cache_mode
+
+    def _init_check(self):
+        if not self.is_cache_pool:
+            return
+
+        if self._metadata_size and not lvm.is_valid_cache_md_size(self._metadata_size):
+            raise ValueError("invalid metadatasize value")
+
+        if not self.exists and not self._pv_specs:
+            raise ValueError("at least one fast PV must be specified to create a cache pool")
+
+    def _check_from_lvs(self):
+        if self._from_lvs:
+            if len(self._from_lvs) != 2:
+                raise errors.DeviceError("two LVs required to create a cache pool")
+
+    def _convert_from_lvs(self):
+        data_lv, metadata_lv = self._from_lvs
+
+        data_lv.parent_lv = self  # also adds the LV to self._internal_lvs
+        data_lv.int_lv_type = LVMInternalLVtype.data
+        metadata_lv.parent_lv = self
+        metadata_lv.int_lv_type = LVMInternalLVtype.meta
+
+        self.size = data_lv.size
+
+    @property
+    def is_cache_pool(self):
+        return self.seg_type == "cache-pool"
+
+    @property
+    def profile(self):
+        return self._profile
+
+    @property
+    def type(self):
+        return "lvmcachepool"
+
+    @property
+    def resizable(self):
+        return False
+
+    def read_current_size(self):
+        log_method_call(self, exists=self.exists, path=self.path,
+                        sysfs_path=self.sysfs_path)
+        if self.size != Size(0):
+            return self.size
+
+        if self.exists:
+            # cache pools are not active and don't have the device mapper mapping
+            # so we can't get this from sysfs
+            lv_info = lvs_info.cache.get(self.name)
+            if lv_info is None:
+                log.error("Failed to get size for existing cache pool '%s'", self.name)
+                return Size(0)
+            else:
+                return Size(lv_info.size)
+
+        return Size(0)
+
+    def autoset_md_size(self, enforced=False):
+        """ If self._metadata_size not set already, it calculates the recommended value
+        and sets it while subtracting the size from self.size.
+
+        """
+
+        log.debug("Auto-setting cache pool metadata size")
+
+        if self._size <= Size(0):
+            log.debug("Cache pool size not bigger than 0, just setting metadata size to 0")
+            self._metadata_size = 0
+            return
+
+        old_md_size = self._metadata_size
+        if self._metadata_size == 0 or enforced:
+            self._metadata_size = blockdev.lvm.cache_get_default_md_size(self._size)
+            log.debug("Using recommended metadata size: %s", self._metadata_size)
+
+        self._metadata_size = self.vg.align(self._metadata_size, roundup=True)
+        log.debug("Rounded metadata size to extents: %s MiB", self._metadata_size.convert_to("MiB"))
+
+        if self._metadata_size == old_md_size:
+            log.debug("Rounded metadata size unchanged")
+        else:
+            new_size = self.size - (self._metadata_size - old_md_size)
+            log.debug("Adjusting size from %s MiB to %s MiB",
+                      self.size.convert_to("MiB"), new_size.convert_to("MiB"))
+            self.size = new_size
+
+    def _pre_create(self):
+        # make sure all the LVs this LV should be created from exist (if any)
+        if self._from_lvs and any(not lv.exists for lv in self._from_lvs):
+            raise errors.DeviceError("Component LVs need to be created first")
+
+    def _create(self):
+        """ Create the device. """
+        log_method_call(self, self.name, status=self.status)
+        if self._cache_mode:
+            try:
+                cache_mode = blockdev.lvm.cache_get_mode_from_str(self._cache_mode)
+            except blockdev.LVMError as e:
+                raise errors.DeviceError from e
+        else:
+            cache_mode = lvm.LVM_CACHE_DEFAULT_MODE
+
+        if self._from_lvs:
+            extra = dict()
+            if self.mode:
+                # we need the string here, it will be passed directly to the lvm command
+                extra["cachemode"] = self._cache_mode
+            data_lv = six.next(lv for lv in self._internal_lvs if lv.int_lv_type == LVMInternalLVtype.data)
+            meta_lv = six.next(lv for lv in self._internal_lvs if lv.int_lv_type == LVMInternalLVtype.meta)
+            blockdev.lvm.cache_pool_convert(self.vg.name, data_lv.lvname, meta_lv.lvname, self.lvname, **extra)
+        else:
+            blockdev.lvm.cache_create_pool(self.vg.name, self.lvname, self.size,
+                                           self.metadata_size,
+                                           cache_mode,
+                                           0,
+                                           [spec.pv.path for spec in self._pv_specs])
+
+    def dracut_setup_args(self):
+        return set()
+
+    @property
+    def direct(self):
+        """ Is this device directly accessible? """
+        return False
+
+
 class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin, LVMSnapshotMixin,
                              LVMThinPoolMixin, LVMThinLogicalVolumeMixin, LVMVDOPoolMixin,
-                             LVMVDOLogicalVolumeMixin):
+                             LVMVDOLogicalVolumeMixin, LVMCachePoolMixin):
     """ An LVM Logical Volume """
 
     # generally resizable, see :property:`resizable` for details
@@ -2046,7 +2179,7 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
                  parent_lv=None, int_type=None, origin=None, vorigin=False,
                  metadata_size=None, chunk_size=None, profile=None, from_lvs=None,
                  compression=False, deduplication=False, index_memory=0,
-                 write_policy=None):
+                 write_policy=None, cache_mode=None):
         """
             :param name: the device name (generally a device node's basename)
             :type name: str
@@ -2116,6 +2249,13 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
             :keyword write_policy: write policy for the volume or None for default
             :type write_policy: str
 
+            For cache pools only:
+
+            :keyword metadata_size: the size of the metadata LV
+            :type metadata_size: :class:`~.size.Size`
+            :keyword cache_mode: mode for the cache or None for default (writethrough)
+            :type cache_mode: str
+
         """
 
         if isinstance(parents, (list, ParentList)):
@@ -2133,6 +2273,7 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
         LVMSnapshotMixin.__init__(self, origin, vorigin)
         LVMThinPoolMixin.__init__(self, metadata_size, chunk_size, profile)
         LVMThinLogicalVolumeMixin.__init__(self)
+        LVMCachePoolMixin.__init__(self, metadata_size, cache_mode)
         LVMLogicalVolumeBase.__init__(self, name, parents, size, uuid, seg_type,
                                       fmt, exists, sysfs_path, grow, maxsize,
                                       percent, cache_request, pvs, from_lvs)
@@ -2144,6 +2285,7 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
         LVMSnapshotMixin._init_check(self)
         LVMThinPoolMixin._init_check(self)
         LVMThinLogicalVolumeMixin._init_check(self)
+        LVMCachePoolMixin._init_check(self)
 
         if self._from_lvs:
             self._check_from_lvs()
@@ -2169,6 +2311,8 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
             ret.append(LVMVDOPoolMixin)
         if self.is_vdo_lv:
             ret.append(LVMVDOLogicalVolumeMixin)
+        if self.is_cache_pool:
+            ret.append(LVMCachePoolMixin)
         return ret
 
     def _try_specific_call(self, name, *args, **kwargs):
@@ -2552,6 +2696,10 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
 
         return True
 
+    @type_specific
+    def autoset_md_size(self, enforced=False):
+        pass
+
     def attach_cache(self, cache_pool_lv):
         if self.is_thin_lv or self.is_snapshot_lv or self.is_internal_lv:
             raise errors.DeviceError("Cannot attach a cache pool to the '%s' LV" % self.name)
diff --git a/tests/devices_test/lvm_test.py b/tests/devices_test/lvm_test.py
index c349f003..a1ddaf2d 100644
--- a/tests/devices_test/lvm_test.py
+++ b/tests/devices_test/lvm_test.py
@@ -867,3 +867,29 @@ class BlivetLVMVDODependenciesTest(unittest.TestCase):
 
                 vdo_supported = devicefactory.is_supported_device_type(devicefactory.DEVICE_TYPE_LVM_VDO)
                 self.assertFalse(vdo_supported)
+
+
+@unittest.skipUnless(not any(x.unavailable_type_dependencies() for x in DEVICE_CLASSES), "some required device classes are unavailable")
+class BlivetNewLVMCachePoolDeviceTest(unittest.TestCase):
+
+    def test_new_cache_pool(self):
+        b = blivet.Blivet()
+        pv = StorageDevice("pv1", fmt=blivet.formats.get_format("lvmpv"),
+                           size=Size("10 GiB"), exists=True)
+        vg = LVMVolumeGroupDevice("testvg", parents=[pv], exists=True)
+
+        for dev in (pv, vg):
+            b.devicetree._add_device(dev)
+
+        # check that all the above devices are in the expected places
+        self.assertEqual(set(b.devices), {pv, vg})
+        self.assertEqual(set(b.vgs), {vg})
+
+        self.assertEqual(vg.size, Size("10236 MiB"))
+
+        cachepool = b.new_lv(name="cachepool", cache_pool=True,
+                             parents=[vg], pvs=[pv])
+
+        b.create_device(cachepool)
+
+        self.assertEqual(cachepool.type, "lvmcachepool")
-- 
2.35.3
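
The unit test above exercises the new option without touching real storage. For
context, a minimal sketch of how the cache_pool flag is meant to be used against
a live system (device and VG names are illustrative; running it requires root
and an existing VG that already contains a fast PV):

    import blivet
    from blivet.size import Size

    b = blivet.Blivet()
    b.reset()  # scan the system

    vg = b.devicetree.get_device_by_name("testvg")     # existing VG (illustrative)
    fast_pv = b.devicetree.get_device_by_name("sdb1")  # fast PV already in the VG (illustrative)

    # cache_pool=True makes new_lv() set seg_type="cache-pool"; pvs= restricts
    # allocation to the fast PV(s), which _init_check() requires for new pools
    cpool = b.new_lv(name="cachepool", cache_pool=True, size=Size("1 GiB"),
                     parents=[vg], pvs=[fast_pv])
    b.create_device(cpool)
    b.do_it()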
From d25d52e146559d226369afdb4b102e516bd9e332 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 30 Dec 2021 16:09:04 +0100
Subject: [PATCH 2/4] examples: Add LVM cache pool example

Related: rhbz#2055198
---
 examples/lvm_cachepool.py | 59 +++++++++++++++++++++++++++++++++++++++
 1 file changed, 59 insertions(+)
 create mode 100644 examples/lvm_cachepool.py

diff --git a/examples/lvm_cachepool.py b/examples/lvm_cachepool.py
new file mode 100644
index 00000000..ab2e8a72
--- /dev/null
+++ b/examples/lvm_cachepool.py
@@ -0,0 +1,59 @@
+import os
+
+import blivet
+from blivet.size import Size
+from blivet.util import set_up_logging, create_sparse_tempfile
+
+
+set_up_logging()
+b = blivet.Blivet()   # create an instance of Blivet (don't add system devices)
+
+# create a disk image file on which to create new devices
+disk1_file = create_sparse_tempfile("disk1", Size("100GiB"))
+b.disk_images["disk1"] = disk1_file
+disk2_file = create_sparse_tempfile("disk2", Size("100GiB"))
+b.disk_images["disk2"] = disk2_file
+
+b.reset()
+
+try:
+    disk1 = b.devicetree.get_device_by_name("disk1")
+    disk2 = b.devicetree.get_device_by_name("disk2")
+
+    b.initialize_disk(disk1)
+    b.initialize_disk(disk2)
+
+    pv = b.new_partition(size=Size("50GiB"), fmt_type="lvmpv", parents=[disk1])
+    b.create_device(pv)
+    pv2 = b.new_partition(size=Size("50GiB"), fmt_type="lvmpv", parents=[disk2])
+    b.create_device(pv2)
+
+    # allocate the partitions (decide where and on which disks they'll reside)
+    blivet.partitioning.do_partitioning(b)
+
+    vg = b.new_vg(parents=[pv, pv2])
+    b.create_device(vg)
+
+    # new 5GiB LV with an ext4 filesystem (the "slow" LV to be cached)
+    lv = b.new_lv(fmt_type="ext4", size=Size("5GiB"), parents=[vg], name="cached")
+    b.create_device(lv)
+
+    # new cache pool
+    cpool = b.new_lv(size=Size("1 GiB"), parents=[vg], pvs=[pv2], cache_pool=True, name="fastlv")
+    b.create_device(cpool)
+
+    # write the new partitions to disk and format them as specified
+    b.do_it()
+    print(b.devicetree)
+
+    # attach the newly created cache pool to the "slow" LV
+    lv.attach_cache(cpool)
+
+    b.reset()
+    print(b.devicetree)
+
+    input("Check the state and hit ENTER to trigger cleanup")
+finally:
+    b.devicetree.teardown_disk_images()
+    os.unlink(disk1_file)
+    os.unlink(disk2_file)
-- 
2.35.3


From 2411d8aa082f6baf46f25d5f97455da983c0ee5f Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 30 Dec 2021 16:13:33 +0100
Subject: [PATCH 3/4] lvm: Use blivet static data when checking if the VG is
 active

Instead of calling 'lvs' again in LVMVolumeGroupDevice.status

Related: rhbz#2055198
---
 blivet/devices/lvm.py | 9 ++-------
 1 file changed, 2 insertions(+), 7 deletions(-)

diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
index 7d374c3b..9f875e4e 100644
--- a/blivet/devices/lvm.py
+++ b/blivet/devices/lvm.py
@@ -220,13 +220,8 @@ class LVMVolumeGroupDevice(ContainerDevice):
 
         # special handling for incomplete VGs
         if not self.complete:
-            try:
-                lvs_info = blockdev.lvm.lvs(vg_name=self.name)
-            except blockdev.LVMError:
-                lvs_info = []
-
-            for lv_info in lvs_info:
-                if lv_info.attr and lv_info.attr[4] == 'a':
+            for lv_info in lvs_info.cache.values():
+                if lv_info.vg_name == self.name and lv_info.attr and lv_info.attr[4] == 'a':
                     return True
 
             return False
-- 
2.35.3
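
The replacement relies on blivet's cached LVM data instead of issuing another
'lvs' call on every status check: as the commit message notes, lvs_info wraps
a single 'lvs' query whose results are reused until invalidated. A small sketch
of the lookup pattern this change uses (the VG name is illustrative):

    from blivet.static_data.lvm_info import lvs_info

    # attr[4] == 'a' in the LV attribute string marks an active LV
    active_lvs = [lv_info for lv_info in lvs_info.cache.values()
                  if lv_info.vg_name == "testvg" and lv_info.attr and lv_info.attr[4] == 'a']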
From c8fda78915f31f3d5011ada3c7463f85e181983b Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Mon, 30 May 2022 17:02:43 +0200
Subject: [PATCH 4/4] Add option to attach a newly created cache pool to
 existing LV

Because we do not have an action for attaching the cache pool, we
cannot schedule both adding the fast PV to the VG and attaching
the cache pool to an existing LV. This hack allows the attach to
be scheduled to happen after the cache pool is created.

Related: rhbz#2055198
---
 blivet/devices/lvm.py | 38 +++++++++++++++++++++++++++++++++++---
 1 file changed, 35 insertions(+), 3 deletions(-)

diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
index 9f875e4e..7e4fcf53 100644
--- a/blivet/devices/lvm.py
+++ b/blivet/devices/lvm.py
@@ -2028,9 +2028,10 @@ class LVMVDOLogicalVolumeMixin(object):
 
 
 class LVMCachePoolMixin(object):
-    def __init__(self, metadata_size, cache_mode=None):
+    def __init__(self, metadata_size, cache_mode=None, attach_to=None):
         self._metadata_size = metadata_size or Size(0)
         self._cache_mode = cache_mode
+        self._attach_to = attach_to
 
     def _init_check(self):
         if not self.is_cache_pool:
@@ -2042,6 +2043,9 @@ class LVMCachePoolMixin(object):
         if not self.exists and not self._pv_specs:
             raise ValueError("at least one fast PV must be specified to create a cache pool")
 
+        if self._attach_to and not self._attach_to.exists:
+            raise ValueError("cache pool can be attached only to an existing LV")
+
     def _check_from_lvs(self):
         if self._from_lvs:
             if len(self._from_lvs) != 2:
@@ -2150,6 +2154,31 @@ class LVMCachePoolMixin(object):
                                            cache_mode,
                                            0,
                                            [spec.pv.path for spec in self._pv_specs])
+        if self._attach_to:
+            self._attach_to.attach_cache(self)
+
+    def _post_create(self):
+        if self._attach_to:
+            # post_create tries to activate the LV and after attaching it no longer exists
+            return
+
+        # pylint: disable=bad-super-call
+        super(LVMLogicalVolumeBase, self)._post_create()
+
+    def add_hook(self, new=True):
+        if self._attach_to:
+            self._attach_to._cache = LVMCache(self._attach_to, size=self.size, exists=False,
+                                              pvs=self._pv_specs, mode=self._cache_mode)
+
+        # pylint: disable=bad-super-call
+        super(LVMLogicalVolumeBase, self).add_hook(new=new)
+
+    def remove_hook(self, modparent=True):
+        if self._attach_to:
+            self._attach_to._cache = None
+
+        # pylint: disable=bad-super-call
+        super(LVMLogicalVolumeBase, self).remove_hook(modparent=modparent)
 
     def dracut_setup_args(self):
         return set()
@@ -2174,7 +2203,7 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
                  parent_lv=None, int_type=None, origin=None, vorigin=False,
                  metadata_size=None, chunk_size=None, profile=None, from_lvs=None,
                  compression=False, deduplication=False, index_memory=0,
-                 write_policy=None, cache_mode=None):
+                 write_policy=None, cache_mode=None, attach_to=None):
         """
             :param name: the device name (generally a device node's basename)
             :type name: str
@@ -2250,6 +2279,9 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
             :type metadata_size: :class:`~.size.Size`
             :keyword cache_mode: mode for the cache or None for default (writethrough)
             :type cache_mode: str
+            :keyword attach_to: for a non-existing cache pool, an existing logical volume
+                                the pool should be attached to when created
+            :type attach_to: :class:`LVMLogicalVolumeDevice`
 
         """
 
@@ -2268,7 +2300,7 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
         LVMSnapshotMixin.__init__(self, origin, vorigin)
         LVMThinPoolMixin.__init__(self, metadata_size, chunk_size, profile)
         LVMThinLogicalVolumeMixin.__init__(self)
-        LVMCachePoolMixin.__init__(self, metadata_size, cache_mode)
+        LVMCachePoolMixin.__init__(self, metadata_size, cache_mode, attach_to)
         LVMLogicalVolumeBase.__init__(self, name, parents, size, uuid, seg_type,
                                       fmt, exists, sysfs_path, grow, maxsize,
                                       percent, cache_request, pvs, from_lvs)
-- 
2.35.3
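
Combined with patch 1, a minimal sketch of the new attach_to option (names are
illustrative; per _init_check() above, the target LV must already exist on the
system, while the cache pool itself is new):

    import blivet
    from blivet.size import Size

    b = blivet.Blivet()
    b.reset()  # scan the system

    vg = b.devicetree.get_device_by_name("testvg")              # existing VG (illustrative)
    slow_lv = b.devicetree.get_device_by_name("testvg-slowlv")  # existing LV to be cached (illustrative)
    fast_pv = b.devicetree.get_device_by_name("sdb1")           # fast PV already added to the VG (illustrative)

    # there is no separate "attach" action, so the pool is attached to slow_lv
    # right after it is created, as part of the same scheduled create action
    cpool = b.new_lv(name="cachepool", cache_pool=True, size=Size("1 GiB"),
                     parents=[vg], pvs=[fast_pv], attach_to=slow_lv)
    b.create_device(cpool)
    b.do_it()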