From 91e443af7b9f6b8d7f845f353a3897e3c91015b3 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 30 Dec 2021 16:08:43 +0100
Subject: [PATCH 1/4] Add support for creating LVM cache pools

Resolves: rhbz#2055198
---
 blivet/blivet.py               |   9 +-
 blivet/devicelibs/lvm.py       |   9 ++
 blivet/devices/lvm.py          | 160 +++++++++++++++++++++++++++++++--
 tests/devices_test/lvm_test.py |  26 ++++++
 4 files changed, 196 insertions(+), 8 deletions(-)

diff --git a/blivet/blivet.py b/blivet/blivet.py
index c6908eb0..d29fadd0 100644
--- a/blivet/blivet.py
+++ b/blivet/blivet.py
@@ -576,6 +576,8 @@ class Blivet(object):
             :type vdo_pool: bool
             :keyword vdo_lv: whether to create a vdo lv
             :type vdo_lv: bool
+            :keyword cache_pool: whether to create a cache pool
+            :type cache_pool: bool
             :returns: the new device
             :rtype: :class:`~.devices.LVMLogicalVolumeDevice`
 
@@ -594,6 +596,7 @@ class Blivet(object):
         thin_pool = kwargs.pop("thin_pool", False)
         vdo_pool = kwargs.pop("vdo_pool", False)
         vdo_lv = kwargs.pop("vdo_lv", False)
+        cache_pool = kwargs.pop("cache_pool", False)
         parent = kwargs.get("parents", [None])[0]
         if (thin_volume or vdo_lv) and parent:
             # kwargs["parents"] will contain the pool device, so...
@@ -609,6 +612,8 @@ class Blivet(object):
             kwargs["seg_type"] = "vdo-pool"
         if vdo_lv:
             kwargs["seg_type"] = "vdo"
+        if cache_pool:
+            kwargs["seg_type"] = "cache-pool"
 
         mountpoint = kwargs.pop("mountpoint", None)
         if 'fmt_type' in kwargs:
@@ -640,7 +645,7 @@ class Blivet(object):
                 swap = False
 
             prefix = ""
-            if thin_pool or vdo_pool:
+            if thin_pool or vdo_pool or cache_pool:
                 prefix = "pool"
 
             name = self.suggest_device_name(parent=vg,
@@ -651,7 +656,7 @@ class Blivet(object):
         if "%s-%s" % (vg.name, name) in self.names:
             raise ValueError("name '%s' is already in use" % name)
 
-        if thin_pool or thin_volume or vdo_pool or vdo_lv:
+        if thin_pool or thin_volume or vdo_pool or vdo_lv or cache_pool:
             cache_req = kwargs.pop("cache_request", None)
             if cache_req:
                 raise ValueError("Creating cached thin and VDO volumes and pools is not supported")
diff --git a/blivet/devicelibs/lvm.py b/blivet/devicelibs/lvm.py
index cb6f655e..724aaff4 100644
--- a/blivet/devicelibs/lvm.py
+++ b/blivet/devicelibs/lvm.py
@@ -54,6 +54,11 @@ LVM_THINP_MIN_CHUNK_SIZE = Size("64 KiB")
 LVM_THINP_MAX_CHUNK_SIZE = Size("1 GiB")
 LVM_THINP_ADDRESSABLE_CHUNK_SIZE = Size("17455015526400 B")  # 15.88 TiB
 
+# cache constants
+LVM_CACHE_MIN_METADATA_SIZE = Size("8 MiB")
+LVM_CACHE_MAX_METADATA_SIZE = Size("16 GiB")
+LVM_CACHE_DEFAULT_MODE = blockdev.LVMCacheMode.WRITETHROUGH
+
 raid_levels = raid.RAIDLevels(["linear", "striped", "raid1", "raid4", "raid5", "raid6", "raid10"])
 raid_seg_types = list(itertools.chain.from_iterable([level.names for level in raid_levels if level.name != "linear"]))
 
@@ -236,3 +241,7 @@ def recommend_thpool_chunk_size(thpool_size):
     # for every ~15.88 TiB of thinpool data size
     return min(math.ceil(thpool_size / LVM_THINP_ADDRESSABLE_CHUNK_SIZE) * LVM_THINP_MIN_CHUNK_SIZE,
                LVM_THINP_MAX_CHUNK_SIZE)
+
+
+def is_valid_cache_md_size(md_size):
+    return md_size >= LVM_CACHE_MIN_METADATA_SIZE and md_size <= LVM_CACHE_MAX_METADATA_SIZE
diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
index 4700d141..7d374c3b 100644
--- a/blivet/devices/lvm.py
+++ b/blivet/devices/lvm.py
@@ -43,6 +43,7 @@ from .. import util
 from ..storage_log import log_method_call
 from .. import udev
 from ..size import Size, KiB, MiB, ROUND_UP, ROUND_DOWN
+from ..static_data.lvm_info import lvs_info
 from ..tasks import availability
 
 import logging
@@ -646,7 +647,7 @@ class LVMLogicalVolumeBase(DMDevice, RaidDevice):
                  percent=None, cache_request=None, pvs=None, from_lvs=None):
 
         if not exists:
-            if seg_type not in [None, "linear", "thin", "thin-pool", "cache", "vdo-pool", "vdo"] + lvm.raid_seg_types:
+            if seg_type not in [None, "linear", "thin", "thin-pool", "cache", "vdo-pool", "vdo", "cache-pool"] + lvm.raid_seg_types:
                 raise ValueError("Invalid or unsupported segment type: %s" % seg_type)
             if seg_type and seg_type in lvm.raid_seg_types and not pvs:
                 raise ValueError("List of PVs has to be given for every non-linear LV")
@@ -690,8 +691,8 @@ class LVMLogicalVolumeBase(DMDevice, RaidDevice):
             # we reserve space for it
             self._metadata_size = self.vg.pe_size
             self._size -= self._metadata_size
-        elif self.seg_type == "thin-pool":
-            # LVMThinPoolMixin sets self._metadata_size on its own
+        elif self.seg_type in ("thin-pool", "cache-pool"):
+            # LVMThinPoolMixin and LVMCachePoolMixin set self._metadata_size on their own
             if not self.exists and not from_lvs and not grow:
                 # a thin pool we are not going to grow -> lets calculate metadata
                 # size now if not given explicitly
@@ -1619,7 +1620,6 @@ class LVMThinPoolMixin(object):
         """ A list of this pool's LVs """
         return self._lvs[:]     # we don't want folks changing our list
 
-    @util.requires_property("is_thin_pool")
     def autoset_md_size(self, enforced=False):
         """ If self._metadata_size not set already, it calculates the recommended value
         and sets it while subtracting the size from self.size.
@@ -2032,9 +2032,142 @@ class LVMVDOLogicalVolumeMixin(object):
             self.pool._add_log_vol(self)
 
 
+class LVMCachePoolMixin(object):
+    def __init__(self, metadata_size, cache_mode=None):
+        self._metadata_size = metadata_size or Size(0)
+        self._cache_mode = cache_mode
+
+    def _init_check(self):
+        if not self.is_cache_pool:
+            return
+
+        if self._metadata_size and not lvm.is_valid_cache_md_size(self._metadata_size):
+            raise ValueError("invalid metadatasize value")
+
+        if not self.exists and not self._pv_specs:
+            raise ValueError("at least one fast PV must be specified to create a cache pool")
+
+    def _check_from_lvs(self):
+        if self._from_lvs:
+            if len(self._from_lvs) != 2:
+                raise errors.DeviceError("two LVs required to create a cache pool")
+
+    def _convert_from_lvs(self):
+        data_lv, metadata_lv = self._from_lvs
+
+        data_lv.parent_lv = self  # also adds the LV to self._internal_lvs
+        data_lv.int_lv_type = LVMInternalLVtype.data
+        metadata_lv.parent_lv = self
+        metadata_lv.int_lv_type = LVMInternalLVtype.meta
+
+        self.size = data_lv.size
+
+    @property
+    def is_cache_pool(self):
+        return self.seg_type == "cache-pool"
+
+    @property
+    def profile(self):
+        return self._profile
+
+    @property
+    def type(self):
+        return "lvmcachepool"
+
+    @property
+    def resizable(self):
+        return False
+
+    def read_current_size(self):
+        log_method_call(self, exists=self.exists, path=self.path,
+                        sysfs_path=self.sysfs_path)
+        if self.size != Size(0):
+            return self.size
+
+        if self.exists:
+            # cache pools are not active and don't have the device mapper mapping
+            # so we can't get this from sysfs
+            lv_info = lvs_info.cache.get(self.name)
+            if lv_info is None:
+                log.error("Failed to get size for existing cache pool '%s'", self.name)
+                return Size(0)
+            else:
+                return Size(lv_info.size)
+
+        return Size(0)
+
+    def autoset_md_size(self, enforced=False):
+        """ If self._metadata_size not set already, it calculates the recommended value
+        and sets it while subtracting the size from self.size.
+
+        """
+
+        log.debug("Auto-setting cache pool metadata size")
+
+        if self._size <= Size(0):
+            log.debug("Cache pool size is not bigger than 0, just setting metadata size to 0")
+            self._metadata_size = 0
+            return
+
+        old_md_size = self._metadata_size
+        if self._metadata_size == 0 or enforced:
+            self._metadata_size = blockdev.lvm.cache_get_default_md_size(self._size)
+            log.debug("Using recommended metadata size: %s", self._metadata_size)
+
+        self._metadata_size = self.vg.align(self._metadata_size, roundup=True)
+        log.debug("Rounded metadata size to extents: %s MiB", self._metadata_size.convert_to("MiB"))
+
+        if self._metadata_size == old_md_size:
+            log.debug("Rounded metadata size unchanged")
+        else:
+            new_size = self.size - (self._metadata_size - old_md_size)
+            log.debug("Adjusting size from %s MiB to %s MiB",
+                      self.size.convert_to("MiB"), new_size.convert_to("MiB"))
+            self.size = new_size
+
+    def _pre_create(self):
+        # make sure all the LVs this LV should be created from exist (if any)
+        if self._from_lvs and any(not lv.exists for lv in self._from_lvs):
+            raise errors.DeviceError("Component LVs need to be created first")
+
+    def _create(self):
+        """ Create the device. """
+        log_method_call(self, self.name, status=self.status)
+        if self._cache_mode:
+            try:
+                cache_mode = blockdev.lvm.cache_get_mode_from_str(self._cache_mode)
+            except blockdev.LVMError as e:
+                raise errors.DeviceError from e
+        else:
+            cache_mode = lvm.LVM_CACHE_DEFAULT_MODE
+
+        if self._from_lvs:
+            extra = dict()
+            if self._cache_mode:
+                # we need the string here, it will be passed directly to the lvm command
+                extra["cachemode"] = self._cache_mode
+            data_lv = six.next(lv for lv in self._internal_lvs if lv.int_lv_type == LVMInternalLVtype.data)
+            meta_lv = six.next(lv for lv in self._internal_lvs if lv.int_lv_type == LVMInternalLVtype.meta)
+            blockdev.lvm.cache_pool_convert(self.vg.name, data_lv.lvname, meta_lv.lvname, self.lvname, **extra)
+        else:
+            blockdev.lvm.cache_create_pool(self.vg.name, self.lvname, self.size,
+                                           self.metadata_size,
+                                           cache_mode,
+                                           0,
+                                           [spec.pv.path for spec in self._pv_specs])
+
+    def dracut_setup_args(self):
+        return set()
+
+    @property
+    def direct(self):
+        """ Is this device directly accessible? """
+        return False
+
+
 class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin, LVMSnapshotMixin,
                              LVMThinPoolMixin, LVMThinLogicalVolumeMixin, LVMVDOPoolMixin,
-                             LVMVDOLogicalVolumeMixin):
+                             LVMVDOLogicalVolumeMixin, LVMCachePoolMixin):
     """ An LVM Logical Volume """
 
     # generally resizable, see :property:`resizable` for details
@@ -2046,7 +2179,7 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
                  parent_lv=None, int_type=None, origin=None, vorigin=False,
                  metadata_size=None, chunk_size=None, profile=None, from_lvs=None,
                  compression=False, deduplication=False, index_memory=0,
-                 write_policy=None):
+                 write_policy=None, cache_mode=None):
         """
             :param name: the device name (generally a device node's basename)
             :type name: str
@@ -2116,6 +2249,13 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
             :keyword write_policy: write policy for the volume or None for default
             :type write_policy: str
 
+            For cache pools only:
+
+            :keyword metadata_size: the size of the metadata LV
+            :type metadata_size: :class:`~.size.Size`
+            :keyword cache_mode: mode for the cache or None for default (writethrough)
+            :type cache_mode: str
+
         """
 
         if isinstance(parents, (list, ParentList)):
@@ -2133,6 +2273,7 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
         LVMSnapshotMixin.__init__(self, origin, vorigin)
         LVMThinPoolMixin.__init__(self, metadata_size, chunk_size, profile)
         LVMThinLogicalVolumeMixin.__init__(self)
+        LVMCachePoolMixin.__init__(self, metadata_size, cache_mode)
         LVMLogicalVolumeBase.__init__(self, name, parents, size, uuid, seg_type,
                                       fmt, exists, sysfs_path, grow, maxsize,
                                       percent, cache_request, pvs, from_lvs)
@@ -2144,6 +2285,7 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
         LVMSnapshotMixin._init_check(self)
         LVMThinPoolMixin._init_check(self)
         LVMThinLogicalVolumeMixin._init_check(self)
+        LVMCachePoolMixin._init_check(self)
 
         if self._from_lvs:
             self._check_from_lvs()
@@ -2169,6 +2311,8 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
             ret.append(LVMVDOPoolMixin)
         if self.is_vdo_lv:
             ret.append(LVMVDOLogicalVolumeMixin)
+        if self.is_cache_pool:
+            ret.append(LVMCachePoolMixin)
         return ret
 
     def _try_specific_call(self, name, *args, **kwargs):
@@ -2552,6 +2696,10 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
 
         return True
 
+    @type_specific
+    def autoset_md_size(self, enforced=False):
+        pass
+
     def attach_cache(self, cache_pool_lv):
         if self.is_thin_lv or self.is_snapshot_lv or self.is_internal_lv:
             raise errors.DeviceError("Cannot attach a cache pool to the '%s' LV" % self.name)
diff --git a/tests/devices_test/lvm_test.py b/tests/devices_test/lvm_test.py
index c349f003..a1ddaf2d 100644
--- a/tests/devices_test/lvm_test.py
+++ b/tests/devices_test/lvm_test.py
@@ -867,3 +867,29 @@ class BlivetLVMVDODependenciesTest(unittest.TestCase):
 
                 vdo_supported = devicefactory.is_supported_device_type(devicefactory.DEVICE_TYPE_LVM_VDO)
                 self.assertFalse(vdo_supported)
+
+
+@unittest.skipUnless(not any(x.unavailable_type_dependencies() for x in DEVICE_CLASSES), "some unsupported device classes required for this test")
+class BlivetNewLVMCachePoolDeviceTest(unittest.TestCase):
+
+    def test_new_cache_pool(self):
+        b = blivet.Blivet()
+        pv = StorageDevice("pv1", fmt=blivet.formats.get_format("lvmpv"),
+                           size=Size("10 GiB"), exists=True)
+        vg = LVMVolumeGroupDevice("testvg", parents=[pv], exists=True)
+
+        for dev in (pv, vg):
+            b.devicetree._add_device(dev)
+
+        # check that all the above devices are in the expected places
+        self.assertEqual(set(b.devices), {pv, vg})
+        self.assertEqual(set(b.vgs), {vg})
+
+        self.assertEqual(vg.size, Size("10236 MiB"))
+
+        cachepool = b.new_lv(name="cachepool", cache_pool=True,
+                             parents=[vg], pvs=[pv])
+
+        b.create_device(cachepool)
+
+        self.assertEqual(cachepool.type, "lvmcachepool")
-- 
2.35.3

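A minimal usage sketch for the new cache_pool keyword, mirroring the unit test above. The VG and fast-PV names are illustrative and assumed to already exist in the devicetree; the size and pvs handling follows the example script added in the next patch.

    import blivet
    from blivet.size import Size

    b = blivet.Blivet()
    b.reset()

    vg = b.devicetree.get_device_by_name("testvg")      # existing VG (illustrative name)
    fast_pv = b.devicetree.get_device_by_name("sdb1")   # fast PV already in the VG (illustrative)

    # schedule creation of a cache pool placed on the fast PV only
    cpool = b.new_lv(name="cachepool", cache_pool=True, size=Size("1 GiB"),
                     parents=[vg], pvs=[fast_pv])
    b.create_device(cpool)
    b.do_it()

    print(cpool.type)   # "lvmcachepool"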

From d25d52e146559d226369afdb4b102e516bd9e332 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 30 Dec 2021 16:09:04 +0100
Subject: [PATCH 2/4] examples: Add LVM cache pool example

Related: rhbz#2055198
---
 examples/lvm_cachepool.py | 59 +++++++++++++++++++++++++++++++++++++++
 1 file changed, 59 insertions(+)
 create mode 100644 examples/lvm_cachepool.py

diff --git a/examples/lvm_cachepool.py b/examples/lvm_cachepool.py
new file mode 100644
index 00000000..ab2e8a72
--- /dev/null
+++ b/examples/lvm_cachepool.py
@@ -0,0 +1,59 @@
+import os
+
+import blivet
+from blivet.size import Size
+from blivet.util import set_up_logging, create_sparse_tempfile
+
+
+set_up_logging()
+b = blivet.Blivet()   # create an instance of Blivet (don't add system devices)
+
+# create a disk image file on which to create new devices
+disk1_file = create_sparse_tempfile("disk1", Size("100GiB"))
+b.disk_images["disk1"] = disk1_file
+disk2_file = create_sparse_tempfile("disk2", Size("100GiB"))
+b.disk_images["disk2"] = disk2_file
+
+b.reset()
+
+try:
+    disk1 = b.devicetree.get_device_by_name("disk1")
+    disk2 = b.devicetree.get_device_by_name("disk2")
+
+    b.initialize_disk(disk1)
+    b.initialize_disk(disk2)
+
+    pv = b.new_partition(size=Size("50GiB"), fmt_type="lvmpv", parents=[disk1])
+    b.create_device(pv)
+    pv2 = b.new_partition(size=Size("50GiB"), fmt_type="lvmpv", parents=[disk2])
+    b.create_device(pv2)
+
+    # allocate the partitions (decide where and on which disks they'll reside)
+    blivet.partitioning.do_partitioning(b)
+
+    vg = b.new_vg(parents=[pv, pv2])
+    b.create_device(vg)
+
+    # new 5GiB LV with an ext4 filesystem (the "slow" LV that will be cached)
+    lv = b.new_lv(fmt_type="ext4", size=Size("5GiB"), parents=[vg], name="cached")
+    b.create_device(lv)
+
+    # new cache pool
+    cpool = b.new_lv(size=Size("1 GiB"), parents=[vg], pvs=[pv2], cache_pool=True, name="fastlv")
+    b.create_device(cpool)
+
+    # write the new partitions to disk and format them as specified
+    b.do_it()
+    print(b.devicetree)
+
+    # attach the newly created cache pool to the "slow" LV
+    lv.attach_cache(cpool)
+
+    b.reset()
+    print(b.devicetree)
+
+    input("Check the state and hit ENTER to trigger cleanup")
+finally:
+    b.devicetree.teardown_disk_images()
+    os.unlink(disk1_file)
+    os.unlink(disk2_file)
-- 
2.35.3


From 2411d8aa082f6baf46f25d5f97455da983c0ee5f Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 30 Dec 2021 16:13:33 +0100
Subject: [PATCH 3/4] lvm: Use blivet static data when checking if the VG is
 active

Instead of calling 'lvs' again in LVMVolumeGroupDevice.status

Related: rhbz#2055198
---
 blivet/devices/lvm.py | 9 ++-------
 1 file changed, 2 insertions(+), 7 deletions(-)

diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
index 7d374c3b..9f875e4e 100644
--- a/blivet/devices/lvm.py
+++ b/blivet/devices/lvm.py
@@ -220,13 +220,8 @@ class LVMVolumeGroupDevice(ContainerDevice):
 
         # special handling for incomplete VGs
         if not self.complete:
-            try:
-                lvs_info = blockdev.lvm.lvs(vg_name=self.name)
-            except blockdev.LVMError:
-                lvs_info = []
-
-            for lv_info in lvs_info:
-                if lv_info.attr and lv_info.attr[4] == 'a':
+            for lv_info in lvs_info.cache.values():
+                if lv_info.vg_name == self.name and lv_info.attr and lv_info.attr[4] == 'a':
                     return True
 
             return False
-- 
2.35.3

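For reference, a standalone sketch of the pattern this patch switches to: the VG activity check now reads blivet's cached 'lvs' output instead of running 'lvs' again for each VG. The helper name below is hypothetical; the cached values are the same objects the hunk above inspects via their vg_name and attr fields.

    from blivet.static_data.lvm_info import lvs_info

    def vg_has_active_lv(vg_name):
        # an LV is reported active when the fifth character of its attr string is 'a'
        for lv_info in lvs_info.cache.values():
            if lv_info.vg_name == vg_name and lv_info.attr and lv_info.attr[4] == 'a':
                return True
        return False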

From c8fda78915f31f3d5011ada3c7463f85e181983b Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Mon, 30 May 2022 17:02:43 +0200
Subject: [PATCH 4/4] Add option to attach a newly created cache pool to an
 existing LV

Because we do not have an action for attaching the cache pool, we
cannot schedule both adding the fast PV to the VG and attaching
the cache pool to an existing LV. This hack allows the attach to
be scheduled to happen right after the cache pool is created.

Related: rhbz#2055198
---
 blivet/devices/lvm.py | 38 +++++++++++++++++++++++++++++++++++---
 1 file changed, 35 insertions(+), 3 deletions(-)

diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
index 9f875e4e..7e4fcf53 100644
--- a/blivet/devices/lvm.py
+++ b/blivet/devices/lvm.py
@@ -2028,9 +2028,10 @@ class LVMVDOLogicalVolumeMixin(object):
 
 
 class LVMCachePoolMixin(object):
-    def __init__(self, metadata_size, cache_mode=None):
+    def __init__(self, metadata_size, cache_mode=None, attach_to=None):
         self._metadata_size = metadata_size or Size(0)
         self._cache_mode = cache_mode
+        self._attach_to = attach_to
 
     def _init_check(self):
         if not self.is_cache_pool:
@@ -2042,6 +2043,9 @@ class LVMCachePoolMixin(object):
         if not self.exists and not self._pv_specs:
             raise ValueError("at least one fast PV must be specified to create a cache pool")
 
+        if self._attach_to and not self._attach_to.exists:
+            raise ValueError("cache pool can be attached only to an existing LV")
+
     def _check_from_lvs(self):
         if self._from_lvs:
             if len(self._from_lvs) != 2:
@@ -2150,6 +2154,31 @@ class LVMCachePoolMixin(object):
                                            cache_mode,
                                            0,
                                            [spec.pv.path for spec in self._pv_specs])
+        if self._attach_to:
+            self._attach_to.attach_cache(self)
+
+    def _post_create(self):
+        if self._attach_to:
+            # post_create tries to activate the LV, but once attached the cache pool no longer exists as a separate LV
+            return
+
+        # pylint: disable=bad-super-call
+        super(LVMLogicalVolumeBase, self)._post_create()
+
+    def add_hook(self, new=True):
+        if self._attach_to:
+            self._attach_to._cache = LVMCache(self._attach_to, size=self.size, exists=False,
+                                              pvs=self._pv_specs, mode=self._cache_mode)
+
+        # pylint: disable=bad-super-call
+        super(LVMLogicalVolumeBase, self).add_hook(new=new)
+
+    def remove_hook(self, modparent=True):
+        if self._attach_to:
+            self._attach_to._cache = None
+
+        # pylint: disable=bad-super-call
+        super(LVMLogicalVolumeBase, self).remove_hook(modparent=modparent)
 
     def dracut_setup_args(self):
         return set()
@@ -2174,7 +2203,7 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
                  parent_lv=None, int_type=None, origin=None, vorigin=False,
                  metadata_size=None, chunk_size=None, profile=None, from_lvs=None,
                  compression=False, deduplication=False, index_memory=0,
-                 write_policy=None, cache_mode=None):
+                 write_policy=None, cache_mode=None, attach_to=None):
         """
             :param name: the device name (generally a device node's basename)
             :type name: str
@@ -2250,6 +2279,9 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
             :type metadata_size: :class:`~.size.Size`
             :keyword cache_mode: mode for the cache or None for default (writethrough)
             :type cache_mode: str
+            :keyword attach_to: for a non-existent cache pool, the existing logical volume the
+                                pool should be attached to once it is created
+            :type attach_to: :class:`LVMLogicalVolumeDevice`
 
         """
 
@@ -2268,7 +2300,7 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
         LVMSnapshotMixin.__init__(self, origin, vorigin)
         LVMThinPoolMixin.__init__(self, metadata_size, chunk_size, profile)
         LVMThinLogicalVolumeMixin.__init__(self)
-        LVMCachePoolMixin.__init__(self, metadata_size, cache_mode)
+        LVMCachePoolMixin.__init__(self, metadata_size, cache_mode, attach_to)
         LVMLogicalVolumeBase.__init__(self, name, parents, size, uuid, seg_type,
                                       fmt, exists, sysfs_path, grow, maxsize,
                                       percent, cache_request, pvs, from_lvs)
-- 
2.35.3
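
A short sketch of the attach_to workflow this patch enables, assuming new_lv forwards the keyword to LVMLogicalVolumeDevice. Device names are illustrative; the LV being cached must already exist, while the cache pool itself is only scheduled here.

    import blivet
    from blivet.size import Size

    b = blivet.Blivet()
    b.reset()

    vg = b.devicetree.get_device_by_name("testvg")             # existing VG (illustrative)
    slow_lv = b.devicetree.get_device_by_name("testvg-data")   # existing LV to be cached (illustrative)
    fast_pv = b.devicetree.get_device_by_name("sdc1")          # fast PV already added to the VG (illustrative)

    # the cache pool is scheduled for creation and will be attached to
    # slow_lv right after it is created during do_it()
    cpool = b.new_lv(name="cachepool", cache_pool=True, size=Size("1 GiB"),
                     parents=[vg], pvs=[fast_pv], attach_to=slow_lv)
    b.create_device(cpool)
    b.do_it()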