|
|
2a10b1 |
From 91e443af7b9f6b8d7f845f353a3897e3c91015b3 Mon Sep 17 00:00:00 2001
|
|
|
2a10b1 |
From: Vojtech Trefny <vtrefny@redhat.com>
|
|
|
2a10b1 |
Date: Thu, 30 Dec 2021 16:08:43 +0100
|
|
|
2a10b1 |
Subject: [PATCH 1/4] Add support for creating LVM cache pools
|
|
|
2a10b1 |
|
|
|
2a10b1 |
Resolves: rhbz#2055198
|
|
|
2a10b1 |
---
|
|
|
2a10b1 |
blivet/blivet.py | 9 +-
|
|
|
2a10b1 |
blivet/devicelibs/lvm.py | 9 ++
|
|
|
2a10b1 |
blivet/devices/lvm.py | 160 +++++++++++++++++++++++++++++++--
|
|
|
2a10b1 |
tests/devices_test/lvm_test.py | 26 ++++++
|
|
|
2a10b1 |
4 files changed, 196 insertions(+), 8 deletions(-)
|
|
|
2a10b1 |
|
|
|
2a10b1 |
diff --git a/blivet/blivet.py b/blivet/blivet.py
|
|
|
2a10b1 |
index c6908eb0..d29fadd0 100644
|
|
|
2a10b1 |
--- a/blivet/blivet.py
|
|
|
2a10b1 |
+++ b/blivet/blivet.py
|
|
|
2a10b1 |
@@ -576,6 +576,8 @@ class Blivet(object):
|
|
|
2a10b1 |
:type vdo_pool: bool
|
|
|
2a10b1 |
:keyword vdo_lv: whether to create a vdo lv
|
|
|
2a10b1 |
:type vdo_lv: bool
|
|
|
2a10b1 |
+ :keyword cache_pool: whether to create a cache pool
|
|
|
2a10b1 |
+ :type cache_pool: bool
|
|
|
2a10b1 |
:returns: the new device
|
|
|
2a10b1 |
:rtype: :class:`~.devices.LVMLogicalVolumeDevice`
|
|
|
2a10b1 |
|
|
|
2a10b1 |
@@ -594,6 +596,7 @@ class Blivet(object):
|
|
|
2a10b1 |
thin_pool = kwargs.pop("thin_pool", False)
|
|
|
2a10b1 |
vdo_pool = kwargs.pop("vdo_pool", False)
|
|
|
2a10b1 |
vdo_lv = kwargs.pop("vdo_lv", False)
|
|
|
2a10b1 |
+ cache_pool = kwargs.pop("cache_pool", False)
|
|
|
2a10b1 |
parent = kwargs.get("parents", [None])[0]
|
|
|
2a10b1 |
if (thin_volume or vdo_lv) and parent:
|
|
|
2a10b1 |
# kwargs["parents"] will contain the pool device, so...
|
|
|
2a10b1 |
@@ -609,6 +612,8 @@ class Blivet(object):
|
|
|
2a10b1 |
kwargs["seg_type"] = "vdo-pool"
|
|
|
2a10b1 |
if vdo_lv:
|
|
|
2a10b1 |
kwargs["seg_type"] = "vdo"
|
|
|
2a10b1 |
+ if cache_pool:
|
|
|
2a10b1 |
+ kwargs["seg_type"] = "cache-pool"
|
|
|
2a10b1 |
|
|
|
2a10b1 |
mountpoint = kwargs.pop("mountpoint", None)
|
|
|
2a10b1 |
if 'fmt_type' in kwargs:
|
|
|
2a10b1 |
@@ -640,7 +645,7 @@ class Blivet(object):
|
|
|
2a10b1 |
swap = False
|
|
|
2a10b1 |
|
|
|
2a10b1 |
prefix = ""
|
|
|
2a10b1 |
- if thin_pool or vdo_pool:
|
|
|
2a10b1 |
+ if thin_pool or vdo_pool or cache_pool:
|
|
|
2a10b1 |
prefix = "pool"
|
|
|
2a10b1 |
|
|
|
2a10b1 |
name = self.suggest_device_name(parent=vg,
|
|
|
2a10b1 |
@@ -651,7 +656,7 @@ class Blivet(object):
|
|
|
2a10b1 |
if "%s-%s" % (vg.name, name) in self.names:
|
|
|
2a10b1 |
raise ValueError("name '%s' is already in use" % name)
|
|
|
2a10b1 |
|
|
|
2a10b1 |
- if thin_pool or thin_volume or vdo_pool or vdo_lv:
|
|
|
2a10b1 |
+ if thin_pool or thin_volume or vdo_pool or vdo_lv or cache_pool:
|
|
|
2a10b1 |
cache_req = kwargs.pop("cache_request", None)
|
|
|
2a10b1 |
if cache_req:
|
|
|
2a10b1 |
raise ValueError("Creating cached thin and VDO volumes and pools is not supported")
|
|
|
2a10b1 |
diff --git a/blivet/devicelibs/lvm.py b/blivet/devicelibs/lvm.py
|
|
|
2a10b1 |
index cb6f655e..724aaff4 100644
|
|
|
2a10b1 |
--- a/blivet/devicelibs/lvm.py
|
|
|
2a10b1 |
+++ b/blivet/devicelibs/lvm.py
|
|
|
2a10b1 |
@@ -54,6 +54,11 @@ LVM_THINP_MIN_CHUNK_SIZE = Size("64 KiB")
|
|
|
2a10b1 |
LVM_THINP_MAX_CHUNK_SIZE = Size("1 GiB")
|
|
|
2a10b1 |
LVM_THINP_ADDRESSABLE_CHUNK_SIZE = Size("17455015526400 B") # 15.88 TiB
|
|
|
2a10b1 |
|
|
|
2a10b1 |
+# cache constants
|
|
|
2a10b1 |
+LVM_CACHE_MIN_METADATA_SIZE = Size("8 MiB")
|
|
|
2a10b1 |
+LVM_CACHE_MAX_METADATA_SIZE = Size("16 GiB")
|
|
|
2a10b1 |
+LVM_CACHE_DEFAULT_MODE = blockdev.LVMCacheMode.WRITETHROUGH
|
|
|
2a10b1 |
+
|
|
|
2a10b1 |
raid_levels = raid.RAIDLevels(["linear", "striped", "raid1", "raid4", "raid5", "raid6", "raid10"])
|
|
|
2a10b1 |
raid_seg_types = list(itertools.chain.from_iterable([level.names for level in raid_levels if level.name != "linear"]))
|
|
|
2a10b1 |
|
|
|
2a10b1 |
@@ -236,3 +241,7 @@ def recommend_thpool_chunk_size(thpool_size):
|
|
|
2a10b1 |
# for every ~15.88 TiB of thinpool data size
|
|
|
2a10b1 |
return min(math.ceil(thpool_size / LVM_THINP_ADDRESSABLE_CHUNK_SIZE) * LVM_THINP_MIN_CHUNK_SIZE,
|
|
|
2a10b1 |
LVM_THINP_MAX_CHUNK_SIZE)
|
|
|
2a10b1 |
+
|
|
|
2a10b1 |
+
|
|
|
2a10b1 |
+def is_valid_cache_md_size(md_size):
|
|
|
2a10b1 |
+ return md_size >= LVM_CACHE_MIN_METADATA_SIZE and md_size <= LVM_CACHE_MAX_METADATA_SIZE
|
|
|
2a10b1 |
diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
|
|
|
2a10b1 |
index 4700d141..7d374c3b 100644
|
|
|
2a10b1 |
--- a/blivet/devices/lvm.py
|
|
|
2a10b1 |
+++ b/blivet/devices/lvm.py
|
|
|
2a10b1 |
@@ -43,6 +43,7 @@ from .. import util
|
|
|
2a10b1 |
from ..storage_log import log_method_call
|
|
|
2a10b1 |
from .. import udev
|
|
|
2a10b1 |
from ..size import Size, KiB, MiB, ROUND_UP, ROUND_DOWN
|
|
|
2a10b1 |
+from ..static_data.lvm_info import lvs_info
|
|
|
2a10b1 |
from ..tasks import availability
|
|
|
2a10b1 |
|
|
|
2a10b1 |
import logging
|
|
|
2a10b1 |
@@ -646,7 +647,7 @@ class LVMLogicalVolumeBase(DMDevice, RaidDevice):
|
|
|
2a10b1 |
percent=None, cache_request=None, pvs=None, from_lvs=None):
|
|
|
2a10b1 |
|
|
|
2a10b1 |
if not exists:
|
|
|
2a10b1 |
- if seg_type not in [None, "linear", "thin", "thin-pool", "cache", "vdo-pool", "vdo"] + lvm.raid_seg_types:
|
|
|
2a10b1 |
+ if seg_type not in [None, "linear", "thin", "thin-pool", "cache", "vdo-pool", "vdo", "cache-pool"] + lvm.raid_seg_types:
|
|
|
2a10b1 |
raise ValueError("Invalid or unsupported segment type: %s" % seg_type)
|
|
|
2a10b1 |
if seg_type and seg_type in lvm.raid_seg_types and not pvs:
|
|
|
2a10b1 |
raise ValueError("List of PVs has to be given for every non-linear LV")
|
|
|
2a10b1 |
@@ -690,8 +691,8 @@ class LVMLogicalVolumeBase(DMDevice, RaidDevice):
|
|
|
2a10b1 |
# we reserve space for it
|
|
|
2a10b1 |
self._metadata_size = self.vg.pe_size
|
|
|
2a10b1 |
self._size -= self._metadata_size
|
|
|
2a10b1 |
- elif self.seg_type == "thin-pool":
|
|
|
2a10b1 |
- # LVMThinPoolMixin sets self._metadata_size on its own
|
|
|
2a10b1 |
+ elif self.seg_type in ("thin-pool", "cache-pool"):
|
|
|
2a10b1 |
+ # LVMThinPoolMixin and LVMCachePoolMixin set self._metadata_size on their own
|
|
|
2a10b1 |
if not self.exists and not from_lvs and not grow:
|
|
|
2a10b1 |
# a thin pool we are not going to grow -> lets calculate metadata
|
|
|
2a10b1 |
# size now if not given explicitly
|
|
|
2a10b1 |
@@ -1619,7 +1620,6 @@ class LVMThinPoolMixin(object):
|
|
|
2a10b1 |
""" A list of this pool's LVs """
|
|
|
2a10b1 |
return self._lvs[:] # we don't want folks changing our list
|
|
|
2a10b1 |
|
|
|
2a10b1 |
- @util.requires_property("is_thin_pool")
|
|
|
2a10b1 |
def autoset_md_size(self, enforced=False):
|
|
|
2a10b1 |
""" If self._metadata_size not set already, it calculates the recommended value
|
|
|
2a10b1 |
and sets it while subtracting the size from self.size.
|
|
|
2a10b1 |
@@ -2032,9 +2032,142 @@ class LVMVDOLogicalVolumeMixin(object):
|
|
|
2a10b1 |
self.pool._add_log_vol(self)
|
|
|
2a10b1 |
|
|
|
2a10b1 |
|
|
|
2a10b1 |
+class LVMCachePoolMixin(object):
|
|
|
2a10b1 |
+ def __init__(self, metadata_size, cache_mode=None):
|
|
|
2a10b1 |
+ self._metadata_size = metadata_size or Size(0)
|
|
|
2a10b1 |
+ self._cache_mode = cache_mode
|
|
|
2a10b1 |
+
|
|
|
2a10b1 |
+ def _init_check(self):
|
|
|
2a10b1 |
+ if not self.is_cache_pool:
|
|
|
2a10b1 |
+ return
|
|
|
2a10b1 |
+
|
|
|
2a10b1 |
+ if self._metadata_size and not lvm.is_valid_cache_md_size(self._metadata_size):
|
|
|
2a10b1 |
+ raise ValueError("invalid metadatasize value")
|
|
|
2a10b1 |
+
|
|
|
2a10b1 |
+ if not self.exists and not self._pv_specs:
|
|
|
2a10b1 |
+ raise ValueError("at least one fast PV must be specified to create a cache pool")
|
|
|
2a10b1 |
+
|
|
|
2a10b1 |
+ def _check_from_lvs(self):
|
|
|
2a10b1 |
+ if self._from_lvs:
|
|
|
2a10b1 |
+ if len(self._from_lvs) != 2:
|
|
|
2a10b1 |
+ raise errors.DeviceError("two LVs required to create a cache pool")
|
|
|
2a10b1 |
+
|
|
|
2a10b1 |
+ def _convert_from_lvs(self):
|
|
|
2a10b1 |
+ data_lv, metadata_lv = self._from_lvs
|
|
|
2a10b1 |
+
|
|
|
2a10b1 |
+ data_lv.parent_lv = self # also adds the LV to self._internal_lvs
|
|
|
2a10b1 |
+ data_lv.int_lv_type = LVMInternalLVtype.data
|
|
|
2a10b1 |
+ metadata_lv.parent_lv = self
|
|
|
2a10b1 |
+ metadata_lv.int_lv_type = LVMInternalLVtype.meta
|
|
|
2a10b1 |
+
|
|
|
2a10b1 |
+ self.size = data_lv.size
|
|
|
2a10b1 |
+
|
|
|
2a10b1 |
+ @property
|
|
|
2a10b1 |
+ def is_cache_pool(self):
|
|
|
2a10b1 |
+ return self.seg_type == "cache-pool"
|
|
|
2a10b1 |
+
|
|
|
2a10b1 |
+ @property
|
|
|
2a10b1 |
+ def profile(self):
|
|
|
2a10b1 |
+ return self._profile
|
|
|
2a10b1 |
+
|
|
|
2a10b1 |
+ @property
|
|
|
2a10b1 |
+ def type(self):
|
|
|
2a10b1 |
+ return "lvmcachepool"
|
|
|
2a10b1 |
+
|
|
|
2a10b1 |
+ @property
|
|
|
2a10b1 |
+ def resizable(self):
|
|
|
2a10b1 |
+ return False
|
|
|
2a10b1 |
+
|
|
|
2a10b1 |
+ def read_current_size(self):
|
|
|
2a10b1 |
+ log_method_call(self, exists=self.exists, path=self.path,
|
|
|
2a10b1 |
+ sysfs_path=self.sysfs_path)
|
|
|
2a10b1 |
+ if self.size != Size(0):
|
|
|
2a10b1 |
+ return self.size
|
|
|
2a10b1 |
+
|
|
|
2a10b1 |
+ if self.exists:
|
|
|
2a10b1 |
+ # cache pools are not active and don't have the device mapper mapping
|
|
|
2a10b1 |
+ # so we can't get this from sysfs
|
|
|
2a10b1 |
+ lv_info = lvs_info.cache.get(self.name)
|
|
|
2a10b1 |
+ if lv_info is None:
|
|
|
2a10b1 |
+ log.error("Failed to get size for existing cache pool '%s'", self.name)
|
|
|
2a10b1 |
+ return Size(0)
|
|
|
2a10b1 |
+ else:
|
|
|
2a10b1 |
+ return Size(lv_info.size)
|
|
|
2a10b1 |
+
|
|
|
2a10b1 |
+ return Size(0)
|
|
|
2a10b1 |
+
|
|
|
2a10b1 |
+ def autoset_md_size(self, enforced=False):
|
|
|
2a10b1 |
+ """ If self._metadata_size not set already, it calculates the recommended value
|
|
|
2a10b1 |
+ and sets it while subtracting the size from self.size.
|
|
|
2a10b1 |
+
|
|
|
2a10b1 |
+ """
|
|
|
2a10b1 |
+
|
|
|
2a10b1 |
+ log.debug("Auto-setting cache pool metadata size")
|
|
|
2a10b1 |
+
|
|
|
2a10b1 |
+ if self._size <= Size(0):
|
|
|
2a10b1 |
+ log.debug("Cache pool size not bigger than 0, just setting metadata size to 0")
|
|
|
2a10b1 |
+ self._metadata_size = 0
|
|
|
2a10b1 |
+ return
|
|
|
2a10b1 |
+
|
|
|
2a10b1 |
+ old_md_size = self._metadata_size
|
|
|
2a10b1 |
+ if self._metadata_size == 0 or enforced:
|
|
|
2a10b1 |
+ self._metadata_size = blockdev.lvm.cache_get_default_md_size(self._size)
|
|
|
2a10b1 |
+ log.debug("Using recommended metadata size: %s", self._metadata_size)
|
|
|
2a10b1 |
+
|
|
|
2a10b1 |
+ self._metadata_size = self.vg.align(self._metadata_size, roundup=True)
|
|
|
2a10b1 |
+ log.debug("Rounded metadata size to extents: %s MiB", self._metadata_size.convert_to("MiB"))
|
|
|
2a10b1 |
+
|
|
|
2a10b1 |
+ if self._metadata_size == old_md_size:
|
|
|
2a10b1 |
+ log.debug("Rounded metadata size unchanged")
|
|
|
2a10b1 |
+ else:
|
|
|
2a10b1 |
+ new_size = self.size - (self._metadata_size - old_md_size)
|
|
|
2a10b1 |
+ log.debug("Adjusting size from %s MiB to %s MiB",
|
|
|
2a10b1 |
+ self.size.convert_to("MiB"), new_size.convert_to("MiB"))
|
|
|
2a10b1 |
+ self.size = new_size
|
|
|
2a10b1 |
+
|
|
|
2a10b1 |
+ def _pre_create(self):
|
|
|
2a10b1 |
+ # make sure all the LVs this LV should be created from exist (if any)
|
|
|
2a10b1 |
+ if self._from_lvs and any(not lv.exists for lv in self._from_lvs):
|
|
|
2a10b1 |
+ raise errors.DeviceError("Component LVs need to be created first")
|
|
|
2a10b1 |
+
|
|
|
2a10b1 |
+ def _create(self):
|
|
|
2a10b1 |
+ """ Create the device. """
|
|
|
2a10b1 |
+ log_method_call(self, self.name, status=self.status)
|
|
|
2a10b1 |
+ if self._cache_mode:
|
|
|
2a10b1 |
+ try:
|
|
|
2a10b1 |
+ cache_mode = blockdev.lvm.cache_get_mode_from_str(self._cache_mode)
|
|
|
2a10b1 |
+ except blockdev.LVMError as e:
|
|
|
2a10b1 |
+ raise errors.DeviceError from e
|
|
|
2a10b1 |
+ else:
|
|
|
2a10b1 |
+ cache_mode = lvm.LVM_CACHE_DEFAULT_MODE
|
|
|
2a10b1 |
+
|
|
|
2a10b1 |
+ if self._from_lvs:
|
|
|
2a10b1 |
+ extra = dict()
|
|
|
2a10b1 |
+ if self._cache_mode:
|
|
|
2a10b1 |
+ # we need the string here, it will be passed directly to the lvm command
|
|
|
2a10b1 |
+ extra["cachemode"] = self._cache_mode
|
|
|
2a10b1 |
+ data_lv = six.next(lv for lv in self._internal_lvs if lv.int_lv_type == LVMInternalLVtype.data)
|
|
|
2a10b1 |
+ meta_lv = six.next(lv for lv in self._internal_lvs if lv.int_lv_type == LVMInternalLVtype.meta)
|
|
|
2a10b1 |
+ blockdev.lvm.cache_pool_convert(self.vg.name, data_lv.lvname, meta_lv.lvname, self.lvname, **extra)
|
|
|
2a10b1 |
+ else:
|
|
|
2a10b1 |
+ blockdev.lvm.cache_create_pool(self.vg.name, self.lvname, self.size,
|
|
|
2a10b1 |
+ self.metadata_size,
|
|
|
2a10b1 |
+ cache_mode,
|
|
|
2a10b1 |
+ 0,
|
|
|
2a10b1 |
+ [spec.pv.path for spec in self._pv_specs])
|
|
|
2a10b1 |
+
|
|
|
2a10b1 |
+ def dracut_setup_args(self):
|
|
|
2a10b1 |
+ return set()
|
|
|
2a10b1 |
+
|
|
|
2a10b1 |
+ @property
|
|
|
2a10b1 |
+ def direct(self):
|
|
|
2a10b1 |
+ """ Is this device directly accessible? """
|
|
|
2a10b1 |
+ return False
|
|
|
2a10b1 |
+
|
|
|
2a10b1 |
+
|
|
|
2a10b1 |
class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin, LVMSnapshotMixin,
|
|
|
2a10b1 |
LVMThinPoolMixin, LVMThinLogicalVolumeMixin, LVMVDOPoolMixin,
|
|
|
2a10b1 |
- LVMVDOLogicalVolumeMixin):
|
|
|
2a10b1 |
+ LVMVDOLogicalVolumeMixin, LVMCachePoolMixin):
|
|
|
2a10b1 |
""" An LVM Logical Volume """
|
|
|
2a10b1 |
|
|
|
2a10b1 |
# generally resizable, see :property:`resizable` for details
|
|
|
2a10b1 |
@@ -2046,7 +2179,7 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
|
|
|
2a10b1 |
parent_lv=None, int_type=None, origin=None, vorigin=False,
|
|
|
2a10b1 |
metadata_size=None, chunk_size=None, profile=None, from_lvs=None,
|
|
|
2a10b1 |
compression=False, deduplication=False, index_memory=0,
|
|
|
2a10b1 |
- write_policy=None):
|
|
|
2a10b1 |
+ write_policy=None, cache_mode=None):
|
|
|
2a10b1 |
"""
|
|
|
2a10b1 |
:param name: the device name (generally a device node's basename)
|
|
|
2a10b1 |
:type name: str
|
|
|
2a10b1 |
@@ -2116,6 +2249,13 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
|
|
|
2a10b1 |
:keyword write_policy: write policy for the volume or None for default
|
|
|
2a10b1 |
:type write_policy: str
|
|
|
2a10b1 |
|
|
|
2a10b1 |
+ For cache pools only:
|
|
|
2a10b1 |
+
|
|
|
2a10b1 |
+ :keyword metadata_size: the size of the metadata LV
|
|
|
2a10b1 |
+ :type metadata_size: :class:`~.size.Size`
|
|
|
2a10b1 |
+ :keyword cache_mode: mode for the cache or None for default (writethrough)
|
|
|
2a10b1 |
+ :type cache_mode: str
|
|
|
2a10b1 |
+
|
|
|
2a10b1 |
"""
|
|
|
2a10b1 |
|
|
|
2a10b1 |
if isinstance(parents, (list, ParentList)):
|
|
|
2a10b1 |
@@ -2133,6 +2273,7 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
|
|
|
2a10b1 |
LVMSnapshotMixin.__init__(self, origin, vorigin)
|
|
|
2a10b1 |
LVMThinPoolMixin.__init__(self, metadata_size, chunk_size, profile)
|
|
|
2a10b1 |
LVMThinLogicalVolumeMixin.__init__(self)
|
|
|
2a10b1 |
+ LVMCachePoolMixin.__init__(self, metadata_size, cache_mode)
|
|
|
2a10b1 |
LVMLogicalVolumeBase.__init__(self, name, parents, size, uuid, seg_type,
|
|
|
2a10b1 |
fmt, exists, sysfs_path, grow, maxsize,
|
|
|
2a10b1 |
percent, cache_request, pvs, from_lvs)
|
|
|
2a10b1 |
@@ -2144,6 +2285,7 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
|
|
|
2a10b1 |
LVMSnapshotMixin._init_check(self)
|
|
|
2a10b1 |
LVMThinPoolMixin._init_check(self)
|
|
|
2a10b1 |
LVMThinLogicalVolumeMixin._init_check(self)
|
|
|
2a10b1 |
+ LVMCachePoolMixin._init_check(self)
|
|
|
2a10b1 |
|
|
|
2a10b1 |
if self._from_lvs:
|
|
|
2a10b1 |
self._check_from_lvs()
|
|
|
2a10b1 |
@@ -2169,6 +2311,8 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
|
|
|
2a10b1 |
ret.append(LVMVDOPoolMixin)
|
|
|
2a10b1 |
if self.is_vdo_lv:
|
|
|
2a10b1 |
ret.append(LVMVDOLogicalVolumeMixin)
|
|
|
2a10b1 |
+ if self.is_cache_pool:
|
|
|
2a10b1 |
+ ret.append(LVMCachePoolMixin)
|
|
|
2a10b1 |
return ret
|
|
|
2a10b1 |
|
|
|
2a10b1 |
def _try_specific_call(self, name, *args, **kwargs):
|
|
|
2a10b1 |
@@ -2552,6 +2696,10 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
|
|
|
2a10b1 |
|
|
|
2a10b1 |
return True
|
|
|
2a10b1 |
|
|
|
2a10b1 |
+ @type_specific
|
|
|
2a10b1 |
+ def autoset_md_size(self, enforced=False):
|
|
|
2a10b1 |
+ pass
|
|
|
2a10b1 |
+
|
|
|
2a10b1 |
def attach_cache(self, cache_pool_lv):
|
|
|
2a10b1 |
if self.is_thin_lv or self.is_snapshot_lv or self.is_internal_lv:
|
|
|
2a10b1 |
raise errors.DeviceError("Cannot attach a cache pool to the '%s' LV" % self.name)
|
|
|
2a10b1 |
diff --git a/tests/devices_test/lvm_test.py b/tests/devices_test/lvm_test.py
|
|
|
2a10b1 |
index c349f003..a1ddaf2d 100644
|
|
|
2a10b1 |
--- a/tests/devices_test/lvm_test.py
|
|
|
2a10b1 |
+++ b/tests/devices_test/lvm_test.py
|
|
|
2a10b1 |
@@ -867,3 +867,29 @@ class BlivetLVMVDODependenciesTest(unittest.TestCase):
|
|
|
2a10b1 |
|
|
|
2a10b1 |
vdo_supported = devicefactory.is_supported_device_type(devicefactory.DEVICE_TYPE_LVM_VDO)
|
|
|
2a10b1 |
self.assertFalse(vdo_supported)
|
|
|
2a10b1 |
+
|
|
|
2a10b1 |
+
|
|
|
2a10b1 |
+@unittest.skipUnless(not any(x.unavailable_type_dependencies() for x in DEVICE_CLASSES), "some unsupported device classes required for this test")
|
|
|
2a10b1 |
+class BlivetNewLVMCachePoolDeviceTest(unittest.TestCase):
|
|
|
2a10b1 |
+
|
|
|
2a10b1 |
+ def test_new_cache_pool(self):
|
|
|
2a10b1 |
+ b = blivet.Blivet()
|
|
|
2a10b1 |
+ pv = StorageDevice("pv1", fmt=blivet.formats.get_format("lvmpv"),
|
|
|
2a10b1 |
+ size=Size("10 GiB"), exists=True)
|
|
|
2a10b1 |
+ vg = LVMVolumeGroupDevice("testvg", parents=[pv], exists=True)
|
|
|
2a10b1 |
+
|
|
|
2a10b1 |
+ for dev in (pv, vg):
|
|
|
2a10b1 |
+ b.devicetree._add_device(dev)
|
|
|
2a10b1 |
+
|
|
|
2a10b1 |
+ # check that all the above devices are in the expected places
|
|
|
2a10b1 |
+ self.assertEqual(set(b.devices), {pv, vg})
|
|
|
2a10b1 |
+ self.assertEqual(set(b.vgs), {vg})
|
|
|
2a10b1 |
+
|
|
|
2a10b1 |
+ self.assertEqual(vg.size, Size("10236 MiB"))
|
|
|
2a10b1 |
+
|
|
|
2a10b1 |
+ cachepool = b.new_lv(name="cachepool", cache_pool=True,
|
|
|
2a10b1 |
+ parents=[vg], pvs=[pv])
|
|
|
2a10b1 |
+
|
|
|
2a10b1 |
+ b.create_device(cachepool)
|
|
|
2a10b1 |
+
|
|
|
2a10b1 |
+ self.assertEqual(cachepool.type, "lvmcachepool")
|
|
|
2a10b1 |
--
|
|
|
2a10b1 |
2.35.3
|
|
|
2a10b1 |
|
|
|
2a10b1 |
|
|
|
2a10b1 |
From d25d52e146559d226369afdb4b102e516bd9e332 Mon Sep 17 00:00:00 2001
|
|
|
2a10b1 |
From: Vojtech Trefny <vtrefny@redhat.com>
|
|
|
2a10b1 |
Date: Thu, 30 Dec 2021 16:09:04 +0100
|
|
|
2a10b1 |
Subject: [PATCH 2/4] examples: Add LVM cache pool example
|
|
|
2a10b1 |
|
|
|
2a10b1 |
Related: rhbz#2055198
|
|
|
2a10b1 |
---
|
|
|
2a10b1 |
examples/lvm_cachepool.py | 59 +++++++++++++++++++++++++++++++++++++++
|
|
|
2a10b1 |
1 file changed, 59 insertions(+)
|
|
|
2a10b1 |
create mode 100644 examples/lvm_cachepool.py
|
|
|
2a10b1 |
|
|
|
2a10b1 |
diff --git a/examples/lvm_cachepool.py b/examples/lvm_cachepool.py
|
|
|
2a10b1 |
new file mode 100644
|
|
|
2a10b1 |
index 00000000..ab2e8a72
|
|
|
2a10b1 |
--- /dev/null
|
|
|
2a10b1 |
+++ b/examples/lvm_cachepool.py
|
|
|
2a10b1 |
@@ -0,0 +1,59 @@
|
|
|
2a10b1 |
+import os
|
|
|
2a10b1 |
+
|
|
|
2a10b1 |
+import blivet
|
|
|
2a10b1 |
+from blivet.size import Size
|
|
|
2a10b1 |
+from blivet.util import set_up_logging, create_sparse_tempfile
|
|
|
2a10b1 |
+
|
|
|
2a10b1 |
+
|
|
|
2a10b1 |
+set_up_logging()
|
|
|
2a10b1 |
+b = blivet.Blivet() # create an instance of Blivet (don't add system devices)
|
|
|
2a10b1 |
+
|
|
|
2a10b1 |
+# create a disk image file on which to create new devices
|
|
|
2a10b1 |
+disk1_file = create_sparse_tempfile("disk1", Size("100GiB"))
|
|
|
2a10b1 |
+b.disk_images["disk1"] = disk1_file
|
|
|
2a10b1 |
+disk2_file = create_sparse_tempfile("disk2", Size("100GiB"))
|
|
|
2a10b1 |
+b.disk_images["disk2"] = disk2_file
|
|
|
2a10b1 |
+
|
|
|
2a10b1 |
+b.reset()
|
|
|
2a10b1 |
+
|
|
|
2a10b1 |
+try:
|
|
|
2a10b1 |
+ disk1 = b.devicetree.get_device_by_name("disk1")
|
|
|
2a10b1 |
+ disk2 = b.devicetree.get_device_by_name("disk2")
|
|
|
2a10b1 |
+
|
|
|
2a10b1 |
+ b.initialize_disk(disk1)
|
|
|
2a10b1 |
+ b.initialize_disk(disk2)
|
|
|
2a10b1 |
+
|
|
|
2a10b1 |
+ pv = b.new_partition(size=Size("50GiB"), fmt_type="lvmpv", parents=[disk1])
|
|
|
2a10b1 |
+ b.create_device(pv)
|
|
|
2a10b1 |
+ pv2 = b.new_partition(size=Size("50GiB"), fmt_type="lvmpv", parents=[disk2])
|
|
|
2a10b1 |
+ b.create_device(pv2)
|
|
|
2a10b1 |
+
|
|
|
2a10b1 |
+ # allocate the partitions (decide where and on which disks they'll reside)
|
|
|
2a10b1 |
+ blivet.partitioning.do_partitioning(b)
|
|
|
2a10b1 |
+
|
|
|
2a10b1 |
+ vg = b.new_vg(parents=[pv, pv2])
|
|
|
2a10b1 |
+ b.create_device(vg)
|
|
|
2a10b1 |
+
|
|
|
2a10b1 |
+ # new lv with base size 5GiB and growth up to 15GiB and an ext4 filesystem
|
|
|
2a10b1 |
+ lv = b.new_lv(fmt_type="ext4", size=Size("5GiB"), parents=[vg], name="cached")
|
|
|
2a10b1 |
+ b.create_device(lv)
|
|
|
2a10b1 |
+
|
|
|
2a10b1 |
+ # new cache pool
|
|
|
2a10b1 |
+ cpool = b.new_lv(size=Size("1 GiB"), parents=[vg], pvs=[pv2], cache_pool=True, name="fastlv")
|
|
|
2a10b1 |
+ b.create_device(cpool)
|
|
|
2a10b1 |
+
|
|
|
2a10b1 |
+ # write the new partitions to disk and format them as specified
|
|
|
2a10b1 |
+ b.do_it()
|
|
|
2a10b1 |
+ print(b.devicetree)
|
|
|
2a10b1 |
+
|
|
|
2a10b1 |
+ # attach the newly created cache pool to the "slow" LV
|
|
|
2a10b1 |
+ lv.attach_cache(cpool)
|
|
|
2a10b1 |
+
|
|
|
2a10b1 |
+ b.reset()
|
|
|
2a10b1 |
+ print(b.devicetree)
|
|
|
2a10b1 |
+
|
|
|
2a10b1 |
+ input("Check the state and hit ENTER to trigger cleanup")
|
|
|
2a10b1 |
+finally:
|
|
|
2a10b1 |
+ b.devicetree.teardown_disk_images()
|
|
|
2a10b1 |
+ os.unlink(disk1_file)
|
|
|
2a10b1 |
+ os.unlink(disk2_file)
|
|
|
2a10b1 |
--
|
|
|
2a10b1 |
2.35.3
|
|
|
2a10b1 |
|
|
|
2a10b1 |
|
|
|
2a10b1 |
From 2411d8aa082f6baf46f25d5f97455da983c0ee5f Mon Sep 17 00:00:00 2001
|
|
|
2a10b1 |
From: Vojtech Trefny <vtrefny@redhat.com>
|
|
|
2a10b1 |
Date: Thu, 30 Dec 2021 16:13:33 +0100
|
|
|
2a10b1 |
Subject: [PATCH 3/4] lvm: Use blivet static data when checking if the VG is
|
|
|
2a10b1 |
active
|
|
|
2a10b1 |
|
|
|
2a10b1 |
Instead of calling 'lvs' again in LVMVolumeGroupDevice.status
|
|
|
2a10b1 |
|
|
|
2a10b1 |
Related: rhbz#2055198
|
|
|
2a10b1 |
---
|
|
|
2a10b1 |
blivet/devices/lvm.py | 9 ++-------
|
|
|
2a10b1 |
1 file changed, 2 insertions(+), 7 deletions(-)
|
|
|
2a10b1 |
|
|
|
2a10b1 |
diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
|
|
|
2a10b1 |
index 7d374c3b..9f875e4e 100644
|
|
|
2a10b1 |
--- a/blivet/devices/lvm.py
|
|
|
2a10b1 |
+++ b/blivet/devices/lvm.py
|
|
|
2a10b1 |
@@ -220,13 +220,8 @@ class LVMVolumeGroupDevice(ContainerDevice):
|
|
|
2a10b1 |
|
|
|
2a10b1 |
# special handling for incomplete VGs
|
|
|
2a10b1 |
if not self.complete:
|
|
|
2a10b1 |
- try:
|
|
|
2a10b1 |
- lvs_info = blockdev.lvm.lvs(vg_name=self.name)
|
|
|
2a10b1 |
- except blockdev.LVMError:
|
|
|
2a10b1 |
- lvs_info = []
|
|
|
2a10b1 |
-
|
|
|
2a10b1 |
- for lv_info in lvs_info:
|
|
|
2a10b1 |
- if lv_info.attr and lv_info.attr[4] == 'a':
|
|
|
2a10b1 |
+ for lv_info in lvs_info.cache.values():
|
|
|
2a10b1 |
+ if lv_info.vg_name == self.name and lv_info.attr and lv_info.attr[4] == 'a':
|
|
|
2a10b1 |
return True
|
|
|
2a10b1 |
|
|
|
2a10b1 |
return False
|
|
|
2a10b1 |
--
|
|
|
2a10b1 |
2.35.3
|
|
|
2a10b1 |
|
|
|
2a10b1 |
|
|
|
2a10b1 |
From c8fda78915f31f3d5011ada3c7463f85e181983b Mon Sep 17 00:00:00 2001
|
|
|
2a10b1 |
From: Vojtech Trefny <vtrefny@redhat.com>
|
|
|
2a10b1 |
Date: Mon, 30 May 2022 17:02:43 +0200
|
|
|
2a10b1 |
Subject: [PATCH 4/4] Add option to attach a newly created cache pool to
|
|
|
2a10b1 |
existing LV
|
|
|
2a10b1 |
|
|
|
2a10b1 |
Because we do not have action for attaching the cache pool, we
|
|
|
2a10b1 |
cannot schedule both adding the fast PV to the VG and attaching
|
|
|
2a10b1 |
the cache pool to existing LV. This hack allows to schedule the
|
|
|
2a10b1 |
attach to happen after the cache pool is created.
|
|
|
2a10b1 |
|
|
|
2a10b1 |
Related: rhbz#2055198
|
|
|
2a10b1 |
---
|
|
|
2a10b1 |
blivet/devices/lvm.py | 38 +++++++++++++++++++++++++++++++++++---
|
|
|
2a10b1 |
1 file changed, 35 insertions(+), 3 deletions(-)
|
|
|
2a10b1 |
|
|
|
2a10b1 |
diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
|
|
|
2a10b1 |
index 9f875e4e..7e4fcf53 100644
|
|
|
2a10b1 |
--- a/blivet/devices/lvm.py
|
|
|
2a10b1 |
+++ b/blivet/devices/lvm.py
|
|
|
2a10b1 |
@@ -2028,9 +2028,10 @@ class LVMVDOLogicalVolumeMixin(object):
|
|
|
2a10b1 |
|
|
|
2a10b1 |
|
|
|
2a10b1 |
class LVMCachePoolMixin(object):
|
|
|
2a10b1 |
- def __init__(self, metadata_size, cache_mode=None):
|
|
|
2a10b1 |
+ def __init__(self, metadata_size, cache_mode=None, attach_to=None):
|
|
|
2a10b1 |
self._metadata_size = metadata_size or Size(0)
|
|
|
2a10b1 |
self._cache_mode = cache_mode
|
|
|
2a10b1 |
+ self._attach_to = attach_to
|
|
|
2a10b1 |
|
|
|
2a10b1 |
def _init_check(self):
|
|
|
2a10b1 |
if not self.is_cache_pool:
|
|
|
2a10b1 |
@@ -2042,6 +2043,9 @@ class LVMCachePoolMixin(object):
|
|
|
2a10b1 |
if not self.exists and not self._pv_specs:
|
|
|
2a10b1 |
raise ValueError("at least one fast PV must be specified to create a cache pool")
|
|
|
2a10b1 |
|
|
|
2a10b1 |
+ if self._attach_to and not self._attach_to.exists:
|
|
|
2a10b1 |
+ raise ValueError("cache pool can be attached only to an existing LV")
|
|
|
2a10b1 |
+
|
|
|
2a10b1 |
def _check_from_lvs(self):
|
|
|
2a10b1 |
if self._from_lvs:
|
|
|
2a10b1 |
if len(self._from_lvs) != 2:
|
|
|
2a10b1 |
@@ -2150,6 +2154,31 @@ class LVMCachePoolMixin(object):
|
|
|
2a10b1 |
cache_mode,
|
|
|
2a10b1 |
0,
|
|
|
2a10b1 |
[spec.pv.path for spec in self._pv_specs])
|
|
|
2a10b1 |
+ if self._attach_to:
|
|
|
2a10b1 |
+ self._attach_to.attach_cache(self)
|
|
|
2a10b1 |
+
|
|
|
2a10b1 |
+ def _post_create(self):
|
|
|
2a10b1 |
+ if self._attach_to:
|
|
|
2a10b1 |
+ # post_create tries to activate the LV and after attaching it no longer exists
|
|
|
2a10b1 |
+ return
|
|
|
2a10b1 |
+
|
|
|
2a10b1 |
+ # pylint: disable=bad-super-call
|
|
|
2a10b1 |
+ super(LVMLogicalVolumeBase, self)._post_create()
|
|
|
2a10b1 |
+
|
|
|
2a10b1 |
+ def add_hook(self, new=True):
|
|
|
2a10b1 |
+ if self._attach_to:
|
|
|
2a10b1 |
+ self._attach_to._cache = LVMCache(self._attach_to, size=self.size, exists=False,
|
|
|
2a10b1 |
+ pvs=self._pv_specs, mode=self._cache_mode)
|
|
|
2a10b1 |
+
|
|
|
2a10b1 |
+ # pylint: disable=bad-super-call
|
|
|
2a10b1 |
+ super(LVMLogicalVolumeBase, self).add_hook(new=new)
|
|
|
2a10b1 |
+
|
|
|
2a10b1 |
+ def remove_hook(self, modparent=True):
|
|
|
2a10b1 |
+ if self._attach_to:
|
|
|
2a10b1 |
+ self._attach_to._cache = None
|
|
|
2a10b1 |
+
|
|
|
2a10b1 |
+ # pylint: disable=bad-super-call
|
|
|
2a10b1 |
+ super(LVMLogicalVolumeBase, self).remove_hook(modparent=modparent)
|
|
|
2a10b1 |
|
|
|
2a10b1 |
def dracut_setup_args(self):
|
|
|
2a10b1 |
return set()
|
|
|
2a10b1 |
@@ -2174,7 +2203,7 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
|
|
|
2a10b1 |
parent_lv=None, int_type=None, origin=None, vorigin=False,
|
|
|
2a10b1 |
metadata_size=None, chunk_size=None, profile=None, from_lvs=None,
|
|
|
2a10b1 |
compression=False, deduplication=False, index_memory=0,
|
|
|
2a10b1 |
- write_policy=None, cache_mode=None):
|
|
|
2a10b1 |
+ write_policy=None, cache_mode=None, attach_to=None):
|
|
|
2a10b1 |
"""
|
|
|
2a10b1 |
:param name: the device name (generally a device node's basename)
|
|
|
2a10b1 |
:type name: str
|
|
|
2a10b1 |
@@ -2250,6 +2279,9 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
|
|
|
2a10b1 |
:type metadata_size: :class:`~.size.Size`
|
|
|
2a10b1 |
:keyword cache_mode: mode for the cache or None for default (writethrough)
|
|
|
2a10b1 |
:type cache_mode: str
|
|
|
2a10b1 |
+ :keyword attach_to: for non-existing cache pools a logical volume the pool should
|
|
|
2a10b1 |
+ be attached to when created
|
|
|
2a10b1 |
+ :type attach_to: :class:`LVMLogicalVolumeDevice`
|
|
|
2a10b1 |
|
|
|
2a10b1 |
"""
|
|
|
2a10b1 |
|
|
|
2a10b1 |
@@ -2268,7 +2300,7 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
|
|
|
2a10b1 |
LVMSnapshotMixin.__init__(self, origin, vorigin)
|
|
|
2a10b1 |
LVMThinPoolMixin.__init__(self, metadata_size, chunk_size, profile)
|
|
|
2a10b1 |
LVMThinLogicalVolumeMixin.__init__(self)
|
|
|
2a10b1 |
- LVMCachePoolMixin.__init__(self, metadata_size, cache_mode)
|
|
|
2a10b1 |
+ LVMCachePoolMixin.__init__(self, metadata_size, cache_mode, attach_to)
|
|
|
2a10b1 |
LVMLogicalVolumeBase.__init__(self, name, parents, size, uuid, seg_type,
|
|
|
2a10b1 |
fmt, exists, sysfs_path, grow, maxsize,
|
|
|
2a10b1 |
percent, cache_request, pvs, from_lvs)
|
|
|
2a10b1 |
--
|
|
|
2a10b1 |
2.35.3
|
|
|
2a10b1 |
|