[anaconda][PATCH 3/3] Add support for lvm thin provisioning via ks custom storage.
David Lehman
dlehman at redhat.com
Tue Jan 27 18:10:31 UTC 2015
Support is limited to custom kickstart layouts. There is no support
for interactive installation or automatic partitioning.
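
For example, a layout along the following lines exercises the new code
paths (the option spellings --thinpool, --thin, --poolname, --metadatasize,
--chunksize and --profile are assumed to be the pykickstart options behind
the thin_pool/thin_volume/pool_name/metadata_size/chunk_size/profile
attributes consumed in kickstart.py below; sizes are illustrative):

    part pv.01 --size=12000
    volgroup vg00 pv.01
    # an 8 GiB thin pool; metadata size is in MiB, chunk size in KiB
    logvol none --name=pool00 --vgname=vg00 --size=8192 --thinpool --metadatasize=4 --chunksize=64 --profile=thin-generic
    # thin volumes allocate from the pool rather than directly from the vg
    logvol / --fstype=ext4 --name=root --vgname=vg00 --size=4096 --thin --poolname=pool00
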
Resolves: rhbz#1083459
---
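A note for reviewers on the padding math in the new get_pool_padding()
helper: the pad added on top of a pool's base size is 20% of that size,
so for a total that already includes the pad the multiplier becomes
0.2 / 1.2 = 1/6 -- that is what reverse=True computes. A standalone
sketch of the round trip (with a simplified clampSize that always rounds
up to whole extents; the constants mirror LVM_PE_SIZE and
LVM_THINP_MAX_METADATA_SIZE from the patch):

    import math

    PE_SIZE = 4      # MiB, mirrors LVM_PE_SIZE
    MAX_PAD = 16384  # MiB, mirrors LVM_THINP_MAX_METADATA_SIZE

    def clamp(size, pesize=PE_SIZE):
        # round up to a whole number of physical extents
        return int(math.ceil(float(size) / pesize) * pesize)

    def pool_padding(size, reverse=False):
        multiplier = (1.0 / 6) if reverse else 0.2
        return min(clamp(size * multiplier), clamp(MAX_PAD))

    base = 8192                         # requested pool size in MiB
    padded = base + pool_padding(base)  # 8192 + 1640 = 9832
    # stripping the pad from the padded total recovers the base size
    print(padded - pool_padding(padded, reverse=True))  # 8192
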
kickstart.py | 45 ++++++++-
loader/loader.c | 2 +-
scripts/mk-images | 2 +-
storage/__init__.py | 50 +++++++++-
storage/devicelibs/lvm.py | 102 ++++++++++++++++++++
storage/devices.py | 236 +++++++++++++++++++++++++++++++++++++++++++---
storage/devicetree.py | 215 +++++++++++++++++++++++------------------
storage/partitioning.py | 200 ++++++++++++++++++++++++++-------------
8 files changed, 672 insertions(+), 180 deletions(-)
diff --git a/kickstart.py b/kickstart.py
index 13e8798..5bae2c5 100644
--- a/kickstart.py
+++ b/kickstart.py
@@ -20,7 +20,7 @@
from storage.deviceaction import *
from storage.devices import LUKSDevice
-from storage.devicelibs.lvm import getPossiblePhysicalExtents
+from storage.devicelibs.lvm import getPossiblePhysicalExtents, KNOWN_THPOOL_PROFILES
from storage.devicelibs.mpath import MultipathConfigWriter, identifyMultipaths
from storage.devicelibs.mpath import writeMultipathConf
from storage.formats import getFormat
@@ -516,6 +516,10 @@ class LogVolData(commands.logvol.RHEL6_LogVolData):
else:
type = storage.defaultFSType
+ if self.thin_pool:
+ self.mountpoint = ""
+ type = None
+
# Sanity check mountpoint
if self.mountpoint != "" and self.mountpoint[0] != '/':
raise KickstartValueError, formatErrorMsg(self.lineno, msg="The mount point \"%s\" is not valid." % (self.mountpoint,))
@@ -525,6 +529,16 @@ class LogVolData(commands.logvol.RHEL6_LogVolData):
if not vg:
raise KickstartValueError, formatErrorMsg(self.lineno, msg="No volume group exists with the name \"%s\". Specify volume groups before logical volumes." % self.vgname)
+ pool = None
+ if self.thin_volume:
+ pool = devicetree.getDeviceByName("%s-%s" % (vg.name, self.pool_name))
+ if not pool:
+ err = formatErrorMsg(self.lineno,
+ msg="No thin pool exists with the name "
+ "\"%s\". Specify thin pools before "
+ "thin volumes." % self.pool_name)
+ raise KickstartValueError, err
+
# If this specifies an existing request that we should not format,
# quit here after setting up enough information to mount it later.
if not self.format:
@@ -559,7 +573,7 @@ class LogVolData(commands.logvol.RHEL6_LogVolData):
format = getFormat(type,
mountpoint=self.mountpoint,
mountopts=self.fsopts)
- if not format:
+ if not format.type and not self.thin_pool:
raise KickstartValueError, formatErrorMsg(self.lineno, msg="The \"%s\" filesystem type is not supported." % type)
# If we were given a pre-existing LV to create a filesystem on, we need
@@ -583,14 +597,37 @@ class LogVolData(commands.logvol.RHEL6_LogVolData):
except KeyError:
pass
+ if self.thin_volume:
+ parent = pool
+ else:
+ parent = vg
+
+ pool_args = {}
+ if self.thin_pool:
+ if self.profile:
+ matching = (p for p in KNOWN_THPOOL_PROFILES if p.name == self.profile)
+ profile = next(matching, None)
+ if profile:
+ pool_args["profile"] = profile
+ else:
+ log.warning("No matching profile for %s found in LVM configuration", self.profile)
+ if self.metadata_size:
+ pool_args["metadatasize"] = self.metadata_size
+
+ if self.chunk_size:
+ pool_args["chunksize"] = self.chunk_size / 1024.0
+
try:
request = storage.newLV(format=format,
name=self.name,
- vg=vg,
+ vg=parent,
size=self.size,
+ thin_pool=self.thin_pool,
+ thin_volume=self.thin_volume,
grow=self.grow,
maxsize=self.maxSizeMB,
- percent=self.percent)
+ percent=self.percent,
+ **pool_args)
except DeviceError as e:
# Promote DeviceError to KickstartError so the UI will display it
raise KickstartError(str(e))
diff --git a/loader/loader.c b/loader/loader.c
index 8b5b65f..9c3d5a7 100644
--- a/loader/loader.c
+++ b/loader/loader.c
@@ -2384,7 +2384,7 @@ int main(int argc, char ** argv) {
stop_fw_loader(&loaderData);
start_fw_loader(&loaderData);
- mlLoadModuleSet("raid0:raid1:raid5:raid6:raid456:raid10:linear:dm-mod:dm-zero:dm-mirror:dm-snapshot:dm-multipath:dm-round-robin:dm-crypt:dm-raid:cbc:sha256:lrw:xts");
+ mlLoadModuleSet("raid0:raid1:raid5:raid6:raid456:raid10:linear:dm-mod:dm-zero:dm-mirror:dm-snapshot:dm-thin-pool:dm-multipath:dm-round-robin:dm-crypt:dm-raid:cbc:sha256:lrw:xts");
if (!access("/mnt/runtime/usr/lib/libunicode-lite.so.1", R_OK))
setenv("LD_PRELOAD", "/mnt/runtime/usr/lib/libunicode-lite.so.1", 1);
diff --git a/scripts/mk-images b/scripts/mk-images
index 2749c41..09cbf38 100755
--- a/scripts/mk-images
+++ b/scripts/mk-images
@@ -107,7 +107,7 @@ SDMODS="mmc-block sdhci sdhci-pci"
IDEMODS="ide-cd ide-cd_mod"
SCSIMODS="sr_mod sg st sd_mod scsi_mod iscsi_tcp iscsi_ibft scsi_wait_scan cxgb3i bnx2i be2iscsi"
FSMODS="fat msdos vfat ext2 ext3 ext4 reiserfs jfs xfs gfs2 cifs fuse btrfs hfsplus xenfs"
-LVMMODS="dm-mod dm-zero dm-snapshot dm-mirror dm-multipath dm-round-robin dm-crypt dm-raid dm-raid45 dm-memcache"
+LVMMODS="dm-mod dm-zero dm-snapshot dm-mirror dm-multipath dm-round-robin dm-crypt dm-raid dm-raid45 dm-memcache dm-thin-pool"
RAIDMODS="raid0 raid1 raid5 raid6 raid456 raid10 linear"
CRYPTOMODS="sha256_generic cbc xts lrw aes_generic crypto_blkcipher crc32c ecb arc4"
PCMCIASOCKMODS="yenta_socket i82365 tcic pcmcia"
diff --git a/storage/__init__.py b/storage/__init__.py
index 8e227c5..2ff0933 100644
--- a/storage/__init__.py
+++ b/storage/__init__.py
@@ -548,6 +548,30 @@ class Storage(object):
return lvs
@property
+ def thinlvs(self):
+ """ A list of the LVM Thin Logical Volumes in the device tree.
+
+ This is based on the current state of the device tree and
+ does not necessarily reflect the actual on-disk state of the
+ system's disks.
+ """
+ thin = self.devicetree.getDevicesByType("lvmthinlv")
+ thin.sort(key=lambda d: d.name)
+ return thin
+
+ @property
+ def thinpools(self):
+ """ A list of the LVM Thin Pool Logical Volumes in the device tree.
+
+ This is based on the current state of the device tree and
+ does not necessarily reflect the actual on-disk state of the
+ system's disks.
+ """
+ pools = self.devicetree.getDevicesByType("lvmthinpool")
+ pools.sort(key=lambda d: d.name)
+ return pools
+
+ @property
def pvs(self):
""" A list of the LVM Physical Volumes in the device tree.
@@ -824,6 +848,8 @@ class Storage(object):
def newLV(self, *args, **kwargs):
""" Return a new LVMLogicalVolumeDevice instance. """
+ thin_volume = kwargs.pop("thin_volume", False)
+ thin_pool = kwargs.pop("thin_pool", False)
if kwargs.has_key("vg"):
vg = kwargs.pop("vg")
@@ -847,12 +873,20 @@ class Storage(object):
swap = False
name = self.createSuggestedLVName(vg,
swap=swap,
+ pool=thin_pool,
mountpoint=mountpoint)
if name in [d.name for d in self.devices]:
raise ValueError("name already in use")
- return LVMLogicalVolumeDevice(name, vg, *args, **kwargs)
+ if thin_pool:
+ device_class = LVMThinPoolDevice
+ elif thin_volume:
+ device_class = LVMThinLogicalVolumeDevice
+ else:
+ device_class = LVMLogicalVolumeDevice
+
+ return device_class(name, vg, *args, **kwargs)
def createDevice(self, device):
""" Schedule creation of a device.
@@ -947,7 +981,7 @@ class Storage(object):
return tmpname
- def createSuggestedLVName(self, vg, swap=None, mountpoint=None):
+ def createSuggestedLVName(self, vg, swap=None, mountpoint=None, pool=False):
""" Return a suitable, unused name for a new logical volume. """
# FIXME: this is not at all guaranteed to work
if mountpoint:
@@ -973,6 +1007,18 @@ class Storage(object):
break
else:
lvtemplate = "lv_swap"
+ elif pool:
+ pool_count = len([p for p in self.thinpools if p in vg.lvs])
+ if pool_count:
+ idx = pool_count
+ while True:
+ lvtemplate = "lv_pool%02d" % idx
+ if lvtemplate in [lv.lvname for lv in vg.lvs]:
+ idx += 1
+ else:
+ break
+ else:
+ lvtemplate = "lv_pool"
else:
idx = len(vg.lvs)
while True:
diff --git a/storage/devicelibs/lvm.py b/storage/devicelibs/lvm.py
index 7759e45..06f8304 100644
--- a/storage/devicelibs/lvm.py
+++ b/storage/devicelibs/lvm.py
@@ -23,6 +23,7 @@
import os
import math
import re
+from collections import namedtuple
import iutil
import logging
@@ -33,9 +34,22 @@ from constants import *
import gettext
_ = lambda x: gettext.ldgettext("anaconda", x)
+N_ = lambda x: x
MAX_LV_SLOTS = 256
+LVM_PE_SIZE = 4 # MiB
+
+# thinp constants
+LVM_THINP_MIN_METADATA_SIZE = 2
+LVM_THINP_MAX_METADATA_SIZE = 16384
+LVM_THINP_MIN_CHUNK_SIZE = 0.0625 # 64 KiB
+LVM_THINP_MAX_CHUNK_SIZE = 1024
+
+ThPoolProfile = namedtuple("ThPoolProfile", ["name", "desc"])
+KNOWN_THPOOL_PROFILES = (ThPoolProfile("thin-generic", N_("Generic")),
+ ThPoolProfile("thin-performance", N_("Performance")))
+
def has_lvm():
has_lvm = False
for path in os.environ["PATH"].split(":"):
@@ -172,6 +186,52 @@ def clampSize(size, pesize, roundup=None):
return long(round(float(size)/float(pesize)) * pesize)
+def get_pool_padding(size, pesize=LVM_PE_SIZE, reverse=False):
+ """ Return the size of the pad required for a pool with the given specs.
+
+ reverse means the pad is already included in the specified size and we
+ should calculate how much of the total is the pad
+ """
+ if not reverse:
+ multiplier = 0.2
+ else:
+ multiplier = 1.0 / 6
+
+ pad = min(clampSize(size * multiplier, pesize, roundup=True),
+ clampSize(LVM_THINP_MAX_METADATA_SIZE, pesize, roundup=True))
+
+ return pad
+
+def is_valid_thin_pool_metadata_size(size):
+ """ Return True if size is a valid thin pool metadata vol size.
+
+ :param size: metadata vol size (in MiB) to validate
+ :type size: int or float
+ :returns: whether the size is valid
+ :rtype: bool
+ """
+ return (LVM_THINP_MIN_METADATA_SIZE <= size <= LVM_THINP_MAX_METADATA_SIZE)
+
+# To support discard, chunk size must be a power of two. Otherwise it must be a
+# multiple of 64 KiB.
+def is_valid_thin_pool_chunk_size(size, discard=False):
+ """ Return True if size is a valid thin pool chunk size.
+
+ :param size: chunk size (in MiB) to validate
+ :type size: int or float
+ :keyword discard: whether discard support is required (default: False)
+ :type discard: bool
+ :returns: whether the size is valid
+ :rtype: bool
+ """
+ if not LVM_THINP_MIN_CHUNK_SIZE <= size <= LVM_THINP_MAX_CHUNK_SIZE:
+ return False
+
+ if discard:
+ # discard requires a power-of-two chunk size; test in KiB so that
+ # sub-MiB sizes (e.g. 0.0625 MiB == 64 KiB) are handled correctly
+ ksize = int(size * 1024)
+ return (ksize & (ksize - 1)) == 0
+ else:
+ return (size % LVM_THINP_MIN_CHUNK_SIZE == 0)
+
def lvm(args, progress=None):
rc = iutil.execWithPulseProgress("lvm", args,
stdout = "/dev/tty5",
@@ -432,3 +492,45 @@ def lvdeactivate(vg_name, lv_name):
except LVMError as msg:
raise LVMError("lvdeactivate failed for %s: %s" % (lv_name, msg))
+def thinpoolcreate(vg_name, lv_name, size, metadatasize=None, chunksize=None, profile=None, progress=None):
+ args = ["lvcreate"] + config_args + \
+ ["--thinpool", "%s/%s" % (vg_name, lv_name), "--size", "%dm" % size]
+
+ if metadatasize:
+ # default unit is MiB
+ args += ["--poolmetadatasize", "%d" % metadatasize]
+
+ if chunksize:
+ # default unit is KiB
+ args += ["--chunksize", "%d" % chunksize * 1024]
+
+ if profile:
+ args += ["--profile=%s" % profile]
+
+ try:
+ lvm(args, progress=progress)
+ except LVMError as msg:
+ raise LVMError("lvcreate failed for %s/%s: %s" % (vg_name, lv_name, msg))
+
+def thinlvcreate(vg_name, pool_name, lv_name, size, progress=None):
+ args = ["lvcreate"] + config_args + \
+ ["--thinpool", "%s/%s" % (vg_name, pool_name),
+ "--virtualsize", "%dm" % size,
+ "-n", lv_name]
+
+ try:
+ lvm(args, progress=progress)
+ except LVMError as msg:
+ raise LVMError("lvcreate failed for %s/%s: %s" % (vg_name, lv_name, msg))
+
+def thinlvpoolname(vg_name, lv_name):
+ args = ["lvs"] + config_args + \
+ [ "--noheadings", "-o", "pool_lv", "%s/%s" % (vg_name, lv_name)]
+
+ # lvm() sends its output to tty5, so run lvs via execWithCapture instead
+ lines = iutil.execWithCapture("lvm", args, stderr="/dev/tty5").splitlines()
+ try:
+ pool = lines[0].strip()
+ except IndexError:
+ pool = ''
+
+ return pool
diff --git a/storage/devices.py b/storage/devices.py
index e179339..c716bae 100644
--- a/storage/devices.py
+++ b/storage/devices.py
@@ -2136,7 +2136,9 @@ class LVMVolumeGroupDevice(DMDevice):
# verify we have the space, then add it
# do not verify for growing vg (because of ks)
- if not lv.exists and not self.growable and lv.size > self.freeSpace:
+ if not lv.exists and not self.growable and \
+ not isinstance(lv, LVMThinLogicalVolumeDevice) and \
+ lv.size > self.freeSpace:
raise DeviceError("new lv is too large to fit in free space", self.name)
log.debug("Adding %s/%dMB to %s" % (lv.name, lv.size, self.name))
@@ -2284,6 +2286,14 @@ class LVMVolumeGroupDevice(DMDevice):
return self._lvs[:] # we don't want folks changing our list
@property
+ def thinpools(self):
+ return [l for l in self._lvs if isinstance(l, LVMThinPoolDevice)]
+
+ @property
+ def thinlvs(self):
+ return [l for l in self._lvs if isinstance(l, LVMThinLogicalVolumeDevice)]
+
+ @property
def complete(self):
"""Check if the vg has all its pvs in the system
Return True if complete.
@@ -2333,13 +2343,14 @@ class LVMLogicalVolumeDevice(DMDevice):
percent -- percent of VG space to take
"""
- if isinstance(vgdev, list):
- if len(vgdev) != 1:
- raise ValueError("constructor requires a single LVMVolumeGroupDevice instance")
- elif not isinstance(vgdev[0], LVMVolumeGroupDevice):
+ if self.__class__.__name__ == "LVMLogicalVolumeDevice":
+ if isinstance(vgdev, list):
+ if len(vgdev) != 1:
+ raise ValueError("constructor requires a single LVMVolumeGroupDevice instance")
+ elif not isinstance(vgdev[0], LVMVolumeGroupDevice):
+ raise ValueError("constructor requires a LVMVolumeGroupDevice instance")
+ elif not isinstance(vgdev, LVMVolumeGroupDevice):
raise ValueError("constructor requires a LVMVolumeGroupDevice instance")
- elif not isinstance(vgdev, LVMVolumeGroupDevice):
- raise ValueError("constructor requires a LVMVolumeGroupDevice instance")
DMDevice.__init__(self, name, size=size, format=format,
sysfsPath=sysfsPath, parents=vgdev,
exists=exists)
@@ -2382,7 +2393,7 @@ class LVMLogicalVolumeDevice(DMDevice):
raise SinglePhysicalVolumeError(self.singlePVerr)
# here we go with the circular references
- self.vg._addLogVol(self)
+ self.parents[0]._addLogVol(self)
def __str__(self):
s = DMDevice.__str__(self)
@@ -2545,6 +2556,14 @@ class LVMLogicalVolumeDevice(DMDevice):
return [validpvs[0].path]
+ def _create(self, progress=None):
+ # should we use --zero for safety's sake?
+ if self.singlePV:
+ lvm.lvcreate(self.vg.name, self._name, self.size, progress=progress,
+ pvs=self._getSinglePV())
+ else:
+ lvm.lvcreate(self.vg.name, self._name, self.size, progress=progress)
+
def create(self, intf=None):
""" Create the device. """
log_method_call(self, self.name, status=self.status)
@@ -2560,13 +2579,7 @@ class LVMLogicalVolumeDevice(DMDevice):
try:
self.createParents()
self.setupParents()
-
- # should we use --zero for safety's sake?
- if self.singlePV:
- lvm.lvcreate(self.vg.name, self._name, self.size, progress=w,
- pvs=self._getSinglePV())
- else:
- lvm.lvcreate(self.vg.name, self._name, self.size, progress=w)
+ self._create(progress=w)
except Exception:
raise
else:
@@ -2632,6 +2645,199 @@ class LVMLogicalVolumeDevice(DMDevice):
problem = _("small")
return problem
+class LVMThinPoolDevice(LVMLogicalVolumeDevice):
+ """ An LVM Thin Pool """
+ _type = "lvmthinpool"
+ _resizable = False
+
+ def __init__(self, name, vgdev, size=None, uuid=None,
+ format=None, exists=False, sysfsPath='',
+ grow=None, maxsize=None, percent=None,
+ metadatasize=None, chunksize=None, segType=None, profile=None):
+ """
+ :param name: the device name (generally a device node's basename)
+ :type name: str
+ :param vgdev: the vg that will contain this pool
+ :type vgdev: :class:`~.LVMVolumeGroupDevice`
+ :keyword exists: does this device exist?
+ :type exists: bool
+ :keyword size: the device's size
+ :type size: int or float (in MiB)
+ :keyword format: this device's formatting
+ :type format: :class:`~.formats.DeviceFormat` or a subclass of it
+ :keyword sysfsPath: sysfs device path
+ :type sysfsPath: str
+ :keyword uuid: the device UUID
+ :type uuid: str
+ :keyword segType: segment type
+ :type segType: str
+
+ For non-existent pools only:
+
+ :keyword grow: whether to grow this LV
+ :type grow: bool
+ :keyword maxsize: maximum size for growable LV
+ :type maxsize: int or float (in MiB)
+ :keyword percent: percent of VG space to take
+ :type percent: int
+ :keyword metadatasize: the size of the metadata LV
+ :type metadatasize: int or float (in MiB)
+ :keyword chunksize: chunk size for the pool
+ :type chunksize: int or float (in MiB)
+ :keyword profile: (allocation) profile for the pool or None (unspecified)
+ """
+ if metadatasize is not None and \
+ not lvm.is_valid_thin_pool_metadata_size(metadatasize):
+ raise ValueError("invalid metadatasize value")
+
+ if chunksize is not None and \
+ not lvm.is_valid_thin_pool_chunk_size(chunksize):
+ raise ValueError("invalid chunksize value")
+
+ super(LVMThinPoolDevice, self).__init__(name, vgdev,
+ size=size, uuid=uuid,
+ format=format, exists=exists,
+ sysfsPath=sysfsPath, grow=grow,
+ maxsize=maxsize,
+ percent=percent,
+ segType=segType)
+
+ self.metaDataSize = metadatasize or 0
+ self.chunkSize = chunksize or 0
+ self.profile = profile
+ self._lvs = []
+
+ def _addLogVol(self, lv):
+ """ Add an LV to this pool. """
+ if lv in self._lvs:
+ raise ValueError("lv is already part of this vg")
+
+ # TODO: add some checking to prevent overcommit for preexisting
+ self.vg._addLogVol(lv)
+ log.debug("Adding %s/%s to %s", lv.name, lv.size, self.name)
+ self._lvs.append(lv)
+
+ def _removeLogVol(self, lv):
+ """ Remove an LV from this pool. """
+ if lv not in self._lvs:
+ raise ValueError("specified lv is not part of this vg")
+
+ self._lvs.remove(lv)
+ self.vg._removeLogVol(lv)
+
+ @property
+ def lvs(self):
+ """ A list of this pool's LVs """
+ return self._lvs[:] # we don't want folks changing our list
+
+ @property
+ def vgSpaceUsed(self):
+ space = super(LVMThinPoolDevice, self).vgSpaceUsed
+ space += lvm.get_pool_padding(space, pesize=self.vg.peSize)
+ return space
+
+ @property
+ def usedSpace(self):
+ return sum(l.poolSpaceUsed for l in self.lvs)
+
+ @property
+ def freeSpace(self):
+ return self.size - self.usedSpace
+
+ def _create(self, progress=None):
+ """ Create the device. """
+ log_method_call(self, self.name, status=self.status)
+ lvm.thinpoolcreate(self.vg.name, self.lvname, self.size,
+ metadatasize=self.metaDataSize,
+ chunksize=self.chunkSize,
+ profile=self.profile.name if self.profile else "",
+ progress=progress)
+
+ def dracutSetupArgs(self):
+ return set()
+
+ @property
+ def direct(self):
+ """ Is this device directly accessible? """
+ return False
+
+ def populateKSData(self, data):
+ super(LVMThinPoolDevice, self).populateKSData(data)
+ data.mountpoint = "none"
+ data.thin_pool = True
+ data.metadata_size = self.metaDataSize
+ data.chunk_size = self.chunkSize
+ if self.profile:
+ data.profile = self.profile.name
+
+class LVMThinLogicalVolumeDevice(LVMLogicalVolumeDevice):
+ """ An LVM Thin Logical Volume """
+ _type = "lvmthinlv"
+
+ @property
+ def pool(self):
+ return self.parents[0]
+
+ @property
+ def vg(self):
+ return self.pool.vg
+
+ @property
+ def poolSpaceUsed(self):
+ """ The total space used within the thin pool by this volume.
+
+ This should probably align to the greater of the vg extent size and
+ the pool chunk size. If that ends up causing overcommit of less than
+ one chunk per thin LV, so be it.
+ """
+ return self.vg.align(self.size, roundup=True)
+
+ @property
+ def vgSpaceUsed(self):
+ return 0 # the pool's size is already accounted for in the vg
+
+ def _setSize(self, size):
+ log.debug("setting lv %s size to %dMB" % (self.name, size))
+ size = self.vg.align(numeric_type(size))
+ self._size = size
+ self.targetSize = size
+
+ size = property(StorageDevice._getSize, _setSize)
+
+ def _preCreate(self):
+ # skip LVMLogicalVolumeDevice's _preCreate() method as it checks for
+ # free space in the VG, which doesn't make sense for a ThinLV and would
+ # wrongly limit the ThinLV's size to the VG's free space
+ super(LVMLogicalVolumeDevice, self)._preCreate() # pylint: disable=bad-super-call
+
+ def _create(self, progress=None):
+ """ Create the device. """
+ log_method_call(self, self.name, status=self.status)
+ lvm.thinlvcreate(self.vg.name, self.pool.lvname, self.lvname,
+ self.size, progress=progress)
+
+ def removeHook(self, modparent=True):
+ if modparent:
+ self.pool._removeLogVol(self)
+
+ # pylint: disable=bad-super-call
+ super(LVMLogicalVolumeDevice, self).removeHook(modparent=modparent)
+
+ def addHook(self, new=True):
+ # pylint: disable=bad-super-call
+ super(LVMLogicalVolumeDevice, self).addHook(new=new)
+ if new:
+ return
+
+ if self not in self.pool.lvs:
+ self.pool._addLogVol(self)
+
+ def populateKSData(self, data):
+ super(LVMThinLogicalVolumeDevice, self).populateKSData(data)
+ data.thin_volume = True
+ data.pool_name = self.pool.lvname
+
class MDRaidArrayDevice(StorageDevice):
""" An mdraid (Linux RAID) device. """
_type = "mdarray"
diff --git a/storage/devicetree.py b/storage/devicetree.py
index 83673cc..6f2ed50 100644
--- a/storage/devicetree.py
+++ b/storage/devicetree.py
@@ -1547,101 +1547,134 @@ class DeviceTree(object):
log.debug("no LVs listed for VG %s" % vg_name)
return False
- def lv_attr_cmp(a, b):
- """ Sort so that mirror images come first and snapshots last. """
- mirror_chars = "Iil"
- snapshot_chars = "Ss"
- if a[0] in mirror_chars and b[0] not in mirror_chars:
- return -1
- elif a[0] not in mirror_chars and b[0] in mirror_chars:
- return 1
- elif a[0] not in snapshot_chars and b[0] in snapshot_chars:
- return -1
- elif a[0] in snapshot_chars and b[0] not in snapshot_chars:
- return 1
- else:
- return 0
-
- # make a list of indices with mirror volumes up front and snapshots at
- # the end
- indices = range(len(lv_names))
- indices.sort(key=lambda i: lv_attr[i], cmp=lv_attr_cmp)
- raid = {}
- for index in indices:
- lv_name = lv_names[index]
- if not lv_name:
- continue
-
+ def addRequiredLV(name, msg):
+ """ Add a prerequisite/parent LV.
+
+ The parent is strictly required in order to be able to add
+ some other LV that depends on it. For this reason, failure to
+ add the specified LV results in a DeviceTreeError with the
+ message string specified in the msg parameter.
+
+ :param str name: the full name of the LV (including vgname)
+ :param str msg: message to pass to DeviceTreeError on failure
+ :returns: None
+ :raises: :class:`~.errors.DeviceTreeError` on failure
+
+ """
+ vol = self.getDeviceByName(name)
+ if vol is None:
+ addLV(lv_info[name])
+ vol = self.getDeviceByName(name)
+
+ if vol is None:
+ log.error("%s: %s", msg, name)
+ raise DeviceTreeError(msg)
+
+ def addLV(lv):
+ """ Instantiate and add an LV based on data from the VG. """
+ lv_name = udev.device_get_lv_name(lv)
+ lv_uuid = udev.device_get_lv_uuid(lv)
+ lv_attr = udev.device_get_lv_attr(lv)
+ lv_size = udev.device_get_lv_size(lv)
+ lv_type = udev.device_get_lv_type(lv)
+
+ lv_class = LVMLogicalVolumeDevice
+ lv_parents = [vg_device]
+ lv_kwargs = {}
name = "%s-%s" % (vg_name, lv_name)
- if lv_attr[index][0] in 'Ss':
- log.debug("found lvm snapshot volume '%s'" % name)
- origin_name = devicelibs.lvm.lvorigin(vg_name, lv_name)
- if not origin_name:
- log.error("lvm snapshot '%s-%s' has unknown origin"
- % (vg_name, lv_name))
- continue
- origin = self.getDeviceByName("%s-%s" % (vg_name,
- origin_name))
- if not origin:
- if origin_name.endswith("_vorigin]"):
- log.info("snapshot volume '%s' has vorigin" % name)
- vg_device.voriginSnapshots[lv_name] = lv_sizes[index]
- else:
- log.warning("snapshot lv '%s' origin lv '%s-%s' "
- "not found" % (name,
- vg_name, origin_name))
- continue
+ if self.getDeviceByName(name):
+ # some lvs may have been added on demand below
+ log.debug("already added %s", name)
+ return
- if lv_name in origin.snapshots:
- continue
+ if lv_attr[0] in 'Ss':
+ log.info("found lvm snapshot volume '%s'", name)
+ origin_name = devicelibs.lvm.lvorigin(vg_name, lv_name)
+ if not origin_name:
+ log.error("lvm snapshot '%s-%s' has unknown origin",
+ vg_name, lv_name)
+ return
- log.debug("adding %dMB to %s snapshot total"
- % (lv_sizes[index], origin.name))
- origin.snapshotSpace += lv_sizes[index]
- origin.snapshots.append(lv_name)
- continue
- elif lv_attr[index][0] == 'v':
+ if origin_name.endswith("_vorigin]"):
+ lv_kwargs["vorigin"] = True
+ origin = None
+ else:
+ origin_device_name = "%s-%s" % (vg_name, origin_name)
+ addRequiredLV(origin_device_name,
+ "failed to locate origin lv")
+ origin = self.getDeviceByName(origin_device_name)
+
+ lv_kwargs["origin"] = origin
+ lv_class = LVMSnapShotDevice
+ elif lv_attr[0] == 'v':
# skip vorigins
- continue
- elif lv_attr[index][0] in 'Ii':
+ return
+ elif lv_attr[0] in 'Ii':
# mirror image
- lv_name = re.sub(r'_[rm]image.+', '', lv_name[1:-1])
- name = "%s-%s" % (vg_name, lv_name)
- if name not in raid:
- raid[name] = {"copies": 0, "log": 0, "meta": 0}
-
+ rname = re.sub(r'_[rm]image.+', '', lv_name[1:-1])
+ name = "%s-%s" % (vg_name, rname)
+ addRequiredLV(name, "failed to look up raid lv")
raid[name]["copies"] += 1
- continue
- elif lv_attr[index][0] == 'e':
+ return
+ elif lv_attr[0] == 'e':
+ if lv_name.endswith("_pmspare]"):
+ # spare metadata area for any thin pool that needs repair
+ return
+
# raid metadata volume
- lv_name = re.sub(r'_rmeta.+', '', lv_name[1:-1])
+ lv_name = re.sub(r'_[tr]meta.*', '', lv_name[1:-1])
name = "%s-%s" % (vg_name, lv_name)
- raid[name]["meta"] += lv_sizes[index]
- continue
- elif lv_attr[index][0] == 'l':
+ addRequiredLV(name, "failed to look up raid lv")
+ raid[name]["meta"] += lv_size
+ return
+ elif lv_attr[0] == 'l':
# log volume
- lv_name = re.sub(r'_mlog.*', '', lv_name[1:-1])
- name = "%s-%s" % (vg_name, lv_name)
- if name not in raid:
- raid[name] = {"copies": 0, "log": 0, "meta": 0}
+ rname = re.sub(r'_mlog.*', '', lv_name[1:-1])
+ name = "%s-%s" % (vg_name, rname)
+ addRequiredLV(name, "failed to look up log lv")
+ raid[name]["log"] = lv_size
+ return
+ elif lv_attr[0] == 't':
+ # thin pool
+ lv_class = LVMThinPoolDevice
+ elif lv_attr[0] == 'V':
+ # thin volume
+ pool_name = devicelibs.lvm.thinlvpoolname(vg_name, lv_name)
+ pool_device_name = "%s-%s" % (vg_name, pool_name)
+ addRequiredLV(pool_device_name, "failed to look up thin pool")
- raid[name]["log"] = lv_sizes[index]
- continue
+ origin_name = devicelibs.lvm.lvorigin(vg_name, lv_name)
+ if origin_name:
+ origin_device_name = "%s-%s" % (vg_name, origin_name)
+ addRequiredLV(origin_device_name, "failed to locate origin lv")
+ origin = self.getDeviceByName(origin_device_name)
+ lv_kwargs["origin"] = origin
+ lv_class = LVMThinSnapShotDevice
+ else:
+ lv_class = LVMThinLogicalVolumeDevice
- lv_device = self.getDeviceByName(name)
- if lv_device is None:
- lv_uuid = lv_uuids[index]
- lv_size = lv_sizes[index]
- lv_type = lv_types[index]
- lv_device = LVMLogicalVolumeDevice(lv_name,
- vg_device,
- uuid=lv_uuid,
- size=lv_size,
- segType=lv_type,
- exists=True)
- self._addDevice(lv_device)
+ lv_parents = [self.getDeviceByName(pool_device_name)]
+ elif lv_name.endswith(']'):
+ # Internal LVM2 device
+ return
+ elif lv_attr[0] not in '-mMrRoO':
+ # Ignore anything else except for the following:
+ # - normal lv
+ # m mirrored
+ # M mirrored without initial sync
+ # r raid
+ # R raid without initial sync
+ # o origin
+ # O origin with merging snapshot
+ return
+ lv_dev = self.getDeviceByUuid(lv_uuid)
+ if lv_dev is None:
+ lv_device = lv_class(lv_name, parents=lv_parents,
+ uuid=lv_uuid, size=lv_size, segType=lv_type,
+ exists=True, **lv_kwargs)
+ self._addDevice(lv_device)
try:
lv_device.setup()
ret = True
@@ -1649,15 +1682,15 @@ class DeviceTree(object):
log.info("setup of %s failed: %s"
% (lv_device.name, msg))
- for name, data in raid.items():
- lv_dev = self.getDeviceByName(name)
- lv_dev.copies = data["copies"]
- lv_dev.metaDataSize = data["meta"]
- lv_dev.logSize = data["log"]
- log.debug("set %s copies to %d, metadata size to %dMB, log size "
- "to %dMB, total size %dMB"
- % (lv_dev.name, lv_dev.copies, lv_dev.metaDataSize,
- lv_dev.logSize, lv_dev.vgSpaceUsed))
+ if lv_device.status:
+ lv_device.updateSysfsPath()
+ lv_info = udev_get_block_device(lv_device.sysfsPath)
+ if not lv_info:
+ log.error("failed to get udev data for lv %s", lv_device.name)
+ return
+
+ # do format handling now
+ self.addUdevDevice(lv_info)
return ret
diff --git a/storage/partitioning.py b/storage/partitioning.py
index 1e7f583..184295c 100644
--- a/storage/partitioning.py
+++ b/storage/partitioning.py
@@ -34,6 +34,7 @@ from errors import *
from deviceaction import *
from devices import PartitionDevice, LUKSDevice, devicePathToName
from formats import getFormat
+from devicelibs.lvm import get_pool_padding
import gettext
_ = lambda x: gettext.ldgettext("anaconda", x)
@@ -1756,88 +1757,134 @@ def growLVM(storage):
# figure out how much to grow each LV
grow_amounts = {}
lv_total = vg.size - total_free
- log.debug("used: %dMB ; vg.size: %dMB" % (lv_total, vg.size))
-
- # This first loop is to calculate percentage-based growth
- # amounts. These are based on total free space.
- lvs = vg.lvs
- lvs.sort(cmp=lvCompare)
- for lv in lvs:
- if not lv.req_grow or not lv.req_percent:
- continue
+ log.debug("used: %dMB ; vg.size: %dMB" % (vg.size - total_free, vg.size))
- portion = (lv.req_percent * 0.01)
- grow = portion * vg.freeSpace
- new_size = lv.req_size + grow
- if lv.req_max_size and new_size > lv.req_max_size:
- grow -= (new_size - lv.req_max_size)
-
- if lv.format.maxSize and lv.format.maxSize < new_size:
- grow -= (new_size - lv.format.maxSize)
-
- # clamp growth amount to a multiple of vg extent size
- grow_amounts[lv.name] = vg.align(grow)
- total_free -= grow
- lv_total += grow
-
- # This second loop is to calculate non-percentage-based growth
- # amounts. These are based on free space remaining after
- # calculating percentage-based growth amounts.
-
- # keep a tab on space not allocated due to format or requested
- # maximums -- we'll dole it out to subsequent requests
- leftover = 0
- for lv in lvs:
- log.debug("checking lv %s: req_grow: %s ; req_percent: %s"
- % (lv.name, lv.req_grow, lv.req_percent))
- if not lv.req_grow or lv.req_percent:
- continue
+ ##
+ ## First, grow non-thin LVs. Percentage-based growth comes first.
+ ##
+
+ # don't include thin lvs in the vg's growth calculation
+ fatlvs = [lv for lv in vg.lvs if lv not in vg.thinlvs]
+ for lv in fatlvs:
+ if lv in vg.thinpools:
+ # make sure the pool's base size is at least the sum of its lvs'
+ req_size = max(lv.req_size, lv.usedSpace)
+
+ # add the required padding to the requested pool size
+ req_size += get_pool_padding(lv.req_size, pesize=vg.peSize)
+
+ total_free -= req_size - lv.req_size
+ lv.req_size = req_size
+
+ def growPercentageLVs(vg, lvs, free, growth):
+ """ Grow percentage-based LVs within a VG or thin pool.
- portion = float(lv.req_size) / float(lv_total)
- grow = portion * total_free
- log.debug("grow is %dMB" % grow)
-
- todo = lvs[lvs.index(lv):]
- unallocated = reduce(lambda x,y: x+y,
- [l.req_size for l in todo
- if l.req_grow and not l.req_percent])
- extra_portion = float(lv.req_size) / float(unallocated)
- extra = extra_portion * leftover
- log.debug("%s getting %dMB (%d%%) of %dMB leftover space"
- % (lv.name, extra, extra_portion * 100, leftover))
- leftover -= extra
- grow += extra
- log.debug("grow is now %dMB" % grow)
- max_size = lv.req_size + grow
- if lv.req_max_size and max_size > lv.req_max_size:
- max_size = lv.req_max_size
-
- if lv.format.maxSize and max_size > lv.format.maxSize:
- max_size = lv.format.maxSize
-
- log.debug("max size is %dMB" % max_size)
- max_size = max_size
- leftover += (lv.req_size + grow) - max_size
- grow = max_size - lv.req_size
- log.debug("lv %s gets %dMB" % (lv.name, vg.align(grow)))
- grow_amounts[lv.name] = vg.align(grow)
+ :param vg: the VG, used only for aligning growth amounts
+ :param lvs: the set of lvs to consider
+ :param free: the total free space available for growth in MiB
+ :param dict growth: growth amounts for lvs (in+out)
+ :returns: remaining free space in MiB
+
+ Percentages for thin volumes are relative to the free space in
+ the pool -- not the whole vg.
+ """
+ for lv in lvs:
+ if not lv.req_grow or not lv.req_percent:
+ continue
+
+ portion = (lv.req_percent * 0.01)
+ grow = portion * free
+ new_size = lv.req_size + grow
+ if lv.req_max_size and new_size > lv.req_max_size:
+ grow -= (new_size - lv.req_max_size)
+
+ if lv.format.maxSize and lv.format.maxSize < new_size:
+ grow -= (new_size - lv.format.maxSize)
+
+ # clamp growth amount to a multiple of vg extent size
+ growth[lv.name] = vg.align(grow)
+ free -= grow
+
+ return free
+
+ fatlvs.sort(cmp=lvCompare)
+ total_free = growPercentageLVs(vg, fatlvs, total_free, grow_amounts)
+
+ def growLVs(vg, lvs, free, growth):
+ """ Grow LVs within a VG or thin pool.
+
+ :param vg: the VG, used only for aligning growth amounts
+ :param lvs: the set of lvs to consider
+ :param free: the total free space available for growth in MiB
+ :param dict growth: growth amounts for lvs (in+out)
+ :returns: leftover space in MiB (from requests with max size)
+ """
+ # keep a tab on space not allocated due to format or requested
+ # maximums -- we'll dole it out to subsequent requests
+ leftover = 0
+ growth_base = float(sum(lv.req_size for lv in lvs if lv.req_grow))
+ for lv in lvs:
+ log.debug("checking lv %s: req_grow: %s ; req_percent: %s"
+ % (lv.name, lv.req_grow, lv.req_percent))
+ if not lv.req_grow or lv.req_percent:
+ continue
+
+ portion = float(lv.req_size) / growth_base
+ grow = portion * free
+ log.debug("grow is %dMB" % grow)
+
+ todo = lvs[lvs.index(lv):]
+ unallocated = reduce(lambda x,y: x+y,
+ [l.req_size for l in todo
+ if l.req_grow and not l.req_percent])
+ extra_portion = float(lv.req_size) / float(unallocated)
+ extra = extra_portion * leftover
+ log.debug("%s getting %dMB (%d%%) of %dMB leftover space"
+ % (lv.name, extra, extra_portion * 100, leftover))
+ leftover -= extra
+ grow += extra
+ log.debug("grow is now %dMB" % grow)
+ max_size = lv.req_size + grow
+ if lv.req_max_size and max_size > lv.req_max_size:
+ max_size = lv.req_max_size
+
+ if lv.format.maxSize and max_size > lv.format.maxSize:
+ max_size = lv.format.maxSize
+
+ log.debug("max size is %dMB" % max_size)
+ leftover += (lv.req_size + grow) - max_size
+ grow = max_size - lv.req_size
+ log.debug("lv %s gets %dMB" % (lv.name, vg.align(grow)))
+ growth[lv.name] = vg.align(grow)
+
+ return leftover
+
+ leftover = growLVs(vg, fatlvs, total_free, grow_amounts)
if not grow_amounts:
log.debug("no growable lvs in vg %s" % vg.name)
continue
# now grow the lvs by the amounts we've calculated above
- for lv in lvs:
+ for lv in fatlvs:
if lv.name not in grow_amounts.keys():
continue
- lv.size += grow_amounts[lv.name]
+
+ size = lv.req_size + grow_amounts[lv.name]
+
+ # reduce the size of thin pools by the pad size
+ if hasattr(lv, "lvs"):
+ size -= get_pool_padding(size, pesize=vg.peSize, reverse=True)
+
+ lv.size = size
# now there shouldn't be any free space left, but if there is we
# should allocate it to one of the LVs
vg_free = vg.freeSpace
log.debug("vg %s has %dMB free" % (vg.name, vg_free))
if vg_free:
- for lv in lvs:
+ for lv in fatlvs:
if not lv.req_grow:
continue
@@ -1858,7 +1905,28 @@ def growLVM(storage):
if lv.format.maxSize and projected > lv.format.maxSize:
projected = lv.format.maxSize
+ # reduce the size of thin pools by the pad size
+ if hasattr(lv, "lvs"):
+ projected -= get_pool_padding(projected, pesize=vg.peSize, reverse=True)
+
log.debug("giving leftover %dMB to %s" % (projected - lv.size,
lv.name))
lv.size = projected
+ ##
+ ## Grow thin lvs within their respective pools, percentage-based first.
+ ##
+ for pool in vg.thinpools:
+ log.debug("%s size=%d free=%d lvs=%s)", pool.lvname, pool.size, pool.freeSpace, [lv.lvname for lv in pool.lvs])
+ lvs = sorted(pool.lvs, cmp=lvCompare)
+
+ pool_free = growPercentageLVs(vg, lvs, pool.freeSpace, grow_amounts)
+
+ growLVs(vg, lvs, pool_free, grow_amounts)
+
+ # now grow the thin lvs by the amounts we just calculated
+ for lv in pool.lvs:
+ if lv.name not in grow_amounts.keys():
+ continue
+ lv.size += grow_amounts[lv.name]
+
--
1.9.3