[anaconda][PATCH 3/3] Add support for lvm thin provisioning via ks custom storage.
Vratislav Podzimek
vpodzime at redhat.com
Thu Jan 29 08:06:31 UTC 2015
On Tue, 2015-01-27 at 12:10 -0600, David Lehman wrote:
> Support is limited to custom kickstart layouts. There is no support
> for interactive installation or automatic partitioning.
>
> Resolves: rhbz#1083459
> ---
> kickstart.py | 45 ++++++++-
> loader/loader.c | 2 +-
> scripts/mk-images | 2 +-
> storage/__init__.py | 50 +++++++++-
> storage/devicelibs/lvm.py | 102 ++++++++++++++++++++
> storage/devices.py | 236 +++++++++++++++++++++++++++++++++++++++++++---
> storage/devicetree.py | 215 +++++++++++++++++++++++------------------
> storage/partitioning.py | 200 ++++++++++++++++++++++++++-------------
> 8 files changed, 672 insertions(+), 180 deletions(-)
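Just to make sure I understand the kickstart interface this enables --
something roughly like the following, right? (option names inferred from
the populateKSData() hunks below, so treat this as a sketch rather than
the final syntax)

    part pv.01 --size=20000
    volgroup vg00 pv.01
    # a thin pool is a logvol with no mountpoint
    logvol none --vgname=vg00 --name=pool00 --thinpool --size=16000
    # thin volumes are carved out of an existing pool
    logvol / --vgname=vg00 --name=root --thin --poolname=pool00 --size=8000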
> - def createSuggestedLVName(self, vg, swap=None, mountpoint=None):
> + def createSuggestedLVName(self, vg, swap=None, mountpoint=None, pool=False):
> """ Return a suitable, unused name for a new logical volume. """
> # FIXME: this is not at all guaranteed to work
> if mountpoint:
> @@ -973,6 +1007,18 @@ class Storage(object):
> break
> else:
> lvtemplate = "lv_swap"
> + elif pool:
> + pool_count = len([p for p in self.thinpools if p in vg.lvs])
> + if pool_count:
> + idx = pool_count
> + while True:
> + lvtemplate = "lv_pool%02d" % idx
> + if lvtemplate in [lv.lvname for lv in vg.lvs]:
Using a generator here, or building 'lv_names = set(lv.lvname for lv in
vg.lvs)' once outside the loop, would be nicer.
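I.e. something like this (untested sketch):

    lv_names = set(lv.lvname for lv in vg.lvs)
    idx = pool_count
    while True:
        lvtemplate = "lv_pool%02d" % idx
        if lvtemplate in lv_names:
            idx += 1
        else:
            break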
> + idx += 1
> + else:
> + break
> + else:
> + lvtemplate = "lv_pool"
> else:
> idx = len(vg.lvs)
> while True:
> diff --git a/storage/devicelibs/lvm.py b/storage/devicelibs/lvm.py
> index 7759e45..06f8304 100644
> --- a/storage/devicelibs/lvm.py
> +++ b/storage/devicelibs/lvm.py
> @@ -23,6 +23,7 @@
> import os
> import math
> import re
> +from collections import namedtuple
>
> import iutil
> import logging
> @@ -33,9 +34,22 @@ from constants import *
>
> import gettext
> _ = lambda x: gettext.ldgettext("anaconda", x)
> +N_ = lambda x: x
>
> MAX_LV_SLOTS = 256
>
> +LVM_PE_SIZE = 4 # MiB
> +
> +# thinp constants
> +LVM_THINP_MIN_METADATA_SIZE = 2
> +LVM_THINP_MAX_METADATA_SIZE = 16384
> +LVM_THINP_MIN_CHUNK_SIZE = 0.0625 # 64 KiB
> +LVM_THINP_MAX_CHUNK_SIZE = 1024
> +
> +ThPoolProfile = namedtuple("ThPoolProfile", ["name", "desc"])
> +KNOWN_THPOOL_PROFILES = (ThPoolProfile("thin-generic", N_("Generic")),
> + ThPoolProfile("thin-performance", N_("Performance")))
We don't actually need the profile descriptions here, since we don't
expect any interactive/UI thin pool creation, so a plain tuple
("thin-generic", "thin-performance") would be enough. But I'm not
against having the same code/constants here and on master/rhel7-branch.
> diff --git a/storage/devices.py b/storage/devices.py
> index e179339..c716bae 100644
> --- a/storage/devices.py
> +++ b/storage/devices.py
> @@ -2136,7 +2136,9 @@ class LVMVolumeGroupDevice(DMDevice):
>
> # verify we have the space, then add it
> # do not verify for growing vg (because of ks)
> - if not lv.exists and not self.growable and lv.size > self.freeSpace:
> + if not lv.exists and not self.growable and \
> + not isinstance(lv, LVMThinLogicalVolumeDevice) and \
> + lv.size > self.freeSpace:
> raise DeviceError("new lv is too large to fit in free space", self.name)
>
> log.debug("Adding %s/%dMB to %s" % (lv.name, lv.size, self.name))
> @@ -2284,6 +2286,14 @@ class LVMVolumeGroupDevice(DMDevice):
> return self._lvs[:] # we don't want folks changing our list
>
> @property
> + def thinpools(self):
> + return [l for l in self._lvs if isinstance(l, LVMThinPoolDevice)]
> +
> + @property
> + def thinlvs(self):
> + return [l for l in self._lvs if isinstance(l, LVMThinLogicalVolumeDevice)]
> +
> + @property
> def complete(self):
> """Check if the vg has all its pvs in the system
> Return True if complete.
> @@ -2333,13 +2343,14 @@ class LVMLogicalVolumeDevice(DMDevice):
> percent -- percent of VG space to take
>
> """
> - if isinstance(vgdev, list):
> - if len(vgdev) != 1:
> - raise ValueError("constructor requires a single LVMVolumeGroupDevice instance")
> - elif not isinstance(vgdev[0], LVMVolumeGroupDevice):
> + if self.__class__.__name__ == "LVMLogicalVolumeDevice":
This check is worth an explanation in a comment.
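Something along these lines would do (my wording, adjust as needed):

    # Only validate vgdev when this class is instantiated directly -- the
    # thin subclasses pass their own parent types here (e.g. a thin LV's
    # parent is its pool, not a VG), so the check must not apply to them.
    if self.__class__.__name__ == "LVMLogicalVolumeDevice":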
> + if isinstance(vgdev, list):
> + if len(vgdev) != 1:
> + raise ValueError("constructor requires a single LVMVolumeGroupDevice instance")
> + elif not isinstance(vgdev[0], LVMVolumeGroupDevice):
> + raise ValueError("constructor requires a LVMVolumeGroupDevice instance")
> + elif not isinstance(vgdev, LVMVolumeGroupDevice):
> raise ValueError("constructor requires a LVMVolumeGroupDevice instance")
> - elif not isinstance(vgdev, LVMVolumeGroupDevice):
> - raise ValueError("constructor requires a LVMVolumeGroupDevice instance")
> DMDevice.__init__(self, name, size=size, format=format,
> sysfsPath=sysfsPath, parents=vgdev,
> exists=exists)
> @@ -2382,7 +2393,7 @@ class LVMLogicalVolumeDevice(DMDevice):
> raise SinglePhysicalVolumeError(self.singlePVerr)
>
> # here we go with the circular references
> - self.vg._addLogVol(self)
> + self.parents[0]._addLogVol(self)
>
> def __str__(self):
> s = DMDevice.__str__(self)
> @@ -2545,6 +2556,14 @@ class LVMLogicalVolumeDevice(DMDevice):
>
> return [validpvs[0].path]
>
> + def _create(self, progress=None):
> + # should we use --zero for safety's sake?
> + if self.singlePV:
> + lvm.lvcreate(self.vg.name, self._name, self.size, progress=progress,
> + pvs=self._getSinglePV())
> + else:
> + lvm.lvcreate(self.vg.name, self._name, self.size, progress=progress)
> +
> def create(self, intf=None):
> """ Create the device. """
> log_method_call(self, self.name, status=self.status)
> @@ -2560,13 +2579,7 @@ class LVMLogicalVolumeDevice(DMDevice):
> try:
> self.createParents()
> self.setupParents()
> -
> - # should we use --zero for safety's sake?
> - if self.singlePV:
> - lvm.lvcreate(self.vg.name, self._name, self.size, progress=w,
> - pvs=self._getSinglePV())
> - else:
> - lvm.lvcreate(self.vg.name, self._name, self.size, progress=w)
> + self._create(progress=w)
> except Exception:
> raise
> else:
> @@ -2632,6 +2645,199 @@ class LVMLogicalVolumeDevice(DMDevice):
> problem = _("small")
> return problem
>
> +class LVMThinPoolDevice(LVMLogicalVolumeDevice):
> + """ An LVM Thin Pool """
> + _type = "lvmthinpool"
> + _resizable = False
> +
> + def __init__(self, name, vgdev, size=None, uuid=None,
> + format=None, exists=False, sysfsPath='',
> + grow=None, maxsize=None, percent=None,
> + metadatasize=None, chunksize=None, segType=None, profile=None):
> + """
> + :param name: the device name (generally a device node's basename)
> + :type name: str
> + :param vgdev: the vg that will contain this pool
> + :type vgdev: :class:`~.LVMVolumeGroupDevice`
> + :keyword exists: does this device exist?
> + :type exists: bool
> + :keyword size: the device's size
> + :type size: :class:`~.size.Size`
> + :keyword format: this device's formatting
> + :type format: :class:`~.formats.DeviceFormat` or a subclass of it
> + :keyword sysfsPath: sysfs device path
> + :type sysfsPath: str
> + :keyword uuid: the device UUID
> + :type uuid: str
> + :keyword segType: segment type
> + :type segType: str
> +
> + For non-existent pools only:
> +
> + :keyword grow: whether to grow this LV
> + :type grow: bool
> + :keyword maxsize: maximum size for growable LV
> + :type maxsize: :class:`~.size.Size`
> + :keyword percent: percent of VG space to take
> + :type percent: int
> + :keyword metadatasize: the size of the metadata LV
> + :type metadatasize: :class:`~.size.Size`
> + :keyword chunksize: chunk size for the pool
> + :type chunksize: :class:`~.size.Size`
> + :keyword profile: (allocation) profile for the pool or None (unspecified)
> + """
> + if metadatasize is not None and \
> + not lvm.is_valid_thin_pool_metadata_size(metadatasize):
> + raise ValueError("invalid metadatasize value")
> +
> + if chunksize is not None and \
> + not lvm.is_valid_thin_pool_chunk_size(chunksize):
> + raise ValueError("invalid chunksize value")
> +
> + super(LVMThinPoolDevice, self).__init__(name, vgdev,
> + size=size, uuid=uuid,
> + format=format, exists=exists,
> + sysfsPath=sysfsPath, grow=grow,
> + maxsize=maxsize,
> + percent=percent,
> + segType=segType)
> +
> + self.metaDataSize = metadatasize or 0
> + self.chunkSize = chunksize or 0
> + self.profile = profile
> + self._lvs = []
> +
> + def _addLogVol(self, lv):
> + """ Add an LV to this pool. """
> + if lv in self._lvs:
> + raise ValueError("lv is already part of this vg")
> +
> + # TODO: add some checking to prevent overcommit for preexisting
> + self.vg._addLogVol(lv)
> + log.debug("Adding %s/%s to %s", lv.name, lv.size, self.name)
> + self._lvs.append(lv)
> +
> + def _removeLogVol(self, lv):
> + """ Remove an LV from this pool. """
> + if lv not in self._lvs:
> + raise ValueError("specified lv is not part of this vg")
> +
> + self._lvs.remove(lv)
> + self.vg._removeLogVol(lv)
> +
> + @property
> + def lvs(self):
> + """ A list of this pool's LVs """
> + return self._lvs[:] # we don't want folks changing our list
> +
> + @property
> + def vgSpaceUsed(self):
> + space = super(LVMThinPoolDevice, self).vgSpaceUsed
> + space += lvm.get_pool_padding(space, pesize=self.vg.peSize)
> + return space
> +
> + @property
> + def usedSpace(self):
> + return sum(l.poolSpaceUsed for l in self.lvs)
> +
> + @property
> + def freeSpace(self):
> + return self.size - self.usedSpace
> +
> + def _create(self, progress=None):
> + """ Create the device. """
> + log_method_call(self, self.name, status=self.status)
> + lvm.thinpoolcreate(self.vg.name, self.lvname, self.size,
> + metadatasize=self.metaDataSize,
> + chunksize=self.chunkSize,
> + profile=self.profile.name if self.profile else "",
> + progress=progress)
> +
> + def dracutSetupArgs(self):
> + return set()
> +
> + @property
> + def direct(self):
> + """ Is this device directly accessible? """
> + return False
Is this property needed on the rhel6-branch?
> +
> + def populateKSData(self, data):
> + super(LVMThinPoolDevice, self).populateKSData(data)
> + data.mountpoint = "none"
> + data.thin_pool = True
> + data.metadata_size = self.metaDataSize
> + data.chunk_size = self.chunkSize
> + if self.profile:
> + data.profile = self.profile.name
> +
> +class LVMThinLogicalVolumeDevice(LVMLogicalVolumeDevice):
> + """ An LVM Thin Logical Volume """
> + _type = "lvmthinlv"
> +
> + @property
> + def pool(self):
> + return self.parents[0]
> +
> + @property
> + def vg(self):
> + return self.pool.vg
> +
> + @property
> + def poolSpaceUsed(self):
> + """ The total space used within the thin pool by this volume.
> +
> + This should probably align to the greater of vg extent size and
> + pool chunk size. If it ends up causing overcommit in the amount of
> + less than one chunk per thin lv, so be it.
> + """
> + return self.vg.align(self.size, roundup=True)
The docstring says something different from what the code does. Actually,
I don't think the extent size really matters here; it should only be the
chunk size that matters. (but let me check it with the LVM team)
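If the LVM team confirms that, the rounding could look something like the
following (just a sketch -- assumes chunkSize is in MiB and that lvm's
default 64 KiB thin chunk size, i.e. LVM_THINP_MIN_CHUNK_SIZE from the
constants hunk above, applies when the pool doesn't set one):

    @property
    def poolSpaceUsed(self):
        """ The total space used within the thin pool by this volume. """
        import math  # inline import just to keep the sketch self-contained
        # round the virtual size up to a multiple of the pool's chunk size
        chunk_size = self.pool.chunkSize or lvm.LVM_THINP_MIN_CHUNK_SIZE
        return math.ceil(self.size / float(chunk_size)) * chunk_size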
> +
> + @property
> + def vgSpaceUsed(self):
> + return 0 # the pool's size is already accounted for in the vg
> +
> + def _setSize(self, size):
> + log.debug("setting lv %s size to %dMB" % (self.name, size))
> + size = self.vg.align(numeric_type(size))
> + self._size = size
> + self.targetSize = size
> +
> + size = property(StorageDevice._getSize, _setSize)
> +
> + def _preCreate(self):
> + # skip LVMLogicalVolumeDevice's _preCreate() method as it checks for
> + # free space in a VG, which doesn't make sense for a ThinLV and causes
> + # a bug by limiting the ThinLV's size to the VG's free space
> + super(LVMLogicalVolumeDevice, self)._preCreate() # pylint: disable=bad-super-call
> +
> + def _create(self, progress=None):
> + """ Create the device. """
> + log_method_call(self, self.name, status=self.status)
> + lvm.thinlvcreate(self.vg.name, self.pool.lvname, self.lvname,
> + self.size, progress=progress)
> +
> + def removeHook(self, modparent=True):
> + if modparent:
> + self.pool._removeLogVol(self)
> +
> + # pylint: disable=bad-super-call
> + super(LVMLogicalVolumeDevice, self).removeHook(modparent=modparent)
> +
> + def addHook(self, new=True):
> + # pylint: disable=bad-super-call
> + super(LVMLogicalVolumeDevice, self).addHook(new=new)
> + if new:
> + return
> +
> + if self not in self.pool.lvs:
> + self.pool._addLogVol(self)
These two *Hook methods do not seem to be used on the rhel6-branch.
> +
> + def populateKSData(self, data):
> + super(LVMThinLogicalVolumeDevice, self).populateKSData(data)
> + data.thin_volume = True
> + data.pool_name = self.pool.lvname
Same here.
> +
> class MDRaidArrayDevice(StorageDevice):
> """ An mdraid (Linux RAID) device. """
> _type = "mdarray"
> diff --git a/storage/devicetree.py b/storage/devicetree.py
> index 83673cc..6f2ed50 100644
> --- a/storage/devicetree.py
> +++ b/storage/devicetree.py
> @@ -1547,101 +1547,134 @@ class DeviceTree(object):
> log.debug("no LVs listed for VG %s" % vg_name)
> return False
>
> - def lv_attr_cmp(a, b):
> - """ Sort so that mirror images come first and snapshots last. """
> - mirror_chars = "Iil"
> - snapshot_chars = "Ss"
> - if a[0] in mirror_chars and b[0] not in mirror_chars:
> - return -1
> - elif a[0] not in mirror_chars and b[0] in mirror_chars:
> - return 1
> - elif a[0] not in snapshot_chars and b[0] in snapshot_chars:
> - return -1
> - elif a[0] in snapshot_chars and b[0] not in snapshot_chars:
> - return 1
> - else:
> - return 0
> -
> - # make a list of indices with mirror volumes up front and snapshots at
> - # the end
> - indices = range(len(lv_names))
> - indices.sort(key=lambda i: lv_attr[i], cmp=lv_attr_cmp)
> - raid = {}
> - for index in indices:
> - lv_name = lv_names[index]
> - if not lv_name:
> - continue
> -
> + def addRequiredLV(name, msg):
> + """ Add a prerequisite/parent LV.
> +
> + The parent is strictly required in order to be able to add
> + some other LV that depends on it. For this reason, failure to
> + add the specified LV results in a DeviceTreeError with the
> + message string specified in the msg parameter.
> +
> + :param str name: the full name of the LV (including vgname)
> + :param str msg: message to pass DeviceTreeError ctor on error
> + :returns: None
> + :raises: :class:`~.errors.DeviceTreeError` on failure
> +
> + """
> + vol = self.getDeviceByName(name)
> + if vol is None:
> + addLV(lv_info[name])
> + vol = self.getDeviceByName(name)
> +
> + if vol is None:
> + log.error("%s: %s", msg, name)
> + raise DeviceTreeError(msg)
> +
> + def addLV(lv):
> + """ Instantiate and add an LV based on data from the VG. """
> + lv_name = udev.device_get_lv_name(lv)
> + lv_uuid = udev.device_get_lv_uuid(lv)
> + lv_attr = udev.device_get_lv_attr(lv)
> + lv_size = udev.device_get_lv_size(lv)
> + lv_type = udev.device_get_lv_type(lv)
> +
> + lv_class = LVMLogicalVolumeDevice
> + lv_parents = [vg_device]
> + lv_kwargs = {}
> name = "%s-%s" % (vg_name, lv_name)
> - if lv_attr[index][0] in 'Ss':
> - log.debug("found lvm snapshot volume '%s'" % name)
> - origin_name = devicelibs.lvm.lvorigin(vg_name, lv_name)
> - if not origin_name:
> - log.error("lvm snapshot '%s-%s' has unknown origin"
> - % (vg_name, lv_name))
> - continue
>
> - origin = self.getDeviceByName("%s-%s" % (vg_name,
> - origin_name))
> - if not origin:
> - if origin_name.endswith("_vorigin]"):
> - log.info("snapshot volume '%s' has vorigin" % name)
> - vg_device.voriginSnapshots[lv_name] = lv_sizes[index]
> - else:
> - log.warning("snapshot lv '%s' origin lv '%s-%s' "
> - "not found" % (name,
> - vg_name, origin_name))
> - continue
> + if self.getDeviceByName(name):
> + # some lvs may have been added on demand below
> + log.debug("already added %s", name)
> + return
>
> - if lv_name in origin.snapshots:
> - continue
> + if lv_attr[0] in 'Ss':
> + log.info("found lvm snapshot volume '%s'", name)
> + origin_name = devicelibs.lvm.lvorigin(vg_name, lv_name)
> + if not origin_name:
> + log.error("lvm snapshot '%s-%s' has unknown origin",
> + vg_name, lv_name)
> + return
>
> - log.debug("adding %dMB to %s snapshot total"
> - % (lv_sizes[index], origin.name))
> - origin.snapshotSpace += lv_sizes[index]
> - origin.snapshots.append(lv_name)
> - continue
> - elif lv_attr[index][0] == 'v':
> + if origin_name.endswith("_vorigin]"):
> + lv_kwargs["vorigin"] = True
> + origin = None
> + else:
> + origin_device_name = "%s-%s" % (vg_name, origin_name)
> + addRequiredLV(origin_device_name,
> + "failed to locate origin lv")
> + origin = self.getDeviceByName(origin_device_name)
> +
> + lv_kwargs["origin"] = origin
> + lv_class = LVMSnapShotDevice
This class doesn't exist on the rhel6-branch.
> + elif lv_attr[0] == 'v':
> # skip vorigins
> - continue
> - elif lv_attr[index][0] in 'Ii':
> + return
> + elif lv_attr[0] in 'Ii':
> # mirror image
> - lv_name = re.sub(r'_[rm]image.+', '', lv_name[1:-1])
> - name = "%s-%s" % (vg_name, lv_name)
> - if name not in raid:
> - raid[name] = {"copies": 0, "log": 0, "meta": 0}
> -
> + rname = re.sub(r'_[rm]image.+', '', lv_name[1:-1])
> + name = "%s-%s" % (vg_name, rname)
> + addRequiredLV(name, "failed to look up raid lv")
> raid[name]["copies"] += 1
> - continue
> - elif lv_attr[index][0] == 'e':
> + return
> + elif lv_attr[0] == 'e':
> + if lv_name.endswith("_pmspare]"):
> + # spare metadata area for any thin pool that needs repair
> + return
> +
> # raid metadata volume
> - lv_name = re.sub(r'_rmeta.+', '', lv_name[1:-1])
> + lv_name = re.sub(r'_[tr]meta.*', '', lv_name[1:-1])
> name = "%s-%s" % (vg_name, lv_name)
> - raid[name]["meta"] += lv_sizes[index]
> - continue
> - elif lv_attr[index][0] == 'l':
> + addRequiredLV(name, "failed to look up raid lv")
> + raid[name]["meta"] += lv_size
> + return
> + elif lv_attr[0] == 'l':
> # log volume
> - lv_name = re.sub(r'_mlog.*', '', lv_name[1:-1])
> - name = "%s-%s" % (vg_name, lv_name)
> - if name not in raid:
> - raid[name] = {"copies": 0, "log": 0, "meta": 0}
> + rname = re.sub(r'_mlog.*', '', lv_name[1:-1])
> + name = "%s-%s" % (vg_name, rname)
> + addRequiredLV(name, "failed to look up log lv")
> + raid[name]["log"] = lv_size
> + return
> + elif lv_attr[0] == 't':
> + # thin pool
> + lv_class = LVMThinPoolDevice
> + elif lv_attr[0] == 'V':
> + # thin volume
> + pool_name = devicelibs.lvm.thinlvpoolname(vg_name, lv_name)
> + pool_device_name = "%s-%s" % (vg_name, pool_name)
> + addRequiredLV(pool_device_name, "failed to look up thin pool")
>
> - raid[name]["log"] = lv_sizes[index]
> - continue
> + origin_name = devicelibs.lvm.lvorigin(vg_name, lv_name)
> + if origin_name:
> + origin_device_name = "%s-%s" % (vg_name, origin_name)
> + addRequiredLV(origin_device_name, "failed to locate origin lv")
> + origin = self.getDeviceByName(origin_device_name)
> + lv_kwargs["origin"] = origin
> + lv_class = LVMThinSnapShotDevice
No LVMThinSnapShotDevice class on the rhel6-branch.
> + else:
> + lv_class = LVMThinLogicalVolumeDevice
>
> - lv_device = self.getDeviceByName(name)
> - if lv_device is None:
> - lv_uuid = lv_uuids[index]
> - lv_size = lv_sizes[index]
> - lv_type = lv_types[index]
> - lv_device = LVMLogicalVolumeDevice(lv_name,
> - vg_device,
> - uuid=lv_uuid,
> - size=lv_size,
> - segType=lv_type,
> - exists=True)
> - self._addDevice(lv_device)
> + lv_parents = [self.getDeviceByName(pool_device_name)]
> + elif lv_name.endswith(']'):
> + # Internal LVM2 device
> + return
> + elif lv_attr[0] not in '-mMrRoO':
> + # Ignore anything else except for the following:
> + # - normal lv
> + # m mirrored
> + # M mirrored without initial sync
> + # r raid
> + # R raid without initial sync
> + # o origin
> + # O origin with merging snapshot
> + return
>
> + lv_dev = self.getDeviceByUuid(lv_uuid)
> + if lv_dev is None:
> + lv_device = lv_class(lv_name, parents=lv_parents,
> + uuid=lv_uuid, size=lv_size, segType=lv_type,
> + exists=True, **lv_kwargs)
> + self._addDevice(lv_device)
> try:
> lv_device.setup()
> ret = True
> @@ -1649,15 +1682,15 @@ class DeviceTree(object):
> log.info("setup of %s failed: %s"
> % (lv_device.name, msg))
>
> - for name, data in raid.items():
> - lv_dev = self.getDeviceByName(name)
> - lv_dev.copies = data["copies"]
> - lv_dev.metaDataSize = data["meta"]
> - lv_dev.logSize = data["log"]
> - log.debug("set %s copies to %d, metadata size to %dMB, log size "
> - "to %dMB, total size %dMB"
> - % (lv_dev.name, lv_dev.copies, lv_dev.metaDataSize,
> - lv_dev.logSize, lv_dev.vgSpaceUsed))
> + if lv_device.status:
> + lv_device.updateSysfsPath()
> + lv_info = udev_get_block_device(lv_device.sysfsPath)
> + if not lv_info:
> + log.error("failed to get udev data for lv %s", lv_device.name)
> + return
> +
> + # do format handling now
> + self.addUdevDevice(lv_info)
>
> return ret
>
> diff --git a/storage/partitioning.py b/storage/partitioning.py
> index 1e7f583..184295c 100644
> --- a/storage/partitioning.py
> +++ b/storage/partitioning.py
> @@ -34,6 +34,7 @@ from errors import *
> from deviceaction import *
> from devices import PartitionDevice, LUKSDevice, devicePathToName
> from formats import getFormat
> +from devicelibs.lvm import get_pool_padding
>
> import gettext
> _ = lambda x: gettext.ldgettext("anaconda", x)
> @@ -1756,88 +1757,134 @@ def growLVM(storage):
> # figure out how much to grow each LV
> grow_amounts = {}
> lv_total = vg.size - total_free
> - log.debug("used: %dMB ; vg.size: %dMB" % (lv_total, vg.size))
> -
> - # This first loop is to calculate percentage-based growth
> - # amounts. These are based on total free space.
> - lvs = vg.lvs
> - lvs.sort(cmp=lvCompare)
> - for lv in lvs:
> - if not lv.req_grow or not lv.req_percent:
> - continue
> + log.debug("used: %dMB ; vg.size: %dMB" % (vg.size - total_free, vg.size))
>
> - portion = (lv.req_percent * 0.01)
> - grow = portion * vg.freeSpace
> - new_size = lv.req_size + grow
> - if lv.req_max_size and new_size > lv.req_max_size:
> - grow -= (new_size - lv.req_max_size)
> -
> - if lv.format.maxSize and lv.format.maxSize < new_size:
> - grow -= (new_size - lv.format.maxSize)
> -
> - # clamp growth amount to a multiple of vg extent size
> - grow_amounts[lv.name] = vg.align(grow)
> - total_free -= grow
> - lv_total += grow
> -
> - # This second loop is to calculate non-percentage-based growth
> - # amounts. These are based on free space remaining after
> - # calculating percentage-based growth amounts.
> -
> - # keep a tab on space not allocated due to format or requested
> - # maximums -- we'll dole it out to subsequent requests
> - leftover = 0
> - for lv in lvs:
> - log.debug("checking lv %s: req_grow: %s ; req_percent: %s"
> - % (lv.name, lv.req_grow, lv.req_percent))
> - if not lv.req_grow or lv.req_percent:
> - continue
> + ##
> + ## First, grow non-thin LVs. Percentage-based growth comes first.
> + ##
> +
> + # don't include thin lvs in the vg's growth calculation
> + fatlvs = [lv for lv in vg.lvs if lv not in vg.thinlvs]
> + for lv in fatlvs:
> + if lv in vg.thinpools:
> + # make sure the pool's base size is at least the sum of its lvs'
> + req_size = max(lv.req_size, lv.usedSpace)
> +
> + # add the required padding to the requested pool size
> + req_size += get_pool_padding(lv.req_size, pesize=vg.peSize)
> +
> + total_free -= req_size - lv.req_size
> + lv.req_size = req_size
> +
> + def growPercentageLVs(vg, lvs, free, growth):
> + """ Grow percentage-based LVs within a VG or thin pool.
>
> - portion = float(lv.req_size) / float(lv_total)
> - grow = portion * total_free
> - log.debug("grow is %dMB" % grow)
> -
> - todo = lvs[lvs.index(lv):]
> - unallocated = reduce(lambda x,y: x+y,
> - [l.req_size for l in todo
> - if l.req_grow and not l.req_percent])
> - extra_portion = float(lv.req_size) / float(unallocated)
> - extra = extra_portion * leftover
> - log.debug("%s getting %dMB (%d%%) of %dMB leftover space"
> - % (lv.name, extra, extra_portion * 100, leftover))
> - leftover -= extra
> - grow += extra
> - log.debug("grow is now %dMB" % grow)
> - max_size = lv.req_size + grow
> - if lv.req_max_size and max_size > lv.req_max_size:
> - max_size = lv.req_max_size
> -
> - if lv.format.maxSize and max_size > lv.format.maxSize:
> - max_size = lv.format.maxSize
> -
> - log.debug("max size is %dMB" % max_size)
> - max_size = max_size
> - leftover += (lv.req_size + grow) - max_size
> - grow = max_size - lv.req_size
> - log.debug("lv %s gets %dMB" % (lv.name, vg.align(grow)))
> - grow_amounts[lv.name] = vg.align(grow)
> + :param vg: the VG, used only for aligning growth amounts
> + :param lvs: the set of lvs to consider
> + :param free: the total free space available for growth in MiB
> + :param dict growth: growth amounts for lvs (in+out)
> + :returns: remaining free space in MiB
> +
> + Percentages for thin volumes are relative to the free space in
> + the pool -- not the whole vg.
> + """
> + for lv in lvs:
> + if not lv.req_grow or not lv.req_percent:
> + continue
> +
> + portion = (lv.req_percent * 0.01)
> + grow = portion * free
> + new_size = lv.req_size + grow
> + if lv.req_max_size and new_size > lv.req_max_size:
> + grow -= (new_size - lv.req_max_size)
> +
> + if lv.format.maxSize and lv.format.maxSize < new_size:
> + grow -= (new_size - lv.format.maxSize)
> +
> + # clamp growth amount to a multiple of vg extent size
> + growth[lv.name] = vg.align(grow)
> + free -= grow
> +
> + return free
> +
> + fatlvs.sort(cmp=lvCompare)
> + total_free = growPercentageLVs(vg, fatlvs, total_free, grow_amounts)
> +
> + def growLVs(vg, lvs, free, growth):
> + """ Grow LVs within a VG or thin pool.
> +
> + :param vg: the VG, used only for aligning growth amounts
> + :param lvs: the set of lvs to consider
> + :param free: the total free space available for growth in MiB
> + :param dict growth: growth amounts for lvs (in+out)
> + :returns: leftover space in MiB (from requests with max size)
> + """
> + # keep a tab on space not allocated due to format or requested
> + # maximums -- we'll dole it out to subsequent requests
> + leftover = 0
> + growth_base = float(sum(lv.req_size for lv in lvs if lv.req_grow))
> + for lv in lvs:
> + log.debug("checking lv %s: req_grow: %s ; req_percent: %s"
> + % (lv.name, lv.req_grow, lv.req_percent))
> + if not lv.req_grow or lv.req_percent:
> + continue
> +
> + portion = float(lv.req_size) / growth_base
> + grow = portion * free
> + log.debug("grow is %dMB" % grow)
> +
> + todo = lvs[lvs.index(lv):]
> + unallocated = reduce(lambda x,y: x+y,
> + [l.req_size for l in todo
> + if l.req_grow and not l.req_percent])
> + extra_portion = float(lv.req_size) / float(unallocated)
> + extra = extra_portion * leftover
> + log.debug("%s getting %dMB (%d%%) of %dMB leftover space"
> + % (lv.name, extra, extra_portion * 100, leftover))
> + leftover -= extra
> + grow += extra
> + log.debug("grow is now %dMB" % grow)
> + max_size = lv.req_size + grow
> + if lv.req_max_size and max_size > lv.req_max_size:
> + max_size = lv.req_max_size
> +
> + if lv.format.maxSize and max_size > lv.format.maxSize:
> + max_size = lv.format.maxSize
> +
> + log.debug("max size is %dMB" % max_size)
> + max_size = max_size
> + leftover += (lv.req_size + grow) - max_size
> + grow = max_size - lv.req_size
> + log.debug("lv %s gets %dMB" % (lv.name, vg.align(grow)))
> + growth[lv.name] = vg.align(grow)
> +
> + return leftover
> +
> + leftover = growLVs(vg, fatlvs, total_free, grow_amounts)
>
> if not grow_amounts:
> log.debug("no growable lvs in vg %s" % vg.name)
> continue
>
> # now grow the lvs by the amounts we've calculated above
> - for lv in lvs:
> + for lv in fatlvs:
> if lv.name not in grow_amounts.keys():
> continue
> - lv.size += grow_amounts[lv.name]
> +
> + size = lv.req_size + grow_amounts[lv.name]
> +
> + # reduce the size of thin pools by the pad size
> + if hasattr(lv, "lvs"):
> + size -= get_pool_padding(size, pesize=vg.peSize, reverse=True)
> +
> + lv.size = size
>
> # now there shouldn't be any free space left, but if there is we
> # should allocate it to one of the LVs
> vg_free = vg.freeSpace
> log.debug("vg %s has %dMB free" % (vg.name, vg_free))
> if vg_free:
> - for lv in lvs:
> + for lv in fatlvs:
> if not lv.req_grow:
> continue
>
> @@ -1858,7 +1905,28 @@ def growLVM(storage):
> if lv.format.maxSize and projected > lv.format.maxSize:
> projected = lv.format.maxSize
>
> + # reduce the size of thin pools by the pad size
> + if hasattr(lv, "lvs"):
> + projected -= get_pool_padding(projected, pesize=vg.peSize, reverse=True)
> +
> log.debug("giving leftover %dMB to %s" % (projected - lv.size,
> lv.name))
> lv.size = projected
>
> + ##
> + ## Grow thin lvs within their respective pools, percentage-based first.
> + ##
> + for pool in vg.thinpools:
> + log.debug("%s size=%d free=%d lvs=%s)", pool.lvname, pool.size, pool.freeSpace, [lv.lvname for lv in pool.lvs])
> + lvs = sorted(pool.lvs, cmp=lvCompare)
> +
> + total_free = growPercentageLVs(vg, lvs, pool.freeSpace, grow_amounts)
> +
> + growLVs(vg, lvs, pool.freeSpace, grow_amounts)
> +
> + # now grow the thin lvs by the amounts we just calculated
> + for lv in pool.lvs:
> + if lv.name not in grow_amounts.keys():
> + continue
> + lv.size += grow_amounts[lv.name]
> +
I must say I didn't study the new growing code too closely, but from what
I remember, there was some change in the behaviour of the percentage
specification on the rhel7-branch. Do we want to take that change to the
rhel6-branch too? (which is what I think the ported code does)
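For illustration, the ported growPercentageLVs() subtracts each grant
from 'free' as it goes, so a later --percent request is computed against
what the earlier ones left behind, whereas the old loop computed every
percentage against the (constant) vg.freeSpace -- if I read the hunks
right, that's the behaviour change. A toy model with made-up numbers
(alignment, maximums and format limits ignored):

    free = 1000.0  # MB free in the vg (or thin pool)
    for req_percent in (50, 25):
        grow = req_percent * 0.01 * free
        free -= grow
        print "grants %d MB" % grow  # 500 MB, then 125 MB (not 250)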
--
Vratislav Podzimek
Anaconda Rider | Red Hat, Inc. | Brno - Czech Republic