[2.0-devel 1/1] Set both req_size and size of thin pool when growing LVM
by dashea
From: Vratislav Podzimek <vpodzime(a)redhat.com>
This fixes an issue when growing thin pool requests: the thin pools don't get
their size set, even though their req_size is set to the sum of their thin
LVs' sizes.
---
blivet/partitioning.py | 1 +
1 file changed, 1 insertion(+)
diff --git a/blivet/partitioning.py b/blivet/partitioning.py
index 37a19e1..d2aff6b 100644
--- a/blivet/partitioning.py
+++ b/blivet/partitioning.py
@@ -1976,6 +1976,7 @@ def grow_lvm(storage):
if lv in vg.thinpools:
# make sure the pool's base size is at least the sum of its lvs'
lv.req_size = max(lv.req_size, lv.used_space)
+ lv.size = lv.req_size
# add the required padding to the requested pool size
lv.req_size += Size(blockdev.lvm.get_thpool_padding(lv.req_size, vg.pe_size))
--
To view this commit on github, visit https://github.com/rhinstaller/blivet/commit/8380e8ceda95f67d03ae228a2ab6...
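For context, a minimal standalone sketch of the grow logic with this fix applied. The blockdev.lvm.get_thpool_padding(size, pe_size) call mirrors the patch above; the FakePool class and the concrete sizes are hypothetical, for illustration only.

import blivet  # importing blivet initializes the libblockdev plugins it needs
from blivet.size import Size
import gi
gi.require_version("BlockDev", "2.0")
from gi.repository import BlockDev as blockdev

class FakePool:
    """Hypothetical stand-in for a blivet thin pool request."""
    def __init__(self, req_size, used_space):
        self.req_size = req_size      # size requested for the pool
        self.used_space = used_space  # sum of the pool's thin LVs
        self.size = Size(0)           # previously left unset -- the bug

def grow_pool(lv, pe_size):
    # make sure the pool's base size is at least the sum of its LVs'
    lv.req_size = max(lv.req_size, lv.used_space)
    lv.size = lv.req_size  # the one-line fix: keep size in sync with req_size
    # add the required padding to the requested pool size
    lv.req_size += Size(blockdev.lvm.get_thpool_padding(lv.req_size, pe_size))

pool = FakePool(req_size=Size("1 GiB"), used_space=Size("4 GiB"))
grow_pool(pool, pe_size=Size("4 MiB"))
# pool.size is now 4 GiB; pool.req_size is 4 GiB plus the metadata padding

Without the added lv.size assignment, the pool's size stayed at its initial value even though req_size had grown to cover the thin LVs.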
Re: [rhinstaller/blivet/pulls/351 2.1-devel] Installer isolation: Part One
by dashea
> @@ -1094,6 +1099,653 @@ def get(self, key, default=None):
> return self.mappings.get(key, default)
>
>
> +class InstallerStorage(Blivet):
> + """ Top-level class for managing installer-related storage configuration. """
> + def __init__(self, ksdata=None):
> + """
> + :keyword ksdata: kickstart data store
> + :type ksdata: :class:`pykickstart.Handler`
> + """
> + super().__init__(ksdata=ksdata)
> +
> + self.config = StorageDiscoveryConfig()
> + self.autopart_type = AUTOPART_TYPE_LVM
> +
> + self.__luks_devs = {}
> +
> + # these will both be empty until our reset method gets called
> + # instantiate our own devicetree here to override the default created
> + # in Blivet so that protected_dev_specs gets handled
> + self.devicetree = DeviceTree(passphrase=self.encryption_passphrase,
> + luks_dict=self.__luks_devs,
> + ignored_disks=self.ignored_disks,
> + exclusive_disks=self.exclusive_disks,
> + disk_images=self.disk_images,
> + protected_dev_specs=self.config.protected_dev_specs)
> + self.fsset = FSSet(self.devicetree)
> + self._free_space_snapshot = None
> +
> + def do_it(self, callbacks=None):
> + """
> + Commit queued changes to disk.
> +
> + :param callbacks: callbacks to be invoked when actions are executed
> + :type callbacks: return value of the :func:`~.callbacks.create_new_callbacks_
> +
> + """
> + super().do_it(callbacks=callbacks)
> +
> + if not flags.installer_mode:
> + return
> +
> + # now set the boot partition's flag
> + if self.bootloader and not self.bootloader.skip_bootloader:
> + if self.bootloader.stage2_bootable:
> + boot = self.boot_device
> + else:
> + boot = self.bootloader_device
> +
> + if boot.type == "mdarray":
> + boot_devs = boot.parents
> + else:
> + boot_devs = [boot]
> +
> + for dev in boot_devs:
> + if not hasattr(dev, "bootable"):
> + log.info("Skipping %s, not bootable", dev)
> + continue
> +
> +                # DOS labels can only have one partition marked as active,
> +                # and unmarking e.g. the Windows partition is not a good idea
> + skip = False
> + if dev.disk.format.parted_disk.type == "msdos":
> + for p in dev.disk.format.parted_disk.partitions:
> + if p.type == parted.PARTITION_NORMAL and \
> + p.getFlag(parted.PARTITION_BOOT):
> + skip = True
> + break
> +
> + # GPT labeled disks should only have bootable set on the
> + # EFI system partition (parted sets the EFI System GUID on
> + # GPT partitions with the boot flag)
> + if dev.disk.format.label_type == "gpt" and \
> + dev.format.type not in ["efi", "macefi"]:
> + skip = True
> +
> + if skip:
> + log.info("Skipping %s", dev.name)
> + continue
> +
> + # hfs+ partitions on gpt can't be marked bootable via parted
> + if dev.disk.format.parted_disk.type != "gpt" or \
> + dev.format.type not in ["hfs+", "macefi"]:
> + log.info("setting boot flag on %s", dev.name)
> + dev.bootable = True
> +
> + # Set the boot partition's name on disk labels that support it
> + if dev.parted_partition.disk.supportsFeature(parted.DISK_TYPE_PARTITION_NAME):
> + ped_partition = dev.parted_partition.getPedPartition()
> + ped_partition.setName(dev.format.name)
> + log.info("Setting label on %s to '%s'", dev, dev.format.name)
> +
> + dev.disk.setup()
> + dev.disk.format.commit_to_disk()
> +
> + if flags.installer_mode:
> + self.dump_state("final")
> +
> + def reset(self, cleanup_only=False):
> + """ Reset storage configuration to reflect actual system state.
> + This will cancel any queued actions and rescan from scratch but not
> + clobber user-obtained information like passphrases, iscsi config, &c
> +
> + :keyword cleanup_only: prepare the tree only to deactivate devices
> + :type cleanup_only: bool
> +
> + See :meth:`devicetree.Devicetree.populate` for more information
> + about the cleanup_only keyword argument.
> + """
> + log.info("resetting Blivet (version %s) instance %s", __version__, self)
> + if flags.installer_mode:
> + # save passphrases for luks devices so we don't have to reprompt
> + self.encryption_passphrase = None
> + for device in self.devices:
> + if device.format.type == "luks" and device.format.exists:
> + self.save_passphrase(device)
> +
> + if flags.installer_mode and not flags.image_install:
> + iscsi.startup()
> + fcoe.startup()
> + zfcp.startup()
> +
> + self.devicetree.reset(passphrase=self.encryption_passphrase,
> + luks_dict=self.__luks_devs,
> + ignored_disks=self.ignored_disks,
> + exclusive_disks=self.exclusive_disks,
> + disk_images=self.disk_images,
> + protected_dev_specs=self.config.protected_dev_specs)
> + self.devicetree.populate(cleanup_only=cleanup_only)
Weren't you going to replace these with `super().reset(...)`?
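For reference, a minimal sketch of that suggestion, assuming Blivet.reset() accepts the same cleanup_only keyword and drives devicetree.reset() and populate() itself; how the installer-only kwargs (e.g. protected_dev_specs) would be threaded through is left open here, and this is not the PR's final code:

# Hypothetical InstallerStorage.reset(), sketching the reviewer's suggestion.
def reset(self, cleanup_only=False):
    # installer-only preparation stays in the subclass
    if flags.installer_mode:
        # save passphrases for luks devices so we don't have to reprompt
        self.encryption_passphrase = None
        for device in self.devices:
            if device.format.type == "luks" and device.format.exists:
                self.save_passphrase(device)

    if flags.installer_mode and not flags.image_install:
        iscsi.startup()
        fcoe.startup()
        zfcp.startup()

    # let the base class handle devicetree.reset() and populate()
    super().reset(cleanup_only=cleanup_only)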
--
To view this pull request on github, visit https://github.com/rhinstaller/blivet/pull/351#discussion_r58110766