[PATCH rhel6-branch] Increment MD container child counter even if its volumes are ignored (#1120640)
by Artur Paszkiewicz
Volumes that are not selected in Specialized Storage Devices are
ignored in addUdevDevice. As a result, their parent container never
has its child counter incremented, which can later trigger an
incorrect unusedRaidMembersWarning.
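
A minimal sketch of the counting logic involved (illustrative only:
the kids counter and addChild() mirror the device API, but the
Device class and unused_raid_members() helper here are hypothetical):

    # Hypothetical sketch, not the anaconda source.
    class Device(object):
        def __init__(self, name):
            self.name = name
            self.kids = 0  # number of devices built on top of this one

        def addChild(self):
            self.kids += 1

    def unused_raid_members(containers):
        # A container with kids == 0 looks like a set of unused RAID
        # members, even if its volumes were merely ignored in the scan.
        return [c.name for c in containers if c.kids == 0]

    container = Device("imsm0")
    # Without the fix the ignored volume never calls addChild(), so the
    # container is reported as unused; with the fix it still counts:
    container.addChild()
    assert unused_raid_members([container]) == []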
Resolves: rhbz#1120640
Signed-off-by: Artur Paszkiewicz <artur.paszkiewicz(a)intel.com>
---
storage/devicetree.py | 7 +++++++
1 file changed, 7 insertions(+)
diff --git a/storage/devicetree.py b/storage/devicetree.py
index 4045cdf..b9ea399 100644
--- a/storage/devicetree.py
+++ b/storage/devicetree.py
@@ -1328,6 +1328,13 @@ class DeviceTree(object):
# slice off the "/dev/" part, lvm filter cares only about the rest
partitions_paths = [p[5:] for p in partitions_paths]
map(lvm.lvm_cc_addFilterRejectRegexp, partitions_paths)
+
+ if udev_device_get_md_container(info):
+ parentName = devicePathToName(udev_device_get_md_container(info))
+ container = self.getDeviceByName(parentName)
+ if container:
+ container.addChild()
+
return
log.debug("scanning %s (%s)..." % (name, sysfs_path))
--
1.8.4.5
[rhel7-branch] Check that cache PVs (if any) are in the VG the LV belongs to (#1263258)
by Vratislav Podzimek
This looks like it will be a common mistake in kickstart, so we should
catch it early instead of ending up with a traceback when trying to
create the cached LV.
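
A minimal sketch of the membership check being added (illustrative
only: validate_cache_pvs and lookup_alias are hypothetical stand-ins
for the kickstart handler and anaconda's lookupAlias() helper):

    # Hypothetical sketch, not the anaconda source.
    def validate_cache_pvs(cache_pvs, vg_pvs, lookup_alias):
        # Resolve each kickstart alias to a device and require that
        # every resulting PV is a member of the LV's volume group.
        pv_devices = (lookup_alias(pv) for pv in cache_pvs)
        if not all(pv in vg_pvs for pv in pv_devices):
            raise ValueError("Cache PVs must belong to the same VG "
                             "as the cached LV")

    devices = {"sda1": "pv-sda1", "sdb1": "pv-sdb1"}
    vg_pvs = ["pv-sda1"]
    validate_cache_pvs(["sda1"], vg_pvs, devices.get)    # passes
    # validate_cache_pvs(["sdb1"], vg_pvs, devices.get)  # would raise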
Signed-off-by: Vratislav Podzimek <vpodzime(a)redhat.com>
---
pyanaconda/kickstart.py | 7 +++++++
1 file changed, 7 insertions(+)
diff --git a/pyanaconda/kickstart.py b/pyanaconda/kickstart.py
index e3f956a..8f31b26 100644
--- a/pyanaconda/kickstart.py
+++ b/pyanaconda/kickstart.py
@@ -860,6 +860,13 @@ class LogVolData(commands.logvol.RHEL7_LogVolData):
raise KickstartValueError(formatErrorMsg(self.lineno,
msg=_("No volume group exists with the name \"%s\". Specify volume groups before logical volumes.") % self.vgname))
+ # If cache PVs specified, check that they belong to the same VG this LV is a member of
+ if self.cache_pvs:
+ pv_devices = (lookupAlias(devicetree, pv) for pv in self.cache_pvs)
+ if not all(pv in vg.pvs for pv in pv_devices):
+ raise KickstartValueError(formatErrorMsg(self.lineno,
+ msg=_("Cache PVs must belong to the same VG as the cached LV")))
+
pool = None
if self.thin_volume:
pool = devicetree.getDeviceByName("%s-%s" % (vg.name, self.pool_name))
--
2.1.0