[blivet:master] Factor out MDRaidArrayDevice w/ type in ("mdcontainer", "mdbiosraidarray")

mulhern amulhern at redhat.com
Tue Oct 14 20:54:27 UTC 2014


This patch factors the functionality associated with the types
"mdcontainer" and "mdbiosraidarray" out of MDRaidArrayDevice into
separate subclasses, MDContainerDevice and MDBiosRaidArrayDevice.

It also reverts commit c197d2d39104cdef234daecf5c44098bc345b506
entirely, since refactoring into an MDBiosRaidArrayDevice whose
formatClassName is None handles all of that functionality.

I tried to make the transformation as semantics-preserving as possible.
It is a nice property of the refactoring that the only changes needed
in the devices tests were to the names of the constructors.

Signed-off-by: mulhern <amulhern at redhat.com>
---
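A note on the shape of the change: the patch repeatedly replaces
runtime checks on a type string with subclass overrides. Here is a
minimal sketch of the pattern, using simplified toy classes standing
in for the actual blivet hierarchy (mediaPresent picked as the
example; the names below are illustrative only):

    # Before: one class, behavior switched on a type string.
    class Array(object):
        def __init__(self, kind):
            self._type = kind

        @property
        def media_present(self):
            if self._type == "mdcontainer":
                return False   # container nodes do not allow read/write
            elif self._type == "mdbiosraidarray":
                return True    # BIOS RAID sets show as present when torn down
            return self._probe()

        def _probe(self):
            return True

    # After: each special case becomes an override in its own subclass.
    class BaseArray(object):
        @property
        def media_present(self):
            return self._probe()

        def _probe(self):
            return True

    class ContainerArray(BaseArray):
        @property
        def media_present(self):
            return False

    class BiosRaidArray(BaseArray):
        @property
        def media_present(self):
            return True

    assert Array("mdcontainer").media_present is False
    assert ContainerArray().media_present is False
    assert BiosRaidArray().media_present is True

The real subclasses override description, mdadmConfEntry, teardown,
and mediaPresent in the same way.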
 blivet/devicelibs/mdraid.py                      |   2 +-
 blivet/devices/__init__.py                       |   2 +-
 blivet/devices/container.py                      |  18 +--
 blivet/devices/md.py                             | 189 +++++++++++++++--------
 tests/devicelibs_test/mdraid_interrogate_test.py |   4 +-
 tests/devicelibs_test/mdraid_test.py             |   1 -
 tests/devices_test.py                            |  16 +-
 7 files changed, 141 insertions(+), 91 deletions(-)
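
The status and level handling follow a related hook idea: the base
class keeps the algorithm and exposes only the data through an
overridable property (_trueStatusStrings, _levels). A sketch of how
such a hook is consumed, with a toy _read_state standing in for
reading md/array_state from sysfs (again simplified names, not the
actual blivet code):

    class BaseArray(object):
        @property
        def _true_status_strings(self):
            return ("clean", "active", "active-idle", "readonly", "read-auto")

        def _read_state(self):
            # stand-in for open(sysfsPath + "/md/array_state").read().strip()
            return "inactive"

        @property
        def status(self):
            return self._read_state() in self._true_status_strings

    class ContainerArray(BaseArray):
        @property
        def _true_status_strings(self):
            # mdcontainers have state inactive when started (clear if stopped)
            return super(ContainerArray, self)._true_status_strings + ("inactive",)

    assert BaseArray().status is False
    assert ContainerArray().status is True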

diff --git a/blivet/devicelibs/mdraid.py b/blivet/devicelibs/mdraid.py
index 3dc93da..182e5cd 100644
--- a/blivet/devicelibs/mdraid.py
+++ b/blivet/devicelibs/mdraid.py
@@ -44,7 +44,7 @@ class MDRaidLevels(raid.RAIDLevels):
            hasattr(level, 'get_recommended_stride') and \
            hasattr(level, 'get_size')
 
-RAID_levels = MDRaidLevels(["raid0", "raid1", "raid4", "raid5", "raid6", "raid10", "container", "linear"])
+RAID_levels = MDRaidLevels(["raid0", "raid1", "raid4", "raid5", "raid6", "raid10", "linear"])
 
 def get_raid_superblock_size(size, version=None):
     """ mdadm has different amounts of space reserved for its use depending
diff --git a/blivet/devices/__init__.py b/blivet/devices/__init__.py
index 0b2eef2..cf1ec34 100644
--- a/blivet/devices/__init__.py
+++ b/blivet/devices/__init__.py
@@ -27,7 +27,7 @@ from .partition import PartitionDevice
 from .dm import DMDevice, DMLinearDevice, DMCryptDevice
 from .luks import LUKSDevice
 from .lvm import LVMVolumeGroupDevice, LVMLogicalVolumeDevice, LVMSnapShotDevice, LVMThinPoolDevice, LVMThinLogicalVolumeDevice, LVMThinSnapShotDevice
-from .md import MDRaidArrayDevice
+from .md import MDBiosRaidArrayDevice, MDContainerDevice, MDRaidArrayDevice
 from .btrfs import BTRFSDevice, BTRFSVolumeDevice, BTRFSSubVolumeDevice, BTRFSSnapShotDevice
 from .file import FileDevice, DirectoryDevice, SparseFileDevice
 from .loop import LoopDevice
diff --git a/blivet/devices/container.py b/blivet/devices/container.py
index 6227de0..173ffa5 100644
--- a/blivet/devices/container.py
+++ b/blivet/devices/container.py
@@ -62,18 +62,6 @@ class ContainerDevice(StorageDevice):
 
         super(ContainerDevice, self).__init__(*args, **kwargs)
 
-    def _verifyMemberFormat(self, member):
-        """ Whether the member has the format expected by the device.
-
-            :param member: the member device to add
-            :type member: :class:`.StorageDevice`
-            :returns: error msg if the member has incorrect format, else None
-            :rtype: str or NoneType
-        """
-        if not isinstance(member.format, self.formatClass):
-            return "Member format %(format)s is not a subtype of expected format %(expected)s." % {'format' : member.format, 'expected' : self.formatClass}
-        return None
-
     def _addParent(self, member):
         """ Add a member device to the container.
 
@@ -84,10 +72,8 @@ class ContainerDevice(StorageDevice):
             contents at all.
         """
         log_method_call(self, self.name, member=member.name)
-
-        error = self._verifyMemberFormat(member)
-        if error:
-            raise ValueError(error)
+        if not isinstance(member.format, self.formatClass):
+            raise ValueError("member has wrong format")
 
         if member.format.exists and self.uuid and self._formatUUIDAttr and \
            getattr(member.format, self._formatUUIDAttr) != self.uuid:
diff --git a/blivet/devices/md.py b/blivet/devices/md.py
index 9f4f50f..48f1e1c 100644
--- a/blivet/devices/md.py
+++ b/blivet/devices/md.py
@@ -86,9 +86,6 @@ class MDRaidArrayDevice(ContainerDevice, RaidDevice):
                                                 parents=parents,
                                                 sysfsPath=sysfsPath)
 
-        if level == "container":
-            self._type = "mdcontainer"
-
         # avoid attribute-defined-outside-init pylint warning
         self._level = None
 
@@ -112,24 +109,12 @@ class MDRaidArrayDevice(ContainerDevice, RaidDevice):
         else:
             self.metadataVersion = metadataVersion
 
-        # For container members probe size now, as we cannot determine it
-        # when teared down.
-        if self.parents and self.parents[0].type == "mdcontainer":
-            self._size = self.currentSize
-            self._type = "mdbiosraidarray"
-
         if self.exists and self.uuid and not flags.testing:
             # this is a hack to work around mdadm's insistence on giving
             # really high minors to arrays it has no config entry for
             open("/etc/mdadm.conf", "a").write("ARRAY %s UUID=%s\n"
                                                 % (self.path, self.uuid))
 
-    def _verifyMemberFormat(self, member):
-        if member.type == "mdcontainer":
-            return None
-
-        return super(MDRaidArrayDevice, self)._verifyMemberFormat(member)
-
     @property
     def level(self):
         """ Return the raid level
@@ -139,6 +124,11 @@ class MDRaidArrayDevice(ContainerDevice, RaidDevice):
         """
         return self._level
 
+    @property
+    def _levels(self):
+        """ Allowed RAID level for this type of device."""
+        return mdraid.RAID_levels
+
     @level.setter
     def level(self, value):
         """ Set the RAID level and enforce restrictions based on it.
@@ -150,7 +140,7 @@ class MDRaidArrayDevice(ContainerDevice, RaidDevice):
             :returns:     None
         """
         try:
-            level = self._getLevel(value, mdraid.RAID_levels)
+            level = self._getLevel(value, self._levels)
         except ValueError as e:
             raise errors.DeviceError(e)
 
@@ -190,11 +180,6 @@ class MDRaidArrayDevice(ContainerDevice, RaidDevice):
         """Returns the actual or estimated size depending on whether or
            not the array exists.
         """
-        # For container members return probed size, as we cannot determine it
-        # when teared down.
-        if self.type == "mdbiosraidarray":
-            return self._size
-
         if not self.exists or not self.partedDevice:
             try:
                 size = self.level.get_size([d.size for d in self.devices],
@@ -213,14 +198,8 @@ class MDRaidArrayDevice(ContainerDevice, RaidDevice):
 
     @property
     def description(self):
-        if self.type == "mdcontainer":
-            return "BIOS RAID container"
-        else:
-            levelstr = self.level.nick if self.level.nick else self.level.name
-            if self.type == "mdbiosraidarray":
-                return "BIOS RAID set (%s)" % levelstr
-            else:
-                return "MDRAID set (%s)" % levelstr
+        levelstr = self.level.nick if self.level.nick else self.level.name
+        return "MDRAID set (%s)" % levelstr
 
     def __repr__(self):
         s = StorageDevice.__repr__(self)
@@ -249,11 +228,6 @@ class MDRaidArrayDevice(ContainerDevice, RaidDevice):
         if self.memberDevices is None or not self.uuid:
             raise errors.DeviceError("array is not fully defined", self.name)
 
-        # containers and the sets within must only have a UUID= parameter
-        if self.type == "mdcontainer" or self.type == "mdbiosraidarray":
-            fmt = "ARRAY %s UUID=%s\n"
-            return fmt % (self.path, self.uuid)
-
         fmt = "ARRAY %s level=%s num-devices=%d UUID=%s\n"
         return fmt % (self.path, self.level, self.memberDevices, self.uuid)
 
@@ -328,6 +302,11 @@ class MDRaidArrayDevice(ContainerDevice, RaidDevice):
         self.memberDevices -= 1
 
     @property
+    def _trueStatusStrings(self):
+        """ Strings in state file for which status() should return True."""
+        return ("clean", "active", "active-idle", "readonly", "read-auto")
+
+    @property
     def status(self):
         """ This device's status.
 
@@ -356,10 +335,7 @@ class MDRaidArrayDevice(ContainerDevice, RaidDevice):
         state_file = "%s/md/array_state" % self.sysfsPath
         try:
             state = open(state_file).read().strip()
-            if state in ("clean", "active", "active-idle", "readonly", "read-auto"):
-                status = True
-            # mdcontainers have state inactive when started (clear if stopped)
-            if self.type == "mdcontainer" and state == "inactive":
+            if state in self._trueStatusStrings:
                 status = True
         except IOError:
             status = False
@@ -395,17 +371,9 @@ class MDRaidArrayDevice(ContainerDevice, RaidDevice):
     def members(self):
         """ Returns this array's members.
 
-            If the array is a BIOS RAID array then its unique parent
-            is a container and its actual member devices are the
-            container's parents.
-
             :rtype: list of :class:`StorageDevice`
         """
-        if self.type == "mdbiosraidarray":
-            members = self.parents[0].parents
-        else:
-            members = self.parents
-        return list(members)
+        return list(self.parents)
 
     @property
     def complete(self):
@@ -451,12 +419,6 @@ class MDRaidArrayDevice(ContainerDevice, RaidDevice):
         # see comment just above mddeactivate call
         self._preTeardown(recursive=recursive)
 
-        # Since BIOS RAID sets (containers in mdraid terminology) never change
-        # there is no need to stop them and later restart them. Not stopping
-        # (and thus also not starting) them also works around bug 523334
-        if self.type == "mdcontainer" or self.type == "mdbiosraidarray":
-            return
-
         # We don't really care what the array's state is. If the device
         # file exists, we want to deactivate it. mdraid has too many
         # states.
@@ -545,14 +507,7 @@ class MDRaidArrayDevice(ContainerDevice, RaidDevice):
 
     @property
     def mediaPresent(self):
-        # Containers should not get any format handling done
-        # (the device node does not allow read / write calls)
-        if self.type == "mdcontainer":
-            return False
-        # BIOS RAID sets should show as present even when teared down
-        elif self.type == "mdbiosraidarray":
-            return True
-        elif flags.testing:
+        if flags.testing:
             return True
         else:
             return self.partedDevice is not None
@@ -563,11 +518,11 @@ class MDRaidArrayDevice(ContainerDevice, RaidDevice):
 
     @property
     def partitionable(self):
-        return self.type == "mdbiosraidarray"
+        return False
 
     @property
     def isDisk(self):
-        return self.type == "mdbiosraidarray"
+        return False
 
     def dracutSetupArgs(self):
         return set(["rd.md.uuid=%s" % self.uuid])
@@ -582,3 +537,111 @@ class MDRaidArrayDevice(ContainerDevice, RaidDevice):
         data.members = ["raid.%d" % p.id for p in self.parents]
         data.preexist = self.exists
         data.device = self.name
+
+class MDContainerDevice(MDRaidArrayDevice):
+
+    _type = "mdcontainer"
+
+    def __init__(self, name, **kwargs):
+        kwargs['level'] = raid.Container
+        super(MDContainerDevice, self).__init__(name, **kwargs)
+
+    @property
+    def _levels(self):
+        return mdraid.MDRaidLevels(["container"])
+
+    @property
+    def description(self):
+        return "BIOS RAID container"
+
+    @property
+    def mdadmConfEntry(self):
+        if not self.uuid:
+            raise errors.DeviceError("array is not fully defined", self.name)
+
+        return "ARRAY %s UUID=%s\n" % (self.path, self.uuid)
+
+    @property
+    def _trueStatusStrings(self):
+        return ("clean", "active", "active-idle", "readonly", "read-auto", "inactive")
+
+    def teardown(self, recursive=None):
+        log_method_call(self, self.name, status=self.status,
+                        controllable=self.controllable)
+        # we don't really care about the return value of _preTeardown here.
+        # see comment just above mddeactivate call
+        self._preTeardown(recursive=recursive)
+
+        # Since BIOS RAID sets (containers in mdraid terminology) never change
+        # there is no need to stop them and later restart them. Not stopping
+        # (and thus also not starting) them also works around bug 523334
+        return
+
+    @property
+    def mediaPresent(self):
+        # Containers should not get any format handling done
+        # (the device node does not allow read / write calls)
+        return False
+
+class MDBiosRaidArrayDevice(MDRaidArrayDevice):
+
+    _type = "mdbiosraidarray"
+    _formatClassName = property(lambda s: None)
+
+    def __init__(self, name, **kwargs):
+        super(MDBiosRaidArrayDevice, self).__init__(name, **kwargs)
+
+        # For container members probe size now, as we cannot determine it
+        # when teared down.
+        self._size = self.currentSize
+
+    @property
+    def size(self):
+        # For container members return probed size, as we cannot determine it
+        # when teared down.
+        return self._size
+
+    @property
+    def description(self):
+        levelstr = self.level.nick if self.level.nick else self.level.name
+        return "BIOS RAID set (%s)" % levelstr
+
+    @property
+    def mdadmConfEntry(self):
+        if not self.uuid:
+            raise errors.DeviceError("array is not fully defined", self.name)
+
+        return "ARRAY %s UUID=%s\n" % (self.path, self.uuid)
+
+    @property
+    def members(self):
+        # If the array is a BIOS RAID array then its unique parent
+        # is a container and its actual member devices are the
+        # container's parents.
+        return list(self.parents[0].parents)
+
+    def teardown(self, recursive=None):
+        log_method_call(self, self.name, status=self.status,
+                        controllable=self.controllable)
+        # we don't really care about the return value of _preTeardown here.
+        # see comment just above mddeactivate call
+        self._preTeardown(recursive=recursive)
+
+        # Since BIOS RAID sets (containers in mdraid terminology) never change
+        # there is no need to stop them and later restart them. Not stopping
+        # (and thus also not starting) them also works around bug 523334
+        return
+
+    @property
+    def mediaPresent(self):
+        # BIOS RAID sets should show as present even when teared down
+        return True
+
+    @property
+    def isDisk(self):
+        return True
+
+    @property
+    def partitionable(self):
+        return True
+
diff --git a/tests/devicelibs_test/mdraid_interrogate_test.py b/tests/devicelibs_test/mdraid_interrogate_test.py
index 034c929..33bb5ad 100755
--- a/tests/devicelibs_test/mdraid_interrogate_test.py
+++ b/tests/devicelibs_test/mdraid_interrogate_test.py
@@ -91,7 +91,7 @@ class MDExamineTestCase(MDRaidInterrogateTestCase):
               - RAID level and number of devices are correct
               - UUIDs have canonical form
         """
-        level = mdraid.RAID_levels.raidLevel(level or raid.RAID1)
+        level = raid.getRaidLevel(level or raid.RAID1)
         mdraid.mdcreate(self._dev_name, level, self.loopDevices, metadataVer=metadataVersion, spares=spares)
         time.sleep(2) # wait for raid to settle
 
@@ -194,7 +194,7 @@ class MDDetailTestCase(MDRaidInterrogateTestCase):
               - exactly the predicted names are returned by mddetail
               - UUIDs have canonical form
         """
-        level = mdraid.RAID_levels.raidLevel(level) if level is not None else raid.RAID1
+        level = raid.getRaidLevel(level) if level is not None else raid.RAID1
 
         mdraid.mdcreate(self._dev_name, level, self.loopDevices, metadataVer=metadataVersion, spares=spares)
         time.sleep(2) # wait for raid to settle
diff --git a/tests/devicelibs_test/mdraid_test.py b/tests/devicelibs_test/mdraid_test.py
index 687ec12..54e259f 100755
--- a/tests/devicelibs_test/mdraid_test.py
+++ b/tests/devicelibs_test/mdraid_test.py
@@ -16,7 +16,6 @@ class MDRaidTestCase(unittest.TestCase):
         ##
         ## level lookup
         ##
-        self.assertEqual(mdraid.RAID_levels.raidLevel("container").name, "container")
         self.assertEqual(mdraid.RAID_levels.raidLevel("stripe").name, "raid0")
         self.assertEqual(mdraid.RAID_levels.raidLevel("mirror").name, "raid1")
         self.assertEqual(mdraid.RAID_levels.raidLevel("4").name, "raid4")
diff --git a/tests/devices_test.py b/tests/devices_test.py
index 74e12a6..d175c03 100644
--- a/tests/devices_test.py
+++ b/tests/devices_test.py
@@ -20,6 +20,8 @@ from blivet.devices import LVMThinPoolDevice
 from blivet.devices import LVMThinLogicalVolumeDevice
 from blivet.devices import LVMThinSnapShotDevice
 from blivet.devices import LVMVolumeGroupDevice
+from blivet.devices import MDBiosRaidArrayDevice
+from blivet.devices import MDContainerDevice
 from blivet.devices import MDRaidArrayDevice
 from blivet.devices import OpticalDevice
 from blivet.devices import StorageDevice
@@ -127,7 +129,7 @@ class MDRaidArrayDeviceTestCase(DeviceStateTestCase):
         parents = [
            DiskDevice("name1", fmt=getFormat("mdmember"))
         ]
-        self.dev1 = MDRaidArrayDevice("dev1", level="container", parents=parents)
+        self.dev1 = MDContainerDevice("dev1", level="container", parents=parents)
 
         parents = [
            DiskDevice("name1", fmt=getFormat("mdmember")),
@@ -177,7 +179,7 @@ class MDRaidArrayDeviceTestCase(DeviceStateTestCase):
         parents_1 = [
            DiskDevice("name1", fmt=getFormat("mdmember"))
         ]
-        dev_1 = MDRaidArrayDevice(
+        dev_1 = MDContainerDevice(
            "parent",
            level="container",
            parents=parents_1
@@ -192,7 +194,7 @@ class MDRaidArrayDeviceTestCase(DeviceStateTestCase):
            fmt=getFormat("mdmember"),
            parents=parents_2
         )
-        self.dev9 = MDRaidArrayDevice(
+        self.dev9 = MDBiosRaidArrayDevice(
            "dev9",
            level="raid0",
            memberDevices=2,
@@ -213,7 +215,7 @@ class MDRaidArrayDeviceTestCase(DeviceStateTestCase):
         parents_1 = [
            DiskDevice("name1", fmt=getFormat("mdmember"))
         ]
-        dev_1 = MDRaidArrayDevice(
+        dev_1 = MDContainerDevice(
            "parent",
            level="container",
            parents=parents
@@ -228,7 +230,7 @@ class MDRaidArrayDeviceTestCase(DeviceStateTestCase):
            fmt=getFormat("mdmember"),
            parents=parents_2
         )
-        self.dev11 = MDRaidArrayDevice(
+        self.dev11 = MDBiosRaidArrayDevice(
            "dev11",
            level=1,
            memberDevices=2,
@@ -236,7 +238,7 @@ class MDRaidArrayDeviceTestCase(DeviceStateTestCase):
            size=Size("32 MiB"),
            totalDevices=2)
 
-        self.dev12 = MDRaidArrayDevice(
+        self.dev12 = MDBiosRaidArrayDevice(
            "dev12",
            level=1,
            memberDevices=2,
@@ -581,7 +583,7 @@ class BTRFSDeviceTestCase(DeviceStateTestCase):
         with self.assertRaisesRegexp(ValueError, "BTRFSDevice.*must have at least one parent"):
             BTRFSVolumeDevice("dev")
 
-        with self.assertRaisesRegexp(ValueError, "is not.*expected format"):
+        with self.assertRaisesRegexp(ValueError, "wrong format"):
             BTRFSVolumeDevice("dev", parents=[OpticalDevice("deva")])
 
         with self.assertRaisesRegexp(DeviceError, "btrfs subvolume.*must be a btrfs volume"):
-- 
1.9.3


