[blivet:master 1/3] Factor out MDRaidArrayDevice w/ type in ("mdcontainer", "mdbiosraidarray")

mulhern amulhern at redhat.com
Fri Dec 12 18:35:06 UTC 2014


This patch factors the functionality associated with the types
"mdcontainer" and "mdbiosraidarray" out of MDRaidArrayDevice and into
separate subclasses, MDContainerDevice and MDBiosRaidArrayDevice.

It also reverts commit c197d2d39104cdef234daecf5c44098bc345b506
entirely, since the refactoring into MDBiosRaidArrayDevice, with
formatClassName None, handles all of that functionality.

Leave some checks in MDRaidArrayDevice.__init__ to prevent this class
from being used where one of its subclasses is required.

I tried to make the transformation as semantics-preserving as possible.
Conveniently, the only changes needed in the devices tests were to the
names of the constructors.
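
For illustration, roughly how a call site changes (a sketch only; the
device and disk names below are made up, and the keyword arguments mirror
those used in the updated devices_test.py):

    from blivet.devices import DiskDevice, MDBiosRaidArrayDevice, MDContainerDevice
    from blivet.formats import getFormat

    # container members are disks carrying an mdmember format
    members = [DiskDevice("sda", fmt=getFormat("mdmember")),
               DiskDevice("sdb", fmt=getFormat("mdmember"))]

    # before: MDRaidArrayDevice("imsm0", level="container", parents=members)
    # after:  the container level is implied by the class itself
    container = MDContainerDevice("imsm0", parents=members)

    # before: MDRaidArrayDevice("vol0", level="raid0", parents=[container], ...)
    # after:  the BIOS RAID set has its own class; its single parent is the container
    bios_set = MDBiosRaidArrayDevice("vol0", level="raid0", memberDevices=2,
                                     totalDevices=2, parents=[container])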

Signed-off-by: mulhern <amulhern at redhat.com>
---
 blivet/devicelibs/mdraid.py                      |   2 +-
 blivet/devices/__init__.py                       |   2 +-
 blivet/devices/container.py                      |  18 +--
 blivet/devices/md.py                             | 189 +++++++++++++++--------
 tests/devicelibs_test/mdraid_interrogate_test.py |   4 +-
 tests/devicelibs_test/mdraid_test.py             |   1 -
 tests/devices_test.py                            |  14 +-
 7 files changed, 141 insertions(+), 89 deletions(-)

diff --git a/blivet/devicelibs/mdraid.py b/blivet/devicelibs/mdraid.py
index 11e06d7..5479a03 100644
--- a/blivet/devicelibs/mdraid.py
+++ b/blivet/devicelibs/mdraid.py
@@ -45,7 +45,7 @@ class MDRaidLevels(raid.RAIDLevels):
            hasattr(level, 'get_recommended_stride') and \
            hasattr(level, 'get_size')
 
-RAID_levels = MDRaidLevels(["raid0", "raid1", "raid4", "raid5", "raid6", "raid10", "container", "linear"])
+RAID_levels = MDRaidLevels(["raid0", "raid1", "raid4", "raid5", "raid6", "raid10", "linear"])
 
 def get_raid_superblock_size(size, version=None):
     """ mdadm has different amounts of space reserved for its use depending
diff --git a/blivet/devices/__init__.py b/blivet/devices/__init__.py
index 0b2eef2..cf1ec34 100644
--- a/blivet/devices/__init__.py
+++ b/blivet/devices/__init__.py
@@ -27,7 +27,7 @@ from .partition import PartitionDevice
 from .dm import DMDevice, DMLinearDevice, DMCryptDevice
 from .luks import LUKSDevice
 from .lvm import LVMVolumeGroupDevice, LVMLogicalVolumeDevice, LVMSnapShotDevice, LVMThinPoolDevice, LVMThinLogicalVolumeDevice, LVMThinSnapShotDevice
-from .md import MDRaidArrayDevice
+from .md import MDBiosRaidArrayDevice, MDContainerDevice, MDRaidArrayDevice
 from .btrfs import BTRFSDevice, BTRFSVolumeDevice, BTRFSSubVolumeDevice, BTRFSSnapShotDevice
 from .file import FileDevice, DirectoryDevice, SparseFileDevice
 from .loop import LoopDevice
diff --git a/blivet/devices/container.py b/blivet/devices/container.py
index 4604231..612b9fc 100644
--- a/blivet/devices/container.py
+++ b/blivet/devices/container.py
@@ -62,18 +62,6 @@ class ContainerDevice(StorageDevice):
 
         super(ContainerDevice, self).__init__(*args, **kwargs)
 
-    def _verifyMemberFormat(self, member):
-        """ Whether the member has the format expected by the device.
-
-            :param member: the member device to add
-            :type member: :class:`.StorageDevice`
-            :returns: error msg if the member has incorrect format, else None
-            :rtype: str or NoneType
-        """
-        if not isinstance(member.format, self.formatClass):
-            return "Member format %(format)s is not a subtype of expected format %(expected)s." % {'format' : member.format, 'expected' : self.formatClass}
-        return None
-
     def _verifyMemberUuid(self, member, expect_equality=True, require_existence=True):
         """ Whether the member's array UUID has the proper relationship
             with its array's UUID.
@@ -126,10 +114,8 @@ class ContainerDevice(StorageDevice):
             contents at all.
         """
         log_method_call(self, self.name, member=member.name)
-
-        error = self._verifyMemberFormat(member)
-        if error:
-            raise ValueError(error)
+        if not isinstance(member.format, self.formatClass):
+            raise ValueError("member has wrong format")
 
         error = self._verifyMemberUuid(member)
         if error:
diff --git a/blivet/devices/md.py b/blivet/devices/md.py
index dcd0c54..4067f2f 100644
--- a/blivet/devices/md.py
+++ b/blivet/devices/md.py
@@ -86,9 +86,6 @@ class MDRaidArrayDevice(ContainerDevice, RaidDevice):
                                                 parents=parents,
                                                 sysfsPath=sysfsPath)
 
-        if level == "container":
-            self._type = "mdcontainer"
-
         # avoid attribute-defined-outside-init pylint warning
         self._level = None
 
@@ -112,11 +109,8 @@ class MDRaidArrayDevice(ContainerDevice, RaidDevice):
         else:
             self.metadataVersion = metadataVersion
 
-        # For container members probe size now, as we cannot determine it
-        # when teared down.
-        if self.parents and self.parents[0].type == "mdcontainer":
-            self._size = self.currentSize
-            self._type = "mdbiosraidarray"
+        if self.parents and self.parents[0].type == "mdcontainer" and self.type != "mdbiosraidarray":
+            raise errors.DeviceError("A device with mdcontainer member must be mdbiosraidarray.")
 
         if self.exists and self.mdadmFormatUUID and not flags.testing:
             # this is a hack to work around mdadm's insistence on giving
@@ -124,12 +118,6 @@ class MDRaidArrayDevice(ContainerDevice, RaidDevice):
             with open("/etc/mdadm.conf", "a") as c:
                 c.write("ARRAY %s UUID=%s\n" % (self.path, self.mdadmFormatUUID))
 
-    def _verifyMemberFormat(self, member):
-        if member.type == "mdcontainer":
-            return None
-
-        return super(MDRaidArrayDevice, self)._verifyMemberFormat(member)
-
     @property
     def mdadmFormatUUID(self):
         """ This array's UUID, formatted for external use.
@@ -156,6 +144,11 @@ class MDRaidArrayDevice(ContainerDevice, RaidDevice):
         """
         return self._level
 
+    @property
+    def _levels(self):
+        """ Allowed RAID level for this type of device."""
+        return mdraid.RAID_levels
+
     @level.setter
     def level(self, value):
         """ Set the RAID level and enforce restrictions based on it.
@@ -167,7 +160,7 @@ class MDRaidArrayDevice(ContainerDevice, RaidDevice):
             :returns:     None
         """
         try:
-            level = self._getLevel(value, mdraid.RAID_levels)
+            level = self._getLevel(value, self._levels)
         except ValueError as e:
             raise errors.DeviceError(e)
 
@@ -207,11 +200,6 @@ class MDRaidArrayDevice(ContainerDevice, RaidDevice):
         """Returns the actual or estimated size depending on whether or
            not the array exists.
         """
-        # For container members return probed size, as we cannot determine it
-        # when teared down.
-        if self.type == "mdbiosraidarray":
-            return self._size
-
         if not self.exists or not self.partedDevice:
             try:
                 size = self.level.get_size([d.size for d in self.devices],
@@ -230,14 +218,8 @@ class MDRaidArrayDevice(ContainerDevice, RaidDevice):
 
     @property
     def description(self):
-        if self.type == "mdcontainer":
-            return "BIOS RAID container"
-        else:
-            levelstr = self.level.nick if self.level.nick else self.level.name
-            if self.type == "mdbiosraidarray":
-                return "BIOS RAID set (%s)" % levelstr
-            else:
-                return "MDRAID set (%s)" % levelstr
+        levelstr = self.level.nick if self.level.nick else self.level.name
+        return "MDRAID set (%s)" % levelstr
 
     def __repr__(self):
         s = StorageDevice.__repr__(self)
@@ -266,11 +248,6 @@ class MDRaidArrayDevice(ContainerDevice, RaidDevice):
         if self.memberDevices is None or not self.mdadmFormatUUID:
             raise errors.DeviceError("array is not fully defined", self.name)
 
-        # containers and the sets within must only have a UUID= parameter
-        if self.type == "mdcontainer" or self.type == "mdbiosraidarray":
-            fmt = "ARRAY %s UUID=%s\n"
-            return fmt % (self.path, self.mdadmFormatUUID)
-
         fmt = "ARRAY %s level=%s num-devices=%d UUID=%s\n"
         return fmt % (self.path, self.level, self.memberDevices, self.mdadmFormatUUID)
 
@@ -345,6 +322,11 @@ class MDRaidArrayDevice(ContainerDevice, RaidDevice):
         self.memberDevices -= 1
 
     @property
+    def _trueStatusStrings(self):
+        """ Strings in state file for which status() should return True."""
+        return ("clean", "active", "active-idle", "readonly", "read-auto")
+
+    @property
     def status(self):
         """ This device's status.
 
@@ -373,10 +355,7 @@ class MDRaidArrayDevice(ContainerDevice, RaidDevice):
         state_file = "%s/md/array_state" % self.sysfsPath
         try:
             state = open(state_file).read().strip()
-            if state in ("clean", "active", "active-idle", "readonly", "read-auto"):
-                status = True
-            # mdcontainers have state inactive when started (clear if stopped)
-            if self.type == "mdcontainer" and state == "inactive":
+            if state in self._trueStatusStrings:
                 status = True
         except IOError:
             status = False
@@ -412,17 +391,9 @@ class MDRaidArrayDevice(ContainerDevice, RaidDevice):
     def members(self):
         """ Returns this array's members.
 
-            If the array is a BIOS RAID array then its unique parent
-            is a container and its actual member devices are the
-            container's parents.
-
             :rtype: list of :class:`StorageDevice`
         """
-        if self.type == "mdbiosraidarray":
-            members = self.parents[0].parents
-        else:
-            members = self.parents
-        return list(members)
+        return list(self.parents)
 
     @property
     def complete(self):
@@ -468,12 +439,6 @@ class MDRaidArrayDevice(ContainerDevice, RaidDevice):
         # see comment just above mddeactivate call
         self._preTeardown(recursive=recursive)
 
-        # Since BIOS RAID sets (containers in mdraid terminology) never change
-        # there is no need to stop them and later restart them. Not stopping
-        # (and thus also not starting) them also works around bug 523334
-        if self.type == "mdcontainer" or self.type == "mdbiosraidarray":
-            return
-
         # We don't really care what the array's state is. If the device
         # file exists, we want to deactivate it. mdraid has too many
         # states.
@@ -562,14 +527,7 @@ class MDRaidArrayDevice(ContainerDevice, RaidDevice):
 
     @property
     def mediaPresent(self):
-        # Containers should not get any format handling done
-        # (the device node does not allow read / write calls)
-        if self.type == "mdcontainer":
-            return False
-        # BIOS RAID sets should show as present even when teared down
-        elif self.type == "mdbiosraidarray":
-            return True
-        elif flags.testing:
+        if flags.testing:
             return True
         else:
             return self.partedDevice is not None
@@ -580,11 +538,11 @@ class MDRaidArrayDevice(ContainerDevice, RaidDevice):
 
     @property
     def partitionable(self):
-        return self.type == "mdbiosraidarray"
+        return False
 
     @property
     def isDisk(self):
-        return self.type == "mdbiosraidarray"
+        return False
 
     def dracutSetupArgs(self):
         return set(["rd.md.uuid=%s" % self.mdadmFormatUUID])
@@ -599,3 +557,110 @@ class MDRaidArrayDevice(ContainerDevice, RaidDevice):
         data.members = ["raid.%d" % p.id for p in self.parents]
         data.preexist = self.exists
         data.device = self.name
+
+class MDContainerDevice(MDRaidArrayDevice):
+
+    _type = "mdcontainer"
+
+    def __init__(self, name, **kwargs):
+        kwargs['level'] = raid.Container
+        super(MDContainerDevice, self).__init__(name, **kwargs)
+
+    @property
+    def _levels(self):
+        return mdraid.MDRaidLevels(["container"])
+
+    @property
+    def description(self):
+        return "BIOS RAID container"
+
+    @property
+    def mdadmConfEntry(self):
+        if not self.uuid:
+            raise errors.DeviceError("array is not fully defined", self.name)
+
+        return "ARRAY %s UUID=%s\n" % (self.path, self.mdadmFormatUUID)
+
+    @property
+    def _trueStatusStrings(self):
+        return ("clean", "active", "active-idle", "readonly", "read-auto", "inactive")
+
+    def teardown(self, recursive=None):
+        log_method_call(self, self.name, status=self.status,
+                        controllable=self.controllable)
+        # we don't really care about the return value of _preTeardown here.
+        # see comment just above mddeactivate call
+        self._preTeardown(recursive=recursive)
+
+        # Since BIOS RAID sets (containers in mdraid terminology) never change
+        # there is no need to stop them and later restart them. Not stopping
+        # (and thus also not starting) them also works around bug 523334
+        return
+
+    @property
+    def mediaPresent(self):
+        # Containers should not get any format handling done
+        # (the device node does not allow read / write calls)
+        return False
+
+class MDBiosRaidArrayDevice(MDRaidArrayDevice):
+
+    _type = "mdbiosraidarray"
+    _formatClassName = property(lambda s: None)
+
+    def __init__(self, name, **kwargs):
+        super(MDBiosRaidArrayDevice, self).__init__(name, **kwargs)
+
+        # For container members, probe the size now, as we cannot determine
+        # it when torn down.
+        self._size = self.currentSize
+
+    @property
+    def size(self):
+        # For container members, return the probed size, as we cannot
+        # determine it when torn down.
+        return self._size
+
+    @property
+    def description(self):
+        levelstr = self.level.nick if self.level.nick else self.level.name
+        return "BIOS RAID set (%s)" % levelstr
+
+    @property
+    def mdadmConfEntry(self):
+        if not self.uuid:
+            raise errors.DeviceError("array is not fully defined", self.name)
+
+        return "ARRAY %s UUID=%s\n" % (self.path, self.mdadmFormatUUID)
+
+    @property
+    def members(self):
+        # If the array is a BIOS RAID array then its unique parent
+        # is a container and its actual member devices are the
+        # container's parents.
+        return list(self.parents[0].parents)
+
+    def teardown(self, recursive=None):
+        log_method_call(self, self.name, status=self.status,
+                        controllable=self.controllable)
+        # we don't really care about the return value of _preTeardown here.
+        # see comment just above mddeactivate call
+        self._preTeardown(recursive=recursive)
+
+        # Since BIOS RAID sets (containers in mdraid terminology) never change
+        # there is no need to stop them and later restart them. Not stopping
+        # (and thus also not starting) them also works around bug 523334
+        return
+
+    @property
+    def mediaPresent(self):
+        # BIOS RAID sets should show as present even when torn down
+        return True
+
+    @property
+    def isDisk(self):
+        return True
+
+    @property
+    def partitionable(self):
+        return True
diff --git a/tests/devicelibs_test/mdraid_interrogate_test.py b/tests/devicelibs_test/mdraid_interrogate_test.py
index 034c929..33bb5ad 100755
--- a/tests/devicelibs_test/mdraid_interrogate_test.py
+++ b/tests/devicelibs_test/mdraid_interrogate_test.py
@@ -91,7 +91,7 @@ class MDExamineTestCase(MDRaidInterrogateTestCase):
               - RAID level and number of devices are correct
               - UUIDs have canonical form
         """
-        level = mdraid.RAID_levels.raidLevel(level or raid.RAID1)
+        level = raid.getRaidLevel(level or raid.RAID1)
         mdraid.mdcreate(self._dev_name, level, self.loopDevices, metadataVer=metadataVersion, spares=spares)
         time.sleep(2) # wait for raid to settle
 
@@ -194,7 +194,7 @@ class MDDetailTestCase(MDRaidInterrogateTestCase):
               - exactly the predicted names are returned by mddetail
               - UUIDs have canonical form
         """
-        level = mdraid.RAID_levels.raidLevel(level) if level is not None else raid.RAID1
+        level = raid.getRaidLevel(level) if level is not None else raid.RAID1
 
         mdraid.mdcreate(self._dev_name, level, self.loopDevices, metadataVer=metadataVersion, spares=spares)
         time.sleep(2) # wait for raid to settle
diff --git a/tests/devicelibs_test/mdraid_test.py b/tests/devicelibs_test/mdraid_test.py
index bbf4f94..4cd50a3 100755
--- a/tests/devicelibs_test/mdraid_test.py
+++ b/tests/devicelibs_test/mdraid_test.py
@@ -16,7 +16,6 @@ class MDRaidTestCase(unittest.TestCase):
         ##
         ## level lookup
         ##
-        self.assertEqual(mdraid.RAID_levels.raidLevel("container").name, "container")
         self.assertEqual(mdraid.RAID_levels.raidLevel("stripe").name, "raid0")
         self.assertEqual(mdraid.RAID_levels.raidLevel("mirror").name, "raid1")
         self.assertEqual(mdraid.RAID_levels.raidLevel("4").name, "raid4")
diff --git a/tests/devices_test.py b/tests/devices_test.py
index 0302fdf..3d7c28e 100644
--- a/tests/devices_test.py
+++ b/tests/devices_test.py
@@ -22,6 +22,8 @@ from blivet.devices import LVMThinPoolDevice
 from blivet.devices import LVMThinLogicalVolumeDevice
 from blivet.devices import LVMThinSnapShotDevice
 from blivet.devices import LVMVolumeGroupDevice
+from blivet.devices import MDBiosRaidArrayDevice
+from blivet.devices import MDContainerDevice
 from blivet.devices import MDRaidArrayDevice
 from blivet.devices import OpticalDevice
 from blivet.devices import PartitionDevice
@@ -133,7 +135,7 @@ class MDRaidArrayDeviceTestCase(DeviceStateTestCase):
         parents = [
            DiskDevice("name1", fmt=getFormat("mdmember"))
         ]
-        self.dev1 = MDRaidArrayDevice("dev1", level="container", parents=parents)
+        self.dev1 = MDContainerDevice("dev1", level="container", parents=parents)
 
         parents = [
            DiskDevice("name1", fmt=getFormat("mdmember")),
@@ -183,7 +185,7 @@ class MDRaidArrayDeviceTestCase(DeviceStateTestCase):
         parents_1 = [
            DiskDevice("name1", fmt=getFormat("mdmember"))
         ]
-        dev_1 = MDRaidArrayDevice(
+        dev_1 = MDContainerDevice(
            "parent",
            level="container",
            parents=parents_1
@@ -198,7 +200,7 @@ class MDRaidArrayDeviceTestCase(DeviceStateTestCase):
            fmt=getFormat("mdmember"),
            parents=parents_2
         )
-        self.dev9 = MDRaidArrayDevice(
+        self.dev9 = MDBiosRaidArrayDevice(
            "dev9",
            level="raid0",
            memberDevices=2,
@@ -219,7 +221,7 @@ class MDRaidArrayDeviceTestCase(DeviceStateTestCase):
         parents_1 = [
            DiskDevice("name1", fmt=getFormat("mdmember"))
         ]
-        dev_1 = MDRaidArrayDevice(
+        dev_1 = MDContainerDevice(
            "parent",
            level="container",
            parents=parents
@@ -234,7 +236,7 @@ class MDRaidArrayDeviceTestCase(DeviceStateTestCase):
            fmt=getFormat("mdmember"),
            parents=parents_2
         )
-        self.dev11 = MDRaidArrayDevice(
+        self.dev11 = MDBiosRaidArrayDevice(
            "dev11",
            level=1,
            memberDevices=2,
@@ -242,7 +244,7 @@ class MDRaidArrayDeviceTestCase(DeviceStateTestCase):
            size=Size("32 MiB"),
            totalDevices=2)
 
-        self.dev12 = MDRaidArrayDevice(
+        self.dev12 = MDBiosRaidArrayDevice(
            "dev12",
            level=1,
            memberDevices=2,
-- 
1.9.3


