Change in vdsm[master]: blockSD: Avoid stale lvs

nsoffer at redhat.com
Sun May 1 10:20:44 UTC 2016


Nir Soffer has uploaded a new change for review.

Change subject: blockSD: Avoid stale lvs
......................................................................

blockSD: Avoid stale lvs

When connecting to iSCSI storage, all lvs are automatically activated -
this seems to be new behavior introduced during EL7 development that we
missed. This creates a huge number of stale devices that are not used by
anyone but significantly slow down lvm commands.

Even worse, an active lv may not reflect the real mapping on storage
when we start to use it, leading to unneeded pausing of vms, or even to
data corruption.
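
As a side note (not part of the patch), these stale devices can be spotted
with standard lvm reporting: an lv that is active but not open is exactly
what the new code deactivates. A hedged diagnostic sketch, assuming the
lvm2 lvs tool is available and run with sufficient privileges:

    import subprocess

    def stale_lvs(vgname):
        # List lvs that are active (lv_attr[4] == "a") but not open
        # (lv_attr[5] != "o") - the stale devices described above.
        out = subprocess.check_output(
            ["lvs", "--noheadings", "-o", "lv_name,lv_attr", vgname])
        stale = []
        for line in out.decode().splitlines():
            name, attr = line.split()
            if attr[4] == "a" and attr[5] != "o":
                stale.append(name)
        return stale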

Additionally, when a storage domain is deactivated, we do not deactivate
its lvs, leaving stale devices around.

This patch introduces new life cycle methods on the storage domain:

- setup         called when the storage domain monitor produces the
                storage domain object, before starting to monitor the
                domain.

- teardown      called when the storage domain monitor has finished.

The BlockStorageDomain implementation deactivates unused lvs in both
setup and teardown to avoid stale devices (see the sketch below).
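
A minimal sketch of the resulting flow, matching the monitor.py hunk
below; monitor_life_cycle() and the produce callable are illustrative
stand-ins, not names from this patch (the real code uses sdCache.produce()
and the monitor thread):

    import logging

    log = logging.getLogger("storage.sketch")

    def monitor_life_cycle(produce, sdUUID, monitor_once):
        domain = produce(sdUUID)      # stands in for sdCache.produce(sdUUID)
        domain.setup()                # deactivate stale lvs before monitoring
        try:
            monitor_once(domain)      # the monitoring loop runs here
        finally:
            try:
                domain.teardown()     # deactivate lvs when monitoring ends
            except Exception:
                log.exception("Error tearing down domain %s", sdUUID)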

Change-Id: I7227bb43c2e1ee67a6239956aae48173a27f566e
Bug-Url: https://bugzilla.redhat.com/1331978
Signed-off-by: Nir Soffer <nsoffer at redhat.com>
---
M vdsm/storage/blockSD.py
M vdsm/storage/lvm.py
M vdsm/storage/monitor.py
M vdsm/storage/sd.py
4 files changed, 78 insertions(+), 28 deletions(-)


  git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/76/56876/1

diff --git a/vdsm/storage/blockSD.py b/vdsm/storage/blockSD.py
index 3c7c290..1a513bf 100644
--- a/vdsm/storage/blockSD.py
+++ b/vdsm/storage/blockSD.py
@@ -821,6 +821,24 @@
         self._registerResourceNamespaces()
         self._lastUncachedSelftest = 0
 
+    # Life cycle
+
+    def setup(self):
+        """
+        Ensure that unused normal lvs are deactivated, avoiding stale devices.
+        """
+        log.debug("Deactivating domain %s normal lvs", self.sdUUID)
+        lvm.deactivateUnusedLVs(self.sdUUID, refreshlvs=SPECIAL_LVS)
+
+    def teardown(self):
+        """
+        Ensure that all lvs are deactivated, avoiding stale devices.
+        """
+        log.debug("Deactivating domain %s lvs", self.sdUUID)
+        lvm.deactivateUnusedLVs(self.sdUUID)
+
+    # Other
+
     @property
     def logBlkSize(self):
         return self._manifest.logBlkSize
diff --git a/vdsm/storage/lvm.py b/vdsm/storage/lvm.py
index 7a2ca2f..ae6d288 100644
--- a/vdsm/storage/lvm.py
+++ b/vdsm/storage/lvm.py
@@ -651,36 +651,38 @@
     refreshlvs = set(refreshlvs)
 
     for vg in _lvminfo.getAllVgs():
-        deactivate = []
-        refresh = []
+        deactivateUnusedLVs(vg.name, refreshlvs=refreshlvs)
 
-        for lv in _lvminfo.getLv(vg.name):
-            if lv.active:
-                if lv.name in refreshlvs:
-                    refresh.append(lv.name)
-                elif lv.opened:
-                    log.debug("Skipping open lv: vg=%s lv=%s", vg.name,
-                              lv.name)
-                else:
-                    deactivate.append(lv.name)
 
-        if deactivate:
-            log.info("Deactivating lvs: vg=%s lvs=%s", vg.name, deactivate)
-            try:
-                _setLVAvailability(vg.name, deactivate, "n")
-            except se.CannotDeactivateLogicalVolume:
-                log.error("Error deactivating lvs: vg=%s lvs=%s", vg.name,
-                          deactivate)
-            # Some lvs are inactive now
-            _lvminfo._invalidatelvs(vg.name, deactivate)
+def deactivateUnusedLVs(vgname, refreshlvs=()):
+    deactivate = []
+    refresh = []
 
-        if refresh:
-            log.info("Refreshing lvs: vg=%s lvs=%s", vg.name, refresh)
-            try:
-                refreshLVs(vg.name, refresh)
-            except se.LogicalVolumeRefreshError:
-                log.error("Error refreshing lvs: vg=%s lvs=%s", vg.name,
-                          refresh)
+    for lv in _lvminfo.getLv(vgname):
+        if lv.active:
+            if lv.name in refreshlvs:
+                refresh.append(lv.name)
+            elif lv.opened:
+                log.debug("Skipping open lv: vg=%s lv=%s", vgname, lv.name)
+            else:
+                deactivate.append(lv.name)
+
+    if deactivate:
+        log.info("Deactivating lvs: vg=%s lvs=%s", vgname, deactivate)
+        try:
+            _setLVAvailability(vgname, deactivate, "n")
+        except se.CannotDeactivateLogicalVolume:
+            log.error("Error deactivating lvs: vg=%s lvs=%s", vgname,
+                      deactivate)
+        # Some lvs are inactive now
+        _lvminfo._invalidatelvs(vgname, deactivate)
+
+    if refresh:
+        log.info("Refreshing lvs: vg=%s lvs=%s", vgname, refresh)
+        try:
+            refreshLVs(vgname, refresh)
+        except se.LogicalVolumeRefreshError:
+            log.error("Error refreshing lvs: vg=%s lvs=%s", vgname, refresh)
 
 
 def invalidateCache():
diff --git a/vdsm/storage/monitor.py b/vdsm/storage/monitor.py
index b4a0846..78690be 100644
--- a/vdsm/storage/monitor.py
+++ b/vdsm/storage/monitor.py
@@ -237,6 +237,7 @@
                            self.sdUUID, self.wasShutdown)
             if self._shouldReleaseHostId():
                 self._releaseHostId()
+            self._teardownDomain()
 
     def _monitorLoop(self):
         while not self.stopEvent.is_set():
@@ -322,7 +323,9 @@
     @utils.cancelpoint
     def _produceDomain(self):
         self.log.debug("Producing domain %s", self.sdUUID)
-        self.domain = sdCache.produce(self.sdUUID)
+        domain = sdCache.produce(self.sdUUID)
+        domain.setup()
+        self.domain = domain
 
     @utils.cancelpoint
     def _setIsoDomainInfo(self):
@@ -394,3 +397,14 @@
         except:
             self.log.exception("Error releasing host id %s for domain %s",
                                self.hostId, self.sdUUID)
+
+    # Domain life cycle
+
+    def _teardownDomain(self):
+        if not self.domain:
+            return
+        try:
+            self.domain.teardown()
+        except Exception:
+            self.log.exception("Error tearing down domain %s", self.sdUUID)
+        self.domain = None
diff --git a/vdsm/storage/sd.py b/vdsm/storage/sd.py
index 3e261bf..fe3a330 100644
--- a/vdsm/storage/sd.py
+++ b/vdsm/storage/sd.py
@@ -500,6 +500,22 @@
         self._lock = threading.Lock()
         self.stat = None
 
+    # Life cycle
+
+    def setup(self):
+        """
+        Called after the storage domain is produced by the storage domain
+        monitor, before monitoring starts.
+        """
+
+    def teardown(self):
+        """
+        Called after the storage domain monitor has finished and will never
+        access the storage domain object again.
+        """
+
+    # Other
+
     def __del__(self):
         if self.stat:
             concurrent.thread(self.stat.stop).start()


-- 
To view, visit https://gerrit.ovirt.org/56876
To unsubscribe, visit https://gerrit.ovirt.org/settings

Gerrit-MessageType: newchange
Gerrit-Change-Id: I7227bb43c2e1ee67a6239956aae48173a27f566e
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Nir Soffer <nsoffer at redhat.com>

