Change in vdsm[ovirt-3.2]: misc: rename safelease to clusterlock

fsimonce at redhat.com
Mon Jan 28 14:03:15 UTC 2013


Hello Allon Mureinik, Dan Kenigsberg,

I'd like you to do a code review.  Please visit

    http://gerrit.ovirt.org/11463

to review the following change.

Change subject: misc: rename safelease to clusterlock
......................................................................

misc: rename safelease to clusterlock

The safelease module now also contains the sanlock implementation, and it
may soon gain others (e.g. a special lock for local storage domains); for
this reason it has been renamed to the more general name clusterlock. The
safelease implementation also required some cleanup in order to achieve
more uniformity between the locking mechanisms.

Change-Id: I74070ebb43dd726362900a0746c08b2ee3d6eac7
Signed-off-by: Federico Simoncelli <fsimonce at redhat.com>
Reviewed-on: http://gerrit.ovirt.org/10067
Reviewed-by: Allon Mureinik <amureini at redhat.com>
Reviewed-by: Dan Kenigsberg <danken at redhat.com>
---
M vdsm.spec.in
M vdsm/API.py
M vdsm/storage/Makefile.am
M vdsm/storage/blockSD.py
R vdsm/storage/clusterlock.py
M vdsm/storage/hsm.py
M vdsm/storage/imageRepository/formatConverter.py
M vdsm/storage/sd.py
M vdsm/storage/sp.py
9 files changed, 68 insertions(+), 68 deletions(-)


  git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/63/11463/1
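
For context, the cleanup mentioned in the commit message aims at a uniform
interface between the two locking mechanisms in clusterlock.py. The sketch
below is purely illustrative and not part of this patch: make_cluster_lock
is a hypothetical helper, and the DOM_SAFELEASE_VERS / DOM_SANLOCK_VERS
values are assumptions; the class and method names are the ones visible in
the diff that follows.

    # Hypothetical sketch, not part of this patch: both lock classes in
    # clusterlock.py expose the same interface, so a caller can pick an
    # implementation by storage domain version (mirroring the sd.py hunk
    # below) and then drive it through the same methods (initLock,
    # acquireHostId, releaseHostId, hasHostId, ...).
    import clusterlock

    DOM_SAFELEASE_VERS = (0, 2)  # assumed values, for illustration only
    DOM_SANLOCK_VERS = (3,)      # assumed values, for illustration only

    def make_cluster_lock(domversion, sdUUID, idsPath, leasesPath,
                          leaseParams):
        if domversion in DOM_SAFELEASE_VERS:
            return clusterlock.SafeLease(sdUUID, idsPath, leasesPath,
                                         *leaseParams)
        elif domversion in DOM_SANLOCK_VERS:
            return clusterlock.SANLock(sdUUID, idsPath, leasesPath)
        raise ValueError("unsupported domain version: %s" % domversion)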

diff --git a/vdsm.spec.in b/vdsm.spec.in
index dfc2459..8ad4dce 100644
--- a/vdsm.spec.in
+++ b/vdsm.spec.in
@@ -685,7 +685,7 @@
 %{_datadir}/%{vdsm_name}/storage/resourceFactories.py*
 %{_datadir}/%{vdsm_name}/storage/remoteFileHandler.py*
 %{_datadir}/%{vdsm_name}/storage/resourceManager.py*
-%{_datadir}/%{vdsm_name}/storage/safelease.py*
+%{_datadir}/%{vdsm_name}/storage/clusterlock.py*
 %{_datadir}/%{vdsm_name}/storage/sdc.py*
 %{_datadir}/%{vdsm_name}/storage/sd.py*
 %{_datadir}/%{vdsm_name}/storage/securable.py*
diff --git a/vdsm/API.py b/vdsm/API.py
index 732f8a3..a050a51 100644
--- a/vdsm/API.py
+++ b/vdsm/API.py
@@ -33,7 +33,7 @@
 from vdsm import netinfo
 from vdsm import constants
 import storage.misc
-import storage.safelease
+import storage.clusterlock
 import storage.volume
 import storage.sd
 import storage.image
@@ -992,7 +992,7 @@
     def spmStart(self, prevID, prevLver, enableScsiFencing,
                  maxHostID=None, domVersion=None):
         if maxHostID is None:
-            maxHostID = storage.safelease.MAX_HOST_ID
+            maxHostID = storage.clusterlock.MAX_HOST_ID
         recoveryMode = None   # unused
         return self._irs.spmStart(self._UUID, prevID, prevLver,
                 recoveryMode, enableScsiFencing, maxHostID, domVersion)
diff --git a/vdsm/storage/Makefile.am b/vdsm/storage/Makefile.am
index cff09be..abc1545 100644
--- a/vdsm/storage/Makefile.am
+++ b/vdsm/storage/Makefile.am
@@ -25,6 +25,7 @@
 	__init__.py \
 	blockSD.py \
 	blockVolume.py \
+	clusterlock.py \
 	devicemapper.py \
 	dispatcher.py \
 	domainMonitor.py \
@@ -35,8 +36,8 @@
 	hba.py \
 	hsm.py \
 	image.py \
+	iscsiadm.py \
 	iscsi.py \
-        iscsiadm.py \
 	localFsSD.py \
 	lvm.py \
 	misc.py \
@@ -48,7 +49,6 @@
 	remoteFileHandler.py \
 	resourceFactories.py \
 	resourceManager.py \
-	safelease.py \
 	sdc.py \
 	sd.py \
 	securable.py \
diff --git a/vdsm/storage/blockSD.py b/vdsm/storage/blockSD.py
index 61ec996..862e413 100644
--- a/vdsm/storage/blockSD.py
+++ b/vdsm/storage/blockSD.py
@@ -37,7 +37,7 @@
 import fileUtils
 import sd
 import lvm
-import safelease
+import clusterlock
 import blockVolume
 import multipath
 import resourceFactories
@@ -63,7 +63,7 @@
 
 # FIXME: Make this calculated from something logical
 RESERVED_METADATA_SIZE = 40 * (2 ** 20)
-RESERVED_MAILBOX_SIZE = MAILBOX_SIZE * safelease.MAX_HOST_ID
+RESERVED_MAILBOX_SIZE = MAILBOX_SIZE * clusterlock.MAX_HOST_ID
 METADATA_BASE_SIZE = 378
 # VG's min metadata threshold is 20%
 VG_MDA_MIN_THRESHOLD = 0.2
diff --git a/vdsm/storage/safelease.py b/vdsm/storage/clusterlock.py
similarity index 88%
rename from vdsm/storage/safelease.py
rename to vdsm/storage/clusterlock.py
index 88a4eae..4525b2f 100644
--- a/vdsm/storage/safelease.py
+++ b/vdsm/storage/clusterlock.py
@@ -19,15 +19,16 @@
 #
 
 import os
-from vdsm.config import config
-import misc
-import subprocess
-import sanlock
-from contextlib import nested
-from vdsm import constants
-import storage_exception as se
 import threading
 import logging
+import subprocess
+from contextlib import nested
+import sanlock
+
+import misc
+import storage_exception as se
+from vdsm import constants
+from vdsm.config import config
 
 
 MAX_HOST_ID = 250
@@ -39,36 +40,33 @@
 SDM_LEASE_OFFSET = 512 * 2048
 
 
-class ClusterLock(object):
-    log = logging.getLogger("ClusterLock")
+class SafeLease(object):
+    log = logging.getLogger("SafeLease")
+
     lockUtilPath = config.get('irs', 'lock_util_path')
     lockCmd = config.get('irs', 'lock_cmd')
     freeLockCmd = config.get('irs', 'free_lock_cmd')
 
-    def __init__(self, sdUUID, idFile, leaseFile,
-            lockRenewalIntervalSec,
-            leaseTimeSec,
-            leaseFailRetry,
-            ioOpTimeoutSec):
-        self._lock = threading.RLock()
+    def __init__(self, sdUUID, idsPath, leasesPath, lockRenewalIntervalSec,
+                 leaseTimeSec, leaseFailRetry, ioOpTimeoutSec):
+        self._lock = threading.Lock()
         self._sdUUID = sdUUID
-        self._leaseFile = leaseFile
-        self.setParams(lockRenewalIntervalSec, leaseTimeSec,
-                       leaseFailRetry, ioOpTimeoutSec)
+        self._idsPath = idsPath
+        self._leasesPath = leasesPath
+        self.setParams(lockRenewalIntervalSec, leaseTimeSec, leaseFailRetry,
+                       ioOpTimeoutSec)
 
     def initLock(self):
         lockUtil = os.path.join(self.lockUtilPath, "safelease")
-        initCommand = [lockUtil, "release", "-f", self._leaseFile, "0"]
+        initCommand = [lockUtil, "release", "-f", self._leasesPath, "0"]
         rc, out, err = misc.execCmd(initCommand, sudo=False,
                 cwd=self.lockUtilPath)
         if rc != 0:
             self.log.warn("could not initialise spm lease (%s): %s", rc, out)
             raise se.ClusterLockInitError()
 
-    def setParams(self, lockRenewalIntervalSec,
-                    leaseTimeSec,
-                    leaseFailRetry,
-                    ioOpTimeoutSec):
+    def setParams(self, lockRenewalIntervalSec, leaseTimeSec, leaseFailRetry,
+                  ioOpTimeoutSec):
         self._lockRenewalIntervalSec = lockRenewalIntervalSec
         self._leaseTimeSec = leaseTimeSec
         self._leaseFailRetry = leaseFailRetry
@@ -78,10 +76,12 @@
         return 1000
 
     def acquireHostId(self, hostId, async):
-        pass
+        self.log.debug("Host id for domain %s successfully acquired (id: %s)",
+                       self._sdUUID, hostId)
 
     def releaseHostId(self, hostId, async, unused):
-        pass
+        self.log.debug("Host id for domain %s released successfully (id: %s)",
+                       self._sdUUID, hostId)
 
     def hasHostId(self, hostId):
         return True
@@ -94,10 +94,11 @@
                     self._sdUUID)
 
             lockUtil = self.getLockUtilFullPath()
-            acquireLockCommand = subprocess.list2cmdline([lockUtil, "start",
-                self._sdUUID, str(hostID), str(self._lockRenewalIntervalSec),
-                str(self._leaseFile), str(leaseTimeMs), str(ioOpTimeoutMs),
-                str(self._leaseFailRetry)])
+            acquireLockCommand = subprocess.list2cmdline([
+                lockUtil, "start", self._sdUUID, str(hostID),
+                str(self._lockRenewalIntervalSec), str(self._leasesPath),
+                str(leaseTimeMs), str(ioOpTimeoutMs), str(self._leaseFailRetry)
+            ])
 
             cmd = [constants.EXT_SETSID, constants.EXT_IONICE, '-c1', '-n0',
                 constants.EXT_SU, misc.IOUSER, '-s', constants.EXT_SH, '-c',
diff --git a/vdsm/storage/hsm.py b/vdsm/storage/hsm.py
index 62e9f74..8bbe3b8 100644
--- a/vdsm/storage/hsm.py
+++ b/vdsm/storage/hsm.py
@@ -53,7 +53,7 @@
 import misc
 from misc import deprecated
 import taskManager
-import safelease
+import clusterlock
 import storage_exception as se
 from threadLocal import vars
 from vdsm import constants
@@ -528,7 +528,7 @@
 
     @public
     def spmStart(self, spUUID, prevID, prevLVER, recoveryMode, scsiFencing,
-                 maxHostID=safelease.MAX_HOST_ID, domVersion=None,
+                 maxHostID=clusterlock.MAX_HOST_ID, domVersion=None,
                  options=None):
         """
         Starts an SPM.
@@ -845,7 +845,7 @@
         :raises: an :exc:`Storage_Exception.InvalidParameterException` if the
                  master domain is not supplied in the domain list.
         """
-        safeLease = sd.packLeaseParams(
+        leaseParams = sd.packLeaseParams(
             lockRenewalIntervalSec=lockRenewalIntervalSec,
             leaseTimeSec=leaseTimeSec,
             ioOpTimeoutSec=ioOpTimeoutSec,
@@ -853,9 +853,9 @@
         vars.task.setDefaultException(
             se.StoragePoolCreationError(
                 "spUUID=%s, poolName=%s, masterDom=%s, domList=%s, "
-                "masterVersion=%s, safelease params: (%s)" %
+                "masterVersion=%s, clusterlock params: (%s)" %
                 (spUUID, poolName, masterDom, domList, masterVersion,
-                 safeLease)))
+                 leaseParams)))
         misc.validateUUID(spUUID, 'spUUID')
         if masterDom not in domList:
             raise se.InvalidParameterException("masterDom", str(masterDom))
@@ -892,7 +892,7 @@
 
         return sp.StoragePool(
             spUUID, self.taskMng).create(poolName, masterDom, domList,
-                                         masterVersion, safeLease)
+                                         masterVersion, leaseParams)
 
     @public
     def connectStoragePool(self, spUUID, hostID, scsiKey,
@@ -1701,7 +1701,7 @@
         :returns: Nothing ? pool.reconstructMaster return nothing
         :rtype: ?
         """
-        safeLease = sd.packLeaseParams(
+        leaseParams = sd.packLeaseParams(
             lockRenewalIntervalSec=lockRenewalIntervalSec,
             leaseTimeSec=leaseTimeSec,
             ioOpTimeoutSec=ioOpTimeoutSec,
@@ -1710,9 +1710,9 @@
 
         vars.task.setDefaultException(
             se.ReconstructMasterError(
-                "spUUID=%s, masterDom=%s, masterVersion=%s, safelease "
+                "spUUID=%s, masterDom=%s, masterVersion=%s, clusterlock "
                 "params: (%s)" % (spUUID, masterDom, masterVersion,
-                                  safeLease)))
+                                  leaseParams)))
 
         self.log.info("spUUID=%s master=%s", spUUID, masterDom)
 
@@ -1738,7 +1738,7 @@
                 domDict[d] = sd.validateSDDeprecatedStatus(status)
 
         return pool.reconstructMaster(hostId, poolName, masterDom, domDict,
-                                      masterVersion, safeLease)
+                                      masterVersion, leaseParams)
 
     def _logResp_getDeviceList(self, response):
         logableDevs = deepcopy(response)
diff --git a/vdsm/storage/imageRepository/formatConverter.py b/vdsm/storage/imageRepository/formatConverter.py
index 88b053d..0742560 100644
--- a/vdsm/storage/imageRepository/formatConverter.py
+++ b/vdsm/storage/imageRepository/formatConverter.py
@@ -26,7 +26,7 @@
 from storage import sd
 from storage import blockSD
 from storage import image
-from storage import safelease
+from storage import clusterlock
 from storage import volume
 from storage import blockVolume
 from storage import storage_exception as se
@@ -115,8 +115,8 @@
         domain.setMetadataPermissions()
 
     log.debug("Initializing the new cluster lock for domain %s", domain.sdUUID)
-    newClusterLock = safelease.SANLock(domain.sdUUID, domain.getIdsFilePath(),
-                                       domain.getLeasesFilePath())
+    newClusterLock = clusterlock.SANLock(
+        domain.sdUUID, domain.getIdsFilePath(), domain.getLeasesFilePath())
     newClusterLock.initLock()
 
     log.debug("Acquiring the host id %s for domain %s", hostId, domain.sdUUID)
diff --git a/vdsm/storage/sd.py b/vdsm/storage/sd.py
index 1b11017..dbc1beb 100644
--- a/vdsm/storage/sd.py
+++ b/vdsm/storage/sd.py
@@ -31,7 +31,7 @@
 from resourceFactories import IMAGE_NAMESPACE, VOLUME_NAMESPACE
 import resourceManager as rm
 from vdsm import constants
-import safelease
+import clusterlock
 import outOfProcess as oop
 from persistentDict import unicodeEncoder, unicodeDecoder
 
@@ -307,12 +307,12 @@
                 DEFAULT_LEASE_PARAMS[DMDK_LEASE_TIME_SEC],
                 DEFAULT_LEASE_PARAMS[DMDK_LEASE_RETRIES],
                 DEFAULT_LEASE_PARAMS[DMDK_IO_OP_TIMEOUT_SEC])
-            self._clusterLock = safelease.ClusterLock(self.sdUUID,
-                    self.getIdsFilePath(), self.getLeasesFilePath(),
-                    *leaseParams)
+            self._clusterLock = clusterlock.SafeLease(
+                self.sdUUID, self.getIdsFilePath(), self.getLeasesFilePath(),
+                *leaseParams)
         elif domversion in DOM_SANLOCK_VERS:
-            self._clusterLock = safelease.SANLock(self.sdUUID,
-                    self.getIdsFilePath(), self.getLeasesFilePath())
+            self._clusterLock = clusterlock.SANLock(
+                self.sdUUID, self.getIdsFilePath(), self.getLeasesFilePath())
         else:
             raise se.UnsupportedDomainVersion(domversion)
 
diff --git a/vdsm/storage/sp.py b/vdsm/storage/sp.py
index 40d15b3..e13d088 100644
--- a/vdsm/storage/sp.py
+++ b/vdsm/storage/sp.py
@@ -494,7 +494,7 @@
             return config.getint("irs", "maximum_domains_in_pool")
 
     @unsecured
-    def _acquireTemporaryClusterLock(self, msdUUID, safeLease):
+    def _acquireTemporaryClusterLock(self, msdUUID, leaseParams):
         try:
             # Master domain is unattached and all changes to unattached domains
             # must be performed under storage lock
@@ -504,7 +504,7 @@
             # assigned id for this pool
             self.id = msd.getReservedId()
 
-            msd.changeLeaseParams(safeLease)
+            msd.changeLeaseParams(leaseParams)
 
             msd.acquireHostId(self.id)
 
@@ -527,7 +527,7 @@
         self.id = SPM_ID_FREE
 
     @unsecured
-    def create(self, poolName, msdUUID, domList, masterVersion, safeLease):
+    def create(self, poolName, msdUUID, domList, masterVersion, leaseParams):
         """
         Create new storage pool with single/multiple image data domain.
         The command will create new storage pool meta-data attach each
@@ -537,10 +537,9 @@
          'msdUUID' - master domain of this pool (one of domList)
          'domList' - list of domains (i.e sdUUID,sdUUID,...,sdUUID)
         """
-        self.log.info("spUUID=%s poolName=%s master_sd=%s "
-                      "domList=%s masterVersion=%s %s",
-                      self.spUUID, poolName, msdUUID,
-                      domList, masterVersion, str(safeLease))
+        self.log.info("spUUID=%s poolName=%s master_sd=%s domList=%s "
+                      "masterVersion=%s %s", self.spUUID, poolName, msdUUID,
+                      domList, masterVersion, leaseParams)
 
         if msdUUID not in domList:
             raise se.InvalidParameterException("masterDomain", msdUUID)
@@ -565,7 +564,7 @@
                     raise se.StorageDomainAlreadyAttached(spUUIDs[0], sdUUID)
 
         fileUtils.createdir(self.poolPath)
-        self._acquireTemporaryClusterLock(msdUUID, safeLease)
+        self._acquireTemporaryClusterLock(msdUUID, leaseParams)
 
         try:
             self._setSafe()
@@ -573,7 +572,7 @@
             # We should do it before actually attaching this domain to the pool.
             # During 'master' marking we create pool metadata and each attached
             # domain should register there
-            self.createMaster(poolName, msd, masterVersion, safeLease)
+            self.createMaster(poolName, msd, masterVersion, leaseParams)
             self.__rebuild(msdUUID=msdUUID, masterVersion=masterVersion)
             # Attach storage domains to the storage pool
             # Since we are creating the pool then attach is done from the hsm and not the spm
@@ -722,10 +721,10 @@
 
     @unsecured
     def reconstructMaster(self, hostId, poolName, msdUUID, domDict,
-                          masterVersion, safeLease):
+                          masterVersion, leaseParams):
         self.log.info("spUUID=%s hostId=%s poolName=%s msdUUID=%s domDict=%s "
                       "masterVersion=%s leaseparams=(%s)", self.spUUID, hostId,
-                      poolName, msdUUID, domDict, masterVersion, str(safeLease))
+                      poolName, msdUUID, domDict, masterVersion, leaseParams)
 
         if msdUUID not in domDict:
             raise se.InvalidParameterException("masterDomain", msdUUID)
@@ -736,7 +735,7 @@
         # For backward compatibility we must support a reconstructMaster
         # that doesn't specify an hostId.
         if not hostId:
-            self._acquireTemporaryClusterLock(msdUUID, safeLease)
+            self._acquireTemporaryClusterLock(msdUUID, leaseParams)
             temporaryLock = True
         else:
             # Forcing to acquire the host id (if it's not acquired already).
@@ -749,7 +748,7 @@
 
         try:
             self.createMaster(poolName, futureMaster, masterVersion,
-                              safeLease)
+                              leaseParams)
 
             for sdUUID in domDict:
                 domDict[sdUUID] = domDict[sdUUID].capitalize()


--
To view, visit http://gerrit.ovirt.org/11463
To unsubscribe, visit http://gerrit.ovirt.org/settings

Gerrit-MessageType: newchange
Gerrit-Change-Id: I74070ebb43dd726362900a0746c08b2ee3d6eac7
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: ovirt-3.2
Gerrit-Owner: Federico Simoncelli <fsimonce at redhat.com>
Gerrit-Reviewer: Allon Mureinik <amureini at redhat.com>
Gerrit-Reviewer: Dan Kenigsberg <danken at redhat.com>

