Change in vdsm[master]: vm: move queryBlockJobs under services

fromani at redhat.com
Mon Feb 9 08:53:21 UTC 2015


Francesco Romani has uploaded a new change for review.

Change subject: vm: move queryBlockJobs under services
......................................................................

vm: move queryBlockJobs under services

VM block job monitoring is a fundamental
part of live merge support. However, calling
it 'sampling' is a hard sell.

This patch moves the periodic monitoring of
the VM block jobs under the services umbrella,
with no intended changes in behaviour or
output.

The only code changes are the glue needed
to adapt from the sampling framework to the
services framework.

Change-Id: I2bba47881033af1b928195a94456f7130a7ac343
Signed-off-by: Francesco Romani <fromani at redhat.com>
---
M vdsm/virt/services.py
M vdsm/virt/vm.py
2 files changed, 21 insertions(+), 23 deletions(-)


  git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/95/37595/1
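
For context before the diff: the services framework drives periodic
work through RepeatingCall objects, each wrapping a callable and an
interval read from the configuration, and started once at setup.
Below is a minimal, illustrative sketch of such a helper in
standalone Python; it is not the actual vdsm class of the same name,
which lives in vdsm/virt/services.py:

    import threading

    class RepeatingCall(object):
        """Invoke func every interval seconds until stop() is called.

        Illustrative sketch only, not the vdsm implementation.
        """
        def __init__(self, func, interval):
            self._func = func
            self._interval = interval
            self._timer = None
            self._stopped = False

        def start(self):
            # Schedule the next invocation on a daemon timer thread.
            self._timer = threading.Timer(self._interval, self._run)
            self._timer.daemon = True
            self._timer.start()

        def stop(self):
            self._stopped = True
            if self._timer is not None:
                self._timer.cancel()

        def _run(self):
            if self._stopped:
                return
            try:
                self._func()
            finally:
                # Reschedule unless stop() was called meanwhile.
                if not self._stopped:
                    self.start()

With a helper of this shape, the new SampleVmJobs service in the diff
plugs in exactly like the existing UpdateVolumes one: construct it
with the clientIF instance, wrap it in a RepeatingCall with the
configured 'vm_sample_jobs_interval', and call start().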

diff --git a/vdsm/virt/services.py b/vdsm/virt/services.py
index 5bd4225..23ee2e3 100644
--- a/vdsm/virt/services.py
+++ b/vdsm/virt/services.py
@@ -58,7 +58,9 @@
 
     _calls = [
         RepeatingCall(UpdateVolumes(cif),
-                      config.getint('irs', 'vol_size_sample_interval'))]
+                      config.getint('irs', 'vol_size_sample_interval')),
+        RepeatingCall(SampleVmJobs(cif),
+                      config.getint('vars', 'vm_sample_jobs_interval'))]
 
     for call in _calls:
         call.start()
@@ -150,3 +152,13 @@
                 # Avoid queries from storage during recovery process
                 for vmDrive in vmObj.getDiskDevices():
                     vmObj.updateDriveVolume(vmDrive)
+
+
+class SampleVmJobs(object):
+    def __init__(self, cif):
+        self._cif = cif
+
+    def __call__(self):
+        vms = self._cif.getVMs()
+        for vmId, vmObj in vms.iteritems():
+            vmObj.updateVmJobs()
diff --git a/vdsm/virt/vm.py b/vdsm/virt/vm.py
index ddc852d..046549f 100644
--- a/vdsm/virt/vm.py
+++ b/vdsm/virt/vm.py
@@ -209,10 +209,6 @@
             AdvancedStatsFunction(
                 self._sampleBalloon,
                 config.getint('vars', 'vm_sample_balloon_interval'), 1))
-        self.sampleVmJobs = (
-            AdvancedStatsFunction(
-                self._sampleVmJobs,
-                config.getint('vars', 'vm_sample_jobs_interval'), 1))
         self.sampleVcpuPinning = (
             AdvancedStatsFunction(
                 self._sampleVcpuPinning,
@@ -225,7 +221,7 @@
         self.addStatsFunction(
             self.highWrite, self.sampleCpu,
             self.sampleDisk, self.sampleNet, self.sampleBalloon,
-            self.sampleVmJobs, self.sampleVcpuPinning, self.sampleCpuTune)
+            self.sampleVcpuPinning, self.sampleCpuTune)
 
     def _highWrite(self):
         if not self._vm.isDisksStatsCollectionEnabled():
@@ -357,9 +353,6 @@
         """
         infos = self._vm._dom.info()
         return infos[2]
-
-    def _sampleVmJobs(self):
-        return self._vm.queryBlockJobs()
 
     def _sampleCpuTune(self):
         """
@@ -631,14 +624,6 @@
         if vmNumaNodeRuntimeMap:
             stats['vNodeRuntimeInfo'] = vmNumaNodeRuntimeMap
 
-    def _getVmJobs(self, stats):
-        info = self.sampleVmJobs.getLastSample()
-        if info is not None:
-            # If we are unable to collect stats we must not return anything at
-            # all since an empty dictionary would be interpreted as vm jobs
-            # finishing.
-            stats['vmJobs'] = info
-
     def get(self):
         stats = {}
 
@@ -646,7 +631,6 @@
         self._getNetworkStats(stats)
         self._getDiskStats(stats)
         self._getBalloonStats(stats)
-        self._getVmJobs(stats)
         self._getNumaStats(stats)
         self._getCpuTuneInfo(stats)
         self._getCpuCount(stats)
@@ -872,6 +856,7 @@
         self._liveMergeCleanupThreads = {}
         self._shutdownReason = None
         self._vcpuLimit = None
+        self._vmJobs = None
 
     def _get_lastStatus(self):
         # note that we don't use _statusLock here. One of the reasons is the
@@ -1816,7 +1801,7 @@
                 stats[var] = decStats[var]
             elif type(decStats[var]) is not dict:
                 stats[var] = utils.convertToStr(decStats[var])
-            elif var in ('network', 'balloonInfo', 'vmJobs',
+            elif var in ('network', 'balloonInfo',
                          'vNodeRuntimeInfo'):
                 stats[var] = decStats[var]
             else:
@@ -1835,6 +1820,8 @@
             stats['watchdogEvent'] = self._watchdogEvent
         if self._vcpuLimit:
             stats['vcpuUserLimit'] = self._vcpuLimit
+        if self._vmJobs:
+            stats['vmJobs'] = self._vmJobs
         return stats
 
     def _getVmStatus(self):
@@ -4612,6 +4599,9 @@
             return True
         return False
 
+    def updateVmJobs(self):
+        self._vmJobs = self.queryBlockJobs()
+
     def queryBlockJobs(self):
         def startCleanup(job, drive, needPivot):
             t = LiveMergeCleanupThread(self, job['jobID'], drive, needPivot)
@@ -4775,10 +4765,6 @@
         # plus a bit more to accomodate additional writes to 'top' during the
         # live merge operation.
         self.extendDriveVolume(drive, baseVolUUID, topSize)
-
-        # Trigger the collection of stats before returning so that callers
-        # of getVmStats after this returns will see the new job
-        self._vmStats.sampleVmJobs()
 
         return {'status': doneCode}
 


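A subtlety worth illustrating from the hunks above: the comment
removed from _getVmJobs explains that an empty dictionary is
meaningful (it tells callers that all block jobs have finished), so
only a failed query, i.e. None, should suppress the 'vmJobs' key,
whereas a plain truthiness test such as 'if self._vmJobs:' would
also skip an empty dict. A minimal standalone sketch of the
distinction, with a hypothetical helper name:

    def report_jobs(stats, jobs):
        # None means the query failed: omit the key entirely.
        # An empty dict means "no jobs running" and, per the comment
        # removed from _getVmJobs, must still be reported.
        if jobs is not None:
            stats['vmJobs'] = jobs

    ok, failed = {}, {}
    report_jobs(ok, {})        # ok == {'vmJobs': {}}: jobs finished
    report_jobs(failed, None)  # failed == {}: query failed, key omitted
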
-- 
To view, visit http://gerrit.ovirt.org/37595
To unsubscribe, visit http://gerrit.ovirt.org/settings

Gerrit-MessageType: newchange
Gerrit-Change-Id: I2bba47881033af1b928195a94456f7130a7ac343
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Francesco Romani <fromani at redhat.com>

