Change in vdsm[master]: periodic: extract required_on staticmethod

fromani at redhat.com
Wed Sep 30 09:16:23 UTC 2015


Francesco Romani has uploaded a new change for review.

Change subject: periodic: extract required_on staticmethod
......................................................................

periodic: extract required_on staticmethod

It is possible that certain periodic operations should not be
performed on some VMs, perhaps only temporarily.

To check this, Operations exported a "required" attribute.
The problem is that this was an instance attribute, which forced the
periodic code into a clumsy dance:
- assume every operation is required on each VM
- create per-VM Operation instances
- check if a given operation should indeed run on a given VM
- if the operation should not run, discard the instance.

This patch eliminates this clumsiness by turning the "required"
attribute into a "required_on" staticmethod.
The periodic code can now filter out unneeded Operation instances
in a much simpler way.
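
For illustration, a minimal self-contained sketch of the new pattern;
FakeVm and FakeUpdateVolumes below are made-up stand-ins for the real
classes in vdsm/virt/periodic.py:

    class FakeVm(object):
        def __init__(self, vm_id, disks_stats_enabled):
            self.id = vm_id
            self._disks_stats_enabled = disks_stats_enabled

        def isDisksStatsCollectionEnabled(self):
            return self._disks_stats_enabled


    class FakeUpdateVolumes(object):
        # stands in for a per-VM operation such as UpdateVolumes
        def __init__(self, vm):
            self._vm = vm

        @staticmethod
        def required_on(vm):
            # decided per VM *before* any operation object exists
            return vm.isDisksStatsCollectionEnabled()


    vms = [FakeVm('vm-1', True), FakeVm('vm-2', False)]

    # old flow: build one instance per VM, check op.required, discard
    # new flow: filter first, instantiate only what will actually run
    ops = [FakeUpdateVolumes(vm) for vm in vms
           if FakeUpdateVolumes.required_on(vm)]

    print([op._vm.id for op in ops])   # -> ['vm-1']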

Change-Id: I2ef0750948c905a87750b99adf702c4c08fc925d
Bug-Url: https://bugzilla.redhat.com/1250839
Signed-off-by: Francesco Romani <fromani at redhat.com>
---
M vdsm/virt/periodic.py
1 file changed, 29 insertions(+), 35 deletions(-)


  git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/33/46833/1

diff --git a/vdsm/virt/periodic.py b/vdsm/virt/periodic.py
index 68f8888..050c789 100644
--- a/vdsm/virt/periodic.py
+++ b/vdsm/virt/periodic.py
@@ -64,7 +64,8 @@
 
     def per_vm_operation(func, period):
         disp = VmDispatcher(
-            cif.getVMs, _executor, func, _timeout_from(period))
+            cif.getVMs, _executor, func, _timeout_from(period),
+            func.required_on)
         return Operation(disp, period, scheduler)
 
     _operations = [
@@ -204,7 +205,8 @@
 
     _log = logging.getLogger("virt.periodic.VmDispatcher")
 
-    def __init__(self, get_vms, executor, create, timeout):
+    def __init__(self, get_vms, executor, create, timeout,
+                 required_on=lambda vm_obj: True):
         """
         get_vms: callable which will return a dict which maps
                  vm_ids to vm_instances
@@ -216,12 +218,15 @@
         self._executor = executor
         self._create = create
         self._timeout = timeout
+        self._required_on = required_on
         self._tracker = VmTracker()
 
     def __call__(self):
         # python 3 future compatibility: we want to materialize the sequence
         vm_objs = set(self._get_vms().values())
-        candidates = set(vm.id for vm in vm_objs)
+        candidates = set(vm_obj.id
+                         for vm_obj in vm_objs
+                         if self._required_on(vm_obj))
         available = set(self._tracker.reserve_available(candidates))
         skipped = set(candidates - available)
 
@@ -233,27 +238,11 @@
             # still OK if occasional misdetection occurs, but we
             # definitely want to avoid known-bad situation and to
             # needlessly overload libvirt.
-            if vm_obj.id not in available:
-                skipped.append(vm_obj.id)
-                continue
-
+            op = self._create(self._tracker, vm_obj)
             try:
-                op = self._create(self._tracker, vm_obj)
-
-                if not op.required:
-                    self._tracker.release(vm_obj.id)
-                    continue
-
-            except Exception:
-                self._tracker.release(vm_obj.id)
-                # we want to make sure to have VM UUID logged
-                self._log.exception("while dispatching %s to VM '%s'",
-                                    self._create, vm_obj.id)
-            else:
-                try:
-                    self._executor.dispatch(op, self._timeout)
-                except executor.TooManyTasks:
-                    skipped.add(vm_obj.id)
+                self._executor.dispatch(op, self._timeout)
+            except executor.TooManyTasks:
+                skipped.add(vm_obj.id)
 
         if skipped:
             self._log.warning('could not run %s on %s',
@@ -265,9 +254,14 @@
 
 
 class _RunnableOnVm(object):
+
     def __init__(self, tracker, vm):
         self._tracker = tracker
         self._vm = vm
+
+    @staticmethod
+    def required_on(vm):
+        return True
 
     def __call__(self):
         try:
@@ -286,10 +280,10 @@
 
 class UpdateVolumes(_RunnableOnVm):
 
-    @property
-    def required(self):
+    @staticmethod
+    def required_on(vm):
         # Avoid queries from storage during recovery process
-        return self._vm.isDisksStatsCollectionEnabled()
+        return vm.isDisksStatsCollectionEnabled()
 
     def _execute(self):
         for drive in self._vm.getDiskDevices():
@@ -301,9 +295,9 @@
 
 class NumaInfoMonitor(_RunnableOnVm):
 
-    @property
-    def required(self):
-        return self._vm.hasGuestNumaNode
+    @staticmethod
+    def required_on(vm):
+        return vm.hasGuestNumaNode
 
     def _execute(self):
         self._vm.updateNumaInfo()
@@ -311,14 +305,14 @@
 
 class BlockjobMonitor(_RunnableOnVm):
 
-    @property
-    def required(self):
+    @staticmethod
+    def required_on(vm):
         # For performance reasons, we must avoid as much
         # as possible to create per-vm executor tasks, even
         # though they will do nothing but a few check and exit
         # early, as they do if a VM doesn't have Block Jobs to
         # monitor (most often true).
-        return self._vm.hasVmJobs
+        return vm.hasVmJobs
 
     def _execute(self):
         self._vm.updateVmJobs()
@@ -326,10 +320,10 @@
 
 class DriveWatermarkMonitor(_RunnableOnVm):
 
-    @property
-    def required(self):
+    @staticmethod
+    def required_on(vm):
         # Avoid queries from storage during recovery process
-        return self._vm.isDisksStatsCollectionEnabled()
+        return vm.isDisksStatsCollectionEnabled()
 
     def _execute(self):
         self._vm.extendDrivesIfNeeded()


-- 
To view, visit https://gerrit.ovirt.org/46833
To unsubscribe, visit https://gerrit.ovirt.org/settings

Gerrit-MessageType: newchange
Gerrit-Change-Id: I2ef0750948c905a87750b99adf702c4c08fc925d
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Francesco Romani <fromani at redhat.com>

