On Tue, May 14, 2019 at 11:40:45AM +0200, olichtne(a)redhat.com wrote:
From: Ondrej Lichtner <olichtne(a)redhat.com>
Based on experience with the two Baseline evaluators (CPU and Flow
based), it is clear that they share common mechanisms and a design that
should be generalized into a base class to reduce code duplication.

This commit introduces the BaselineEvaluator class, which defines the
common methods and the general mechanism of evaluating results by
comparing them against a baseline. It also provides default
implementations for these methods, which means that the class can be
used on its own, though all evaluation results will be False (failures).
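As an illustration of the intended usage (this sketch is not part of
the patch and the names are hypothetical), a subclass only needs to
override the baseline lookup and the comparison hook:

    from lnst.RecipeCommon.Perf.Evaluators.BaselineEvaluator import (
        BaselineEvaluator,
    )

    class ExampleEvaluator(BaselineEvaluator):
        def get_baseline(self, recipe, result):
            # hypothetical lookup; returning None marks the comparison
            # as a failure in compare_result_with_baseline below
            return getattr(recipe, "example_baseline", None)

        def compare_result_with_baseline(self, recipe, result, baseline):
            # must return (bool, list of text lines); the base class
            # joins the lines and reports them via recipe.add_result
            if baseline is None:
                return False, ["no baseline found"]
            return True, ["result matches the recorded baseline"]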
Signed-off-by: Ondrej Lichtner <olichtne(a)redhat.com>
---
.../Evaluators/BaselineCPUAverageEvaluator.py | 91 ++++++++-----------
.../Perf/Evaluators/BaselineEvaluator.py | 42 +++++++++
.../BaselineFlowAverageEvaluator.py | 38 ++++----
3 files changed, 100 insertions(+), 71 deletions(-)
create mode 100644 lnst/RecipeCommon/Perf/Evaluators/BaselineEvaluator.py
diff --git a/lnst/RecipeCommon/Perf/Evaluators/BaselineCPUAverageEvaluator.py b/lnst/RecipeCommon/Perf/Evaluators/BaselineCPUAverageEvaluator.py
index a6f2956..0125a31 100644
--- a/lnst/RecipeCommon/Perf/Evaluators/BaselineCPUAverageEvaluator.py
+++ b/lnst/RecipeCommon/Perf/Evaluators/BaselineCPUAverageEvaluator.py
@@ -1,29 +1,18 @@
from __future__ import division
-from lnst.RecipeCommon.Perf.Evaluators.BaseEvaluator import BaseEvaluator
-
-from lnst.RecipeCommon.Perf.Measurements.BaseCPUMeasurement import (
- CPUMeasurementResults,
- AggregatedCPUMeasurementResults,
+from lnst.RecipeCommon.Perf.Evaluators.BaselineEvaluator import (
+ BaselineEvaluator,
)
+
from lnst.RecipeCommon.Perf.Results import result_averages_difference
-class BaselineCPUAverageEvaluator(BaseEvaluator):
+class BaselineCPUAverageEvaluator(BaselineEvaluator):
def __init__(self, pass_difference, evaluation_filter=None):
self._pass_difference = pass_difference
self._evaluation_filter = evaluation_filter
- def evaluate_results(self, recipe, results):
- filtered_results = self._filter_results(results)
-
- for host_results in self._divide_results_by_host(filtered_results).values():
- self._evaluate_host_results(recipe, host_results)
-
- def get_baseline(self, recipe, result):
- return None
-
- def _filter_results(self, results):
+ def filter_results(self, recipe, results):
if self._evaluation_filter is None:
return results
@@ -36,6 +25,11 @@ class BaselineCPUAverageEvaluator(BaseEvaluator):
filtered.append(result)
return filtered
+ def group_results(self, recipe, results):
+ results_by_host = self._divide_results_by_host(results)
+ for host_results in results_by_host.values():
+ yield host_results
+
def _divide_results_by_host(self, results):
results_by_host = {}
for result in results:
@@ -44,49 +38,44 @@ class BaselineCPUAverageEvaluator(BaseEvaluator):
results_by_host[result.host].append(result)
return results_by_host
- def _evaluate_host_results(self, recipe, host_results):
- comparison_result = True
- result_text = [
+ def describe_group_results(self, recipe, results):
+ return [
"CPU Baseline average evaluation for Host {hostid}:".format(
- hostid=host_results[0].host.hostid
+ hostid=results[0].host.hostid
),
"Configured {diff}% difference as acceptable".format(
diff=self._pass_difference
),
]
- pairs = [
- (result, self.get_baseline(recipe, result))
- for result in host_results
- ]
- for result, baseline in pairs:
- if baseline is None:
- comparison_result = False
- result_text.append(
- "CPU {cpuid}: no baseline found".format(
- cpuid=result.cpu
- )
+
+ def compare_result_with_baseline(self, recipe, result, baseline):
+ comparison = True
+ text = []
+ if baseline is None:
+ comparison = False
+ text.append(
+ "CPU {cpuid}: no baseline found".format(cpuid=result.cpu)
+ )
+ else:
+ try:
+ difference = result_averages_difference(
+ result.utilization, baseline.utilization
)
- else:
- try:
- difference = result_averages_difference(
- result.utilization, baseline.utilization
- )
- if abs(difference) > self._pass_difference:
- comparison_result = False
+ if abs(difference) > self._pass_difference:
+ comparison = False
- result_text.append(
- "CPU {cpuid}: utilization {diff:.2f}% {direction} than
baseline".format(
- cpuid=result.cpu,
- diff=abs(difference),
- direction="higher" if difference >= 0 else
"lower",
- )
+ text.append(
+ "CPU {cpuid}: utilization {diff:.2f}% {direction} than
baseline".format(
+ cpuid=result.cpu,
+ diff=abs(difference),
+ direction="higher" if difference >= 0 else
"lower",
)
- except ZeroDivisionError:
- result_text.append(
- "CPU {cpuid}: zero division by baseline".format(
- cpuid=result.cpu
- )
+ )
+ except ZeroDivisionError:
+ text.append(
+ "CPU {cpuid}: zero division by baseline".format(
+ cpuid=result.cpu
)
-
- recipe.add_result(comparison_result, "\n".join(result_text))
+ )
+ return comparison, text
diff --git a/lnst/RecipeCommon/Perf/Evaluators/BaselineEvaluator.py b/lnst/RecipeCommon/Perf/Evaluators/BaselineEvaluator.py
new file mode 100644
index 0000000..20e5a94
--- /dev/null
+++ b/lnst/RecipeCommon/Perf/Evaluators/BaselineEvaluator.py
@@ -0,0 +1,42 @@
+from lnst.RecipeCommon.Perf.Evaluators.BaseEvaluator import BaseEvaluator
+
+
+class BaselineEvaluator(BaseEvaluator):
+ def evaluate_results(self, recipe, results):
+ filtered_results = self.filter_results(recipe, results)
+
+ for group in self.group_results(recipe, filtered_results):
+ self.evaluate_group_results(recipe, group)
+
+ def filter_results(self, recipe, results):
+ return results
+
+ def group_results(self, recipe, results):
+ for result in results:
+ yield [result]
+
+ def evaluate_group_results(self, recipe, results):
+ comparison_result = True
+ result_text = self.describe_group_results(recipe, results)
+
+ baselines = self.get_baselines(recipe, results)
+ for result, baseline in zip(results, baselines):
+ comparison, text = self.compare_result_with_baseline(
+ recipe, result, baseline
+ )
+ comparison_result = comparison_result and comparison
+ result_text.extend(text)
+
+ recipe.add_result(comparison_result, "\n".join(result_text))
+
+ def describe_group_results(self, recipe, results):
+ return []
+
+ def get_baselines(self, recipe, results):
+ return [self.get_baseline(recipe, result) for result in results]
+
+ def get_baseline(self, recipe, result):
+ return None
+
+ def compare_result_with_baseline(self, recipe, result, baseline):
+ return False, "Result to baseline comparison not implemented"
diff --git a/lnst/RecipeCommon/Perf/Evaluators/BaselineFlowAverageEvaluator.py b/lnst/RecipeCommon/Perf/Evaluators/BaselineFlowAverageEvaluator.py
index b05849c..72d0a73 100644
--- a/lnst/RecipeCommon/Perf/Evaluators/BaselineFlowAverageEvaluator.py
+++ b/lnst/RecipeCommon/Perf/Evaluators/BaselineFlowAverageEvaluator.py
@@ -1,39 +1,38 @@
from __future__ import division
-from lnst.RecipeCommon.Perf.Evaluators.BaseEvaluator import BaseEvaluator
-
-from lnst.RecipeCommon.Perf.Measurements.BaseFlowMeasurement import (
- FlowMeasurementResults,
- AggregatedFlowMeasurementResults,
+from lnst.RecipeCommon.Perf.Evaluators.BaselineEvaluator import (
+ BaselineEvaluator,
)
+
from lnst.RecipeCommon.Perf.Results import result_averages_difference
-class BaselineFlowAverageEvaluator(BaseEvaluator):
+class BaselineFlowAverageEvaluator(BaselineEvaluator):
def __init__(self, pass_difference):
self._pass_difference = pass_difference
- def evaluate_results(self, recipe, results):
- for result in results:
- baseline = self.get_baseline(recipe, result)
- self._compare_result_with_baseline(recipe, result, baseline)
-
- def get_baseline(self, recipe, result):
- return None
-
- def _compare_result_with_baseline(self, recipe, result, baseline):
- comparison_result = True
- result_text = [
+ def describe_group_results(self, recipe, results):
+ result = results[0]
+ return [
"Flow {} Baseline average evaluation".format(result.flow),
"Configured {}% difference as acceptable".format(
self._pass_difference
),
]
+
+ def compare_result_with_baseline(self, recipe, result, baseline):
+ comparison_result = True
+ result_text = []
if baseline is None:
comparison_result = False
result_text.append("No baseline found for this flow")
else:
- for i in ["generator_results", "generator_cpu_stats",
"receiver_results", "receiver_cpu_stats"]:
+ for i in [
+ "generator_results",
+ "generator_cpu_stats",
+ "receiver_results",
+ "receiver_cpu_stats",
+ ]:
comparison, text = self._average_diff_comparison(
name="{} average".format(i),
target=getattr(result, i),
@@ -41,8 +40,7 @@ class BaselineFlowAverageEvaluator(BaseEvaluator):
)
result_text.append(text)
comparison_result = comparison_result and comparison
-
- recipe.add_result(comparison_result, "\n".join(result_text))
+ return comparison_result, result_text
def _average_diff_comparison(self, name, target, baseline):
difference = result_averages_difference(target, baseline)
--
2.21.0
pushed.
-Ondrej