From: Ondrej Lichtner <olichtne(a)redhat.com>
This package defines the class hierarchy for MeasurementResults
evaluators, starting with the base class "BaseEvaluator". For now I'm
adding a couple of simple and obvious evaluators, though I expect more
to be added later.
NonzeroFlowEvaluator - simply checks whether the flow measurement
results are zero; it reports FAIL if they are and PASS if not. A
minimal usage sketch follows.
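For illustration only; the "recipe" and "flow_results" names are
hypothetical, the only requirements coming from this patch are that the
recipe provides an "add_result" method and that each result exposes
"generator_results"/"receiver_results" with an "average" attribute:

    from lnst.RecipeCommon.Perf.Evaluators import NonzeroFlowEvaluator

    evaluator = NonzeroFlowEvaluator()
    # "recipe" and "flow_results" are assumed to come from the
    # surrounding perf recipe run; both names are hypothetical here
    evaluator.evaluate_results(recipe, flow_results)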
BaselineFlowAverageEvaluator - compares a flow measurement result
against a baseline flow measurement result. Based on the required
initialization parameter defining the allowed difference in percent,
it reports PASS if the measured difference is within that limit and
FAIL otherwise. For example, with a 5% limit, a result average of 94
against a baseline average of 100 is a 6% difference and therefore a
FAIL.
This is an incomplete class in the sense that it defines a
"get_baseline" method which should be overridden to find and return
the flow measurement result to compare against; the default
implementation simply returns None. A sketch of such an override
follows.
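As an illustration, an override could look something like the sketch
below; the dict-based lookup and the "baselines" constructor parameter
are hypothetical, only the "get_baseline" signature comes from this
patch:

    from lnst.RecipeCommon.Perf.Evaluators import BaselineFlowAverageEvaluator

    class DictBaselineFlowAverageEvaluator(BaselineFlowAverageEvaluator):
        def __init__(self, pass_difference, baselines):
            super(DictBaselineFlowAverageEvaluator, self).__init__(
                pass_difference)
            # hypothetical mapping of flow description strings to
            # baseline flow measurement results
            self._baselines = baselines

        def get_baseline(self, recipe, result):
            # look up the baseline for this flow; returning None makes
            # the evaluation report FAIL ("No baseline found")
            return self._baselines.get(str(result.flow), None)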
BaselineCPUAverageEvaluator - same as BaselineFlowAverageEvaluator but
for CPU measurement results. Note that the actual average comparison
is not implemented yet, so even when a baseline is found the
evaluation currently reports FAIL.
Signed-off-by: Ondrej Lichtner <olichtne(a)redhat.com>
---
.../Perf/Evaluators/BaseEvaluator.py | 3 +
.../Evaluators/BaselineCPUAverageEvaluator.py | 36 ++++++++++++
.../BaselineFlowAverageEvaluator.py | 57 +++++++++++++++++++
.../Perf/Evaluators/NonzeroFlowEvaluator.py | 26 +++++++++
lnst/RecipeCommon/Perf/Evaluators/__init__.py | 4 ++
5 files changed, 126 insertions(+)
create mode 100644 lnst/RecipeCommon/Perf/Evaluators/BaseEvaluator.py
create mode 100644 lnst/RecipeCommon/Perf/Evaluators/BaselineCPUAverageEvaluator.py
create mode 100644 lnst/RecipeCommon/Perf/Evaluators/BaselineFlowAverageEvaluator.py
create mode 100644 lnst/RecipeCommon/Perf/Evaluators/NonzeroFlowEvaluator.py
create mode 100644 lnst/RecipeCommon/Perf/Evaluators/__init__.py
diff --git a/lnst/RecipeCommon/Perf/Evaluators/BaseEvaluator.py b/lnst/RecipeCommon/Perf/Evaluators/BaseEvaluator.py
new file mode 100644
index 0000000..be82841
--- /dev/null
+++ b/lnst/RecipeCommon/Perf/Evaluators/BaseEvaluator.py
@@ -0,0 +1,3 @@
+class BaseEvaluator(object):
+    def evaluate_results(self, recipe, results):
+        raise NotImplementedError()
diff --git a/lnst/RecipeCommon/Perf/Evaluators/BaselineCPUAverageEvaluator.py b/lnst/RecipeCommon/Perf/Evaluators/BaselineCPUAverageEvaluator.py
new file mode 100644
index 0000000..de0b83d
--- /dev/null
+++ b/lnst/RecipeCommon/Perf/Evaluators/BaselineCPUAverageEvaluator.py
@@ -0,0 +1,36 @@
+from __future__ import division
+
+from .BaseEvaluator import BaseEvaluator
+
+from ..Measurements.BaseCPUMeasurement import (
+    CPUMeasurementResults,
+    AggregatedCPUMeasurementResults,
+)
+
+
+class BaselineCPUAverageEvaluator(BaseEvaluator):
+    def __init__(self, pass_difference):
+        self._pass_difference = pass_difference
+
+    def evaluate_results(self, recipe, results):
+        for result in results:
+            baseline = self.get_baseline(recipe, result)
+            self._compare_result_with_baseline(recipe, result, baseline)
+
+    def get_baseline(self, recipe, result):
+        return None
+
+    def _compare_result_with_baseline(self, recipe, result, baseline):
+        comparison_result = True
+        result_text = [
+            "CPU Baseline average evaluation",
+            "Configured {}% difference as acceptable".format(self._pass_difference),
+        ]
+        if baseline is None:
+            comparison_result = False
+            result_text.append("No baseline found for this CPU measurement")
+        else:
+            result_text.append("I don't know how to compare CPU averages yet!!!")
+            comparison_result = False
+
+        recipe.add_result(comparison_result, "\n".join(result_text))
diff --git a/lnst/RecipeCommon/Perf/Evaluators/BaselineFlowAverageEvaluator.py b/lnst/RecipeCommon/Perf/Evaluators/BaselineFlowAverageEvaluator.py
new file mode 100644
index 0000000..3f49ab0
--- /dev/null
+++ b/lnst/RecipeCommon/Perf/Evaluators/BaselineFlowAverageEvaluator.py
@@ -0,0 +1,57 @@
+from __future__ import division
+
+from .BaseEvaluator import BaseEvaluator
+
+from ..Measurements.BaseFlowMeasurement import (
+    FlowMeasurementResults,
+    AggregatedFlowMeasurementResults,
+)
+
+
+class BaselineFlowAverageEvaluator(BaseEvaluator):
+    def __init__(self, pass_difference):
+        self._pass_difference = pass_difference
+
+    def evaluate_results(self, recipe, results):
+        for result in results:
+            baseline = self.get_baseline(recipe, result)
+            self._compare_result_with_baseline(recipe, result, baseline)
+
+    def get_baseline(self, recipe, result):
+        return None
+
+    def _compare_result_with_baseline(self, recipe, result, baseline):
+        comparison_result = True
+        result_text = [
+            "Flow {} Baseline average evaluation".format(result.flow),
+            "Configured {}% difference as acceptable".format(self._pass_difference),
+        ]
+        if baseline is None:
+            comparison_result = False
+            result_text.append("No baseline found for this flow")
+        else:
+            generator_diff = _result_averages_difference(
+                result.generator_results,
+                baseline.generator_results)
+            result_text.append(
+                "Generator average is {:.2f}% different from the baseline generator average"
+                .format(generator_diff))
+
+            receiver_diff = _result_averages_difference(
+                result.receiver_results,
+                baseline.receiver_results)
+            result_text.append(
+                "Receiver average is {:.2f}% different from the baseline receiver average"
+                .format(receiver_diff))
+
+            if (
+                abs(generator_diff) > self._pass_difference
+                or abs(receiver_diff) > self._pass_difference
+            ):
+                comparison_result = False
+
+        recipe.add_result(comparison_result, "\n".join(result_text))
+
+
+def _result_averages_difference(a, b):
+    return 100 - ((a.average / b.average) * 100)
diff --git a/lnst/RecipeCommon/Perf/Evaluators/NonzeroFlowEvaluator.py b/lnst/RecipeCommon/Perf/Evaluators/NonzeroFlowEvaluator.py
new file mode 100644
index 0000000..5ee9853
--- /dev/null
+++ b/lnst/RecipeCommon/Perf/Evaluators/NonzeroFlowEvaluator.py
@@ -0,0 +1,26 @@
+from .BaseEvaluator import BaseEvaluator
+
+from ..Measurements.BaseFlowMeasurement import (
+    FlowMeasurementResults,
+    AggregatedFlowMeasurementResults,
+)
+
+
+class NonzeroFlowEvaluator(BaseEvaluator):
+    def evaluate_results(self, recipe, results):
+        for flow_results in results:
+            result = True
+            result_text = ["Flow {} Nonzero evaluation".format(flow_results.flow)]
+            if flow_results.generator_results.average > 0:
+                result_text.append("Generator reported non-zero throughput")
+            else:
+                result = False
+                result_text.append("Generator reported zero throughput")
+
+            if flow_results.receiver_results.average > 0:
+                result_text.append("Receiver reported non-zero throughput")
+            else:
+                result = False
+                result_text.append("Receiver reported zero throughput")
+
+            recipe.add_result(result, "\n".join(result_text))
diff --git a/lnst/RecipeCommon/Perf/Evaluators/__init__.py b/lnst/RecipeCommon/Perf/Evaluators/__init__.py
new file mode 100644
index 0000000..9186ba7
--- /dev/null
+++ b/lnst/RecipeCommon/Perf/Evaluators/__init__.py
@@ -0,0 +1,4 @@
+from .NonzeroFlowEvaluator import NonzeroFlowEvaluator
+from .BaselineFlowAverageEvaluator import BaselineFlowAverageEvaluator
+
+from .BaselineCPUAverageEvaluator import BaselineCPUAverageEvaluator
--
2.21.0