On Thu, Nov 15, 2018 at 10:23:42AM +0100, Jan Tluka wrote:
Wed, Nov 14, 2018 at 04:04:46PM CET, olichtne@redhat.com wrote:
From: Ondrej Lichtner <olichtne@redhat.com>
Refactoring the Perf and PerfResult modules into a separate package lnst.RecipeCommon.Perf that will host everything related to the Perf recipe template.
I'm also considering moving this into the lnst.Recipes package later, where it might make more sense as an actual recipe, with an example test method that shows off the basic usage of the template.
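To give an idea of the intended use, here's a rough sketch (not part of the patch) of how a recipe could drive the template, assuming the usual BaseRecipe test() entry point; DummyMeasurement is purely hypothetical and only implements the interface that Recipe.perf_test() relies on, since the real Measurements hierarchy only arrives in the following commit:

    from lnst.RecipeCommon.Perf.Recipe import Recipe, RecipeConf

    class DummyMeasurement(object):
        # hypothetical stand-in for the Measurements hierarchy added in the
        # next commit; only the methods used by Recipe.perf_test() are present
        def start(self):
            pass

        def finish(self):
            pass

        def collect_results(self):
            return []

        def aggregate_results(self, old, new):
            return new

    class ExamplePerfRecipe(Recipe):
        def test(self):
            conf = RecipeConf(measurements=[DummyMeasurement()], iterations=5)
            results = self.perf_test(conf)
            # results is a RecipeResults instance mapping each measurement
            # to its results aggregated across the 5 iterations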
Changes summary:
- moved lnst/RecipeCommon/Perf.py to lnst/RecipeCommon/Perf/Recipe.py
- renamed PerfTestAndEvaluate class to just Recipe since the "Perf" part
is obvious from the namespace
- PerfConf class renamed to RecipeConf
- RecipeConf only contains configuration for the Recipe - the
list of measurements to do and the number of repeats for these
- PerfMeasurementTool removed, this will be replaced by the Measurements
class hierarchy added in the following commit
- added RecipeResults class to store aggregated measurement results
associated with the current Recipe configuration
- moved lnst/RecipeCommon/PerfResult.py to lnst/RecipeCommon/Perf/Results.py
- removed StreamPerf, MultiStreamPerf, MultiRunPerf and replaced them
  with SequentialPerfResult and ParallelPerfResult to improve code reuse
  (see the composition sketch after this summary)
- added the PerfResult base class
- set PerfInterval string formatting precision to 2 decimals
- improved code reuse for item validation in PerfList class
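For illustration, roughly how I picture the new result classes composing. This is only a sketch: I'm assuming PerfInterval(value, duration, unit) and the list-like append() inherited from PerfList, which may not match the final API exactly.

    from lnst.RecipeCommon.Perf.Results import PerfInterval
    from lnst.RecipeCommon.Perf.Results import SequentialPerfResult
    from lnst.RecipeCommon.Perf.Results import ParallelPerfResult

    # one stream == consecutive per-second samples -> sequential
    stream1 = SequentialPerfResult()
    stream1.append(PerfInterval(1000, 1, "bits"))
    stream1.append(PerfInterval(1200, 1, "bits"))

    stream2 = SequentialPerfResult()
    stream2.append(PerfInterval(900, 1, "bits"))
    stream2.append(PerfInterval(1100, 1, "bits"))

    # the streams of one run are measured at the same time -> parallel
    run = ParallelPerfResult()
    run.append(stream1)
    run.append(stream2)

    # repeated runs happen one after another -> sequential again
    all_runs = SequentialPerfResult()
    all_runs.append(run)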
Signed-off-by: Ondrej Lichtner <olichtne@redhat.com>
 lnst/RecipeCommon/Perf.py              | 120 ------------------
 lnst/RecipeCommon/Perf/Recipe.py       |  73 +++++++++++
 .../{PerfResult.py => Perf/Results.py} |  65 ++++------
 lnst/RecipeCommon/Perf/__init__.py     |   0
 lnst/Recipes/ENRT/BaseEnrtRecipe.py    |  20 +--
 5 files changed, 106 insertions(+), 172 deletions(-)
 delete mode 100644 lnst/RecipeCommon/Perf.py
 create mode 100644 lnst/RecipeCommon/Perf/Recipe.py
 rename lnst/RecipeCommon/{PerfResult.py => Perf/Results.py} (72%)
 create mode 100644 lnst/RecipeCommon/Perf/__init__.py
diff --git a/lnst/RecipeCommon/Perf.py b/lnst/RecipeCommon/Perf.py
deleted file mode 100644
index 97aa0f1..0000000
--- a/lnst/RecipeCommon/Perf.py
+++ /dev/null
@@ -1,120 +0,0 @@
-from lnst.Controller.Recipe import BaseRecipe
-from lnst.RecipeCommon.PerfResult import MultiRunPerf
-
-class PerfConf(object):
-    def __init__(self,
-                 perf_tool,
-                 test_type,
-                 generator, generator_bind,
-                 receiver, receiver_bind,
-                 msg_size, duration, iterations, streams):
-        self._perf_tool = perf_tool
-        self._test_type = test_type
-        self._generator = generator
-        self._generator_bind = generator_bind
-        self._receiver = receiver
-        self._receiver_bind = receiver_bind
-        self._msg_size = msg_size
-        self._duration = duration
-        self._iterations = iterations
-        self._streams = streams
-
-    @property
-    def perf_tool(self):
-        return self._perf_tool
-
-    @property
-    def generator(self):
-        return self._generator
-
-    @property
-    def generator_bind(self):
-        return self._generator_bind
-
-    @property
-    def receiver(self):
-        return self._receiver
-
-    @property
-    def receiver_bind(self):
-        return self._receiver_bind
-
-    @property
-    def test_type(self):
-        return self._test_type
-
-    @property
-    def msg_size(self):
-        return self._msg_size
-
-    @property
-    def duration(self):
-        return self._duration
-
-    @property
-    def iterations(self):
-        return self._iterations
-
-    @property
-    def streams(self):
-        return self._streams
-
-class PerfMeasurementTool(object):
-    @staticmethod
-    def perf_measure(perf_conf):
-        raise NotImplementedError
-
-class PerfTestAndEvaluate(BaseRecipe):
-    def perf_test(self, perf_conf):
-        generator_measurements = MultiRunPerf()
-        receiver_measurements = MultiRunPerf()
-        for i in range(perf_conf.iterations):
-            tx, rx = perf_conf.perf_tool.perf_measure(perf_conf)
-
-            if tx:
-                generator_measurements.append(tx)
-            if rx:
-                receiver_measurements.append(rx)
-
-        return generator_measurements, receiver_measurements
-
-    def perf_evaluate_and_report(self, perf_conf, results, baseline):
-        self.perf_evaluate(perf_conf, results, baseline)
-
-        self.perf_report(perf_conf, results, baseline)
-
-    def perf_evaluate(self, perf_conf, results, baseline):
-        generator, receiver = results
-
-        if generator.average > 0:
-            self.add_result(True, "Generator reported non-zero throughput")
-        else:
-            self.add_result(False, "Generator reported zero throughput")
-
-        if receiver.average > 0:
-            self.add_result(True, "Receiver reported non-zero throughput")
-        else:
-            self.add_result(False, "Receiver reported zero throughput")
-
-    def perf_report(self, perf_conf, results, baseline):
-        generator, receiver = results
-
-        self.add_result(
-            True,
-            "Generator measured throughput: {tput} +-{deviation}({percentage:.2}%) {unit} per second"
-            .format(tput=generator.average,
-                    deviation=generator.std_deviation,
-                    percentage=(generator.std_deviation/generator.average) * 100,
-                    unit=generator.unit),
-            data = generator)
-        self.add_result(
-            True,
-            "Receiver measured throughput: {tput} +-{deviation}({percentage:.2}%) {unit} per second"
-            .format(tput=receiver.average,
-                    deviation=receiver.std_deviation,
-                    percentage=(receiver.std_deviation/receiver.average) * 100,
-                    unit=receiver.unit),
-            data = receiver)
diff --git a/lnst/RecipeCommon/Perf/Recipe.py b/lnst/RecipeCommon/Perf/Recipe.py
new file mode 100644
index 0000000..e305310
--- /dev/null
+++ b/lnst/RecipeCommon/Perf/Recipe.py
@@ -0,0 +1,73 @@
+from lnst.Controller.Recipe import BaseRecipe
+from lnst.RecipeCommon.Perf.Results import SequentialPerfResult
+from lnst.RecipeCommon.Perf.Results import ParallelPerfResult
+
+class RecipeConf(object):
+    def __init__(self, measurements, iterations):
+        self._measurements = measurements
+        self._iterations = iterations
+
+    @property
+    def measurements(self):
+        return self._measurements
+
+    @property
+    def iterations(self):
+        return self._iterations
+
+class RecipeResults(object):
+    def __init__(self, perf_conf):
+        self._perf_conf = perf_conf
+        self._results = {}
+
+    @property
+    def perf_conf(self):
+        return self._perf_conf
+
+    @property
+    def results(self):
+        return self._results
+
+    def add_measurement_results(self, measurement, new_results):
+        aggregated_results = self._results.get(measurement, None)
+        aggregated_results = measurement.aggregate_results(
+                aggregated_results, new_results)
+        self._results[measurement] = aggregated_results
+
+class Recipe(BaseRecipe):
+    def perf_test(self, recipe_conf):
+        results = RecipeResults(recipe_conf)
+        for i in range(recipe_conf.iterations):
+            run_results = []
+            for measurement in recipe_conf.measurements:
+                measurement.start()
+            for measurement in reversed(recipe_conf.measurements):
I don't understand why it needs to be reversed here.
If I start the measurements as m1, m2, m3, m4, I'd expect to finish them in the same order.
Please explain.
Start and finish aren't perfectly synchronized. Specifying an order (m1 m2 m3) makes the tester (at least in my case) feel that the measurements start at the same time, or at worst in the specified order. In that case I'd expect m1 to start first and to include measurement for the full duration of m2, and m2 to include measurement for the full duration of m3, and so on. To get that, you need to start them in the specified order and then finish them in the reverse order.
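A stripped-down sketch of what I mean (M is just a stand-in for this example, not a class from the patch; any object with start()/finish() behaves the same):

    class M(object):
        def __init__(self, name):
            self.name = name

        def start(self):
            print("start", self.name)

        def finish(self):
            print("finish", self.name)

    m_list = [M("m1"), M("m2"), M("m3")]

    for m in m_list:            # starts in the given order: m1, m2, m3
        m.start()

    # ... the actual measurements run here ...

    for m in reversed(m_list):  # finishes in reverse order: m3, m2, m1
        m.finish()

    # so the measurement windows end up nested:
    #   m1: |------------------------|
    #   m2:     |----------------|
    #   m3:         |--------|
    # m1 covers the full duration of m2, and m2 covers the full duration of m3.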
Does that answer the question?
-Ondrej
+                measurement.finish()
+            for measurement in recipe_conf.measurements:
+                measurement_results = measurement.collect_results()
+                results.add_measurement_results(
+                        measurement, measurement_results)
+        return results