Wed, Nov 14, 2018 at 04:04:47PM CET, olichtne@redhat.com wrote:
From: Ondrej Lichtner <olichtne@redhat.com>
This is the second part of the refactoring of the PerfAndEvaluate recipe workflow. In generic terms, it introduces a new package that stores a class hierarchy for the various Measurement types and implementations.
At the base level there is the BaseMeasurement class and module, which defines the interface that all the other classes have to implement. This interface is understood and relied upon by the lnst.RecipeCommon.Perf.Recipe class.
The refactoring includes a move+rename of the IperfMeasurementTool and TRexMeasurementTool into the new IperfFlowMeasurement and TRexMeasurement classes/modules, and the addition of the new StatCPUMeasurement class, which uses the CPUStatMonitor test module to measure CPU utilization.
Finally, these changes are wired into the BaseEnrtRecipe so that everything keeps working.
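(Aside for readers following the series: a concrete measurement plugs into this interface roughly like the sketch below. Illustration only - DummyMeasurement and its sample tuples are made up, only the BaseMeasurement method names and PerfInterval come from the patch itself.)

    from lnst.RecipeCommon.Perf.Results import PerfInterval
    from lnst.RecipeCommon.Perf.Measurements.BaseMeasurement import BaseMeasurement

    class DummyMeasurement(BaseMeasurement):
        # hypothetical example: 'conf' is a list of (value, duration, unit) tuples
        def __init__(self, conf):
            super(DummyMeasurement, self).__init__(conf)
            self._samples = []

        def start(self):
            # a real implementation would start background jobs on hosts here
            self._samples = list(self.conf)

        def finish(self):
            # a real implementation would wait for / stop those jobs here
            pass

        def collect_results(self):
            # return the collected results; a real measurement returns its own
            # *MeasurementResults objects instead of bare intervals
            return [PerfInterval(value, duration, unit)
                    for value, duration, unit in self._samples]

The reporting/aggregation classmethods from BaseMeasurement would still need to be provided for a real measurement; the sketch only covers the collection half.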
v2:
- fixed typo in IperfFlowMeasurement - the unit for cpu utilization returned by Iperf should be "cpu_percent".
Signed-off-by: Ondrej Lichtner <olichtne@redhat.com>
 lnst/RecipeCommon/IperfMeasurementTool.py     |  83 -------
 .../Perf/Measurements/BaseCPUMeasurement.py   | 109 ++++++++++
 .../Perf/Measurements/BaseFlowMeasurement.py  | 202 ++++++++++++++++++
 .../Perf/Measurements/BaseMeasurement.py      |  29 +++
 .../Perf/Measurements/IperfFlowMeasurement.py | 157 ++++++++++++++
 .../Perf/Measurements/MeasurementError.py     |   4 +
 .../Perf/Measurements/StatCPUMeasurement.py   |  88 ++++++++
 .../Measurements/TRexMeasurement.py}          |   0
 .../Perf/Measurements/__init__.py             |   3 +
 lnst/Recipes/ENRT/BaseEnrtRecipe.py           |  27 ++-
 10 files changed, 614 insertions(+), 88 deletions(-)
 delete mode 100644 lnst/RecipeCommon/IperfMeasurementTool.py
 create mode 100644 lnst/RecipeCommon/Perf/Measurements/BaseCPUMeasurement.py
 create mode 100644 lnst/RecipeCommon/Perf/Measurements/BaseFlowMeasurement.py
 create mode 100644 lnst/RecipeCommon/Perf/Measurements/BaseMeasurement.py
 create mode 100644 lnst/RecipeCommon/Perf/Measurements/IperfFlowMeasurement.py
 create mode 100644 lnst/RecipeCommon/Perf/Measurements/MeasurementError.py
 create mode 100644 lnst/RecipeCommon/Perf/Measurements/StatCPUMeasurement.py
 rename lnst/RecipeCommon/{TRexMeasurementTool.py => Perf/Measurements/TRexMeasurement.py} (100%)
 create mode 100644 lnst/RecipeCommon/Perf/Measurements/__init__.py
diff --git a/lnst/RecipeCommon/IperfMeasurementTool.py b/lnst/RecipeCommon/IperfMeasurementTool.py
deleted file mode 100644
index 9f2e49e..0000000
--- a/lnst/RecipeCommon/IperfMeasurementTool.py
+++ /dev/null
@@ -1,83 +0,0 @@
-import time
-import signal
-from lnst.Common.IpAddress import ipaddress
-from lnst.Controller.Recipe import RecipeError
-from lnst.Controller.RecipeResults import ResultLevel
-from lnst.RecipeCommon.Perf import PerfConf, PerfMeasurementTool
-from lnst.RecipeCommon.PerfResult import PerfInterval, StreamPerf
-from lnst.RecipeCommon.PerfResult import MultiStreamPerf
-from lnst.Tests.Iperf import IperfClient, IperfServer
-class IperfMeasurementTool(PerfMeasurementTool):
- @staticmethod
- def perf_measure(perf_conf):
_iperf_duration_overhead = 5
server_params = dict(bind = ipaddress(perf_conf.receiver_bind),
oneoff = True)
client_params = dict(server = server_params["bind"],
duration = perf_conf.duration,
parallel = perf_conf.streams)
if perf_conf.test_type == "tcp_stream":
#tcp stream is the default for iperf3
pass
elif perf_conf.test_type == "udp_stream":
client_params["udp"] = True
elif perf_conf.test_type == "sctp_stream":
client_params["sctp"] = True
else:
raise RecipeError("Unsupported test type '{}'"
.format(perf_conf.test_type))
server = IperfServer(**server_params)
client = IperfClient(**client_params)
server_host = perf_conf.receiver
client_host = perf_conf.generator
result = None
try:
server_job = server_host.run(server, bg=True,
job_level=ResultLevel.NORMAL)
#wait for server to start, TODO can this be improved?
time.sleep(2)
duration = client.params.duration + _iperf_duration_overhead
client_job = client_host.run(client, timeout=duration,
job_level=ResultLevel.NORMAL)
server_job.wait(timeout=5)
finally:
if client_job and not client_job.finished:
client_job.kill()
if server_job and not server_job.finished:
server_job.kill()
#TODO return something if not passed
if client_job.passed:
client_result = MultiStreamPerf()
for i in client_job.result["data"]["end"]["streams"]:
client_result.append(StreamPerf())
for interval in client_job.result["data"]["intervals"]:
for i, stream in enumerate(interval["streams"]):
client_result[i].append(PerfInterval(stream["bytes"] * 8,
stream["seconds"],
"bits"))
#TODO return something if not passed
if server_job.passed:
server_result = MultiStreamPerf()
for i in server_job.result["data"]["end"]["streams"]:
server_result.append(StreamPerf())
for interval in server_job.result["data"]["intervals"]:
for i, stream in enumerate(interval["streams"]):
server_result[i].append(PerfInterval(stream["bytes"] * 8,
stream["seconds"],
"bits"))
return client_result, server_result
diff --git a/lnst/RecipeCommon/Perf/Measurements/BaseCPUMeasurement.py b/lnst/RecipeCommon/Perf/Measurements/BaseCPUMeasurement.py
new file mode 100644
index 0000000..2507f3c
--- /dev/null
+++ b/lnst/RecipeCommon/Perf/Measurements/BaseCPUMeasurement.py
@@ -0,0 +1,109 @@
+import signal
+from lnst.RecipeCommon.Perf.Measurements.MeasurementError import MeasurementError
+from lnst.RecipeCommon.Perf.Measurements.BaseMeasurement import BaseMeasurement
+from lnst.RecipeCommon.Perf.Results import SequentialPerfResult
+class CPUMeasurementResults(object):
- def __init__(self, host, cpu):
self._host = host
self._cpu = cpu
- @property
- def host(self):
return self._host
- @property
- def cpu(self):
return self._cpu
- @property
- def utilization(self):
raise NotImplementedError()
+class AggregatedCPUMeasurementResults(CPUMeasurementResults):
- def __init__(self, host, cpu):
super(AggregatedCPUMeasurementResults, self).__init__(host, cpu)
self._individual_results = []
- @property
- def individual_results(self):
return self._individual_results
- @property
- def utilization(self):
return SequentialPerfResult([i.utilization
for i in self.individual_results])
- def add_results(self, results):
if results is None:
return
elif isinstance(results, AggregatedCPUMeasurementResults):
self.individual_results.extend(results.individual_results)
elif isinstance(results, CPUMeasurementResults):
self.individual_results.append(results)
else:
raise MeasurementError("Adding incorrect results.")
+class BaseCPUMeasurement(BaseMeasurement):
- @classmethod
- def aggregate_results(cls, old, new):
aggregated = []
if old is None:
old = [None] * len(new)
for old_measurements, new_measurements in zip(old, new):
aggregated.append(cls._aggregate_hostcpu_results(
old_measurements, new_measurements))
return aggregated
- @classmethod
- def report_results(cls, recipe, results):
results_by_host = cls._divide_results_by_host(results)
for host_results in results_by_host.values():
cls._report_host_results(recipe, host_results)
- @classmethod
- def evaluate_results(cls, recipe, results):
#TODO split off into a separate evaluator class
for result in results:
recipe.add_result(True,
"Base CPU evaluation for host {}, cpu {}".format(
result.host.hostid, result.cpu))
- @classmethod
- def _divide_results_by_host(cls, results):
results_by_host = {}
for result in results:
if result.host not in results_by_host:
results_by_host[result.host] = []
results_by_host[result.host].append(result)
return results_by_host
- @classmethod
- def _report_host_results(cls, recipe, results):
if not len(results):
return
cpu_data = {}
desc = ["CPU Utilization on host {host}:".format(
host=results[0].host.hostid)]
for result in results:
utilization = result.utilization
cpu_data[result.cpu] = utilization
desc.append("cpu '{cpu}': {average:.2f} +-{deviation:.2f} {unit} per second"
.format(cpu=result.cpu,
average=utilization.average,
deviation=utilization.std_deviation,
unit=utilization.unit))
recipe.add_result(True, "\n".join(desc), data=cpu_data)
- @classmethod
- def _aggregate_hostcpu_results(cls, old, new):
if (old is not None and
(old.host is not new.host or old.cpu != new.cpu)):
raise MeasurementError("Aggregating incompatible CPU Results")
new_result = AggregatedCPUMeasurementResults(new.host, new.cpu)
new_result.add_results(old)
new_result.add_results(new)
return new_result
diff --git a/lnst/RecipeCommon/Perf/Measurements/BaseFlowMeasurement.py b/lnst/RecipeCommon/Perf/Measurements/BaseFlowMeasurement.py
new file mode 100644
index 0000000..203e104
--- /dev/null
+++ b/lnst/RecipeCommon/Perf/Measurements/BaseFlowMeasurement.py
@@ -0,0 +1,202 @@
+import signal
+from lnst.RecipeCommon.Perf.Measurements.MeasurementError import MeasurementError
+from lnst.RecipeCommon.Perf.Measurements.BaseMeasurement import BaseMeasurement
+from lnst.RecipeCommon.Perf.Results import SequentialPerfResult
+class Flow(object):
- def __init__(self,
type,
generator, generator_bind,
receiver, receiver_bind,
msg_size, duration, parallel_streams):
self._type = type
self._generator = generator
self._generator_bind = generator_bind
self._receiver = receiver
self._receiver_bind = receiver_bind
self._msg_size = msg_size
self._duration = duration
self._parallel_streams = parallel_streams
- @property
- def type(self):
return self._type
- @property
- def generator(self):
return self._generator
- @property
- def generator_bind(self):
return self._generator_bind
- @property
- def receiver(self):
return self._receiver
- @property
- def receiver_bind(self):
return self._receiver_bind
- @property
- def msg_size(self):
return self._msg_size
- @property
- def duration(self):
return self._duration
- @property
- def parallel_streams(self):
return self._parallel_streams
+class FlowMeasurementResults(object):
- def __init__(self, flow):
self._flow = flow
self._generator_results = None
self._generator_cpu_stats = None
self._receiver_results = None
self._receiver_cpu_stats = None
- @property
- def flow(self):
return self._flow
- @property
- def generator_results(self):
return self._generator_results
- @generator_results.setter
- def generator_results(self, value):
self._generator_results = value
- @property
- def generator_cpu_stats(self):
return self._generator_cpu_stats
- @generator_cpu_stats.setter
- def generator_cpu_stats(self, value):
self._generator_cpu_stats = value
- @property
- def receiver_results(self):
return self._receiver_results
- @receiver_results.setter
- def receiver_results(self, value):
self._receiver_results = value
- @property
- def receiver_cpu_stats(self):
return self._receiver_cpu_stats
- @receiver_cpu_stats.setter
- def receiver_cpu_stats(self, value):
self._receiver_cpu_stats = value
+class AggregatedFlowMeasurementResults(FlowMeasurementResults):
- def __init__(self, flow):
self._flow = flow
self._generator_results = SequentialPerfResult()
self._generator_cpu_stats = SequentialPerfResult()
self._receiver_results = SequentialPerfResult()
self._receiver_cpu_stats = SequentialPerfResult()
self._individual_results = []
- @property
- def individual_results(self):
return self._individual_results
- def add_results(self, results):
if results is None:
return
elif isinstance(results, AggregatedFlowMeasurementResults):
self.individual_results.extend(results.individual_results)
self.generator_results.extend(results.generator_results)
self.generator_cpu_stats.extend(results.generator_cpu_stats)
self.receiver_results.extend(results.receiver_results)
self.receiver_cpu_stats.extend(results.receiver_cpu_stats)
elif isinstance(results, FlowMeasurementResults):
self.individual_results.append(results)
^^^^^^^ Shouldn't this be append(results.individual_results)?
self.generator_results.append(results.generator_results)
self.generator_cpu_stats.append(results.generator_cpu_stats)
self.receiver_results.append(results.receiver_results)
self.receiver_cpu_stats.append(results.receiver_cpu_stats)
else:
raise MeasurementError("Adding incorrect results.")
+class BaseFlowMeasurement(BaseMeasurement):
- @classmethod
- def report_results(cls, recipe, results):
for flow_results in results:
cls._report_flow_results(recipe, flow_results)
- @classmethod
- def evaluate_results(cls, recipe, results):
#TODO split off into a separate evaluator class
for flow_results in results:
if flow_results.generator_results.average > 0:
recipe.add_result(True, "Generator reported non-zero throughput")
else:
recipe.add_result(False, "Generator reported zero throughput")
if flow_results.receiver_results.average > 0:
recipe.add_result(True, "Receiver reported non-zero throughput")
else:
recipe.add_result(False, "Receiver reported zero throughput")
- @classmethod
- def _report_flow_results(cls, recipe, flow_results):
generator = flow_results.generator_results
generator_cpu = flow_results.generator_cpu_stats
receiver = flow_results.receiver_results
receiver_cpu = flow_results.receiver_cpu_stats
desc = []
desc.append("Generator measured throughput: {tput:.2f} +-{deviation:.2f}({percentage:.2f}%) {unit} per second."
.format(tput=generator.average,
deviation=generator.std_deviation,
percentage=(generator.std_deviation/generator.average) * 100,
unit=generator.unit))
desc.append("Generator process CPU data: {cpu:.2f} +-{cpu_deviation:.2f} {cpu_unit} per second."
.format(cpu=generator_cpu.average,
cpu_deviation=generator_cpu.std_deviation,
cpu_unit=generator_cpu.unit))
desc.append("Receiver measured throughput: {tput:.2f} +-{deviation:.2f}({percentage:.2}%) {unit} per second."
.format(tput=receiver.average,
deviation=receiver.std_deviation,
percentage=(receiver.std_deviation/receiver.average) * 100,
unit=receiver.unit))
desc.append("Receiver process CPU data: {cpu:.2f} +-{cpu_deviation:.2f} {cpu_unit} per second."
.format(cpu=receiver_cpu.average,
cpu_deviation=receiver_cpu.std_deviation,
cpu_unit=receiver_cpu.unit))
#TODO add flow description
recipe.add_result(True, "\n".join(desc), data = dict(
generator_flow_data=generator,
generator_cpu_data=generator_cpu,
receiver_flow_data=receiver,
receiver_cpu_data=receiver_cpu))
- @classmethod
- def aggregate_results(cls, old, new):
aggregated = []
if old is None:
old = [None] * len(new)
for old_flow, new_flow in zip(old, new):
aggregated.append(cls._aggregate_flows(old_flow, new_flow))
return aggregated
- @classmethod
- def _aggregate_flows(cls, old_flow, new_flow):
if old_flow is not None and old_flow.flow is not new_flow.flow:
raise MeasurementError("Aggregating incompatible Flows")
new_result = AggregatedFlowMeasurementResults(new_flow.flow)
new_result.add_results(old_flow)
new_result.add_results(new_flow)
return new_result
diff --git a/lnst/RecipeCommon/Perf/Measurements/BaseMeasurement.py b/lnst/RecipeCommon/Perf/Measurements/BaseMeasurement.py
new file mode 100644
index 0000000..8059308
--- /dev/null
+++ b/lnst/RecipeCommon/Perf/Measurements/BaseMeasurement.py
@@ -0,0 +1,29 @@
+class BaseMeasurement(object):
- def __init__(self, conf):
self._conf = conf
- @property
- def conf(self):
return self._conf
- def start(self):
raise NotImplementedError()
- def finish(self):
raise NotImplementedError()
- def collect_results(self):
raise NotImplementedError()
- @classmethod
- def report_results(recipe, results):
raise NotImplementedError()
- @classmethod
- def evaluate_results(recipe, results):
#TODO split off into separate evaluator classes
raise NotImplementedError()
- @classmethod
- def aggregate_results(first, second):
raise NotImplementedError()
diff --git a/lnst/RecipeCommon/Perf/Measurements/IperfFlowMeasurement.py b/lnst/RecipeCommon/Perf/Measurements/IperfFlowMeasurement.py
new file mode 100644
index 0000000..c792e9d
--- /dev/null
+++ b/lnst/RecipeCommon/Perf/Measurements/IperfFlowMeasurement.py
@@ -0,0 +1,157 @@
+import time
+from lnst.Common.IpAddress import ipaddress
+from lnst.Controller.Recipe import RecipeError
+from lnst.Controller.RecipeResults import ResultLevel
+from lnst.RecipeCommon.Perf.Results import PerfInterval
+from lnst.RecipeCommon.Perf.Results import SequentialPerfResult
+from lnst.RecipeCommon.Perf.Results import ParallelPerfResult
+from lnst.RecipeCommon.Perf.Measurements.BaseFlowMeasurement import BaseFlowMeasurement
+from lnst.RecipeCommon.Perf.Measurements.BaseFlowMeasurement import FlowMeasurementResults
+from lnst.Tests.Iperf import IperfClient, IperfServer
+class IperfFlowMeasurement(BaseFlowMeasurement):
- def __init__(self, *args):
super(IperfFlowMeasurement, self).__init__(*args)
self._running_measurements = []
self._finished_measurements = []
- def start(self):
if len(self._running_measurements) > 0:
raise MeasurementError("Measurement already running!")
test_flows = self._prepare_test_flows(self._conf)
result = None
for flow in test_flows:
flow.server_job.start(bg=True)
for flow in test_flows:
flow.client_job.start(bg=True)
self._running_measurements = test_flows
- def finish(self):
test_flows = self._running_measurements
try:
for flow in test_flows:
client_iperf = flow.client_job.what
flow.client_job.wait(timeout=client_iperf.runtime_estimate())
flow.server_job.wait(timeout=5)
finally:
for flow in test_flows:
if not flow.server_job.finished:
flow.server_job.kill()
if not flow.client_job.finished:
flow.client_job.kill()
Just wondering if the kill() method could handle the .finished check automatically - if the job is already finished, it would simply do nothing.
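Something like this, purely as an illustration (the Job internals below are made up - only the .finished property and kill() appear in this patch):

    import signal

    class Job(object):
        # hypothetical, minimal stand-in for the controller Job class
        def __init__(self):
            self.finished = False

        def _send_signal_to_slave(self, sig):
            # placeholder for the actual RPC that delivers the signal
            return True

        def kill(self, sig=signal.SIGKILL):
            # no-op when the job already finished, so callers wouldn't need
            # the explicit 'if not job.finished' guard
            if self.finished:
                return True
            return self._send_signal_to_slave(sig)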
self._running_measurements = []
self._finished_measurements = test_flows
- def collect_results(self):
test_flows = self._finished_measurements
results = []
for test_flow in test_flows:
flow_results = FlowMeasurementResults(test_flow.flow)
flow_results.generator_results = self._parse_job_streams(
test_flow.client_job)
flow_results.generator_cpu_stats = self._parse_job_cpu(
test_flow.client_job)
flow_results.receiver_results = self._parse_job_streams(
test_flow.server_job)
flow_results.receiver_cpu_stats = self._parse_job_cpu(
test_flow.server_job)
results.append(flow_results)
return results
- def _prepare_test_flows(self, flows):
test_flows = []
for flow in flows:
server_job = self._prepare_server(flow)
client_job = self._prepare_client(flow)
test_flow = NetworkFlowTest(flow, server_job, client_job)
test_flows.append(test_flow)
return test_flows
- def _prepare_server(self, flow):
host = flow.receiver
server_params = dict(bind = ipaddress(flow.receiver_bind),
oneoff = True)
return host.prepare_job(IperfServer(**server_params),
job_level=ResultLevel.NORMAL)
- def _prepare_client(self, flow):
host = flow.generator
client_params = dict(server = ipaddress(flow.receiver_bind),
duration = flow.duration)
if flow.type == "tcp_stream":
#tcp stream is the default for iperf3
pass
elif flow.type == "udp_stream":
client_params["udp"] = True
elif flow.type == "sctp_stream":
client_params["sctp"] = True
else:
raise RecipeError("Unsupported flow type '{}'".format(flow.type))
if flow.parallel_streams > 1:
client_params["parallel"] = flow.parallel_streams
if flow.msg_size:
client_params["blksize"] = flow.msg_size
return host.prepare_job(IperfClient(**client_params),
job_level=ResultLevel.NORMAL)
- def _parse_job_streams(self, job):
result = ParallelPerfResult()
if not job.passed:
result.append(PerfInterval(0, 0, "bits"))
else:
for i in job.result["data"]["end"]["streams"]:
result.append(SequentialPerfResult())
for interval in job.result["data"]["intervals"]:
for i, stream in enumerate(interval["streams"]):
result[i].append(PerfInterval(stream["bytes"] * 8,
stream["seconds"],
"bits"))
return result
- def _parse_job_cpu(self, job):
if not job.passed:
return PerfInterval(0, 0, "cpu_percent")
else:
cpu_percent = job.result["data"]["end"]["cpu_utilization_percent"]["host_total"]
return PerfInterval(cpu_percent, 1, "cpu_percent")
+class NetworkFlowTest(object):
- def __init__(self, flow, server_job, client_job):
self._flow = flow
self._server_job = server_job
self._client_job = client_job
- @property
- def flow(self):
return self._flow
- @property
- def server_job(self):
return self._server_job
- @property
- def client_job(self):
return self._client_job
- @property
- def duration(self):
return self._flow.duration
diff --git a/lnst/RecipeCommon/Perf/Measurements/MeasurementError.py b/lnst/RecipeCommon/Perf/Measurements/MeasurementError.py
new file mode 100644
index 0000000..66ed168
--- /dev/null
+++ b/lnst/RecipeCommon/Perf/Measurements/MeasurementError.py
@@ -0,0 +1,4 @@
+from lnst.Common.LnstError import LnstError
+class MeasurementError(LnstError):
- pass
diff --git a/lnst/RecipeCommon/Perf/Measurements/StatCPUMeasurement.py b/lnst/RecipeCommon/Perf/Measurements/StatCPUMeasurement.py
new file mode 100644
index 0000000..14e7f73
--- /dev/null
+++ b/lnst/RecipeCommon/Perf/Measurements/StatCPUMeasurement.py
@@ -0,0 +1,88 @@
+import signal
+from lnst.RecipeCommon.Perf.Results import PerfInterval
+from lnst.RecipeCommon.Perf.Results import SequentialPerfResult
+from lnst.RecipeCommon.Perf.Results import ParallelPerfResult
+from lnst.RecipeCommon.Perf.Measurements.BaseCPUMeasurement import BaseCPUMeasurement
+from lnst.RecipeCommon.Perf.Measurements.BaseCPUMeasurement import CPUMeasurementResults
+from lnst.Tests.CPUStatMonitor import CPUStatMonitor
+class StatCPUMeasurementResults(CPUMeasurementResults):
- def __init__(self, *args):
super(StatCPUMeasurementResults, self).__init__(*args)
self._data = {}
- def update_intervals(self, intervals):
for key, interval in intervals.items():
if key not in self._data:
self._data[key] = SequentialPerfResult()
self._data[key].append(interval)
- @property
- def utilization(self):
return ParallelPerfResult([self._data["user"], self._data["nice"],
self._data["system"], self._data["irq"], self._data["softirq"],
self._data["steal"]])
+class StatCPUMeasurement(BaseCPUMeasurement):
- def __init__(self, *args):
super(StatCPUMeasurement, self).__init__(*args)
self._running_measurements = []
self._finished_measurements = []
- def start(self):
jobs = []
for host in self._conf:
jobs.append(host.run(CPUStatMonitor(interval=1000),bg=True))
self._running_measurements = jobs
- def finish(self):
jobs = self._running_measurements
try:
for job in jobs:
job.kill(signal.SIGINT)
job.wait()
finally:
for job in jobs:
if not job.finished:
job.kill()
Same comment here for the job.finished check + job.kill(). Or is the explicit check there to save an RPC call?
self._running_measurements = []
self._finished_measurements = jobs
- def collect_results(self):
results = []
for job in self._finished_measurements:
job_results = self._process_job(job)
results.extend(job_results)
return results
- def _process_job(self, job):
host = job.host
job_results = {}
for sample in job.result["data"]:
parsed_sample = self._parse_sample(sample)
for cpu, cpu_intervals in parsed_sample.items():
if cpu not in job_results:
job_results[cpu] = StatCPUMeasurementResults(host, cpu)
cpu_results = job_results[cpu]
cpu_results.update_intervals(cpu_intervals)
return job_results.values()
- def _parse_sample(self, sample):
result = {}
duration = sample["duration"]
for key, value in sample.items():
if key.startswith("cpu"):
result[key] = self._create_cpu_intervals(duration, value)
return result
- def _create_cpu_intervals(self, duration, cpu_intervals):
result = {}
for key, value in cpu_intervals.items():
result[key] = PerfInterval(value, duration, "time units")
return result
diff --git a/lnst/RecipeCommon/TRexMeasurementTool.py b/lnst/RecipeCommon/Perf/Measurements/TRexMeasurement.py
similarity index 100%
rename from lnst/RecipeCommon/TRexMeasurementTool.py
rename to lnst/RecipeCommon/Perf/Measurements/TRexMeasurement.py
diff --git a/lnst/RecipeCommon/Perf/Measurements/__init__.py b/lnst/RecipeCommon/Perf/Measurements/__init__.py
new file mode 100644
index 0000000..781e641
--- /dev/null
+++ b/lnst/RecipeCommon/Perf/Measurements/__init__.py
@@ -0,0 +1,3 @@
+from lnst.RecipeCommon.Perf.Measurements.BaseFlowMeasurement import Flow
+from lnst.RecipeCommon.Perf.Measurements.IperfFlowMeasurement import IperfFlowMeasurement
+from lnst.RecipeCommon.Perf.Measurements.StatCPUMeasurement import StatCPUMeasurement
diff --git a/lnst/Recipes/ENRT/BaseEnrtRecipe.py b/lnst/Recipes/ENRT/BaseEnrtRecipe.py
index a26d999..d7d1aec 100644
--- a/lnst/Recipes/ENRT/BaseEnrtRecipe.py
+++ b/lnst/Recipes/ENRT/BaseEnrtRecipe.py
@@ -1,4 +1,3 @@
from lnst.Common.LnstError import LnstError
from lnst.Common.Parameters import Param, IntParam, StrParam, BoolParam
from lnst.Common.IpAddress import AF_INET, AF_INET6
@@ -8,7 +7,9 @@
from lnst.Controller.Recipe import BaseRecipe
from lnst.RecipeCommon.Ping import PingTestAndEvaluate, PingConf
from lnst.RecipeCommon.Perf.Recipe import Recipe as PerfRecipe
from lnst.RecipeCommon.Perf.Recipe import RecipeConf as PerfRecipeConf
-from lnst.RecipeCommon.IperfMeasurementTool import IperfMeasurementTool
+from lnst.RecipeCommon.Perf.Measurements import Flow as PerfFlow
+from lnst.RecipeCommon.Perf.Measurements import IperfFlowMeasurement
+from lnst.RecipeCommon.Perf.Measurements import StatCPUMeasurement
class EnrtConfiguration(object):
    def __init__(self):
@@ -79,14 +80,16 @@ class BaseEnrtRecipe(PingTestAndEvaluate, PerfRecipe):
perf_duration = IntParam(default=60)
perf_iterations = IntParam(default=5)
- perf_streams = IntParam(default=1)
perf_parallel_streams = IntParam(default=1)
perf_msg_size = IntParam(default=123)
perf_usr_comment = StrParam(default="")
perf_max_deviation = IntParam(default=10) #TODO required?
- perf_tool = Param(default=IperfMeasurementTool)
net_perf_tool = Param(default=IperfFlowMeasurement)
cpu_perf_tool = Param(default=StatCPUMeasurement)
def test(self):
    main_config = self.test_wide_configuration()
@@ -188,8 +191,22 @@ class BaseEnrtRecipe(PingTestAndEvaluate, PerfRecipe):
server_bind = server_nic.ips_filter(family=family)[0]
for perf_test in self.params.perf_tests:
flow = PerfFlow(
type = perf_test,
generator = client_netns,
generator_bind = client_bind,
receiver = server_netns,
receiver_bind = server_bind,
msg_size = self.params.perf_msg_size,
duration = self.params.perf_duration,
parallel_streams = self.params.perf_parallel_streams)
flow_measurement = self.params.net_perf_tool([flow])
yield PerfRecipeConf(
measurements=[ ],
measurements=[
self.params.cpu_perf_tool([client_netns, server_netns]),
flow_measurement
],
iterations=self.params.perf_iterations)
def _pin_dev_interrupts(self, dev, cpu):
--
2.19.1