From: Ondrej Lichtner <olichtne@redhat.com>
First of all, this includes the reimplementation of the TRexMeasurement module and class as the TRexFlowMeasurement class, which implements the BaseFlowMeasurement API and can be easily plugged into Perf.Recipe as a measurement. That said, it does have some restrictions specific to TRex:
* It still requires the trex_dir parameter telling it where to look for the TRex application.
* The measurement is port based but the configuration is flow based. Each port currently supports generation of a single flow, so that's what is expected on the configuration side. However, results are reported per port (with an association to the generated flow). It's important to note that while the "tx_rate" statistics represent the generated flow, the "rx_rate" statistics only describe received packets, regardless of which flow they belong to. See the configuration sketch below.
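To illustrate the configuration side, a single-flow measurement could be put together roughly like this (a minimal sketch; the generator_host/dut_host objects, the NIC variables and the trex_dir value are hypothetical placeholders, while the PerfFlow fields mirror the ones used by generate_perf_config in the diff below):

    # one PerfFlow describes one generated flow, bound to one TRex port
    flow = PerfFlow(
        type="pvp_loop_rate",
        generator=generator_host,
        generator_bind=dict(mac_addr=gen_nic.hwaddr,
                            pci_addr=gen_nic.bus_info,
                            ip_addr=gen_nic.ips[0]),
        receiver=dut_host,
        receiver_bind=dict(mac_addr=dut_nic.hwaddr,
                           pci_addr=dut_nic.bus_info,
                           ip_addr=dut_nic.ips[0]),
        msg_size=64,
        duration=60,
        parallel_streams=1)

    # one flow per port; results are reported per port
    measurement = TRexFlowMeasurement([flow], trex_dir="/opt/trex")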
The OvSDPDKPvPRecipe class was updated to work with the redesigned implementation of the PerfRecipe base class and its methods for running measurements and reporting results.
The OvSDPDKPvPRecipe now also requests a StatCPUMeasurement for all the hosts involved in the test.
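Put together, the perf configuration that the recipe builds now looks roughly like this (a sketch based on the generate_perf_config hunk below; the host variables are placeholders for the hosts from the test configuration):

    return PerfRecipeConf(
        measurements=[
            # CPU utilization sampled on every involved host
            StatCPUMeasurement([generator_host, dut_host, guest_host]),
            # the TRex flows configured above
            TRexFlowMeasurement(flows, self.params.trex_dir),
        ],
        iterations=self.params.perf_iterations)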
Signed-off-by: Ondrej Lichtner <olichtne@redhat.com>
---
 .../Perf/Measurements/TRexFlowMeasurement.py | 151 ++++++++++++++++++
 .../Perf/Measurements/TRexMeasurement.py     |  87 ----------
 .../Perf/Measurements/__init__.py            |   1 +
 lnst/Recipes/ENRT/OvS_DPDK_PvP.py            |  46 ++++--
 4 files changed, 185 insertions(+), 100 deletions(-)
 create mode 100644 lnst/RecipeCommon/Perf/Measurements/TRexFlowMeasurement.py
 delete mode 100644 lnst/RecipeCommon/Perf/Measurements/TRexMeasurement.py
diff --git a/lnst/RecipeCommon/Perf/Measurements/TRexFlowMeasurement.py b/lnst/RecipeCommon/Perf/Measurements/TRexFlowMeasurement.py
new file mode 100644
index 0000000..a873577
--- /dev/null
+++ b/lnst/RecipeCommon/Perf/Measurements/TRexFlowMeasurement.py
@@ -0,0 +1,151 @@
+import time
+import signal
+from lnst.Controller.RecipeResults import ResultLevel
+from lnst.RecipeCommon.Perf.Measurements.MeasurementError import MeasurementError
+
+from lnst.RecipeCommon.Perf.Results import PerfInterval
+from lnst.RecipeCommon.Perf.Results import SequentialPerfResult
+from lnst.RecipeCommon.Perf.Results import ParallelPerfResult
+
+from lnst.RecipeCommon.Perf.Measurements.BaseFlowMeasurement import BaseFlowMeasurement
+from lnst.RecipeCommon.Perf.Measurements.BaseFlowMeasurement import NetworkFlowTest
+from lnst.RecipeCommon.Perf.Measurements.BaseFlowMeasurement import FlowMeasurementResults
+
+from lnst.Tests.TRex import TRexServer, TRexClient
+
+class TRexFlowMeasurement(BaseFlowMeasurement):
+    def __init__(self, flows, trex_dir):
+        self._flows = flows
+        self._trex_dir = trex_dir
+        self._running_measurements = []
+        self._finished_measurements = []
+
+    def start(self):
+        if len(self._running_measurements) > 0:
+            raise MeasurementError("Measurement already running!")
+
+        tests = self._prepare_tests(self._flows)
+
+        for test in tests:
+            test.server_job.start(bg=True)
+
+        for test in tests:
+            test.client_job.start(bg=True)
+
+        self._running_measurements = tests
+
+    def finish(self):
+        tests = self._running_measurements
+        try:
+            for test in tests:
+                client_test = test.client_job.what
+                test.client_job.wait(timeout=client_test.runtime_estimate())
+
+                test.server_job.kill(signal.SIGINT)
+                test.server_job.wait(5)
+        finally:
+            for test in tests:
+                if not test.server_job.finished:
+                    test.server_job.kill()
+                if not test.client_job.finished:
+                    test.client_job.kill()
+
+        self._running_measurements = []
+        self._finished_measurements = tests
+
+    def _prepare_tests(self, flows):
+        tests = []
+
+        flows_by_generator = self._flows_by_generator(flows)
+        for generator, flows in flows_by_generator.items():
+            flow_tuples = [(flow.generator_bind, flow.receiver_bind)
+                           for flow in flows]
+            server_job = generator.prepare_job(
+                    TRexServer(
+                        trex_dir=self._trex_dir,
+                        flows=flow_tuples,
+                        cores=["2", "3", "4"]))
+            client_job = generator.prepare_job(
+                    TRexClient(
+                        trex_dir=self._trex_dir,
+                        ports=range(len(flow_tuples)),
+                        flows=flow_tuples,
+                        duration=flows[0].duration,
+                        msg_size=flows[0].msg_size))
+
+            test = NetworkFlowTest(flows, server_job, client_job)
+            tests.append(test)
+        return tests
+
+    def collect_results(self):
+        tests = self._finished_measurements
+
+        results = []
+        for test in tests:
+            for port, flow in enumerate(test.flow):
+                flow_results = self._parse_results_by_port(
+                        test.client_job, port, flow)
+                results.append(flow_results)
+
+        return results
+
+    def _flows_by_generator(self, flows):
+        result = dict()
+        for flow in flows:
+            if flow.generator in result:
+                result[flow.generator].append(flow)
+            else:
+                result[flow.generator] = [flow]
+
+        for generator, flows in result.items():
+            for flow in flows:
+                if (flow.duration != flows[0].duration or
+                        flow.msg_size != flows[0].msg_size):
+                    raise MeasurementError("Flows on the same generator need to have the same duration and msg_size at the moment")
+        return result
+
+    def _parse_results_by_port(self, job, port, flow):
+        results = FlowMeasurementResults(flow)
+        results.generator_results = SequentialPerfResult()
+        results.generator_cpu_stats = SequentialPerfResult()
+
+        results.receiver_results = SequentialPerfResult()
+        results.receiver_cpu_stats = SequentialPerfResult()
+
+        if not job.passed:
+            results.generator_results.append(PerfInterval(0, 0, "pkts"))
+            results.generator_cpu_stats.append(PerfInterval(0, 0, "cpu_percent"))
+            results.receiver_results.append(PerfInterval(0, 0, "pkts"))
+            results.receiver_cpu_stats.append(PerfInterval(0, 0, "cpu_percent"))
+        else:
+            prev_time = job.result["start_time"]
+            prev_tx_val = 0
+            prev_rx_val = 0
+            for i in job.result["data"]:
+                time_delta = i["timestamp"] - prev_time
+                tx_delta = i["measurement"][port]["opackets"] - prev_tx_val
+                rx_delta = i["measurement"][port]["ipackets"] - prev_rx_val
+                results.generator_results.append(PerfInterval(
+                    tx_delta,
+                    time_delta,
+                    "pkts"))
+                results.receiver_results.append(PerfInterval(
+                    rx_delta,
+                    time_delta,
+                    "pkts"))
+
+                prev_time = i["timestamp"]
+                prev_tx_val = i["measurement"][port]["opackets"]
+                prev_rx_val = i["measurement"][port]["ipackets"]
+
+                cpu_delta = i["measurement"]["global"]["cpu_util"]
+                results.generator_cpu_stats.append(PerfInterval(
+                    cpu_delta,
+                    time_delta,
+                    "cpu_percent"))
+                results.receiver_cpu_stats.append(PerfInterval(
+                    cpu_delta,
+                    time_delta,
+                    "cpu_percent"))
+        return results
diff --git a/lnst/RecipeCommon/Perf/Measurements/TRexMeasurement.py b/lnst/RecipeCommon/Perf/Measurements/TRexMeasurement.py
deleted file mode 100644
index 96abdc2..0000000
--- a/lnst/RecipeCommon/Perf/Measurements/TRexMeasurement.py
+++ /dev/null
@@ -1,87 +0,0 @@
-import time
-import signal
-import logging
-from lnst.Common.IpAddress import ipaddress
-from lnst.Controller.Recipe import RecipeError
-from lnst.Controller.RecipeResults import ResultLevel
-from lnst.RecipeCommon.Perf import PerfConf, PerfMeasurementTool
-from lnst.RecipeCommon.PerfResult import PerfInterval, StreamPerf
-from lnst.RecipeCommon.PerfResult import MultiStreamPerf
-
-from lnst.Tests.TRex import TRexServer, TRexClient
-
-class TRexMeasurementTool(PerfMeasurementTool):
-    def __init__(self, trex_dir):
-        self._trex_dir = trex_dir
-
-    def perf_measure(self, perf_conf):
-        generator = perf_conf.generator
-
-        flows = []
-        for src, dst in zip(perf_conf.generator_bind, perf_conf.receiver_bind):
-            flows.append((
-                dict(mac_addr=src.hwaddr,
-                     pci_addr=src.bus_info,
-                     ip_addr=src.ips[0]),
-                dict(mac_addr=dst.hwaddr,
-                     pci_addr=dst.bus_info,
-                     ip_addr=dst.ips[0])))
-
-        try:
-            server = generator.run(
-                    TRexServer(
-                        trex_dir=self._trex_dir,
-                        flows=flows,
-                        cores=["2", "3", "4"]),
-                    bg=True)
-
-            #wait for server to start up
-            #TODO better options??
-            time.sleep(5)
-
-            test = TRexClient(
-                    trex_dir=self._trex_dir,
-                    ports=range(len(flows)),
-                    flows=flows,
-                    duration=perf_conf.duration,
-                    msg_size=perf_conf.msg_size)
-            client = generator.run(
-                    test,
-                    timeout=test.runtime_estimate())
-        finally:
-            server.kill(signal.SIGINT)
-            if not server.wait(5):
-                server.kill(signal.SIGKILL)
-
-        client_result = None
-        if client.passed:
-            tx_result = MultiStreamPerf()
-            rx_result = MultiStreamPerf()
-            for port in range(len(flows)):
-                tx_stream = StreamPerf()
-                rx_stream = StreamPerf()
-
-                prev_time = client.result["start_time"]
-                prev_tx_val = 0
-                prev_rx_val = 0
-                for i in client.result["data"]:
-                    time_delta = i["timestamp"] - prev_time
-                    tx_delta = i["measurement"][port]["opackets"] - prev_tx_val
-                    rx_delta = i["measurement"][port]["ipackets"] - prev_rx_val
-                    tx_stream.append(PerfInterval(
-                        tx_delta,
-                        time_delta,
-                        "pkts"))
-                    rx_stream.append(PerfInterval(
-                        rx_delta,
-                        time_delta,
-                        "pkts"))
-
-                    prev_time = i["timestamp"]
-                    prev_tx_val = i["measurement"][port]["opackets"]
-                    prev_rx_val = i["measurement"][port]["ipackets"]
-
-                tx_result.append(tx_stream)
-                rx_result.append(rx_stream)
-
-            return tx_result, rx_result
diff --git a/lnst/RecipeCommon/Perf/Measurements/__init__.py b/lnst/RecipeCommon/Perf/Measurements/__init__.py
index 781e641..0b98cd6 100644
--- a/lnst/RecipeCommon/Perf/Measurements/__init__.py
+++ b/lnst/RecipeCommon/Perf/Measurements/__init__.py
@@ -1,3 +1,4 @@
 from lnst.RecipeCommon.Perf.Measurements.BaseFlowMeasurement import Flow
 from lnst.RecipeCommon.Perf.Measurements.IperfFlowMeasurement import IperfFlowMeasurement
+from lnst.RecipeCommon.Perf.Measurements.TRexFlowMeasurement import TRexFlowMeasurement
 from lnst.RecipeCommon.Perf.Measurements.StatCPUMeasurement import StatCPUMeasurement
diff --git a/lnst/Recipes/ENRT/OvS_DPDK_PvP.py b/lnst/Recipes/ENRT/OvS_DPDK_PvP.py
index 860f060..aa8af24 100644
--- a/lnst/Recipes/ENRT/OvS_DPDK_PvP.py
+++ b/lnst/Recipes/ENRT/OvS_DPDK_PvP.py
@@ -10,8 +10,12 @@ from lnst.Common.IpAddress import ipaddress
 from lnst.RecipeCommon.Ping import PingTestAndEvaluate, PingConf
 from lnst.Tests import Ping
 from lnst.Tests.TestPMD import TestPMD
-from lnst.RecipeCommon.Perf import PerfTestAndEvaluate, PerfConf
-from lnst.RecipeCommon.TRexMeasurementTool import TRexMeasurementTool
+
+from lnst.RecipeCommon.Perf.Recipe import Recipe as PerfRecipe
+from lnst.RecipeCommon.Perf.Recipe import RecipeConf as PerfRecipeConf
+from lnst.RecipeCommon.Perf.Measurements import Flow as PerfFlow
+from lnst.RecipeCommon.Perf.Measurements import TRexFlowMeasurement
+from lnst.RecipeCommon.Perf.Measurements import StatCPUMeasurement
 from lnst.RecipeCommon.LibvirtControl import LibvirtControl
@@ -43,7 +47,7 @@ class PvPTestConf(object):
         self.dut = self.DUTConf()
         self.guest = self.GuestConf()
 
-class OvSDPDKPvPRecipe(PingTestAndEvaluate, PerfTestAndEvaluate):
+class OvSDPDKPvPRecipe(PingTestAndEvaluate, PerfRecipe):
     m1 = HostReq()
     m1.eth0 = DeviceReq(label="net1", driver=RecipeParam("driver"))
     m1.eth1 = DeviceReq(label="net1", driver=RecipeParam("driver"))
@@ -72,6 +76,8 @@ class OvSDPDKPvPRecipe(PingTestAndEvaluate, PerfTestAndEvaluate):
     dev_intr_cpu = IntParam(default=0)
 
+    cpu_perf_tool = Param(default=StatCPUMeasurement)
+
     perf_duration = IntParam(default=60)
     perf_iterations = IntParam(default=5)
     perf_msg_size = IntParam(default=64)
@@ -132,7 +138,7 @@ class OvSDPDKPvPRecipe(PingTestAndEvaluate, PerfTestAndEvaluate):
             perf_config = self.generate_perf_config(config)
             result = self.perf_test(perf_config)
-            self.perf_evaluate_and_report(perf_config, result, baseline=None)
+            self.perf_report_and_evaluate(result)
         finally:
             self.test_wide_deconfiguration(config)
@@ -175,18 +181,32 @@ class OvSDPDKPvPRecipe(PingTestAndEvaluate, PerfTestAndEvaluate):
         return config
 
     def generate_perf_config(self, config):
-        conf = PerfConf(
-            perf_tool = TRexMeasurementTool(self.params.trex_dir),
-            test_type = "pvp_loop_rate",
+        flows = []
+        for src_nic, dst_nic in zip(config.generator.nics, config.dut.nics):
+            src_bind = dict(mac_addr=src_nic.hwaddr,
+                            pci_addr=src_nic.bus_info,
+                            ip_addr=src_nic.ips[0])
+            dst_bind = dict(mac_addr=dst_nic.hwaddr,
+                            pci_addr=dst_nic.bus_info,
+                            ip_addr=dst_nic.ips[0])
+            flows.append(PerfFlow(
+                type = "pvp_loop_rate",
                 generator = config.generator.host,
-                generator_bind = config.generator.nics,
+                generator_bind = src_bind,
                 receiver = config.dut.host,
-                receiver_bind = config.dut.nics,
+                receiver_bind = dst_bind,
                 msg_size = self.params.perf_msg_size,
                 duration = self.params.perf_duration,
-                iterations = self.params.perf_iterations,
-                streams = self.params.perf_streams)
-        return conf
+                parallel_streams = self.params.perf_streams))
+
+        return PerfRecipeConf(
+            measurements=[
+                self.params.cpu_perf_tool([config.generator.host,
+                                           config.dut.host,
+                                           config.guest.host]),
+                TRexFlowMeasurement(flows, self.params.trex_dir)
+            ],
+            iterations=self.params.perf_iterations)
 
     def test_wide_deconfiguration(self, config):
         try:
@@ -358,7 +378,7 @@ class OvSDPDKPvPRecipe(PingTestAndEvaluate, PerfTestAndEvaluate):
         guest_ip_job = host.run("gethostip -d {}".format(guest_conf.name))
         guest_ip = guest_ip_job.stdout.strip()
-        guest = self.ctl.connect_host(guest_ip, timeout=60)
+        guest = self.ctl.connect_host(guest_ip, timeout=60, machine_id="guest1")
         guest_conf.host = guest
 
         for i, nic in enumerate(guest_conf.vhost_nics):