From: Ondrej Lichtner <olichtne@redhat.com>
The evaluate_results method signature changed in cd5328db8b0196a6102ebea54bdea1ce632cfe46 and the ping evaluators weren't updated...
Signed-off-by: Ondrej Lichtner <olichtne@redhat.com>
---
 lnst/RecipeCommon/Ping/Evaluators/RatePingEvaluator.py | 10 +++++++++-
 .../Ping/Evaluators/ZeroPassPingEvaluator.py           | 10 +++++++++-
 2 files changed, 18 insertions(+), 2 deletions(-)
diff --git a/lnst/RecipeCommon/Ping/Evaluators/RatePingEvaluator.py b/lnst/RecipeCommon/Ping/Evaluators/RatePingEvaluator.py
index 6760d32..f2a3dce 100644
--- a/lnst/RecipeCommon/Ping/Evaluators/RatePingEvaluator.py
+++ b/lnst/RecipeCommon/Ping/Evaluators/RatePingEvaluator.py
@@ -1,3 +1,6 @@
+from typing import List, Any
+
+from lnst.Controller.Recipe import BaseRecipe
 from lnst.RecipeCommon.BaseResultEvaluator import BaseResultEvaluator

 class RatePingEvaluator(BaseResultEvaluator):
@@ -12,7 +15,12 @@ def __init__(self, min_rate=None, max_rate=None, rate=None):
                 self.__class__.__name__)
             )

-    def evaluate_results(self, recipe, result):
+    def evaluate_results(
+        self,
+        recipe: BaseRecipe,
+        recipe_conf: Any,
+        result: List[Any],
+    ):
         result_status = True
         ping_rate = int(result['rate'])

diff --git a/lnst/RecipeCommon/Ping/Evaluators/ZeroPassPingEvaluator.py b/lnst/RecipeCommon/Ping/Evaluators/ZeroPassPingEvaluator.py
index 350e157..afb3d02 100644
--- a/lnst/RecipeCommon/Ping/Evaluators/ZeroPassPingEvaluator.py
+++ b/lnst/RecipeCommon/Ping/Evaluators/ZeroPassPingEvaluator.py
@@ -1,7 +1,15 @@
+from typing import List, Any
+
+from lnst.Controller.Recipe import BaseRecipe
 from lnst.RecipeCommon.BaseResultEvaluator import BaseResultEvaluator

 class ZeroPassPingEvaluator(BaseResultEvaluator):
-    def evaluate_results(self, recipe, result):
+    def evaluate_results(
+        self,
+        recipe: BaseRecipe,
+        recipe_conf: Any,
+        result: List[Any],
+    ):
         result_status = True
         trans_packets = int(result['trans_pkts'])
         recv_packets = int(result['recv_pkts'])
From: Ondrej Lichtner <olichtne@redhat.com>
We shouldn't use "None" for initialization of timestamps as it breaks any time alignment/time slice related code that works with these results.
Use the current timestamp of the controller (the code that creates the PerfResult object) as "good enough" in cases where no timestamp is returned by the test tool used.
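For illustration (values made up): in Python 3, the ordering comparisons that time alignment code relies on reject None outright, so a single unset timestamp breaks the whole computation:

    # one interval was created with timestamp=None
    timestamps = [1611669000.0, None]
    min(timestamps)
    # TypeError: '<' not supported between instances of 'NoneType' and 'float'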
Signed-off-by: Ondrej Lichtner <olichtne@redhat.com>
---
 .../Perf/Measurements/IperfFlowMeasurement.py | 8 +++++---
 .../Perf/Measurements/TRexFlowMeasurement.py  | 9 +++++----
 2 files changed, 10 insertions(+), 7 deletions(-)
diff --git a/lnst/RecipeCommon/Perf/Measurements/IperfFlowMeasurement.py b/lnst/RecipeCommon/Perf/Measurements/IperfFlowMeasurement.py
index 10088f4..e52f262 100644
--- a/lnst/RecipeCommon/Perf/Measurements/IperfFlowMeasurement.py
+++ b/lnst/RecipeCommon/Perf/Measurements/IperfFlowMeasurement.py
@@ -167,7 +167,7 @@ def _prepare_client(self, flow):
     def _parse_job_streams(self, job):
         result = ParallelPerfResult()
         if not job.passed:
-            result.append(PerfInterval(0, 0, "bits", None))
+            result.append(PerfInterval(0, 0, "bits", time.time()))
         else:
             for i in job.result["data"]["end"]["streams"]:
                 result.append(SequentialPerfResult())
@@ -183,7 +183,9 @@ def _parse_job_streams(self, job):
     def _parse_job_cpu(self, job):
         if not job.passed:
-            return PerfInterval(0, 0, "cpu_percent", None)
+            return PerfInterval(0, 0, "cpu_percent", time.time())
         else:
             cpu_percent = job.result["data"]["end"]["cpu_utilization_percent"]["host_total"]
-            return PerfInterval(cpu_percent, 1, "cpu_percent", None)
+            job_start = job.result["data"]["start"]["timestamp"]["timesecs"]
+            duration = job.result["data"]["end"]["sum_received"]["seconds"]
+            return PerfInterval(cpu_percent*duration, duration, "cpu_percent", job_start)

diff --git a/lnst/RecipeCommon/Perf/Measurements/TRexFlowMeasurement.py b/lnst/RecipeCommon/Perf/Measurements/TRexFlowMeasurement.py
index b780842..dca58b1 100644
--- a/lnst/RecipeCommon/Perf/Measurements/TRexFlowMeasurement.py
+++ b/lnst/RecipeCommon/Perf/Measurements/TRexFlowMeasurement.py
@@ -147,10 +147,11 @@ def _parse_results_by_port(self, job, port, flow):
         results.receiver_cpu_stats = SequentialPerfResult()

         if not job.passed:
-            results.generator_results.append(PerfInterval(0, 0, "packets", None))
-            results.generator_cpu_stats.append(PerfInterval(0, 0, "cpu_percent", None))
-            results.receiver_results.append(PerfInterval(0, 0, "packets", None))
-            results.receiver_cpu_stats.append(PerfInterval(0, 0, "cpu_percent", None))
+            timestamp = time.time()
+            results.generator_results.append(PerfInterval(0, 0, "packets", timestamp))
+            results.generator_cpu_stats.append(PerfInterval(0, 0, "cpu_percent", timestamp))
+            results.receiver_results.append(PerfInterval(0, 0, "packets", timestamp))
+            results.receiver_cpu_stats.append(PerfInterval(0, 0, "cpu_percent", timestamp))
         else:
             prev_time = job.result["start_time"]
             prev_tx_val = 0
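For context, a sketch of the arithmetic behind the cpu_percent change above, assuming the average of a PerfInterval is computed as value/duration (values made up): scaling iperf's whole-test utilization by the job duration keeps the computed average equal to what iperf reported, while giving the interval a real start and length:

    cpu_percent = 42.0              # iperf's whole-test utilization
    duration = 60.0                 # seconds, from sum_received
    value = cpu_percent * duration  # 2520.0 stored in the PerfInterval
    value / duration                # 42.0 -> the average is unchanged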
From: Ondrej Lichtner <olichtne@redhat.com>
Extending the PerfResult object api with the following methods/properties:
* start_timestamp
* end_timestamp
* time_slice(start, end)
These unify the api for PerfInterval, SequentialPerfResult and ParallelPerfResult.
This also means that "timestamp" itself in PerfInterval is renamed to "start_timestamp" to avoid duplication.
The "time_slice" method returns an object of the same type (PerfInterval, SequentialPerfResult, ParallelPerfResult or other class derived from PerfResult) that is "sliced" to only contain measurement data from the selected timestamp period restricted by the "start" and "end" parameters.
The implementation depends on the specific class:
* PerfInterval serves as the smallest unit and creates a new PerfInterval object that has a shorter *duration* (the intersection of the current start-end and the requested start-end), and has its *value* adjusted by the ratio of new_duration/old_duration. This ensures that the "average" calculated from this PerfInterval is consistent after the time_slice.
* PerfList derivatives (SequentialPerfResult and ParallelPerfResult) recursively call "time_slice" on all the individual items of the sequence.
In case an empty slice would be created (mismatch of start-end intervals), an EmptySlice exception is thrown.
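For illustration (values made up), slicing a 10 second PerfInterval down to a 4 second window scales the value by the overlap ratio, so the computed average stays the same, and a window with no overlap raises EmptySlice:

    interval = PerfInterval(1000, 10.0, "bits", 100.0)
    # covers 100.0s-110.0s, average 100 bits/s

    sliced = interval.time_slice(104.0, 108.0)
    # -> PerfInterval(400.0, 4.0, "bits", 104.0), average still 100 bits/s

    interval.time_slice(200.0, 210.0)
    # -> raises EmptySlice, the requested window doesn't overlap at all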
This commit also adjusts the PerfRecipe "align_data" related code to use this new refactored code.
Signed-off-by: Ondrej Lichtner <olichtne@redhat.com>
---
 .../Perf/Measurements/BaseFlowMeasurement.py | 48 ++++++------
 .../Perf/Measurements/StatCPUMeasurement.py  | 14 +---
 lnst/RecipeCommon/Perf/Recipe.py             |  2 +-
 lnst/RecipeCommon/Perf/Results.py            | 74 ++++++++++++++++++-
 4 files changed, 96 insertions(+), 42 deletions(-)
diff --git a/lnst/RecipeCommon/Perf/Measurements/BaseFlowMeasurement.py b/lnst/RecipeCommon/Perf/Measurements/BaseFlowMeasurement.py
index dcb0d2a..a8e9328 100644
--- a/lnst/RecipeCommon/Perf/Measurements/BaseFlowMeasurement.py
+++ b/lnst/RecipeCommon/Perf/Measurements/BaseFlowMeasurement.py
@@ -165,40 +165,34 @@ def receiver_cpu_stats(self, value):
     @property
     def start_timestamp(self):
-        return max([seq_result[0].timestamp for seq_result in self.generator_results])
+        return min(
+            [
+                self.generator_results.start_timestamp,
+                self.generator_cpu_stats.start_timestamp,
+                self.receiver_results.start_timestamp,
+                self.receiver_cpu_stats.start_timestamp,
+            ]
+        )

     @property
     def end_timestamp(self):
-        return min([seq_result[-1].timestamp for seq_result in self.generator_results])
+        return max(
+            [
+                self.generator_results.end_timestamp,
+                self.generator_cpu_stats.end_timestamp,
+                self.receiver_results.end_timestamp,
+                self.receiver_cpu_stats.end_timestamp,
+            ]
+        )

-    def align_data(self, start, end):
+    def time_slice(self, start, end):
         result_copy = FlowMeasurementResults(self.measurement, self.flow)

-        # NOTE: iperf reports the cpu utilization for the whole test
-        # period, not each second, so the CPU samples cannot be aligned
-        result_copy.generator_cpu_stats = self.generator_cpu_stats
-        result_copy.receiver_cpu_stats = self.receiver_cpu_stats
-
-        result_copy.generator_results = ParallelPerfResult()
-        result_copy.receiver_results = ParallelPerfResult()
-
-        for stream in self.generator_results:
-            aligned_intervals = [
-                interval
-                for interval in stream
-                if interval.timestamp >= start and interval.timestamp <= end
-            ]
-
-            result_copy.generator_results.append(SequentialPerfResult(aligned_intervals))
-
-        for stream in self.receiver_results:
-            aligned_intervals = [
-                interval
-                for interval in stream
-                if interval.timestamp >= start and interval.timestamp <= end
-            ]
+        result_copy.generator_cpu_stats = self.generator_cpu_stats.time_slice(start, end)
+        result_copy.receiver_cpu_stats = self.receiver_cpu_stats.time_slice(start, end)
-            result_copy.receiver_results.append(SequentialPerfResult(aligned_intervals))
+        result_copy.generator_results = self.generator_results.time_slice(start, end)
+        result_copy.receiver_results = self.receiver_results.time_slice(start, end)

         return result_copy
diff --git a/lnst/RecipeCommon/Perf/Measurements/StatCPUMeasurement.py b/lnst/RecipeCommon/Perf/Measurements/StatCPUMeasurement.py
index 49aa262..c5b50fe 100644
--- a/lnst/RecipeCommon/Perf/Measurements/StatCPUMeasurement.py
+++ b/lnst/RecipeCommon/Perf/Measurements/StatCPUMeasurement.py
@@ -28,26 +28,20 @@ def utilization(self):
     @property
     def start_timestamp(self):
-        return self._data["user"][0].timestamp
+        return min([item.start_timestamp for item in self._data.values()])

     @property
     def end_timestamp(self):
-        return self._data["user"][-1].timestamp
+        return max([item.end_timestamp for item in self._data.values()])

-    def align_data(self, start, end):
+    def time_slice(self, start, end):
         result_copy = StatCPUMeasurementResults(
             self.measurement, self.host, self.cpu
         )
         for cpu_state, intervals in self._data.items():
-            aligned_interval = [
-                interval
-                for interval in intervals
-                if interval.timestamp >= start and interval.timestamp <= end
-            ]
-
-            result_copy._data[cpu_state] = SequentialPerfResult(aligned_interval)
+            result_copy._data[cpu_state] = intervals.time_slice(start, end)
         return result_copy

diff --git a/lnst/RecipeCommon/Perf/Recipe.py b/lnst/RecipeCommon/Perf/Recipe.py
index 82f9649..07f88fd 100644
--- a/lnst/RecipeCommon/Perf/Recipe.py
+++ b/lnst/RecipeCommon/Perf/Recipe.py
@@ -117,7 +117,7 @@ def time_aligned_results(self) -> "RecipeResults":
         for i, measurement_iteration in enumerate(measurement_results):
             aligned_measurement_results = []
             for result in measurement_iteration:
-                aligned_measurement_result = result.align_data(
+                aligned_measurement_result = result.time_slice(
                     timestamps[i][0], timestamps[i][1]
                 )
                 aligned_measurement_results.append(

diff --git a/lnst/RecipeCommon/Perf/Results.py b/lnst/RecipeCommon/Perf/Results.py
index 61a9ef8..aaca05f 100644
--- a/lnst/RecipeCommon/Perf/Results.py
+++ b/lnst/RecipeCommon/Perf/Results.py
@@ -1,6 +1,9 @@
 from lnst.Common.LnstError import LnstError
 from lnst.Common.Utils import std_deviation
+class EmptySlice(LnstError):
+    pass
+
 class PerfStatMixin(object):
     @property
     def average(self):
@@ -26,6 +29,17 @@ def duration(self):
     def unit(self):
         raise NotImplementedError()

+    @property
+    def start_timestamp(self):
+        raise NotImplementedError()
+
+    @property
+    def end_timestamp(self):
+        raise NotImplementedError()
+
+    def time_slice(self, start, end):
+        raise NotImplementedError()
+
 class PerfInterval(PerfResult):
     def __init__(self, value, duration, unit, timestamp):
         self._value = value
@@ -46,9 +60,13 @@ def unit(self):
         return self._unit

     @property
-    def timestamp(self):
+    def start_timestamp(self):
         return self._timestamp

+    @property
+    def end_timestamp(self):
+        return self._timestamp + self.duration
+
     @property
     def std_deviation(self):
         return 0
@@ -57,6 +75,20 @@ def __str__(self):
         return "{:.2f} {} in {:.2f} seconds".format(
             float(self.value), self.unit, float(self.duration))

+    def time_slice(self, start, end):
+        if end < self.start_timestamp or start > self.end_timestamp:
+            raise EmptySlice(
+                "current start, end {} {}; request start, end {}, {}".format(
+                    self.start_timestamp, self.end_timestamp, start, end,
+                )
+            )
+
+        new_start = max(self.start_timestamp, start)
+        new_end = min(self.end_timestamp, end)
+        new_duration = new_end - new_start
+        new_value = self.value * (new_duration/self.duration)
+        return PerfInterval(new_value, new_duration, self.unit, new_start)
+
 class PerfList(list):
     def __init__(self, iterable=[]):
         for i, item in enumerate(iterable):
@@ -123,7 +155,23 @@ def __setitem__(self, i, item):
         super(PerfList, self).__setitem__(i, item)
+    def time_slice(self, start, end):
+        result = self.__class__()
+        for item in self:
+            try:
+                item_slice = item.time_slice(start, end)
+                result.append(item_slice)
+            except EmptySlice:
+                continue
+        if len(result) == 0:
+            raise EmptySlice(
+                "current start, end {} {}; request start, end {}, {}".format(
+                    self.start_timestamp, self.end_timestamp, start, end,
+                )
+            )
+        return result
+
-class SequentialPerfResult(PerfResult, PerfList):
+class SequentialPerfResult(PerfList, PerfResult):
     @property
     def value(self):
         return sum([i.value for i in self])
@@ -139,14 +187,24 @@ def unit(self):
         else:
             return None

+    @property
+    def start_timestamp(self):
+        return self[0].start_timestamp
+
+    @property
+    def end_timestamp(self):
+        return self[-1].end_timestamp
+
-class ParallelPerfResult(PerfResult, PerfList):
+class ParallelPerfResult(PerfList, PerfResult):
     @property
     def value(self):
         return sum([i.value for i in self])

     @property
     def duration(self):
-        return max([i.duration for i in self])
+        min_start = min([item.start_timestamp for item in self])
+        max_end = max([item.end_timestamp for item in self])
+        return max_end - min_start

     @property
     def unit(self):
@@ -155,6 +213,14 @@ def unit(self):
         else:
             return None

+    @property
+    def start_timestamp(self):
+        return min([i.start_timestamp for i in self])
+
+    @property
+    def end_timestamp(self):
+        return max([i.end_timestamp for i in self])
+
 def result_averages_difference(a, b):
     if a is None or b is None:
         return None
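To make the composition concrete, a sketch of the refactored api in use (illustrative values; slicing recurses from the lists down to the individual PerfIntervals):

    stream = SequentialPerfResult([
        PerfInterval(1000, 1.0, "bits", 10.0),   # covers 10.0s-11.0s
        PerfInterval(1000, 1.0, "bits", 11.0),   # covers 11.0s-12.0s
    ])
    flows = ParallelPerfResult([stream])

    sliced = flows.time_slice(10.5, 11.5)
    (sliced.start_timestamp, sliced.end_timestamp)  # (10.5, 11.5)
    (sliced.value, sliced.duration)                 # (1000.0, 1.0)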
Tue, Jan 26, 2021 at 03:09:17PM CET, olichtne@redhat.com wrote:
> From: Ondrej Lichtner <olichtne@redhat.com>
>
> [... full patch quoted, trimmed ...]
>
> diff --git a/lnst/RecipeCommon/Perf/Measurements/StatCPUMeasurement.py b/lnst/RecipeCommon/Perf/Measurements/StatCPUMeasurement.py
> @@ -28,26 +28,20 @@ def utilization(self):
>      @property
>      def start_timestamp(self):
> -        return self._data["user"][0].timestamp
> +        return min([item.start_timestamp for item in self._data.values()])
Although this seems to be a correct way to get the timestamp, the way StatCPUMeasurement is implemented does not require it.

The timestamps are the same for all of 'user', 'system', 'irq', etc. for one sample. That's why I used the optimization before. I think you can do the same.
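Something like this would keep the old shortcut under the new naming (a sketch):

    @property
    def start_timestamp(self):
        # all cpu states share the same sample timestamps, so "user" suffices
        return self._data["user"].start_timestamp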
>      @property
>      def end_timestamp(self):
> -        return self._data["user"][-1].timestamp
> +        return max([item.end_timestamp for item in self._data.values()])
Same here.
On Wed, Jan 27, 2021 at 09:11:05AM +0100, Jan Tluka wrote:
> Tue, Jan 26, 2021 at 03:09:17PM CET, olichtne@redhat.com wrote:
> > [... full patch quoted, trimmed ...]
> >
> > diff --git a/lnst/RecipeCommon/Perf/Measurements/StatCPUMeasurement.py b/lnst/RecipeCommon/Perf/Measurements/StatCPUMeasurement.py
> > @@ -28,26 +28,20 @@ def utilization(self):
> >      @property
> >      def start_timestamp(self):
> > -        return self._data["user"][0].timestamp
> > +        return min([item.start_timestamp for item in self._data.values()])
>
> Although this seems to be a correct way to get the timestamp, the way
> StatCPUMeasurement is implemented does not require it.
>
> The timestamps are the same for all of 'user', 'system', 'irq', etc. for
> one sample. That's why I used the optimization before. I think you can
> do the same.
Agreed, but if StatCPUMeasurement ever changes, this may need to be updated and we could miss that. I'll do some profiling on this method to see how much of an optimization it really is and whether it's worth it.
-Ondrej
Wed, Jan 27, 2021 at 10:00:04AM CET, olichtne@redhat.com wrote:
> On Wed, Jan 27, 2021 at 09:11:05AM +0100, Jan Tluka wrote:
> > Tue, Jan 26, 2021 at 03:09:17PM CET, olichtne@redhat.com wrote:
> > > [... full patch quoted, trimmed ...]
> >
> > The timestamps are the same for all of 'user', 'system', 'irq', etc.
> > for one sample. That's why I used the optimization before. I think you
> > can do the same.
>
> Agreed, but if StatCPUMeasurement ever changes, this may need to be
> updated and we could miss that. I'll do some profiling on this method to
> see how much of an optimization it really is and whether it's worth it.
>
> -Ondrej
I completely agree. I'm ok with keeping this as-is. Let's see how the profiling goes.
On Wed, Jan 27, 2021 at 10:14:36AM +0100, Jan Tluka wrote:
> Wed, Jan 27, 2021 at 10:00:04AM CET, olichtne@redhat.com wrote:
> > [... trimmed ...]
> >
> > Agreed, but if StatCPUMeasurement ever changes, this may need to be
> > updated and we could miss that. I'll do some profiling on this method
> > to see how much of an optimization it really is and whether it's
> > worth it.
>
> I completely agree. I'm ok with keeping this as-is. Let's see how the
> profiling goes.
profiling outputs:
unoptimized version:
    time_slice      called 230 times -> 2969070 function calls in 1.011 seconds
    end_timestamp   called 230 times ->    8050 function calls in 0.003 seconds
    start_timestamp called 230 times ->    5750 function calls in 0.002 seconds

optimized version:
    time_slice      called 240 times -> 3098160 function calls in 1.105 seconds
    end_timestamp   called 240 times ->     960 function calls in 0.000 seconds
    start_timestamp called 240 times ->     720 function calls in 0.000 seconds
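(Output in this "N function calls in X seconds" form is what Python's cProfile prints; a harness along these lines would reproduce it, where "results", "start" and "end" stand for the measurement results object and slice boundaries of a finished run:)

    import cProfile

    cProfile.run("results.time_slice(start, end)", sort="cumulative")
    cProfile.run("results.start_timestamp", sort="cumulative")
    cProfile.run("results.end_timestamp", sort="cumulative")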
The 230 vs 240 call counts are probably influenced by some artifact of the length of my tests (I was using local VMs and "faking" a higher number of cpu cores), but it should be close enough for a relevant comparison.
The important numbers are the function calls caused by calling each function.
The "time_slice" call stays roughly the same - there's no code change or optimization done on it.
Both end_timestamp and start_timestamp can be ~8 times faster when looking at the number of function calls that each causes; however, looking at the time spent on execution, the difference is still minimal.
As such I think we're ok with not optimizing this for now.
-Ondrej
Wed, Jan 27, 2021 at 02:55:22PM CET, olichtne@redhat.com wrote:
On Wed, Jan 27, 2021 at 10:14:36AM +0100, Jan Tluka wrote:
Wed, Jan 27, 2021 at 10:00:04AM CET, olichtne@redhat.com wrote:
On Wed, Jan 27, 2021 at 09:11:05AM +0100, Jan Tluka wrote:
Tue, Jan 26, 2021 at 03:09:17PM CET, olichtne@redhat.com wrote:
From: Ondrej Lichtner olichtne@redhat.com
Extending the PerfResult object api with the following methods/properties:
- start_timestamp
- end_timestamp
- time_slice(start, end)
These unify the api for PerfInterval, SequentialPerfResult and ParallelPerfResult.
This also means that "timestamp" itself in PerfInterval is renamed to "start_timestamp" to avoid duplication.
The "time_slice" method returns an object of the same type (PerfInterval, SequentialPerfResult, ParallelPerfResult or other class derived from PerfResult) that is "sliced" to only contain measurement data from the selected timestamp period restricted by the "start" and "end" parameters.
The implementation depends on the specific class:
- PerfInterval serves as the smalles unit and creates a new PerfInterval
object that has a shorter *duration* (intersection of current start-end and requested start-end), and has the *value*, adjusted by the ration of new_duration/old_duration. This ensures that the "average" calculated from this PerfInterval is consistent after the time_slice
- PerfList derivatives (SequentialPerfResult and ParallelPerfResult),
recursively call "time_slice" in all the individual items of the sequence.
In case an empty slice would be created (mismatch of start-end intervals), an EmptySlice exception is thrown.
This commit also adjusts the PerfRecipe "align_data" related code to use this new refactored code.
Signed-off-by: Ondrej Lichtner olichtne@redhat.com
.../Perf/Measurements/BaseFlowMeasurement.py | 48 ++++++------ .../Perf/Measurements/StatCPUMeasurement.py | 14 +--- lnst/RecipeCommon/Perf/Recipe.py | 2 +- lnst/RecipeCommon/Perf/Results.py | 74 ++++++++++++++++++- 4 files changed, 96 insertions(+), 42 deletions(-)
diff --git a/lnst/RecipeCommon/Perf/Measurements/BaseFlowMeasurement.py b/lnst/RecipeCommon/Perf/Measurements/BaseFlowMeasurement.py index dcb0d2a..a8e9328 100644 --- a/lnst/RecipeCommon/Perf/Measurements/BaseFlowMeasurement.py +++ b/lnst/RecipeCommon/Perf/Measurements/BaseFlowMeasurement.py @@ -165,40 +165,34 @@ def receiver_cpu_stats(self, value):
@property def start_timestamp(self):
return max([seq_result[0].timestamp for seq_result in self.generator_results])
return min(
[
self.generator_results.start_timestamp,
self.generator_cpu_stats.start_timestamp,
self.receiver_results.start_timestamp,
self.receiver_cpu_stats.start_timestamp,
]
)
@property def end_timestamp(self):
return min([seq_result[-1].timestamp for seq_result in self.generator_results])
return max(
[
self.generator_results.end_timestamp,
self.generator_cpu_stats.end_timestamp,
self.receiver_results.end_timestamp,
self.receiver_cpu_stats.end_timestamp,
]
)
- def align_data(self, start, end):
- def time_slice(self, start, end): result_copy = FlowMeasurementResults(self.measurement, self.flow)
# NOTE: iperf reports the cpu utilization for the whole test
# period, not each second, so the CPU samples cannot be aligned
result_copy.generator_cpu_stats = self.generator_cpu_stats
result_copy.receiver_cpu_stats = self.receiver_cpu_stats
result_copy.generator_results = ParallelPerfResult()
result_copy.receiver_results = ParallelPerfResult()
for stream in self.generator_results:
aligned_intervals = [
interval
for interval in stream
if interval.timestamp >= start and interval.timestamp <= end
]
result_copy.generator_results.append(SequentialPerfResult(aligned_intervals))
for stream in self.receiver_results:
aligned_intervals = [
interval
for interval in stream
if interval.timestamp >= start and interval.timestamp <= end
]
result_copy.generator_cpu_stats = self.generator_cpu_stats.time_slice(start, end)
result_copy.receiver_cpu_stats = self.receiver_cpu_stats.time_slice(start, end)
result_copy.receiver_results.append(SequentialPerfResult(aligned_intervals))
result_copy.generator_results = self.generator_results.time_slice(start, end)
result_copy.receiver_results = self.generator_results.time_slice(start, end) return result_copy
diff --git a/lnst/RecipeCommon/Perf/Measurements/StatCPUMeasurement.py b/lnst/RecipeCommon/Perf/Measurements/StatCPUMeasurement.py index 49aa262..c5b50fe 100644 --- a/lnst/RecipeCommon/Perf/Measurements/StatCPUMeasurement.py +++ b/lnst/RecipeCommon/Perf/Measurements/StatCPUMeasurement.py @@ -28,26 +28,20 @@ def utilization(self):
@property def start_timestamp(self):
return self._data["user"][0].timestamp
return min([item.start_timestamp for item in self._data.values()])
Although this seems to be a correct way to get the timestamp, the way how StatCPUMeasurement is implemented does not require this.
The timestamps are the same for all 'user', 'system', 'irq' etc for one sample. That's why used the optimization before. I think you can do the same.
agreed, but if StatCPUMeasurement ever changes this may need to be updated and we could miss that, I'll do some profiling on this method to see how much of an optimization it really is and if it's worth it.
-Ondrej
I completely agree. I'm ok with keeping this as-is. Let's see how the profiling goes.
profiling outputs:
unoptimized version: time_slice called 230 times -> 2969070 function calls in 1.011 seconds end_timestamp called 230 times -> 8050 function calls in 0.003 seconds start_timestamp called 230 times -> 5750 function calls in 0.002 seconds
optimized version: time_slice called 240 times -> 3098160 function calls in 1.105 seconds end_timestamp called 240 times -> 960 function calls in 0.000 seconds start_timestamp called 240 times -> 720 function calls in 0.000 seconds
the 230 vs 240 times calls is probably influenced by some artifact in the length of my tests (was using local vms and "faking" higher number of cpu cores), but it should be close enough to create a relevant comparison.
The important numbers are the function calls caused by calling each function.
The "time_slice" call stays roughly the same - there's no code change or optimization done on it.
Both end_timestamp and start_timestamp can be ~8 times faster when looking at the number of function calls that each causes, however, looking at the time spent on the execution, this is still a very minimal difference.
As such I think we're ok with not optimizing this for now.
-Ondrej
Looks good. Thanks for sharing.
-Jan
Tue, Jan 26, 2021 at 03:09:15PM CET, olichtne@redhat.com wrote:
> From: Ondrej Lichtner <olichtne@redhat.com>
>
> The evaluate_results method signature changed in
> cd5328db8b0196a6102ebea54bdea1ce632cfe46 and the ping evaluators weren't
> updated...
>
> [... patch trimmed ...]
There's one more change required here, and that is in lnst/RecipeCommon/Ping/Recipe.py, in PingTestAndEvaluate.single_ping_evaluate().

This uses:

    evaluator.evaluate_results(self, result[1])

which needs to be updated.

Also wondering if the specific recipes (Ipsec) that override the ping functionality require updates, too.
-Jan
On Tue, Jan 26, 2021 at 06:09:47PM +0100, Jan Tluka wrote:
> Tue, Jan 26, 2021 at 03:09:15PM CET, olichtne@redhat.com wrote:
> > [... patch trimmed ...]
>
> There's one more change required here, and that is in
> lnst/RecipeCommon/Ping/Recipe.py, in
> PingTestAndEvaluate.single_ping_evaluate().
>
> This uses: evaluator.evaluate_results(self, result[1]), which needs to
> be updated.
>
> Also wondering if the specific recipes (Ipsec) that override the ping
> functionality require updates, too.
>
> -Jan
On IRC we agreed to skip this patch for this series. It was intended to fix an assumed crashing issue in the ping evaluators caused by cd5328db8b0196a6102ebea54bdea1ce632cfe46 changing the BaseEvaluator method signatures.

This is however not happening, as the call hierarchy and method implementation hierarchy for PingRecipe and the PingEvaluators override the relevant methods completely and remove the additional parameter. There are therefore no crashes to fix at the moment.

We should at some point update the PingRecipe code to include the additional parameter to improve code consistency; however, that is a bigger code change, and this patchset was only intended to fix currently crashing recipes caused by recent changes.
-Ondrej
Sent a second version of the patchset that removes the first patch and introduces an extra bugfix into the final patch that I found while profiling measurements.

I still need to do some more testing with this (to see if the timestamp rework fixes all the actual issues that I've seen with the ported recipes) before applying this to upstream.
-Ondrej
On Tue, Jan 26, 2021 at 03:09:15PM +0100, olichtne@redhat.com wrote:
> From: Ondrej Lichtner <olichtne@redhat.com>
>
> The evaluate_results method signature changed in
> cd5328db8b0196a6102ebea54bdea1ce632cfe46 and the ping evaluators weren't
> updated...
>
> [... patch trimmed ...]