[PATCH v2] regression_tests: ipsec_esp_ah_comp add comp key
by Kamil Jerabek
The configuration of compression in iproute version 2 requires one additional
parameter. This commit adds a key to the compression configuration when it is needed.
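For illustration, the end result looks roughly like the sketch below; it only
mirrors the iproute version check that is already in the recipe and the xfrm
command this patch modifies (m1_addr, m2_addr and ipsec_mode are the recipe's
existing variables):

    # newer iproute ("iproute-2") expects a key argument after "comp deflate",
    # older versions accept the bare algorithm name
    res = m1.run("rpm -qa iproute", save_output=True)
    if res.get_result()["res_data"]["stdout"].find("iproute-2") != -1:
        m1_key = "0x"    # an empty hex key satisfies the newer parser
    else:
        m1_key = ""

    m1.run("ip xfrm state add "
           "src %s dst %s proto comp spi 4 mode %s "
           "comp deflate %s"
           % (m1_addr, m2_addr, ipsec_mode, m1_key))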
Signed-off-by: Kamil Jerabek <kjerabek(a)redhat.com>
---
Changes in:
v2 * revision of import changes
---
recipes/regression_tests/phase3/ipsec_esp_ah_comp.py | 16 ++++++++--------
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/recipes/regression_tests/phase3/ipsec_esp_ah_comp.py b/recipes/regression_tests/phase3/ipsec_esp_ah_comp.py
index 7286f60..fada91e 100644
--- a/recipes/regression_tests/phase3/ipsec_esp_ah_comp.py
+++ b/recipes/regression_tests/phase3/ipsec_esp_ah_comp.py
@@ -160,12 +160,12 @@ def configure_ipsec(ciph_alg, ciph_key, hash_alg, hash_key, ip_version):
m1.run("ip xfrm state add "\
"src %s dst %s proto comp spi 4 mode %s "\
- "comp deflate"\
- % (m1_addr, m2_addr, ipsec_mode))
+ "comp deflate %s"\
+ % (m1_addr, m2_addr, ipsec_mode, m1_key))
m1.run("ip xfrm state add "\
"src %s dst %s proto comp spi 1 mode %s "\
- "comp deflate"\
- % (m2_addr, m1_addr, ipsec_mode))
+ "comp deflate %s"\
+ % (m2_addr, m1_addr, ipsec_mode, m1_key))
m1.run("ip xfrm state add "\
"src %s dst %s proto esp spi 2 mode %s "\
@@ -210,12 +210,12 @@ def configure_ipsec(ciph_alg, ciph_key, hash_alg, hash_key, ip_version):
m2.run("ip xfrm state add "\
"src %s dst %s proto comp spi 4 mode %s "\
- "comp deflate"\
- % (m1_addr, m2_addr, ipsec_mode))
+ "comp deflate %s"\
+ % (m1_addr, m2_addr, ipsec_mode, m2_key))
m2.run("ip xfrm state add "\
"src %s dst %s proto comp spi 1 mode %s "\
- "comp deflate"\
- % (m2_addr, m1_addr, ipsec_mode))
+ "comp deflate %s"\
+ % (m2_addr, m1_addr, ipsec_mode, m2_key))
m2.run("ip xfrm state add "\
"src %s dst %s proto esp spi 2 mode %s "\
--
2.5.5
NEW API Discussion
by Ondrej Lichtner
Hi all,
for the past couple of weeks I've been going over the meeting recordings
we've had wrt the new Python API of LNST. I've been collecting
everything into a single file that I'm appending to this email. I'm
sending it here so that everyone can join the discussion before the
implementation itself begins. I'll warn you though... it's LONG :)
!!!NOTE it's not complete yet; I'm sending it now because we have an
upstream meeting planned for later today. In particular, the Device/Interface
API is not complete.
The structure of the file is as follows:
1. commented pseudo code of what Test Modules will look like - they'll be
instantiated on the Controller and sent ad hoc to the slave where
they'll be executed --> no more synchronization on test start...
2. commented pseudo code of what Tasks will look like - they'll define
both the network requirements and the test execution.
3. a short rough idea of how the tests/recipes will be executed.
4. a 1st version of the API "specification"/documentation. Here I tried to
go through the *API objects we currently have and make them more
"Pythonic", thinking of how they'll be used from a Task. I tried writing
it as class-method-attribute definitions with some documentation, so
hopefully it makes some sense... Like I've said before,
Device/Interfaces are not complete so there's a lot missing there.
Please take a look and provide feedback. I'm sure there are other parts
in addition to the Device/Interface APIs that are missing something, so I'll
appreciate any help :).
================================================================================
new_api file:
1. test modules
class BaseTestModule:
    def __init__(self, **kwargs):
        #by default loads the params into self.params - no checks, pseudocode:
        for x in vars(self):
            if isinstance(x, BaseType):
                param_class = self.getattr(x)
                try:
                    val = kwargs[x]
                except KeyError:
                    if param_class.is_mandatory():
                        raise TestModuleError("Option %s is mandatory" % x)
                self.setattr(x.params, param_class.construct(val))
                del kwargs[x]
        for x in kwargs.keys():
            log.error("Undefined parameter %s" % x)
        if len(kwargs):
            raise TestModuleError("Undefined TestModule parameters")

    def run(self):
        #needs to be overridden - throw an exception to notify the test developer
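For context, a parameter type referenced above (BaseType/ParamType) could look
roughly like the sketch below; this is purely illustrative, none of these names
or signatures are part of the proposal yet:

    class BaseType(object):
        def __init__(self, mandatory=False):
            self._mandatory = mandatory

        def is_mandatory(self):
            return self._mandatory

        def construct(self, val):
            #validate/convert the raw keyword argument here
            return val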
class MyTest(BaseTestModule):
    param = ParamType()
    param2 = ParamType2()
    param3 = Multiparam(ParamType())

    #optional __init__
    #def __init__(self, **kwargs):
    #    super(MyTest, self).__init__(**kwargs)
    #    #additional tester defined checks

    def run(self):
        #do my test
        #parameters available in self.params
#in Task:
import lnst
#module lnst.modules will dynamically look for module classes in configured
#locations, similar to how we do it now
ping = lnst.modules.Ping(dst=m2.if1.ip[0], count=100, interval=0.1)
m1.run(ping)
================================================
2. Tasks:
class BaseTask(object):
    def __init__(self):
        #initialize instance specific requirements
        self.requirements = Requirements()
        for x in dir(self):
            val = getattr(self, x)
            setattr(self.requirements, x, val)

    def test(self):
        raise Exception("Method test MUST be defined.")
class MyTask(lnst.BaseTask):
    #class-wide definition of requirements
    m1 = HostSel(param="val", ...)
    m1.if1 = IfaceSel(l2net="xyz", param="val", ...)
    m2 = HostSel(param="val", ...)
    m2.if1 = IfaceSel(l2net="xyz", param="val", ...)

    def __init__(self, **kwargs):
        super(MyTask, self).__init__()
        #do something with kwargs
        #adjust instance specific requirements
        self.requirements.m3 = HostSel(...)

    def test(self):
        self.matched.m1.run(Module)
        self.matched.m1.run("command")

    #or
    def test(self, m1, m2):
        m1.run(Module)
        m2.run("command")
================================================
3. Running Tasks:
from MyTasks import MyTask
import lnst
task_instance = MyTask(params)
lnst(args)
lnst.run(task_instance)
OR
lnst-ctl -d run MyTask.py -- task_params
# looks for the NAME class in the NAME.py file (MyTask in this case), for which
# the condition "isinstance(NAME, BaseTask)" must be True
# could also run all classes in the file where "isinstance(x, BaseTask)" is
# True, with the option to restrict to a specific task class (or just run the
# first one?)... lnst-ctl rewritten to do the same as manually running the
# task from its own python script
First do the second option - easier since we have this already - then refactor
the controller to create the lnst controller for the first option.
Aliases lose meaning - they become parameters passed to MyTask's __init__; when
using the lnst-ctl CLI, use "-- task_params"?? That might not work for multiple
tasks. See the short sketch below.
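To make the alias-to-parameter idea concrete, a minimal sketch (the parameter
names mtu/ipv/netperf_duration are just placeholders, nothing here is settled
API):

    # former XML aliases become plain constructor arguments of the task
    task = MyTask(mtu=1500, ipv="both", netperf_duration=60)
    lnst.run(task)

    # roughly the equivalent CLI invocation of the second option:
    #   lnst-ctl -d run MyTask.py -- mtu=1500 ipv=both netperf_duration=60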
================================================
4. Tester facing API, inside the test() method:
Host objects available in self.matched.selector_name:
class Host: #HostAPI??? name can change
#attributes:
# dynamically filled object of Host attributes such as architecture and
# so on. Use example in test() would look like this:
# if host.params.arch == "x86":
# I separated this into the "params" object so I can overwrite its
# __getattr__ method and return None/UnknownParam exception for unknown
# parameters, and to avoid name conflicts with other attributes
params = object()
# dynamically filled object of NetDevice objects accessible directly as the
# object attributes:
# host.ifaces.eth0.set_ip(...)
# I separated this into the "ifaces" object to avoid name conflicts with
# other attributes
# creation of new NetDevices should be possible through simple assignment:
# m1.devs.new_team0 = TeamDevice(...)
# assignment of an incompatible Type or to an existing Device object will
# raise an exception
# assignment of None? or del devs.new_team0 to deconfigure the device?
devs = object()
def run(what, bg=False, fail=False, timeout=60, path="", json=False, netns)
# will run "what" on the remote host
# "what" is either a Module object, or a string command that will be
# executed as a bash command
# "bg" when True, runs "what" on background - the run() call
# immediately returns, and "timeout" is ignored, the background
# process can be controlled through the returned Job object
# "fail" if True then the Job is expected to fail, and will be reported
# as PASSed if it does
# "timeout" in seconds, determines how long to block test execution for
# before killing the Job. Only when running in foreground
# "path" changes the current working directory to the specified path
# before "what" is executed and changes back after execution is
# finished.
# "tool" changes the current working directory to the directory of a
# specified test_tool before "what" is executed and changes back
# after execution is finished.
# !!!!!!! this is from the current API and I'm not yet sure how we
# !!!!!!! want to handle those... so for now I'll keep it
# "json" if True will attempt to parse the returned stdout of the Job
# as json into a dictionary
# "netns" Job will be run in the specified network namespace
# Returns a Job object
def config(option, value)
# copied from old API, provides a shortcut for "echo $value > /proc/or/sys/path"
# and returns the original value when the test is finished
def sync_resources(srcpath="", dstpath="", recursive=False)
# copies the specified file from the controller to the specified
# destination path, if recursive == True and srcpath refers to a
# directory it copies the entire directory
def {enable, disable}_service(service)
# copied from old API, enables or disables the specified service
def add_{bond, bridge,...}(params)
# this is how we can currently dynamically create net devices on the
# hosts. Even with the new assignment-based approach this could still
# be useful, though the method would need to be dynamically created to
# avoid useless work when adding a new netdev type. Something like:
# add_device("name", "Type", params) which would then do
# self.devs.name = TypeDevice(params) ??
def del_device(name)
# removes the specified device; probably easier (more logical?) to do
# this than "devs.name = None", and "del devs.name" would be unreliable
class Device: #DeviceAPI, InterfaceAPI? name can change...
# attributes:
# dynamically created Device attributes such as driver and so on. Use
# example in test() would look like this:
# if host.devs.eth0.driver == "ixgbe":
# achieved through rewriting of the __getattr__ method of the Device class
# should return None or throw UnknownParam exception for unknown parameters
# this should directly mirror the Device objects that are managed by the
# InterfaceManager on the Slave
# eg:
driver = something
mtu = something
ips = [IpAddress, ...]
class Job: #ProcessAPI? name can change...
#attributes:
# True if the Job finished, False if it's still running in the background
finished = bool
# contains the result data returned by the Job, None for bash commands
result = object
# contain the stdout and stderr generated by the job, None for Module Jobs
stdout = ""
stderr = ""
# simple True/False value indicating success/failure of the Job
passed = bool
def wait(timeout=0):
# for background jobs, will wait until the job finished
# "timeout" in seconds, determines how long to wait for. After timeout
# reached, nothing happens, status of the job can be checked with the
# "finished" attribute. If timeout=0, then wait forever.
def kill(signalnum=signal.SIGKILL):
# sends the specified signal to the process of the Job running in
# background
# "signalnum" the signal to be sent
[PATCH] regression_tests: ipsec_esp_ah_comp add comp key
by Kamil Jerabek
The configuration of compression in iproute version 2 requires one additional
parameter. This commit adds a key to the compression configuration when it is needed.
Signed-off-by: Kamil Jerabek <kjerabek(a)redhat.com>
---
recipes/regression_tests/phase3/ipsec_esp_ah_comp.py | 19 +++++++++----------
1 file changed, 9 insertions(+), 10 deletions(-)
diff --git a/recipes/regression_tests/phase3/ipsec_esp_ah_comp.py b/recipes/regression_tests/phase3/ipsec_esp_ah_comp.py
index 7286f60..0efff03 100644
--- a/recipes/regression_tests/phase3/ipsec_esp_ah_comp.py
+++ b/recipes/regression_tests/phase3/ipsec_esp_ah_comp.py
@@ -2,10 +2,9 @@ from lnst.Controller.Task import ctl
from lnst.Controller.PerfRepoUtils import perfrepo_baseline_to_dict
from lnst.Controller.PerfRepoUtils import netperf_result_template
-from lnst.RecipeCommon.ModuleWrap import ping, ping6, netperf
+from lnst.RecipeCommon.ModuleWrap import netperf
from lnst.RecipeCommon.IRQ import pin_dev_irqs
from lnst.RecipeCommon.PerfRepo import generate_perfrepo_comment
-import re
# ---------------------------
# ALGORITHM AND CIPHER CONFIG
@@ -160,12 +159,12 @@ def configure_ipsec(ciph_alg, ciph_key, hash_alg, hash_key, ip_version):
m1.run("ip xfrm state add "\
"src %s dst %s proto comp spi 4 mode %s "\
- "comp deflate"\
- % (m1_addr, m2_addr, ipsec_mode))
+ "comp deflate %s"\
+ % (m1_addr, m2_addr, ipsec_mode, m1_key))
m1.run("ip xfrm state add "\
"src %s dst %s proto comp spi 1 mode %s "\
- "comp deflate"\
- % (m2_addr, m1_addr, ipsec_mode))
+ "comp deflate %s"\
+ % (m2_addr, m1_addr, ipsec_mode, m1_key))
m1.run("ip xfrm state add "\
"src %s dst %s proto esp spi 2 mode %s "\
@@ -210,12 +209,12 @@ def configure_ipsec(ciph_alg, ciph_key, hash_alg, hash_key, ip_version):
m2.run("ip xfrm state add "\
"src %s dst %s proto comp spi 4 mode %s "\
- "comp deflate"\
- % (m1_addr, m2_addr, ipsec_mode))
+ "comp deflate %s"\
+ % (m1_addr, m2_addr, ipsec_mode, m2_key))
m2.run("ip xfrm state add "\
"src %s dst %s proto comp spi 1 mode %s "\
- "comp deflate"\
- % (m2_addr, m1_addr, ipsec_mode))
+ "comp deflate %s"\
+ % (m2_addr, m1_addr, ipsec_mode, m2_key))
m2.run("ip xfrm state add "\
"src %s dst %s proto esp spi 2 mode %s "\
--
2.5.5
[PATCH v8] regression-tests: add ipsec_esp_ah_comp test
by Kamil Jerabek
This patch adds a new test to our regression_tests phase3. The topology
is the same as in the phase1 simple_netperf test.
This test covers the performance of IPsec over ethernet. Ping and netperf are run.
Both tunnel and transport modes are covered. All tests are done with esp, ah and
comp options set together. All combinations of the cipher and hash functions listed
below are tested. Netperf message size is set to 16000 bytes by default.
ciphers: aes-128, aes-256
hash functions: hmac(md5), sha256
Signed-off-by: Kamil Jerabek <kjerabek(a)redhat.com>
---
Changes in
v2: * removed whitespaces and static thresholds
v3: * default nperf_msg_size set to 16000
* added set mtu to nic
* msg_size added as result parameter to perfrepo
* repaired bad indentation
* default mtu set to 1500
* removed one fragmentation check
v4: * reworked compression check
* removed typically unused cipher algorithms and hash functions
v5: * ciphers structure changed
v6: * hash structure changed
* generate_key function changed
v7: * disabled packet assert compression check for transport mode
v8: * repaired mistake in interfaces variable
---
.../phase3/ipsec_esp_ah_comp.README | 94 ++++
.../regression_tests/phase3/ipsec_esp_ah_comp.py | 553 +++++++++++++++++++++
.../regression_tests/phase3/ipsec_esp_ah_comp.xml | 50 ++
3 files changed, 697 insertions(+)
create mode 100644 recipes/regression_tests/phase3/ipsec_esp_ah_comp.README
create mode 100644 recipes/regression_tests/phase3/ipsec_esp_ah_comp.py
create mode 100644 recipes/regression_tests/phase3/ipsec_esp_ah_comp.xml
diff --git a/recipes/regression_tests/phase3/ipsec_esp_ah_comp.README b/recipes/regression_tests/phase3/ipsec_esp_ah_comp.README
new file mode 100644
index 0000000..9490d08
--- /dev/null
+++ b/recipes/regression_tests/phase3/ipsec_esp_ah_comp.README
@@ -0,0 +1,94 @@
+Topology:
+
+ switch
+ +------+
+ | |
+ | |
+ +-------------+ +-------------+
+ | | | |
+ | | | |
+ | +------+ |
+ | |
+ | |
+ +-+--+ +-+--+
++-------|eth1|------+ +-------|eth1|------+
+| +-+--+ | | +-+--+ |
+| | | |
+| | | |
+| | | |
+| | | |
+| | | |
+| host1 | | host2 |
+| | | |
+| | | |
+| | | |
++-------------------+ +-------------------+
+
+Number of hosts: 2
+Host #1 description:
+ One ethernet device configured with ip addresses:
+ 192.168.99.1/24
+ fc00:1::1/64
+
+Host #2 description:
+ One ethernet device configured with ip addresses:
+ 192.168.100.1/24
+ fc00:2::1/64
+
+Test name:
+ ipsec_esp_ah_comp.py
+Test description:
+ Ping:
+ + count: 10
+ + interval: 0.1s
+ + between ipsec encrypted ethernet interfaces headers check expecting PASS
+ + between ipsec encrypted ethernet interfaces compression check expecting PASS
+ Ping6:
+ + count: 10
+ + interval: 0.1s
+ + between ipsec encrypted ethernet interfaces headers check expecting PASS
+ + between ipsec encrypted ethernet interfaces compression check expecting PASS
+ Netperf:
+ + duration: 60s
+ + TCP_STREAM and UDP_STREAM
+ + ipv4 and ipv6
+ + between ipsec encrypted ethernet interfaces
+ IPsec
+ + tested with esp, ah, comp options together
+ + tested with all listed ciphers and hash functions
+ + ciphers
+ + aes-128
+ + aes-256
+ + hash functions
+ + hmac(md5)
+ + sha256
+
+PerfRepo integration:
+ First, preparation in PerfRepo is required - you need to create Test objects
+ through the web interface that properly describe the individual Netperf
+ tests that this recipe runs. Don't forget to also add appropriate metrics.
+ For these Netperf tests it's always:
+ * throughput
+ * throughput_min
+ * throughput_max
+ * throughput_deviation
+
+ After that, to enable support for PerfRepo you need to create the file
+ ipsec_esp_ah_comp.mapping and define the following id mappings:
+ tcp_ipv4_id -> to store ipv4 TCP_STREAM Netperf test results, maps to TestUid of a PerfRepo Test object
+ tcp_ipv6_id -> to store ipv6 TCP_STREAM Netperf test results, maps to TestUid of a PerfRepo Test object
+ udp_ipv4_id -> to store ipv4 UDP_STREAM Netperf test results, maps to TestUid of a PerfRepo Test object
+ udp_ipv6_id -> to store ipv6 UDP_STREAM Netperf test results, maps to TestUid of a PerfRepo Test object
+
+ To enable result comparison against baselines you need to create a Report in
+ PerfRepo that will store the baseline. Set up the Report to only contain results
+ with the same hash tag and then add a new mapping to the mapping file, with
+ this format:
+ <some_hash> = <report_id>
+
+ The hash value is automatically generated during test execution and added
+ to each result stored in PerfRepo. To get the Report id you need to open
+ that report in your browser and find it in the URL.
+
+ When running this recipe you should also define the 'product_name' alias
+ (e.g. RHEL7) in order to tag the result object in PerfRepo.
diff --git a/recipes/regression_tests/phase3/ipsec_esp_ah_comp.py b/recipes/regression_tests/phase3/ipsec_esp_ah_comp.py
new file mode 100644
index 0000000..7286f60
--- /dev/null
+++ b/recipes/regression_tests/phase3/ipsec_esp_ah_comp.py
@@ -0,0 +1,553 @@
+from lnst.Controller.Task import ctl
+from lnst.Controller.PerfRepoUtils import perfrepo_baseline_to_dict
+from lnst.Controller.PerfRepoUtils import netperf_result_template
+
+from lnst.RecipeCommon.ModuleWrap import ping, ping6, netperf
+from lnst.RecipeCommon.IRQ import pin_dev_irqs
+from lnst.RecipeCommon.PerfRepo import generate_perfrepo_comment
+import re
+
+# ---------------------------
+# ALGORITHM AND CIPHER CONFIG
+# ---------------------------
+
+ciphers = []
+
+#length param is in bits
+def generate_key(length):
+ key = "0x"
+ key = key + (length/8) * "0b"
+ return key
+
+ciphers.append(('aes', 128))
+ciphers.append(('aes', 256))
+
+hashes = []
+
+hashes.append(('hmac(md5)', 128))
+hashes.append(('sha256', 256))
+
+# ------
+# SETUP
+# ------
+
+mapping_file = ctl.get_alias("mapping_file")
+perf_api = ctl.connect_PerfRepo(mapping_file)
+
+product_name = ctl.get_alias("product_name")
+
+m1 = ctl.get_host("machine1")
+m2 = ctl.get_host("machine2")
+
+m1.sync_resources(modules=["IcmpPing", "Icmp6Ping", "Netperf", "Custom"])
+m2.sync_resources(modules=["PacketAssert", "IcmpPing", "Icmp6Ping", "Netperf"])
+
+# ------
+# TESTS
+# ------
+
+ipv = ctl.get_alias("ipv")
+mtu = ctl.get_alias("mtu")
+netperf_duration = int(ctl.get_alias("netperf_duration"))
+nperf_reserve = int(ctl.get_alias("nperf_reserve"))
+nperf_confidence = ctl.get_alias("nperf_confidence")
+nperf_max_runs = int(ctl.get_alias("nperf_max_runs"))
+nperf_cpupin = ctl.get_alias("nperf_cpupin")
+nperf_cpu_util = ctl.get_alias("nperf_cpu_util")
+nperf_num_parallel = int(ctl.get_alias("nperf_num_parallel"))
+nperf_debug = ctl.get_alias("nperf_debug")
+nperf_max_dev = ctl.get_alias("nperf_max_dev")
+nperf_msg_size = ctl.get_alias("nperf_msg_size")
+pr_user_comment = ctl.get_alias("perfrepo_comment")
+ipsec_mode = ctl.get_alias("ipsec_mode")
+
+pr_comment = generate_perfrepo_comment([m1, m2], pr_user_comment)
+
+m1_if = m1.get_interface("eth")
+m2_if = m2.get_interface("eth")
+
+m1_if.set_mtu(mtu)
+m2_if.set_mtu(mtu)
+
+m1_if_name = m1_if.get_devname()
+m2_if_name = m2_if.get_devname()
+
+m1_if_addr = m1_if.get_ip()
+m2_if_addr = m2_if.get_ip()
+
+m1_if_addr6 = m1_if.get_ip(1)
+m2_if_addr6 = m2_if.get_ip(1)
+
+
+# add routing rules ipv4
+# so the host knows where to send traffic destined to remote site
+m1.run("ip route add %s dev %s" % (m2_if_addr, m1_if_name))
+
+# so the host knows where to send traffic destined to remote site
+m2.run("ip route add %s dev %s" % (m1_if_addr, m2_if_name))
+
+# add routing rules ipv6
+# so the host knows where to send traffic destined to remote site
+m1.run("ip route add %s dev %s" % (m2_if_addr6, m1_if_name))
+
+# so the host knows where to send traffic destined to remote site
+m2.run("ip route add %s dev %s" % (m1_if_addr6, m2_if_name))
+
+if nperf_msg_size is None:
+ nperf_msg_size = 16000
+
+if ipsec_mode is None:
+ ipsec_mode = "transport"
+
+res = m1.run("rpm -qa iproute", save_output=True)
+if (res.get_result()["res_data"]["stdout"].find("iproute-2") != -1):
+ m1_key="0x"
+else:
+ m1_key=""
+
+res = m2.run("rpm -qa iproute", save_output=True)
+if (res.get_result()["res_data"]["stdout"].find("iproute-2") != -1):
+ m2_key="0x"
+else:
+ m2_key=""
+
+if nperf_cpupin:
+ m1.run("service irqbalance stop")
+ m2.run("service irqbalance stop")
+
+ dev_list = [(m1, m1_if), (m2, m2_if)]
+
+ # this will pin devices irqs to cpu #0
+ for m, d in dev_list:
+ pin_dev_irqs(m, d, 0)
+
+nperf_opts = ""
+if nperf_cpupin and nperf_num_parallel == 1:
+ nperf_opts = " -T%s,%s" % (nperf_cpupin, nperf_cpupin)
+
+ctl.wait(15)
+
+def configure_ipsec(ciph_alg, ciph_key, hash_alg, hash_key, ip_version):
+ if ip_version == "ipv4":
+ m1_addr = m1_if_addr
+ m2_addr = m2_if_addr
+ else:
+ m1_addr = m1_if_addr6
+ m2_addr = m2_if_addr6
+
+ # configure policy and state
+ m1.run("ip xfrm policy flush")
+ m1.run("ip xfrm state flush")
+ m2.run("ip xfrm policy flush")
+ m2.run("ip xfrm state flush")
+
+ m1.run("ip xfrm policy add src %s dst %s dir out "\
+ "tmpl src %s dst %s proto comp spi 4 mode %s "\
+ "tmpl src %s dst %s proto esp spi 2 mode %s "\
+ "tmpl src %s dst %s proto ah spi 3 mode %s"
+ % (m1_addr, m2_addr,
+ m1_addr, m2_addr, ipsec_mode,
+ m1_addr, m2_addr, ipsec_mode,
+ m1_addr, m2_addr, ipsec_mode))
+ m1.run("ip xfrm policy add src %s dst %s dir in "\
+ "tmpl src %s dst %s proto comp spi 1 mode %s level use "\
+ "tmpl src %s dst %s proto esp spi 2 mode %s "\
+ "tmpl src %s dst %s proto ah spi 3 mode %s"
+ % (m2_addr, m1_addr,
+ m2_addr, m1_addr, ipsec_mode,
+ m2_addr, m1_addr, ipsec_mode,
+ m2_addr, m1_addr, ipsec_mode))
+
+ m1.run("ip xfrm state add "\
+ "src %s dst %s proto comp spi 4 mode %s "\
+ "comp deflate"\
+ % (m1_addr, m2_addr, ipsec_mode))
+ m1.run("ip xfrm state add "\
+ "src %s dst %s proto comp spi 1 mode %s "\
+ "comp deflate"\
+ % (m2_addr, m1_addr, ipsec_mode))
+
+ m1.run("ip xfrm state add "\
+ "src %s dst %s proto esp spi 2 mode %s "\
+ "enc '%s' %s"\
+ % (m1_addr, m2_addr, ipsec_mode,
+ ciph_alg, ciph_key))
+ m1.run("ip xfrm state add "\
+ "src %s dst %s proto esp spi 2 mode %s "\
+ "enc '%s' %s"\
+ % (m2_addr, m1_addr, ipsec_mode,
+ ciph_alg, ciph_key))
+
+ m1.run("ip xfrm state add "\
+ "src %s dst %s proto ah spi 3 mode %s "\
+ "auth '%s' %s"
+ % (m1_addr, m2_addr, ipsec_mode,
+ hash_alg, hash_key))
+ m1.run("ip xfrm state add "\
+ "src %s dst %s proto ah spi 3 mode %s "\
+ "auth '%s' %s"
+ % (m2_addr, m1_addr, ipsec_mode,
+ hash_alg, hash_key))
+
+
+ # second machine
+ m2.run("ip xfrm policy add src %s dst %s dir out "\
+ "tmpl src %s dst %s proto comp spi 1 mode %s "\
+ "tmpl src %s dst %s proto esp spi 2 mode %s "\
+ "tmpl src %s dst %s proto ah spi 3 mode %s"
+ % (m2_addr, m1_addr,
+ m2_addr, m1_addr, ipsec_mode,
+ m2_addr, m1_addr, ipsec_mode,
+ m2_addr, m1_addr, ipsec_mode))
+ m2.run("ip xfrm policy add src %s dst %s dir in "\
+ "tmpl src %s dst %s proto comp spi 4 mode %s level use "\
+ "tmpl src %s dst %s proto esp spi 2 mode %s "\
+ "tmpl src %s dst %s proto ah spi 3 mode %s"
+ % (m1_addr, m2_addr,
+ m1_addr, m2_addr, ipsec_mode,
+ m1_addr, m2_addr, ipsec_mode,
+ m1_addr, m2_addr, ipsec_mode))
+
+ m2.run("ip xfrm state add "\
+ "src %s dst %s proto comp spi 4 mode %s "\
+ "comp deflate"\
+ % (m1_addr, m2_addr, ipsec_mode))
+ m2.run("ip xfrm state add "\
+ "src %s dst %s proto comp spi 1 mode %s "\
+ "comp deflate"\
+ % (m2_addr, m1_addr, ipsec_mode))
+
+ m2.run("ip xfrm state add "\
+ "src %s dst %s proto esp spi 2 mode %s "\
+ "enc '%s' %s"\
+ % (m1_addr, m2_addr, ipsec_mode,
+ ciph_alg, ciph_key))
+ m2.run("ip xfrm state add "\
+ "src %s dst %s proto esp spi 2 mode %s "\
+ "enc '%s' %s"\
+ % (m2_addr, m1_addr, ipsec_mode,
+ ciph_alg, ciph_key))
+
+ m2.run("ip xfrm state add "\
+ "src %s dst %s proto ah spi 3 mode %s "\
+ "auth '%s' %s"\
+ % (m1_addr, m2_addr, ipsec_mode,
+ hash_alg, hash_key))
+ m2.run("ip xfrm state add "\
+ "src %s dst %s proto ah spi 3 mode %s "\
+ "auth '%s' %s"\
+ % (m2_addr, m1_addr, ipsec_mode,
+ hash_alg, hash_key))
+
+
+for ciph_alg, ciph_len in ciphers:
+ for hash_alg, hash_len in hashes:
+ # test: compressed check, TCP netperf, UDP netperf
+ if ipv in [ 'ipv4', 'both']:
+ configure_ipsec(ciph_alg,
+ generate_key(ciph_len),
+ hash_alg,
+ generate_key(hash_len),
+ "ipv4")
+
+ dump = m1.run("tcpdump -i %s -nn -vv" % m1_if_name, bg=True)
+
+ # ping + PacketAssert
+ assert_mod = ctl.get_module("PacketAssert",
+ options={
+ "interface": m2_if_name,
+ "filter": "ah",
+ "grep_for": [ "AH\(spi=0x00000003",
+ "ESP\(spi=0x00000002" ],
+ "min": 10
+ })
+
+ assert_proc = m2.run(assert_mod, bg=True)
+
+ ping_mod = ctl.get_module("IcmpPing",
+ options={
+ "addr": m2_if_addr,
+ "count": 10,
+ "interval": 0.1})
+
+ ctl.wait(2)
+
+ m1.run(ping_mod)
+
+ ctl.wait(2)
+
+ assert_proc.intr()
+
+ dump.intr()
+
+ m1.run("ip -s xfrm pol")
+ m1.run("ip -s xfrm state")
+
+ # ping + PacketAssert test with bigger size to check compression is used
+ dump = m1.run("tcpdump -i %s -nn -vvv" % m1_if_name, bg=True)
+
+ if ipsec_mode != "transport":
+ assert_mod = ctl.get_module("PacketAssert",
+ options={
+ "interface": m2_if_name,
+ "grep_for": ["IPComp"],
+ "min": 10
+ })
+
+ assert_proc = m2.run(assert_mod, bg=True)
+
+ ping_mod = ctl.get_module("IcmpPing",
+ options={
+ "addr": m2_if_addr,
+ "count": 10,
+ "interval": 0.1,
+ "size": 1500})
+
+ ctl.wait(2)
+
+ m1.run(ping_mod)
+
+ ctl.wait(2)
+
+ if ipsec_mode != "transport":
+ assert_proc.intr()
+
+ dump.intr()
+
+ # prepare PerfRepo result for tcp
+ result_tcp = perf_api.new_result("tcp_ipv4_id",
+ "tcp_ipv4_result",
+ hash_ignore=[
+ r'kernel_release',
+ r'redhat_release'])
+ result_tcp.add_tag(product_name)
+
+ if nperf_num_parallel > 1:
+ result_tcp.add_tag("multithreaded")
+ result_tcp.set_parameter('num_parallel', nperf_num_parallel)
+
+ result_tcp.set_parameter('cipher_alg', ciph_alg)
+ result_tcp.set_parameter('cipher_len', ciph_len)
+ result_tcp.set_parameter('hash_alg', hash_alg)
+ result_tcp.set_parameter('msg_size', nperf_msg_size)
+
+ baseline = perf_api.get_baseline_of_result(result_tcp)
+ baseline = perfrepo_baseline_to_dict(baseline)
+
+
+ tcp_res_data = netperf((m1, m1_if, 0, {"scope": 0}),
+ (m2, m2_if, 0, {"scope": 0}),
+ client_opts={"duration" : netperf_duration,
+ "testname" : "TCP_STREAM",
+ "confidence" : nperf_confidence,
+ "num_parallel" : nperf_num_parallel,
+ "cpu_util" : nperf_cpu_util,
+ "runs": nperf_max_runs,
+ "debug": nperf_debug,
+ "max_deviation": nperf_max_dev,
+ "msg_size" : nperf_msg_size,
+ "netperf_opts": nperf_opts},
+ baseline = baseline,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_tcp, tcp_res_data)
+ result_tcp.set_comment(pr_comment)
+ perf_api.save_result(result_tcp)
+
+ # prepare PerfRepo result for udp
+ result_udp = perf_api.new_result("udp_ipv4_id",
+ "udp_ipv4_result",
+ hash_ignore=[
+ r'kernel_release',
+ r'redhat_release'])
+ result_udp.add_tag(product_name)
+
+ if nperf_num_parallel > 1:
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ result_udp.set_parameter('cipher_alg', ciph_alg)
+ result_udp.set_parameter('cipher_len', ciph_len)
+ result_udp.set_parameter('hash_alg', hash_alg)
+ result_udp.set_parameter('msg_size', nperf_msg_size)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ baseline = perfrepo_baseline_to_dict(baseline)
+
+ udp_res_data = netperf((m1, m1_if, 0, {"scope": 0}),
+ (m2, m2_if, 0, {"scope": 0}),
+ client_opts={"duration" : netperf_duration,
+ "testname" : "UDP_STREAM",
+ "confidence" : nperf_confidence,
+ "num_parallel" : nperf_num_parallel,
+ "cpu_util" : nperf_cpu_util,
+ "runs": nperf_max_runs,
+ "debug": nperf_debug,
+ "max_deviation": nperf_max_dev,
+ "msg_size" : nperf_msg_size,
+ "netperf_opts": nperf_opts},
+ baseline = baseline,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ result_udp.set_comment(pr_comment)
+ perf_api.save_result(result_udp)
+
+ if ipv in [ 'ipv6', 'both']:
+ configure_ipsec(ciph_alg,
+ generate_key(ciph_len),
+ hash_alg,
+ generate_key(hash_len),
+ "ipv6")
+
+ dump = m1.run("tcpdump -i %s -nn -vv" % m1_if_name, bg=True)
+
+ # ping + PacketAssert
+ assert_mod = ctl.get_module("PacketAssert",
+ options={
+ "interface": m2_if_name,
+ "filter": "ah",
+ "grep_for": [ "AH\(spi=0x00000003",
+ "ESP\(spi=0x00000002" ],
+ "min": 10
+ })
+
+ assert_proc = m2.run(assert_mod, bg=True)
+
+ ping_mod = ctl.get_module("Icmp6Ping",
+ options={
+ "addr": m2_if_addr6,
+ "count": 10,
+ "interval": 0.1})
+
+ ctl.wait(2)
+
+ m1.run(ping_mod)
+
+ ctl.wait(2)
+
+ assert_proc.intr()
+
+ dump.intr()
+
+ m1.run("ip -s xfrm pol")
+ m1.run("ip -s xfrm state")
+
+ # ping + PacketAssert test with bigger size to check compression is used
+ dump = m1.run("tcpdump -i %s -nn -vv" % m1_if_name, bg=True)
+
+ if ipsec_mode != "transport":
+ assert_mod = ctl.get_module("PacketAssert",
+ options={
+ "interface": m2_if_name,
+ "grep_for": ["IPComp"],
+ "min": 10
+ })
+
+ assert_proc = m2.run(assert_mod, bg=True)
+
+ ping_mod = ctl.get_module("Icmp6Ping",
+ options={
+ "addr": m2_if_addr6,
+ "count": 10,
+ "interval": 0.1,
+ "size": 1500})
+
+ ctl.wait(2)
+
+ m1.run(ping_mod)
+
+ ctl.wait(2)
+
+ if ipsec_mode != "transport":
+ assert_proc.intr()
+
+ dump.intr()
+
+ # prepare PerfRepo result for tcp
+ result_tcp = perf_api.new_result("tcp_ipv6_id",
+ "tcp_ipv6_result",
+ hash_ignore=[
+ r'kernel_release',
+ r'redhat_release'])
+ result_tcp.add_tag(product_name)
+
+ if nperf_num_parallel > 1:
+ result_tcp.add_tag("multithreaded")
+ result_tcp.set_parameter('num_parallel', nperf_num_parallel)
+
+ result_tcp.set_parameter('cipher_alg', ciph_alg)
+ result_tcp.set_parameter('cipher_len', ciph_len)
+ result_tcp.set_parameter('hash_alg', hash_alg)
+ result_tcp.set_parameter('msg_size', nperf_msg_size)
+
+ baseline = perf_api.get_baseline_of_result(result_tcp)
+ baseline = perfrepo_baseline_to_dict(baseline)
+
+
+ tcp_res_data = netperf((m1, m1_if, 1, {"scope": 0}),
+ (m2, m2_if, 1, {"scope": 0}),
+ client_opts={"duration" : netperf_duration,
+ "testname" : "TCP_STREAM",
+ "confidence" : nperf_confidence,
+ "num_parallel" : nperf_num_parallel,
+ "cpu_util" : nperf_cpu_util,
+ "runs": nperf_max_runs,
+ "debug": nperf_debug,
+ "max_deviation": nperf_max_dev,
+ "msg_size" : nperf_msg_size,
+ "netperf_opts" : nperf_opts + "-6"},
+ baseline = baseline,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_tcp, tcp_res_data)
+ result_tcp.set_comment(pr_comment)
+ perf_api.save_result(result_tcp)
+
+ # prepare PerfRepo result for udp
+ result_udp = perf_api.new_result("udp_ipv6_id",
+ "udp_ipv6_result",
+ hash_ignore=[
+ r'kernel_release',
+ r'redhat_release'])
+ result_udp.add_tag(product_name)
+
+ if nperf_num_parallel > 1:
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ result_udp.set_parameter('cipher_alg', ciph_alg)
+ result_udp.set_parameter('cipher_len', ciph_len)
+ result_udp.set_parameter('hash_alg', hash_alg)
+ result_udp.set_parameter('msg_size', nperf_msg_size)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ baseline = perfrepo_baseline_to_dict(baseline)
+
+ udp_res_data = netperf((m1, m1_if, 1, {"scope": 0}),
+ (m2, m2_if, 1, {"scope": 0}),
+ client_opts={"duration" : netperf_duration,
+ "testname" : "UDP_STREAM",
+ "confidence" : nperf_confidence,
+ "num_parallel" : nperf_num_parallel,
+ "cpu_util" : nperf_cpu_util,
+ "runs": nperf_max_runs,
+ "debug": nperf_debug,
+ "max_deviation": nperf_max_dev,
+ "msg_size" : nperf_msg_size,
+ "netperf_opts" : nperf_opts + "-6"},
+ baseline = baseline,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ result_udp.set_comment(pr_comment)
+ perf_api.save_result(result_udp)
+
+m1.run("ip xfrm policy flush")
+m1.run("ip xfrm state flush")
+m2.run("ip xfrm policy flush")
+m2.run("ip xfrm state flush")
+
+if nperf_cpupin:
+ m1.run("service irqbalance start")
+ m2.run("service irqbalance start")
diff --git a/recipes/regression_tests/phase3/ipsec_esp_ah_comp.xml b/recipes/regression_tests/phase3/ipsec_esp_ah_comp.xml
new file mode 100644
index 0000000..8ed30d6
--- /dev/null
+++ b/recipes/regression_tests/phase3/ipsec_esp_ah_comp.xml
@@ -0,0 +1,50 @@
+<lnstrecipe>
+ <define>
+ <alias name="ipv" value="both" />
+ <alias name="mtu" value="1500" />
+ <alias name="netperf_duration" value="60" />
+ <alias name="nperf_reserve" value="20" />
+ <alias name="nperf_confidence" value="99,5" />
+ <alias name="nperf_max_runs" value="5"/>
+ <alias name="nperf_num_parallel" value="1"/>
+ <alias name="nperf_debug" value="0"/>
+ <alias name="nperf_max_dev" value="20%"/>
+ <alias name="mapping_file" value="ipsec_esp_ah_comp.mapping"/>
+ <alias name="net_1" value="192.168.99"/>
+ <alias name="net6_1" value="fc00:1::"/>
+ <alias name="net_2" value="192.168.100"/>
+ <alias name="net6_2" value="fc00:2::"/>
+ <alias name="driver" value=""/>
+ </define>
+ <network>
+ <host id="machine1">
+ <interfaces>
+ <eth id="eth" label="localnet">
+ <params>
+ <param name="driver" value="{$driver}"/>
+ </params>
+ <addresses>
+ <address value="{$net_1}.1/24"/>
+ <address value="{$net6_1}1/64"/>
+ </addresses>
+ </eth>
+ </interfaces>
+ </host>
+ <host id="machine2">
+ <interfaces>
+ <eth id="eth" label="localnet">
+ <params>
+ <param name="driver" value="{$driver}"/>
+ </params>
+ <addresses>
+ <address value="{$net_2}.1/24"/>
+ <address value="{$net6_2}1/64"/>
+ </addresses>
+ </eth>
+ </interfaces>
+ </host>
+ </network>
+
+ <task python="ipsec_esp_ah_comp.py"/>
+
+</lnstrecipe>
--
2.5.5
[PATCH v2] regression_tests: simple_macsec add nperf_msg_size alias
by Kamil Jerabek
This commit adds the nperf_msg_size alias to our phase3 regression_tests
simple_macsec test. The default value is set to 16000.
Signed-off-by: Kamil Jerabek <kjerabek(a)redhat.com>
---
Changes in
v2: * added set parameter nperf_msg_size to nperf results
---
recipes/regression_tests/phase3/simple_macsec.py | 9 +++++++++
recipes/regression_tests/phase3/simple_macsec.xml | 1 +
2 files changed, 10 insertions(+)
diff --git a/recipes/regression_tests/phase3/simple_macsec.py b/recipes/regression_tests/phase3/simple_macsec.py
index b16214b..96f1ee3 100644
--- a/recipes/regression_tests/phase3/simple_macsec.py
+++ b/recipes/regression_tests/phase3/simple_macsec.py
@@ -36,6 +36,7 @@ nperf_cpu_util = ctl.get_alias("nperf_cpu_util")
nperf_num_parallel = int(ctl.get_alias("nperf_num_parallel"))
nperf_debug = ctl.get_alias("nperf_debug")
nperf_max_dev = ctl.get_alias("nperf_max_dev")
+nperf_msg_size = int(ctl.get_alias("nperf_msg_size"))
pr_user_comment = ctl.get_alias("perfrepo_comment")
pr_comment = generate_perfrepo_comment([m1, m2], pr_user_comment)
@@ -144,6 +145,7 @@ for setting in encryption_settings:
result_tcp.set_parameter('num_parallel', nperf_num_parallel)
result_tcp.set_parameter('encryption', setting)
+ result_tcp.set_parameter("nperf_msg_size", nperf_msg_size)
baseline = perf_api.get_baseline_of_result(result_tcp)
baseline = perfrepo_baseline_to_dict(baseline)
@@ -158,6 +160,7 @@ for setting in encryption_settings:
"runs": nperf_max_runs,
"debug": nperf_debug,
"max_deviation": nperf_max_dev,
+ "msg_size": nperf_msg_size,
"netperf_opts": nperf_opts},
baseline = baseline,
timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
@@ -178,6 +181,7 @@ for setting in encryption_settings:
result_udp.set_parameter('num_parallel', nperf_num_parallel)
result_udp.set_parameter('encryption', setting)
+ result_udp.set_parameter("nperf_msg_size", nperf_msg_size)
baseline = perf_api.get_baseline_of_result(result_udp)
baseline = perfrepo_baseline_to_dict(baseline)
@@ -192,6 +196,7 @@ for setting in encryption_settings:
"runs": nperf_max_runs,
"debug": nperf_debug,
"max_deviation": nperf_max_dev,
+ "msg_size": nperf_msg_size,
"netperf_opts": nperf_opts},
baseline = baseline,
timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
@@ -220,6 +225,7 @@ for setting in encryption_settings:
result_tcp.set_parameter('num_parallel', nperf_num_parallel)
result_tcp.set_parameter('encryption', setting)
+ result_tcp.set_parameter("nperf_msg_size", nperf_msg_size)
baseline = perf_api.get_baseline_of_result(result_tcp)
baseline = perfrepo_baseline_to_dict(baseline)
@@ -234,6 +240,7 @@ for setting in encryption_settings:
"runs": nperf_max_runs,
"debug": nperf_debug,
"max_deviation": nperf_max_dev,
+ "msg_size": nperf_msg_size,
"netperf_opts" : nperf_opts + " -6"},
baseline = baseline,
timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
@@ -254,6 +261,7 @@ for setting in encryption_settings:
result_udp.set_parameter('num_parallel', nperf_num_parallel)
result_udp.set_parameter('encryption', setting)
+ result_udp.set_parameter("nperf_msg_size", nperf_msg_size)
baseline = perf_api.get_baseline_of_result(result_udp)
baseline = perfrepo_baseline_to_dict(baseline)
@@ -268,6 +276,7 @@ for setting in encryption_settings:
"runs": nperf_max_runs,
"debug": nperf_debug,
"max_deviation": nperf_max_dev,
+ "msg_size": nperf_msg_size,
"netperf_opts" : nperf_opts + "-6"},
baseline = baseline,
timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
diff --git a/recipes/regression_tests/phase3/simple_macsec.xml b/recipes/regression_tests/phase3/simple_macsec.xml
index 2209af6..031314b 100644
--- a/recipes/regression_tests/phase3/simple_macsec.xml
+++ b/recipes/regression_tests/phase3/simple_macsec.xml
@@ -9,6 +9,7 @@
<alias name="nperf_num_parallel" value="1" />
<alias name="nperf_debug" value="0"/>
<alias name="nperf_max_dev" value="20%"/>
+ <alias name="nperf_msg_size" value="16000" />
<alias name="mapping_file" value="simple_macsec.mapping" />
<alias name="net" value="192.168.0" />
<alias name="driver" value=""/>
--
2.5.5
[PATCH] recipes: add mirrored virt tests for bridge and OVS
by Jan Tluka
These tests are a guest-to-guest version of the existing virtual *vlan-in-host and
*vlan-in-guest tests, which use a guest-to-baremetal topology.
Signed-off-by: Jan Tluka <jtluka(a)redhat.com>
---
.../virtual_bridge_vlan_in_guest_mirrored.README | 87 ++++
.../virtual_bridge_vlan_in_guest_mirrored.py | 466 ++++++++++++++++++++
.../virtual_bridge_vlan_in_guest_mirrored.xml | 110 +++++
.../virtual_bridge_vlan_in_host_mirrored.README | 87 ++++
.../phase1/virtual_bridge_vlan_in_host_mirrored.py | 464 ++++++++++++++++++++
.../virtual_bridge_vlan_in_host_mirrored.xml | 111 +++++
...irtual_ovs_bridge_vlan_in_guest_mirrored.README | 87 ++++
.../virtual_ovs_bridge_vlan_in_guest_mirrored.py | 469 +++++++++++++++++++++
.../virtual_ovs_bridge_vlan_in_guest_mirrored.xml | 104 +++++
...virtual_ovs_bridge_vlan_in_host_mirrored.README | 88 ++++
.../virtual_ovs_bridge_vlan_in_host_mirrored.py | 464 ++++++++++++++++++++
.../virtual_ovs_bridge_vlan_in_host_mirrored.xml | 98 +++++
12 files changed, 2635 insertions(+)
create mode 100644 recipes/regression_tests/phase1/virtual_bridge_vlan_in_guest_mirrored.README
create mode 100644 recipes/regression_tests/phase1/virtual_bridge_vlan_in_guest_mirrored.py
create mode 100644 recipes/regression_tests/phase1/virtual_bridge_vlan_in_guest_mirrored.xml
create mode 100644 recipes/regression_tests/phase1/virtual_bridge_vlan_in_host_mirrored.README
create mode 100644 recipes/regression_tests/phase1/virtual_bridge_vlan_in_host_mirrored.py
create mode 100644 recipes/regression_tests/phase1/virtual_bridge_vlan_in_host_mirrored.xml
create mode 100644 recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_guest_mirrored.README
create mode 100644 recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_guest_mirrored.py
create mode 100644 recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_guest_mirrored.xml
create mode 100644 recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_host_mirrored.README
create mode 100644 recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_host_mirrored.py
create mode 100644 recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_host_mirrored.xml
diff --git a/recipes/regression_tests/phase1/virtual_bridge_vlan_in_guest_mirrored.README b/recipes/regression_tests/phase1/virtual_bridge_vlan_in_guest_mirrored.README
new file mode 100644
index 0000000..e3f9ba0
--- /dev/null
+++ b/recipes/regression_tests/phase1/virtual_bridge_vlan_in_guest_mirrored.README
@@ -0,0 +1,87 @@
+Topology:
+
+ +----------+
+ | |
+ +-----------------+ switch +-----------------+
+ | | | |
+ | +----------+ |
+ | |
+ +-+-+ +-+-+
++------|nic|------+ +------|nic|------+
+| +-+-+ | | +-+-+ |
+| | | | | |
+| +----+ | | +----+ |
+| | | | | |
+| +-+-+ | | +-+-+ |
+| |br0| | | |br0| |
+| +-+-+ host1 | | host2 +-+-+ |
+| | | | | |
+| +-+-+ | | +-+-+ |
++-|tap|-----------+ +-----------|tap|-+
+ +-+-+ +-+-+
+ | |
+ |VLAN10 |VLAN10
+ | |
+ +-+-+ +-+-+
++-|nic|--+ +-|nic|--+
+| +---+ | | +---+ |
+| guest1 | | guest2 |
+| | | |
++--------+ +--------+
+
+Number of hosts: 4
+Host #1 description:
+ One ethernet device
+ One tap device
+ One bridge device, bridging ethernet and tap devices
+ Host for guest1 virtual machine
+Host #2 description:
+ One ethernet device
+ One tap device
+ One bridge device, bridging ethernet and tap devices
+ Host for guest2 virtual machine
+Guest #1 description:
+ One ethernet device with one VLAN subinterface
+Guest #2 description:
+ One ethernet device with one VLAN subinterface
+Test name:
+ virtual_bridge_vlan_in_guest_mirrored.py
+Test description:
+ Ping:
+ + count: 100
+ + interval: 0.1s
+ + between guest1's VLAN10 and guest2's VLAN10
+ Netperf:
+ + duration: 60s
+ + TCP_STREAM and UDP_STREAM
+ + between guest1's VLAN10 and guest2's VLAN10
+
+PerfRepo integration:
+ First, preparation in PerfRepo is required - you need to create Test objects
+ through the web interface that properly describe the individual Netperf
+ tests that this recipe runs. Don't forget to also add appropriate metrics.
+ For these Netperf tests it's always:
+ * throughput
+ * throughput_min
+ * throughput_max
+ * throughput_deviation
+
+ After that, to enable support for PerfRepo you need to create the file
+ virtual_bridge_vlan_in_guest_mirrored.mapping and define the following id mappings:
+ tcp_ipv4_id -> to store ipv4 TCP_STREAM Netperf test results, maps to TestUid of a PerfRepo Test object
+ tcp_ipv6_id -> to store ipv6 TCP_STREAM Netperf test results, maps to TestUid of a PerfRepo Test object
+ udp_ipv4_id -> to store ipv4 UDP_STREAM Netperf test results, maps to TestUid of a PerfRepo Test object
+ udp_ipv6_id -> to store ipv6 UDP_STREAM Netperf test results, maps to TestUid of a PerfRepo Test object
+
+ To enable result comparison against baselines you need to create a Report in
+ PerfRepo that will store the baseline. Set up the Report to only contain results
+ with the same hash tag and then add a new mapping to the mapping file, with
+ this format:
+ <some_hash> = <report_id>
+
+ The hash value is automatically generated during test execution and added
+ to each result stored in PerfRepo. To get the Report id you need to open
+ that report in your browser and find it in the URL.
+
+ When running this recipe you should also define the 'product_name' alias
+ (e.g. RHEL7) in order to tag the result object in PerfRepo.
diff --git a/recipes/regression_tests/phase1/virtual_bridge_vlan_in_guest_mirrored.py b/recipes/regression_tests/phase1/virtual_bridge_vlan_in_guest_mirrored.py
new file mode 100644
index 0000000..3d7dc1e
--- /dev/null
+++ b/recipes/regression_tests/phase1/virtual_bridge_vlan_in_guest_mirrored.py
@@ -0,0 +1,466 @@
+from lnst.Controller.Task import ctl
+from lnst.Controller.PerfRepoUtils import netperf_baseline_template
+from lnst.Controller.PerfRepoUtils import netperf_result_template
+
+from lnst.RecipeCommon.IRQ import pin_dev_irqs
+from lnst.RecipeCommon.PerfRepo import generate_perfrepo_comment
+from lnst.RecipeCommon.Offloads import parse_offloads
+
+# ------
+# SETUP
+# ------
+
+mapping_file = ctl.get_alias("mapping_file")
+perf_api = ctl.connect_PerfRepo(mapping_file)
+
+product_name = ctl.get_alias("product_name")
+
+h1 = ctl.get_host("host1")
+g1 = ctl.get_host("guest1")
+
+h2 = ctl.get_host("host2")
+g2 = ctl.get_host("guest2")
+
+g1.sync_resources(modules=["IcmpPing", "Icmp6Ping", "Netperf"])
+g2.sync_resources(modules=["IcmpPing", "Icmp6Ping", "Netperf"])
+
+# ------
+# TESTS
+# ------
+
+ipv = ctl.get_alias("ipv")
+netperf_duration = int(ctl.get_alias("netperf_duration"))
+nperf_reserve = int(ctl.get_alias("nperf_reserve"))
+nperf_confidence = ctl.get_alias("nperf_confidence")
+nperf_max_runs = int(ctl.get_alias("nperf_max_runs"))
+nperf_cpu_util = ctl.get_alias("nperf_cpu_util")
+nperf_mode = ctl.get_alias("nperf_mode")
+nperf_num_parallel = int(ctl.get_alias("nperf_num_parallel"))
+nperf_debug = ctl.get_alias("nperf_debug")
+nperf_max_dev = ctl.get_alias("nperf_max_dev")
+nperf_msg_size = ctl.get_alias("nperf_msg_size")
+pr_user_comment = ctl.get_alias("perfrepo_comment")
+offloads_alias = ctl.get_alias("offloads")
+nperf_protocols = ctl.get_alias("nperf_protocols")
+
+sctp_default_msg_size = "16K"
+
+if offloads_alias is not None:
+ offloads, offload_settings = parse_offloads(offloads_alias)
+else:
+ offloads = ["gro", "gso", "tso", "rx", "tx"]
+ offload_settings = [ [("gro", "on"), ("gso", "on"), ("tso", "on"), ("tx", "on"), ("rx", "on")],
+ [("gro", "off"), ("gso", "on"), ("tso", "on"), ("tx", "on"), ("rx", "on")],
+ [("gro", "on"), ("gso", "off"), ("tso", "off"), ("tx", "on"), ("rx", "on")],
+ [("gro", "on"), ("gso", "on"), ("tso", "off"), ("tx", "off"), ("rx", "on")],
+ [("gro", "on"), ("gso", "on"), ("tso", "on"), ("tx", "on"), ("rx", "off")]]
+
+pr_comment = generate_perfrepo_comment([h1, g1, h2, g2], pr_user_comment)
+
+mtu = ctl.get_alias("mtu")
+
+g1_vlan10 = g1.get_interface("vlan10")
+g2_vlan10 = g2.get_interface("vlan10")
+g1_guestnic = g1.get_interface("guestnic")
+g2_guestnic = g2.get_interface("guestnic")
+h1_nic = h1.get_interface("nic")
+h2_nic = h2.get_interface("nic")
+
+h1.run("service irqbalance stop")
+h2.run("service irqbalance stop")
+
+# this will pin devices irqs to cpu #0
+for m, d in [ (h1, h1_nic), (h2, h2_nic) ]:
+ pin_dev_irqs(m, d, 0)
+
+ping_mod = ctl.get_module("IcmpPing",
+ options={
+ "addr" : g2_vlan10.get_ip(0),
+ "count" : 100,
+ "iface" : g1_vlan10.get_devname(),
+ "interval" : 0.1
+ })
+
+ping_mod6 = ctl.get_module("Icmp6Ping",
+ options={
+ "addr" : g2_vlan10.get_ip(1),
+ "count" : 100,
+ "iface" : g1_vlan10.get_ip(1),
+ "interval" : 0.1
+ })
+
+netperf_srv = ctl.get_module("Netperf",
+ options={
+ "role" : "server",
+ "bind" : g1_vlan10.get_ip(0)
+ })
+
+netperf_srv6 = ctl.get_module("Netperf",
+ options={
+ "role" : "server",
+ "bind" : g1_vlan10.get_ip(1),
+ "netperf_opts" : " -6",
+ })
+
+p_opts = "-L %s" % (g2_vlan10.get_ip(0))
+p_opts6 = "-L %s -6" % (g2_vlan10.get_ip(1))
+
+netperf_cli_tcp = ctl.get_module("Netperf",
+ options={
+ "role" : "client",
+ "netperf_server" : g1_vlan10.get_ip(0),
+ "duration" : netperf_duration,
+ "testname" : "TCP_STREAM",
+ "confidence" : nperf_confidence,
+ "cpu_util" : nperf_cpu_util,
+ "runs": nperf_max_runs,
+ "netperf_opts" : p_opts,
+ "debug" : nperf_debug,
+ "max_deviation" : nperf_max_dev
+ })
+
+netperf_cli_udp = ctl.get_module("Netperf",
+ options={
+ "role" : "client",
+ "netperf_server" : g1_vlan10.get_ip(0),
+ "duration" : netperf_duration,
+ "testname" : "UDP_STREAM",
+ "confidence" : nperf_confidence,
+ "cpu_util" : nperf_cpu_util,
+ "runs": nperf_max_runs,
+ "netperf_opts" : p_opts,
+ "debug" : nperf_debug,
+ "max_deviation" : nperf_max_dev
+ })
+
+netperf_cli_tcp6 = ctl.get_module("Netperf",
+ options={
+ "role" : "client",
+ "netperf_server" :
+ g1_vlan10.get_ip(1),
+ "duration" : netperf_duration,
+ "testname" : "TCP_STREAM",
+ "confidence" : nperf_confidence,
+ "cpu_util" : nperf_cpu_util,
+ "runs": nperf_max_runs,
+ "netperf_opts" : p_opts6,
+ "debug" : nperf_debug,
+ "max_deviation" : nperf_max_dev
+ })
+
+netperf_cli_udp6 = ctl.get_module("Netperf",
+ options={
+ "role" : "client",
+ "netperf_server" :
+ g1_vlan10.get_ip(1),
+ "duration" : netperf_duration,
+ "testname" : "UDP_STREAM",
+ "confidence" : nperf_confidence,
+ "cpu_util" : nperf_cpu_util,
+ "runs": nperf_max_runs,
+ "netperf_opts" : p_opts6,
+ "debug" : nperf_debug,
+ "max_deviation" : nperf_max_dev
+ })
+
+netperf_cli_sctp = ctl.get_module("Netperf",
+ options={
+ "role" : "client",
+ "netperf_server" :
+ g1_vlan10.get_ip(0),
+ "duration" : netperf_duration,
+ "testname" : "SCTP_STREAM",
+ "confidence" : nperf_confidence,
+ "cpu_util" : nperf_cpu_util,
+ "runs" : nperf_max_runs,
+ "netperf_opts" : p_opts,
+ "msg_size" : sctp_default_msg_size,
+ "debug" : nperf_debug,
+ "max_deviation" : nperf_max_dev
+ })
+
+netperf_cli_sctp6 = ctl.get_module("Netperf",
+ options={
+ "role" : "client",
+ "netperf_server" :
+ g1_vlan10.get_ip(1),
+ "duration" : netperf_duration,
+ "testname" : "SCTP_STREAM",
+ "confidence" : nperf_confidence,
+ "cpu_util" : nperf_cpu_util,
+ "runs" : nperf_max_runs,
+ "netperf_opts" : p_opts6,
+ "msg_size" : sctp_default_msg_size,
+ "debug" : nperf_debug,
+ "max_deviation" : nperf_max_dev
+ })
+
+if nperf_mode == "multi":
+ netperf_cli_tcp.unset_option("confidence")
+ netperf_cli_udp.unset_option("confidence")
+ netperf_cli_sctp.unset_option("confidence")
+ netperf_cli_tcp6.unset_option("confidence")
+ netperf_cli_udp6.unset_option("confidence")
+ netperf_cli_sctp6.unset_option("confidence")
+
+ netperf_cli_tcp.update_options({"num_parallel": nperf_num_parallel})
+ netperf_cli_udp.update_options({"num_parallel": nperf_num_parallel})
+ netperf_cli_sctp.update_options({"num_parallel": nperf_num_parallel})
+ netperf_cli_tcp6.update_options({"num_parallel": nperf_num_parallel})
+ netperf_cli_udp6.update_options({"num_parallel": nperf_num_parallel})
+ netperf_cli_sctp6.update_options({"num_parallel": nperf_num_parallel})
+
+ # we have to use multiqueue qdisc to get appropriate data
+ h1.run("tc qdisc replace dev %s root mq" % h1_nic.get_devname())
+ h2.run("tc qdisc replace dev %s root mq" % h2_nic.get_devname())
+
+if nperf_msg_size is not None:
+ netperf_cli_tcp.update_options({"msg_size" : nperf_msg_size})
+ netperf_cli_udp.update_options({"msg_size" : nperf_msg_size})
+ netperf_cli_sctp.update_options({"msg_size" : nperf_msg_size})
+ netperf_cli_tcp6.update_options({"msg_size" : nperf_msg_size})
+ netperf_cli_udp6.update_options({"msg_size" : nperf_msg_size})
+ netperf_cli_sctp6.update_options({"msg_size" : nperf_msg_size})
+
+# configure mtu
+h1.get_interface("nic").set_mtu(mtu)
+h1.get_interface("tap").set_mtu(mtu)
+h1.get_interface("br").set_mtu(mtu)
+
+g1.get_interface("guestnic").set_mtu(mtu)
+g1.get_interface("vlan10").set_mtu(mtu)
+
+h2.get_interface("nic").set_mtu(mtu)
+h2.get_interface("tap").set_mtu(mtu)
+h2.get_interface("br").set_mtu(mtu)
+
+g2.get_interface("guestnic").set_mtu(mtu)
+g2.get_interface("vlan10").set_mtu(mtu)
+
+ctl.wait(15)
+
+for setting in offload_settings:
+ dev_features = ""
+ for offload in setting:
+ dev_features += " %s %s" % (offload[0], offload[1])
+ g1.run("ethtool -K %s %s" % (g1_guestnic.get_devname(), dev_features))
+ g2.run("ethtool -K %s %s" % (g2_guestnic.get_devname(), dev_features))
+ h1.run("ethtool -K %s %s" % (h1_nic.get_devname(), dev_features))
+ h2.run("ethtool -K %s %s" % (h2_nic.get_devname(), dev_features))
+
+ if ("rx", "off") in setting:
+ # when rx offload is turned off some of the cards might get reset
+ # and link goes down, so wait a few seconds until NIC is ready
+ ctl.wait(15)
+
+ if ipv in [ 'ipv4', 'both' ]:
+ g1.run(ping_mod)
+
+ server_proc = g1.run(netperf_srv, bg=True)
+ ctl.wait(2)
+
+ if nperf_protocols.find("tcp") > -1:
+ # prepare PerfRepo result for tcp
+ result_tcp = perf_api.new_result("tcp_ipv4_id",
+ "tcp_ipv4_result",
+ hash_ignore=['kernel_release',
+ 'redhat_release',
+ r'guest\d+\.hostname',
+ r'guest\d+\..*hwaddr',
+ r'host\d+\..*tap\d*\.hwaddr',
+ r'host\d+\..*tap\d*\.devname'])
+ for offload in setting:
+ result_tcp.set_parameter(offload[0], offload[1])
+
+ if nperf_msg_size is not None:
+ result_tcp.set_parameter("nperf_msg_size", nperf_msg_size)
+
+ result_tcp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_tcp.add_tag("multithreaded")
+ result_tcp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_tcp)
+ netperf_baseline_template(netperf_cli_tcp, baseline)
+
+ tcp_res_data = g2.run(netperf_cli_tcp,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_tcp, tcp_res_data)
+ result_tcp.set_comment(pr_comment)
+ perf_api.save_result(result_tcp)
+
+ if nperf_protocols.find("udp") > -1 and ("gro", "off") not in setting:
+ # prepare PerfRepo result for udp
+ result_udp = perf_api.new_result("udp_ipv4_id",
+ "udp_ipv4_result",
+ hash_ignore=['kernel_release',
+ 'redhat_release',
+ r'guest\d+\.hostname',
+ r'guest\d+\..*hwaddr',
+ r'host\d+\..*tap\d*\.hwaddr',
+ r'host\d+\..*tap\d*\.devname'])
+ for offload in setting:
+ result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_msg_size is not None:
+ result_udp.set_parameter("nperf_msg_size", nperf_msg_size)
+
+ result_udp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ netperf_baseline_template(netperf_cli_udp, baseline)
+
+ udp_res_data = g2.run(netperf_cli_udp,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ result_udp.set_comment(pr_comment)
+ perf_api.save_result(result_udp)
+
+ # for SCTP only gso offload on/off
+ if (nperf_protocols.find("sctp") > -1 and
+ (len([val for val in setting if val[1] == 'off']) == 0 or
+ ('gso', 'off') in setting)):
+ result_sctp = perf_api.new_result("sctp_ipv4_id",
+ "sctp_ipv4_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release',
+ r'guest\d+\.hostname',
+ r'guest\d+\..*hwaddr',
+ r'host\d+\..*tap\d*\.hwaddr',
+ r'host\d+\..*tap\d*\.devname'])
+ for offload in setting:
+ result_sctp.set_parameter(offload[0], offload[1])
+
+ result_sctp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_sctp.add_tag("multithreaded")
+ result_sctp.set_parameter("num_parallel", nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_sctp)
+ netperf_baseline_template(netperf_cli_sctp, baseline)
+ sctp_res_data = g2.run(netperf_cli_sctp,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_sctp, sctp_res_data)
+ result_sctp.set_comment(pr_comment)
+ perf_api.save_result(result_sctp)
+
+ server_proc.intr()
+
+ if ipv in [ 'ipv6', 'both' ]:
+ g1.run(ping_mod6)
+
+ server_proc = g1.run(netperf_srv6, bg=True)
+ ctl.wait(2)
+
+ if nperf_protocols.find("tcp") > -1:
+ # prepare PerfRepo result for tcp ipv6
+ result_tcp = perf_api.new_result("tcp_ipv6_id",
+ "tcp_ipv6_result",
+ hash_ignore=['kernel_release',
+ 'redhat_release',
+ r'guest\d+\.hostname',
+ r'guest\d+\..*hwaddr',
+ r'host\d+\..*tap\d*\.hwaddr',
+ r'host\d+\..*tap\d*\.devname'])
+ for offload in setting:
+ result_tcp.set_parameter(offload[0], offload[1])
+
+ if nperf_msg_size is not None:
+ result_tcp.set_parameter("nperf_msg_size", nperf_msg_size)
+
+ result_tcp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_tcp.add_tag("multithreaded")
+ result_tcp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_tcp)
+ netperf_baseline_template(netperf_cli_tcp6, baseline)
+
+ tcp_res_data = g2.run(netperf_cli_tcp6,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_tcp, tcp_res_data)
+ result_tcp.set_comment(pr_comment)
+ perf_api.save_result(result_tcp)
+
+ if nperf_protocols.find("udp") > -1 and ("gro", "off") not in setting:
+ # prepare PerfRepo result for udp ipv6
+ result_udp = perf_api.new_result("udp_ipv6_id",
+ "udp_ipv6_result",
+ hash_ignore=['kernel_release',
+ 'redhat_release',
+ r'guest\d+\.hostname',
+ r'guest\d+\..*hwaddr',
+ r'host\d+\..*tap\d*\.hwaddr',
+ r'host\d+\..*tap\d*\.devname'])
+ for offload in setting:
+ result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_msg_size is not None:
+ result_udp.set_parameter("nperf_msg_size", nperf_msg_size)
+
+ result_udp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ netperf_baseline_template(netperf_cli_udp6, baseline)
+
+ udp_res_data = g2.run(netperf_cli_udp6,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ result_udp.set_comment(pr_comment)
+ perf_api.save_result(result_udp)
+
+        # SCTP is only tested with all offloads on, or with gso turned off
+ if (nperf_protocols.find("sctp") > -1 and
+ (len([val for val in setting if val[1] == 'off']) == 0 or
+ ('gso', 'off') in setting)):
+ result_sctp = perf_api.new_result("sctp_ipv6_id",
+ "sctp_ipv6_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release',
+ r'guest\d+\.hostname',
+ r'guest\d+\..*hwaddr',
+ r'host\d+\..*tap\d*\.hwaddr',
+ r'host\d+\..*tap\d*\.devname'])
+ for offload in setting:
+ result_sctp.set_parameter(offload[0], offload[1])
+
+ result_sctp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_sctp.add_tag("multithreaded")
+ result_sctp.set_parameter("num_parallel", nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_sctp)
+            netperf_baseline_template(netperf_cli_sctp6, baseline)
+ sctp_res_data = g2.run(netperf_cli_sctp6,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_sctp, sctp_res_data)
+ result_sctp.set_comment(pr_comment)
+ perf_api.save_result(result_sctp)
+
+ server_proc.intr()
+
+#reset offload states
+dev_features = ""
+for offload in offloads:
+ dev_features += " %s %s" % (offload, "on")
+g1.run("ethtool -K %s %s" % (g1_guestnic.get_devname(), dev_features))
+g2.run("ethtool -K %s %s" % (g2_guestnic.get_devname(), dev_features))
+h1.run("ethtool -K %s %s" % (h1_nic.get_devname(), dev_features))
+h2.run("ethtool -K %s %s" % (h2_nic.get_devname(), dev_features))
+
+h1.run("service irqbalance start")
+h2.run("service irqbalance start")
diff --git a/recipes/regression_tests/phase1/virtual_bridge_vlan_in_guest_mirrored.xml b/recipes/regression_tests/phase1/virtual_bridge_vlan_in_guest_mirrored.xml
new file mode 100644
index 0000000..9d91555
--- /dev/null
+++ b/recipes/regression_tests/phase1/virtual_bridge_vlan_in_guest_mirrored.xml
@@ -0,0 +1,110 @@
+<lnstrecipe>
+ <define>
+ <alias name="ipv" value="both" />
+ <alias name="netperf_duration" value="60" />
+ <alias name="nperf_reserve" value="20" />
+ <alias name="nperf_confidence" value="99,5" />
+ <alias name="nperf_max_runs" value="5"/>
+ <alias name="nperf_mode" value="default"/>
+ <alias name="nperf_num_parallel" value="2"/>
+ <alias name="nperf_debug" value="0"/>
+ <alias name="nperf_max_dev" value="20%"/>
+ <alias name="nperf_protocols" value="tcp udp sctp"/>
+ <alias name="mtu" value="1500" />
+ <alias name="mapping_file" value="virtual_bridge_vlan_in_guest_mirrored.mapping" />
+ <alias name="vlan10_net" value="192.168.10"/>
+ <alias name="vlan10_tag" value="10"/>
+ <alias name="driver" value="ixgbe"/>
+ </define>
+ <network>
+ <host id="host1">
+ <params>
+ <param name="machine_type" value="baremetal"/>
+ </params>
+ <interfaces>
+ <eth id="nic" label="to_switch">
+ <params>
+ <param name="driver" value="{$driver}" />
+ </params>
+ </eth>
+ <eth id="tap" label="to_guest1" />
+ <bridge id="br">
+ <slaves>
+ <slave id="tap" />
+ <slave id="nic" />
+ </slaves>
+ <addresses>
+ <address>{$vlan10_net}.1/24</address>
+ </addresses>
+ </bridge>
+ </interfaces>
+ </host>
+ <host id="guest1">
+ <interfaces>
+ <eth id="guestnic" label="to_guest1">
+ <params>
+ <param name="driver" value="virtio" />
+ </params>
+ </eth>
+ <vlan id="vlan10">
+ <options>
+ <option name="vlan_tci" value="{$vlan10_tag}" />
+ </options>
+ <slaves>
+ <slave id="guestnic" />
+ </slaves>
+ <addresses>
+ <address>{$vlan10_net}.10/24</address>
+ <address>fc00:0:0:10::10/64</address>
+ </addresses>
+ </vlan>
+ </interfaces>
+ </host>
+
+ <host id="host2">
+ <params>
+ <param name="machine_type" value="baremetal"/>
+ </params>
+ <interfaces>
+ <eth id="nic" label="to_switch">
+ <params>
+ <param name="driver" value="{$driver}" />
+ </params>
+ </eth>
+ <eth id="tap" label="to_guest2" />
+ <bridge id="br">
+ <slaves>
+ <slave id="tap" />
+ <slave id="nic" />
+ </slaves>
+ <addresses>
+ <address>{$vlan10_net}.2/24</address>
+ </addresses>
+ </bridge>
+ </interfaces>
+ </host>
+ <host id="guest2">
+ <interfaces>
+ <eth id="guestnic" label="to_guest2">
+ <params>
+ <param name="driver" value="virtio" />
+ </params>
+ </eth>
+ <vlan id="vlan10">
+ <options>
+ <option name="vlan_tci" value="{$vlan10_tag}" />
+ </options>
+ <slaves>
+ <slave id="guestnic" />
+ </slaves>
+ <addresses>
+ <address>{$vlan10_net}.11/24</address>
+ <address>fc00:0:0:10::11/64</address>
+ </addresses>
+ </vlan>
+ </interfaces>
+ </host>
+ </network>
+
+ <task python="virtual_bridge_vlan_in_guest_mirrored.py" />
+</lnstrecipe>
diff --git a/recipes/regression_tests/phase1/virtual_bridge_vlan_in_host_mirrored.README b/recipes/regression_tests/phase1/virtual_bridge_vlan_in_host_mirrored.README
new file mode 100644
index 0000000..14cdf71
--- /dev/null
+++ b/recipes/regression_tests/phase1/virtual_bridge_vlan_in_host_mirrored.README
@@ -0,0 +1,87 @@
+Topology:
+
+ +----------+
+ | |
+ +-----------------+ switch +-----------------+
+ | | | |
+ | +----------+ |
+ | |
+ +-+-+ +-+-+
++------|nic|------+ +------|nic|------+
+| +-+-+ | | +-+-+ |
+| VLAN10 | | | | VLAN10 |
+| +----+ | | +----+ |
+| | | | | |
+| +-+-+ | | +-+-+ |
+| |br0| | | |br0| |
+| +-+-+ host1 | | host2 +-+-+ |
+| | | | | |
+| +-+-+ | | +-+-+ |
++-|tap|-----------+ +-----------|tap|-+
+ +-+-+ +-+-+
+ | |
+ | |
+ | |
+ +-+-+ +-+-+
++-|nic|--+ +-|nic|--+
+| +---+ | | +---+ |
+| guest1 | | guest2 |
+| | | |
++--------+ +--------+
+
+Number of hosts: 4
+Host #1 description:
+ One ethernet device with one VLAN subinterface
+ One tap device
+ One bridge device, bridging VLAN and tap devices
+ Host for guest1 virtual machine
+Host #2 description:
+ One ethernet device with one VLAN subinterface
+ One tap device
+ One bridge device, bridging VLAN and tap devices
+ Host for guest2 virtual machine
+Guest #1 description:
+ One ethernet device
+Guest #2 description:
+ One ethernet device
+Test name:
+ virtual_bridge_vlan_in_host_mirrored.py
+Test description:
+ Ping:
+ + count: 100
+ + interval: 0.1s
+ + between guest1's NIC and guest2's NIC
+ Netperf:
+ + duration: 60s
+ + TCP_STREAM, UDP_STREAM and SCTP_STREAM
+ + between guest1's NIC and guest2's NIC
+
+PerfRepo integration:
+ First, preparation in PerfRepo is required - you need to create Test objects
+ through the web interface that properly describe the individual Netperf
+ tests that this recipe runs. Don't forget to also add appropriate metrics.
+ For these Netperf tests it's always:
+ * throughput
+ * throughput_min
+ * throughput_max
+ * throughput_deviation
+
+ After that, to enable support for PerfRepo you need to create the file
+ virtual_bridge_vlan_in_host_mirrored.mapping and define the following id mappings:
+ tcp_ipv4_id -> to store ipv4 TCP_STREAM Netperf test results, maps to TestUid of a PerfRepo Test object
+ tcp_ipv6_id -> to store ipv6 TCP_STREAM Netperf test results, maps to TestUid of a PerfRepo Test object
+ udp_ipv4_id -> to store ipv4 UDP_STREAM Netperf test results, maps to TestUid of a PerfRepo Test object
+ udp_ipv6_id -> to store ipv6 UDP_STREAM Netperf test results, maps to TestUid of a PerfRepo Test object
+ sctp_ipv4_id -> to store ipv4 SCTP_STREAM Netperf test results, maps to TestUid of a PerfRepo Test object
+ sctp_ipv6_id -> to store ipv6 SCTP_STREAM Netperf test results, maps to TestUid of a PerfRepo Test object
+
+ To enable result comparison against baselines you need to create a Report in
+ PerfRepo that will store the baseline. Set up the Report to only contain results
+ with the same hash tag and then add a new mapping to the mapping file, with
+ this format:
+ <some_hash> = <report_id>
+
+ The hash value is automatically generated during test execution and added
+ to each result stored in PerfRepo. To get the Report id you need to open
+ that report in your browser and find it in the URL.
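+
+ For illustration only, assuming the same 'key = value' line format that the
+ baseline mapping above uses, a finished virtual_bridge_vlan_in_host_mirrored.mapping
+ file could look something like this (the TestUids and the report id below are
+ made-up placeholders, not values from a real PerfRepo instance):
+ tcp_ipv4_id = example_tcp_ipv4_test_uid
+ tcp_ipv6_id = example_tcp_ipv6_test_uid
+ udp_ipv4_id = example_udp_ipv4_test_uid
+ udp_ipv6_id = example_udp_ipv6_test_uid
+ sctp_ipv4_id = example_sctp_ipv4_test_uid
+ sctp_ipv6_id = example_sctp_ipv6_test_uid
+ 0123456789abcdef = 42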
+
+ When running this recipe you should also define the 'product_name' alias
+ (e.g. RHEL7) in order to tag the result object in PerfRepo.
diff --git a/recipes/regression_tests/phase1/virtual_bridge_vlan_in_host_mirrored.py b/recipes/regression_tests/phase1/virtual_bridge_vlan_in_host_mirrored.py
new file mode 100644
index 0000000..5a06fcd
--- /dev/null
+++ b/recipes/regression_tests/phase1/virtual_bridge_vlan_in_host_mirrored.py
@@ -0,0 +1,464 @@
+from lnst.Controller.Task import ctl
+from lnst.Controller.PerfRepoUtils import netperf_baseline_template
+from lnst.Controller.PerfRepoUtils import netperf_result_template
+
+from lnst.RecipeCommon.IRQ import pin_dev_irqs
+from lnst.RecipeCommon.PerfRepo import generate_perfrepo_comment
+from lnst.RecipeCommon.Offloads import parse_offloads
+
+# ------
+# SETUP
+# ------
+
+mapping_file = ctl.get_alias("mapping_file")
+perf_api = ctl.connect_PerfRepo(mapping_file)
+
+product_name = ctl.get_alias("product_name")
+
+h1 = ctl.get_host("host1")
+g1 = ctl.get_host("guest1")
+
+h2 = ctl.get_host("host2")
+g2 = ctl.get_host("guest2")
+
+g1.sync_resources(modules=["IcmpPing", "Icmp6Ping", "Netperf"])
+g2.sync_resources(modules=["IcmpPing", "Icmp6Ping", "Netperf"])
+
+# ------
+# TESTS
+# ------
+
+ipv = ctl.get_alias("ipv")
+netperf_duration = int(ctl.get_alias("netperf_duration"))
+nperf_reserve = int(ctl.get_alias("nperf_reserve"))
+nperf_confidence = ctl.get_alias("nperf_confidence")
+nperf_max_runs = int(ctl.get_alias("nperf_max_runs"))
+nperf_cpu_util = ctl.get_alias("nperf_cpu_util")
+nperf_mode = ctl.get_alias("nperf_mode")
+nperf_num_parallel = int(ctl.get_alias("nperf_num_parallel"))
+nperf_debug = ctl.get_alias("nperf_debug")
+nperf_max_dev = ctl.get_alias("nperf_max_dev")
+nperf_msg_size = ctl.get_alias("nperf_msg_size")
+pr_user_comment = ctl.get_alias("perfrepo_comment")
+offloads_alias = ctl.get_alias("offloads")
+nperf_protocols = ctl.get_alias("nperf_protocols")
+
+sctp_default_msg_size = "16K"
+
+if offloads_alias is not None:
+ offloads, offload_settings = parse_offloads(offloads_alias)
+else:
+ offloads = ["gro", "gso", "tso", "rx", "tx"]
+ offload_settings = [ [("gro", "on"), ("gso", "on"), ("tso", "on"), ("tx", "on"), ("rx", "on")],
+ [("gro", "off"), ("gso", "on"), ("tso", "on"), ("tx", "on"), ("rx", "on")],
+ [("gro", "on"), ("gso", "off"), ("tso", "off"), ("tx", "on"), ("rx", "on")],
+ [("gro", "on"), ("gso", "on"), ("tso", "off"), ("tx", "off"), ("rx", "on")],
+ [("gro", "on"), ("gso", "on"), ("tso", "on"), ("tx", "on"), ("rx", "off")]]
+
+pr_comment = generate_perfrepo_comment([h1, g1, h2, g2], pr_user_comment)
+
+mtu = ctl.get_alias("mtu")
+
+g1_guestnic = g1.get_interface("guestnic")
+g2_guestnic = g2.get_interface("guestnic")
+h1_nic = h1.get_interface("nic")
+h2_nic = h2.get_interface("nic")
+
+h1.run("service irqbalance stop")
+h2.run("service irqbalance stop")
+
+# this will pin devices irqs to cpu #0
+for m, d in [ (h1, h1_nic), (h2, h2_nic) ]:
+ pin_dev_irqs(m, d, 0)
+
+ping_mod = ctl.get_module("IcmpPing",
+ options={
+ "addr" : g2_guestnic.get_ip(0),
+ "count" : 100,
+ "iface" : g1_guestnic.get_devname(),
+ "interval" : 0.1
+ })
+
+ping_mod6 = ctl.get_module("Icmp6Ping",
+ options={
+ "addr" : g2_guestnic.get_ip(1),
+ "count" : 100,
+ "iface" : g1_guestnic.get_ip(1),
+ "interval" : 0.1
+ })
+
+netperf_srv = ctl.get_module("Netperf",
+ options={
+ "role" : "server",
+ "bind" : g1_guestnic.get_ip(0)
+ })
+
+netperf_srv6 = ctl.get_module("Netperf",
+ options={
+ "role" : "server",
+ "bind" : g1_guestnic.get_ip(1),
+ "netperf_opts" : " -6",
+ })
+
+p_opts = "-L %s" % (g2_guestnic.get_ip(0))
+p_opts6 = "-L %s -6" % (g2_guestnic.get_ip(1))
+
+netperf_cli_tcp = ctl.get_module("Netperf",
+ options={
+ "role" : "client",
+ "netperf_server" : g1_guestnic.get_ip(0),
+ "duration" : netperf_duration,
+ "testname" : "TCP_STREAM",
+ "confidence" : nperf_confidence,
+ "cpu_util" : nperf_cpu_util,
+ "runs": nperf_max_runs,
+ "netperf_opts" : p_opts,
+ "debug" : nperf_debug,
+ "max_deviation" : nperf_max_dev
+ })
+
+netperf_cli_udp = ctl.get_module("Netperf",
+ options={
+ "role" : "client",
+ "netperf_server" : g1_guestnic.get_ip(0),
+ "duration" : netperf_duration,
+ "testname" : "UDP_STREAM",
+ "confidence" : nperf_confidence,
+ "cpu_util" : nperf_cpu_util,
+ "runs": nperf_max_runs,
+ "netperf_opts" : p_opts,
+ "debug" : nperf_debug,
+ "max_deviation" : nperf_max_dev
+ })
+
+netperf_cli_tcp6 = ctl.get_module("Netperf",
+ options={
+ "role" : "client",
+ "netperf_server" :
+ g1_guestnic.get_ip(1),
+ "duration" : netperf_duration,
+ "testname" : "TCP_STREAM",
+ "confidence" : nperf_confidence,
+ "cpu_util" : nperf_cpu_util,
+ "runs": nperf_max_runs,
+ "netperf_opts" : p_opts6,
+ "debug" : nperf_debug,
+ "max_deviation" : nperf_max_dev
+ })
+
+netperf_cli_udp6 = ctl.get_module("Netperf",
+ options={
+ "role" : "client",
+ "netperf_server" :
+ g1_guestnic.get_ip(1),
+ "duration" : netperf_duration,
+ "testname" : "UDP_STREAM",
+ "confidence" : nperf_confidence,
+ "cpu_util" : nperf_cpu_util,
+ "runs": nperf_max_runs,
+ "netperf_opts" : p_opts6,
+ "debug" : nperf_debug,
+ "max_deviation" : nperf_max_dev
+ })
+
+netperf_cli_sctp = ctl.get_module("Netperf",
+ options={
+ "role" : "client",
+ "netperf_server" :
+ g1_guestnic.get_ip(0),
+ "duration" : netperf_duration,
+ "testname" : "SCTP_STREAM",
+ "confidence" : nperf_confidence,
+ "cpu_util" : nperf_cpu_util,
+ "runs" : nperf_max_runs,
+ "netperf_opts" : p_opts,
+ "msg_size" : sctp_default_msg_size,
+ "debug" : nperf_debug,
+ "max_deviation" : nperf_max_dev
+ })
+
+netperf_cli_sctp6 = ctl.get_module("Netperf",
+ options={
+ "role" : "client",
+ "netperf_server" :
+ g1_guestnic.get_ip(1),
+ "duration" : netperf_duration,
+ "testname" : "SCTP_STREAM",
+ "confidence" : nperf_confidence,
+ "cpu_util" : nperf_cpu_util,
+ "runs" : nperf_max_runs,
+ "netperf_opts" : p_opts6,
+ "msg_size" : sctp_default_msg_size,
+ "debug" : nperf_debug,
+ "max_deviation" : nperf_max_dev
+ })
+
+if nperf_mode == "multi":
+ netperf_cli_tcp.unset_option("confidence")
+ netperf_cli_udp.unset_option("confidence")
+ netperf_cli_sctp.unset_option("confidence")
+ netperf_cli_tcp6.unset_option("confidence")
+ netperf_cli_udp6.unset_option("confidence")
+ netperf_cli_sctp6.unset_option("confidence")
+
+ netperf_cli_tcp.update_options({"num_parallel": nperf_num_parallel})
+ netperf_cli_udp.update_options({"num_parallel": nperf_num_parallel})
+ netperf_cli_sctp.update_options({"num_parallel": nperf_num_parallel})
+ netperf_cli_tcp6.update_options({"num_parallel": nperf_num_parallel})
+ netperf_cli_udp6.update_options({"num_parallel": nperf_num_parallel})
+ netperf_cli_sctp6.update_options({"num_parallel": nperf_num_parallel})
+
+ # we have to use multiqueue qdisc to get appropriate data
+ h1.run("tc qdisc replace dev %s root mq" % h1_nic.get_devname())
+ h2.run("tc qdisc replace dev %s root mq" % h2_nic.get_devname())
+
+if nperf_msg_size is not None:
+ netperf_cli_tcp.update_options({"msg_size" : nperf_msg_size})
+ netperf_cli_udp.update_options({"msg_size" : nperf_msg_size})
+ netperf_cli_sctp.update_options({"msg_size" : nperf_msg_size})
+ netperf_cli_tcp6.update_options({"msg_size" : nperf_msg_size})
+ netperf_cli_udp6.update_options({"msg_size" : nperf_msg_size})
+ netperf_cli_sctp6.update_options({"msg_size" : nperf_msg_size})
+
+# configure mtu
+h1.get_interface("nic").set_mtu(mtu)
+h1.get_interface("tap").set_mtu(mtu)
+h1.get_interface("vlan10").set_mtu(mtu)
+h1.get_interface("br").set_mtu(mtu)
+
+g1.get_interface("guestnic").set_mtu(mtu)
+
+h2.get_interface("nic").set_mtu(mtu)
+h2.get_interface("tap").set_mtu(mtu)
+h2.get_interface("vlan10").set_mtu(mtu)
+h2.get_interface("br").set_mtu(mtu)
+
+g2.get_interface("guestnic").set_mtu(mtu)
+
+ctl.wait(15)
+
+for setting in offload_settings:
+ dev_features = ""
+ for offload in setting:
+ dev_features += " %s %s" % (offload[0], offload[1])
+ g1.run("ethtool -K %s %s" % (g1_guestnic.get_devname(), dev_features))
+ h1.run("ethtool -K %s %s" % (h1_nic.get_devname(), dev_features))
+ g2.run("ethtool -K %s %s" % (g2_guestnic.get_devname(), dev_features))
+ h2.run("ethtool -K %s %s" % (h2_nic.get_devname(), dev_features))
+
+ if ("rx", "off") in setting:
+ # when rx offload is turned off some of the cards might get reset
+ # and link goes down, so wait a few seconds until NIC is ready
+ ctl.wait(15)
+
+ if ipv in [ 'ipv4', 'both' ]:
+ g1.run(ping_mod)
+
+ server_proc = g1.run(netperf_srv, bg=True)
+ ctl.wait(2)
+
+ if nperf_protocols.find("tcp") > -1:
+ # prepare PerfRepo result for tcp
+ result_tcp = perf_api.new_result("tcp_ipv4_id",
+ "tcp_ipv4_result",
+ hash_ignore=['kernel_release',
+ 'redhat_release',
+ r'guest\d+\.hostname',
+ r'guest\d+\..*hwaddr',
+ r'host\d+\..*tap\d*\.hwaddr',
+ r'host\d+\..*tap\d*\.devname'])
+ for offload in setting:
+ result_tcp.set_parameter(offload[0], offload[1])
+
+ if nperf_msg_size is not None:
+ result_tcp.set_parameter("nperf_msg_size", nperf_msg_size)
+
+ result_tcp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_tcp.add_tag("multithreaded")
+ result_tcp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_tcp)
+ netperf_baseline_template(netperf_cli_tcp, baseline)
+
+ tcp_res_data = g2.run(netperf_cli_tcp,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_tcp, tcp_res_data)
+ result_tcp.set_comment(pr_comment)
+ perf_api.save_result(result_tcp)
+
+ if nperf_protocols.find("udp") > -1 and ("gro", "off") not in setting:
+ # prepare PerfRepo result for udp
+ result_udp = perf_api.new_result("udp_ipv4_id",
+ "udp_ipv4_result",
+ hash_ignore=['kernel_release',
+ 'redhat_release',
+ r'guest\d+\.hostname',
+ r'guest\d+\..*hwaddr',
+ r'host\d+\..*tap\d*\.hwaddr',
+ r'host\d+\..*tap\d*\.devname'])
+ for offload in setting:
+ result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_msg_size is not None:
+ result_udp.set_parameter("nperf_msg_size", nperf_msg_size)
+
+ result_udp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ netperf_baseline_template(netperf_cli_udp, baseline)
+
+ udp_res_data = g2.run(netperf_cli_udp,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ result_udp.set_comment(pr_comment)
+ perf_api.save_result(result_udp)
+
+        # SCTP is only tested with all offloads on, or with gso turned off
+ if (nperf_protocols.find("sctp") > -1 and
+ (len([val for val in setting if val[1] == 'off']) == 0 or
+ ('gso', 'off') in setting)):
+ result_sctp = perf_api.new_result("sctp_ipv4_id",
+ "sctp_ipv4_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release',
+ r'guest\d+\.hostname',
+ r'guest\d+\..*hwaddr',
+ r'host\d+\..*tap\d*\.hwaddr',
+ r'host\d+\..*tap\d*\.devname'])
+ for offload in setting:
+ result_sctp.set_parameter(offload[0], offload[1])
+
+ result_sctp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_sctp.add_tag("multithreaded")
+ result_sctp.set_parameter("num_parallel", nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_sctp)
+ netperf_baseline_template(netperf_cli_sctp, baseline)
+ sctp_res_data = g2.run(netperf_cli_sctp,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_sctp, sctp_res_data)
+ result_sctp.set_comment(pr_comment)
+ perf_api.save_result(result_sctp)
+
+ server_proc.intr()
+
+ if ipv in [ 'ipv6', 'both' ]:
+ g1.run(ping_mod6)
+
+ server_proc = g1.run(netperf_srv6, bg=True)
+ ctl.wait(2)
+
+ if nperf_protocols.find("tcp") > -1:
+ # prepare PerfRepo result for tcp ipv6
+ result_tcp = perf_api.new_result("tcp_ipv6_id",
+ "tcp_ipv6_result",
+ hash_ignore=['kernel_release',
+ 'redhat_release',
+ r'guest\d+\.hostname',
+ r'guest\d+\..*hwaddr',
+ r'host\d+\..*tap\d*\.hwaddr',
+ r'host\d+\..*tap\d*\.devname'])
+ for offload in setting:
+ result_tcp.set_parameter(offload[0], offload[1])
+
+ if nperf_msg_size is not None:
+ result_tcp.set_parameter("nperf_msg_size", nperf_msg_size)
+
+ result_tcp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_tcp.add_tag("multithreaded")
+ result_tcp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_tcp)
+ netperf_baseline_template(netperf_cli_tcp6, baseline)
+
+ tcp_res_data = g2.run(netperf_cli_tcp6,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_tcp, tcp_res_data)
+ result_tcp.set_comment(pr_comment)
+ perf_api.save_result(result_tcp)
+
+ if nperf_protocols.find("udp") > -1 and ("gro", "off") not in setting:
+ # prepare PerfRepo result for udp ipv6
+ result_udp = perf_api.new_result("udp_ipv6_id",
+ "udp_ipv6_result",
+ hash_ignore=['kernel_release',
+ 'redhat_release',
+ r'guest\d+\.hostname',
+ r'guest\d+\..*hwaddr',
+ r'host\d+\..*tap\d*\.hwaddr',
+ r'host\d+\..*tap\d*\.devname'])
+ for offload in setting:
+ result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_msg_size is not None:
+ result_udp.set_parameter("nperf_msg_size", nperf_msg_size)
+
+ result_udp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ netperf_baseline_template(netperf_cli_udp6, baseline)
+
+ udp_res_data = g2.run(netperf_cli_udp6,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ result_udp.set_comment(pr_comment)
+ perf_api.save_result(result_udp)
+
+        # SCTP is only tested with all offloads on, or with gso turned off
+ if (nperf_protocols.find("sctp") > -1 and
+ (len([val for val in setting if val[1] == 'off']) == 0 or
+ ('gso', 'off') in setting)):
+ result_sctp = perf_api.new_result("sctp_ipv6_id",
+ "sctp_ipv6_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release',
+ r'guest\d+\.hostname',
+ r'guest\d+\..*hwaddr',
+ r'host\d+\..*tap\d*\.hwaddr',
+ r'host\d+\..*tap\d*\.devname'])
+ for offload in setting:
+ result_sctp.set_parameter(offload[0], offload[1])
+
+ result_sctp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_sctp.add_tag("multithreaded")
+ result_sctp.set_parameter("num_parallel", nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_sctp)
+            netperf_baseline_template(netperf_cli_sctp6, baseline)
+ sctp_res_data = g2.run(netperf_cli_sctp6,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_sctp, sctp_res_data)
+ result_sctp.set_comment(pr_comment)
+ perf_api.save_result(result_sctp)
+
+ server_proc.intr()
+
+#reset offload states
+dev_features = ""
+for offload in offloads:
+ dev_features += " %s %s" % (offload, "on")
+g1.run("ethtool -K %s %s" % (g1_guestnic.get_devname(), dev_features))
+g2.run("ethtool -K %s %s" % (g2_guestnic.get_devname(), dev_features))
+h1.run("ethtool -K %s %s" % (h1_nic.get_devname(), dev_features))
+h2.run("ethtool -K %s %s" % (h2_nic.get_devname(), dev_features))
+
+h1.run("service irqbalance start")
+h2.run("service irqbalance start")
diff --git a/recipes/regression_tests/phase1/virtual_bridge_vlan_in_host_mirrored.xml b/recipes/regression_tests/phase1/virtual_bridge_vlan_in_host_mirrored.xml
new file mode 100644
index 0000000..4973caf
--- /dev/null
+++ b/recipes/regression_tests/phase1/virtual_bridge_vlan_in_host_mirrored.xml
@@ -0,0 +1,111 @@
+<lnstrecipe>
+ <define>
+ <alias name="ipv" value="both" />
+ <alias name="netperf_duration" value="60" />
+ <alias name="nperf_reserve" value="20" />
+ <alias name="nperf_confidence" value="99,5" />
+ <alias name="nperf_max_runs" value="5"/>
+ <alias name="nperf_mode" value="default"/>
+ <alias name="nperf_num_parallel" value="2"/>
+ <alias name="nperf_debug" value="0"/>
+ <alias name="nperf_max_dev" value="20%"/>
+ <alias name="nperf_protocols" value="tcp udp sctp"/>
+ <alias name="mtu" value="1500" />
+ <alias name="mapping_file" value="virtual_bridge_vlan_in_host_mirrored.mapping" />
+ <alias name="vlan10_net" value="192.168.10"/>
+ <alias name="vlan10_tag" value="10"/>
+ <alias name="driver" value="ixgbe"/>
+ </define>
+ <network>
+ <host id="host1">
+ <params>
+ <param name="machine_type" value="baremetal"/>
+ </params>
+ <interfaces>
+ <eth id="nic" label="to_switch">
+ <params>
+ <param name="driver" value="{$driver}" />
+ </params>
+ </eth>
+ <eth id="tap" label="to_guest1" />
+ <vlan id="vlan10">
+ <options>
+ <option name="vlan_tci" value="{$vlan10_tag}" />
+ </options>
+ <slaves>
+ <slave id="nic" />
+ </slaves>
+ </vlan>
+ <bridge id="br">
+ <slaves>
+ <slave id="tap" />
+ <slave id="vlan10" />
+ </slaves>
+ <addresses>
+ <address>{$vlan10_net}.1/24</address>
+ </addresses>
+ </bridge>
+ </interfaces>
+ </host>
+ <host id="guest1">
+ <interfaces>
+ <eth id="guestnic" label="to_guest1">
+ <params>
+ <param name="driver" value="virtio" />
+ </params>
+ <addresses>
+ <address>{$vlan10_net}.10/24</address>
+ <address>fc00:0:0:10::10/64</address>
+ </addresses>
+ </eth>
+ </interfaces>
+ </host>
+ <host id="host2">
+ <params>
+ <param name="machine_type" value="baremetal"/>
+ </params>
+ <interfaces>
+ <eth id="nic" label="to_switch">
+ <params>
+ <param name="driver" value="{$driver}" />
+ </params>
+ </eth>
+ <eth id="tap" label="to_guest2" />
+ <vlan id="vlan10">
+ <options>
+ <option name="vlan_tci" value="{$vlan10_tag}" />
+ </options>
+ <slaves>
+ <slave id="nic" />
+ </slaves>
+ </vlan>
+ <bridge id="br">
+ <slaves>
+ <slave id="tap" />
+ <slave id="vlan10" />
+ </slaves>
+ <addresses>
+ <address>{$vlan10_net}.2/24</address>
+ </addresses>
+ </bridge>
+ </interfaces>
+ </host>
+ <host id="guest2">
+ <interfaces>
+ <eth id="guestnic" label="to_guest2">
+ <params>
+ <param name="driver" value="virtio" />
+ </params>
+ <addresses>
+ <address>{$vlan10_net}.11/24</address>
+ <address>fc00:0:0:10::11/64</address>
+ </addresses>
+ </eth>
+ </interfaces>
+ </host>
+ </network>
+
+ <task python="virtual_bridge_vlan_in_host_mirrored.py" />
+</lnstrecipe>
diff --git a/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_guest_mirrored.README b/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_guest_mirrored.README
new file mode 100644
index 0000000..9db35be
--- /dev/null
+++ b/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_guest_mirrored.README
@@ -0,0 +1,87 @@
+Topology:
+
+ +----------+
+ | |
+ +-----------------+ switch +-----------------+
+ | | | |
+ | +----------+ |
+ | |
+ +-+-+ +-+-+
++------|nic|------+ +------|nic|------+
+| +-+-+ | | +-+-+ |
+| | | | | |
+| +----+ | | +----+ |
+| | | | | |
+| +-+---------+ | | +---------+-+ |
+| | ovs_bridge| | | |ovs_bridge | |
+| +-+---------+ | | +---------+-+ |
+| | | | | |
+| +-+-+ host1 | | host2 +-+-+ |
++-|tap|-----------+ +-----------|tap|-+
+ +-+-+ +-+-+
+ | |
+ |VLAN10 |VLAN10
+ | |
+ +-+-+ +-+-+
++-|nic|--+ +-|nic|--+
+| +---+ | | +---+ |
+| guest1 | | guest2 |
+| | | |
++--------+ +--------+
+
+Number of hosts: 4
+Host #1 description:
+ One ethernet device
+ One tap device
+ One Open vSwitch bridge device, bridging ethernet and tap devices
+ Host for guest1 virtual machine
+Host #2 description:
+ One ethernet device
+ One tap device
+ One Open vSwitch bridge device, bridging ethernet and tap devices
+ Host for guest2 virtual machine
+Guest #1 description:
+ One ethernet device with one VLAN subinterface
+Guest #2 description:
+ One ethernet device with one VLAN subinterface
+Test name:
+ virtual_ovs_bridge_vlan_in_guest_mirrored.py
+Test description:
+ Ping:
+ + count: 100
+ + interval: 0.1s
+ + between guest1's VLAN10 and guest2's VLAN10
+ Netperf:
+ + duration: 60s
+ + TCP_STREAM, UDP_STREAM and SCTP_STREAM
+ + between guest1's VLAN10 and guest2's VLAN10
+
+PerfRepo integration:
+ First, preparation in PerfRepo is required - you need to create Test objects
+ through the web interface that properly describe the individual Netperf
+ tests that this recipe runs. Don't forget to also add appropriate metrics.
+ For these Netperf tests it's always:
+ * throughput
+ * throughput_min
+ * throughput_max
+ * throughput_deviation
+
+ After that, to enable support for PerfRepo you need to create the file
+ virtual_ovs_bridge_vlan_in_guest_mirrored.mapping and define the following id mappings:
+ tcp_ipv4_id -> to store ipv4 TCP_STREAM Netperf test results, maps to TestUid of a PerfRepo Test object
+ tcp_ipv6_id -> to store ipv6 TCP_STREAM Netperf test results, maps to TestUid of a PerfRepo Test object
+ udp_ipv4_id -> to store ipv4 UDP_STREAM Netperf test results, maps to TestUid of a PerfRepo Test object
+ udp_ipv6_id -> to store ipv6 UDP_STREAM Netperf test results, maps to TestUid of a PerfRepo Test object
+ sctp_ipv4_id -> to store ipv4 SCTP_STREAM Netperf test results, maps to TestUid of a PerfRepo Test object
+ sctp_ipv6_id -> to store ipv6 SCTP_STREAM Netperf test results, maps to TestUid of a PerfRepo Test object
+
+ To enable result comparison against baselines you need to create a Report in
+ PerfRepo that will store the baseline. Set up the Report to only contain results
+ with the same hash tag and then add a new mapping to the mapping file, with
+ this format:
+ <some_hash> = <report_id>
+
+ The hash value is automatically generated during test execution and added
+ to each result stored in PerfRepo. To get the Report id you need to open
+ that report in your browser and find it in the URL.
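+
+ As an illustration (assuming the same 'key = value' line format as the
+ baseline mapping above; all values below are made-up placeholders), the
+ virtual_ovs_bridge_vlan_in_guest_mirrored.mapping file could end up looking
+ like this:
+ tcp_ipv4_id = example_tcp_ipv4_test_uid
+ tcp_ipv6_id = example_tcp_ipv6_test_uid
+ udp_ipv4_id = example_udp_ipv4_test_uid
+ udp_ipv6_id = example_udp_ipv6_test_uid
+ sctp_ipv4_id = example_sctp_ipv4_test_uid
+ sctp_ipv6_id = example_sctp_ipv6_test_uid
+ 0123456789abcdef = 42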
+
+ When running this recipe you should also define the 'product_name' alias
+ (e.g. RHEL7) in order to tag the result object in PerfRepo.
diff --git a/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_guest_mirrored.py b/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_guest_mirrored.py
new file mode 100644
index 0000000..2203cdc
--- /dev/null
+++ b/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_guest_mirrored.py
@@ -0,0 +1,469 @@
+from lnst.Controller.Task import ctl
+from lnst.Controller.PerfRepoUtils import netperf_baseline_template
+from lnst.Controller.PerfRepoUtils import netperf_result_template
+
+from lnst.RecipeCommon.IRQ import pin_dev_irqs
+from lnst.RecipeCommon.PerfRepo import generate_perfrepo_comment
+from lnst.RecipeCommon.Offloads import parse_offloads
+
+# ------
+# SETUP
+# ------
+
+mapping_file = ctl.get_alias("mapping_file")
+perf_api = ctl.connect_PerfRepo(mapping_file)
+
+product_name = ctl.get_alias("product_name")
+
+h1 = ctl.get_host("host1")
+g1 = ctl.get_host("guest1")
+
+h2 = ctl.get_host("host2")
+g2 = ctl.get_host("guest2")
+
+g1.sync_resources(modules=["IcmpPing", "Icmp6Ping", "Netperf"])
+g2.sync_resources(modules=["IcmpPing", "Icmp6Ping", "Netperf"])
+
+# ------
+# TESTS
+# ------
+
+mtu = ctl.get_alias("mtu")
+ipv = ctl.get_alias("ipv")
+netperf_duration = int(ctl.get_alias("netperf_duration"))
+nperf_reserve = int(ctl.get_alias("nperf_reserve"))
+nperf_confidence = ctl.get_alias("nperf_confidence")
+nperf_max_runs = int(ctl.get_alias("nperf_max_runs"))
+nperf_cpu_util = ctl.get_alias("nperf_cpu_util")
+nperf_mode = ctl.get_alias("nperf_mode")
+nperf_num_parallel = int(ctl.get_alias("nperf_num_parallel"))
+nperf_debug = ctl.get_alias("nperf_debug")
+nperf_max_dev = ctl.get_alias("nperf_max_dev")
+nperf_msg_size = ctl.get_alias("nperf_msg_size")
+pr_user_comment = ctl.get_alias("perfrepo_comment")
+offloads_alias = ctl.get_alias("offloads")
+nperf_protocols = ctl.get_alias("nperf_protocols")
+
+sctp_default_msg_size = "16K"
+
+if offloads_alias is not None:
+ offloads, offload_settings = parse_offloads(offloads_alias)
+else:
+ offloads = ["gro", "gso", "tso", "rx", "tx"]
+ offload_settings = [ [("gro", "on"), ("gso", "on"), ("tso", "on"), ("tx", "on"), ("rx", "on")],
+ [("gro", "off"), ("gso", "on"), ("tso", "on"), ("tx", "on"), ("rx", "on")],
+ [("gro", "on"), ("gso", "off"), ("tso", "off"), ("tx", "on"), ("rx", "on")],
+ [("gro", "on"), ("gso", "on"), ("tso", "off"), ("tx", "off"), ("rx", "on")],
+ [("gro", "on"), ("gso", "on"), ("tso", "on"), ("tx", "on"), ("rx", "off")]]
+
+pr_comment = generate_perfrepo_comment([h1, g1, h2, g2], pr_user_comment)
+
+g1_vlan10 = g1.get_interface("vlan10")
+g2_vlan10 = g2.get_interface("vlan10")
+h1_nic = h1.get_interface("nic")
+h2_nic = h2.get_interface("nic")
+g1_guestnic = g1.get_interface("guestnic")
+g2_guestnic = g2.get_interface("guestnic")
+
+h1.run("service irqbalance stop")
+h2.run("service irqbalance stop")
+
+# this will pin devices irqs to cpu #0
+for m, d in [ (h1, h1_nic), (h2, h2_nic) ]:
+ pin_dev_irqs(m, d, 0)
+
+ping_mod = ctl.get_module("IcmpPing",
+ options={
+ "addr" : g2_vlan10.get_ip(0),
+ "count" : 100,
+ "iface" : g1_vlan10.get_devname(),
+ "interval" : 0.1
+ })
+
+ping_mod6 = ctl.get_module("Icmp6Ping",
+ options={
+ "addr" : g2_vlan10.get_ip(1),
+ "count" : 100,
+ "iface" : g1_vlan10.get_ip(1),
+ "interval" : 0.1
+ })
+
+netperf_srv = ctl.get_module("Netperf",
+ options={
+ "role" : "server",
+ "bind" : g1_vlan10.get_ip(0)
+ })
+
+netperf_srv6 = ctl.get_module("Netperf",
+ options={
+ "role" : "server",
+ "bind" : g1_vlan10.get_ip(1),
+ "netperf_opts" : " -6",
+ })
+
+p_opts = "-L %s" % (g2_vlan10.get_ip(0))
+p_opts6 = "-L %s -6" % (g2_vlan10.get_ip(1))
+
+netperf_cli_tcp = ctl.get_module("Netperf",
+ options={
+ "role" : "client",
+ "netperf_server" : g1_vlan10.get_ip(0),
+ "duration" : netperf_duration,
+ "testname" : "TCP_STREAM",
+ "confidence" : nperf_confidence,
+ "cpu_util" : nperf_cpu_util,
+ "runs": nperf_max_runs,
+ "netperf_opts" : p_opts,
+ "debug" : nperf_debug,
+ "max_deviation" : nperf_max_dev
+ })
+
+netperf_cli_udp = ctl.get_module("Netperf",
+ options={
+ "role" : "client",
+ "netperf_server" : g1_vlan10.get_ip(0),
+ "duration" : netperf_duration,
+ "testname" : "UDP_STREAM",
+ "confidence" : nperf_confidence,
+ "cpu_util" : nperf_cpu_util,
+ "runs": nperf_max_runs,
+ "netperf_opts" : p_opts,
+ "debug" : nperf_debug,
+ "max_deviation" : nperf_max_dev
+ })
+
+netperf_cli_tcp6 = ctl.get_module("Netperf",
+ options={
+ "role" : "client",
+ "netperf_server" :
+ g1_vlan10.get_ip(1),
+ "duration" : netperf_duration,
+ "testname" : "TCP_STREAM",
+ "confidence" : nperf_confidence,
+ "cpu_util" : nperf_cpu_util,
+ "runs": nperf_max_runs,
+ "netperf_opts" : p_opts6,
+ "debug" : nperf_debug,
+ "max_deviation" : nperf_max_dev
+ })
+
+netperf_cli_udp6 = ctl.get_module("Netperf",
+ options={
+ "role" : "client",
+ "netperf_server" :
+ g1_vlan10.get_ip(1),
+ "duration" : netperf_duration,
+ "testname" : "UDP_STREAM",
+ "confidence" : nperf_confidence,
+ "cpu_util" : nperf_cpu_util,
+ "runs": nperf_max_runs,
+ "netperf_opts" : p_opts6,
+ "debug" : nperf_debug,
+ "max_deviation" : nperf_max_dev
+ })
+
+netperf_cli_sctp = ctl.get_module("Netperf",
+ options={
+ "role" : "client",
+ "netperf_server" :
+ g1_vlan10.get_ip(0),
+ "duration" : netperf_duration,
+ "testname" : "SCTP_STREAM",
+ "confidence" : nperf_confidence,
+ "cpu_util" : nperf_cpu_util,
+ "runs" : nperf_max_runs,
+ "netperf_opts" : p_opts,
+ "msg_size" : sctp_default_msg_size,
+ "debug" : nperf_debug,
+ "max_deviation" : nperf_max_dev
+ })
+
+netperf_cli_sctp6 = ctl.get_module("Netperf",
+ options={
+ "role" : "client",
+ "netperf_server" :
+ g1_vlan10.get_ip(1),
+ "duration" : netperf_duration,
+ "testname" : "SCTP_STREAM",
+ "confidence" : nperf_confidence,
+ "cpu_util" : nperf_cpu_util,
+ "runs" : nperf_max_runs,
+ "netperf_opts" : p_opts6,
+ "msg_size" : sctp_default_msg_size,
+ "debug" : nperf_debug,
+ "max_deviation" : nperf_max_dev
+ })
+
+if nperf_mode == "multi":
+ netperf_cli_tcp.unset_option("confidence")
+ netperf_cli_udp.unset_option("confidence")
+ netperf_cli_sctp.unset_option("confidence")
+ netperf_cli_tcp6.unset_option("confidence")
+ netperf_cli_udp6.unset_option("confidence")
+ netperf_cli_sctp6.unset_option("confidence")
+
+ netperf_cli_tcp.update_options({"num_parallel": nperf_num_parallel})
+ netperf_cli_udp.update_options({"num_parallel": nperf_num_parallel})
+ netperf_cli_sctp.update_options({"num_parallel": nperf_num_parallel})
+ netperf_cli_tcp6.update_options({"num_parallel": nperf_num_parallel})
+ netperf_cli_udp6.update_options({"num_parallel": nperf_num_parallel})
+ netperf_cli_sctp6.update_options({"num_parallel": nperf_num_parallel})
+
+ # we have to use multiqueue qdisc to get appropriate data
+ h1.run("tc qdisc replace dev %s root mq" % h1_nic.get_devname())
+ h2.run("tc qdisc replace dev %s root mq" % h2_nic.get_devname())
+
+if nperf_msg_size is not None:
+ netperf_cli_tcp.update_options({"msg_size" : nperf_msg_size})
+ netperf_cli_udp.update_options({"msg_size" : nperf_msg_size})
+ netperf_cli_sctp.update_options({"msg_size" : nperf_msg_size})
+ netperf_cli_tcp6.update_options({"msg_size" : nperf_msg_size})
+ netperf_cli_udp6.update_options({"msg_size" : nperf_msg_size})
+ netperf_cli_sctp6.update_options({"msg_size" : nperf_msg_size})
+
+#set mtu
+h1.get_interface("nic").set_mtu(mtu)
+h1.get_interface("tap").set_mtu(mtu)
+h1.get_interface("br").set_mtu(mtu)
+
+h2.get_interface("nic").set_mtu(mtu)
+h2.get_interface("tap").set_mtu(mtu)
+h2.get_interface("br").set_mtu(mtu)
+
+g1.get_interface("guestnic").set_mtu(mtu)
+g1.get_interface("vlan10").set_mtu(mtu)
+
+g2.get_interface("guestnic").set_mtu(mtu)
+g2.get_interface("vlan10").set_mtu(mtu)
+
+ctl.wait(15)
+
+for setting in offload_settings:
+ dev_features = ""
+ for offload in setting:
+ dev_features += " %s %s" % (offload[0], offload[1])
+ h1.run("ethtool -K %s %s" % (h1_nic.get_devname(), dev_features))
+ g1.run("ethtool -K %s %s" % (g1_guestnic.get_devname(), dev_features))
+ h2.run("ethtool -K %s %s" % (h2_nic.get_devname(), dev_features))
+ g2.run("ethtool -K %s %s" % (g2_guestnic.get_devname(), dev_features))
+
+ if ("rx", "off") in setting:
+ # when rx offload is turned off some of the cards might get reset
+ # and link goes down, so wait a few seconds until NIC is ready
+ ctl.wait(15)
+
+ if ipv in [ 'ipv4', 'both' ]:
+ g1.run(ping_mod)
+
+ server_proc = g1.run(netperf_srv, bg=True)
+ ctl.wait(2)
+
+ if nperf_protocols.find("tcp") > -1:
+ # prepare PerfRepo result for tcp
+ result_tcp = perf_api.new_result("tcp_ipv4_id",
+ "tcp_ipv4_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release',
+ r'guest\d+\.hostname',
+ r'guest\d+\..*hwaddr',
+ r'host\d+\..*tap\d*\.hwaddr',
+ r'host\d+\..*tap\d*\.devname'])
+ for offload in setting:
+ result_tcp.set_parameter(offload[0], offload[1])
+
+ if nperf_msg_size is not None:
+ result_tcp.set_parameter("nperf_msg_size", nperf_msg_size)
+
+ result_tcp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_tcp.add_tag("multithreaded")
+ result_tcp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_tcp)
+ netperf_baseline_template(netperf_cli_tcp, baseline)
+
+ tcp_res_data = g2.run(netperf_cli_tcp,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_tcp, tcp_res_data)
+ result_tcp.set_comment(pr_comment)
+ perf_api.save_result(result_tcp)
+
+ if nperf_protocols.find("udp") > -1 and ("gro", "off") not in setting:
+ # prepare PerfRepo result for udp
+ result_udp = perf_api.new_result("udp_ipv4_id",
+ "udp_ipv4_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release',
+ r'guest\d+\.hostname',
+ r'guest\d+\..*hwaddr',
+ r'host\d+\..*tap\d*\.hwaddr',
+ r'host\d+\..*tap\d*\.devname'])
+ for offload in setting:
+ result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_msg_size is not None:
+ result_udp.set_parameter("nperf_msg_size", nperf_msg_size)
+
+ result_udp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ netperf_baseline_template(netperf_cli_udp, baseline)
+
+ udp_res_data = g2.run(netperf_cli_udp,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ result_udp.set_comment(pr_comment)
+ perf_api.save_result(result_udp)
+
+        # SCTP is only tested with all offloads on, or with gso turned off
+ if (nperf_protocols.find("sctp") > -1 and
+ (len([val for val in setting if val[1] == 'off']) == 0 or
+ ('gso', 'off') in setting)):
+ result_sctp = perf_api.new_result("sctp_ipv4_id",
+ "sctp_ipv4_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release',
+ r'guest\d+\.hostname',
+ r'guest\d+\..*hwaddr',
+ r'host\d+\..*tap\d*\.hwaddr',
+ r'host\d+\..*tap\d*\.devname'])
+ for offload in setting:
+ result_sctp.set_parameter(offload[0], offload[1])
+
+ result_sctp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_sctp.add_tag("multithreaded")
+ result_sctp.set_parameter("num_parallel", nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_sctp)
+ netperf_baseline_template(netperf_cli_sctp, baseline)
+ sctp_res_data = g2.run(netperf_cli_sctp,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_sctp, sctp_res_data)
+ result_sctp.set_comment(pr_comment)
+ perf_api.save_result(result_sctp)
+
+ server_proc.intr()
+
+ if ipv in [ 'ipv6', 'both' ]:
+ g1.run(ping_mod6)
+
+ server_proc = g1.run(netperf_srv6, bg=True)
+ ctl.wait(2)
+
+ if nperf_protocols.find("tcp") > -1:
+ # prepare PerfRepo result for tcp ipv6
+ result_tcp = perf_api.new_result("tcp_ipv6_id",
+ "tcp_ipv6_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release',
+ r'guest\d+\.hostname',
+ r'guest\d+\..*hwaddr',
+ r'host\d+\..*tap\d*\.hwaddr',
+ r'host\d+\..*tap\d*\.devname'])
+ for offload in setting:
+ result_tcp.set_parameter(offload[0], offload[1])
+
+ if nperf_msg_size is not None:
+ result_tcp.set_parameter("nperf_msg_size", nperf_msg_size)
+
+ result_tcp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_tcp.add_tag("multithreaded")
+ result_tcp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_tcp)
+ netperf_baseline_template(netperf_cli_tcp6, baseline)
+
+ tcp_res_data = g2.run(netperf_cli_tcp6,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_tcp, tcp_res_data)
+ result_tcp.set_comment(pr_comment)
+ perf_api.save_result(result_tcp)
+
+ if nperf_protocols.find("udp") > -1 and ("gro", "off") not in setting:
+ # prepare PerfRepo result for udp ipv6
+ result_udp = perf_api.new_result("udp_ipv6_id",
+ "udp_ipv6_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release',
+ r'guest\d+\.hostname',
+ r'guest\d+\..*hwaddr',
+ r'host\d+\..*tap\d*\.hwaddr',
+ r'host\d+\..*tap\d*\.devname'])
+ for offload in setting:
+ result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_msg_size is not None:
+ result_udp.set_parameter("nperf_msg_size", nperf_msg_size)
+
+ result_udp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ netperf_baseline_template(netperf_cli_udp6, baseline)
+
+ udp_res_data = g2.run(netperf_cli_udp6,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ result_udp.set_comment(pr_comment)
+ perf_api.save_result(result_udp)
+
+        # SCTP is only tested with all offloads on, or with gso turned off
+ if (nperf_protocols.find("sctp") > -1 and
+ (len([val for val in setting if val[1] == 'off']) == 0 or
+ ('gso', 'off') in setting)):
+ result_sctp = perf_api.new_result("sctp_ipv6_id",
+ "sctp_ipv6_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release',
+ r'guest\d+\.hostname',
+ r'guest\d+\..*hwaddr',
+ r'host\d+\..*tap\d*\.hwaddr',
+ r'host\d+\..*tap\d*\.devname'])
+ for offload in setting:
+ result_sctp.set_parameter(offload[0], offload[1])
+
+ result_sctp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_sctp.add_tag("multithreaded")
+ result_sctp.set_parameter("num_parallel", nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_sctp)
+            netperf_baseline_template(netperf_cli_sctp6, baseline)
+ sctp_res_data = g2.run(netperf_cli_sctp6,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_sctp, sctp_res_data)
+ result_sctp.set_comment(pr_comment)
+ perf_api.save_result(result_sctp)
+
+ server_proc.intr()
+
+#reset offload states
+dev_features = ""
+for offload in offloads:
+ dev_features += " %s %s" % (offload, "on")
+h1.run("ethtool -K %s %s" % (h1_nic.get_devname(), dev_features))
+g1.run("ethtool -K %s %s" % (g1_guestnic.get_devname(), dev_features))
+h2.run("ethtool -K %s %s" % (h2_nic.get_devname(), dev_features))
+g2.run("ethtool -K %s %s" % (g2_guestnic.get_devname(), dev_features))
+
+h1.run("service irqbalance start")
+h2.run("service irqbalance start")
diff --git a/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_guest_mirrored.xml b/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_guest_mirrored.xml
new file mode 100644
index 0000000..45e1808
--- /dev/null
+++ b/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_guest_mirrored.xml
@@ -0,0 +1,104 @@
+<lnstrecipe>
+ <define>
+ <alias name="ipv" value="both" />
+ <alias name="netperf_duration" value="60" />
+ <alias name="nperf_reserve" value="20" />
+ <alias name="nperf_confidence" value="99,5" />
+ <alias name="nperf_max_runs" value="5" />
+ <alias name="nperf_mode" value="default"/>
+ <alias name="nperf_num_parallel" value="2"/>
+ <alias name="nperf_debug" value="0"/>
+ <alias name="nperf_max_dev" value="20%"/>
+ <alias name="nperf_protocols" value="tcp udp sctp"/>
+ <alias name="mtu" value="1500" />
+ <alias name="mapping_file" value="virtual_ovs_bridge_vlan_in_guest_mirrored.mapping" />
+ <alias name="vlan10_net" value="192.168.10"/>
+ <alias name="vlan10_tag" value="10"/>
+ <alias name="driver" value="ixgbe"/>
+ </define>
+ <network>
+ <host id="host1">
+ <params>
+ <param name="machine_type" value="baremetal"/>
+ </params>
+ <interfaces>
+ <eth id="nic" label="to_switch">
+ <params>
+ <param name="driver" value="{$driver}" />
+ </params>
+ </eth>
+ <eth id="tap" label="to_guest1" />
+ <ovs_bridge id="br">
+ <slaves>
+ <slave id="tap" />
+ <slave id="nic" />
+ </slaves>
+ </ovs_bridge>
+ </interfaces>
+ </host>
+ <host id="guest1">
+ <interfaces>
+ <eth id="guestnic" label="to_guest1">
+ <params>
+ <param name="driver" value="virtio" />
+ </params>
+ </eth>
+ <vlan id="vlan10">
+ <options>
+ <option name="vlan_tci" value="{$vlan10_tag}" />
+ </options>
+ <slaves>
+ <slave id="guestnic" />
+ </slaves>
+ <addresses>
+ <address>{$vlan10_net}.10/24</address>
+ <address>fc00:0:0:10::10/64</address>
+ </addresses>
+ </vlan>
+ </interfaces>
+ </host>
+
+ <host id="host2">
+ <params>
+ <param name="machine_type" value="baremetal"/>
+ </params>
+ <interfaces>
+ <eth id="nic" label="to_switch">
+ <params>
+ <param name="driver" value="{$driver}" />
+ </params>
+ </eth>
+ <eth id="tap" label="to_guest2" />
+ <ovs_bridge id="br">
+ <slaves>
+ <slave id="tap" />
+ <slave id="nic" />
+ </slaves>
+ </ovs_bridge>
+ </interfaces>
+ </host>
+ <host id="guest2">
+ <interfaces>
+ <eth id="guestnic" label="to_guest2">
+ <params>
+ <param name="driver" value="virtio" />
+ </params>
+ </eth>
+ <vlan id="vlan10">
+ <options>
+ <option name="vlan_tci" value="{$vlan10_tag}" />
+ </options>
+ <slaves>
+ <slave id="guestnic" />
+ </slaves>
+ <addresses>
+ <address>{$vlan10_net}.11/24</address>
+ <address>fc00:0:0:10::11/64</address>
+ </addresses>
+ </vlan>
+ </interfaces>
+ </host>
+ </network>
+
+ <task python="virtual_ovs_bridge_vlan_in_guest_mirrored.py" />
+</lnstrecipe>
diff --git a/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_host_mirrored.README b/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_host_mirrored.README
new file mode 100644
index 0000000..bb6846d
--- /dev/null
+++ b/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_host_mirrored.README
@@ -0,0 +1,88 @@
+Topology:
+
+ +----------+
+ | |
+ +-----------------+ switch +-----------------+
+ | | | |
+ | +----------+ |
+ | |
+ +-+-+ +-+-+
++------|nic|-------+ +------|nic|-------+
+| +-+-+ | | +-+-+ |
+| | | | | |
+| +------+-------+ | | +------+-------+ |
+| | vlan10 | | | | vlan10 | |
+| | | | | | | |
+| | ovs_bridge | | | | ovs_bridge | |
+| | | | | | | |
+| +-+------------+ | | +-----------+--+ |
+| | | | | |
+| +-+-+ host1 | | host2 +-+-+ |
++-|tap|------------+ +-----------|tap|--+
+ +-+-+ +-+-+
+ | |
+ | |
+ | |
+ +-+-+ +-+-+
++-|nic|--+ +-|nic|--+
+| +---+ | | +---+ |
+| guest1 | | guest2 |
+| | | |
++--------+ +--------+
+
+Number of hosts: 4
+Host #1 description:
+ One ethernet device with one VLAN subinterface
+ One tap device
+ One Open vSwitch bridge device, bridging VLAN and tap devices
+ Host for guest1 virtual machine
+Host #2 description:
+ One ethernet device with one VLAN subinterface
+ One tap device
+ One Open vSwitch bridge device, bridging VLAN and tap devices
+ Host for guest2 virtual machine
+Guest #1 description:
+ One ethernet device
+Guest #2 description:
+ One ethernet device
+Test name:
+ virtual_ovs_bridge_vlan_in_host_mirrored.py
+Test description:
+ Ping:
+ + count: 100
+ + interval: 0.1s
+ + between guest1's NIC and guest2's NIC
+ Netperf:
+ + duration: 60s
+ + TCP_STREAM, UDP_STREAM and SCTP_STREAM
+ + between guest1's NIC and guest2's NIC
+
+PerfRepo integration:
+ First, preparation in PerfRepo is required - you need to create Test objects
+ through the web interface that properly describe the individual Netperf
+ tests that this recipe runs. Don't forget to also add appropriate metrics.
+ For these Netperf tests it's always:
+ * throughput
+ * throughput_min
+ * throughput_max
+ * throughput_deviation
+
+ After that, to enable support for PerfRepo you need to create the file
+ virtual_ovs_bridge_vlan_in_host_mirrored.mapping and define the following id mappings:
+ tcp_ipv4_id -> to store ipv4 TCP_STREAM Netperf test results, maps to TestUid of a PerfRepo Test object
+ tcp_ipv6_id -> to store ipv6 TCP_STREAM Netperf test results, maps to TestUid of a PerfRepo Test object
+ udp_ipv4_id -> to store ipv4 UDP_STREAM Netperf test results, maps to TestUid of a PerfRepo Test object
+ udp_ipv6_id -> to store ipv6 UDP_STREAM Netperf test results, maps to TestUid of a PerfRepo Test object
+ sctp_ipv4_id -> to store ipv4 SCTP_STREAM Netperf test results, maps to TestUid of a PerfRepo Test object
+ sctp_ipv6_id -> to store ipv6 SCTP_STREAM Netperf test results, maps to TestUid of a PerfRepo Test object
+
+ To enable result comparison against baselines you need to create a Report in
+ PerfRepo that will store the baseline. Set up the Report to only contain results
+ with the same hash tag and then add a new mapping to the mapping file, with
+ this format:
+ <some_hash> = <report_id>
+
+ The hash value is automatically generated during test execution and added
+ to each result stored in PerfRepo. To get the Report id you need to open
+ that report in your browser and find it in the URL.
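+
+ Purely as an example (again assuming the 'key = value' format shown above for
+ the baseline mapping; the TestUids and the report id are placeholders), a
+ finished virtual_ovs_bridge_vlan_in_host_mirrored.mapping file might look like:
+ tcp_ipv4_id = example_tcp_ipv4_test_uid
+ tcp_ipv6_id = example_tcp_ipv6_test_uid
+ udp_ipv4_id = example_udp_ipv4_test_uid
+ udp_ipv6_id = example_udp_ipv6_test_uid
+ sctp_ipv4_id = example_sctp_ipv4_test_uid
+ sctp_ipv6_id = example_sctp_ipv6_test_uid
+ 0123456789abcdef = 42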
+
+ When running this recipe you should also define the 'product_name' alias
+ (e.g. RHEL7) in order to tag the result object in PerfRepo.
diff --git a/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_host_mirrored.py b/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_host_mirrored.py
new file mode 100644
index 0000000..407ddb6
--- /dev/null
+++ b/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_host_mirrored.py
@@ -0,0 +1,464 @@
+from lnst.Controller.Task import ctl
+from lnst.Controller.PerfRepoUtils import netperf_baseline_template
+from lnst.Controller.PerfRepoUtils import netperf_result_template
+
+from lnst.RecipeCommon.IRQ import pin_dev_irqs
+from lnst.RecipeCommon.PerfRepo import generate_perfrepo_comment
+from lnst.RecipeCommon.Offloads import parse_offloads
+
+# ------
+# SETUP
+# ------
+
+mapping_file = ctl.get_alias("mapping_file")
+perf_api = ctl.connect_PerfRepo(mapping_file)
+
+product_name = ctl.get_alias("product_name")
+
+h1 = ctl.get_host("host1")
+g1 = ctl.get_host("guest1")
+
+h2 = ctl.get_host("host2")
+g2 = ctl.get_host("guest2")
+
+g1.sync_resources(modules=["IcmpPing", "Icmp6Ping", "Netperf"])
+g2.sync_resources(modules=["IcmpPing", "Icmp6Ping", "Netperf"])
+
+# ------
+# TESTS
+# ------
+
+mtu = ctl.get_alias("mtu")
+ipv = ctl.get_alias("ipv")
+netperf_duration = int(ctl.get_alias("netperf_duration"))
+nperf_reserve = int(ctl.get_alias("nperf_reserve"))
+nperf_confidence = ctl.get_alias("nperf_confidence")
+nperf_max_runs = int(ctl.get_alias("nperf_max_runs"))
+nperf_cpu_util = ctl.get_alias("nperf_cpu_util")
+nperf_mode = ctl.get_alias("nperf_mode")
+nperf_num_parallel = int(ctl.get_alias("nperf_num_parallel"))
+nperf_debug = ctl.get_alias("nperf_debug")
+nperf_max_dev = ctl.get_alias("nperf_max_dev")
+nperf_msg_size = ctl.get_alias("nperf_msg_size")
+pr_user_comment = ctl.get_alias("perfrepo_comment")
+offloads_alias = ctl.get_alias("offloads")
+nperf_protocols = ctl.get_alias("nperf_protocols")
+
+sctp_default_msg_size = "16K"
+
+if offloads_alias is not None:
+ offloads, offload_settings = parse_offloads(offloads_alias)
+else:
+ offloads = ["gro", "gso", "tso", "rx", "tx"]
+ offload_settings = [ [("gro", "on"), ("gso", "on"), ("tso", "on"), ("tx", "on"), ("rx", "on")],
+ [("gro", "off"), ("gso", "on"), ("tso", "on"), ("tx", "on"), ("rx", "on")],
+ [("gro", "on"), ("gso", "off"), ("tso", "off"), ("tx", "on"), ("rx", "on")],
+ [("gro", "on"), ("gso", "on"), ("tso", "off"), ("tx", "off"), ("rx", "on")],
+ [("gro", "on"), ("gso", "on"), ("tso", "on"), ("tx", "on"), ("rx", "off")]]
+
+pr_comment = generate_perfrepo_comment([h1, g1, h2, g2], pr_user_comment)
+
+g1_guestnic = g1.get_interface("guestnic")
+g2_guestnic = g2.get_interface("guestnic")
+h1_nic = h1.get_interface("nic")
+h2_nic = h2.get_interface("nic")
+
+h1.run("service irqbalance stop")
+h2.run("service irqbalance stop")
+
+# this will pin devices irqs to cpu #0
+for m, d in [ (h1, h1_nic), (h2, h2_nic) ]:
+ pin_dev_irqs(m, d, 0)
+
+ping_mod = ctl.get_module("IcmpPing",
+ options={
+ "addr" : g2_guestnic.get_ip(0),
+ "count" : 100,
+ "iface" : g1_guestnic.get_devname(),
+ "interval" : 0.1
+ })
+
+ping_mod6 = ctl.get_module("Icmp6Ping",
+ options={
+ "addr" : g2_guestnic.get_ip(1),
+ "count" : 100,
+ "iface" : g1_guestnic.get_ip(1),
+ "interval" : 0.1
+ })
+
+netperf_srv = ctl.get_module("Netperf",
+ options={
+ "role" : "server",
+ "bind" : g1_guestnic.get_ip(0)
+ })
+
+netperf_srv6 = ctl.get_module("Netperf",
+ options={
+ "role" : "server",
+ "bind" : g1_guestnic.get_ip(1),
+ "netperf_opts" : " -6",
+ })
+
+p_opts = "-L %s" % (g2_guestnic.get_ip(0))
+p_opts6 = "-L %s -6" % (g2_guestnic.get_ip(1))
+
+netperf_cli_tcp = ctl.get_module("Netperf",
+ options={
+ "role" : "client",
+ "netperf_server" : g1_guestnic.get_ip(0),
+ "duration" : netperf_duration,
+ "testname" : "TCP_STREAM",
+ "confidence" : nperf_confidence,
+ "cpu_util" : nperf_cpu_util,
+ "runs": nperf_max_runs,
+ "netperf_opts" : p_opts,
+ "debug" : nperf_debug,
+ "max_deviation" : nperf_max_dev
+ })
+
+netperf_cli_udp = ctl.get_module("Netperf",
+ options={
+ "role" : "client",
+ "netperf_server" : g1_guestnic.get_ip(0),
+ "duration" : netperf_duration,
+ "testname" : "UDP_STREAM",
+ "confidence" : nperf_confidence,
+ "cpu_util" : nperf_cpu_util,
+ "runs": nperf_max_runs,
+ "netperf_opts" : p_opts,
+ "debug" : nperf_debug,
+ "max_deviation" : nperf_max_dev
+ })
+
+netperf_cli_tcp6 = ctl.get_module("Netperf",
+ options={
+ "role" : "client",
+ "netperf_server" :
+ g1_guestnic.get_ip(1),
+ "duration" : netperf_duration,
+ "testname" : "TCP_STREAM",
+ "confidence" : nperf_confidence,
+ "cpu_util" : nperf_cpu_util,
+ "runs": nperf_max_runs,
+ "netperf_opts" : p_opts6,
+ "debug" : nperf_debug,
+ "max_deviation" : nperf_max_dev
+ })
+
+netperf_cli_udp6 = ctl.get_module("Netperf",
+ options={
+ "role" : "client",
+                              "netperf_server" : g1_guestnic.get_ip(1),
+ "duration" : netperf_duration,
+ "testname" : "UDP_STREAM",
+ "confidence" : nperf_confidence,
+ "cpu_util" : nperf_cpu_util,
+ "runs": nperf_max_runs,
+ "netperf_opts" : p_opts6,
+ "debug" : nperf_debug,
+ "max_deviation" : nperf_max_dev
+ })
+
+netperf_cli_sctp = ctl.get_module("Netperf",
+ options={
+ "role" : "client",
+                              "netperf_server" : g1_guestnic.get_ip(0),
+ "duration" : netperf_duration,
+ "testname" : "SCTP_STREAM",
+ "confidence" : nperf_confidence,
+ "cpu_util" : nperf_cpu_util,
+ "runs" : nperf_max_runs,
+ "netperf_opts" : p_opts,
+ "msg_size" : sctp_default_msg_size,
+ "debug" : nperf_debug,
+ "max_deviation" : nperf_max_dev
+ })
+
+netperf_cli_sctp6 = ctl.get_module("Netperf",
+ options={
+ "role" : "client",
+                              "netperf_server" : g1_guestnic.get_ip(1),
+ "duration" : netperf_duration,
+ "testname" : "SCTP_STREAM",
+ "confidence" : nperf_confidence,
+ "cpu_util" : nperf_cpu_util,
+ "runs" : nperf_max_runs,
+ "netperf_opts" : p_opts6,
+ "msg_size" : sctp_default_msg_size,
+ "debug" : nperf_debug,
+ "max_deviation" : nperf_max_dev
+ })
+
+if nperf_mode == "multi":
+ netperf_cli_tcp.unset_option("confidence")
+ netperf_cli_udp.unset_option("confidence")
+ netperf_cli_sctp.unset_option("confidence")
+ netperf_cli_tcp6.unset_option("confidence")
+ netperf_cli_udp6.unset_option("confidence")
+ netperf_cli_sctp6.unset_option("confidence")
+
+ netperf_cli_tcp.update_options({"num_parallel": nperf_num_parallel})
+ netperf_cli_udp.update_options({"num_parallel": nperf_num_parallel})
+ netperf_cli_sctp.update_options({"num_parallel": nperf_num_parallel})
+ netperf_cli_tcp6.update_options({"num_parallel": nperf_num_parallel})
+ netperf_cli_udp6.update_options({"num_parallel": nperf_num_parallel})
+ netperf_cli_sctp6.update_options({"num_parallel": nperf_num_parallel})
+
+ # we have to use multiqueue qdisc to get appropriate data
+ h1.run("tc qdisc replace dev %s root mq" % h1_nic.get_devname())
+ h2.run("tc qdisc replace dev %s root mq" % h2_nic.get_devname())
+
+if nperf_msg_size is not None:
+ netperf_cli_tcp.update_options({"msg_size" : nperf_msg_size})
+ netperf_cli_tcp6.update_options({"msg_size" : nperf_msg_size})
+ netperf_cli_udp.update_options({"msg_size" : nperf_msg_size})
+ netperf_cli_udp6.update_options({"msg_size" : nperf_msg_size})
+ netperf_cli_sctp.update_options({"msg_size" : nperf_msg_size})
+ netperf_cli_sctp6.update_options({"msg_size" : nperf_msg_size})
+
+#set mtu
+h1.get_interface("nic").set_mtu(mtu)
+h1.get_interface("tap").set_mtu(mtu)
+h1.get_interface("ovs_br").set_mtu(mtu)
+
+h2.get_interface("nic").set_mtu(mtu)
+h2.get_interface("tap").set_mtu(mtu)
+h2.get_interface("ovs_br").set_mtu(mtu)
+
+g1.get_interface("guestnic").set_mtu(mtu)
+g2.get_interface("guestnic").set_mtu(mtu)
+
+ctl.wait(15)
+
+for setting in offload_settings:
+ dev_features = ""
+ for offload in setting:
+ dev_features += " %s %s" % (offload[0], offload[1])
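+    # dev_features ends up like " gro on gso off tso off tx on rx on"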
+ h1.run("ethtool -K %s %s" % (h1_nic.get_devname(), dev_features))
+ g1.run("ethtool -K %s %s" % (g1_guestnic.get_devname(), dev_features))
+ h2.run("ethtool -K %s %s" % (h2_nic.get_devname(), dev_features))
+ g2.run("ethtool -K %s %s" % (g2_guestnic.get_devname(), dev_features))
+
+ if ("rx", "off") in setting:
+ # when rx offload is turned off some of the cards might get reset
+ # and link goes down, so wait a few seconds until NIC is ready
+ ctl.wait(15)
+
+ if ipv in [ 'ipv4', 'both' ]:
+ g1.run(ping_mod)
+
+ server_proc = g1.run(netperf_srv, bg=True)
+ ctl.wait(2)
+
+ if nperf_protocols.find("tcp") > -1:
+ # prepare PerfRepo result for tcp
+ result_tcp = perf_api.new_result("tcp_ipv4_id",
+ "tcp_ipv4_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release',
+ r'guest\d+\.hostname',
+ r'guest\d+\..*hwaddr',
+ r'host\d+\..*tap\d*\.hwaddr',
+ r'host\d+\..*tap\d*\.devname'])
+ for offload in setting:
+ result_tcp.set_parameter(offload[0], offload[1])
+
+ if nperf_msg_size is not None:
+ result_tcp.set_parameter("nperf_msg_size", nperf_msg_size)
+
+ result_tcp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_tcp.add_tag("multithreaded")
+ result_tcp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_tcp)
+ netperf_baseline_template(netperf_cli_tcp, baseline)
+
+ tcp_res_data = g2.run(netperf_cli_tcp,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_tcp, tcp_res_data)
+ result_tcp.set_comment(pr_comment)
+ perf_api.save_result(result_tcp)
+
+ if nperf_protocols.find("udp") > -1 and ("gro", "off") not in setting:
+ # prepare PerfRepo result for udp
+ result_udp = perf_api.new_result("udp_ipv4_id",
+ "udp_ipv4_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release',
+ r'guest\d+\.hostname',
+ r'guest\d+\..*hwaddr',
+ r'host\d+\..*tap\d*\.hwaddr',
+ r'host\d+\..*tap\d*\.devname'])
+ for offload in setting:
+ result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_msg_size is not None:
+ result_udp.set_parameter("nperf_msg_size", nperf_msg_size)
+
+ result_udp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ netperf_baseline_template(netperf_cli_udp, baseline)
+
+ udp_res_data = g2.run(netperf_cli_udp,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ result_udp.set_comment(pr_comment)
+ perf_api.save_result(result_udp)
+
+ # for SCTP only gso offload on/off
+ if (nperf_protocols.find("sctp") > -1 and
+ (len([val for val in setting if val[1] == 'off']) == 0 or
+ ('gso', 'off') in setting)):
+ result_sctp = perf_api.new_result("sctp_ipv4_id",
+ "sctp_ipv4_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release',
+ r'guest\d+\.hostname',
+ r'guest\d+\..*hwaddr',
+ r'host\d+\..*tap\d*\.hwaddr',
+ r'host\d+\..*tap\d*\.devname'])
+ for offload in setting:
+ result_sctp.set_parameter(offload[0], offload[1])
+
+ result_sctp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_sctp.add_tag("multithreaded")
+ result_sctp.set_parameter("num_parallel", nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_sctp)
+ netperf_baseline_template(netperf_cli_sctp, baseline)
+ sctp_res_data = g2.run(netperf_cli_sctp,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_sctp, sctp_res_data)
+ result_sctp.set_comment(pr_comment)
+ perf_api.save_result(result_sctp)
+
+ server_proc.intr()
+
+ if ipv in [ 'ipv6', 'both' ]:
+ g1.run(ping_mod6)
+
+ server_proc = g1.run(netperf_srv6, bg=True)
+ ctl.wait(2)
+
+ if nperf_protocols.find("tcp") > -1:
+ # prepare PerfRepo result for tcp ipv6
+ result_tcp = perf_api.new_result("tcp_ipv6_id",
+ "tcp_ipv6_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release',
+ r'guest\d+\.hostname',
+ r'guest\d+\..*hwaddr',
+ r'host\d+\..*tap\d*\.hwaddr',
+ r'host\d+\..*tap\d*\.devname'])
+ for offload in setting:
+ result_tcp.set_parameter(offload[0], offload[1])
+
+ if nperf_msg_size is not None:
+ result_tcp.set_parameter("nperf_msg_size", nperf_msg_size)
+
+ result_tcp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_tcp.add_tag("multithreaded")
+ result_tcp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_tcp)
+ netperf_baseline_template(netperf_cli_tcp6, baseline)
+
+ tcp_res_data = g2.run(netperf_cli_tcp6,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_tcp, tcp_res_data)
+ result_tcp.set_comment(pr_comment)
+ perf_api.save_result(result_tcp)
+
+ if nperf_protocols.find("udp") > -1 and ("gro", "off") not in setting:
+ # prepare PerfRepo result for udp ipv6
+ result_udp = perf_api.new_result("udp_ipv6_id",
+ "udp_ipv6_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release',
+ r'guest\d+\.hostname',
+ r'guest\d+\..*hwaddr',
+ r'host\d+\..*tap\d*\.hwaddr',
+ r'host\d+\..*tap\d*\.devname'])
+ for offload in setting:
+ result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_msg_size is not None:
+ result_udp.set_parameter("nperf_msg_size", nperf_msg_size)
+
+ result_udp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ netperf_baseline_template(netperf_cli_udp6, baseline)
+
+ udp_res_data = g2.run(netperf_cli_udp6,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ result_udp.set_comment(pr_comment)
+ perf_api.save_result(result_udp)
+
+ # for SCTP only gso offload on/off
+ if (nperf_protocols.find("sctp") > -1 and
+ (len([val for val in setting if val[1] == 'off']) == 0 or
+ ('gso', 'off') in setting)):
+ result_sctp = perf_api.new_result("sctp_ipv6_id",
+ "sctp_ipv6_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release',
+ r'guest\d+\.hostname',
+ r'guest\d+\..*hwaddr',
+ r'host\d+\..*tap\d*\.hwaddr',
+ r'host\d+\..*tap\d*\.devname'])
+ for offload in setting:
+ result_sctp.set_parameter(offload[0], offload[1])
+
+ result_sctp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_sctp.add_tag("multithreaded")
+ result_sctp.set_parameter("num_parallel", nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_sctp)
+            netperf_baseline_template(netperf_cli_sctp6, baseline)
+ sctp_res_data = g2.run(netperf_cli_sctp6,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_sctp, sctp_res_data)
+ result_sctp.set_comment(pr_comment)
+ perf_api.save_result(result_sctp)
+
+ server_proc.intr()
+
+#reset offload states
+dev_features = ""
+for offload in offloads:
+ dev_features += " %s %s" % (offload, "on")
+h1.run("ethtool -K %s %s" % (h1_nic.get_devname(), dev_features))
+g1.run("ethtool -K %s %s" % (g1_guestnic.get_devname(), dev_features))
+h2.run("ethtool -K %s %s" % (h2_nic.get_devname(), dev_features))
+g2.run("ethtool -K %s %s" % (g2_guestnic.get_devname(), dev_features))
+
+h1.run("service irqbalance start")
+h2.run("service irqbalance start")
diff --git a/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_host_mirrored.xml b/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_host_mirrored.xml
new file mode 100644
index 0000000..9e22de8
--- /dev/null
+++ b/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_host_mirrored.xml
@@ -0,0 +1,98 @@
+<lnstrecipe>
+ <define>
+ <alias name="ipv" value="both" />
+ <alias name="netperf_duration" value="60" />
+ <alias name="nperf_reserve" value="20" />
+ <alias name="nperf_confidence" value="99,5" />
+ <alias name="nperf_max_runs" value="5" />
+ <alias name="nperf_mode" value="default"/>
+ <alias name="nperf_num_parallel" value="2"/>
+ <alias name="nperf_debug" value="0"/>
+ <alias name="nperf_max_dev" value="20%"/>
+ <alias name="nperf_protocols" value="tcp udp sctp"/>
+ <alias name="mtu" value="1500" />
+ <alias name="mapping_file" value="virtual_ovs_bridge_vlan_in_host_mirrored.mapping" />
+ <alias name="vlan10_net" value="192.168.10"/>
+ <alias name="vlan10_tag" value="10"/>
+ <alias name="driver" value="ixgbe"/>
+ </define>
+ <network>
+ <host id="host1">
+ <params>
+ <param name="machine_type" value="baremetal"/>
+ </params>
+ <interfaces>
+ <eth id="nic" label="to_switch">
+ <params>
+ <param name="driver" value="{$driver}" />
+ </params>
+ </eth>
+ <eth id="tap" label="to_guest1"/>
+ <ovs_bridge id="ovs_br">
+ <slaves>
+ <slave id="tap" />
+ <slave id="nic" />
+ </slaves>
+ <vlan tag="{$vlan10_tag}">
+ <slaves>
+ <slave id="tap"/>
+ </slaves>
+ </vlan>
+ </ovs_bridge>
+ </interfaces>
+ </host>
+ <host id="guest1">
+ <interfaces>
+ <eth id="guestnic" label="to_guest1">
+ <params>
+ <param name="driver" value="virtio"/>
+ </params>
+ <addresses>
+ <address>{$vlan10_net}.10/24</address>
+ <address>fc00:0:0:10::10/64</address>
+ </addresses>
+ </eth>
+ </interfaces>
+ </host>
+
+ <host id="host2">
+ <params>
+ <param name="machine_type" value="baremetal"/>
+ </params>
+ <interfaces>
+ <eth id="nic" label="to_switch">
+ <params>
+ <param name="driver" value="{$driver}" />
+ </params>
+ </eth>
+ <eth id="tap" label="to_guest2"/>
+ <ovs_bridge id="ovs_br">
+ <slaves>
+ <slave id="tap" />
+ <slave id="nic" />
+ </slaves>
+ <vlan tag="{$vlan10_tag}">
+ <slaves>
+ <slave id="tap"/>
+ </slaves>
+ </vlan>
+ </ovs_bridge>
+ </interfaces>
+ </host>
+ <host id="guest2">
+ <interfaces>
+ <eth id="guestnic" label="to_guest2">
+ <params>
+ <param name="driver" value="virtio"/>
+ </params>
+ <addresses>
+ <address>{$vlan10_net}.11/24</address>
+ <address>fc00:0:0:10::11/64</address>
+ </addresses>
+ </eth>
+ </interfaces>
+ </host>
+ </network>
+
+ <task python="virtual_ovs_bridge_vlan_in_host_mirrored.py" />
+</lnstrecipe>
--
2.7.4
6 years, 7 months
[PATCH v7] regression-tests: add ipsec_esp_ah_comp test
by Kamil Jerabek
This patch adds a new test to our regression_tests phase3. The topology
is the same as in the phase1 simple_netperf test.
This test covers performance of IPsec over ethernet. Ping and netperf are run.
Both tunnel and transport modes are covered. All tests are done with the esp,
ah and comp options set together. All combinations of the cipher and hash
functions listed below are tested. The netperf message size is set to 16000
bytes by default.
ciphers: aes-128, aes-256
hash functions: hmac(md5), sha256
Signed-off-by: Kamil Jerabek <kjerabek(a)redhat.com>
---
Changes in
v2: * removed whitespaces and static thresholds
v3: * default nperf_msg_size set to 16000
* added set mtu to nic
* msg_size added as result parameter to perfrepo
* repaired bad indentation
* default mtu set to 1500
* removed one fragmentation check
v4: * reworked compression check
* removed typically unused cipher algorithms and hash functions
v5: * ciphers structure changed
v6: * hash structure changed
* generate_key function changed
v7: * disabled packet assert compression check for transport mode
---
.../phase3/ipsec_esp_ah_comp.README | 94 ++++
.../regression_tests/phase3/ipsec_esp_ah_comp.py | 553 +++++++++++++++++++++
.../regression_tests/phase3/ipsec_esp_ah_comp.xml | 50 ++
3 files changed, 697 insertions(+)
create mode 100644 recipes/regression_tests/phase3/ipsec_esp_ah_comp.README
create mode 100644 recipes/regression_tests/phase3/ipsec_esp_ah_comp.py
create mode 100644 recipes/regression_tests/phase3/ipsec_esp_ah_comp.xml
diff --git a/recipes/regression_tests/phase3/ipsec_esp_ah_comp.README b/recipes/regression_tests/phase3/ipsec_esp_ah_comp.README
new file mode 100644
index 0000000..9490d08
--- /dev/null
+++ b/recipes/regression_tests/phase3/ipsec_esp_ah_comp.README
@@ -0,0 +1,94 @@
+Topology:
+
+ switch
+ +------+
+ | |
+ | |
+ +-------------+ +-------------+
+ | | | |
+ | | | |
+ | +------+ |
+ | |
+ | |
+ +-+--+ +-+--+
++-------|eth1|------+ +-------|eth1|------+
+| +-+--+ | | +-+--+ |
+| | | |
+| | | |
+| | | |
+| | | |
+| | | |
+| host1 | | host2 |
+| | | |
+| | | |
+| | | |
++-------------------+ +-------------------+
+
+Number of hosts: 2
+Host #1 description:
+ One ethernet device configured with ip addresses:
+ 192.168.99.1/24
+ fc00:1::1/64
+
+Host #2 description:
+ One ethernet device configured with ip addresses:
+ 192.168.100.1/24
+ fc00:2::1/64
+
+Test name:
+ ipsec_esp_ah_comp.py
+Test description:
+ Ping:
+ + count: 10
+ + interval: 0.1s
+ + between ipsec encrypted ethernet interfaces headers check expecting PASS
+ + between ipsec encrypted ethernet interfaces compression check expecting PASS
+ Ping6:
+ + count: 10
+ + interval: 0.1s
+ + between ipsec encrypted ethernet interfaces headers check expecting PASS
+ + between ipsec encrypted ethernet interfaces compression check expecting PASS
+ Netperf:
+ + duration: 60s
+ + TCP_STREAM and UDP_STREAM
+ + ipv4 and ipv6
+ + between ipsec encrypted ethernet interfaces
+ IPsec
+ + tested with esp, ah, comp options together
+ + tested with all listed ciphers and hash functions
+ + ciphers
+ + aes-128
+ + aes-256
+ + hash functions
+ + hmac(md5)
+ + sha256
+
+PerfRepo integration:
+ First, preparation in PerfRepo is required - you need to create Test objects
+ through the web interface that properly describe the individual Netperf
+ tests that this recipe runs. Don't forget to also add appropriate metrics.
+ For these Netperf tests it's always:
+ * throughput
+ * throughput_min
+ * throughput_max
+ * throughput_deviation
+
+ After that, to enable support for PerfRepo you need to create the file
+ ipsec_esp_ah_comp.mapping and define the following id mappings:
+ tcp_ipv4_id -> to store ipv4 TCP_STREAM Netperf test results, maps to TestUid of a PerfRepo Test object
+ tcp_ipv6_id -> to store ipv6 TCP_STREAM Netperf test results, maps to TestUid of a PerfRepo Test object
+ udp_ipv4_id -> to store ipv4 UDP_STREAM Netperf test results, maps to TestUid of a PerfRepo Test object
+ udp_ipv6_id -> to store ipv6 UDP_STREAM Netperf test results, maps to TestUid of a PerfRepo Test object
+
+ To enable result comparison against baselines you need to create a Report in
+ PerfRepo that will store the baseline. Set up the Report to only contain results
+ with the same hash tag and then add a new mapping to the mapping file, with
+ this format:
+ <some_hash> = <report_id>
+
+ The hash value is automatically generated during test execution and added
+ to each result stored in PerfRepo. To get the Report id you need to open
+ that report in your browser and find it in the URL.
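+
+ For illustration only, a complete ipsec_esp_ah_comp.mapping file could then
+ look something like this (the test uids and the report id below are made up,
+ yours will come from your own PerfRepo instance):
+     tcp_ipv4_id = uid_of_your_ipv4_tcp_stream_test
+     udp_ipv4_id = uid_of_your_ipv4_udp_stream_test
+     tcp_ipv6_id = uid_of_your_ipv6_tcp_stream_test
+     udp_ipv6_id = uid_of_your_ipv6_udp_stream_test
+     a1b2c3d4e5f6 = 42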
+
+ When running this recipe you should also define the 'product_name' alias
+ (e.g. RHEL7) in order to tag the result object in PerfRepo.
diff --git a/recipes/regression_tests/phase3/ipsec_esp_ah_comp.py b/recipes/regression_tests/phase3/ipsec_esp_ah_comp.py
new file mode 100644
index 0000000..2669b9e
--- /dev/null
+++ b/recipes/regression_tests/phase3/ipsec_esp_ah_comp.py
@@ -0,0 +1,553 @@
+from lnst.Controller.Task import ctl
+from lnst.Controller.PerfRepoUtils import perfrepo_baseline_to_dict
+from lnst.Controller.PerfRepoUtils import netperf_result_template
+
+from lnst.RecipeCommon.ModuleWrap import ping, ping6, netperf
+from lnst.RecipeCommon.IRQ import pin_dev_irqs
+from lnst.RecipeCommon.PerfRepo import generate_perfrepo_comment
+import re
+
+# ---------------------------
+# ALGORITHM AND CIPHER CONFIG
+# ---------------------------
+
+ciphers = []
+
+# length param is in bits
+def generate_key(length):
+ key = "0x"
+ key = key + (length/8) * "0b"
+ return key
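+# e.g. generate_key(128) returns "0x" followed by "0b" repeated 16 times,
+# i.e. a 32 hex digit (128 bit) dummy key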
+
+ciphers.append(('aes', 128))
+ciphers.append(('aes', 256))
+
+hashes = []
+
+hashes.append(('hmac(md5)', 128))
+hashes.append(('sha256', 256))
+
+# ------
+# SETUP
+# ------
+
+mapping_file = ctl.get_alias("mapping_file")
+perf_api = ctl.connect_PerfRepo(mapping_file)
+
+product_name = ctl.get_alias("product_name")
+
+m1 = ctl.get_host("machine1")
+m2 = ctl.get_host("machine2")
+
+m1.sync_resources(modules=["IcmpPing", "Icmp6Ping", "Netperf", "Custom"])
+m2.sync_resources(modules=["PacketAssert", "IcmpPing", "Icmp6Ping", "Netperf"])
+
+# ------
+# TESTS
+# ------
+
+ipv = ctl.get_alias("ipv")
+mtu = ctl.get_alias("mtu")
+netperf_duration = int(ctl.get_alias("netperf_duration"))
+nperf_reserve = int(ctl.get_alias("nperf_reserve"))
+nperf_confidence = ctl.get_alias("nperf_confidence")
+nperf_max_runs = int(ctl.get_alias("nperf_max_runs"))
+nperf_cpupin = ctl.get_alias("nperf_cpupin")
+nperf_cpu_util = ctl.get_alias("nperf_cpu_util")
+nperf_num_parallel = int(ctl.get_alias("nperf_num_parallel"))
+nperf_debug = ctl.get_alias("nperf_debug")
+nperf_max_dev = ctl.get_alias("nperf_max_dev")
+nperf_msg_size = ctl.get_alias("nperf_msg_size")
+pr_user_comment = ctl.get_alias("perfrepo_comment")
+ipsec_mode = ctl.get_alias("ipsec_mode")
+
+pr_comment = generate_perfrepo_comment([m1, m2], pr_user_comment)
+
+m1_if = m1.get_interface("eth")
+m2_if = m2.get_interface("eth")
+
+m1_if.set_mtu(mtu)
+m2_if.set_mtu(mtu)
+
+m1_if_name = m1_if.get_devname()
+m2_if_name = m2_if.get_devname()
+
+m1_if_addr = m1_if.get_ip()
+m2_if_addr = m2_if.get_ip()
+
+m1_if_addr6 = m1_if.get_ip(1)
+m2_if_addr6 = m2_if.get_ip(1)
+
+
+# add routing rulez ipv4
+# so the host knows where to send traffic destined to remote site
+m1.run("ip route add %s dev %s" % (m2_if_addr, m1_if_name))
+
+# so the host knows where to send traffic destined to remote site
+m2.run("ip route add %s dev %s" % (m1_if_addr, m2_if_name))
+
+# add routing rulez ipv6
+# so the host knows where to send traffic destined to remote site
+m1.run("ip route add %s dev %s" % (m2_if_addr6, m1_if_name))
+
+# so the host knows where to send traffic destined to remote site
+m2.run("ip route add %s dev %s" % (m1_if_addr6, m2_if_name))
+
+if nperf_msg_size is None:
+ nperf_msg_size = 16000
+
+if ipsec_mode is None:
+ ipsec_mode = "transport"
+
+res = m1.run("rpm -qa iproute", save_output=True)
+if (res.get_result()["res_data"]["stdout"].find("iproute-2") != -1):
+ m1_key="0x"
+else:
+ m1_key=""
+
+res = m2.run("rpm -qa iproute", save_output=True)
+if (res.get_result()["res_data"]["stdout"].find("iproute-2") != -1):
+ m2_key="0x"
+else:
+ m2_key=""
+
+if nperf_cpupin:
+ m1.run("service irqbalance stop")
+ m2.run("service irqbalance stop")
+
+    dev_list = [(m1, m1_if), (m2, m2_if)]
+
+ # this will pin devices irqs to cpu #0
+ for m, d in dev_list:
+ pin_dev_irqs(m, d, 0)
+
+nperf_opts = ""
+if nperf_cpupin and nperf_num_parallel == 1:
+ nperf_opts = " -T%s,%s" % (nperf_cpupin, nperf_cpupin)
+
+ctl.wait(15)
+
+def configure_ipsec(ciph_alg, ciph_key, hash_alg, hash_key, ip_version):
+ if ip_version == "ipv4":
+ m1_addr = m1_if_addr
+ m2_addr = m2_if_addr
+ else:
+ m1_addr = m1_if_addr6
+ m2_addr = m2_if_addr6
+
+ # configure policy and state
+ m1.run("ip xfrm policy flush")
+ m1.run("ip xfrm state flush")
+ m2.run("ip xfrm policy flush")
+ m2.run("ip xfrm state flush")
+
+ m1.run("ip xfrm policy add src %s dst %s dir out "\
+ "tmpl src %s dst %s proto comp spi 4 mode %s "\
+ "tmpl src %s dst %s proto esp spi 2 mode %s "\
+ "tmpl src %s dst %s proto ah spi 3 mode %s"
+ % (m1_addr, m2_addr,
+ m1_addr, m2_addr, ipsec_mode,
+ m1_addr, m2_addr, ipsec_mode,
+ m1_addr, m2_addr, ipsec_mode))
+ m1.run("ip xfrm policy add src %s dst %s dir in "\
+ "tmpl src %s dst %s proto comp spi 1 mode %s level use "\
+ "tmpl src %s dst %s proto esp spi 2 mode %s "\
+ "tmpl src %s dst %s proto ah spi 3 mode %s"
+ % (m2_addr, m1_addr,
+ m2_addr, m1_addr, ipsec_mode,
+ m2_addr, m1_addr, ipsec_mode,
+ m2_addr, m1_addr, ipsec_mode))
+
+ m1.run("ip xfrm state add "\
+ "src %s dst %s proto comp spi 4 mode %s "\
+ "comp deflate"\
+ % (m1_addr, m2_addr, ipsec_mode))
+ m1.run("ip xfrm state add "\
+ "src %s dst %s proto comp spi 1 mode %s "\
+ "comp deflate"\
+ % (m2_addr, m1_addr, ipsec_mode))
+
+ m1.run("ip xfrm state add "\
+ "src %s dst %s proto esp spi 2 mode %s "\
+ "enc '%s' %s"\
+ % (m1_addr, m2_addr, ipsec_mode,
+ ciph_alg, ciph_key))
+ m1.run("ip xfrm state add "\
+ "src %s dst %s proto esp spi 2 mode %s "\
+ "enc '%s' %s"\
+ % (m2_addr, m1_addr, ipsec_mode,
+ ciph_alg, ciph_key))
+
+ m1.run("ip xfrm state add "\
+ "src %s dst %s proto ah spi 3 mode %s "\
+ "auth '%s' %s"
+ % (m1_addr, m2_addr, ipsec_mode,
+ hash_alg, hash_key))
+ m1.run("ip xfrm state add "\
+ "src %s dst %s proto ah spi 3 mode %s "\
+ "auth '%s' %s"
+ % (m2_addr, m1_addr, ipsec_mode,
+ hash_alg, hash_key))
+
+
+ # second machine
+ m2.run("ip xfrm policy add src %s dst %s dir out "\
+ "tmpl src %s dst %s proto comp spi 1 mode %s "\
+ "tmpl src %s dst %s proto esp spi 2 mode %s "\
+ "tmpl src %s dst %s proto ah spi 3 mode %s"
+ % (m2_addr, m1_addr,
+ m2_addr, m1_addr, ipsec_mode,
+ m2_addr, m1_addr, ipsec_mode,
+ m2_addr, m1_addr, ipsec_mode))
+ m2.run("ip xfrm policy add src %s dst %s dir in "\
+ "tmpl src %s dst %s proto comp spi 4 mode %s level use "\
+ "tmpl src %s dst %s proto esp spi 2 mode %s "\
+ "tmpl src %s dst %s proto ah spi 3 mode %s"
+ % (m1_addr, m2_addr,
+ m1_addr, m2_addr, ipsec_mode,
+ m1_addr, m2_addr, ipsec_mode,
+ m1_addr, m2_addr, ipsec_mode))
+
+ m2.run("ip xfrm state add "\
+ "src %s dst %s proto comp spi 4 mode %s "\
+ "comp deflate"\
+ % (m1_addr, m2_addr, ipsec_mode))
+ m2.run("ip xfrm state add "\
+ "src %s dst %s proto comp spi 1 mode %s "\
+ "comp deflate"\
+ % (m2_addr, m1_addr, ipsec_mode))
+
+ m2.run("ip xfrm state add "\
+ "src %s dst %s proto esp spi 2 mode %s "\
+ "enc '%s' %s"\
+ % (m1_addr, m2_addr, ipsec_mode,
+ ciph_alg, ciph_key))
+ m2.run("ip xfrm state add "\
+ "src %s dst %s proto esp spi 2 mode %s "\
+ "enc '%s' %s"\
+ % (m2_addr, m1_addr, ipsec_mode,
+ ciph_alg, ciph_key))
+
+ m2.run("ip xfrm state add "\
+ "src %s dst %s proto ah spi 3 mode %s "\
+ "auth '%s' %s"\
+ % (m1_addr, m2_addr, ipsec_mode,
+ hash_alg, hash_key))
+ m2.run("ip xfrm state add "\
+ "src %s dst %s proto ah spi 3 mode %s "\
+ "auth '%s' %s"\
+ % (m2_addr, m1_addr, ipsec_mode,
+ hash_alg, hash_key))
+
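+# For orientation only: with the default transport mode, the aes/128 cipher and
+# the addresses from the recipe xml, the esp state command built above expands
+# roughly to:
+#   ip xfrm state add src 192.168.99.1 dst 192.168.100.1 proto esp spi 2 \
+#       mode transport enc 'aes' 0x0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b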
+
+for ciph_alg, ciph_len in ciphers:
+ for hash_alg, hash_len in hashes:
+ # test: compressed check, TCP netperf, UDP netperf
+ if ipv in [ 'ipv4', 'both']:
+ configure_ipsec(ciph_alg,
+ generate_key(ciph_len),
+ hash_alg,
+ generate_key(hash_len),
+ "ipv4")
+
+ dump = m1.run("tcpdump -i %s -nn -vv" % m1_if_name, bg=True)
+
+ # ping + PacketAssert
+ assert_mod = ctl.get_module("PacketAssert",
+ options={
+ "interface": m2_if_name,
+ "filter": "ah",
+ "grep_for": [ "AH\(spi=0x00000003",
+ "ESP\(spi=0x00000002" ],
+ "min": 10
+ })
+
+ assert_proc = m2.run(assert_mod, bg=True)
+
+ ping_mod = ctl.get_module("IcmpPing",
+ options={
+ "addr": m2_if_addr,
+ "count": 10,
+ "interval": 0.1})
+
+ ctl.wait(2)
+
+ m1.run(ping_mod)
+
+ ctl.wait(2)
+
+ assert_proc.intr()
+
+ dump.intr()
+
+ m1.run("ip -s xfrm pol")
+ m1.run("ip -s xfrm state")
+
+ # ping + PacketAssert test with bigger size to check compression is used
+ dump = m1.run("tcpdump -i %s -nn -vvv" % m1_if_name, bg=True)
+
+ if ipsec_mode != "transport":
+ assert_mod = ctl.get_module("PacketAssert",
+ options={
+ "interface": m2_if_name,
+ "grep_for": ["IPComp"],
+ "min": 10
+ })
+
+ assert_proc = m2.run(assert_mod, bg=True)
+
+ ping_mod = ctl.get_module("IcmpPing",
+ options={
+ "addr": m2_if_addr,
+ "count": 10,
+ "interval": 0.1,
+ "size": 1500})
+
+ ctl.wait(2)
+
+ m1.run(ping_mod)
+
+ ctl.wait(2)
+
+ if ipsec_mode != "transport":
+ assert_proc.intr()
+
+ dump.intr()
+
+ # prepare PerfRepo result for tcp
+ result_tcp = perf_api.new_result("tcp_ipv4_id",
+ "tcp_ipv4_result",
+ hash_ignore=[
+ r'kernel_release',
+ r'redhat_release'])
+ result_tcp.add_tag(product_name)
+
+ if nperf_num_parallel > 1:
+ result_tcp.add_tag("multithreaded")
+ result_tcp.set_parameter('num_parallel', nperf_num_parallel)
+
+ result_tcp.set_parameter('cipher_alg', ciph_alg)
+ result_tcp.set_parameter('cipher_len', ciph_len)
+ result_tcp.set_parameter('hash_alg', hash_alg)
+ result_tcp.set_parameter('msg_size', nperf_msg_size)
+
+ baseline = perf_api.get_baseline_of_result(result_tcp)
+ baseline = perfrepo_baseline_to_dict(baseline)
+
+
+ tcp_res_data = netperf((m1, m1_if, 0, {"scope": 0}),
+ (m2, m2_if, 0, {"scope": 0}),
+ client_opts={"duration" : netperf_duration,
+ "testname" : "TCP_STREAM",
+ "confidence" : nperf_confidence,
+ "num_parallel" : nperf_num_parallel,
+ "cpu_util" : nperf_cpu_util,
+ "runs": nperf_max_runs,
+ "debug": nperf_debug,
+ "max_deviation": nperf_max_dev,
+ "msg_size" : nperf_msg_size,
+ "netperf_opts": nperf_opts},
+ baseline = baseline,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_tcp, tcp_res_data)
+ result_tcp.set_comment(pr_comment)
+ perf_api.save_result(result_tcp)
+
+ # prepare PerfRepo result for udp
+ result_udp = perf_api.new_result("udp_ipv4_id",
+ "udp_ipv4_result",
+ hash_ignore=[
+ r'kernel_release',
+ r'redhat_release'])
+ result_udp.add_tag(product_name)
+
+ if nperf_num_parallel > 1:
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ result_udp.set_parameter('cipher_alg', ciph_alg)
+ result_udp.set_parameter('cipher_len', ciph_len)
+ result_udp.set_parameter('hash_alg', hash_alg)
+ result_udp.set_parameter('msg_size', nperf_msg_size)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ baseline = perfrepo_baseline_to_dict(baseline)
+
+ udp_res_data = netperf((m1, m1_if, 0, {"scope": 0}),
+ (m2, m2_if, 0, {"scope": 0}),
+ client_opts={"duration" : netperf_duration,
+ "testname" : "UDP_STREAM",
+ "confidence" : nperf_confidence,
+ "num_parallel" : nperf_num_parallel,
+ "cpu_util" : nperf_cpu_util,
+ "runs": nperf_max_runs,
+ "debug": nperf_debug,
+ "max_deviation": nperf_max_dev,
+ "msg_size" : nperf_msg_size,
+ "netperf_opts": nperf_opts},
+ baseline = baseline,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ result_udp.set_comment(pr_comment)
+ perf_api.save_result(result_udp)
+
+ if ipv in [ 'ipv6', 'both']:
+ configure_ipsec(ciph_alg,
+ generate_key(ciph_len),
+ hash_alg,
+                            generate_key(hash_len),
+ "ipv6")
+
+ dump = m1.run("tcpdump -i %s -nn -vv" % m1_if_name, bg=True)
+
+ # ping + PacketAssert
+ assert_mod = ctl.get_module("PacketAssert",
+ options={
+ "interface": m2_if_name,
+ "filter": "ah",
+ "grep_for": [ "AH\(spi=0x00000003",
+ "ESP\(spi=0x00000002" ],
+ "min": 10
+ })
+
+ assert_proc = m2.run(assert_mod, bg=True)
+
+ ping_mod = ctl.get_module("Icmp6Ping",
+ options={
+ "addr": m2_if_addr6,
+ "count": 10,
+ "interval": 0.1})
+
+ ctl.wait(2)
+
+ m1.run(ping_mod)
+
+ ctl.wait(2)
+
+ assert_proc.intr()
+
+ dump.intr()
+
+ m1.run("ip -s xfrm pol")
+ m1.run("ip -s xfrm state")
+
+ # ping + PacketAssert test with bigger size to check compression is used
+ dump = m1.run("tcpdump -i %s -nn -vv" % m1_if_name, bg=True)
+
+ if ipsec_mode != "transport":
+ assert_mod = ctl.get_module("PacketAssert",
+ options={
+ "interface": m2_if_name,
+ "grep_for": ["IPComp"],
+ "min": 10
+ })
+
+ assert_proc = m2.run(assert_mod, bg=True)
+
+ ping_mod = ctl.get_module("Icmp6Ping",
+ options={
+ "addr": m2_if_addr6,
+ "count": 10,
+ "interval": 0.1,
+ "size": 1500})
+
+ ctl.wait(2)
+
+ m1.run(ping_mod)
+
+ ctl.wait(2)
+
+ if ipsec_mode != "transport":
+ assert_proc.intr()
+
+ dump.intr()
+
+ # prepare PerfRepo result for tcp
+ result_tcp = perf_api.new_result("tcp_ipv6_id",
+ "tcp_ipv6_result",
+ hash_ignore=[
+ r'kernel_release',
+ r'redhat_release'])
+ result_tcp.add_tag(product_name)
+
+ if nperf_num_parallel > 1:
+ result_tcp.add_tag("multithreaded")
+ result_tcp.set_parameter('num_parallel', nperf_num_parallel)
+
+ result_tcp.set_parameter('cipher_alg', ciph_alg)
+ result_tcp.set_parameter('cipher_len', ciph_len)
+ result_tcp.set_parameter('hash_alg', hash_alg)
+ result_tcp.set_parameter('msg_size', nperf_msg_size)
+
+ baseline = perf_api.get_baseline_of_result(result_tcp)
+ baseline = perfrepo_baseline_to_dict(baseline)
+
+
+ tcp_res_data = netperf((m1, m1_if, 1, {"scope": 0}),
+ (m2, m2_if, 1, {"scope": 0}),
+ client_opts={"duration" : netperf_duration,
+ "testname" : "TCP_STREAM",
+ "confidence" : nperf_confidence,
+ "num_parallel" : nperf_num_parallel,
+ "cpu_util" : nperf_cpu_util,
+ "runs": nperf_max_runs,
+ "debug": nperf_debug,
+ "max_deviation": nperf_max_dev,
+ "msg_size" : nperf_msg_size,
+                               "netperf_opts" : nperf_opts + " -6"},
+ baseline = baseline,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_tcp, tcp_res_data)
+ result_tcp.set_comment(pr_comment)
+ perf_api.save_result(result_tcp)
+
+ # prepare PerfRepo result for udp
+ result_udp = perf_api.new_result("udp_ipv6_id",
+ "udp_ipv6_result",
+ hash_ignore=[
+ r'kernel_release',
+ r'redhat_release'])
+ result_udp.add_tag(product_name)
+
+ if nperf_num_parallel > 1:
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ result_udp.set_parameter('cipher_alg', ciph_alg)
+ result_udp.set_parameter('cipher_len', ciph_len)
+ result_udp.set_parameter('hash_alg', hash_alg)
+ result_udp.set_parameter('msg_size', nperf_msg_size)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ baseline = perfrepo_baseline_to_dict(baseline)
+
+ udp_res_data = netperf((m1, m1_if, 1, {"scope": 0}),
+ (m2, m2_if, 1, {"scope": 0}),
+ client_opts={"duration" : netperf_duration,
+ "testname" : "UDP_STREAM",
+ "confidence" : nperf_confidence,
+ "num_parallel" : nperf_num_parallel,
+ "cpu_util" : nperf_cpu_util,
+ "runs": nperf_max_runs,
+ "debug": nperf_debug,
+ "max_deviation": nperf_max_dev,
+ "msg_size" : nperf_msg_size,
+                                          "netperf_opts" : nperf_opts + " -6"},
+ baseline = baseline,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ result_udp.set_comment(pr_comment)
+ perf_api.save_result(result_udp)
+
+m1.run("ip xfrm policy flush")
+m1.run("ip xfrm state flush")
+m2.run("ip xfrm policy flush")
+m2.run("ip xfrm state flush")
+
+if nperf_cpupin:
+ m1.run("service irqbalance start")
+ m2.run("service irqbalance start")
diff --git a/recipes/regression_tests/phase3/ipsec_esp_ah_comp.xml b/recipes/regression_tests/phase3/ipsec_esp_ah_comp.xml
new file mode 100644
index 0000000..8ed30d6
--- /dev/null
+++ b/recipes/regression_tests/phase3/ipsec_esp_ah_comp.xml
@@ -0,0 +1,50 @@
+<lnstrecipe>
+ <define>
+ <alias name="ipv" value="both" />
+ <alias name="mtu" value="1500" />
+ <alias name="netperf_duration" value="60" />
+ <alias name="nperf_reserve" value="20" />
+ <alias name="nperf_confidence" value="99,5" />
+ <alias name="nperf_max_runs" value="5"/>
+ <alias name="nperf_num_parallel" value="1"/>
+ <alias name="nperf_debug" value="0"/>
+ <alias name="nperf_max_dev" value="20%"/>
+ <alias name="mapping_file" value="ipsec_esp_ah_comp.mapping"/>
+ <alias name="net_1" value="192.168.99"/>
+ <alias name="net6_1" value="fc00:1::"/>
+ <alias name="net_2" value="192.168.100"/>
+ <alias name="net6_2" value="fc00:2::"/>
+ <alias name="driver" value=""/>
+ </define>
+ <network>
+ <host id="machine1">
+ <interfaces>
+ <eth id="eth" label="localnet">
+ <params>
+ <param name="driver" value="{$driver}"/>
+ </params>
+ <addresses>
+ <address value="{$net_1}.1/24"/>
+ <address value="{$net6_1}1/64"/>
+ </addresses>
+ </eth>
+ </interfaces>
+ </host>
+ <host id="machine2">
+ <interfaces>
+ <eth id="eth" label="localnet">
+ <params>
+ <param name="driver" value="{$driver}"/>
+ </params>
+ <addresses>
+ <address value="{$net_2}.1/24"/>
+ <address value="{$net6_2}1/64"/>
+ </addresses>
+ </eth>
+ </interfaces>
+ </host>
+ </network>
+
+ <task python="ipsec_esp_ah_comp.py"/>
+
+</lnstrecipe>
--
2.5.5
6 years, 7 months
[PATCH v6] regression-tests: add ipsec_esp_ah_comp test
by Kamil Jerabek
This patch adds a new test to our regression_tests phase3. The topology
is the same as in the phase1 simple_netperf test.
This test covers performance of IPsec over ethernet. Ping and netperf are run.
Both tunnel and transport modes are covered. All tests are done with the esp,
ah and comp options set together. All combinations of the cipher and hash
functions listed below are tested. The netperf message size is set to 16000
bytes by default.
ciphers: aes-128, aes-256
hash functions: hmac(md5), sha256
Signed-off-by: Kamil Jerabek <kjerabek(a)redhat.com>
---
Changes in
v2: * removed whitespaces and static thresholds
v3: * default nperf_msg_size set to 16000
* added set mtu to nic
* msg_size added as result parameter to perfrepo
* repaired bad indentation
* default mtu set to 1500
* removed one fragmentation check
v4: * reworked compression check
* removed typically unused cipher algorithms and hash functions
v5: * ciphers structure changed
v6: * hash structure changed
* generate_key function changed
---
.../phase3/ipsec_esp_ah_comp.README | 94 ++++
.../regression_tests/phase3/ipsec_esp_ah_comp.py | 549 +++++++++++++++++++++
.../regression_tests/phase3/ipsec_esp_ah_comp.xml | 50 ++
3 files changed, 693 insertions(+)
create mode 100644 recipes/regression_tests/phase3/ipsec_esp_ah_comp.README
create mode 100644 recipes/regression_tests/phase3/ipsec_esp_ah_comp.py
create mode 100644 recipes/regression_tests/phase3/ipsec_esp_ah_comp.xml
diff --git a/recipes/regression_tests/phase3/ipsec_esp_ah_comp.README b/recipes/regression_tests/phase3/ipsec_esp_ah_comp.README
new file mode 100644
index 0000000..9490d08
--- /dev/null
+++ b/recipes/regression_tests/phase3/ipsec_esp_ah_comp.README
@@ -0,0 +1,94 @@
+Topology:
+
+ switch
+ +------+
+ | |
+ | |
+ +-------------+ +-------------+
+ | | | |
+ | | | |
+ | +------+ |
+ | |
+ | |
+ +-+--+ +-+--+
++-------|eth1|------+ +-------|eth1|------+
+| +-+--+ | | +-+--+ |
+| | | |
+| | | |
+| | | |
+| | | |
+| | | |
+| host1 | | host2 |
+| | | |
+| | | |
+| | | |
++-------------------+ +-------------------+
+
+Number of hosts: 2
+Host #1 description:
+ One ethernet device configured with ip addresses:
+ 192.168.99.1/24
+ fc00:1::1/64
+
+Host #2 description:
+ One ethernet device configured with ip addresses:
+ 192.168.100.1/24
+ fc00:2::1/64
+
+Test name:
+ ipsec_esp_ah_comp.py
+Test description:
+ Ping:
+ + count: 10
+ + interval: 0.1s
+ + between ipsec encrypted ethernet interfaces headers check expecting PASS
+ + between ipsec encrypted ethernet interfaces compression check expecting PASS
+ Ping6:
+ + count: 10
+ + interval: 0.1s
+ + between ipsec encrypted ethernet interfaces headers check expecting PASS
+ + between ipsec encrypted ethernet interfaces compression check expecting PASS
+ Netperf:
+ + duration: 60s
+ + TCP_STREAM and UDP_STREAM
+ + ipv4 and ipv6
+ + between ipsec encrypted ethernet interfaces
+ IPsec
+ + tested with esp, ah, comp options together
+ + tested with all listed ciphers and hash functions
+ + ciphers
+ + aes-128
+ + aes-256
+ + hash functions
+ + hmac(md5)
+ + sha256
+
+PerfRepo integration:
+ First, preparation in PerfRepo is required - you need to create Test objects
+ through the web interface that properly describe the individual Netperf
+ tests that this recipe runs. Don't forget to also add appropriate metrics.
+ For these Netperf tests it's always:
+ * throughput
+ * throughput_min
+ * throughput_max
+ * throughput_deviation
+
+ After that, to enable support for PerfRepo you need to create the file
+ ipsec_esp_ah_comp.mapping and define the following id mappings:
+ tcp_ipv4_id -> to store ipv4 TCP_STREAM Netperf test results, maps to TestUid of a PerfRepo Test object
+ tcp_ipv6_id -> to store ipv6 TCP_STREAM Netperf test results, maps to TestUid of a PerfRepo Test object
+ udp_ipv4_id -> to store ipv4 UDP_STREAM Netperf test results, maps to TestUid of a PerfRepo Test object
+ udp_ipv6_id -> to store ipv6 UDP_STREAM Netperf test results, maps to TestUid of a PerfRepo Test object
+
+ To enable result comparison against baselines you need to create a Report in
+ PerfRepo that will store the baseline. Set up the Report to only contain results
+ with the same hash tag and then add a new mapping to the mapping file, with
+ this format:
+ <some_hash> = <report_id>
+
+ The hash value is automatically generated during test execution and added
+ to each result stored in PerfRepo. To get the Report id you need to open
+ that report in your browser and find it in the URL.
+
+ When running this recipe you should also define the 'product_name' alias
+ (e.g. RHEL7) in order to tag the result object in PerfRepo.
diff --git a/recipes/regression_tests/phase3/ipsec_esp_ah_comp.py b/recipes/regression_tests/phase3/ipsec_esp_ah_comp.py
new file mode 100644
index 0000000..ebcbc93
--- /dev/null
+++ b/recipes/regression_tests/phase3/ipsec_esp_ah_comp.py
@@ -0,0 +1,549 @@
+from lnst.Controller.Task import ctl
+from lnst.Controller.PerfRepoUtils import perfrepo_baseline_to_dict
+from lnst.Controller.PerfRepoUtils import netperf_result_template
+
+from lnst.RecipeCommon.ModuleWrap import ping, ping6, netperf
+from lnst.RecipeCommon.IRQ import pin_dev_irqs
+from lnst.RecipeCommon.PerfRepo import generate_perfrepo_comment
+import re
+
+# ---------------------------
+# ALGORITHM AND CIPHER CONFIG
+# ---------------------------
+
+ciphers = []
+
+# length param is in bits
+def generate_key(length):
+ key = "0x"
+ key = key + (length/8) * "0b"
+ return key
+
+ciphers.append(('aes', 128))
+ciphers.append(('aes', 256))
+
+hashes = []
+
+hashes.append(('hmac(md5)', 128))
+hashes.append(('sha256', 256))
+
+# ------
+# SETUP
+# ------
+
+mapping_file = ctl.get_alias("mapping_file")
+perf_api = ctl.connect_PerfRepo(mapping_file)
+
+product_name = ctl.get_alias("product_name")
+
+m1 = ctl.get_host("machine1")
+m2 = ctl.get_host("machine2")
+
+m1.sync_resources(modules=["IcmpPing", "Icmp6Ping", "Netperf", "Custom"])
+m2.sync_resources(modules=["PacketAssert", "IcmpPing", "Icmp6Ping", "Netperf"])
+
+# ------
+# TESTS
+# ------
+
+ipv = ctl.get_alias("ipv")
+mtu = ctl.get_alias("mtu")
+netperf_duration = int(ctl.get_alias("netperf_duration"))
+nperf_reserve = int(ctl.get_alias("nperf_reserve"))
+nperf_confidence = ctl.get_alias("nperf_confidence")
+nperf_max_runs = int(ctl.get_alias("nperf_max_runs"))
+nperf_cpupin = ctl.get_alias("nperf_cpupin")
+nperf_cpu_util = ctl.get_alias("nperf_cpu_util")
+nperf_num_parallel = int(ctl.get_alias("nperf_num_parallel"))
+nperf_debug = ctl.get_alias("nperf_debug")
+nperf_max_dev = ctl.get_alias("nperf_max_dev")
+nperf_msg_size = ctl.get_alias("nperf_msg_size")
+pr_user_comment = ctl.get_alias("perfrepo_comment")
+ipsec_mode = ctl.get_alias("ipsec_mode")
+
+pr_comment = generate_perfrepo_comment([m1, m2], pr_user_comment)
+
+m1_if = m1.get_interface("eth")
+m2_if = m2.get_interface("eth")
+
+m1_if.set_mtu(mtu)
+m2_if.set_mtu(mtu)
+
+m1_if_name = m1_if.get_devname()
+m2_if_name = m2_if.get_devname()
+
+m1_if_addr = m1_if.get_ip()
+m2_if_addr = m2_if.get_ip()
+
+m1_if_addr6 = m1_if.get_ip(1)
+m2_if_addr6 = m2_if.get_ip(1)
+
+
+# add routing rulez ipv4
+# so the host knows where to send traffic destined to remote site
+m1.run("ip route add %s dev %s" % (m2_if_addr, m1_if_name))
+
+# so the host knows where to send traffic destined to remote site
+m2.run("ip route add %s dev %s" % (m1_if_addr, m2_if_name))
+
+# add routing rulez ipv6
+# so the host knows where to send traffic destined to remote site
+m1.run("ip route add %s dev %s" % (m2_if_addr6, m1_if_name))
+
+# so the host knows where to send traffic destined to remote site
+m2.run("ip route add %s dev %s" % (m1_if_addr6, m2_if_name))
+
+if nperf_msg_size is None:
+ nperf_msg_size = 16000
+
+if ipsec_mode is None:
+ ipsec_mode = "transport"
+
+res = m1.run("rpm -qa iproute", save_output=True)
+if (res.get_result()["res_data"]["stdout"].find("iproute-2") != -1):
+ m1_key="0x"
+else:
+ m1_key=""
+
+res = m2.run("rpm -qa iproute", save_output=True)
+if (res.get_result()["res_data"]["stdout"].find("iproute-2") != -1):
+ m2_key="0x"
+else:
+ m2_key=""
+
+if nperf_cpupin:
+ m1.run("service irqbalance stop")
+ m2.run("service irqbalance stop")
+
+    dev_list = [(m1, m1_if), (m2, m2_if)]
+
+ # this will pin devices irqs to cpu #0
+ for m, d in dev_list:
+ pin_dev_irqs(m, d, 0)
+
+nperf_opts = ""
+if nperf_cpupin and nperf_num_parallel == 1:
+ nperf_opts = " -T%s,%s" % (nperf_cpupin, nperf_cpupin)
+
+ctl.wait(15)
+
+def configure_ipsec(ciph_alg, ciph_key, hash_alg, hash_key, ip_version):
+ if ip_version == "ipv4":
+ m1_addr = m1_if_addr
+ m2_addr = m2_if_addr
+ else:
+ m1_addr = m1_if_addr6
+ m2_addr = m2_if_addr6
+
+ # configure policy and state
+ m1.run("ip xfrm policy flush")
+ m1.run("ip xfrm state flush")
+ m2.run("ip xfrm policy flush")
+ m2.run("ip xfrm state flush")
+
+ m1.run("ip xfrm policy add src %s dst %s dir out "\
+ "tmpl src %s dst %s proto comp spi 4 mode %s "\
+ "tmpl src %s dst %s proto esp spi 2 mode %s "\
+ "tmpl src %s dst %s proto ah spi 3 mode %s"
+ % (m1_addr, m2_addr,
+ m1_addr, m2_addr, ipsec_mode,
+ m1_addr, m2_addr, ipsec_mode,
+ m1_addr, m2_addr, ipsec_mode))
+ m1.run("ip xfrm policy add src %s dst %s dir in "\
+ "tmpl src %s dst %s proto comp spi 1 mode %s level use "\
+ "tmpl src %s dst %s proto esp spi 2 mode %s "\
+ "tmpl src %s dst %s proto ah spi 3 mode %s"
+ % (m2_addr, m1_addr,
+ m2_addr, m1_addr, ipsec_mode,
+ m2_addr, m1_addr, ipsec_mode,
+ m2_addr, m1_addr, ipsec_mode))
+
+ m1.run("ip xfrm state add "\
+ "src %s dst %s proto comp spi 4 mode %s "\
+ "comp deflate"\
+ % (m1_addr, m2_addr, ipsec_mode))
+ m1.run("ip xfrm state add "\
+ "src %s dst %s proto comp spi 1 mode %s "\
+ "comp deflate"\
+ % (m2_addr, m1_addr, ipsec_mode))
+
+ m1.run("ip xfrm state add "\
+ "src %s dst %s proto esp spi 2 mode %s "\
+ "enc '%s' %s"\
+ % (m1_addr, m2_addr, ipsec_mode,
+ ciph_alg, ciph_key))
+ m1.run("ip xfrm state add "\
+ "src %s dst %s proto esp spi 2 mode %s "\
+ "enc '%s' %s"\
+ % (m2_addr, m1_addr, ipsec_mode,
+ ciph_alg, ciph_key))
+
+ m1.run("ip xfrm state add "\
+ "src %s dst %s proto ah spi 3 mode %s "\
+ "auth '%s' %s"
+ % (m1_addr, m2_addr, ipsec_mode,
+ hash_alg, hash_key))
+ m1.run("ip xfrm state add "\
+ "src %s dst %s proto ah spi 3 mode %s "\
+ "auth '%s' %s"
+ % (m2_addr, m1_addr, ipsec_mode,
+ hash_alg, hash_key))
+
+
+ # second machine
+ m2.run("ip xfrm policy add src %s dst %s dir out "\
+ "tmpl src %s dst %s proto comp spi 1 mode %s "\
+ "tmpl src %s dst %s proto esp spi 2 mode %s "\
+ "tmpl src %s dst %s proto ah spi 3 mode %s"
+ % (m2_addr, m1_addr,
+ m2_addr, m1_addr, ipsec_mode,
+ m2_addr, m1_addr, ipsec_mode,
+ m2_addr, m1_addr, ipsec_mode))
+ m2.run("ip xfrm policy add src %s dst %s dir in "\
+ "tmpl src %s dst %s proto comp spi 4 mode %s level use "\
+ "tmpl src %s dst %s proto esp spi 2 mode %s "\
+ "tmpl src %s dst %s proto ah spi 3 mode %s"
+ % (m1_addr, m2_addr,
+ m1_addr, m2_addr, ipsec_mode,
+ m1_addr, m2_addr, ipsec_mode,
+ m1_addr, m2_addr, ipsec_mode))
+
+ m2.run("ip xfrm state add "\
+ "src %s dst %s proto comp spi 4 mode %s "\
+ "comp deflate"\
+ % (m1_addr, m2_addr, ipsec_mode))
+ m2.run("ip xfrm state add "\
+ "src %s dst %s proto comp spi 1 mode %s "\
+ "comp deflate"\
+ % (m2_addr, m1_addr, ipsec_mode))
+
+ m2.run("ip xfrm state add "\
+ "src %s dst %s proto esp spi 2 mode %s "\
+ "enc '%s' %s"\
+ % (m1_addr, m2_addr, ipsec_mode,
+ ciph_alg, ciph_key))
+ m2.run("ip xfrm state add "\
+ "src %s dst %s proto esp spi 2 mode %s "\
+ "enc '%s' %s"\
+ % (m2_addr, m1_addr, ipsec_mode,
+ ciph_alg, ciph_key))
+
+ m2.run("ip xfrm state add "\
+ "src %s dst %s proto ah spi 3 mode %s "\
+ "auth '%s' %s"\
+ % (m1_addr, m2_addr, ipsec_mode,
+ hash_alg, hash_key))
+ m2.run("ip xfrm state add "\
+ "src %s dst %s proto ah spi 3 mode %s "\
+ "auth '%s' %s"\
+ % (m2_addr, m1_addr, ipsec_mode,
+ hash_alg, hash_key))
+
+
+for ciph_alg, ciph_len in ciphers:
+ for hash_alg, hash_len in hashes:
+ # test: compressed check, TCP netperf, UDP netperf
+ if ipv in [ 'ipv4', 'both']:
+ configure_ipsec(ciph_alg,
+ generate_key(ciph_len),
+ hash_alg,
+ generate_key(hash_len),
+ "ipv4")
+
+ dump = m1.run("tcpdump -i %s -nn -vv" % m1_if_name, bg=True)
+
+ # ping + PacketAssert
+ assert_mod = ctl.get_module("PacketAssert",
+ options={
+ "interface": m2_if_name,
+ "filter": "ah",
+ "grep_for": [ "AH\(spi=0x00000003",
+ "ESP\(spi=0x00000002" ],
+ "min": 10
+ })
+
+ assert_proc = m2.run(assert_mod, bg=True)
+
+ ping_mod = ctl.get_module("IcmpPing",
+ options={
+ "addr": m2_if_addr,
+ "count": 10,
+ "interval": 0.1})
+
+ ctl.wait(2)
+
+ m1.run(ping_mod)
+
+ ctl.wait(2)
+
+ assert_proc.intr()
+
+ dump.intr()
+
+ m1.run("ip -s xfrm pol")
+ m1.run("ip -s xfrm state")
+
+ # ping + PacketAssert test with bigger size to check compression is used
+ dump = m1.run("tcpdump -i %s -nn -vv" % m1_if_name, bg=True)
+
+ assert_mod = ctl.get_module("PacketAssert",
+ options={
+ "interface": m2_if_name,
+ "grep_for": ["IPComp"],
+ "min": 10
+ })
+
+ assert_proc = m2.run(assert_mod, bg=True)
+
+ ping_mod = ctl.get_module("IcmpPing",
+ options={
+ "addr": m2_if_addr,
+ "count": 10,
+ "interval": 0.1,
+ "size": 1500})
+
+ ctl.wait(2)
+
+ m1.run(ping_mod)
+
+ ctl.wait(2)
+
+ assert_proc.intr()
+
+ dump.intr()
+
+ # prepare PerfRepo result for tcp
+ result_tcp = perf_api.new_result("tcp_ipv4_id",
+ "tcp_ipv4_result",
+ hash_ignore=[
+ r'kernel_release',
+ r'redhat_release'])
+ result_tcp.add_tag(product_name)
+
+ if nperf_num_parallel > 1:
+ result_tcp.add_tag("multithreaded")
+ result_tcp.set_parameter('num_parallel', nperf_num_parallel)
+
+ result_tcp.set_parameter('cipher_alg', ciph_alg)
+ result_tcp.set_parameter('cipher_len', ciph_len)
+ result_tcp.set_parameter('hash_alg', hash_alg)
+ result_tcp.set_parameter('msg_size', nperf_msg_size)
+
+ baseline = perf_api.get_baseline_of_result(result_tcp)
+ baseline = perfrepo_baseline_to_dict(baseline)
+
+
+ tcp_res_data = netperf((m1, m1_if, 0, {"scope": 0}),
+ (m2, m2_if, 0, {"scope": 0}),
+ client_opts={"duration" : netperf_duration,
+ "testname" : "TCP_STREAM",
+ "confidence" : nperf_confidence,
+ "num_parallel" : nperf_num_parallel,
+ "cpu_util" : nperf_cpu_util,
+ "runs": nperf_max_runs,
+ "debug": nperf_debug,
+ "max_deviation": nperf_max_dev,
+ "msg_size" : nperf_msg_size,
+ "netperf_opts": nperf_opts},
+ baseline = baseline,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_tcp, tcp_res_data)
+ result_tcp.set_comment(pr_comment)
+ perf_api.save_result(result_tcp)
+
+ # prepare PerfRepo result for udp
+ result_udp = perf_api.new_result("udp_ipv4_id",
+ "udp_ipv4_result",
+ hash_ignore=[
+ r'kernel_release',
+ r'redhat_release'])
+ result_udp.add_tag(product_name)
+
+ if nperf_num_parallel > 1:
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ result_udp.set_parameter('cipher_alg', ciph_alg)
+ result_udp.set_parameter('cipher_len', ciph_len)
+ result_udp.set_parameter('hash_alg', hash_alg)
+ result_udp.set_parameter('msg_size', nperf_msg_size)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ baseline = perfrepo_baseline_to_dict(baseline)
+
+ udp_res_data = netperf((m1, m1_if, 0, {"scope": 0}),
+ (m2, m2_if, 0, {"scope": 0}),
+ client_opts={"duration" : netperf_duration,
+ "testname" : "UDP_STREAM",
+ "confidence" : nperf_confidence,
+ "num_parallel" : nperf_num_parallel,
+ "cpu_util" : nperf_cpu_util,
+ "runs": nperf_max_runs,
+ "debug": nperf_debug,
+ "max_deviation": nperf_max_dev,
+ "msg_size" : nperf_msg_size,
+ "netperf_opts": nperf_opts},
+ baseline = baseline,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ result_udp.set_comment(pr_comment)
+ perf_api.save_result(result_udp)
+
+ if ipv in [ 'ipv6', 'both']:
+ configure_ipsec(ciph_alg,
+ generate_key(ciph_len),
+ hash_alg,
+                            generate_key(hash_len),
+ "ipv6")
+
+ dump = m1.run("tcpdump -i %s -nn -vv" % m1_if_name, bg=True)
+
+ # ping + PacketAssert
+ assert_mod = ctl.get_module("PacketAssert",
+ options={
+ "interface": m2_if_name,
+ "filter": "ah",
+ "grep_for": [ "AH\(spi=0x00000003",
+ "ESP\(spi=0x00000002" ],
+ "min": 10
+ })
+
+ assert_proc = m2.run(assert_mod, bg=True)
+
+ ping_mod = ctl.get_module("Icmp6Ping",
+ options={
+ "addr": m2_if_addr6,
+ "count": 10,
+ "interval": 0.1})
+
+ ctl.wait(2)
+
+ m1.run(ping_mod)
+
+ ctl.wait(2)
+
+ assert_proc.intr()
+
+ dump.intr()
+
+ m1.run("ip -s xfrm pol")
+ m1.run("ip -s xfrm state")
+
+ # ping + PacketAssert test with bigger size to check compression is used
+ dump = m1.run("tcpdump -i %s -nn -vv" % m1_if_name, bg=True)
+
+ assert_mod = ctl.get_module("PacketAssert",
+ options={
+ "interface": m2_if_name,
+ "grep_for": ["IPComp"],
+ "min": 10
+ })
+
+ assert_proc = m2.run(assert_mod, bg=True)
+
+ ping_mod = ctl.get_module("Icmp6Ping",
+ options={
+ "addr": m2_if_addr6,
+ "count": 10,
+ "interval": 0.1,
+ "size": 1500})
+
+ ctl.wait(2)
+
+ m1.run(ping_mod)
+
+ ctl.wait(2)
+
+ assert_proc.intr()
+
+ dump.intr()
+
+ # prepare PerfRepo result for tcp
+ result_tcp = perf_api.new_result("tcp_ipv6_id",
+ "tcp_ipv6_result",
+ hash_ignore=[
+ r'kernel_release',
+ r'redhat_release'])
+ result_tcp.add_tag(product_name)
+
+ if nperf_num_parallel > 1:
+ result_tcp.add_tag("multithreaded")
+ result_tcp.set_parameter('num_parallel', nperf_num_parallel)
+
+ result_tcp.set_parameter('cipher_alg', ciph_alg)
+ result_tcp.set_parameter('cipher_len', ciph_len)
+ result_tcp.set_parameter('hash_alg', hash_alg)
+ result_tcp.set_parameter('msg_size', nperf_msg_size)
+
+ baseline = perf_api.get_baseline_of_result(result_tcp)
+ baseline = perfrepo_baseline_to_dict(baseline)
+
+
+ tcp_res_data = netperf((m1, m1_if, 1, {"scope": 0}),
+ (m2, m2_if, 1, {"scope": 0}),
+ client_opts={"duration" : netperf_duration,
+ "testname" : "TCP_STREAM",
+ "confidence" : nperf_confidence,
+ "num_parallel" : nperf_num_parallel,
+ "cpu_util" : nperf_cpu_util,
+ "runs": nperf_max_runs,
+ "debug": nperf_debug,
+ "max_deviation": nperf_max_dev,
+ "msg_size" : nperf_msg_size,
+                               "netperf_opts" : nperf_opts + " -6"},
+ baseline = baseline,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_tcp, tcp_res_data)
+ result_tcp.set_comment(pr_comment)
+ perf_api.save_result(result_tcp)
+
+ # prepare PerfRepo result for udp
+ result_udp = perf_api.new_result("udp_ipv6_id",
+ "udp_ipv6_result",
+ hash_ignore=[
+ r'kernel_release',
+ r'redhat_release'])
+ result_udp.add_tag(product_name)
+
+ if nperf_num_parallel > 1:
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ result_udp.set_parameter('cipher_alg', ciph_alg)
+ result_udp.set_parameter('cipher_len', ciph_len)
+ result_udp.set_parameter('hash_alg', hash_alg)
+ result_udp.set_parameter('msg_size', nperf_msg_size)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ baseline = perfrepo_baseline_to_dict(baseline)
+
+ udp_res_data = netperf((m1, m1_if, 1, {"scope": 0}),
+ (m2, m2_if, 1, {"scope": 0}),
+ client_opts={"duration" : netperf_duration,
+ "testname" : "UDP_STREAM",
+ "confidence" : nperf_confidence,
+ "num_parallel" : nperf_num_parallel,
+ "cpu_util" : nperf_cpu_util,
+ "runs": nperf_max_runs,
+ "debug": nperf_debug,
+ "max_deviation": nperf_max_dev,
+ "msg_size" : nperf_msg_size,
+                               "netperf_opts" : nperf_opts + " -6"},
+ baseline = baseline,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ result_udp.set_comment(pr_comment)
+ perf_api.save_result(result_udp)
+
+m1.run("ip xfrm policy flush")
+m1.run("ip xfrm state flush")
+m2.run("ip xfrm policy flush")
+m2.run("ip xfrm state flush")
+
+if nperf_cpupin:
+ m1.run("service irqbalance start")
+ m2.run("service irqbalance start")
diff --git a/recipes/regression_tests/phase3/ipsec_esp_ah_comp.xml b/recipes/regression_tests/phase3/ipsec_esp_ah_comp.xml
new file mode 100644
index 0000000..8ed30d6
--- /dev/null
+++ b/recipes/regression_tests/phase3/ipsec_esp_ah_comp.xml
@@ -0,0 +1,50 @@
+<lnstrecipe>
+ <define>
+ <alias name="ipv" value="both" />
+ <alias name="mtu" value="1500" />
+ <alias name="netperf_duration" value="60" />
+ <alias name="nperf_reserve" value="20" />
+ <alias name="nperf_confidence" value="99,5" />
+ <alias name="nperf_max_runs" value="5"/>
+ <alias name="nperf_num_parallel" value="1"/>
+ <alias name="nperf_debug" value="0"/>
+ <alias name="nperf_max_dev" value="20%"/>
+ <alias name="mapping_file" value="ipsec_esp_ah_comp.mapping"/>
+ <alias name="net_1" value="192.168.99"/>
+ <alias name="net6_1" value="fc00:1::"/>
+ <alias name="net_2" value="192.168.100"/>
+ <alias name="net6_2" value="fc00:2::"/>
+ <alias name="driver" value=""/>
+ </define>
+ <network>
+ <host id="machine1">
+ <interfaces>
+ <eth id="eth" label="localnet">
+ <params>
+ <param name="driver" value="{$driver}"/>
+ </params>
+ <addresses>
+ <address value="{$net_1}.1/24"/>
+ <address value="{$net6_1}1/64"/>
+ </addresses>
+ </eth>
+ </interfaces>
+ </host>
+ <host id="machine2">
+ <interfaces>
+ <eth id="eth" label="localnet">
+ <params>
+ <param name="driver" value="{$driver}"/>
+ </params>
+ <addresses>
+ <address value="{$net_2}.1/24"/>
+ <address value="{$net6_2}1/64"/>
+ </addresses>
+ </eth>
+ </interfaces>
+ </host>
+ </network>
+
+ <task python="ipsec_esp_ah_comp.py"/>
+
+</lnstrecipe>
--
2.5.5
6 years, 7 months