[PATCH lnst 06/11] recipes: switchdev: ipip: Test migration of shared
GRE underlay
by Petr Machata
Given two tunnels that share a bound device, test that when that device
is moved to another VRF, both tunnels are updated.
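The gist of the scenario, sketched in the recipe idiom (a minimal
sketch; the device and VRF names are placeholders, not the ones the
recipe below uses):

    # Two GRE tunnels share one bound (underlay) device, d.
    sw.run("ip link add name d type dummy")
    sw.run("ip tunnel add name g1 mode gre"
           " local 1.2.3.4 remote 1.2.3.5 dev d")
    sw.run("ip tunnel add name g2 mode gre"
           " local 1.2.3.6 remote 1.2.3.7 dev d")
    # Moving d to another VRF must update the underlay of both g1
    # and g2, not just one of them.
    sw.run("ip link set dev d master vrf_u2")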
Signed-off-by: Petr Machata <petrm@mellanox.com>
---
recipes/switchdev/TestLib.py | 14 ++
recipes/switchdev/ipip-009-gre-migration-shared.py | 157 +++++++++++++++++++
.../switchdev/ipip-009-gre-migration-shared.xml | 166 +++++++++++++++++++++
3 files changed, 337 insertions(+)
create mode 100644 recipes/switchdev/ipip-009-gre-migration-shared.py
create mode 100644 recipes/switchdev/ipip-009-gre-migration-shared.xml
diff --git a/recipes/switchdev/TestLib.py b/recipes/switchdev/TestLib.py
index 02e8bd9..b6962d6 100644
--- a/recipes/switchdev/TestLib.py
+++ b/recipes/switchdev/TestLib.py
@@ -683,3 +683,17 @@ class route:
def __exit__(self, exc_type, exc_value, traceback):
self.do("del")
+
+class scoped_run:
+ def __init__(self, sw, enter=None, exit=None):
+ self._sw = sw
+ self._enter = enter
+ self._exit = exit
+
+ def __enter__(self):
+ if self._enter is not None:
+ self._sw.run(self._enter)
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ if self._exit is not None:
+ self._sw.run(self._exit)
diff --git a/recipes/switchdev/ipip-009-gre-migration-shared.py b/recipes/switchdev/ipip-009-gre-migration-shared.py
new file mode 100644
index 0000000..47d7308
--- /dev/null
+++ b/recipes/switchdev/ipip-009-gre-migration-shared.py
@@ -0,0 +1,157 @@
+"""
+Copyright 2017 Mellanox Technologies. All rights reserved.
+Licensed under the GNU General Public License, version 2 as
+published by the Free Software Foundation; see COPYING for details.
+"""
+
+__author__ = """
+petrm@mellanox.com (Petr Machata)
+"""
+
+from lnst.Controller.Task import ctl
+from TestLib import TestLib, vrf, dummy, gre, scoped_run
+from ipip_common import ping_test, encap_route, \
+ add_forward_route, connect_host_ifaces, \
+ test_ip, ipv4, ipv6, refresh_addrs
+from time import sleep
+import logging
+
+class fake_iface:
+ def __init__(self, name):
+ self.name = name
+ def get_devname(self):
+ return self.name
+
+def do_task(ctl, hosts, ifaces, aliases):
+ m1, m2, sw = hosts
+ (m1_if1_10, m1_if1_20,
+ m2_if1_10, m2_if1_20,
+ sw_if1_10, sw_if1_20,
+ sw_if2_10, sw_if2_20) = ifaces
+
+ m1_if1_10.add_nhs_route(ipv4(test_ip(2, 0)), [ipv4(test_ip(1, 1, []))])
+ m1_if1_10.add_nhs_route(ipv6(test_ip(2, 0)), [ipv6(test_ip(1, 1, []))])
+ m1_if1_20.add_nhs_route(ipv4(test_ip(4, 0)), [ipv4(test_ip(3, 1, []))])
+ m1_if1_20.add_nhs_route(ipv6(test_ip(4, 0)), [ipv6(test_ip(3, 1, []))])
+
+ vrf_None = None
+ tl = TestLib(ctl, aliases)
+
+ # - Test migration of several tunnels tied to a single dummy. Have a
+ # setup like below, and test end-to-end ping from 1.33 to 2.33, and
+    # lack of end-to-end ping from 3.33 to 4.33. Then migrate sd to svu2
+ # and test that 2.33 doesn't ping anymore, but 4.33 now does.
+ #
+ # +-- M1 ------------+ +-- SW -----------------------------+
+ # | | | +-- svo ------------------------+ |
+ # | | | | sg1 1.2.3.4/31 | |
+ # | 1.33/24 +-|----|-|-+ 1.1/24 + | |
+ # | | | | | sg2 1.2.3.6/31 | |
+ # | 3.33/24 +-|----|-|-+ 3.1/24 | + | |
+ # +------------------+ | +-------------------------------+ |
+ # | | | |
+ # +-- M2 ------------+ | +-- svu1 -----------------------+ |
+ # | | | | \ / | |
+ # | 2.33/32 md1 + | | | \ / 1.2.3.4/32 | |
+ # | 1.2.3.5/31 mg1 + | | | + sd 1.2.3.6/32 | |
+ # | 99.2/24 +-|----|-|-+ 99.1/24 | |
+ # | | | | | |
+ # | | | +-------------------------------+ |
+ # | | | |
+ # | 4.33/32 md2 + | | +-- svu2 -----------------------+ |
+ # | 1.2.3.7/31 mg2 + | | | | |
+ # | 88.2/24 +-|----|-|-+ 88.1/24 | |
+ # | | | +-------------------------------+ |
+ # +------------------+ +-----------------------------------+
+
+ logging.info("--- Migrate bound device shared by several tunnels")
+ with vrf(sw) as svo, \
+ vrf(sw) as svu1, \
+ vrf(sw) as svu2, \
+ \
+ encap_route(m2, vrf_None, 1, "mg1", ip=ipv4), \
+ encap_route(m2, vrf_None, 1, "mg1", ip=ipv6), \
+ \
+ encap_route(m2, vrf_None, 3, "mg2", ip=ipv4), \
+ encap_route(m2, vrf_None, 3, "mg2", ip=ipv6), \
+ \
+ dummy(sw, svu1, ip=["1.2.3.4/32", "1.2.3.6/32"]) as sd, \
+ gre(sw, sd, svo,
+ tos="inherit",
+ local_ip="1.2.3.4",
+ remote_ip="1.2.3.5") as sg1, \
+ scoped_run(sw,
+ # An LNST device can only have one master, so bound devices can't
+ # be shared. We have to create the second one by hand (we still
+ # want to create the one that we can automatically so that we get
+ # the stats). We also can't create it sans bound device and add
+ # it surreptitiously through sw.run, because reacting to that
+ # change is a separate feature that may not be implemented. So do
+ # it all by hand, including routes.
+ enter = "ip t a name sg2 mode gre local 1.2.3.6 remote 1.2.3.7"
+ " dev %s tos inherit;"
+ "ip l s dev sg2 master %s;"
+ "ip l s dev sg2 up""" % (sd.get_devname(), svo),
+ exit = "ip l d dev sg2"), \
+ encap_route(sw, svo, 2, sg1, ip=ipv4), \
+ encap_route(sw, svo, 2, sg1, ip=ipv6), \
+ encap_route(sw, svo, 4, fake_iface("sg2"), ip=ipv4), \
+ encap_route(sw, svo, 4, fake_iface("sg2"), ip=ipv6):
+
+ connect_host_ifaces(sw, sw_if1_10, svo, sw_if2_10, svu1)
+ refresh_addrs(sw, sw_if1_10)
+ refresh_addrs(sw, sw_if2_10)
+
+ connect_host_ifaces(sw, sw_if1_20, svo, sw_if2_20, svu2)
+ refresh_addrs(sw, sw_if1_20)
+ refresh_addrs(sw, sw_if2_20)
+
+ add_forward_route(sw, svu1, "1.2.3.5", ipv4(test_ip(99, 2, [])))
+ add_forward_route(sw, svu2, "1.2.3.7", ipv4(test_ip(88, 2, [])))
+ add_forward_route(m2, vrf_None, "1.2.3.4", ipv4(test_ip(99, 2, [])))
+ add_forward_route(m2, vrf_None, "1.2.3.6", ipv4(test_ip(88, 2, [])))
+
+ def quick_test(tun1_ipv4_fail, tun1_ipv6_fail,
+ tun2_ipv4_fail, tun2_ipv6_fail):
+ sleep(5)
+ ping_test(tl, m1, sw, ipv6(test_ip(2, 33, [])), m1_if1_10, sg1,
+ count=25, fail_expected=tun1_ipv6_fail, ipv6=True)
+ ping_test(tl, m1, sw, ipv4(test_ip(2, 33, [])), m1_if1_10, sg1,
+ count=25, fail_expected=tun1_ipv4_fail)
+
+ ping_test(tl, m1, sw, ipv6(test_ip(4, 33, [])), m1_if1_20, None,
+ count=25, fail_expected=tun2_ipv6_fail, ipv6=True)
+ ping_test(tl, m1, sw, ipv4(test_ip(4, 33, [])), m1_if1_20, None,
+ count=25, fail_expected=tun2_ipv4_fail)
+
+ sleep(15)
+ quick_test(False, False, True, True)
+
+ sw.run("ip l s dev %s master %s" % (sd.get_devname(), svu2))
+ sleep(5)
+ quick_test(True, True, False, False)
+
+ sw.run("ip l s dev %s master %s" % (sd.get_devname(), svu1))
+ sleep(5)
+ quick_test(False, False, True, True)
+
+ sw.run("ip l s dev %s master %s" % (sd.get_devname(), svu2))
+ sleep(5)
+ quick_test(True, True, False, False)
+
+ sw.run("ip l s dev %s nomaster" % sd.get_devname())
+ sleep(5)
+ quick_test(True, True, True, True)
+
+do_task(ctl, [ctl.get_host("machine1"),
+ ctl.get_host("machine2"),
+ ctl.get_host("switch")],
+ [ctl.get_host("machine1").get_interface("if1.10"),
+ ctl.get_host("machine1").get_interface("if1.20"),
+ ctl.get_host("machine2").get_interface("if1.10"),
+ ctl.get_host("machine2").get_interface("if1.20"),
+ ctl.get_host("switch").get_interface("if1.10"),
+ ctl.get_host("switch").get_interface("if1.20"),
+ ctl.get_host("switch").get_interface("if2.10"),
+ ctl.get_host("switch").get_interface("if2.20")],
+ ctl.get_aliases())
diff --git a/recipes/switchdev/ipip-009-gre-migration-shared.xml b/recipes/switchdev/ipip-009-gre-migration-shared.xml
new file mode 100644
index 0000000..680d2ae
--- /dev/null
+++ b/recipes/switchdev/ipip-009-gre-migration-shared.xml
@@ -0,0 +1,166 @@
+<lnstrecipe xmlns:xi="http://www.w3.org/2003/XInclude">
+ <xi:include href="default_aliases.xml" />
+ <define>
+ <alias name="onet1" value="192.168.1"/>
+ <alias name="onet2" value="192.168.2"/>
+ <alias name="onet3" value="192.168.3"/>
+ <alias name="onet4" value="192.168.4"/>
+ <alias name="o6net1" value="2002:1"/>
+ <alias name="o6net2" value="2002:2"/>
+ <alias name="o6net3" value="2002:3"/>
+ <alias name="o6net4" value="2002:4"/>
+ <alias name="unet1" value="192.168.99"/>
+ <alias name="unet2" value="192.168.88"/>
+ </define>
+ <network>
+ <host id="machine1">
+ <params/>
+ <interfaces>
+ <eth id="if1" label="A" />
+
+ <vlan id="if1.10">
+ <options>
+ <option name="vlan_tci" value="10" />
+ </options>
+ <slaves>
+ <slave id="if1"/>
+ </slaves>
+ <addresses>
+ <address value="{$onet1}.33/24" />
+ <address value="{$o6net1}::33/64" />
+ </addresses>
+ </vlan>
+
+ <vlan id="if1.20">
+ <options>
+ <option name="vlan_tci" value="20" />
+ </options>
+ <slaves>
+ <slave id="if1"/>
+ </slaves>
+ <addresses>
+ <address value="{$onet3}.33/24" />
+ <address value="{$o6net3}::33/64" />
+ </addresses>
+ </vlan>
+ </interfaces>
+ </host>
+
+ <host id="machine2">
+ <params/>
+ <interfaces>
+ <eth id="if1" label="B" />
+
+ <vlan id="if1.10">
+ <options>
+ <option name="vlan_tci" value="10" />
+ </options>
+ <slaves>
+ <slave id="if1"/>
+ </slaves>
+ <addresses>
+ <address value="{$unet1}.2/24" />
+ </addresses>
+ </vlan>
+ <dummy id="md1">
+ <addresses>
+ <address value="{$onet2}.33/32" />
+ <address value="{$o6net2}::33/128" />
+ </addresses>
+ </dummy>
+ <gre id="mg1">
+ <options>
+ <option name="local_ip" value="1.2.3.5"/>
+ <option name="remote_ip" value="1.2.3.4"/>
+ </options>
+ <addresses>
+ <address value="1.2.3.5/32" />
+ </addresses>
+ </gre>
+
+ <vlan id="if1.20">
+ <options>
+ <option name="vlan_tci" value="20" />
+ </options>
+ <slaves>
+ <slave id="if1"/>
+ </slaves>
+ <addresses>
+ <address value="{$unet2}.2/24" />
+ </addresses>
+ </vlan>
+ <dummy id="md2">
+ <addresses>
+ <address value="{$onet4}.33/32" />
+ <address value="{$o6net4}::33/128" />
+ </addresses>
+ </dummy>
+ <gre id="mg2">
+ <options>
+ <option name="local_ip" value="1.2.3.7"/>
+ <option name="remote_ip" value="1.2.3.6"/>
+ </options>
+ <addresses>
+ <address value="1.2.3.7/32" />
+ </addresses>
+ </gre>
+ </interfaces>
+ </host>
+
+ <host id="switch">
+ <interfaces>
+ <eth id="if1" label="A" />
+ <vlan id="if1.10">
+ <options>
+ <option name="vlan_tci" value="10" />
+ </options>
+ <slaves>
+ <slave id="if1"/>
+ </slaves>
+ <addresses>
+ <address value="{$onet1}.1/24" />
+ <address value="{$o6net1}::1/64" />
+ </addresses>
+ </vlan>
+ <vlan id="if1.20">
+ <options>
+ <option name="vlan_tci" value="20" />
+ </options>
+ <slaves>
+ <slave id="if1"/>
+ </slaves>
+ <addresses>
+ <address value="{$onet3}.1/24" />
+ <address value="{$o6net3}::1/64" />
+ </addresses>
+ </vlan>
+
+ <eth id="if2" label="B" />
+ <vlan id="if2.10">
+ <options>
+ <option name="vlan_tci" value="10" />
+ </options>
+ <slaves>
+ <slave id="if2"/>
+ </slaves>
+ <addresses>
+ <address value="{$unet1}.1/24" />
+ </addresses>
+ </vlan>
+ <vlan id="if2.20">
+ <options>
+ <option name="vlan_tci" value="20" />
+ </options>
+ <slaves>
+ <slave id="if2"/>
+ </slaves>
+ <addresses>
+ <address value="{$unet2}.1/24" />
+ </addresses>
+ </vlan>
+ </interfaces>
+ </host>
+ </network>
+
+ <task python="ipip-009-gre-migration-shared.py" />
+</lnstrecipe>
--
2.4.11
[PATCH lnst 05/11] recipes: switchdev: ipip_common: Watch both RX and
TX stats
by Petr Machata
The ping test in ipip_common checks that traffic actually went through
the fast path (or, conversely, the slow path). But so far it has only
looked at received packets, an arbitrary choice. However, for
asymmetric offloads, each direction needs to be verified.
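Schematically, the check now becomes (a sketch of the idea, not the
literal diff below):

    before = gre.link_stats()
    # ... run the ping ...
    after = gre.link_stats()
    drx = after["rx_packets"] - before["rx_packets"]
    dtx = after["tx_packets"] - before["tx_packets"]
    # On the fast path neither counter may grow much. An RX-only
    # check would miss an offload that is broken in the TX direction.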
Signed-off-by: Petr Machata <petrm@mellanox.com>
---
recipes/switchdev/ipip_common.py | 17 ++++++++++-------
1 file changed, 10 insertions(+), 7 deletions(-)
diff --git a/recipes/switchdev/ipip_common.py b/recipes/switchdev/ipip_common.py
index c5bad69..850d650 100644
--- a/recipes/switchdev/ipip_common.py
+++ b/recipes/switchdev/ipip_common.py
@@ -19,7 +19,7 @@ def ping_test(tl, m1, sw, addr, m1_if1, gre,
ipv6=False, ttl=None):
limit = int(0.9 * count)
if gre is not None:
- before_stats = gre.link_stats()["rx_packets"]
+ before_stats = gre.link_stats()
options = {
"addr": addr,
"count": count,
@@ -34,17 +34,20 @@ def ping_test(tl, m1, sw, addr, m1_if1, gre,
m1.run(ping_mod, fail_expected=fail_expected)
if not fail_expected and gre is not None:
- after_stats = gre.link_stats()["rx_packets"]
+ after_stats = gre.link_stats()
- delta = after_stats - before_stats
- if require_fastpath and delta > 10:
+ drx = after_stats["rx_packets"] - before_stats["rx_packets"]
+ dtx = after_stats["tx_packets"] - before_stats["tx_packets"]
+ if require_fastpath and (drx > 10 or dtx > 10):
# Allow a few packets of control plane traffic to go through slow
# path. All the data plane traffic should go through fast path.
tl.custom(sw, "ipip",
- "Too many packets (%d) observed at GRE netdevice" % delta)
- if require_slowpath and delta < count / 2:
+ "Too many packets (rx:%d, tx:%d) observed at GRE netdevice"
+ % (drx, dtx))
+ if require_slowpath and (drx < count / 2 and dtx < count / 2):
tl.custom(sw, "ipip",
- "Too few packets (%d) observed at GRE netdevice" % delta)
+ "Too few packets (rx:%d, tx:%d) observed at GRE netdevice"
+ % (drx, dtx))
def ipv4(test_ip):
return test_ip[0]
--
2.4.11
[PATCH lnst 04/11] recipes: switchdev: ipip_common: ping: Add
require_slowpath
by Petr Machata
This allows configuring tests that are expected to go through the slow path.
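For example, a test whose traffic is expected to be trapped to the
slow path would call (hypothetical arguments):

    ping_test(tl, m1, sw, addr, m1_if1, gre,
              require_fastpath=False, require_slowpath=True,
              count=100)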
Signed-off-by: Petr Machata <petrm@mellanox.com>
---
recipes/switchdev/ipip_common.py | 6 +++++-
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/recipes/switchdev/ipip_common.py b/recipes/switchdev/ipip_common.py
index 15c2124..c5bad69 100644
--- a/recipes/switchdev/ipip_common.py
+++ b/recipes/switchdev/ipip_common.py
@@ -14,7 +14,8 @@ from lnst.Controller.Task import ctl
from TestLib import route
def ping_test(tl, m1, sw, addr, m1_if1, gre,
- require_fastpath=True, fail_expected=False, count=100,
+ require_fastpath=True, require_slowpath=False,
+ fail_expected=False, count=100,
ipv6=False, ttl=None):
limit = int(0.9 * count)
if gre is not None:
@@ -41,6 +42,9 @@ def ping_test(tl, m1, sw, addr, m1_if1, gre,
# path. All the data plane traffic should go through fast path.
tl.custom(sw, "ipip",
"Too many packets (%d) observed at GRE netdevice" % delta)
+ if require_slowpath and delta < count / 2:
+ tl.custom(sw, "ipip",
+ "Too few packets (%d) observed at GRE netdevice" % delta)
def ipv4(test_ip):
return test_ip[0]
--
2.4.11
[PATCH lnst 03/11] recipes: switchdev: ipip_common: Extract
refresh_addrs
by Petr Machata
Signed-off-by: Petr Machata <petrm@mellanox.com>
---
recipes/switchdev/ipip-006-gre-decap.py | 18 +-----------------
recipes/switchdev/ipip_common.py | 16 ++++++++++++++++
2 files changed, 17 insertions(+), 17 deletions(-)
diff --git a/recipes/switchdev/ipip-006-gre-decap.py b/recipes/switchdev/ipip-006-gre-decap.py
index 6ea8fe1..4beec67 100644
--- a/recipes/switchdev/ipip-006-gre-decap.py
+++ b/recipes/switchdev/ipip-006-gre-decap.py
@@ -12,26 +12,10 @@ from lnst.Controller.Task import ctl
from TestLib import TestLib, vrf, dummy, gre
from ipip_common import ping_test, encap_route, \
add_forward_route, connect_host_ifaces, \
- test_ip, ipv4, ipv6
+ test_ip, ipv4, ipv6, refresh_addrs
from time import sleep
import logging
-def refresh_addrs(m, iface):
- # A device loses IPv6 address when changing VRF, which we normally work
- # around with doing a reset of the device. But for VLAN devices, reset
- # removes and recreates them in default VRF. So instead reset the addresses
- # by hand.
- m.run("ip a flush dev %s" % iface.get_devname())
-
- # Down/up cycle to get a new link-local address so that IPv6 neighbor
- # discovery works.
- m.run("ip l set dev %s down" % iface.get_devname())
- m.run("ip l set dev %s up" % iface.get_devname())
-
- # Now reassign the fixed addresses.
- for ip, mask in iface.get_ips().get_val():
- m.run("ip a add dev %s %s/%s" % (iface.get_devname(), ip, mask))
-
def do_task(ctl, hosts, ifaces, aliases):
m1, m2, sw = hosts
(m1_if1_10, m1_if1_20,
diff --git a/recipes/switchdev/ipip_common.py b/recipes/switchdev/ipip_common.py
index c1b07c1..15c2124 100644
--- a/recipes/switchdev/ipip_common.py
+++ b/recipes/switchdev/ipip_common.py
@@ -73,3 +73,19 @@ def add_forward_route(m, vrf, remote_ip, via=ipv4(test_ip(99, 2, []))):
def connect_host_ifaces(sw, if_o, vrf_o, if_u, vrf_u):
sw.run("ip l set dev %s master %s" % (if_o.get_devname(), vrf_o))
sw.run("ip l set dev %s master %s" % (if_u.get_devname(), vrf_u))
+
+def refresh_addrs(m, iface):
+ # A device loses IPv6 address when changing VRF, which we normally work
+ # around with doing a reset of the device. But for VLAN devices, reset
+ # removes and recreates them in default VRF. So instead reset the addresses
+ # by hand.
+ m.run("ip a flush dev %s" % iface.get_devname())
+
+ # Down/up cycle to get a new link-local address so that IPv6 neighbor
+ # discovery works.
+ m.run("ip l set dev %s down" % iface.get_devname())
+ m.run("ip l set dev %s up" % iface.get_devname())
+
+ # Now reassign the fixed addresses.
+ for ip, mask in iface.get_ips().get_val():
+ m.run("ip a add dev %s %s/%s" % (iface.get_devname(), ip, mask))
--
2.4.11
[PATCH lnst 02/11] Slave: InterfaceManager: Name tunnel devices
better
by Petr Machata
Instead of calling these devices simply "dev%d", indicate in the name
the type of device.
All these names end in an underscore to avoid collisions with the
respective "foo0" device, registered by default by the driver.
Signed-off-by: Petr Machata <petrm@mellanox.com>
---
lnst/Slave/InterfaceManager.py | 6 ++++++
1 file changed, 6 insertions(+)
diff --git a/lnst/Slave/InterfaceManager.py b/lnst/Slave/InterfaceManager.py
index e28682f..3ad81da 100644
--- a/lnst/Slave/InterfaceManager.py
+++ b/lnst/Slave/InterfaceManager.py
@@ -344,6 +344,12 @@ class InterfaceManager(object):
return self.assign_name_generic("t_ip6vti")
elif dev_type == "vxlan":
return self.assign_name_generic("vxlan")
+ elif dev_type == "gre":
+ return self.assign_name_generic("gre_")
+ elif dev_type == "ipip":
+ return self.assign_name_generic("ipip_")
+ elif dev_type == "dummy":
+ return self.assign_name_generic("dummy_")
else:
return self.assign_name_generic("dev")
--
2.4.11
[PATCH v2] TestModule, Recipe: add Netperf test module and example
by Kamil Jerabek
This commit adds the Netperf module to the new LNST version. It is a
recreation of the old Netperf module, with a new design that splits the
old Netperf class, which implemented both client and server in one
class, into two classes: Netserver and Netperf.
This patch also extends the example recipes/examples/python_recipe.py,
which now demonstrates Netperf module usage as well.
v2 changes:
* refactoring of logical structures and parameters
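With the reworked parameters, thresholds and deviations are passed as
dictionaries, e.g. (a hypothetical invocation; hosts and values are
placeholders):

    netperf_job = m2.run(Netperf(netperf_server=m1.eth0,
                                 testname="TCP_STREAM",
                                 duration=60,
                                 threshold={"rate": 9.5e9, "unit": "bps"},
                                 threshold_deviation={"rate": 2e8,
                                                      "unit": "bps"},
                                 max_deviation={"type": "percent",
                                                "value": 20.0}))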
Signed-off-by: Kamil Jerabek <kjerabek@redhat.com>
---
lnst/Tests/Netperf.py | 567 ++++++++++++++++++++++++++++++++++++++
recipes/examples/python_recipe.py | 15 +
2 files changed, 582 insertions(+)
create mode 100644 lnst/Tests/Netperf.py
diff --git a/lnst/Tests/Netperf.py b/lnst/Tests/Netperf.py
new file mode 100644
index 0000000..aca34e0
--- /dev/null
+++ b/lnst/Tests/Netperf.py
@@ -0,0 +1,567 @@
+import logging
+import errno
+import re
+from lnst.Common.Parameters import IntParam, IpParam, StrParam, Param
+from lnst.Common.TestModule import BaseTestModule, TestModuleError
+from lnst.Common.ShellProcess import ShellProcess
+from lnst.Common.ExecCmd import exec_cmd
+from lnst.Common.Utils import is_installed, std_deviation
+
+class Netserver(BaseTestModule):
+ bind = IpParam(mandatory=True)
+ port = IntParam()
+ opts = StrParam()
+
+ def run(self):
+ if not is_installed("netserver"):
+ res_data = {}
+ res_data["msg"] = "Netserver is not installed on this machine!"
+ logging.error(res_data["msg"])
+ self._res_data = res_data
+ return False
+
+ cmd = "netserver -D{bind}{port} {opts}".format(
+ bind = " -L " + str(self.params.bind),
+ port = " -p " + str(self.params.port) if "port" in self.params
+ else "",
+ opts = self.params.opts if "opts" in self.params else "")
+ logging.debug("compiled command: %s" % cmd)
+
+ logging.debug("running as server...")
+ server = ShellProcess(cmd)
+ try:
+ server.wait()
+ except OSError as e:
+ if e.errno == errno.EINTR:
+ server.kill()
+ return True
+
+class Netperf(BaseTestModule):
+ _nonomni_tests = ["SCTP_STREAM", "SCTP_STREAM_MANY", "SCTP_RR"]
+ _omni_tests = ["TCP_STREAM", "TCP_RR", "UDP_STREAM", "UDP_RR"]
+
+ _supported_tests = _nonomni_tests + _omni_tests
+
+ netperf_server = IpParam(mandatory=True)
+ testname = StrParam(mandatory=True)
+ duration = IntParam(mandatory=True)
+
+ bind = IpParam()
+ port = IntParam()
+ testoptions = StrParam()
+ confidence = StrParam()
+ cpu_util = StrParam()
+ num_parallel = IntParam(default=1)
+ runs = IntParam(default=1)
+ debug = IntParam(default=0)
+ opts = StrParam()
+
+ max_deviation = Param()
+
+ threshold = Param()
+ threshold_deviation = Param()
+ threshold_interval = Param()
+
+ def __init__(self, **kwargs):
+ super(Netperf, self).__init__(**kwargs)
+
+ if self.params.testname not in self._supported_tests:
+ msg = ("Only TCP_STREAM, TCP_RR, UDP_STREAM, "
+ "UDP_RR, SCTP_STREAM, SCTP_STREAM_MANY and SCTP_RR tests "
+ "are now officialy supported by LNST. You "
+ "can use other tests, but test result may not be correct.")
+ logging.error(msg)
+ raise TestModuleError(msg)
+
+ if "confidence" in self.params:
+ tmp = self.params.confidence.split(",")
+ if tmp[0] not in ["99", "95"]:
+ raise TestModuleError("Confidence level must be 95 or 99.")
+ try:
+ int(tmp[1])
+ except ValueError:
+ raise TestModuleError("Confidence interval must be an integer.")
+
+ if "cpu_util" in self.params:
+ if self.params.cpu_util not in ["both", "local", "remote"]:
+ raise TestModuleError("cpu_util can be 'both', 'local' or 'remote'")
+
+
+
+ if "threshold_deviation" in self.params:
+ self._check_threshold_param(self.params.threshold_deviation,
+ "threshold_deviation")
+
+ else:
+ self.params.threshold_deviation = {"rate" : 0.0,
+ "unit" : "bps"}
+
+
+ if "threshold" in self.params:
+ self._check_threshold_param(self.params.threshold,
+ "threshold")
+
+ rate = self.params.threshold["rate"]
+ deviation = self.params.threshold_deviation["rate"]
+ self.params.threshold_interval = (rate - deviation,
+ rate + deviation)
+
+ if "max_deviation" in self.params:
+ if not isinstance(self.params.max_deviation, dict):
+ raise TestModuleError("max_deviation is expected to be dictionary")
+
+ if 'type' not in self.params.max_deviation:
+ raise TestModuleError("max_deviation 'type' has to be specified ('percent' or 'absolute')")
+
+ if self.params.max_deviation['type'] not in ['percent','absolute']:
+ raise TestModuleError("max_deviation 'type' can be 'percent' or 'absolute'")
+
+
+
+            if self.params.max_deviation['type'] == 'percent':
+                if 'value' not in self.params.max_deviation:
+                    raise TestModuleError("max_deviation 'value' has to be specified")
+
+                self.params.max_deviation['value'] = float(self.params.max_deviation['value'])
+
+            if self.params.max_deviation['type'] == 'absolute':
+                if 'value' not in self.params.max_deviation:
+                    raise TestModuleError("max_deviation 'value' has to be specified")
+
+                self._check_threshold_param(self.params.max_deviation['value'],
+                                            "max_deviation 'value'")
+
+
+    def _check_threshold_param(self, threshold, name):
+        if not isinstance(threshold, dict):
+            raise TestModuleError("%s is expected to be a dictionary" % name)
+
+        if 'rate' not in threshold:
+            raise TestModuleError("%s expects a 'rate' key in the dictionary" % name)
+
+        threshold['rate'] = float(threshold['rate'])
+
+        if 'unit' not in threshold:
+            raise TestModuleError("%s expects a 'unit' key in the dictionary" % name)
+
+        if self.params.testname in ["TCP_STREAM", "UDP_STREAM",
+                                    "SCTP_STREAM", "SCTP_STREAM_MANY"]:
+            if threshold['unit'] != 'bps':
+                raise TestModuleError("unit must be 'bps' for STREAM tests")
+        else:
+            if threshold['unit'] != 'tps':
+                raise TestModuleError("unit must be 'tps' for RR tests")
+
+
+ def _is_omni(self):
+ return self.params.testname in self._omni_tests
+
+ def _compose_cmd(self):
+ """
+ composes commands for netperf and netserver based on xml recipe
+ """
+ cmd = "netperf -H %s -f k" % self.params.netperf_server
+ if self._is_omni():
+ # -P 0 disables banner header of output
+ cmd += " -P 0"
+ if "bind" in self.params:
+ """
+ application is bound to this address
+ """
+ cmd += " -L %s" % self.params.bind
+ if "port" in self.params:
+ """
+ client connects on this port
+ """
+ cmd += " -p %s" % self.params.port
+ if "duration" in self.params:
+ """
+ test will last this duration
+ """
+ cmd += " -l %s" % self.params.duration
+ if "testname" in self.params:
+ """
+ test that will be performed
+ """
+ cmd += " -t %s" % self.params.testname
+
+ if "confidence" in self.params and self.params.num_parallel <= 1:
+ """
+ confidence level that Netperf should try to achieve
+ """
+ cmd += " -I %s" % self.params.confidence
+ if self.params.runs >= 3:
+ cmd += " -i %d,%d" % (self.params.runs, self.params.runs)
+ self.params.runs = 1
+
+ if "cpu_util" in self.params:
+ if self.params.cpu_util.lower() == "both":
+ cmd += " -c -C"
+ elif self.params.cpu_util.lower() == "local":
+ cmd += " -c"
+ elif self.params.cpu_util.lower() == "remote":
+ cmd += " -C"
+
+ if self.params.debug > 0:
+ cmd += " -%s" % ('d' * self.params.debug)
+
+ if "netperf_opts" in self.params:
+ """
+ custom options for netperf
+ """
+ cmd += " %s" % self.params.netperf_opts
+
+ if self.params.num_parallel > 1:
+ """
+ wait 1 second before starting the data transfer
+ taken from the super_netperf script, can be removed if it
+ doesn't make sense
+ """
+ cmd += " -s 1"
+
+ # Print only relevant output
+ if self._is_omni():
+ cmd += ' -- -k "THROUGHPUT, LOCAL_CPU_UTIL, REMOTE_CPU_UTIL, CONFIDENCE_LEVEL, THROUGHPUT_CONFID"'
+
+ if "testoptions" in self.params:
+ if self._is_omni():
+ cmd += " %s" % self.params.testoptions
+ else:
+ cmd += " -- %s" % self.params.testoptions
+
+ return cmd
+
+ def _parse_output(self, output):
+ res_val = None
+
+ if self._is_omni():
+ res_val = self._parse_omni_output(output)
+ else:
+ res_val = self._parse_non_omni_output(output)
+
+ if "confidence" in self.params:
+ confidence = self._parse_confidence(output)
+ res_val["confidence"] = confidence
+
+ return res_val
+
+ def _parse_omni_output(self, output):
+ res_val = {}
+
+ pattern_throughput = "THROUGHPUT=(\d+\.\d+)"
+ throughput = re.search(pattern_throughput, output)
+
+ if throughput is None:
+ rate_in_kb = 0.0
+ else:
+ rate_in_kb = float(throughput.group(1))
+
+ res_val["rate"] = rate_in_kb*1000
+ res_val["unit"] = "bps"
+
+ if "cpu_util" in self.params:
+ if self.params.cpu_util == "local" or self.params.cpu_util == "both":
+ pattern_loc_cpu_util = "LOCAL_CPU_UTIL=([-]?\d+\.\d+)"
+ loc_cpu_util = re.search(pattern_loc_cpu_util, output)
+ res_val["LOCAL_CPU_UTIL"] = float(loc_cpu_util.group(1))
+
+ if self.params.cpu_util == "remote" or self.params.cpu_util == "both":
+ pattern_rem_cpu_util = "REMOTE_CPU_UTIL=([-]?\d+\.\d+)"
+ rem_cpu_util = re.search(pattern_rem_cpu_util, output)
+ res_val["REMOTE_CPU_UTIL"] = float(rem_cpu_util.group(1))
+
+ return res_val
+
+ def _parse_non_omni_output(self, output):
+ res_val = {}
+
+ # pattern for SCTP streams and other tests
+ # decimal decimal decimal float (float)
+ pattern = "\d+\s+\d+\s+\d+\s+\d+\.\d+\s+(\d+(?:\.\d+){0,1})"
+ if "cpu_util" in self.params:
+ # cpu utilization data in format: float float
+ pattern += "\s+(\d+(?:\.\d+){0,1})\s+(\d+(?:\.\d+){0,1})"
+
+ r2 = re.search(pattern, output.lower())
+
+ if r2 is None:
+ rate_in_kb = 0.0
+ else:
+ rate_in_kb = float(r2.group(1))
+ if "cpu_util" in self.params:
+ res_val["LOCAL_CPU_UTIL"] = float(r2.group(2))
+ res_val["REMOTE_CPU_UTIL"] = float(r2.group(3))
+
+ res_val["rate"] = rate_in_kb*1000
+ res_val["unit"] = "bps"
+
+ return res_val
+
+ def _parse_confidence(self, output):
+ if self._is_omni():
+ return self._parse_confidence_omni(output)
+ else:
+ return self._parse_confidence_non_omni(output)
+
+ def _parse_confidence_omni(self, output):
+ pattern_throughput_confid = "THROUGHPUT_CONFID=([-]?\d+\.\d+)"
+ pattern_confidence_level = "CONFIDENCE_LEVEL=(\d+)"
+
+ throughput_confid = re.search(pattern_throughput_confid, output)
+ confidence_level = re.search(pattern_confidence_level, output)
+
+ if throughput_confid is not None and confidence_level is not None:
+ throughput_confid = float(throughput_confid.group(1))
+ confidence_level = int(confidence_level.group(1))
+ real_confidence = (confidence_level, throughput_confid/2)
+ return real_confidence
+ else:
+ return (0, 0.0)
+
+ def _parse_confidence_non_omni(self, output):
+ normal_pattern = r'\+/-(\d+\.\d*)% @ (\d+)% conf\.'
+ warning_pattern = r'!!! Confidence intervals: Throughput\s+: (\d+\.\d*)%'
+ normal_confidence = re.search(normal_pattern, output)
+ warning_confidence = re.search(warning_pattern, output)
+
+ if normal_confidence is None:
+ logging.error("Failed to parse confidence!!")
+ return (0, 0.0)
+
+ if warning_confidence is None:
+ real_confidence = (float(normal_confidence.group(2)),
+ float(normal_confidence.group(1)))
+ else:
+ real_confidence = (float(normal_confidence.group(2)),
+ float(warning_confidence.group(1))/2)
+
+ return real_confidence
+
+
+
+ def _sum_results(self, first, second):
+ result = {}
+
+ #add rates
+ if first["unit"] == second["unit"]:
+ result["unit"] = first["unit"]
+ result["rate"] = first["rate"] + second["rate"]
+
+ # netperf measures the complete cpu utilization of the machine,
+ # so both second and first should be +- the same number
+ if "LOCAL_CPU_UTIL" in first and "LOCAL_CPU_UTIL" in second:
+ result["LOCAL_CPU_UTIL"] = first["LOCAL_CPU_UTIL"]
+
+ if "REMOTE_CPU_UTIL" in first and "REMOTE_CPU_UTIL" in second:
+ result["REMOTE_CPU_UTIL"] = first["REMOTE_CPU_UTIL"]
+
+ #ignoring confidence because it doesn't make sense to sum those
+ return result
+
+ def _pretty_rate(self, rate, unit=None):
+ pretty_rate = {}
+ if unit is None:
+ if rate < 1000:
+ pretty_rate["unit"] = "bits/sec"
+ pretty_rate["rate"] = rate
+ elif rate < 1000**2:
+ pretty_rate["unit"] = "kbits/sec"
+ pretty_rate["rate"] = rate / 1000
+ elif rate < 1000**3:
+ pretty_rate["unit"] = "mbits/sec"
+ pretty_rate["rate"] = rate / (1000**2)
+ elif rate < 1000**4:
+ pretty_rate["unit"] = "gbits/sec"
+ pretty_rate["rate"] = rate / (1000**3)
+ elif rate < 1000**5:
+ pretty_rate["unit"] = "tbits/sec"
+ pretty_rate["rate"] = rate / (1000**4)
+ else:
+ if unit == "bits/sec":
+ pretty_rate["unit"] = "bits/sec"
+ pretty_rate["rate"] = rate
+ elif unit == "Kbits/sec":
+ pretty_rate["unit"] = "Kbits/sec"
+ pretty_rate["rate"] = rate / 1024
+ elif unit == "kbits/sec":
+ pretty_rate["unit"] = "kbits/sec"
+ pretty_rate["rate"] = rate / 1000
+ elif unit == "Mbits/sec":
+ pretty_rate["unit"] = "Mbits/sec"
+ pretty_rate["rate"] = rate / (1024**2)
+ elif unit == "mbits/sec":
+ pretty_rate["unit"] = "mbits/sec"
+ pretty_rate["rate"] = rate / (1000**2)
+ elif unit == "Gbits/sec":
+ pretty_rate["unit"] = "Gbits/sec"
+ pretty_rate["rate"] = rate / (1024**3)
+ elif unit == "gbits/sec":
+ pretty_rate["unit"] = "gbits/sec"
+ pretty_rate["rate"] = rate / (1000**3)
+ elif unit == "Tbits/sec":
+ pretty_rate["unit"] = "Tbits/sec"
+ pretty_rate["rate"] = rate / (1024**4)
+ elif unit == "tbits/sec":
+ pretty_rate["unit"] = "tbits/sec"
+ pretty_rate["rate"] = rate / (1000**4)
+
+ return pretty_rate
+
+ def _run_client(self, cmd):
+ logging.debug("running as client...")
+
+ res_data = {}
+ res_data["testname"] = self.params.testname
+
+ rv = 0
+ results = []
+ rates = []
+ for i in range(1, self.params.runs+1):
+ if self.params.runs > 1:
+ logging.info("Netperf starting run %d" % i)
+ clients = []
+ client_results = []
+ for i in range(0, self.params.num_parallel):
+ clients.append(ShellProcess(cmd))
+
+ for client in clients:
+ ret_code = None
+ try:
+ ret_code = client.wait()
+ rv += ret_code
+ except OSError as e:
+ if e.errno == errno.EINTR:
+ client.kill()
+
+ output = client.read_nonblocking()
+ logging.debug(output)
+
+ if ret_code is not None and ret_code == 0:
+ client_results.append(self._parse_output(output))
+
+ if len(client_results) > 0:
+ #accumulate all the parallel results into one
+ result = client_results[0]
+ for res in client_results[1:]:
+ result = self._sum_results(result, res)
+
+ results.append(result)
+ rates.append(results[-1]["rate"])
+
+        if len(results) > 1:
+ res_data["results"] = results
+
+ if len(rates) > 0:
+ rate = sum(rates)/len(rates)
+ else:
+ rate = 0.0
+
+ if len(rates) > 1:
+ # setting deviation to 2xstd_deviation because of the 68-95-99.7
+ # rule this seems comparable to the -I 99 netperf setting
+ res_data["std_deviation"] = std_deviation(rates)
+ rate_deviation = 2*res_data["std_deviation"]
+ elif len(rates) == 1 and "confidence" in self.params:
+ result = results[0]
+ rate_deviation = rate * (result["confidence"][1] / 100)
+ else:
+ rate_deviation = 0.0
+
+ res_data["rate"] = rate
+ res_data["rate_deviation"] = rate_deviation
+
+ rate_pretty = self._pretty_rate(rate)
+ rate_dev_pretty = self._pretty_rate(rate_deviation, unit=rate_pretty["unit"])
+
+ if rv != 0 and self.params.runs == 1:
+ res_data["msg"] = "Could not get performance throughput!"
+ logging.info(res_data["msg"])
+ return (False, res_data)
+ elif rv != 0 and self.params.runs > 1:
+ res_data["msg"] = "At least one of the Netperf runs failed, "\
+ "check the logs and result data for more "\
+ "information."
+ logging.info(res_data["msg"])
+ return (False, res_data)
+
+ res_val = False
+ if "max_deviation" in self.params:
+ if self.params.max_deviation["type"] == "percent":
+ percentual_deviation = (rate_deviation / rate) * 100
+ if percentual_deviation > self.params.max_deviation["value"]:
+ res_val = False
+ res_data["msg"] = "Measured rate %.2f +-%.2f %s has bigger "\
+ "deviation than allowed (+-%.2f %%)" %\
+ (rate_pretty["rate"],
+ rate_dev_pretty["rate"],
+ rate_pretty["unit"],
+ self.params.max_deviation["value"])
+ return (res_val, res_data)
+ elif self.params.max_deviation["type"] == "absolute":
+ if rate_deviation > self.params.max_deviation["value"]["rate"]:
+ pretty_deviation = self._pretty_rate(self.params.max_deviation["value"]["rate"])
+ res_val = False
+ res_data["msg"] = "Measured rate %.2f +-%.2f %s has bigger "\
+ "deviation than allowed (+-%.2f %s)" %\
+ (rate_pretty["rate"],
+ rate_dev_pretty["rate"],
+ rate_pretty["unit"],
+ pretty_deviation["rate"],
+ pretty_deviation["unit"])
+ return (res_val, res_data)
+ if "threshold_interval" in self.params:
+ result_interval = (rate - rate_deviation,
+ rate + rate_deviation)
+
+ threshold_pretty = self._pretty_rate(self.params.threshold["rate"])
+ threshold_dev_pretty = self._pretty_rate(self.params.threshold_deviation["rate"],
+ unit = threshold_pretty["unit"])
+
+ if self.params.threshold_interval[0] > result_interval[1]:
+ res_val = False
+ res_data["msg"] = "Measured rate %.2f +-%.2f %s is lower "\
+ "than threshold %.2f +-%.2f %s" %\
+ (rate_pretty["rate"],
+ rate_dev_pretty["rate"],
+ rate_pretty["unit"],
+ threshold_pretty["rate"],
+ threshold_dev_pretty["rate"],
+ threshold_pretty["unit"])
+ return (res_val, res_data)
+ else:
+ res_val = True
+ res_data["msg"] = "Measured rate %.2f +-%.2f %s is higher "\
+ "than threshold %.2f +-%.2f %s" %\
+ (rate_pretty["rate"],
+ rate_dev_pretty["rate"],
+ rate_pretty["unit"],
+ threshold_pretty["rate"],
+ threshold_dev_pretty["rate"],
+ threshold_pretty["unit"])
+ return (res_val, res_data)
+ else:
+ if rate > 0.0:
+ res_val = True
+ else:
+ res_val = False
+ res_data["msg"] = "Measured rate was %.2f +-%.2f %s" %\
+ (rate_pretty["rate"],
+ rate_dev_pretty["rate"],
+ rate_pretty["unit"])
+ return (res_val, res_data)
+
+ def run(self):
+ cmd = self._compose_cmd()
+ logging.debug("compiled command: %s" % cmd)
+ if not is_installed("netperf"):
+ res_data = {}
+ res_data["msg"] = "Netperf is not installed on this machine!"
+ logging.error(res_data["msg"])
+ self._res_data = res_data
+ return False
+
+ (rv, res_data) = self._run_client(cmd)
+ self._res_data = res_data
+ if rv == False:
+ return False
+ return True
diff --git a/recipes/examples/python_recipe.py b/recipes/examples/python_recipe.py
index a4f4165..5b105ed 100755
--- a/recipes/examples/python_recipe.py
+++ b/recipes/examples/python_recipe.py
@@ -11,6 +11,7 @@ from lnst.Controller import BaseRecipe
from lnst.Controller import HostReq, DeviceReq
from lnst.Tests import IcmpPing
+from lnst.Tests.Netperf import Netperf, Netserver
class MyRecipe(BaseRecipe):
m1 = HostReq()
@@ -28,6 +29,20 @@ class MyRecipe(BaseRecipe):
interval=0,
iface=self.matched.m1.eth0))
+ netserver_job = self.matched.m1.run(Netserver(bind=self.matched.m1.eth0),
+ bg=True)
+
+ netperf_job = self.matched.m2.run(Netperf(netperf_server=self.matched.m1.eth0,
+ duration=1,
+ confidence="99,5",
+ runs="5",
+ debug=0,
+ max_deviation={'type':"percent",
+ 'value':20.0},
+ testname="TCP_STREAM"))
+
+
+
ctl = Controller(debug=1)
r = MyRecipe()
--
2.5.5
[PATCH 1/5] NetTestSlave: add restart_service method
by olichtne@redhat.com
From: Ondrej Lichtner <olichtne@redhat.com>
Also slightly modified the _configure_service helper method.
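A task can then restart a service on a slave directly, e.g. (a
hypothetical snippet; the service name is a placeholder):

    m1 = ctl.get_host("machine1")
    m1.restart_service("firewalld")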
Signed-off-by: Ondrej Lichtner <olichtne@redhat.com>
---
lnst/Controller/Machine.py | 8 ++++----
lnst/Controller/Task.py | 3 +++
lnst/Slave/NetTestSlave.py | 10 ++++++----
3 files changed, 13 insertions(+), 8 deletions(-)
diff --git a/lnst/Controller/Machine.py b/lnst/Controller/Machine.py
index ee0bd76..9a7c233 100644
--- a/lnst/Controller/Machine.py
+++ b/lnst/Controller/Machine.py
@@ -636,10 +636,10 @@ class Machine(object):
return False
return self._rpc_call("disable_service", service)
- def disable_services(self):
- for service in self._services:
- self.disable_service(service)
- return True
+ def restart_service(self, service):
+ if service not in self._services:
+ self._services.append(service)
+ return self._rpc_call("restart_service", service)
def get_num_cpus(self):
return self._rpc_call("get_num_cpus")
diff --git a/lnst/Controller/Task.py b/lnst/Controller/Task.py
index e103aa6..245968d 100644
--- a/lnst/Controller/Task.py
+++ b/lnst/Controller/Task.py
@@ -542,6 +542,9 @@ class HostAPI(object):
def disable_service(self, service):
return self._m.disable_service(service)
+ def restart_service(self, service):
+ return self._m.restart_service(service)
+
def get_num_cpus(self):
return self._m.get_num_cpus()
diff --git a/lnst/Slave/NetTestSlave.py b/lnst/Slave/NetTestSlave.py
index e71d3ef..685111a 100644
--- a/lnst/Slave/NetTestSlave.py
+++ b/lnst/Slave/NetTestSlave.py
@@ -1014,8 +1014,7 @@ class SlaveMethods:
stdout, _ = exec_cmd("pidof systemd", die_on_err=False)
return len(stdout) != 0
- def _configure_service(self, service, start=True):
- action = "start" if start else "stop"
+ def _configure_service(self, service, action):
if self._is_systemd():
exec_cmd("systemctl {} {}".format(action, service))
else:
@@ -1023,10 +1022,13 @@ class SlaveMethods:
return True
def enable_service(self, service):
- return self._configure_service(service)
+ return self._configure_service(service, "start")
def disable_service(self, service):
- return self._configure_service(service, start=False)
+ return self._configure_service(service, "stop")
+
+ def restart_service(self, service):
+ return self._configure_service(service, "restart")
def get_num_cpus(self):
return int(os.sysconf('SC_NPROCESSORS_ONLN'))
--
2.14.2
[patch lnst-next] bridge: add support for very basic options
by Jiri Pirko
From: Jiri Pirko <jiri@mellanox.com>
Signed-off-by: Jiri Pirko <jiri@mellanox.com>
---
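A usage sketch, assuming the options are passed as device parameters
that get translated through _linkinfo_data_map (import path and values
are hypothetical):

    from lnst.Devices import BridgeDevice   # assumed lnst-next import path

    m1.br0 = BridgeDevice(ageing_time=1000,
                          stp_state=1,
                          vlan_filtering=1)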
lnst/Devices/BridgeDevice.py | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/lnst/Devices/BridgeDevice.py b/lnst/Devices/BridgeDevice.py
index 8edb774..2208e25 100644
--- a/lnst/Devices/BridgeDevice.py
+++ b/lnst/Devices/BridgeDevice.py
@@ -17,6 +17,10 @@ class BridgeDevice(MasterDevice):
_name_template = "t_br"
_link_type = "bridge"
+ _linkinfo_data_map = {"ageing_time": "IFLA_BR_AGEING_TIME",
+ "stp_state": "IFLA_BR_STP_STATE",
+ "vlan_filtering": "IFLA_BR_VLAN_FILTERING"}
+
# def _get_bridge_dir(self):
# return "/sys/class/net/%s/bridge" % self.name
--
2.9.5
[PATCH lnst 00/13] Subject: Test IP-in-IP netdevices
by Petr Machata
Hi,
the goal of this patch set is to provide test coverage for offloading of L3
tunnels, with emphasis on GRE in particular.
The first patch is a small improvement in how routes are parsed, so that later
on I can ask about IPv6 route flags and have tests about that.
Then there are three patches that introduce support for several netdevices
pertaining to IP-in-IP setups: the dummy device, which serves as an underlay
anchor for many tests, and the GRE and IPIP netdevices for the tunnels
themselves.
Finally there's a suite of patches that introduce the tests. First several
context managers for TestLib, and then eight patches, one per test recipe. Each
recipe then holds several actual tests to exercise the feature in a variety of
scenarios.
Regarding the context managers, those allow one to structure netdevice lifetimes
(and other artifacts) in a nicely scoped manner, such as:
with vrf(sw) as vrf_u, \
vrf(sw) as vrf_o:
# Do stuff with the VRFs.
with dummy(sw, vrf_u) as d, \
gre(sw, d, vrf_o,
local_ip="1.2.3.4", remote_ip="1.2.3.5") as g:
# Do stuff with D and G.
ping_test(...)
# Here, D and G are gone again, but the VRFs are still around for
# more tests.
Not all tests map nicely to that paradigm. E.g., routes often exhibit this
nested lifetime, but sometimes one wants to shuffle a route around
mid-operation to test some corner case. But most of the time, resources tend
to have a nested lifetime like this, and there the "with" statement is a
natural fit.
Thanks,
Petr
Petr Machata (13):
HostAPI: Make SlaveMethods.get_routes() IPv6-aware
Support dummy devices
Support GRE devices
Support ipip devices
recipes: switchdev: TestLib: Add context managers
recipes: switchdev: Test GRE tunnels
recipes: switchdev: Test IPIP tunnels
recipes: switchdev: Test GRE in flat scenario
recipes: switchdev: Test alien traffic in GRE tunnel
recipes: switchdev: Test offload flag in IPIP routes
recipes: switchdev: Test decap-only GRE tunnels
recipes: switchdev: Test GRE migration
recipes: switchdev: Test a conflicting tunnel
lnst/Controller/RecipeParser.py | 8 +-
lnst/Controller/Task.py | 47 ++++++
lnst/Slave/NetConfigDevice.py | 79 +++++++++-
lnst/Slave/NetTestSlave.py | 3 +-
recipes/switchdev/TestLib.py | 152 +++++++++++++++++++
recipes/switchdev/ipip-001-gre-hier-basic.py | 197 +++++++++++++++++++++++++
recipes/switchdev/ipip-001-gre-hier-basic.xml | 12 ++
recipes/switchdev/ipip-002-ipip-hier-basic.py | 60 ++++++++
recipes/switchdev/ipip-002-ipip-hier-basic.xml | 57 +++++++
recipes/switchdev/ipip-003-gre-flat.py | 134 +++++++++++++++++
recipes/switchdev/ipip-003-gre-flat.xml | 12 ++
recipes/switchdev/ipip-004-gre-traps.py | 69 +++++++++
recipes/switchdev/ipip-004-gre-traps.xml | 12 ++
recipes/switchdev/ipip-005-offload-flag.py | 63 ++++++++
recipes/switchdev/ipip-005-offload-flag.xml | 12 ++
recipes/switchdev/ipip-006-gre-decap.py | 93 ++++++++++++
recipes/switchdev/ipip-006-gre-decap.xml | 136 +++++++++++++++++
recipes/switchdev/ipip-007-gre-migration.py | 96 ++++++++++++
recipes/switchdev/ipip-007-gre-migration.xml | 12 ++
recipes/switchdev/ipip-008-gre-conflict.py | 82 ++++++++++
recipes/switchdev/ipip-008-gre-conflict.xml | 12 ++
recipes/switchdev/ipip_common.py | 72 +++++++++
recipes/switchdev/ipip_common_topology.xml | 58 ++++++++
schema-recipe.rng | 24 +++
24 files changed, 1496 insertions(+), 6 deletions(-)
create mode 100644 recipes/switchdev/ipip-001-gre-hier-basic.py
create mode 100644 recipes/switchdev/ipip-001-gre-hier-basic.xml
create mode 100644 recipes/switchdev/ipip-002-ipip-hier-basic.py
create mode 100644 recipes/switchdev/ipip-002-ipip-hier-basic.xml
create mode 100644 recipes/switchdev/ipip-003-gre-flat.py
create mode 100644 recipes/switchdev/ipip-003-gre-flat.xml
create mode 100644 recipes/switchdev/ipip-004-gre-traps.py
create mode 100644 recipes/switchdev/ipip-004-gre-traps.xml
create mode 100644 recipes/switchdev/ipip-005-offload-flag.py
create mode 100644 recipes/switchdev/ipip-005-offload-flag.xml
create mode 100644 recipes/switchdev/ipip-006-gre-decap.py
create mode 100644 recipes/switchdev/ipip-006-gre-decap.xml
create mode 100644 recipes/switchdev/ipip-007-gre-migration.py
create mode 100644 recipes/switchdev/ipip-007-gre-migration.xml
create mode 100644 recipes/switchdev/ipip-008-gre-conflict.py
create mode 100644 recipes/switchdev/ipip-008-gre-conflict.xml
create mode 100644 recipes/switchdev/ipip_common.py
create mode 100644 recipes/switchdev/ipip_common_topology.xml
--
2.4.11
[PATCH] TestModule, Recipe: add Netperf test module and example
by Kamil Jerabek
This commit adds the Netperf module to the new LNST version. It is a
recreation of the old Netperf module, with a new design that splits the
old Netperf class, which implemented both client and server in one
class, into two classes: Netserver and Netperf.
This patch also extends the example recipes/examples/python_recipe.py,
which now demonstrates Netperf module usage as well.
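In this version, thresholds and deviations are given as strings and
parsed with the regular expressions below, e.g. (hypothetical values):

    netperf_job = m2.run(Netperf(netperf_server=m1.eth0,
                                 testname="TCP_STREAM",
                                 threshold="9.5 gbits/sec",
                                 threshold_deviation="0.2 gbits/sec",
                                 max_deviation="10%"))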
Signed-off-by: Kamil Jerabek <kjerabek@redhat.com>
---
lnst/Tests/Netperf.py | 590 ++++++++++++++++++++++++++++++++++++++
recipes/examples/python_recipe.py | 16 ++
2 files changed, 606 insertions(+)
create mode 100644 lnst/Tests/Netperf.py
diff --git a/lnst/Tests/Netperf.py b/lnst/Tests/Netperf.py
new file mode 100644
index 0000000..838e3d6
--- /dev/null
+++ b/lnst/Tests/Netperf.py
@@ -0,0 +1,590 @@
+import logging
+import errno
+import re
+from lnst.Common.Parameters import IntParam, IpParam, StrParam, Param
+from lnst.Common.TestModule import BaseTestModule, TestModuleError
+from lnst.Common.ShellProcess import ShellProcess
+from lnst.Common.ExecCmd import exec_cmd
+from lnst.Common.Utils import is_installed, std_deviation
+
+class Netserver(BaseTestModule):
+ bind = IpParam(mandatory=True)
+ port = IntParam()
+ netperf_opts = StrParam()
+
+ def _compose_cmd(self):
+ cmd = "netserver -D"
+ if "bind"in self.params:
+ """
+ server is bound to this address
+ """
+ cmd += " -L %s" % self.params.bind
+ if "port" in self.params:
+ """
+ server listens on this port
+ """
+ cmd += " -p %s" % self.params.port
+ if "netperf_opts" in self.params:
+ """
+ custom options for netperf
+ """
+ cmd += " %s" % self.params.netperf_opts
+
+ return cmd
+
+ def _run_server(self, cmd):
+ logging.debug("running as server...")
+ server = ShellProcess(cmd)
+ try:
+ server.wait()
+ except OSError as e:
+ if e.errno == errno.EINTR:
+ server.kill()
+
+ def run(self):
+ cmd = self._compose_cmd()
+ logging.debug("compiled command: %s" % cmd)
+ if not is_installed("netserver"):
+ res_data = {}
+ res_data["msg"] = "Netserver is not installed on this machine!"
+ logging.error(res_data["msg"])
+ self._res_data = res_data
+ return False
+ self._run_server(cmd)
+
+
+class Netperf(BaseTestModule):
+ supported_tests = ["TCP_STREAM", "TCP_RR", "UDP_STREAM", "UDP_RR",
+ "SCTP_STREAM", "SCTP_STREAM_MANY", "SCTP_RR"]
+
+ omni_tests = ["TCP_STREAM", "TCP_RR", "UDP_STREAM", "UDP_RR"]
+
+ netperf_server = IpParam(mandatory=True)
+ bind = IpParam()
+ duration = IntParam(default=60)
+ port = IntParam()
+ testname = StrParam(default="TCP_STREAM")
+ testoptions = StrParam()
+ confidence = StrParam()
+ cpu_util = StrParam()
+ num_parallel = IntParam(default=1)
+ runs = IntParam(default=1)
+ debug = IntParam(default=0)
+ netperf_opts = StrParam()
+
+ max_deviation = Param()
+ threshold = Param()
+ threshold_deviation = Param()
+ threshold_interval = Param()
+
+ def __init__(self, **kwargs):
+ super(Netperf, self).__init__(**kwargs)
+
+ if "threshold" in self.params:
+ self.params.threshold = self._parse_threshold(self.params.threshold)
+
+ if "threshold_deviation" in self.params:
+ self.params.threshold_deviation = self._parse_threshold(
+ self.params.threshold_deviation)
+ else:
+ self.params.threshold_deviation = {"rate" : 0.0,
+ "unit" : "bps"}
+
+ if "threshold" in self.params:
+ rate = self.params.threshold["rate"]
+ deviation = self.params.threshold_deviation["rate"]
+ self.params.threshold_interval = (rate - deviation,
+ rate + deviation)
+
+ if "max_deviation" in self.params:
+ self.params.max_deviation = self._parse_max_deviation(
+ self.params.max_deviation)
+
+
+ def _is_omni(self):
+ return self.params.testname in self.omni_tests
+
+ def _compose_cmd(self):
+ """
+ composes commands for netperf and netserver based on xml recipe
+ """
+ cmd = "netperf -H %s -f k" % self.params.netperf_server
+ if self._is_omni():
+ # -P 0 disables banner header of output
+ cmd += " -P 0"
+ if "bind" in self.params:
+ """
+ application is bound to this address
+ """
+ cmd += " -L %s" % self.params.bind
+ if "port" in self.params:
+ """
+ client connects on this port
+ """
+ cmd += " -p %s" % self.params.port
+ if "duration" in self.params:
+ """
+ test will last this duration
+ """
+ cmd += " -l %s" % self.params.duration
+ if "testname" in self.params:
+ """
+ test that will be performed
+ """
+ if self.params.testname not in self.supported_tests:
+ logging.warning("Only TCP_STREAM, TCP_RR, UDP_STREAM, "
+ "UDP_RR, SCTP_STREAM, SCTP_STREAM_MANY and SCTP_RR tests "
+ "are now officialy supported by LNST. You "
+ "can use other tests, but test result may not be correct.")
+ cmd += " -t %s" % self.params.testname
+
+ if "confidence" in self.params and self.params.num_parallel <= 1:
+ """
+ confidence level that Netperf should try to achieve
+ """
+ cmd += " -I %s" % self.params.confidence
+ if self.params.runs >= 3:
+ cmd += " -i %d,%d" % (self.params.runs, self.params.runs)
+ self.params.runs = 1
+
+ if "cpu_util" in self.params:
+ if self.params.cpu_util.lower() == "both":
+ cmd += " -c -C"
+ elif self.params.cpu_util.lower() == "local":
+ cmd += " -c"
+ elif self.params.cpu_util.lower() == "remote":
+ cmd += " -C"
+
+ if self.params.debug > 0:
+ cmd += " -%s" % ('d' * self.params.debug)
+
+ if "netperf_opts" in self.params:
+ """
+ custom options for netperf
+ """
+ cmd += " %s" % self.params.netperf_opts
+
+ if self.params.num_parallel > 1:
+ """
+ wait 1 second before starting the data transfer
+ taken from the super_netperf script, can be removed if it
+ doesn't make sense
+ """
+ cmd += " -s 1"
+
+ # Print only relevant output
+ if self._is_omni():
+ cmd += ' -- -k "THROUGHPUT, LOCAL_CPU_UTIL, REMOTE_CPU_UTIL, CONFIDENCE_LEVEL, THROUGHPUT_CONFID"'
+
+ if "testoptions" in self.params:
+ if self._is_omni():
+ cmd += " %s" % self.params.testoptions
+ else:
+ cmd += " -- %s" % self.params.testoptions
+
+ return cmd
+
+ def _parse_output(self, output):
+ res_val = None
+
+ if self._is_omni():
+ res_val = self._parse_omni_output(output)
+ else:
+ res_val = self._parse_non_omni_output(output)
+
+ if "confidence" in self.params:
+ confidence = self._parse_confidence(output)
+ res_val["confidence"] = confidence
+
+ return res_val
+
+ def _parse_omni_output(self, output):
+ res_val = {}
+
+ pattern_throughput = "THROUGHPUT=(\d+\.\d+)"
+ throughput = re.search(pattern_throughput, output)
+
+ if throughput is None:
+ rate_in_kb = 0.0
+ else:
+ rate_in_kb = float(throughput.group(1))
+
+ res_val["rate"] = rate_in_kb*1000
+ res_val["unit"] = "bps"
+
+ if "cpu_util" in self.params:
+ if self.params.cpu_util == "local" or self.params.cpu_util == "both":
+ pattern_loc_cpu_util = "LOCAL_CPU_UTIL=([-]?\d+\.\d+)"
+ loc_cpu_util = re.search(pattern_loc_cpu_util, output)
+ res_val["LOCAL_CPU_UTIL"] = float(loc_cpu_util.group(1))
+
+ if self.params.cpu_util == "remote" or self.params.cpu_util == "both":
+ pattern_rem_cpu_util = "REMOTE_CPU_UTIL=([-]?\d+\.\d+)"
+ rem_cpu_util = re.search(pattern_rem_cpu_util, output)
+ res_val["REMOTE_CPU_UTIL"] = float(rem_cpu_util.group(1))
+
+ return res_val
+
+ def _parse_non_omni_output(self, output):
+ res_val = {}
+
+ # pattern for SCTP streams and other tests
+ # decimal decimal decimal float (float)
+ pattern = "\d+\s+\d+\s+\d+\s+\d+\.\d+\s+(\d+(?:\.\d+){0,1})"
+ if "cpu_util" in self.params:
+ # cpu utilization data in format: float float
+ pattern += "\s+(\d+(?:\.\d+){0,1})\s+(\d+(?:\.\d+){0,1})"
+
+ r2 = re.search(pattern, output.lower())
+
+ if r2 is None:
+ rate_in_kb = 0.0
+ else:
+ rate_in_kb = float(r2.group(1))
+ if "cpu_util" in self.params:
+ res_val["LOCAL_CPU_UTIL"] = float(r2.group(2))
+ res_val["REMOTE_CPU_UTIL"] = float(r2.group(3))
+
+ res_val["rate"] = rate_in_kb*1000
+ res_val["unit"] = "bps"
+
+ return res_val
+
+ def _parse_confidence(self, output):
+ if self._is_omni():
+ return self._parse_confidence_omni(output)
+ else:
+ return self._parse_confidence_non_omni(output)
+
+ def _parse_confidence_omni(self, output):
+ pattern_throughput_confid = "THROUGHPUT_CONFID=([-]?\d+\.\d+)"
+ pattern_confidence_level = "CONFIDENCE_LEVEL=(\d+)"
+
+ throughput_confid = re.search(pattern_throughput_confid, output)
+ confidence_level = re.search(pattern_confidence_level, output)
+
+ if throughput_confid is not None and confidence_level is not None:
+ throughput_confid = float(throughput_confid.group(1))
+ confidence_level = int(confidence_level.group(1))
+ real_confidence = (confidence_level, throughput_confid/2)
+ return real_confidence
+ else:
+ return (0, 0.0)
+
+ def _parse_confidence_non_omni(self, output):
+ normal_pattern = r'\+/-(\d+\.\d*)% @ (\d+)% conf\.'
+ warning_pattern = r'!!! Confidence intervals: Throughput\s+: (\d+\.\d*)%'
+ normal_confidence = re.search(normal_pattern, output)
+ warning_confidence = re.search(warning_pattern, output)
+
+ if normal_confidence is None:
+ logging.error("Failed to parse confidence!!")
+ return (0, 0.0)
+
+ if warning_confidence is None:
+ real_confidence = (float(normal_confidence.group(2)),
+ float(normal_confidence.group(1)))
+ else:
+ real_confidence = (float(normal_confidence.group(2)),
+ float(warning_confidence.group(1))/2)
+
+ return real_confidence
+
+ def _parse_threshold(self, threshold):
+ res_data = {}
+ threshold_rate = 0.0
+ threshold_unit_type = "bps"
+
+ if threshold is None:
+ return None
+ # pattern for threshold
+ # group(1) ... threshold value
+ # group(3) ... threshold units
+ # group(4) ... bytes/bits
+ if (self.params.testname == "TCP_STREAM" or
+ self.params.testname == "UDP_STREAM" or
+ self.params.testname == "SCTP_STREAM" or
+ self.params.testname == "SCTP_STREAM_MANY"):
+ pattern_stream = "(\d*(\.\d*)?)\s*([ kmgtKMGT])(bits|bytes)\/sec"
+ r1 = re.search(pattern_stream, threshold)
+ if r1 is None:
+ res_data["msg"] = "Invalid unit type in the "\
+ "throughput option"
+ return (False, res_data)
+ threshold_rate = float(r1.group(1))
+ threshold_unit_size = r1.group(3)
+ threshold_unit_type = r1.group(4)
+ if threshold_unit_size == 'k':
+ threshold_rate *= 1000
+ elif threshold_unit_size == 'K':
+ threshold_rate *= 1024
+ elif threshold_unit_size == 'm':
+ threshold_rate *= 1000*1000
+ elif threshold_unit_size == 'M':
+ threshold_rate *= 1024*1024
+ elif threshold_unit_size == 'g':
+ threshold_rate *= 1000*1000*1000
+ elif threshold_unit_size == 'G':
+ threshold_rate *= 1024*1024*1024
+ elif threshold_unit_size == 't':
+ threshold_rate *= 1000 * 1000 * 1000 * 1000
+ elif threshold_unit_size == 'T':
+ threshold_rate *= 1024 * 1024 * 1024 * 1024
+ if threshold_unit_type == "bytes":
+ threshold_rate *= 8
+ threshold_unit_type = "bps"
+ elif (self.params.testname == "TCP_RR" or self.params.testname == "UDP_RR" or
+ self.params.testname == "SCTP_RR"):
+ pattern_rr = "(\d*(\.\d*)?)\s*trans\.\/sec"
+ r1 = re.search(pattern_rr, threshold.lower())
+ if r1 is None:
+ res_data["msg"] = "Invalid unit type in the "\
+ "throughput option"
+ return (False, res_data)
+ threshold_rate = float(r1.group(1))
+ threshold_unit_type = "tps"
+
+ return {"rate": threshold_rate,
+ "unit": threshold_unit_type}
+
+ def _parse_max_deviation(self, deviation):
+ if deviation is None:
+ return None
+        percentual_deviation = r"(\d+(\.\d+)?)\s*%"
+ match = re.match(percentual_deviation, deviation)
+ if match:
+ return {"type": "percent",
+ "value": float(match.group(1))}
+ else:
+ val = self._parse_threshold(deviation)
+ if val is not None:
+ return {"type": "absolute",
+ "value": val}
+ return None
+
+ def _sum_results(self, first, second):
+ result = {}
+
+ #add rates
+ if first["unit"] == second["unit"]:
+ result["unit"] = first["unit"]
+ result["rate"] = first["rate"] + second["rate"]
+
+ # netperf measures the complete cpu utilization of the machine,
+ # so both second and first should be +- the same number
+ if "LOCAL_CPU_UTIL" in first and "LOCAL_CPU_UTIL" in second:
+ result["LOCAL_CPU_UTIL"] = first["LOCAL_CPU_UTIL"]
+
+ if "REMOTE_CPU_UTIL" in first and "REMOTE_CPU_UTIL" in second:
+ result["REMOTE_CPU_UTIL"] = first["REMOTE_CPU_UTIL"]
+
+ #ignoring confidence because it doesn't make sense to sum those
+ return result
+
+ def _pretty_rate(self, rate, unit=None):
+ pretty_rate = {}
+        if unit is None:
+            # pick a human friendly decimal unit based on magnitude
+            if rate < 1000:
+                pretty_rate["unit"] = "bits/sec"
+                pretty_rate["rate"] = rate
+            elif rate < 1000 ** 2:
+                pretty_rate["unit"] = "kbits/sec"
+                pretty_rate["rate"] = rate / 1000
+            elif rate < 1000 ** 3:
+                pretty_rate["unit"] = "mbits/sec"
+                pretty_rate["rate"] = rate / (1000 ** 2)
+            elif rate < 1000 ** 4:
+                pretty_rate["unit"] = "gbits/sec"
+                pretty_rate["rate"] = rate / (1000 ** 3)
+            else:
+                pretty_rate["unit"] = "tbits/sec"
+                pretty_rate["rate"] = rate / (1000 ** 4)
+        else:
+            # convert to the caller requested unit; uppercase prefixes are
+            # binary (1024-based), lowercase ones decimal (1000-based);
+            # an unknown unit now raises KeyError instead of silently
+            # returning an empty dict
+            divisors = {"bits/sec": 1,
+                        "kbits/sec": 1000, "Kbits/sec": 1024,
+                        "mbits/sec": 1000 ** 2, "Mbits/sec": 1024 ** 2,
+                        "gbits/sec": 1000 ** 3, "Gbits/sec": 1024 ** 3,
+                        "tbits/sec": 1000 ** 4, "Tbits/sec": 1024 ** 4}
+            pretty_rate["unit"] = unit
+            pretty_rate["rate"] = rate / divisors[unit]
+
+ return pretty_rate
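+    # Informal sketch: _pretty_rate(1.5e9) returns
+    # {"unit": "gbits/sec", "rate": 1.5}, and
+    # _pretty_rate(2048.0, unit="Kbits/sec") returns
+    # {"unit": "Kbits/sec", "rate": 2.0}.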
+
+ def _run_client(self, cmd):
+ logging.debug("running as client...")
+
+ res_data = {}
+ res_data["testname"] = self.params.testname
+
+ rv = 0
+ results = []
+ rates = []
+ for i in range(1, self.params.runs+1):
+ if self.params.runs > 1:
+ logging.info("Netperf starting run %d" % i)
+ clients = []
+ client_results = []
+            for _ in range(0, self.params.num_parallel):
+ clients.append(ShellProcess(cmd))
+
+ for client in clients:
+ ret_code = None
+ try:
+ ret_code = client.wait()
+ rv += ret_code
+ except OSError as e:
+ if e.errno == errno.EINTR:
+ client.kill()
+
+ output = client.read_nonblocking()
+ logging.debug(output)
+
+                if ret_code == 0:
+ client_results.append(self._parse_output(output))
+
+ if len(client_results) > 0:
+                # accumulate all the parallel results into one
+ result = client_results[0]
+ for res in client_results[1:]:
+ result = self._sum_results(result, res)
+
+ results.append(result)
+ rates.append(results[-1]["rate"])
+
+        if len(results) > 1:
+ res_data["results"] = results
+
+ if len(rates) > 0:
+ rate = sum(rates)/len(rates)
+ else:
+ rate = 0.0
+
+ if len(rates) > 1:
+            # set the deviation to 2 * std_deviation; by the 68-95-99.7 rule
+            # this seems comparable to the -I 99 netperf setting
+ res_data["std_deviation"] = std_deviation(rates)
+ rate_deviation = 2*res_data["std_deviation"]
+ elif len(rates) == 1 and "confidence" in self.params:
+ result = results[0]
+ rate_deviation = rate * (result["confidence"][1] / 100)
+ else:
+ rate_deviation = 0.0
+
+ res_data["rate"] = rate
+ res_data["rate_deviation"] = rate_deviation
+
+ rate_pretty = self._pretty_rate(rate)
+ rate_dev_pretty = self._pretty_rate(rate_deviation, unit=rate_pretty["unit"])
+
+ if rv != 0 and self.params.runs == 1:
+ res_data["msg"] = "Could not get performance throughput!"
+ logging.info(res_data["msg"])
+ return (False, res_data)
+ elif rv != 0 and self.params.runs > 1:
+ res_data["msg"] = "At least one of the Netperf runs failed, "\
+ "check the logs and result data for more "\
+ "information."
+ logging.info(res_data["msg"])
+ return (False, res_data)
+
+ res_val = False
+ if "max_deviation" in self.params:
+ if self.params.max_deviation["type"] == "percent":
+ percentual_deviation = (rate_deviation / rate) * 100
+ if percentual_deviation > self.params.max_deviation["value"]:
+ res_val = False
+                    res_data["msg"] = "Measured rate %.2f +-%.2f %s has a "\
+                                      "bigger deviation than allowed "\
+                                      "(+-%.2f %%)" %\
+                                      (rate_pretty["rate"],
+                                       rate_dev_pretty["rate"],
+                                       rate_pretty["unit"],
+                                       self.params.max_deviation["value"])
+ return (res_val, res_data)
+ elif self.params.max_deviation["type"] == "absolute":
+ if rate_deviation > self.params.max_deviation["value"]["rate"]:
+ pretty_deviation = self._pretty_rate(self.params.max_deviation["value"]["rate"])
+ res_val = False
+                    res_data["msg"] = "Measured rate %.2f +-%.2f %s has a "\
+                                      "bigger deviation than allowed "\
+                                      "(+-%.2f %s)" %\
+                                      (rate_pretty["rate"],
+                                       rate_dev_pretty["rate"],
+                                       rate_pretty["unit"],
+                                       pretty_deviation["rate"],
+                                       pretty_deviation["unit"])
+ return (res_val, res_data)
+ if "threshold_interval" in self.params:
+ result_interval = (rate - rate_deviation,
+ rate + rate_deviation)
+
+ threshold_pretty = self._pretty_rate(self.params.threshold["rate"])
+ threshold_dev_pretty = self._pretty_rate(self.params.threshold_deviation["rate"],
+ unit = threshold_pretty["unit"])
+
+ if self.params.threshold_interval[0] > result_interval[1]:
+ res_val = False
+ res_data["msg"] = "Measured rate %.2f +-%.2f %s is lower "\
+ "than threshold %.2f +-%.2f %s" %\
+ (rate_pretty["rate"],
+ rate_dev_pretty["rate"],
+ rate_pretty["unit"],
+ threshold_pretty["rate"],
+ threshold_dev_pretty["rate"],
+ threshold_pretty["unit"])
+ return (res_val, res_data)
+ else:
+ res_val = True
+                res_data["msg"] = "Measured rate %.2f +-%.2f %s meets "\
+                                  "the threshold %.2f +-%.2f %s" %\
+                                  (rate_pretty["rate"],
+                                   rate_dev_pretty["rate"],
+                                   rate_pretty["unit"],
+                                   threshold_pretty["rate"],
+                                   threshold_dev_pretty["rate"],
+                                   threshold_pretty["unit"])
+ return (res_val, res_data)
+ else:
+            res_val = rate > 0.0
+ res_data["msg"] = "Measured rate was %.2f +-%.2f %s" %\
+ (rate_pretty["rate"],
+ rate_dev_pretty["rate"],
+ rate_pretty["unit"])
+ return (res_val, res_data)
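+    # _run_client returns (bool, res_data); res_data always carries the
+    # measured "rate" and "rate_deviation", plus a human readable "msg".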
+
+ def run(self):
+        cmd = self._compose_cmd()
+        logging.debug("composed command: %s" % cmd)
+ if not is_installed("netperf"):
+ res_data = {}
+ res_data["msg"] = "Netperf is not installed on this machine!"
+ logging.error(res_data["msg"])
+ self._res_data = res_data
+ return False
+
+ (rv, res_data) = self._run_client(cmd)
+ self._res_data = res_data
+        return rv
diff --git a/recipes/examples/python_recipe.py b/recipes/examples/python_recipe.py
index a4f4165..da3c872 100755
--- a/recipes/examples/python_recipe.py
+++ b/recipes/examples/python_recipe.py
@@ -11,6 +11,7 @@ from lnst.Controller import BaseRecipe
from lnst.Controller import HostReq, DeviceReq
from lnst.Tests import IcmpPing
+from lnst.Tests.Netperf import Netperf, Netserver
class MyRecipe(BaseRecipe):
m1 = HostReq()
@@ -28,6 +29,21 @@ class MyRecipe(BaseRecipe):
interval=0,
iface=self.matched.m1.eth0))
+ netserver_job = self.matched.m1.run(Netserver(bind=self.matched.m1.eth0),
+ bg=True)
+
+ netperf_job = self.matched.m2.run(Netperf(netperf_server=self.matched.m1.eth0,
+ duration=1,
+ confidence="99,5",
+ runs="5",
+ debug=0,
+ max_deviation="20%"))
+
+ print(netperf_job.result)
+ print(netperf_job.stdout)
+ print(netperf_job._res["res_data"])
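+        # A threshold check could presumably be requested the same way,
+        # e.g. threshold="1 gbits/sec" with a threshold_deviation such as
+        # "100 mbits/sec" (hypothetical values; both strings are parsed
+        # by Netperf._parse_threshold).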
+
+
ctl = Controller(debug=1)
r = MyRecipe()
--
2.5.5