class ShortLivedConnectionsRecipe(CommonHWSubConfigMixin, BaseEnrtRecipe):
    """Enrt recipe measuring short-lived TCP connection performance
    (request/response and connect/request/response netperf tests) between
    two hosts whose NICs are attached to the same switch.
    """
    host1 = HostReq()
    host1.eth0 = DeviceReq(label="to_switch", driver=RecipeParam("driver"))

    host2 = HostReq()
    host2.eth0 = DeviceReq(label="to_switch", driver=RecipeParam("driver"))

    # Only request/response style tests make sense for short-lived connections.
    perf_tests = Param(default=("TCP_RR", "TCP_CRR"))
    ip_versions = Param(default=("ipv4",))
    perf_parallel_streams = IntParam(default=2)
    perf_msg_sizes = ListParam(default=[1000, 5000, 7000, 10000, 12000])

    def test_wide_configuration(self):
        """Address both NICs on 192.168.101.0/24 (.10 and .11) and bring them up."""
        host1, host2 = self.matched.host1, self.matched.host2
        configuration = super().test_wide_configuration()
        configuration.test_wide_devices = [host1.eth0, host2.eth0]
        net_addr = "192.168.101"
        # enumerate starts at 10 so the hosts get .10 and .11
        for i, host in enumerate([host1, host2], 10):
            host.eth0.down()
            host.eth0.ip_add(ipaddress(net_addr + "." + str(i) + "/24"))
            host.eth0.up()
        self.wait_tentative_ips(configuration.test_wide_devices)
        return configuration

    def generate_test_wide_description(self, config):
        """Extend the base description with the configured IPs of each device."""
        host1, host2 = self.matched.host1, self.matched.host2
        desc = super().generate_test_wide_description(config)
        desc += [
            "\n".join([
                "Configured {}.{}.ips = {}".format(
                    dev.host.hostid, dev.name, dev.ips
                )
                for dev in config.test_wide_devices
            ])
        ]
        return desc

    def test_wide_deconfiguration(self, config):
        # drop our device references before the mixins deconfigure
        del config.test_wide_devices
        super().test_wide_deconfiguration(config)

    def generate_perf_endpoints(self, config):
        """Single unidirectional measurement pair: host1.eth0 -> host2.eth0."""
        return [(self.matched.host1.eth0, self.matched.host2.eth0)]

    @property
    def mtu_hw_config_dev_list(self):
        return [self.matched.host1.eth0, self.matched.host2.eth0]

    @property
    def dev_interrupt_hw_config_dev_list(self):
        return [self.matched.host1.eth0, self.matched.host2.eth0]

    @property
    def parallel_stream_qdisc_hw_config_dev_list(self):
        return [self.matched.host1.eth0, self.matched.host2.eth0]
class MyRecipe(BaseRecipe):
    """Minimal example recipe: two hosts on one network segment, with a
    single ICMP ping from m1 to m2.
    """
    m1 = HostReq()
    m1.eth0 = DeviceReq(label="net1")
    m2 = HostReq()
    m2.eth0 = DeviceReq(label="net1")

    def test(self):
        """Address both NICs, bring them up and ping m2 from m1."""
        self.matched.m1.eth0.ip_add(IpAddress("192.168.1.1/24"))
        self.matched.m2.eth0.ip_add(IpAddress("192.168.1.2/24"))
        # fix: the interfaces were never brought up, so the ping below
        # could not succeed (compare the other example recipes in this file)
        self.matched.m1.eth0.up()
        self.matched.m2.eth0.up()
        ping_job = self.matched.m1.run(
            IcmpPing(dst=self.matched.m2.eth0, interval=0,
                     iface=self.matched.m1.eth0))
class HelloWorldRecipe(BaseRecipe):
    """Introductory example recipe: two machines on a shared segment ping
    each other once in each direction.
    """
    machine1 = HostReq()
    machine1.nic1 = DeviceReq(label="net1")
    machine2 = HostReq()
    machine2.nic1 = DeviceReq(label="net1")

    def test(self):
        """Address and enable both NICs, then run a 5-packet ping each way."""
        first = self.matched.machine1
        second = self.matched.machine2
        # configure each NIC in turn: address first, then bring it up
        for machine, address in ((first, "192.168.1.1/24"),
                                 (second, "192.168.1.2/24")):
            machine.nic1.ip_add(address)
            machine.nic1.up()
        first.run("ping 192.168.1.2 -c 5")
        second.run("ping 192.168.1.1 -c 5")
class PingFloodRecipe(PingTestAndEvaluate):
    """Configurable ping test between two NICs on the same segment,
    evaluated through the PingTestAndEvaluate machinery.
    """
    driver = StrParam(default='ixgbe')

    host1 = HostReq()
    host1.eth0 = DeviceReq(label="net1", driver=RecipeParam("driver"))

    host2 = HostReq()
    host2.eth0 = DeviceReq(label="net1", driver=RecipeParam("driver"))

    src_addr = StrParam(default="192.168.1.1/24")
    dst_addr = StrParam(default="192.168.1.2/24")
    count = IntParam(default=100)
    # fix: the default was the float 0.2 inside a StrParam; keep it a
    # string for consistency with the parameter's declared type
    interval = StrParam(default="0.2")
    size = IntParam(mandatory=False)
    mtu = IntParam(mandatory=False)

    def test(self):
        """Address the NICs, optionally set MTU, then run and evaluate the ping."""
        host1, host2 = self.matched.host1, self.matched.host2

        host1.eth0.ip_add(ipaddress(self.params.src_addr))
        host2.eth0.ip_add(ipaddress(self.params.dst_addr))

        # MTU is optional; only apply it when the user supplied one
        if "mtu" in self.params:
            host1.eth0.mtu = self.params.mtu
            host2.eth0.mtu = self.params.mtu

        host1.eth0.up()
        host2.eth0.up()

        ip1 = host1.eth0.ips[0]
        ip2 = host2.eth0.ips[0]
        cn = self.params.count
        iv = self.params.interval
        # packet size is optional as well
        sz = self.params.size if "size" in self.params else None

        pcfg = PingConf(host1, ip1, host2, ip2, count=cn, interval=iv,
                        size=sz)
        result = self.ping_test([pcfg])
        self.ping_evaluate_and_report(pcfg, result)
class MyRecipe(BaseRecipe):
    """Example recipe demonstrating ping/netperf jobs and the creation and
    destruction of every kind of soft device supported by LNST.
    """
    m1 = HostReq()
    m1.eth0 = DeviceReq(label="net1")
    m2 = HostReq()
    m2.eth0 = DeviceReq(label="net1")

    def test(self):
        """Run a ping and a netperf measurement, then cycle through soft devices."""
        self.matched.m1.eth0.ip_add(ipaddress("192.168.1.1/24"))
        self.matched.m1.eth0.up()
        self.matched.m2.eth0.ip_add(ipaddress("192.168.1.2/24"))
        self.matched.m2.eth0.up()
        ping_job = self.matched.m1.run(
            Ping(dst=self.matched.m2.eth0, interval=0,
                 iface=self.matched.m1.eth0))
        # netserver runs in the background while netperf connects to it
        netserver_job = self.matched.m1.run(
            Netserver(bind=self.matched.m1.eth0), bg=True)
        netperf_job = self.matched.m2.run(
            Netperf(server=self.matched.m1.eth0,
                    duration=1,
                    confidence="99,5",
                    runs="5",
                    debug=0,
                    max_deviation={
                        'type': "percent",
                        'value': 20.0
                    },
                    testname="TCP_STREAM"))
        # stop the background netserver once the measurement has finished
        netserver_job.kill(signal=signal.SIGINT)

        # examples of how to create soft devices
        self.matched.m1.eth0.down()
        m1 = self.matched.m1
        eth0 = m1.eth0

        # Bonding
        m1.bond = BondDevice(mode="active-backup", name="my_bond0")
        m1.bond.slave_add(eth0)
        m1.bond.up()
        m1.run("ip a")
        m1.bond.destroy()

        # Bridging
        m1.br = BridgeDevice()
        m1.br.slave_add(eth0)
        m1.br.up()
        m1.run("ip a")
        m1.br.destroy()

        # Teaming
        m1.team = TeamDevice()
        m1.team.slave_add(eth0)
        m1.team.up()
        m1.run("ip a")
        m1.team.destroy()

        # VethPair
        m1.veth0, m1.veth1 = VethPair()
        m1.veth0.up()
        m1.veth1.up()
        m1.run("ip a")
        # destroying one end of the pair removes both
        m1.veth0.destroy()

        # Macvlan
        m1.mvlan = MacvlanDevice(realdev=eth0)
        m1.mvlan.up()
        m1.run("ip a")
        m1.mvlan.destroy()

        # Vlan -- the real device must be up for the vlan to work
        eth0.up()
        m1.vlan = VlanDevice(realdev=eth0, vlan_id=123)
        m1.vlan.up()
        m1.run("ip a")
        m1.vlan.destroy()
        eth0.down()

        # Vti
        m1.vti = VtiDevice(local="1.2.3.4", ikey=123, okey=321)
        m1.vti.up()
        m1.run("ip a")
        m1.vti.destroy()

        # Vxlan
        m1.vxlan0 = VxlanDevice(vxlan_id=123, remote='1.2.3.4')
        m1.vxlan0.up()
        self.matched.m1.run("ip a")
        m1.vxlan0.destroy()
class VhostNetPvPRecipe(BasePvPRecipe):
    """PvP (physical-virtual-physical) recipe using vhost-net: a TRex
    generator host sends traffic through a DUT host that bridges it into a
    guest, which forwards it back.
    """
    generator_req = HostReq()
    generator_req.eth0 = DeviceReq(label="net1", driver=RecipeParam("driver"))
    generator_req.eth1 = DeviceReq(label="net1", driver=RecipeParam("driver"))

    host_req = HostReq(with_guest="yes")
    host_req.eth0 = DeviceReq(label="net1", driver=RecipeParam("driver"))
    host_req.eth1 = DeviceReq(label="net1", driver=RecipeParam("driver"))

    host1_dpdk_cores = StrParam(mandatory=True)
    # The CPUs used by vhost-net kernel threads
    vhost_cpus = StrParam(mandatory=True)

    # TODO: Study the possibility of adding more forwarding engines
    # like xdp or tc
    guest_fwd = StrParam(default='bridge')
    host_fwd = StrParam(default='bridge')

    guest_macs = Param(default=['02:fa:fe:fa:fe:01', '02:fa:fe:fa:fe:02'])

    generator_dpdk_cores = StrParam(mandatory=True)

    cpu_perf_tool = Param(default=StatCPUMeasurement)

    def test(self):
        self.check_params()
        self.warmup(self.gen_ping_config())
        config = VhostPvPTestConf()
        self.pvp_test(config)

    def check_params(self):
        """Validate that the emulatorpin CPU range contains all vhost CPUs.

        Raises ParamError when a vhost CPU falls outside the range.
        """
        emulator_min, emulator_max = self.params.guest_emulatorpin_cpu.split(
            '-')
        vhost_cpus = self.params.vhost_cpus.split(',')
        for vcpu in vhost_cpus:
            # fix: compare numerically -- the original compared the raw
            # strings, which orders lexicographically ("10" < "9")
            if int(vcpu) > int(emulator_max) or int(vcpu) < int(emulator_min):
                raise ParamError("Emulator pin must contain vhost cpus")

    def gen_ping_config(self):
        """Return (source host, source dev, destination dev) warmup ping tuples."""
        return [(self.matched.generator_req, self.matched.generator_req.eth0,
                 self.matched.host_req.eth0),
                (self.matched.generator_req, self.matched.generator_req.eth1,
                 self.matched.host_req.eth1),
                (self.matched.host_req, self.matched.host_req.eth0,
                 self.matched.generator_req.eth0),
                # fix: the destination was host_req.eth1 (the source device
                # itself); the symmetric entries show it must be the
                # generator's eth1
                (self.matched.host_req, self.matched.host_req.eth1,
                 self.matched.generator_req.eth1)]

    def test_wide_configuration(self, config):
        """Configure generator (DPDK), DUT (bridges + vhost) and the guest."""
        config.generator.host = self.matched.generator_req
        config.generator.nics.append(self.matched.generator_req.eth0)
        config.generator.nics.append(self.matched.generator_req.eth1)
        self.matched.generator_req.eth0.ip_add(ipaddress("192.168.1.1/24"))
        self.matched.generator_req.eth1.ip_add(ipaddress("192.168.1.2/24"))
        self.matched.generator_req.eth0.up()
        self.matched.generator_req.eth1.up()
        self.base_dpdk_configuration(config.generator)

        config.dut.host = self.matched.host_req
        config.dut.nics.append(self.matched.host_req.eth0)
        config.dut.nics.append(self.matched.host_req.eth1)
        self.matched.host_req.eth0.up()
        self.matched.host_req.eth1.up()
        self.host_forwarding_configuration(config.dut)

        self.init_guest_virtctl(config.dut, config.guest)
        # the guest must be down while its XML is being edited
        self.shutdown_guest(config.guest)
        self.configure_guest_xml(config.dut, config.guest)

        self.create_guest(config.dut, config.guest)
        self.guest_forwarding(config.guest)

        self.host_forwarding_vm_configuration(config.dut, config.guest)

        return config

    def generate_perf_config(self, config):
        """Build the TRex flow measurements plus CPU measurements."""
        flows = []
        for i in range(0, min(len(config.generator.nics),
                              len(config.guest.nics))):
            src_nic = config.generator.nics[i]
            src_ip = src_nic.ips[0]
            dst_nic = config.guest.nics[i]
            # traffic loops back to the generator's *other* port
            dst_ip = config.generator.nics[((i + 1) %
                                            len(config.generator.nics))].ips[0]

            src_bind = dict(mac_addr=src_nic.hwaddr,
                            pci_addr=src_nic.bus_info,
                            ip_addr=src_ip)
            dst_bind = dict(mac_addr=dst_nic.hwaddr,
                            pci_addr=dst_nic.bus_info,
                            ip_addr=dst_ip)
            flows.append(
                PerfFlow(type="pvp_loop_rate",
                         generator=config.generator.host,
                         generator_bind=src_bind,
                         receiver=config.guest.host,
                         receiver_bind=dst_bind,
                         msg_size=self.params.perf_msg_size,
                         duration=self.params.perf_duration,
                         parallel_streams=self.params.perf_streams,
                         cpupin=None))

        return PerfRecipeConf(
            measurements=[
                self.params.cpu_perf_tool([
                    config.generator.host, config.dut.host, config.guest.host
                ]),
                TRexFlowMeasurement(
                    flows,
                    self.params.trex_dir,
                    self.params.host1_dpdk_cores.split(","),
                ),
            ],
            iterations=self.params.perf_iterations,
        )

    def test_wide_deconfiguration(self, config):
        """Best-effort teardown: every step is attempted even if earlier ones fail."""
        try:
            self.guest_deconfigure(config.guest)
        except:
            log_exc_traceback()
        try:
            self.host_forwarding_vm_deconfiguration(config.dut, config.guest)
        except:
            log_exc_traceback()
        try:
            self.host_forwarding_deconfiguration(config.dut)
        except:
            log_exc_traceback()
        try:
            self.base_dpdk_deconfiguration(config.generator)
        except:
            log_exc_traceback()
        try:
            # returning the guest to the original running state
            self.shutdown_guest(config.guest)
            if config.guest.virtctl:
                config.guest.virtctl.vm_start(config.guest.name)
        except:
            log_exc_traceback()
        try:
            config.generator.host.run("service irqbalance start")
        except:
            log_exc_traceback()

    def host_forwarding_vm_configuration(self, host_conf, guest_conf):
        """
        VM - specific forwarding configuration
        Pin vhost-net kernel threads to the cpus specfied by vhost_cpus param
        """
        # Get a comma separated list of the vhost-net kernel threads' PIDs
        vhost_pids = host_conf.host.run(
            """ ps --ppid 2 | grep "vhost-$(pidof qemu-kvm)" """
            """ | awk '{if (length(pidstring) == 0) { """
            """ pidstring=$1 """
            """ } else { """
            """ pidstring = sprintf("%s,%s", pidstring, $1) """
            """ }}; """
            """ END{ print pidstring }'""")

        # pin each vhost thread to its corresponding CPU (pairing is by
        # position in the two comma-separated lists)
        for pid, cpu in zip(vhost_pids.stdout.strip().split(','),
                            self.params.vhost_cpus.split(',')):
            mask = 1 << int(cpu)
            host_conf.host.run('taskset -p {:x} {}'.format(mask, pid))

    def host_forwarding_vm_deconfiguration(self, host_conf, guest_conf):
        """
        VM - specific forwarding deconfiguration
        """
        pass

    def host_forwarding_configuration(self, host_conf):
        """Bridge each DUT NIC so guest traffic can be forwarded through it."""
        if (self.params.host_fwd == 'bridge'):
            host_conf.bridges = []
            host_conf.host.br0 = BridgeDevice()
            host_conf.host.br1 = BridgeDevice()
            host_conf.host.br0.slave_add(host_conf.nics[0])
            host_conf.host.br1.slave_add(host_conf.nics[1])
            host_conf.host.br0.up()
            host_conf.host.br1.up()
            host_conf.bridges.append(host_conf.host.br0)
            host_conf.bridges.append(host_conf.host.br1)
        else:
            # TBD: other forwarding engines (xdp, tc, ...)
            return

    def host_forwarding_deconfiguration(self, host_conf):
        """Detach the DUT NICs from their bridges."""
        if (self.params.host_fwd == 'bridge'):
            if host_conf.host.br0:
                host_conf.host.br0.slave_del(host_conf.nics[0])
            if host_conf.host.br1:
                host_conf.host.br1.slave_del(host_conf.nics[1])
        else:
            # TBD: other forwarding engines (xdp, tc, ...)
            return

    def configure_guest_xml(self, host_conf, guest_conf):
        """Add one vhost-net interface per DUT NIC to the guest's libvirt XML."""
        # NOTE(review): the init_guest_xml() result is immediately replaced
        # by the XML fetched from virtctl -- kept for its side effects;
        # confirm whether the first assignment is still needed
        guest_xml = self.init_guest_xml(guest_conf)
        virtctl = guest_conf.virtctl
        guest_xml = ET.fromstring(virtctl.vm_XMLDesc(guest_conf.name))
        guest_conf.libvirt_xml = guest_xml
        guest_conf.virtio_devs = []
        for i, nic in enumerate(host_conf.nics):
            self._xml_add_vhostnet_dev(guest_xml,
                                       "vhostnet-{i}".format(i=i),
                                       host_conf.bridges[i],
                                       self.params.guest_macs[i])
            vhost_device = VirtioDevice(
                VirtioType.VHOST_NET,
                self.params.guest_macs[i],
                config={"bridge": host_conf.bridges[i]})
            guest_conf.virtio_devs.append(vhost_device)
        return guest_xml

    def guest_forwarding(self, guest_conf):
        """Bridge the guest's NICs together and enable IPv4 forwarding."""
        guest = guest_conf.host
        if (self.params.guest_fwd == 'bridge'):
            guest.bridge = BridgeDevice()
            guest.bridge.name = 'guestbr0'
            for nic in guest_conf.nics:
                guest.bridge.slave_add(nic)
                nic.up()
        guest.run("echo 1 > /proc/sys/net/ipv4/ip_forward")

    def guest_deconfigure(self, guest_conf):
        if guest_conf.host:
            guest_conf.host.run("echo 0 > /proc/sys/net/ipv4/ip_forward")

    def _xml_add_vhostnet_dev(self, guest_xml, name, bridge, mac_addr):
        """Append a virtio/vhost bridge interface element to the domain XML."""
        devices = guest_xml.find("devices")

        interface = ET.SubElement(devices, 'interface', type='bridge')
        ET.SubElement(interface, 'source', bridge=str(bridge.name))
        ET.SubElement(interface, 'mac', address=str(mac_addr))
        ET.SubElement(interface, 'model', type='virtio')
        ET.SubElement(interface, 'driver', name='vhost')
        # TODO: Add driver suboptions

        return guest_xml
class VirtOvsVxlanRecipe(VlanPingEvaluatorMixin, CommonHWSubConfigMixin,
                         VirtualEnrtRecipe):
    """Enrt recipe with two hosts, each running two guests attached through
    taps to a per-host OvS bridge; guest traffic is tunneled between the
    hosts over VXLAN, with OpenFlow rules mapping tap ports to tunnel ids
    100 and 200 so only guest pairs (1,3) and (2,4) can reach each other.
    """
    host1 = HostReq()
    host1.eth0 = DeviceReq(label="to_switch", driver=RecipeParam("driver"))
    host1.tap0 = DeviceReq(label="to_guest1")
    host1.tap1 = DeviceReq(label="to_guest2")

    host2 = HostReq()
    host2.eth0 = DeviceReq(label="to_switch", driver=RecipeParam("driver"))
    host2.tap0 = DeviceReq(label="to_guest3")
    host2.tap1 = DeviceReq(label="to_guest4")

    guest1 = HostReq()
    guest1.eth0 = DeviceReq(label="to_guest1")

    guest2 = HostReq()
    guest2.eth0 = DeviceReq(label="to_guest2")

    guest3 = HostReq()
    guest3.eth0 = DeviceReq(label="to_guest3")

    guest4 = HostReq()
    guest4.eth0 = DeviceReq(label="to_guest4")

    def test_wide_configuration(self):
        """Build the OvS bridges, VXLAN tunnels, flow rules and guest addressing."""
        host1, host2, guest1, guest2, guest3, guest4 = (self.matched.host1,
            self.matched.host2, self.matched.guest1, self.matched.guest2,
            self.matched.guest3, self.matched.guest4)
        for host in [host1, host2]:
            host.eth0.down()
            host.tap0.down()
            host.tap1.down()
        for guest in [guest1, guest2, guest3, guest4]:
            guest.eth0.down()

        net_addr = "192.168.2"          # underlay between the hosts
        vxlan_net_addr = "192.168.100"  # overlay used by the guests
        vxlan_net_addr6 = "fc00:0:0:0"

        # ofports: 5 = tap0, 6 = tap1, 10 = vxlan tunnel; tun_id separates
        # the two guest pairs
        flow_entries = []
        flow_entries.append("table=0,in_port=5,actions=set_field:100->"
                            "tun_id,output:10")
        flow_entries.append("table=0,in_port=6,actions=set_field:200->"
                            "tun_id,output:10")
        flow_entries.append("table=0,in_port=10,tun_id=100,actions="
                            "output:5")
        flow_entries.append("table=0,in_port=10,tun_id=200,actions="
                            "output:6")
        flow_entries.append("table=0,priority=100,actions=drop")

        configuration = super().test_wide_configuration()
        configuration.test_wide_devices = [
            host1.eth0, host2.eth0, guest1.eth0, guest2.eth0, guest3.eth0,
            guest4.eth0
        ]

        for i, host in enumerate([host1, host2]):
            host.eth0.ip_add(ipaddress(net_addr + "."
                                       + str(i + 1) + "/24"))
            host.br0 = OvsBridgeDevice()
            for dev, ofport_r in [(host.tap0, '5'), (host.tap1, '6')]:
                host.br0.port_add(
                    device=dev,
                    interface_options={'ofport_request': ofport_r})
            # remote_ip points at the *other* host (.2 for i=0, .1 for i=1)
            tunnel_opts = {
                "option:remote_ip": net_addr + "." + str(2 - i),
                "option:key": "flow",
                "ofport_request": '10'
            }
            host.br0.tunnel_add("vxlan", tunnel_opts)
            host.br0.flows_add(flow_entries)
            for dev in [host.eth0, host.tap0, host.tap1, host.br0]:
                dev.up()

        for i, guest in enumerate([guest1, guest2, guest3, guest4]):
            guest.eth0.ip_add(
                ipaddress(vxlan_net_addr + "." + str(i + 1) + "/24"))
            guest.eth0.ip_add(
                ipaddress(vxlan_net_addr6 + "::" + str(i + 1) + "/64"))
            guest.eth0.up()

        self.wait_tentative_ips(configuration.test_wide_devices)
        return configuration

    def generate_test_wide_description(self, config):
        """Describe the IPs plus each bridge's ports, tunnels and flows."""
        host1, host2 = self.matched.host1, self.matched.host2
        desc = super().generate_test_wide_description(config)
        desc += [
            "\n".join([
                "Configured {}.{}.ips = {}".format(dev.host.hostid, dev.name,
                                                   dev.ips)
                for dev in config.test_wide_devices
            ]),
            "\n".join([
                "Configured {}.{}.ports = {}".format(dev.host.hostid,
                                                     dev.name, dev.ports)
                for dev in [host1.br0, host2.br0]
            ]),
            "\n".join([
                "Configured {}.{}.tunnels = {}".format(dev.host.hostid,
                                                       dev.name, dev.tunnels)
                for dev in [host1.br0, host2.br0]
            ]),
            "\n".join([
                "Configured {}.{}.flows = {}".format(dev.host.hostid,
                                                     dev.name, dev.flows_str)
                for dev in [host1.br0, host2.br0]
            ])
        ]
        return desc

    def test_wide_deconfiguration(self, config):
        # drop our device references before the mixins deconfigure
        del config.test_wide_devices
        super().test_wide_deconfiguration(config)

    def generate_ping_endpoints(self, config):
        """All guest pairs; only (guest1,guest3) and (guest2,guest4) share a tun_id
        and are therefore expected to be reachable."""
        guest1, guest2, guest3, guest4 = (self.matched.guest1,
            self.matched.guest2, self.matched.guest3, self.matched.guest4)
        devs = [guest1.eth0, guest2.eth0, guest3.eth0, guest4.eth0]
        dev_combinations = combinations(devs, 2)
        return [
            PingEndpoints(comb[0], comb[1],
                          reachable=((comb[0].host, comb[1].host) in [
                              (guest1, guest3), (guest2, guest4)
                          ]))
            for comb in dev_combinations
        ]

    def generate_perf_endpoints(self, config):
        """Measure between one reachable guest pair only."""
        return [(self.matched.guest1.eth0, self.matched.guest3.eth0)]

    @property
    def pause_frames_dev_list(self):
        return [self.matched.host1.eth0, self.matched.host2.eth0]

    @property
    def mtu_hw_config_dev_list(self):
        return [
            self.matched.guest1.eth0, self.matched.guest2.eth0,
            self.matched.guest3.eth0, self.matched.guest4.eth0
        ]

    @property
    def dev_interrupt_hw_config_dev_list(self):
        return [self.matched.host1.eth0, self.matched.host2.eth0]

    @property
    def parallel_stream_qdisc_hw_config_dev_list(self):
        return [self.matched.host1.eth0, self.matched.host2.eth0]
class VirtualOvsBridgeVlanInGuestMirroredRecipe(CommonHWSubConfigMixin,
                                                OffloadSubConfigMixin,
                                                VirtualEnrtRecipe):
    """Enrt recipe with a guest on each of two hosts; each host bridges its
    NIC and the guest tap with OvS, and each guest tags its traffic with
    VLAN 10 (the same vlan id on both sides, hence "mirrored").
    """
    host1 = HostReq()
    host1.eth0 = DeviceReq(label="to_switch", driver=RecipeParam("driver"))
    host1.tap0 = DeviceReq(label="to_guest1")

    host2 = HostReq()
    host2.eth0 = DeviceReq(label="to_switch", driver=RecipeParam("driver"))
    host2.tap0 = DeviceReq(label="to_guest2")

    guest1 = HostReq()
    guest1.eth0 = DeviceReq(label="to_guest1")

    guest2 = HostReq()
    guest2.eth0 = DeviceReq(label="to_guest2")

    # offload feature matrix exercised by OffloadSubConfigMixin
    offload_combinations = Param(
        default=(dict(gro="on", gso="on", tso="on", tx="on", rx="on"),
                 dict(gro="off", gso="on", tso="on", tx="on", rx="on"),
                 dict(gro="on", gso="off", tso="off", tx="on", rx="on"),
                 dict(gro="on", gso="on", tso="off", tx="off", rx="on"),
                 dict(gro="on", gso="on", tso="on", tx="on", rx="off")))

    def test_wide_configuration(self):
        """Build per-host OvS bridges and per-guest VLAN 10 devices with IPs."""
        host1, host2, guest1, guest2 = (self.matched.host1,
                                        self.matched.host2,
                                        self.matched.guest1,
                                        self.matched.guest2)
        for host in [host1, host2]:
            host.eth0.down()
            host.tap0.down()
            host.br0 = OvsBridgeDevice()
            for dev in [host.eth0, host.tap0]:
                host.br0.port_add(device=dev)
        guest1.eth0.down()
        guest2.eth0.down()

        # same vlan id on both guests -- traffic is expected to pass
        guest1.vlan0 = VlanDevice(realdev=guest1.eth0, vlan_id=10)
        guest2.vlan0 = VlanDevice(realdev=guest2.eth0, vlan_id=10)

        configuration = super().test_wide_configuration()
        configuration.test_wide_devices = [guest1.vlan0, guest2.vlan0]

        net_addr_1 = "192.168.10"
        net_addr6_1 = "fc00:0:0:1"
        for i, guest in enumerate([guest1, guest2]):
            guest.vlan0.ip_add(ipaddress(net_addr_1 + "."
                                         + str(i + 3) + "/24"))
            guest.vlan0.ip_add(
                ipaddress(net_addr6_1 + "::" + str(i + 3) + "/64"))

        for host in [host1, host2]:
            for dev in [host.eth0, host.tap0, host.br0]:
                dev.up()
        for guest in [guest1, guest2]:
            guest.eth0.up()
            guest.vlan0.up()

        # NOTE(review): perf_tool_cpu is cleared here, apparently because
        # CPU pinning is not applicable in this virtualized setup
        if "perf_tool_cpu" in self.params:
            logging.info("'perf_tool_cpu' param (%d) to be set to None" %
                         self.params.perf_tool_cpu)
            self.params.perf_tool_cpu = None

        self.wait_tentative_ips(configuration.test_wide_devices)
        return configuration

    def generate_test_wide_description(self, config):
        """Describe vlan IPs, vlan ids, real devices and bridge ports."""
        host1, host2 = self.matched.host1, self.matched.host2
        desc = super().generate_test_wide_description(config)
        desc += [
            "\n".join([
                "Configured {}.{}.ips = {}".format(dev.host.hostid, dev.name,
                                                   dev.ips)
                for dev in config.test_wide_devices
            ]),
            "\n".join([
                "Configured {}.{}.vlan_id = {}".format(dev.host.hostid,
                                                       dev.name, dev.vlan_id)
                for dev in config.test_wide_devices
            ]),
            "\n".join([
                "Configured {}.{}.realdev = {}".format(
                    dev.host.hostid, dev.name,
                    '.'.join([dev.host.hostid, dev.realdev.name]))
                for dev in config.test_wide_devices
            ]),
            "\n".join([
                "Configured {}.{}.ports = {}".format(dev.host.hostid,
                                                     dev.name, dev.ports)
                for dev in [host1.br0, host2.br0]
            ])
        ]
        return desc

    def test_wide_deconfiguration(self, config):
        # drop our device references before the mixins deconfigure
        del config.test_wide_devices
        super().test_wide_deconfiguration(config)

    def generate_ping_endpoints(self, config):
        return [
            PingEndpoints(self.matched.guest1.vlan0,
                          self.matched.guest2.vlan0)
        ]

    def generate_perf_endpoints(self, config):
        return [(self.matched.guest1.vlan0, self.matched.guest2.vlan0)]

    @property
    def offload_nics(self):
        return [
            self.matched.host1.eth0, self.matched.host2.eth0,
            self.matched.guest1.eth0, self.matched.guest2.eth0
        ]

    @property
    def mtu_hw_config_dev_list(self):
        host1, host2, guest1, guest2 = (self.matched.host1,
                                        self.matched.host2,
                                        self.matched.guest1,
                                        self.matched.guest2)
        result = []
        for host in [host1, host2]:
            for dev in [host.eth0, host.tap0, host.br0]:
                result.append(dev)
        for guest in [guest1, guest2]:
            result.extend([guest.eth0, guest.vlan0])
        return result

    @property
    def dev_interrupt_hw_config_dev_list(self):
        return [self.matched.host1.eth0, self.matched.host2.eth0]

    @property
    def parallel_stream_qdisc_hw_config_dev_list(self):
        return [self.matched.host1.eth0, self.matched.host2.eth0]
class DoubleTeamRecipe(CommonHWSubConfigMixin, OffloadSubConfigMixin,
                       BaseEnrtRecipe):
    """Enrt recipe with a team device on each of two hosts, each team
    enslaving two NICs attached to the same network.
    """
    host1 = HostReq()
    host1.eth0 = DeviceReq(label="tnet", driver=RecipeParam("driver"))
    host1.eth1 = DeviceReq(label="tnet", driver=RecipeParam("driver"))

    host2 = HostReq()
    host2.eth0 = DeviceReq(label="tnet", driver=RecipeParam("driver"))
    host2.eth1 = DeviceReq(label="tnet", driver=RecipeParam("driver"))

    # offload feature matrix exercised by OffloadSubConfigMixin
    offload_combinations = Param(default=(
        dict(gro="on", gso="on", tso="on", tx="on"),
        dict(gro="off", gso="on", tso="on", tx="on"),
        dict(gro="on", gso="off", tso="off", tx="on"),
        dict(gro="on", gso="on", tso="off", tx="off")))

    perf_reverse = BoolParam(default=True)
    runner_name = StrParam(mandatory=True)

    def test_wide_configuration(self):
        """Create team0 on each host, enslave both NICs and assign addresses."""
        host1, host2 = self.matched.host1, self.matched.host2

        net_addr_1 = "192.168.10"
        net_addr6_1 = "fc00:0:0:1"
        for i, host in enumerate([host1, host2]):
            # The config argument needs to be used with a team device
            # normally (e.g to specify the runner mode), but it is not used
            # here due to a bug in the TeamDevice module
            host.team0 = TeamDevice()
            for dev in [host.eth0, host.eth1]:
                # slaves must be down before being enslaved
                dev.down()
                host.team0.slave_add(dev)
            host.team0.ip_add(ipaddress(net_addr_1 + "."
                                        + str(i+1) + "/24"))
            host.team0.ip_add(ipaddress(net_addr6_1 + "::" + str(i+1) +
                                        "/64"))
            for dev in [host.eth0, host.eth1, host.team0]:
                dev.up()

        configuration = super().test_wide_configuration()
        configuration.test_wide_devices = [host1.team0, host2.team0]

        self.wait_tentative_ips(configuration.test_wide_devices)
        return configuration

    def generate_test_wide_description(self, config):
        """Describe team IPs, slaves and runner configuration."""
        host1, host2 = self.matched.host1, self.matched.host2
        desc = super().generate_test_wide_description(config)
        desc += [
            "\n".join([
                "Configured {}.{}.ips = {}".format(
                    dev.host.hostid, dev.name, dev.ips
                )
                for dev in config.test_wide_devices
            ]),
            "\n".join([
                "Configured {}.{}.slaves = {}".format(
                    dev.host.hostid, dev.name,
                    ['.'.join([dev.host.hostid, slave.name])
                     for slave in dev.slaves]
                )
                for dev in config.test_wide_devices
            ]),
            "\n".join([
                "Configured {}.{}.runner_name = {}".format(
                    dev.host.hostid, dev.name, dev.config
                )
                for dev in config.test_wide_devices
            ])
        ]
        return desc

    def test_wide_deconfiguration(self, config):
        # drop our device references before the mixins deconfigure
        del config.test_wide_devices
        super().test_wide_deconfiguration(config)

    def generate_ping_endpoints(self, config):
        """Ping in both directions between the two team devices."""
        return [(self.matched.host1.team0, self.matched.host2.team0),
                (self.matched.host2.team0, self.matched.host1.team0)]

    def generate_perf_endpoints(self, config):
        """Measure in both directions between the two team devices."""
        return [(self.matched.host1.team0, self.matched.host2.team0),
                (self.matched.host2.team0, self.matched.host1.team0)]

    def wait_tentative_ips(self, devices):
        # local override: poll until no IPv6 address is tentative (DAD done)
        def condition():
            return all(
                [not ip.is_tentative for dev in devices for ip in dev.ips]
            )

        self.ctl.wait_for_condition(condition, timeout=5)

    @property
    def offload_nics(self):
        return [self.matched.host1.team0, self.matched.host2.team0]

    @property
    def mtu_hw_config_dev_list(self):
        return [self.matched.host1.team0, self.matched.host2.team0]

    @property
    def coalescing_hw_config_dev_list(self):
        host1, host2 = self.matched.host1, self.matched.host2
        return [host1.eth0, host1.eth1, host2.eth0, host2.eth1]

    @property
    def dev_interrupt_hw_config_dev_list(self):
        host1, host2 = self.matched.host1, self.matched.host2
        return [host1.eth0, host1.eth1, host2.eth0, host2.eth1]

    @property
    def parallel_stream_qdisc_hw_config_dev_list(self):
        host1, host2 = self.matched.host1, self.matched.host2
        return [host1.eth0, host1.eth1, host2.eth0, host2.eth1]
class OvSDPDKPvPRecipe(BasePvPRecipe):
    """PvP recipe using an OvS-DPDK bridge on the DUT: a TRex generator
    pushes traffic through OvS dpdk ports into a guest running testpmd,
    which loops it back.
    """
    m1 = HostReq()
    m1.eth0 = DeviceReq(label="net1", driver=RecipeParam("driver"))
    m1.eth1 = DeviceReq(label="net1", driver=RecipeParam("driver"))

    m2 = HostReq(with_guest="yes")
    m2.eth0 = DeviceReq(label="net1", driver=RecipeParam("driver"))
    m2.eth1 = DeviceReq(label="net1", driver=RecipeParam("driver"))

    guest_dpdk_cores = StrParam(mandatory=True)
    guest_testpmd_cores = StrParam(mandatory=True)

    host2_pmd_cores = StrParam(mandatory=True)
    host2_l_cores = StrParam(mandatory=True)
    socket_mem = IntParam(default=2048)

    cpu_perf_tool = Param(default=StatCPUMeasurement)

    perf_duration = IntParam(default=60)
    perf_iterations = IntParam(default=5)
    perf_msg_size = IntParam(default=64)

    # doesn't do anything for now...
    perf_streams = IntParam(default=1)

    def test(self):
        self.check_dependencies()
        ping_config = self.gen_ping_config()
        self.warmup(ping_config)
        config = OVSPvPTestConf()
        self.pvp_test(config)

    def check_dependencies(self):
        # placeholder for environment/dependency validation
        pass

    def gen_ping_config(self):
        """Return (source host, source dev, destination dev) warmup ping tuples."""
        return [(self.matched.m1, self.matched.m1.eth0, self.matched.m2.eth0),
                (self.matched.m1, self.matched.m1.eth1, self.matched.m2.eth1),
                (self.matched.m2, self.matched.m2.eth0, self.matched.m1.eth0),
                # fix: the destination was m2.eth1 (the source device
                # itself); the symmetric entries show it must be m1.eth1
                (self.matched.m2, self.matched.m2.eth1, self.matched.m1.eth1)]

    def test_wide_configuration(self, config):
        """Configure generator DPDK, DUT OvS-DPDK bridge and guest testpmd."""
        config.generator.host = self.matched.m1
        config.generator.nics.append(self.matched.m1.eth0)
        config.generator.nics.append(self.matched.m1.eth1)
        self.matched.m1.eth0.ip_add(ipaddress("192.168.1.1/24"))
        self.matched.m1.eth1.ip_add(ipaddress("192.168.1.3/24"))
        self.base_dpdk_configuration(config.generator)

        config.dut.host = self.matched.m2
        config.dut.nics.append(self.matched.m2.eth0)
        config.dut.nics.append(self.matched.m2.eth1)
        self.matched.m2.eth0.ip_add(ipaddress("192.168.1.2/24"))
        self.matched.m2.eth1.ip_add(ipaddress("192.168.1.4/24"))
        self.base_dpdk_configuration(config.dut)
        self.ovs_dpdk_bridge_configuration(config.dut)

        self.init_guest_virtctl(config.dut, config.guest)
        # the guest must be down while its XML is being edited
        self.shutdown_guest(config.guest)
        self.configure_guest_xml(config.dut, config.guest)

        self.ovs_dpdk_bridge_vm_configuration(config.dut, config.guest)
        self.ovs_dpdk_bridge_flow_configuration(config.dut)

        guest = self.create_guest(config.dut, config.guest)
        self.guest_vfio_modprobe(config.guest)
        self.base_dpdk_configuration(config.guest)

        # testpmd loops traffic back to the generator's MACs
        config.guest.testpmd = guest.run(
            TestPMD(coremask=self.params.guest_testpmd_cores,
                    pmd_coremask=self.params.guest_dpdk_cores,
                    nics=[nic.bus_info for nic in config.guest.nics],
                    peer_macs=[nic.hwaddr for nic in config.generator.nics]),
            bg=True)

        # give testpmd a moment to initialize before measuring
        time.sleep(5)
        return config

    def generate_perf_config(self, config):
        """Build the TRex flow measurements plus CPU measurements."""
        flows = []
        for src_nic, dst_nic in zip(config.generator.nics, config.dut.nics):
            src_bind = dict(mac_addr=src_nic.hwaddr,
                            pci_addr=src_nic.bus_info,
                            ip_addr=src_nic.ips[0])
            dst_bind = dict(mac_addr=dst_nic.hwaddr,
                            pci_addr=dst_nic.bus_info,
                            ip_addr=dst_nic.ips[0])
            flows.append(
                PerfFlow(type="pvp_loop_rate",
                         generator=config.generator.host,
                         generator_bind=src_bind,
                         receiver=config.dut.host,
                         receiver_bind=dst_bind,
                         msg_size=self.params.perf_msg_size,
                         duration=self.params.perf_duration,
                         parallel_streams=self.params.perf_streams,
                         cpupin=None))

        return PerfRecipeConf(measurements=[
            self.params.cpu_perf_tool(
                [config.generator.host, config.dut.host, config.guest.host]),
            TRexFlowMeasurement(flows, self.params.trex_dir)
        ],
                              iterations=self.params.perf_iterations)

    def test_wide_deconfiguration(self, config):
        """Best-effort teardown: every step is attempted even if earlier ones fail."""
        try:
            self.guest_deconfigure(config.guest)
        except:
            log_exc_traceback()

        try:
            config.dut.host.run("ovs-ofctl del-flows br0")
            for vm_port, port_id in config.dut.vm_ports:
                config.dut.host.run(
                    "ovs-vsctl del-port br0 {}".format(vm_port))
            for dpdk_port, port_id in config.dut.dpdk_ports:
                config.dut.host.run(
                    "ovs-vsctl del-port br0 {}".format(dpdk_port))
            config.dut.host.run("ovs-vsctl del-br br0")
            config.dut.host.run("service openvswitch restart")

            self.base_dpdk_deconfiguration(config.dut, ["openvswitch"])
        except:
            log_exc_traceback()

        try:
            # returning the guest to the original running state
            self.shutdown_guest(config.guest)
            config.guest.virtctl.vm_start(config.guest.name)
        except:
            log_exc_traceback()

        try:
            for nic in config.generator.nics:
                config.generator.host.run(
                    "driverctl unset-override {}".format(nic.bus_info))
            config.generator.host.run("service irqbalance start")
        except:
            log_exc_traceback()

    def ovs_dpdk_bridge_configuration(self, host_conf):
        """Enable OvS with DPDK and add the DUT NICs as dpdk ports on br0."""
        host = host_conf.host
        host.run("systemctl enable openvswitch")
        host.run("systemctl start openvswitch")
        host.run(
            "ovs-vsctl --no-wait set Open_vSwitch . other_config:dpdk-init=true"
        )
        host.run(
            "ovs-vsctl --no-wait set Open_vSwitch . other_config:dpdk-socket-mem={}"
            .format(self.params.socket_mem))
        host.run(
            "ovs-vsctl --no-wait set Open_vSwitch . other_config:pmd-cpu-mask={}"
            .format(self.params.host2_pmd_cores))
        host.run(
            "ovs-vsctl --no-wait set Open_vSwitch . other_config:dpdk-lcore-mask={}"
            .format(self.params.host2_l_cores))
        host.run("systemctl restart openvswitch")

        # TODO use an actual OvS Device object
        # TODO config.dut.nics.append(CachedRemoteDevice(m2.ovs))
        host.run("ovs-vsctl add-br br0 -- set bridge br0 datapath_type=netdev")

        host_conf.dpdk_ports = []
        for i, nic in enumerate(host_conf.nics):
            # dpdk port i gets ofport 1<i> (10, 11, ...)
            host.run("ovs-vsctl add-port br0 dpdk{i} -- "
                     "set interface dpdk{i} type=dpdk ofport_request=1{i} "
                     "options:dpdk-devargs={pci_addr}".format(
                         i=i, pci_addr=nic.bus_info))
            host_conf.dpdk_ports.append(
                ("dpdk{}".format(i), "1{}".format(i)))

    def configure_guest_xml(self, host_conf, guest_conf):
        """Add vhost-user interfaces and shared hugepage-backed NUMA memory
        to the guest's libvirt XML."""
        # Initialize guest XML
        guest_xml = self.init_guest_xml(guest_conf)

        guest_conf.virtio_devs = []
        for i, nic in enumerate(host_conf.nics):
            path = self._xml_add_vhostuser_dev(guest_xml,
                                               "vhost_nic{i}".format(i=i),
                                               nic.hwaddr)
            virtio_dev = VirtioDevice(VirtioType.VHOST_USER,
                                      str(nic.hwaddr),
                                      config={"path": path})
            guest_conf.virtio_devs.append(virtio_dev)

        cpu = guest_xml.find("cpu")
        numa = ET.SubElement(cpu, 'numa')
        # memAccess='shared' is required for vhost-user to work
        ET.SubElement(numa, 'cell', id='0', cpus='0',
                      memory=str(self.params.guest_mem_size), unit='KiB',
                      memAccess='shared')

        memoryBacking = ET.SubElement(guest_xml, "memoryBacking")
        hugepages = ET.SubElement(memoryBacking, "hugepages")
        ET.SubElement(hugepages, "page", size="2", unit="M", nodeset="0")

        return guest_xml

    def ovs_dpdk_bridge_vm_configuration(self, host_conf, guest_conf):
        """Add one dpdkvhostuserclient port per guest virtio device to br0."""
        host = host_conf.host
        host_conf.vm_ports = []
        for i, vhuser_nic in enumerate(guest_conf.virtio_devs):
            # guest port i gets ofport 2<i> (20, 21, ...)
            host.run("ovs-vsctl add-port br0 guest_nic{i} -- "
                     "set interface guest_nic{i} type=dpdkvhostuserclient "
                     "ofport_request=2{i} "
                     "options:vhost-server-path={path}".format(
                         i=i, path=vhuser_nic.config.get("path")))
            host_conf.vm_ports.append(
                ("guest_nic{}".format(i), "2{}".format(i)))

    def ovs_dpdk_bridge_flow_configuration(self, host_conf):
        """Install flows pairing each dpdk port with its guest port, both ways."""
        host = host_conf.host
        host.run("ovs-ofctl del-flows br0")
        for dpdk_port, vm_port in zip(host_conf.dpdk_ports,
                                      host_conf.vm_ports):
            host.run("ovs-ofctl add-flow br0 in_port={},action={}".format(
                dpdk_port[1], vm_port[1]))
            host.run("ovs-ofctl add-flow br0 in_port={},action={}".format(
                vm_port[1], dpdk_port[1]))

    def guest_vfio_modprobe(self, guest_conf):
        """Reload vfio with unsafe no-IOMMU mode so DPDK can bind in the guest."""
        guest = guest_conf.host
        guest.run("modprobe -r vfio_iommu_type1")
        guest.run("modprobe -r vfio")
        guest.run("modprobe vfio enable_unsafe_noiommu_mode=1")
        guest.run("modprobe vfio-pci")

    def guest_deconfigure(self, guest_conf):
        """Stop testpmd (if running) and undo the guest's DPDK configuration."""
        guest = guest_conf.host
        if not guest:
            return

        testpmd = guest_conf.testpmd
        if testpmd:
            testpmd.kill(signal.SIGINT)
            testpmd.wait()

        self.base_dpdk_deconfiguration(guest_conf)

    def _xml_add_vhostuser_dev(self, guest_xml, name, mac_addr):
        """Append a vhost-user interface element; return its socket path."""
        vhost_server_path = "/tmp/{}".format(name)
        devices = guest_xml.find("devices")

        interface = ET.SubElement(devices, 'interface', type='vhostuser')
        ET.SubElement(interface, 'mac', address=str(mac_addr))
        ET.SubElement(interface, 'model', type='virtio')
        ET.SubElement(interface, 'source', type='unix',
                      path=vhost_server_path, mode='server')
        return vhost_server_path
class SimpleMacsecRecipe(CommonHWSubConfigMixin, BaseEnrtRecipe):
    """Enrt recipe testing a MACsec link between two directly matched NICs.

    Sub-configurations iterate over ``macsec_settings``: plain (no MACsec),
    MACsec with encryption on, and MACsec with encryption off. Ping and perf
    tests run over the msec0 devices when MACsec is configured, otherwise
    over the bare ethernet devices.
    """
    host1 = HostReq()
    host1.eth0 = DeviceReq(label="to_switch", driver=RecipeParam("driver"))
    host2 = HostReq()
    host2.eth0 = DeviceReq(label="to_switch", driver=RecipeParam("driver"))

    # None means "no MACsec" baseline; 'on'/'off' toggle MACsec encryption
    macsec_settings = [None, 'on', 'off']
    # pre-shared SA ids and 128-bit keys; host1 uses them in listed order,
    # host2 in reversed order so TX/RX keys pair up across the link
    ids = ['00', '01']
    keys = [
        "7a16780284000775d4f0a3c0f0e092c0",
        "3212ef5c4cc5d0e4210b17208e88779e"
    ]

    def test_wide_configuration(self):
        """Assign carrier IPv4 addresses and record endpoints on the config.

        Devices are left down; apply_sub_configuration brings up either the
        plain NICs or the MACsec devices depending on the sub-configuration.
        """
        host1, host2 = self.matched.host1, self.matched.host2
        configuration = super().test_wide_configuration()
        configuration.test_wide_devices = [host1.eth0, host2.eth0]
        net_addr = "192.168.0"
        for i, host in enumerate([host1, host2]):
            host.eth0.down()
            host.eth0.ip_add(ipaddress(net_addr + '.' + str(i + 1) + "/24"))
        self.wait_tentative_ips(configuration.test_wide_devices)
        if (self.params.ping_parallel or self.params.ping_bidirect
                or self.params.perf_reverse):
            logging.debug("Parallel pings or reverse perf tests are "
                          "not supported for this recipe, ping_parallel"
                          "/ping_bidirect/perf_reverse will be ignored.")
        configuration.endpoint1 = host1.eth0
        configuration.endpoint2 = host2.eth0
        configuration.host1 = host1
        configuration.host2 = host2
        return configuration

    def generate_test_wide_description(self, config):
        """Extend the base description with the configured IP addresses."""
        host1, host2 = self.matched.host1, self.matched.host2
        desc = super().generate_test_wide_description(config)
        desc += [
            "\n".join([
                "Configured {}.{}.ips = {}".format(dev.host.hostid, dev.name,
                                                   dev.ips)
                for dev in config.test_wide_devices
            ])
        ]
        return desc

    def test_wide_deconfiguration(self, config):
        # drop our references before the base class tears devices down
        del config.test_wide_devices

        super().test_wide_deconfiguration(config)

    def wait_tentative_ips(self, devices):
        """Block (up to 5s) until no IPv6 address on *devices* is tentative (DAD)."""
        def condition():
            return all(
                [not ip.is_tentative for dev in devices for ip in dev.ips])

        self.ctl.wait_for_condition(condition, timeout=5)

    def generate_sub_configurations(self, config):
        """Yield one sub-configuration per macsec_settings entry.

        ``encrypt`` is None for the plain baseline; ip_vers is only relevant
        when MACsec is configured.

        NOTE(review): calls ConfMixin.generate_sub_configurations explicitly
        rather than super() — presumably to skip intermediate mixins; confirm
        against the class MRO.
        """
        for subconf in ConfMixin.generate_sub_configurations(self, config):
            for encryption in self.macsec_settings:
                new_config = copy.copy(subconf)
                new_config.encrypt = encryption
                if encryption is not None:
                    new_config.ip_vers = self.params.ip_versions
                yield new_config

    def apply_sub_configuration(self, config):
        """Bring up the plain link, or build symmetric MACsec devices.

        For MACsec, each host gets an msec0 on top of eth0 with one TX SA and
        one RX SA; the key/id lists are reversed for host2 so each side's TX
        key matches the peer's RX key.
        """
        if not config.encrypt:
            config.endpoint1.up()
            config.endpoint2.up()
        else:
            net_addr = "192.168.100"
            net_addr6 = "fc00:0:0:0"
            host1, host2 = config.host1, config.host2
            k_ids = list(zip(self.ids, self.keys))
            hosts_and_keys = [(host1, host2, k_ids),
                              (host2, host1, k_ids[::-1])]
            # note: the loop variable k_ids intentionally shadows the outer
            # one — each direction sees the (id, key) pairs in its own order
            for host_a, host_b, k_ids in hosts_and_keys:
                host_a.msec0 = MacsecDevice(realdev=host_a.eth0,
                                            encrypt=config.encrypt)
                rx_kwargs = dict(port=1, address=host_b.eth0.hwaddr)
                tx_sa_kwargs = dict(sa=0, pn=1, enable='on',
                                    id=k_ids[0][0], key=k_ids[0][1])
                rx_sa_kwargs = rx_kwargs.copy()
                rx_sa_kwargs.update(tx_sa_kwargs)
                rx_sa_kwargs['id'] = k_ids[1][0]
                rx_sa_kwargs['key'] = k_ids[1][1]
                host_a.msec0.rx('add', **rx_kwargs)
                host_a.msec0.tx_sa('add', **tx_sa_kwargs)
                host_a.msec0.rx_sa('add', **rx_sa_kwargs)
            for i, host in enumerate([host1, host2]):
                host.msec0.ip_add(
                    ipaddress(net_addr + "." + str(i + 1) + "/24"))
                host.msec0.ip_add(
                    ipaddress(net_addr6 + "::" + str(i + 1) + "/64"))
                host.eth0.up()
                host.msec0.up()

    def remove_sub_configuration(self, config):
        """Destroy the MACsec devices (if any) and take the carrier NICs down."""
        if config.encrypt:
            host1, host2 = config.host1, config.host2
            for host in (host1, host2):
                host.msec0.destroy()
                del host.msec0
        config.endpoint1.down()
        config.endpoint2.down()

    def generate_ping_configurations(self, config):
        """Yield PingConf lists for the current sub-configuration.

        Plain baseline pings over the carrier NICs (IPv4 only); MACsec pings
        over msec0 using the recipe's ip_versions.
        """
        if not config.encrypt:
            client_nic = config.endpoint1
            server_nic = config.endpoint2
            ip_vers = ('ipv4', )
        else:
            client_nic = config.host1.msec0
            server_nic = config.host2.msec0
            ip_vers = self.params.ip_versions

        count = self.params.ping_count
        interval = self.params.ping_interval
        size = self.params.ping_psize
        common_args = {'count': count, 'interval': interval, 'size': size}

        for ipv in ip_vers:
            kwargs = {}
            if ipv == "ipv4":
                kwargs.update(family=AF_INET)
            elif ipv == "ipv6":
                kwargs.update(family=AF_INET6)
                kwargs.update(is_link_local=False)

            client_ips = client_nic.ips_filter(**kwargs)
            server_ips = server_nic.ips_filter(**kwargs)
            if ipv == "ipv6":
                # reverse so global addresses are paired ahead of the rest
                client_ips = client_ips[::-1]
                server_ips = server_ips[::-1]

            if len(client_ips) != len(server_ips) or (len(client_ips) *
                                                      len(server_ips) == 0):
                raise LnstError("Source/destination ip lists are of "
                                "different size or empty.")

            for src_addr, dst_addr in zip(client_ips, server_ips):
                pconf = PingConf(client=client_nic.netns,
                                 client_bind=src_addr,
                                 destination=server_nic.netns,
                                 destination_address=dst_addr,
                                 **common_args)

                yield [pconf]

    def generate_perf_configurations(self, config):
        """Yield PerfRecipeConf objects; perf runs only for MACsec sub-configs."""
        if config.encrypt:
            client_nic = config.host1.msec0
            server_nic = config.host2.msec0
            client_netns = client_nic.netns
            server_netns = server_nic.netns

            flow_combinations = self.generate_flow_combinations(config)

            for flows in flow_combinations:
                perf_recipe_conf = dict(
                    recipe_config=config,
                    flows=flows,
                )

                flows_measurement = self.params.net_perf_tool(
                    flows, perf_recipe_conf)

                cpu_measurement = self.params.cpu_perf_tool(
                    [client_netns, server_netns],
                    perf_recipe_conf,
                )

                perf_conf = PerfRecipeConf(
                    measurements=[cpu_measurement, flows_measurement],
                    iterations=self.params.perf_iterations,
                )

                perf_conf.register_evaluators(cpu_measurement,
                                              self.cpu_perf_evaluators)
                perf_conf.register_evaluators(flows_measurement,
                                              self.net_perf_evaluators)

                yield perf_conf

    def generate_flow_combinations(self, config):
        """Yield single-flow lists over msec0 for every ip version, perf test and msg size."""
        client_nic = config.host1.msec0
        server_nic = config.host2.msec0
        client_netns = client_nic.netns
        server_netns = server_nic.netns

        for ipv in self.params.ip_versions:
            if ipv == "ipv4":
                family = AF_INET
            elif ipv == "ipv6":
                family = AF_INET6

            client_bind = client_nic.ips_filter(family=family)[0]
            server_bind = server_nic.ips_filter(family=family)[0]

            for perf_test in self.params.perf_tests:
                for size in self.params.perf_msg_sizes:
                    pstreams = self.params.perf_parallel_streams
                    flow = PerfFlow(type=perf_test,
                                    generator=client_netns,
                                    generator_bind=client_bind,
                                    receiver=server_netns,
                                    receiver_bind=server_bind,
                                    msg_size=size,
                                    duration=self.params.perf_duration,
                                    parallel_streams=pstreams,
                                    cpupin=self.params.perf_tool_cpu if
                                    ("perf_tool_cpu" in self.params) else None)
                    yield [flow]

    @property
    def mtu_hw_config_dev_list(self):
        # MTU sub-config applies to the physical carrier NICs
        return [self.matched.host1.eth0, self.matched.host2.eth0]

    @property
    def dev_interrupt_hw_config_dev_list(self):
        return [self.matched.host1.eth0, self.matched.host2.eth0]
class VlansRecipe(CommonHWSubConfigMixin, OffloadSubConfigMixin,
                  BaseEnrtRecipe):
    """Enrt recipe testing three VLANs (10/20/30) on one NIC per host.

    Pings are generated between every VLAN pair across the hosts; a ping
    between devices with different VLAN ids is expected to fail. Perf runs
    over the vlan10 pair.
    """
    host1 = HostReq()
    host1.eth0 = DeviceReq(label="net1", driver=RecipeParam("driver"))

    host2 = HostReq()
    host2.eth0 = DeviceReq(label="net1", driver=RecipeParam("driver"))

    offload_combinations = Param(
        default=(dict(gro="on", gso="on", tso="on", tx="on", rx="on"),
                 dict(gro="off", gso="on", tso="on", tx="on", rx="on"),
                 dict(gro="on", gso="off", tso="off", tx="on", rx="on"),
                 dict(gro="on", gso="on", tso="off", tx="off", rx="on"),
                 dict(gro="on", gso="on", tso="on", tx="on", rx="off")))

    def test_wide_configuration(self):
        """Create vlan10/20/30 on each host's eth0 and address them.

        Each vlan N gets 192.168.N.{1,2}/24 and fc00:0:0:M::{1,2}/64
        (M = 1, 2, 3 for vlan 10, 20, 30).
        """
        host1, host2 = self.matched.host1, self.matched.host2

        host1.eth0.down()
        host2.eth0.down()

        host1.vlan0 = VlanDevice(realdev=host1.eth0, vlan_id=10)
        host1.vlan1 = VlanDevice(realdev=host1.eth0, vlan_id=20)
        host1.vlan2 = VlanDevice(realdev=host1.eth0, vlan_id=30)
        host2.vlan0 = VlanDevice(realdev=host2.eth0, vlan_id=10)
        host2.vlan1 = VlanDevice(realdev=host2.eth0, vlan_id=20)
        host2.vlan2 = VlanDevice(realdev=host2.eth0, vlan_id=30)

        configuration = super().test_wide_configuration()
        configuration.test_wide_devices = []
        for host in [host1, host2]:
            configuration.test_wide_devices.extend(
                [host.vlan0, host.vlan1, host.vlan2])

        net_addr = "192.168"
        net_addr6 = "fc00:0:0"

        for i, host in enumerate([host1, host2]):
            host.vlan0.ip_add(
                ipaddress(net_addr + '.10' + '.' + str(i + 1) + "/24"))
            host.vlan0.ip_add(
                ipaddress(net_addr6 + ":1::" + str(i + 1) + "/64"))
            host.vlan1.ip_add(
                ipaddress(net_addr + '.20' + '.' + str(i + 1) + "/24"))
            host.vlan1.ip_add(
                ipaddress(net_addr6 + ":2::" + str(i + 1) + "/64"))
            host.vlan2.ip_add(
                ipaddress(net_addr + '.30' + '.' + str(i + 1) + "/24"))
            host.vlan2.ip_add(
                ipaddress(net_addr6 + ":3::" + str(i + 1) + "/64"))
            for dev in [host.eth0, host.vlan0, host.vlan1, host.vlan2]:
                dev.up()

        self.wait_tentative_ips(configuration.test_wide_devices)

        return configuration

    def generate_test_wide_description(self, config):
        """Extend the base description with IPs, vlan ids and real devices."""
        host1, host2 = self.matched.host1, self.matched.host2
        desc = super().generate_test_wide_description(config)
        desc += [
            "\n".join([
                "Configured {}.{}.ips = {}".format(dev.host.hostid, dev.name,
                                                   dev.ips)
                for dev in config.test_wide_devices
            ]),
            "\n".join([
                "Configured {}.{}.vlan_id = {}".format(dev.host.hostid,
                                                       dev.name, dev.vlan_id)
                for dev in config.test_wide_devices
            ]),
            "\n".join([
                "Configured {}.{}.realdev = {}".format(
                    dev.host.hostid, dev.name,
                    '.'.join([dev.host.hostid, dev.realdev.name]))
                for dev in config.test_wide_devices
            ])
        ]
        return desc

    def test_wide_deconfiguration(self, config):
        # drop our references before the base class tears devices down
        del config.test_wide_devices

        super().test_wide_deconfiguration(config)

    def generate_ping_endpoints(self, config):
        """Return the full cross product of host1 vlans x host2 vlans."""
        host1, host2 = self.matched.host1, self.matched.host2
        result = []
        for src in [host1.vlan0, host1.vlan1, host1.vlan2]:
            for dst in [host2.vlan0, host2.vlan1, host2.vlan2]:
                result += [(src, dst)]
        return result

    def generate_perf_endpoints(self, config):
        """Perf runs only over the matching vlan10 pair."""
        return [(self.matched.host1.vlan0, self.matched.host2.vlan0)]

    def wait_tentative_ips(self, devices):
        """Block (up to 5s) until no IPv6 address on *devices* is tentative (DAD)."""
        def condition():
            return all(
                [not ip.is_tentative for dev in devices for ip in dev.ips])

        self.ctl.wait_for_condition(condition, timeout=5)

    @property
    def offload_nics(self):
        return [self.matched.host1.eth0, self.matched.host2.eth0]

    @property
    def mtu_hw_config_dev_list(self):
        # MTU applies to the physical NICs and all vlan devices on top
        result = []
        for host in [self.matched.host1, self.matched.host2]:
            for dev in [host.eth0, host.vlan0, host.vlan1, host.vlan2]:
                result.append(dev)
        return result

    @property
    def coalescing_hw_config_dev_list(self):
        return [self.matched.host1.eth0, self.matched.host2.eth0]

    @property
    def dev_interrupt_hw_config_dev_list(self):
        return [self.matched.host1.eth0, self.matched.host2.eth0]

    @property
    def parallel_stream_qdisc_hw_config_dev_list(self):
        return [self.matched.host1.eth0, self.matched.host2.eth0]

    def do_ping_tests(self, recipe_config):
        """Run each ping config, marking cross-vlan pings as expected failures."""
        for ping_config in self.generate_ping_configurations(recipe_config):
            exp_fail = []
            for pconf in ping_config:
                cond = self.vlan_id_same(pconf.client_bind,
                                         pconf.destination_address)
                exp_fail.append(cond)
            result = self.ping_test(ping_config, exp_fail)
            self.ping_evaluate_and_report(ping_config, result)

    def ping_test(self, ping_config, exp_fail):
        """Start all pings in the background, wait for each, collect results.

        :param exp_fail: per-pconf flags passed as the job's ``fail``
            expectation (True = the ping is expected to fail)
        :return: dict mapping each pconf to a (result, passed) tuple
        """
        results = {}

        running_ping_array = []
        for pingconf, fail in zip(ping_config, exp_fail):
            ping, client = self.ping_init(pingconf)
            running_ping = client.prepare_job(ping, fail=fail)
            running_ping.start(bg=True)
            running_ping_array.append((pingconf, running_ping))

        for _, pingjob in running_ping_array:
            try:
                pingjob.wait()
            finally:
                # always kill so a hung ping doesn't leak past the test
                pingjob.kill()

        for pingconf, pingjob in running_ping_array:
            result = pingjob.result
            passed = pingjob.passed
            results[pingconf] = (result, passed)

        return results

    def single_ping_evaluate_and_report(self, ping_config, result):
        """Report one ping result; >50% reply rate counts as successful."""
        fmt = "From: <{0.client.hostid} ({0.client_bind})> To: " \
              "<{0.destination.hostid} ({0.destination_address})>"
        description = fmt.format(ping_config)
        if result[0].get("rate", 0) > 50:
            message = "Ping successful --- " + description
            self.add_result(result[1], message, result[0])
        else:
            message = "Ping unsuccessful --- " + description
            self.add_result(result[1], message, result[0])

    def vlan_id_same(self, src_addr, dst_addr):
        """Return True when the devices owning the two addresses have DIFFERENT vlan ids.

        NOTE(review): despite the name, this returns ``!=`` — it is used
        directly as the expected-failure flag in do_ping_tests (cross-vlan
        pings should fail), so the behavior looks intended but the name is
        misleading; confirm before renaming.
        """
        host1, host2 = self.matched.host1, self.matched.host2
        devs = []
        for dev in (host1.devices + host2.devices):
            if src_addr in dev.ips or dst_addr in dev.ips:
                devs.append(dev)

        try:
            return devs[0].vlan_id != devs[1].vlan_id
        except (IndexError, AttributeError):
            # fewer than two owning devices found, or a device without a
            # vlan_id attribute — treat as "not comparable"
            return False
class VxlanGpeTunnelRecipe(
    PauseFramesHWConfigMixin, OffloadSubConfigMixin, BaseTunnelRecipe
):
    """
    This class implements a recipe that configures a simple Vxlan GPE
    tunnel between two hosts.

    .. code-block:: none

                        .--------.
                 .------| switch |-----.
                 |      '--------'     |
                 |                     |
         .-------|------.      .-------|------.
         |    .--'-.    |      |    .--'-.    |
         |    |eth0|    |      |    |eth0|    |
         |    '----'    |      |    '----'    |
         |      | |     |      |      | |     |
         |  ----' '---  |      |  ----' '---  |
         | vxlan tunnel |      | vxlan tunnel |
         |  ----------  |      |  ----------  |
         |              |      |              |
         |    host1     |      |    host2     |
         '--------------'      '--------------'

    The actual test machinery is implemented in the :any:`BaseEnrtRecipe` class.

    The test wide configuration is implemented in the :any:`BaseTunnelRecipe`
    class.

    The recipe provides additional parameter:

        :param carrier_ipversion:
            This parameter specifies whether IPv4 or IPv6 addresses are
            used for the underlying (carrier) network. The value is either
            **ipv4** or **ipv6**
    """

    host1 = HostReq()
    host1.eth0 = DeviceReq(label="net1", driver=RecipeParam("driver"))

    host2 = HostReq()
    host2.eth0 = DeviceReq(label="net1", driver=RecipeParam("driver"))

    offload_combinations = Param(
        default=(
            dict(gro="on", gso="on", tso="on"),
            dict(gro="off", gso="on", tso="on"),
            dict(gro="on", gso="off", tso="off"),
            dict(gro="on", gso="on", tso="off"),
        )
    )

    # TODO: ping over IPv6 does not work yet
    ip_versions = Param(default=("ipv4",))

    # TODO: IPv6 does not work as carrier network
    carrier_ipversion = ChoiceParam(type=StrParam, choices=set(["ipv4"]))

    def configure_underlying_network(self, configuration):
        """
        The underlying network for the tunnel consists of the Ethernet
        devices on the matched hosts.
        """
        host1, host2 = self.matched.host1, self.matched.host2
        for i, device in enumerate([host1.eth0, host2.eth0]):
            if self.params.carrier_ipversion == "ipv4":
                device.ip_add(ipaddress("192.168.101." + str(i + 1) + "/24"))
            else:
                device.ip_add(ipaddress("fc00::" + str(i + 1) + "/64"))
            device.up()
            configuration.test_wide_devices.append(device)

        self.wait_tentative_ips(configuration.test_wide_devices)
        configuration.tunnel_endpoints = (host1.eth0, host2.eth0)

    def create_tunnel(self, configuration):
        """
        The Vxlan tunnel devices are created with external flag specified
        so that the encapsulation can be defined externally by routes.

        The devices are configured to use VXLAN-GPE.

        Routes for IPv4 and IPv6 networks to be tunneled through the Vxlan are
        added.

        IPv4 and IPv6 addresses of the tunneled networks are configured on
        the loopback devices of the matched hosts.
        """
        endpoint1, endpoint2 = configuration.tunnel_endpoints
        m1 = endpoint1.netns
        m2 = endpoint2.netns
        if self.params.carrier_ipversion == "ipv4":
            ip_filter = {"family": AF_INET}
        else:
            ip_filter = {"family": AF_INET6, "is_link_local": False}

        endpoint1_ip = endpoint1.ips_filter(**ip_filter)[0]
        endpoint2_ip = endpoint2.ips_filter(**ip_filter)[0]

        # /32 and /128 dummy networks on loopback serve as the tunneled nets
        m1_dummy_ip = ipaddress("172.16.10.1/32")
        m1_dummy_ip6 = ipaddress("fc00:a::1/128")
        m2_dummy_ip = ipaddress("172.16.20.1/32")
        m2_dummy_ip6 = ipaddress("fc00:b::1/128")

        # external=True: encapsulation is driven by the lwtunnel routes below
        m1.vxlan_tunnel = VxlanDevice(external=True, gpe=True, learning=0)
        m2.vxlan_tunnel = VxlanDevice(external=True, gpe=True, learning=0)
        m1.lo = LoopbackDevice()
        m2.lo = LoopbackDevice()

        # A
        m1.lo.ip_add(m1_dummy_ip)
        m1.lo.ip_add(m1_dummy_ip6)
        m1.vxlan_tunnel.mtu = 1400
        m1.vxlan_tunnel.up()

        # B
        m2.lo.ip_add(m2_dummy_ip)
        m2.lo.ip_add(m2_dummy_ip6)
        m2.vxlan_tunnel.mtu = 1400
        m2.vxlan_tunnel.up()

        tunnel_id = 1234
        encap = "ip" if self.params.carrier_ipversion == "ipv4" else "ip6"
        # lwtunnel routes: traffic to the peer's dummy address is encapsulated
        # with the given VNI and sent to the peer's carrier address
        m1.run(
            "ip route add {} encap {} id {} dst {} dev {}".format(
                m2_dummy_ip, encap, tunnel_id, endpoint2_ip, m1.vxlan_tunnel.name
            )
        )
        m2.run(
            "ip route add {} encap {} id {} dst {} dev {}".format(
                m1_dummy_ip, encap, tunnel_id, endpoint1_ip, m2.vxlan_tunnel.name
            )
        )
        m1.run(
            "ip route add {} encap {} id {} dst {} dev {}".format(
                m2_dummy_ip6, encap, tunnel_id, endpoint2_ip, m1.vxlan_tunnel.name
            )
        )
        m2.run(
            "ip route add {} encap {} id {} dst {} dev {}".format(
                m1_dummy_ip6, encap, tunnel_id, endpoint1_ip, m2.vxlan_tunnel.name
            )
        )

        configuration.tunnel_devices.extend([m1.vxlan_tunnel, m2.vxlan_tunnel])
        self.wait_tentative_ips([m1.lo, m2.lo])

    def generate_ping_endpoints(self, config):
        """
        The ping endpoints for this recipe are the loopback devices that
        are configured with IP addresses of the tunnelled networks.

        Returned as::

            [PingEndpoints(self.matched.host1.lo, self.matched.host2.lo)]
        """
        return [PingEndpoints(self.matched.host1.lo, self.matched.host2.lo)]

    def get_packet_assert_config(self, ping_config):
        """
        The packet assert test configuration contains filter for source
        and destination addresses matching the carrier network with udp
        header bits specific to VXLAN tunneling. The grep patterns match
        the ICMP or ICMP6 echo replies encapsulated by Vxlan with
        VXLAN-GPE extension.
        """
        if self.params.carrier_ipversion == "ipv4":
            ip_filter = {"family": AF_INET}
        else:
            ip_filter = {"family": AF_INET6, "is_link_local": False}

        m1_carrier = self.matched.host1.eth0
        m2_carrier = self.matched.host2.eth0
        m1_carrier_ip = m1_carrier.ips_filter(**ip_filter)[0]
        m2_carrier_ip = m2_carrier.ips_filter(**ip_filter)[0]

        ip1 = ping_config.client_bind
        ip2 = ping_config.destination_address

        pa_kwargs = {}
        # capture on host2, matching only packets heading back to host1,
        # i.e. the encapsulated echo replies
        pa_kwargs["p_filter"] = "src {} and dst {}".format(m2_carrier_ip,
                                                           m1_carrier_ip)

        if isinstance(ip2, Ip4Address):
            grep_pattern = "VXLAN-GPE.*IP {} > {}: ICMP echo reply".format(ip2, ip1)
        elif isinstance(ip2, Ip6Address):
            grep_pattern = "VXLAN-GPE.*IP6 {} > {}: ICMP6, echo reply".format(ip2, ip1)
        else:
            # fixed error-message grammar ("is nor IPv4 or IPv6 address")
            raise Exception("The destination address is neither an IPv4 nor an IPv6 address")

        pa_kwargs["grep_for"] = [grep_pattern]

        if ping_config.count:
            pa_kwargs["p_min"] = ping_config.count
        m2 = ping_config.destination
        pa_config = PacketAssertConf(m2, m2_carrier, **pa_kwargs)

        return pa_config

    @property
    def offload_nics(self):
        return [self.matched.host1.eth0, self.matched.host2.eth0]

    @property
    def pause_frames_dev_list(self):
        return [self.matched.host1.eth0, self.matched.host2.eth0]
class IpIpTunnelRecipe(MTUHWConfigMixin, PauseFramesHWConfigMixin, BaseTunnelRecipe):
    """
    This class implements a recipe that configures a simple IpIp tunnel
    between two hosts.

    .. code-block:: none

                        .--------.
                 .------| switch |-----.
                 |      '--------'     |
                 |                     |
         .-------|------.      .-------|------.
         |    .--'-.    |      |    .--'-.    |
         |    |eth0|    |      |    |eth0|    |
         |    '----'    |      |    '----'    |
         |      | |     |      |      | |     |
         |  ----' '---  |      |  ----' '---  |
         | ipip tunnel  |      | ipip tunnel  |
         |  ----------  |      |  ----------  |
         |              |      |              |
         |    host1     |      |    host2     |
         '--------------'      '--------------'

    The actual test machinery is implemented in the :any:`BaseEnrtRecipe` class.

    The test wide configuration is implemented in the :any:`BaseTunnelRecipe`
    class.

    The recipe provides additional parameter:

        :param tunnel_mode:
            this parameter specifies the mode of the IPIP tunnel, can be any
            of the **any**, **ipip** or **mplsip**
    """

    host1 = HostReq()
    host1.eth0 = DeviceReq(label="net1", driver=RecipeParam("driver"))

    host2 = HostReq()
    host2.eth0 = DeviceReq(label="net1", driver=RecipeParam("driver"))

    tunnel_mode = ChoiceParam(type=StrParam, choices=set(["any", "ipip", "mplsip"]), mandatory=True)

    def configure_underlying_network(self, configuration):
        """
        The underlying network for the tunnel consists of the Ethernet
        devices on the matched hosts; each gets an address from
        172.16.0.0/16 and is brought up.
        """
        host1, host2 = self.matched.host1, self.matched.host2
        for index, device in enumerate([host1.eth0, host2.eth0]):
            device.ip_add(ipaddress("172.16.200.{}/16".format(index + 1)))
            device.up()
            configuration.test_wide_devices.append(device)

        configuration.tunnel_endpoints = (host1.eth0, host2.eth0)

    def create_tunnel(self, configuration):
        """
        Create one IpIp device per host (mode taken from the *tunnel_mode*
        parameter), bring both up and assign them IPv4 addresses from
        192.168.200.0/24.
        """
        ep_a, ep_b = configuration.tunnel_endpoints
        ns_a = ep_a.netns
        ns_b = ep_b.netns

        v4_filter = {"family": AF_INET}
        carrier_a = ep_a.ips_filter(**v4_filter)[0]
        carrier_b = ep_b.ips_filter(**v4_filter)[0]

        tunnel_ip_a = ipaddress("192.168.200.1/24")
        tunnel_ip_b = ipaddress("192.168.200.2/24")

        # local/remote are mirrored between the two endpoints
        ns_a.ipip_tunnel = IpIpDevice(
            local=carrier_a,
            remote=carrier_b,
            mode=self.params.tunnel_mode,
            ttl=64,
        )
        ns_b.ipip_tunnel = IpIpDevice(
            local=carrier_b,
            remote=carrier_a,
            mode=self.params.tunnel_mode,
            ttl=64,
        )

        # bring each tunnel up before addressing it (A first, then B)
        for tunnel_dev, tunnel_ip in (
            (ns_a.ipip_tunnel, tunnel_ip_a),
            (ns_b.ipip_tunnel, tunnel_ip_b),
        ):
            tunnel_dev.up()
            tunnel_dev.ip_add(tunnel_ip)

        configuration.tunnel_devices.extend([ns_a.ipip_tunnel, ns_b.ipip_tunnel])

    def generate_ping_endpoints(self, config):
        """
        The ping endpoints for this recipe are simply the tunnel endpoints

        Returned as::

            [PingEndpoints(self.matched.host1.ipip_tunnel, self.matched.host2.ipip_tunnel)]
        """
        host1, host2 = self.matched.host1, self.matched.host2
        return [PingEndpoints(host1.ipip_tunnel, host2.ipip_tunnel)]

    def get_packet_assert_config(self, ping_config):
        """
        Build the packet assert configuration: capture on the destination
        host's carrier NIC with an ``ip host`` filter and grep for the
        ICMP echo requests encapsulated by IPIP.
        """
        v4_filter = {"family": AF_INET}
        capture_dev = self.matched.host2.eth0
        carrier_src_ip = self.matched.host1.eth0.ips_filter(**v4_filter)[0]
        carrier_dst_ip = capture_dev.ips_filter(**v4_filter)[0]

        inner_src = ping_config.client_bind
        inner_dst = ping_config.destination_address

        pa_kwargs = {
            "p_filter": "ip host {}".format(carrier_src_ip),
            # TODO: handle mplsip mode
            "grep_for": [
                "IP {} > {}: IP {} > {}: ICMP".format(
                    carrier_src_ip, carrier_dst_ip, inner_src, inner_dst
                )
            ],
        }
        if ping_config.count:
            pa_kwargs["p_min"] = ping_config.count

        return PacketAssertConf(ping_config.destination, capture_dev, **pa_kwargs)

    @property
    def pause_frames_dev_list(self):
        return [self.matched.host1.eth0, self.matched.host2.eth0]

    @property
    def mtu_hw_config_dev_list(self):
        return [self.matched.host1.ipip_tunnel, self.matched.host2.ipip_tunnel]
class BondRecipe(PerfReversibleFlowMixin, CommonHWSubConfigMixin,
                 OffloadSubConfigMixin, BaremetalEnrtRecipe):
    """
    This recipe implements Enrt testing for a network scenario that looks
    as follows

    .. code-block:: none

                                    .--------.
                    .----------------+        |
                    |        .-------+ switch +-------.
                    |        |       '--------'       |
             .-------------------.                    |
             |     | bond0 |     |                    |
             | .---'--. .--'---. |           .--------'-.
        .----|-| eth0 |-| eth1 |-|----.  .----| eth0 |----.
        |    | '------' '------' |    |  |    '------'    |
        |    '-------------------'    |  |                |
        |                             |  |                |
        |            host1            |  |      host2     |
        '-----------------------------'  '----------------'

    The recipe provides additional recipe parameters to configure the
    bonding device.

        :param bonding_mode:
            (mandatory test parameter) the bonding mode to be configured on
            the bond0 device.
        :param miimon_value:
            (mandatory test parameter) the miimon interval to be configured
            on the bond0 device.

    All sub configurations are included via Mixin classes.

    The actual test machinery is implemented in the :any:`BaseEnrtRecipe` class.
    """
    host1 = HostReq()
    host1.eth0 = DeviceReq(label="net1", driver=RecipeParam("driver"))
    host1.eth1 = DeviceReq(label="net1", driver=RecipeParam("driver"))

    host2 = HostReq()
    host2.eth0 = DeviceReq(label="net1", driver=RecipeParam("driver"))

    offload_combinations = Param(
        default=(dict(gro="on", gso="on", tso="on", tx="on"),
                 dict(gro="off", gso="on", tso="on", tx="on"),
                 dict(gro="on", gso="off", tso="off", tx="on"),
                 dict(gro="on", gso="on", tso="off", tx="off")))

    bonding_mode = StrParam(mandatory=True)
    miimon_value = IntParam(mandatory=True)

    def test_wide_configuration(self):
        """
        Test wide configuration for this recipe involves creating a
        bonding device using the two matched physical devices as slaves on
        host1. The bonding mode and miimon interval is configured on the
        bonding device according to the recipe parameters. IPv4 and IPv6
        addresses are added to the bonding device and to the matched
        ethernet device on host2.

        | host1.bond0 = 192.168.101.1/24 and fc00::1/64
        | host2.eth0 = 192.168.101.2/24 and fc00::2/64
        """
        host1, host2 = self.matched.host1, self.matched.host2

        host1.bond0 = BondDevice(mode=self.params.bonding_mode,
                                 miimon=self.params.miimon_value)

        configuration = super().test_wide_configuration()
        configuration.test_wide_devices = []

        # slaves must be down before they can be enslaved
        for dev in [host1.eth0, host1.eth1]:
            dev.down()
            host1.bond0.slave_add(dev)

        net_addr = "192.168.101"
        net_addr6 = "fc00:0:0:0"

        for i, dev in enumerate([host1.bond0, host2.eth0]):
            dev.ip_add(ipaddress(net_addr + "." + str(i + 1) + "/24"))
            dev.ip_add(ipaddress(net_addr6 + "::" + str(i + 1) + "/64"))
            configuration.test_wide_devices.append(dev)

        for dev in [host1.eth0, host1.eth1, host1.bond0, host2.eth0]:
            dev.up()

        self.wait_tentative_ips(configuration.test_wide_devices)

        return configuration

    def generate_test_wide_description(self, config):
        """
        Test wide description is extended with the configured IP addresses,
        the configured bonding slave interfaces, bonding mode and the
        miimon interval.
        """
        host1, host2 = self.matched.host1, self.matched.host2
        desc = super().generate_test_wide_description(config)
        desc += [
            "\n".join([
                "Configured {}.{}.ips = {}".format(dev.host.hostid, dev.name,
                                                   dev.ips)
                for dev in config.test_wide_devices
            ]),
            "Configured {}.{}.slaves = {}".format(
                host1.hostid, host1.bond0.name, [
                    '.'.join([host1.hostid, slave.name])
                    for slave in host1.bond0.slaves
                ]),
            "Configured {}.{}.mode = {}".format(host1.hostid,
                                                host1.bond0.name,
                                                host1.bond0.mode),
            "Configured {}.{}.miimon = {}".format(host1.hostid,
                                                  host1.bond0.name,
                                                  host1.bond0.miimon)
        ]
        return desc

    def test_wide_deconfiguration(self, config):
        # drop our references before the base class tears devices down
        del config.test_wide_devices

        super().test_wide_deconfiguration(config)

    def generate_ping_endpoints(self, config):
        """
        The ping endpoints for this recipe are the configured bonding
        device on host1 and the matched ethernet device on host2.

        Returned as::

            [PingEndpoints(self.matched.host1.bond0, self.matched.host2.eth0)]
        """
        return [
            PingEndpoints(self.matched.host1.bond0, self.matched.host2.eth0)
        ]

    def generate_perf_endpoints(self, config):
        """
        The perf endpoints for this recipe are the configured bonding
        device on host1 and the matched ethernet device on host2. The
        traffic egresses the bonding device.

        | host1.bond0
        | host2.eth0

        Returned as::

            [(self.matched.host1.bond0, self.matched.host2.eth0)]
        """
        return [(self.matched.host1.bond0, self.matched.host2.eth0)]

    @property
    def offload_nics(self):
        """
        The `offload_nics` property value for this scenario is a list
        containing the configured bonding device on host1 and the matched
        ethernet device on host2.

        | host1.bond0
        | host2.eth0

        For detailed explanation of this property see
        :any:`OffloadSubConfigMixin` class and
        :any:`OffloadSubConfigMixin.offload_nics`.
        """
        return [self.matched.host1.bond0, self.matched.host2.eth0]

    @property
    def mtu_hw_config_dev_list(self):
        """
        The `mtu_hw_config_dev_list` property value for this scenario is a
        list containing the configured bonding device on host1 and the
        matched ethernet device on host2.

        | host1.bond0
        | host2.eth0

        For detailed explanation of this property see :any:`MTUHWConfigMixin`
        class and :any:`MTUHWConfigMixin.mtu_hw_config_dev_list`.
        """
        return [self.matched.host1.bond0, self.matched.host2.eth0]

    @property
    def coalescing_hw_config_dev_list(self):
        """
        The `coalescing_hw_config_dev_list` property value for this scenario
        is a list containing the matched physical devices used to create
        the bonding device on host1 and the matched ethernet device on
        host2.

        | host1.eth0, host1.eth1
        | host2.eth0

        For detailed explanation of this property see
        :any:`CoalescingHWConfigMixin` class and
        :any:`CoalescingHWConfigMixin.coalescing_hw_config_dev_list`.
        """
        return [
            self.matched.host1.eth0, self.matched.host1.eth1,
            self.matched.host2.eth0
        ]

    @property
    def dev_interrupt_hw_config_dev_list(self):
        """
        The `dev_interrupt_hw_config_dev_list` property value for this
        scenario is a list containing the matched physical devices used to
        create the bonding device on host1 and the matched ethernet device
        on host2.

        | host1.eth0, host1.eth1
        | host2.eth0

        For detailed explanation of this property see
        :any:`DevInterruptHWConfigMixin` class and
        :any:`DevInterruptHWConfigMixin.dev_interrupt_hw_config_dev_list`.
        """
        return [
            self.matched.host1.eth0, self.matched.host1.eth1,
            self.matched.host2.eth0
        ]

    @property
    def parallel_stream_qdisc_hw_config_dev_list(self):
        """
        The `parallel_stream_qdisc_hw_config_dev_list` property value for
        this scenario is a list containing the matched physical devices
        used to create the bonding device on host1 and the matched ethernet
        device on host2.

        | host1.eth0, host1.eth1
        | host2.eth0

        For detailed explanation of this property see
        :any:`ParallelStreamQDiscHWConfigMixin` class and
        :any:`ParallelStreamQDiscHWConfigMixin.parallel_stream_qdisc_hw_config_dev_list`.
        """
        return [
            self.matched.host1.eth0, self.matched.host1.eth1,
            self.matched.host2.eth0
        ]

    @property
    def pause_frames_dev_list(self):
        """
        The `pause_frames_dev_list` property value for this scenario is a
        list containing the matched physical devices used to create the
        bonding device on host1 and the matched ethernet device on host2.

        | host1.eth0, host1.eth1
        | host2.eth0

        For detailed explanation of this property see
        :any:`PauseFramesHWConfigMixin` and
        :any:`PauseFramesHWConfigMixin.pause_frames_dev_list`.
        """
        return [
            self.matched.host1.eth0, self.matched.host1.eth1,
            self.matched.host2.eth0
        ]
class NoVirtOvsVxlanRecipe(CommonHWSubConfigMixin, BaremetalEnrtRecipe):
    """Enrt recipe testing a VXLAN tunnel built from OvS bridges on two hosts.

    Each host gets an OvS bridge ``br0`` with an internal port ``int0``
    (OpenFlow port 5) and a vxlan tunnel port (OpenFlow port 10) pointing at
    the other host. OpenFlow rules forward int0 traffic into the tunnel with
    VNI 100 and back; everything else is dropped. Ping and perf run between
    the two int0 devices.
    """
    host1 = HostReq()
    host1.eth0 = DeviceReq(label="to_switch", driver=RecipeParam("driver"))

    host2 = HostReq()
    host2.eth0 = DeviceReq(label="to_switch", driver=RecipeParam("driver"))

    def test_wide_configuration(self):
        """Build the bridge, internal port, vxlan tunnel and flows on both hosts."""
        host1, host2 = self.matched.host1, self.matched.host2

        net_addr = "192.168.2"
        vxlan_net_addr = "192.168.100"
        vxlan_net_addr6 = "fc00:0:0:0"

        # port 5 = int0, port 10 = vxlan tunnel; VNI 100; drop by default
        flow_entries = []
        flow_entries.append("table=0,in_port=5,actions=set_field:100->"
                            "tun_id,output:10")
        flow_entries.append("table=0,in_port=10,tun_id=100,actions="
                            "output:5")
        flow_entries.append("table=0,priority=100,actions=drop")

        for i, host in enumerate([host1, host2]):
            host.eth0.down()
            host.eth0.ip_add(ipaddress(net_addr + "." + str(i + 1) + "/24"))
            host.br0 = OvsBridgeDevice()
            host.int0 = host.br0.port_add(interface_options={
                'type': 'internal',
                'ofport_request': 5,
                'name': 'int0'
            })
            host.int0.ip_add(
                ipaddress(vxlan_net_addr + "." + str(i + 1) + "/24"))
            host.int0.ip_add(
                ipaddress(vxlan_net_addr6 + "::" + str(i + 1) + "/64"))
            # remote_ip: host at index 0 points to .2, index 1 points to .1
            tunnel_opts = {
                "option:remote_ip": net_addr + "." + str(2 - i),
                "option:key": "flow",
                "ofport_request": "10"
            }
            host.br0.tunnel_add("vxlan", tunnel_opts)
            host.br0.flows_add(flow_entries)
            host.eth0.up()
            host.int0.up()
            host.br0.up()

        configuration = super().test_wide_configuration()
        configuration.test_wide_devices = [
            host1.eth0, host1.int0, host2.eth0, host2.int0
        ]

        self.wait_tentative_ips(configuration.test_wide_devices)

        return configuration

    def generate_test_wide_description(self, config):
        """Extend the base description with IPs, bridge ports, tunnels and flows."""
        host1, host2 = self.matched.host1, self.matched.host2
        desc = super().generate_test_wide_description(config)
        desc += [
            "\n".join([
                "Configured {}.{}.ips = {}".format(dev.host.hostid, dev.name,
                                                   dev.ips)
                for dev in config.test_wide_devices
            ]),
            "\n".join([
                "Configured {}.{}.ports = {}".format(dev.host.hostid,
                                                     dev.name, dev.ports)
                for dev in [host1.br0, host2.br0]
            ]),
            "\n".join([
                "Configured {}.{}.tunnels = {}".format(dev.host.hostid,
                                                       dev.name, dev.tunnels)
                for dev in [host1.br0, host2.br0]
            ]),
            "\n".join([
                "Configured {}.{}.flows = {}".format(dev.host.hostid,
                                                     dev.name, dev.flows_str)
                for dev in [host1.br0, host2.br0]
            ])
        ]
        return desc

    def test_wide_deconfiguration(self, config):
        # drop our references before the base class tears devices down
        del config.test_wide_devices

        super().test_wide_deconfiguration(config)

    def generate_ping_endpoints(self, config):
        """Ping runs over the tunnel, between the two internal ports."""
        return [
            PingEndpoints(self.matched.host1.int0, self.matched.host2.int0)
        ]

    def generate_perf_endpoints(self, config):
        """Perf also runs over the tunnel, between the two internal ports."""
        return [(self.matched.host1.int0, self.matched.host2.int0)]

    @property
    def mtu_hw_config_dev_list(self):
        return [self.matched.host1.int0, self.matched.host2.int0]

    @property
    def dev_interrupt_hw_config_dev_list(self):
        return [self.matched.host1.eth0, self.matched.host2.eth0]

    @property
    def parallel_stream_qdisc_hw_config_dev_list(self):
        return [self.matched.host1.eth0, self.matched.host2.eth0]
class VxlanRemoteRecipe(CommonHWSubConfigMixin, BaseEnrtRecipe):
    """
    Enrt recipe testing a point-to-point VXLAN tunnel configured with an
    explicit unicast ``remote`` address (no multicast group) on top of the
    matched NICs of two hosts.
    """

    host1 = HostReq()
    host1.eth0 = DeviceReq(label="to_switch", driver=RecipeParam("driver"))

    host2 = HostReq()
    host2.eth0 = DeviceReq(label="to_switch", driver=RecipeParam("driver"))

    def test_wide_configuration(self):
        """
        Configure the underlay and the VXLAN overlay on both hosts.

        Addressing:

        | underlay (eth0):  192.168.0.1/24 and 192.168.0.2/24
        | overlay (vxlan0): 192.168.100.{1,2}/24 and fc00:0:0:0::{1,2}/64

        Each vxlan0 device uses VNI 1 and points its unicast ``remote``
        at the other host's underlay address.
        """
        host1, host2 = self.matched.host1, self.matched.host2

        for host in [host1, host2]:
            host.eth0.down()

        net_addr = "192.168.0"          # underlay (carrier) IPv4 network
        vxlan_net_addr = "192.168.100"  # overlay IPv4 network
        vxlan_net_addr6 = "fc00:0:0:0"  # overlay IPv6 network
        for i, host in enumerate([host1, host2]):
            host.eth0.ip_add(ipaddress(net_addr + "." + str(i + 1) + "/24"))
            # "2 - i" selects the peer host's underlay address.
            host.vxlan0 = VxlanDevice(vxlan_id='1',
                                      remote=net_addr + "." + str(2 - i))

        configuration = super().test_wide_configuration()
        configuration.test_wide_devices = [
            host1.eth0, host1.vxlan0, host2.eth0, host2.vxlan0
        ]

        for i, host in enumerate([host1, host2]):
            host.vxlan0.realdev = host.eth0
            host.vxlan0.ip_add(
                ipaddress(vxlan_net_addr + "." + str(i + 1) + "/24"))
            host.vxlan0.ip_add(
                ipaddress(vxlan_net_addr6 + "::" + str(i + 1) + "/64"))

        for host in [host1, host2]:
            host.eth0.up()
            host.vxlan0.up()

        self.wait_tentative_ips(configuration.test_wide_devices)

        return configuration

    def generate_test_wide_description(self, config):
        """
        Extend the inherited description with the configured IP addresses
        and the vxlan_id, remote and realdev settings of the tunnels.
        """
        host1, host2 = self.matched.host1, self.matched.host2
        desc = super().generate_test_wide_description(config)
        desc += [
            "\n".join([
                "Configured {}.{}.ips = {}".format(dev.host.hostid,
                                                   dev.name, dev.ips)
                for dev in config.test_wide_devices
            ]),
            "\n".join([
                "Configured {}.{}.vxlan_id = {}".format(
                    dev.host.hostid, dev.name, dev.vxlan_id)
                for dev in [host1.vxlan0, host2.vxlan0]
            ]),
            "\n".join([
                "Configured {}.{}.remote = {}".format(dev.host.hostid,
                                                      dev.name, dev.remote)
                for dev in [host1.vxlan0, host2.vxlan0]
            ]),
            "\n".join([
                "Configured {}.{}.realdev = {}".format(
                    dev.host.hostid, dev.name,
                    '.'.join([dev.host.hostid, dev.realdev.name]))
                for dev in [host1.vxlan0, host2.vxlan0]
            ])
        ]
        return desc

    def test_wide_deconfiguration(self, config):
        del config.test_wide_devices

        super().test_wide_deconfiguration(config)

    def generate_ping_endpoints(self, config):
        # Ping runs through the tunnel devices.
        return [
            PingEndpoints(self.matched.host1.vxlan0,
                          self.matched.host2.vxlan0)
        ]

    def generate_perf_endpoints(self, config):
        # Performance measurements also run over the tunnel endpoints.
        return [(self.matched.host1.vxlan0, self.matched.host2.vxlan0)]

    @property
    def mtu_hw_config_dev_list(self):
        # MTU is configured on the overlay devices.
        return [self.matched.host1.vxlan0, self.matched.host2.vxlan0]

    @property
    def dev_interrupt_hw_config_dev_list(self):
        # Interrupt tuning applies to the physical carrier NICs.
        return [self.matched.host1.eth0, self.matched.host2.eth0]

    @property
    def parallel_stream_qdisc_hw_config_dev_list(self):
        # Qdisc configuration applies to the physical carrier NICs.
        return [self.matched.host1.eth0, self.matched.host2.eth0]
class GeneveTunnelRecipe(PauseFramesHWConfigMixin, OffloadSubConfigMixin,
                         BaseTunnelRecipe):
    """
    This class implements a recipe that configures a simple Geneve tunnel
    between two hosts.

    .. code-block:: none

                        .--------.
                 .------| switch |-----.
                 |      '--------'     |
                 |                     |
         .-------|------.      .-------|------.
         |    .--'-.    |      |    .--'-.    |
         |    |eth0|    |      |    |eth0|    |
         |    '----'    |      |    '----'    |
         |      | |     |      |      | |     |
         |  ----' '---  |      |  ----' '---  |
         |  gnv tunnel  |      |  gnv tunnel  |
         |  ----------  |      |  ----------  |
         |              |      |              |
         |    host1     |      |    host2     |
         '--------------'      '--------------'

    The actual test machinery is implemented in the :any:`BaseEnrtRecipe`
    class.

    The test wide configuration is implemented in the
    :any:`BaseTunnelRecipe` class.

    The recipe provides additional parameter:

        :param carrier_ipversion:
            This parameter specifies whether IPv4 or IPv6 addresses are
            used for the underlying (carrier) network. The value is either
            **ipv4** or **ipv6**
    """

    host1 = HostReq()
    host1.eth0 = DeviceReq(label="net1", driver=RecipeParam("driver"))

    host2 = HostReq()
    host2.eth0 = DeviceReq(label="net1", driver=RecipeParam("driver"))

    # Offload settings exercised by the OffloadSubConfigMixin.
    offload_combinations = Param(default=(
        dict(gro="on", gso="on", tso="on"),
        dict(gro="off", gso="on", tso="on"),
        dict(gro="on", gso="off", tso="off"),
        dict(gro="on", gso="on", tso="off"),
    ))

    carrier_ipversion = ChoiceParam(type=StrParam,
                                    choices=set(["ipv4", "ipv6"]))

    def configure_underlying_network(self, configuration):
        """
        The underlying network for the tunnel consists of the Ethernet
        devices on the matched hosts.

        Depending on the ``carrier_ipversion`` parameter the devices get
        either 192.168.101.{1,2}/24 or fc00::{1,2}/64 addresses.
        """
        host1, host2 = self.matched.host1, self.matched.host2
        for i, device in enumerate([host1.eth0, host2.eth0]):
            if self.params.carrier_ipversion == "ipv4":
                device.ip_add(ipaddress("192.168.101." + str(i + 1) + "/24"))
            else:
                device.ip_add(ipaddress("fc00::" + str(i + 1) + "/64"))
            device.up()
            configuration.test_wide_devices.append(device)

        self.wait_tentative_ips(configuration.test_wide_devices)
        configuration.tunnel_endpoints = (host1.eth0, host2.eth0)

    def create_tunnel(self, configuration):
        """
        The Geneve tunnel devices are configured with IPv4 and IPv6
        addresses.

        Both tunnels use Geneve id 1234 and a reduced MTU of 1400 to leave
        headroom for the encapsulation overhead.
        """
        endpoint1, endpoint2 = configuration.tunnel_endpoints
        m1 = endpoint1.netns
        m2 = endpoint2.netns
        if self.params.carrier_ipversion == "ipv4":
            ip_filter = {"family": AF_INET}
        else:
            # Exclude link-local addresses so the tunnel binds to the
            # globally routed carrier address.
            ip_filter = {"family": AF_INET6, "is_link_local": False}

        endpoint1_ip = endpoint1.ips_filter(**ip_filter)[0]
        endpoint2_ip = endpoint2.ips_filter(**ip_filter)[0]

        a_ip4 = Ip4Address("20.0.0.10/8")
        b_ip4 = Ip4Address("20.0.0.20/8")

        a_ip6 = Ip6Address("fee0::10/64")
        b_ip6 = Ip6Address("fee0::20/64")

        m1.gnv_tunnel = GeneveDevice(remote=endpoint2_ip, id=1234)
        m2.gnv_tunnel = GeneveDevice(remote=endpoint1_ip, id=1234)

        # A
        m1.gnv_tunnel.mtu = 1400
        m1.gnv_tunnel.up()
        m1.gnv_tunnel.ip_add(a_ip4)
        m1.gnv_tunnel.ip_add(a_ip6)

        # B
        m2.gnv_tunnel.mtu = 1400
        m2.gnv_tunnel.up()
        m2.gnv_tunnel.ip_add(b_ip4)
        m2.gnv_tunnel.ip_add(b_ip6)

        configuration.tunnel_devices.extend([m1.gnv_tunnel, m2.gnv_tunnel])
        self.wait_tentative_ips(configuration.tunnel_devices)

    def generate_ping_endpoints(self, config):
        """
        The ping endpoints for this recipe are simply the tunnel endpoints

        Returned as::

            [PingEndpoints(self.matched.host1.gnv_tunnel,
                           self.matched.host2.gnv_tunnel)]
        """
        return [
            PingEndpoints(self.matched.host1.gnv_tunnel,
                          self.matched.host2.gnv_tunnel)
        ]

    def get_packet_assert_config(self, ping_config):
        """
        The packet assert test configuration contains filter for ip6
        protocol and grep patterns to match the ICMP or ICMP6 echo requests
        encapsulated by Geneve.
        """
        if self.params.carrier_ipversion == "ipv4":
            ip_filter = {"family": AF_INET}
        else:
            ip_filter = {"family": AF_INET6, "is_link_local": False}

        m1_carrier = self.matched.host1.eth0
        m2_carrier = self.matched.host2.eth0
        m1_carrier_ip = m1_carrier.ips_filter(**ip_filter)[0]
        m2_carrier_ip = m2_carrier.ips_filter(**ip_filter)[0]

        ip1 = ping_config.client_bind
        ip2 = ping_config.destination_address

        pa_kwargs = {}
        if self.params.carrier_ipversion == "ipv4":
            pa_kwargs["p_filter"] = "ip host {}".format(m1_carrier_ip)
            grep_pattern = "IP "
        else:
            pa_kwargs["p_filter"] = "ip6"
            grep_pattern = "IP6 "

        # 0x4d2 == 1234, the geneve id configured in create_tunnel().
        # BUGFIX: raw string literal -- the pattern is a grep regex and
        # "\." must reach grep as a literal backslash-dot; in a plain
        # string literal "\." is an invalid escape sequence that raises
        # SyntaxWarning on Python 3.12+.
        grep_pattern += r"{}\.[0-9]+ > {}\.[0-9]+: Geneve.*vni 0x4d2: ".format(
            m1_carrier_ip, m2_carrier_ip)

        if isinstance(ip2, Ip4Address):
            grep_pattern += "IP {} > {}: ICMP".format(ip1, ip2)
        elif isinstance(ip2, Ip6Address):
            grep_pattern += "IP6 {} > {}: ICMP6".format(ip1, ip2)

        pa_kwargs["grep_for"] = [grep_pattern]

        if ping_config.count:
            pa_kwargs["p_min"] = ping_config.count
        m2 = ping_config.destination
        pa_config = PacketAssertConf(m2, m2_carrier, **pa_kwargs)

        return pa_config

    @property
    def offload_nics(self):
        # Offloads are toggled on the physical carrier NICs.
        return [self.matched.host1.eth0, self.matched.host2.eth0]

    @property
    def pause_frames_dev_list(self):
        # Pause-frame settings apply to the physical carrier NICs.
        return [self.matched.host1.eth0, self.matched.host2.eth0]
class L2TPTunnelRecipe(PauseFramesHWConfigMixin, BaseTunnelRecipe):
    """
    This class implements a recipe that configures a simple L2TP tunnel
    with one tunnel session between two hosts.

    .. code-block:: none

                        .--------.
                 .------| switch |-----.
                 |      '--------'     |
                 |                     |
         .-------|------.      .-------|------.
         |    .--'-.    |      |    .--'-.    |
         |    |eth0|    |      |    |eth0|    |
         |    '----'    |      |    '----'    |
         |      | |     |      |      | |     |
         |  ----' '---  |      |  ----' '---  |
         | L2TP tunnel  |      | L2TP tunnel  |
         |  ----------  |      |  ----------  |
         |              |      |              |
         |    host1     |      |    host2     |
         '--------------'      '--------------'

    The actual test machinery is implemented in the :any:`BaseEnrtRecipe`
    class.

    The test wide configuration is implemented in the
    :any:`BaseTunnelRecipe` class.

    The recipe provides additional parameter:

        :param carrier_ipversion:
            This parameter specifies whether IPv4 or IPv6 addresses are
            used for the underlying (carrier) network. The value is either
            **ipv4** or **ipv6**

        :param l2tp_encapsulation:
            (mandatory test parameter) the encapsulation mode for the L2TP
            tunnel, can be either **udp** or **ip**
    """

    host1 = HostReq()
    host1.eth0 = DeviceReq(label="net1", driver=RecipeParam("driver"))

    host2 = HostReq()
    host2.eth0 = DeviceReq(label="net1", driver=RecipeParam("driver"))

    carrier_ipversion = ChoiceParam(type=StrParam,
                                    choices=set(["ipv4", "ipv6"]))
    l2tp_encapsulation = ChoiceParam(type=StrParam,
                                     choices=set(["udp", "ip"]),
                                     mandatory=True)

    def configure_underlying_network(self, configuration):
        """
        The underlying network for the tunnel consists of the Ethernet
        devices on the matched hosts.

        Depending on the ``carrier_ipversion`` parameter the devices get
        either 192.168.200.{1,2}/24 or fc00::{1,2}/64 addresses.
        """
        host1 = self.matched.host1
        host2 = self.matched.host2
        for i, device in enumerate([host1.eth0, host2.eth0]):
            if self.params.carrier_ipversion == "ipv4":
                device.ip_add("192.168.200." + str(i + 1) + "/24")
            else:
                device.ip_add("fc00::" + str(i + 1) + "/64")
            device.up()
            configuration.test_wide_devices.append(device)

        self.wait_tentative_ips(configuration.test_wide_devices)
        configuration.tunnel_endpoints = (host1.eth0, host2.eth0)

    def create_tunnel(self, configuration):
        """
        One L2TP tunnel is configured on both hosts using the
        :any:`L2TPManager`. Each host configures one L2TP session for the
        tunnel. IPv4 addresses are assigned to the l2tp session devices.
        """
        endpoint1, endpoint2 = configuration.tunnel_endpoints
        host1 = endpoint1.netns
        host2 = endpoint2.netns
        if self.params.carrier_ipversion == "ipv4":
            ip_filter = {"family": AF_INET}
        else:
            # Exclude link-local addresses so the tunnel binds to the
            # globally routed carrier address.
            ip_filter = {"family": AF_INET6, "is_link_local": False}
        endpoint1_ip = endpoint1.ips_filter(**ip_filter)[0]
        endpoint2_ip = endpoint2.ips_filter(**ip_filter)[0]

        # Ethernet-over-L2TP sessions require the l2tp_eth module.
        for host in [host1, host2]:
            host.run("modprobe l2tp_eth")

        host1.l2tp = host1.init_class(L2TPManager)
        host2.l2tp = host2.init_class(L2TPManager)

        # Symmetric tunnel configuration: both sides use tunnel id 1000
        # and, for udp encapsulation, port 5000 on both ends.
        host1.l2tp.create_tunnel(
            tunnel_id=1000,
            peer_tunnel_id=1000,
            encap=self.params.l2tp_encapsulation,
            local=str(endpoint1_ip),
            remote=str(endpoint2_ip),
            udp_sport=5000,
            udp_dport=5000,
        )
        host2.l2tp.create_tunnel(
            tunnel_id=1000,
            peer_tunnel_id=1000,
            encap=self.params.l2tp_encapsulation,
            local=str(endpoint2_ip),
            remote=str(endpoint1_ip),
            udp_sport=5000,
            udp_dport=5000,
        )
        host1.l2tp_session1 = L2TPSessionDevice(
            tunnel_id=1000,
            session_id=2000,
            peer_session_id=2000,
        )
        host2.l2tp_session1 = L2TPSessionDevice(
            tunnel_id=1000,
            session_id=2000,
            peer_session_id=2000,
        )

        for device in [host1.l2tp_session1, host2.l2tp_session1]:
            device.up()

        # Point-to-point addressing on the session devices.
        ip1 = "10.42.1.1/8"
        ip2 = "10.42.1.2/8"
        host1.l2tp_session1.ip_add(ip1, peer=ip2)
        host2.l2tp_session1.ip_add(ip2, peer=ip1)
        configuration.tunnel_devices.extend(
            [host1.l2tp_session1, host2.l2tp_session1])

    def test_wide_deconfiguration(self, config):
        # The L2TP tunnels are not plain Device objects, so they need an
        # explicit cleanup through the manager.
        for host in [self.matched.host1, self.matched.host2]:
            host.l2tp.cleanup()

        super().test_wide_deconfiguration(config)

    def generate_ping_endpoints(self, config):
        """
        The ping endpoints for this recipe are simply the tunnel endpoints

        Returned as::

            [PingEndpoints(self.matched.host1.l2tp_session1,
                           self.matched.host2.l2tp_session1)]
        """
        return [
            PingEndpoints(self.matched.host1.l2tp_session1,
                          self.matched.host2.l2tp_session1)
        ]

    def get_packet_assert_config(self, ping_config):
        """
        Build a :any:`PacketAssertConf` matching the encapsulated traffic
        on the second host's carrier NIC, according to the configured
        encapsulation mode and carrier IP version.
        """
        pa_kwargs = {}

        if self.params.carrier_ipversion == "ipv4":
            ip_filter = {"family": AF_INET}
        else:
            ip_filter = {"family": AF_INET6, "is_link_local": False}

        m1_carrier = self.matched.host1.eth0
        m2_carrier = self.matched.host2.eth0
        m1_carrier_ip = m1_carrier.ips_filter(**ip_filter)[0]
        m2_carrier_ip = m2_carrier.ips_filter(**ip_filter)[0]

        """
        encap udp:
        IP 192.168.200.1.5000 > 192.168.200.2.5000: UDP
        encap ip:
        192.168.200.1 > 192.168.200.2: ip-proto-115 106
        """
        if self.params.l2tp_encapsulation == "ip":
            # L2TPv3 over IP uses protocol number 115.
            pa_kwargs["p_filter"] = "{} proto 115".format(
                "ip" if self.params.carrier_ipversion == "ipv4" else "ip6",
            )
            grep_pattern = "{} {} > {}:[ ]*ip-proto-115".format(
                "IP" if self.params.carrier_ipversion == "ipv4" else "IP6",
                m1_carrier_ip,
                m2_carrier_ip,
            )
        elif self.params.l2tp_encapsulation == "udp":
            pa_kwargs["p_filter"] = "udp"
            grep_pattern = "{} {}.[0-9]+ > {}.[0-9]+:[ ]*UDP".format(
                "IP" if self.params.carrier_ipversion == "ipv4" else "IP6",
                m1_carrier_ip,
                m2_carrier_ip,
            )

        pa_kwargs["grep_for"] = [grep_pattern]

        if ping_config.count:
            pa_kwargs["p_min"] = ping_config.count
        m2 = ping_config.destination
        pa_config = PacketAssertConf(m2, m2_carrier, **pa_kwargs)

        return pa_config

    @property
    def pause_frames_dev_list(self):
        # Pause-frame settings apply to the physical carrier NICs.
        return [self.matched.host1.eth0, self.matched.host2.eth0]
class VlansOverBondRecipe(PerfReversibleFlowMixin, VlanPingEvaluatorMixin,
                          CommonHWSubConfigMixin, OffloadSubConfigMixin,
                          BaremetalEnrtRecipe):
    """
    This recipe implements Enrt testing for a network scenario that looks
    as follows

    .. code-block:: none

                                     .--------.
                .-------------------+ switch  +--------.
                |          .--------+         |        |
                |          |        '--------'         |
          .-----|----------|----.                      |
          | .---'--.    .--'---. |               .-----'--.
        .-|-| eth0 |----| eth1 |-|-.     .-------| eth0   |------.
        | | '------'    '------' | |     |       '--------'      |
        | |        bond0         | |     |      /    |    \      |
        | '-------/--|--\--------' |     |  vlan0  vlan1  vlan2  |
        |        /   |   \         |     |  id=10  id=20  id=30  |
        |    vlan0 vlan1 vlan2     |     |                       |
        |    id=10 id=20 id=30     |     |                       |
        |                          |     |                       |
        |          host1           |     |        host2          |
        '--------------------------'     '-----------------------'

    The recipe provides additional recipe parameters to configure the
    bonding device.

        :param bonding_mode:
            (mandatory test parameter) the bonding mode to be configured on
            the bond0 device
        :param miimon_value:
            (mandatory test parameter) the miimon interval to be configured
            on the bond0 device

    The VLAN ids are configurable through the vlan0_id, vlan1_id and
    vlan2_id parameters (defaults: 10, 20, 30).

    All sub configurations are included via Mixin classes.

    The actual test machinery is implemented in the :any:`BaseEnrtRecipe`
    class.
    """

    host1 = HostReq()
    host1.eth0 = DeviceReq(label="net1", driver=RecipeParam("driver"))
    host1.eth1 = DeviceReq(label="net1", driver=RecipeParam("driver"))

    host2 = HostReq()
    host2.eth0 = DeviceReq(label="net1", driver=RecipeParam("driver"))

    # VLAN ids shared by both hosts' tunnels.
    vlan0_id = IntParam(default=10)
    vlan1_id = IntParam(default=20)
    vlan2_id = IntParam(default=30)

    # Offload settings exercised by the OffloadSubConfigMixin.
    offload_combinations = Param(default=(
        dict(gro="on", gso="on", tso="on", tx="on"),
        dict(gro="off", gso="on", tso="on", tx="on"),
        dict(gro="on", gso="off", tso="off", tx="on"),
        dict(gro="on", gso="on", tso="off", tx="off")))

    bonding_mode = StrParam(mandatory=True)
    miimon_value = IntParam(mandatory=True)

    def test_wide_configuration(self):
        """
        Test wide configuration for this recipe involves creating one
        bonding device on the first host. This device bonds two NICs
        matched by the recipe. The bonding mode and miimon interval is
        configured on the bonding device according to the recipe
        parameters. Then three VLAN (802.1Q) tunnels are created on top
        of the bonding device on the first host and on the matched NIC
        on the second host. The tunnels are configured with VLAN ids
        from vlan0_id, vlan1_id and vlan2_id params (by default: 10, 20,
        30).

        An IPv4 and IPv6 address is configured on each tunnel endpoint.

        | host1.vlan0 = 192.168.10.1/24 and fc00:0:0:1::1/64
        | host1.vlan1 = 192.168.20.1/24 and fc00:0:0:2::1/64
        | host1.vlan2 = 192.168.30.1/24 and fc00:0:0:3::1/64

        | host2.vlan0 = 192.168.10.2/24 and fc00:0:0:1::2/64
        | host2.vlan1 = 192.168.20.2/24 and fc00:0:0:2::2/64
        | host2.vlan2 = 192.168.30.2/24 and fc00:0:0:3::2/64
        """
        host1, host2 = self.matched.host1, self.matched.host2

        host1.bond0 = BondDevice(mode=self.params.bonding_mode,
                                 miimon=self.params.miimon_value)
        for dev in [host1.eth0, host1.eth1]:
            dev.down()
            host1.bond0.slave_add(dev)

        # host1 stacks the VLANs on the bond, host2 directly on its NIC.
        host1.vlan0 = VlanDevice(realdev=host1.bond0,
                                 vlan_id=self.params.vlan0_id)
        host1.vlan1 = VlanDevice(realdev=host1.bond0,
                                 vlan_id=self.params.vlan1_id)
        host1.vlan2 = VlanDevice(realdev=host1.bond0,
                                 vlan_id=self.params.vlan2_id)
        host2.vlan0 = VlanDevice(realdev=host2.eth0,
                                 vlan_id=self.params.vlan0_id)
        host2.vlan1 = VlanDevice(realdev=host2.eth0,
                                 vlan_id=self.params.vlan1_id)
        host2.vlan2 = VlanDevice(realdev=host2.eth0,
                                 vlan_id=self.params.vlan2_id)

        configuration = super().test_wide_configuration()
        configuration.test_wide_devices = []
        for host in [host1, host2]:
            configuration.test_wide_devices.extend([host.vlan0, host.vlan1,
                                                    host.vlan2])
        configuration.test_wide_devices.append(host1.bond0)

        net_addr = "192.168"
        net_addr6 = "fc00:0:0"
        for i, host in enumerate([host1, host2]):
            host.vlan0.ip_add(ipaddress('{}.10.{}/24'.format(net_addr,
                                                             i+1)))
            host.vlan1.ip_add(ipaddress('{}.20.{}/24'.format(net_addr,
                                                             i+1)))
            host.vlan2.ip_add(ipaddress('{}.30.{}/24'.format(net_addr,
                                                             i+1)))
            host.vlan0.ip_add(ipaddress('{}:1::{}/64'.format(net_addr6,
                                                             i+1)))
            host.vlan1.ip_add(ipaddress('{}:2::{}/64'.format(net_addr6,
                                                             i+1)))
            host.vlan2.ip_add(ipaddress('{}:3::{}/64'.format(net_addr6,
                                                             i+1)))

        for dev in [host1.eth0, host1.eth1, host1.bond0, host1.vlan0,
                    host1.vlan1, host1.vlan2, host2.eth0, host2.vlan0,
                    host2.vlan1, host2.vlan2]:
            dev.up()

        self.wait_tentative_ips(configuration.test_wide_devices)

        return configuration

    def generate_test_wide_description(self, config):
        """
        Test wide description is extended with the configured VLAN
        tunnels, their IP addresses and the bonding device configuration.
        """
        host1, host2 = self.matched.host1, self.matched.host2
        desc = super().generate_test_wide_description(config)
        desc += [
            "\n".join([
                "Configured {}.{}.ips = {}".format(
                    dev.host.hostid, dev.name, dev.ips
                )
                for dev in config.test_wide_devices if isinstance(dev,
                                                                  Vlan)
            ]),
            "\n".join([
                "Configured {}.{}.vlan_id = {}".format(
                    dev.host.hostid, dev.name, dev.vlan_id
                )
                for dev in config.test_wide_devices if isinstance(dev,
                                                                  Vlan)
            ]),
            "\n".join([
                "Configured {}.{}.realdev = {}".format(
                    dev.host.hostid, dev.name,
                    '.'.join([dev.host.hostid, dev.realdev.name])
                )
                for dev in config.test_wide_devices if isinstance(dev,
                                                                  Vlan)
            ]),
            "Configured {}.{}.slaves = {}".format(
                host1.hostid, host1.bond0.name,
                ['.'.join([host1.hostid, slave.name])
                 for slave in host1.bond0.slaves]
            ),
            "Configured {}.{}.mode = {}".format(
                host1.hostid, host1.bond0.name,
                host1.bond0.mode
            ),
            "Configured {}.{}.miimon = {}".format(
                host1.hostid, host1.bond0.name,
                host1.bond0.miimon
            )
        ]
        return desc

    def test_wide_deconfiguration(self, config):
        del config.test_wide_devices

        super().test_wide_deconfiguration(config)

    def generate_ping_endpoints(self, config):
        """
        The ping endpoints for this recipe are the matching VLAN tunnel
        endpoints of the hosts.

        Returned as::

            [PingEndpoints(host1.vlan0, host2.vlan0),
             PingEndpoints(host1.vlan1, host2.vlan1),
             PingEndpoints(host1.vlan2, host2.vlan2)]
        """
        host1, host2 = self.matched.host1, self.matched.host2
        return [PingEndpoints(host1.vlan0, host2.vlan0),
                PingEndpoints(host1.vlan1, host2.vlan1),
                PingEndpoints(host1.vlan2, host2.vlan2)]

    def generate_perf_endpoints(self, config):
        """
        The perf endpoints for this recipe are the VLAN tunnel endpoints
        with VLAN id from parameter vlan0_id (by default: 10):

        host1.vlan0 and host2.vlan0

        Returned as::

            [(self.matched.host1.vlan0, self.matched.host2.vlan0)]
        """
        return [(self.matched.host1.vlan0, self.matched.host2.vlan0)]

    @property
    def offload_nics(self):
        """
        The `offload_nics` property value for this scenario is a list of
        the physical devices carrying data of the configured VLAN tunnels:

        host1.eth0, host1.eth1 and host2.eth0

        For detailed explanation of this property see
        :any:`OffloadSubConfigMixin` class and
        :any:`OffloadSubConfigMixin.offload_nics`.
        """
        host1, host2 = self.matched.host1, self.matched.host2
        return [host1.eth0, host1.eth1, host2.eth0]

    @property
    def mtu_hw_config_dev_list(self):
        """
        The `mtu_hw_config_dev_list` property value for this scenario is a
        list of all configured VLAN tunnel devices and the underlying
        bonding or physical devices:

        | host1.bond0, host1.vlan0, host1.vlan1, host1.vlan2
        | host2.eth0, host2.vlan0, host2.vlan1, host2.vlan2

        For detailed explanation of this property see
        :any:`MTUHWConfigMixin` class and
        :any:`MTUHWConfigMixin.mtu_hw_config_dev_list`.
        """
        host1, host2 = self.matched.host1, self.matched.host2
        result = []
        for host in [host1, host2]:
            for dev in [host.vlan0, host.vlan1, host.vlan2]:
                result.append(dev)
        result.extend([host1.bond0, host2.eth0])
        return result

    @property
    def coalescing_hw_config_dev_list(self):
        """
        The `coalescing_hw_config_dev_list` property value for this
        scenario is a list of the physical devices carrying data of the
        configured VLAN tunnels:

        host1.eth0, host1.eth1 and host2.eth0

        For detailed explanation of this property see
        :any:`CoalescingHWConfigMixin` class and
        :any:`CoalescingHWConfigMixin.coalescing_hw_config_dev_list`.
        """
        host1, host2 = self.matched.host1, self.matched.host2
        return [host1.eth0, host1.eth1, host2.eth0]

    @property
    def dev_interrupt_hw_config_dev_list(self):
        """
        The `dev_interrupt_hw_config_dev_list` property value for this
        scenario is a list of the physical devices carrying data of the
        configured VLAN tunnels:

        host1.eth0, host1.eth1 and host2.eth0

        For detailed explanation of this property see
        :any:`DevInterruptHWConfigMixin` class and
        :any:`DevInterruptHWConfigMixin.dev_interrupt_hw_config_dev_list`.
        """
        host1, host2 = self.matched.host1, self.matched.host2
        return [host1.eth0, host1.eth1, host2.eth0]

    @property
    def parallel_stream_qdisc_hw_config_dev_list(self):
        """
        The `parallel_stream_qdisc_hw_config_dev_list` property value for
        this scenario is a list of the physical devices carrying data of
        the configured VLAN tunnels:

        host1.eth0, host1.eth1 and host2.eth0

        For detailed explanation of this property see
        :any:`ParallelStreamQDiscHWConfigMixin` class and
        :any:`ParallelStreamQDiscHWConfigMixin.parallel_stream_qdisc_hw_config_dev_list`.
        """
        host1, host2 = self.matched.host1, self.matched.host2
        return [host1.eth0, host1.eth1, host2.eth0]

    @property
    def pause_frames_dev_list(self):
        """
        The `pause_frames_dev_list` property value for this scenario is a
        list of the physical devices carrying data of the configured VLAN
        tunnels:

        host1.eth0, host1.eth1 and host2.eth0

        For detailed explanation of this property see
        :any:`PauseFramesHWConfigMixin` and
        :any:`PauseFramesHWConfigMixin.pause_frames_dev_list`.
        """
        host1, host2 = self.matched.host1, self.matched.host2
        return [host1.eth0, host1.eth1, host2.eth0]
class IpsecEspAeadRecipe(CommonHWSubConfigMixin, BaseEnrtRecipe,
                         PacketAssertTestAndEvaluate):
    """
    This recipe implements Enrt testing for a simple IPsec scenario that
    looks as follows

    .. code-block:: none

                        +--------+
                 +------+ switch +-----+
                 |      +--------+     |
              +--+-+                 +-+--+
        +-----|eth0|-----+     +-----|eth0|-----+
        |     +----+     |     |     +----+     |
        |      host1     |     |      host2     |
        +----------------+     +----------------+

    The recipe provides additional recipe parameter to configure IPsec
    tunnel.

        :param ipsec_mode:
            mode of the ipsec tunnel

    All sub configurations are included via Mixin classes.

    The actual test machinery is implemented in the :any:`BaseEnrtRecipe`
    class.
    """

    host1 = HostReq()
    host1.eth0 = DeviceReq(label="to_switch", driver=RecipeParam("driver"))

    host2 = HostReq()
    host2.eth0 = DeviceReq(label="to_switch", driver=RecipeParam("driver"))

    # (algorithm name, key length in bits, ICV length in bits)
    algorithm = [('rfc4106(gcm(aes))', 160, 96)]
    # SPIs used for the two directions of the security associations.
    spi_values = ["0x00001000", "0x00001001"]

    ipsec_mode = StrParam(default="transport")

    def test_wide_configuration(self):
        """
        Test wide configuration for this recipe involves adding an IPv4 and
        IPv6 address to the matched eth0 nics on both hosts and a route
        between them. Each host sits in its own subnet:

        | host1.eth0 = 192.168.99.1/24 and fc00:1::1/64
        | host2.eth0 = 192.168.100.1/24 and fc00:2::1/64
        """
        host1, host2 = self.matched.host1, self.matched.host2
        configuration = super().test_wide_configuration()
        configuration.test_wide_devices = [host1.eth0, host2.eth0]

        net_addr = "192.168."
        net_addr6 = "fc00:"
        for i, host in enumerate([host1, host2]):
            host.eth0.down()
            host.eth0.ip_add(ipaddress(net_addr + str(i + 99) + ".1/24"))
            host.eth0.ip_add(ipaddress(net_addr6 + str(i + 1) + "::1/64"))
            host.eth0.up()
        self.wait_tentative_ips(configuration.test_wide_devices)

        if self.params.ping_parallel or self.params.ping_bidirect:
            logging.debug("Parallelism in pings is not supported for this "
                          "recipe, ping_parallel/ping_bidirect will be "
                          "ignored.")

        # The hosts are in different subnets, so explicit host routes via
        # the NICs are needed for both address families.
        for host, dst in [(host1, host2), (host2, host1)]:
            for family in [AF_INET, AF_INET6]:
                host.run("ip route add %s dev %s" %
                         (dst.eth0.ips_filter(family=family)[0],
                          host.eth0.name))

        configuration.endpoint1 = host1.eth0
        configuration.endpoint2 = host2.eth0

        return configuration

    def generate_test_wide_description(self, config):
        """
        Test wide description is extended with the configured IP addresses,
        specified IPsec algorithm, key length and integrity check value
        length.
        """
        desc = super().generate_test_wide_description(config)
        # BUGFIX: the two sections used to be chained via
        # "\n".join([ips...]).join([algos...]), which used the IP section
        # as a str.join *separator* -- with the single-entry `algorithm`
        # list the IP information was silently dropped from the
        # description. Append them as separate description entries.
        desc += [
            "\n".join([
                f"Configured {dev.host.hostid}.{dev.name}.ips = {dev.ips}"
                for dev in config.test_wide_devices
            ]),
            "\n".join([
                f"Configured IPsec {self.params.ipsec_mode} mode with {algo} algorithm "
                f"using key length of {key_len} and icv length of {icv_len}"
                for algo, key_len, icv_len in self.algorithm
            ]),
        ]
        return desc

    def test_wide_deconfiguration(self, config):
        del config.test_wide_devices

        super().test_wide_deconfiguration(config)

    def generate_sub_configurations(self, config):
        """
        Test wide configuration is extended with subconfiguration
        containing IPsec tunnel with predefined parameters for both IP
        versions.
        """
        ipsec_mode = self.params.ipsec_mode
        spi_values = self.spi_values
        for subconf in ConfMixin.generate_sub_configurations(self, config):
            for ipv in self.params.ip_versions:
                if ipv == "ipv4":
                    family = AF_INET
                elif ipv == "ipv6":
                    family = AF_INET6

                ip1 = subconf.endpoint1.ips_filter(family=family)[0]
                ip2 = subconf.endpoint2.ips_filter(family=family)[0]

                for algo, key_len, icv_len in self.algorithm:
                    # A fresh key per generated subconfiguration.
                    g_key = generate_key(key_len)
                    new_config = copy.copy(subconf)
                    new_config.ips = (ip1, ip2)
                    new_config.ipsec_settings = (algo, g_key, icv_len,
                                                 ipsec_mode, spi_values)
                    yield new_config

    def apply_sub_configuration(self, config):
        """
        Subconfiguration containing IPsec tunnel is applied through
        XfrmTools class.
        """
        super().apply_sub_configuration(config)
        ns1, ns2 = config.endpoint1.netns, config.endpoint2.netns
        ip1, ip2 = config.ips
        ipsec_sets = config.ipsec_settings
        configure_ipsec_esp_aead(ns1, ip1, ns2, ip2, *ipsec_sets)

    def remove_sub_configuration(self, config):
        """Flush the xfrm state/policy on both hosts and deconfigure."""
        ns1, ns2 = config.endpoint1.netns, config.endpoint2.netns
        for ns in (ns1, ns2):
            ns.run("ip xfrm policy flush")
            ns.run("ip xfrm state flush")
        super().remove_sub_configuration(config)

    def generate_ping_configurations(self, config):
        """
        The ping endpoints for this recipe are the configured endpoints of
        the IPsec tunnel on both hosts.
        """
        ns1, ns2 = config.endpoint1.netns, config.endpoint2.netns
        ip1, ip2 = config.ips
        count = self.params.ping_count
        interval = self.params.ping_interval
        size = self.params.ping_psize
        common_args = {'count': count, 'interval': interval, 'size': size}
        ping_conf = PingConf(client=ns1,
                             client_bind=ip1,
                             destination=ns2,
                             destination_address=ip2,
                             **common_args)
        yield [ping_conf]

    def generate_flow_combinations(self, config):
        """
        Flow combinations are generated based on the tunnel endpoints and
        test parameters.
        """
        nic1, nic2 = config.endpoint1, config.endpoint2
        ns1, ns2 = config.endpoint1.netns, config.endpoint2.netns
        ip1, ip2 = config.ips
        for perf_test in self.params.perf_tests:
            for size in self.params.perf_msg_sizes:
                flow = PerfFlow(
                    type=perf_test,
                    generator=ns1,
                    generator_bind=ip1,
                    generator_nic=nic1,
                    receiver=ns2,
                    receiver_bind=ip2,
                    receiver_nic=nic2,
                    msg_size=size,
                    duration=self.params.perf_duration,
                    parallel_streams=self.params.perf_parallel_streams,
                    cpupin=self.params.perf_tool_cpu if (
                        "perf_tool_cpu" in self.params) else None
                )
                yield [flow]

                if ("perf_reverse" in self.params and
                        self.params.perf_reverse):
                    reverse_flow = self._create_reverse_flow(flow)
                    yield [reverse_flow]

    def ping_test(self, ping_configs):
        """
        Ping test is utilizing PacketAssert class to search for the
        appropriate ESP IP packet. Result of ping test is handed to the
        super class' method.

        Returned as::

            (ping_result, pa_config, pa_result)
        """
        m1, m2 = ping_configs[0].client, ping_configs[0].destination
        ip1, ip2 = (ping_configs[0].client_bind,
                    ping_configs[0].destination_address)
        if1_name = self.get_dev_by_ip(m1, ip1).name
        if2 = self.get_dev_by_ip(m2, ip2)

        pa_kwargs = {}
        pa_kwargs["p_filter"] = "esp"
        # BUGFIX: raw string -- "\(" in a plain literal is an invalid
        # escape sequence (SyntaxWarning since Python 3.12); the backslash
        # must reach the grep regex as a literal escape for "(".
        pa_kwargs["grep_for"] = [r'ESP\(spi=' + self.spi_values[1]]
        if ping_configs[0].count:
            pa_kwargs["p_min"] = ping_configs[0].count
        pa_config = PacketAssertConf(m2, if2, **pa_kwargs)

        dump = m1.run("tcpdump -i %s -nn -vv" % if1_name, bg=True)
        self.packet_assert_test_start(pa_config)
        self.ctl.wait(2)
        ping_result = super().ping_test(ping_configs)
        self.ctl.wait(2)
        pa_result = self.packet_assert_test_stop()
        dump.kill(signal=signal.SIGINT)

        return (ping_result, pa_config, pa_result)

    def ping_report_and_evaluate(self, results):
        """Report the ping result and evaluate the packet assert result."""
        super().ping_report_and_evaluate(results[0])
        self.packet_assert_evaluate_and_report(results[1], results[2])

    def get_dev_by_ip(self, netns, ip):
        """
        Return the device in *netns* configured with *ip*.

        :raises LnstError: when no device in the netns carries the address
        """
        for dev in netns.device_database:
            if ip in dev.ips:
                return dev
        raise LnstError("Could not match ip %s to any device of %s."
                        % (ip, netns.name))

    @property
    def mtu_hw_config_dev_list(self):
        return [self.matched.host1.eth0, self.matched.host2.eth0]

    @property
    def dev_interrupt_hw_config_dev_list(self):
        return [self.matched.host1.eth0, self.matched.host2.eth0]

    @property
    def parallel_stream_qdisc_hw_config_dev_list(self):
        return [self.matched.host1.eth0, self.matched.host2.eth0]
class SitTunnelRecipe(MTUHWConfigMixin, PauseFramesHWConfigMixin,
                      BaseTunnelRecipe):
    """
    This class implements a recipe that configures a simple SIT tunnel
    between two hosts.

    .. code-block:: none

                        .--------.
                 .------| switch |-----.
                 |      '--------'     |
                 |                     |
         .-------|------.      .-------|------.
         |    .--'-.    |      |    .--'-.    |
         |    |eth0|    |      |    |eth0|    |
         |    '----'    |      |    '----'    |
         |      | |     |      |      | |     |
         |  ----' '---  |      |  ----' '---  |
         |  sit tunnel  |      |  sit tunnel  |
         |  ----------  |      |  ----------  |
         |              |      |              |
         |    host1     |      |    host2     |
         '--------------'      '--------------'

    The actual test machinery is implemented in the :any:`BaseEnrtRecipe`
    class.

    The test wide configuration is implemented in the
    :any:`BaseTunnelRecipe` class.

    The recipe provides additional parameter:

        :param tunnel_mode:
            this parameter specifies the mode of the SIT tunnel, can be
            any of the **any**, **ip6ip**, **ipip** or **mplsip**
    """

    host1 = HostReq()
    host1.eth0 = DeviceReq(label="net1", driver=RecipeParam("driver"))

    host2 = HostReq()
    host2.eth0 = DeviceReq(label="net1", driver=RecipeParam("driver"))

    tunnel_mode = ChoiceParam(type=StrParam,
                              choices=set(["any", "ip6ip", "ipip",
                                           "mplsip"]),
                              mandatory=True)

    def configure_underlying_network(self, configuration):
        """
        The underlying network for the tunnel consists of the Ethernet
        devices on the matched hosts; they get 192.168.101.{1,2}/24.
        """
        host1, host2 = self.matched.host1, self.matched.host2
        for i, device in enumerate([host1.eth0, host2.eth0]):
            device.ip_add(ipaddress("192.168.101." + str(i + 1) + "/24"))
            device.up()
            configuration.test_wide_devices.append(device)

        configuration.tunnel_endpoints = (host1.eth0, host2.eth0)

    def create_tunnel(self, configuration):
        """
        The SIT tunnel devices are configured with IPv4 and IPv6 addresses
        of individual networks. Routes are configured accordingly.
        """
        endpoint1, endpoint2 = configuration.tunnel_endpoints
        m1 = endpoint1.netns
        m2 = endpoint2.netns
        ip_filter = {"family": AF_INET}
        endpoint1_ip = endpoint1.ips_filter(**ip_filter)[0]
        endpoint2_ip = endpoint2.ips_filter(**ip_filter)[0]

        # Each side gets its own overlay networks; routes to the peer's
        # networks are sent through the tunnel device below.
        a_ip4 = Ip4Address("192.168.6.2/24")
        a_net4 = "192.168.6.0/24"
        b_ip4 = Ip4Address("192.168.7.2/24")
        b_net4 = "192.168.7.0/24"

        a_ip6 = Ip6Address("6001:db8:ac10:fe01::2/64")
        a_net6 = "6001:db8:ac10:fe01::0/64"
        b_ip6 = Ip6Address("7001:db8:ac10:fe01::2/64")
        b_net6 = "7001:db8:ac10:fe01::0/64"

        m1.sit_tunnel = SitDevice(local=endpoint1_ip, remote=endpoint2_ip,
                                  mode=self.params.tunnel_mode)
        m2.sit_tunnel = SitDevice(local=endpoint2_ip, remote=endpoint1_ip,
                                  mode=self.params.tunnel_mode)

        # A
        m1.sit_tunnel.up()
        m1.sit_tunnel.ip_add(a_ip4)
        m1.sit_tunnel.ip_add(a_ip6)
        m1.run("ip -4 route add {} dev {}".format(b_net4,
                                                  m1.sit_tunnel.name))
        m1.run("ip -6 route add {} dev {}".format(b_net6,
                                                  m1.sit_tunnel.name))

        # B
        m2.sit_tunnel.up()
        m2.sit_tunnel.ip_add(b_ip4)
        m2.sit_tunnel.ip_add(b_ip6)
        m2.run("ip -4 route add {} dev {}".format(a_net4,
                                                  m2.sit_tunnel.name))
        m2.run("ip -6 route add {} dev {}".format(a_net6,
                                                  m2.sit_tunnel.name))

        configuration.tunnel_devices.extend([m1.sit_tunnel, m2.sit_tunnel])
        self.wait_tentative_ips(configuration.tunnel_devices)

    def generate_ping_endpoints(self, config):
        """
        The ping endpoints for this recipe are simply the tunnel endpoints

        Returned as::

            [PingEndpoints(self.matched.host1.sit_tunnel,
                           self.matched.host2.sit_tunnel)]
        """
        return [
            PingEndpoints(self.matched.host1.sit_tunnel,
                          self.matched.host2.sit_tunnel)
        ]

    def get_packet_assert_config(self, ping_config):
        """
        The packet assert test configuration contains filter for ip6
        protocol and grep patterns to match the ICMP or ICMP6 echo
        requests encapsulated by SIT.
        """
        ip_filter = {"family": AF_INET}
        m1_carrier = self.matched.host1.eth0
        m2_carrier = self.matched.host2.eth0
        m1_carrier_ip = m1_carrier.ips_filter(**ip_filter)[0]
        m2_carrier_ip = m2_carrier.ips_filter(**ip_filter)[0]

        ip1 = ping_config.client_bind
        ip2 = ping_config.destination_address

        pa_kwargs = {}
        pa_kwargs["p_filter"] = "ip host {}".format(m1_carrier_ip)

        # TODO: handle mplsip mode
        if isinstance(ip2, Ip4Address):
            grep_pattern = [
                "IP {} > {}: IP {} > {}: ICMP".format(m1_carrier_ip,
                                                      m2_carrier_ip,
                                                      ip1, ip2)
            ]
        elif isinstance(ip2, Ip6Address):
            grep_pattern = [
                "IP {} > {}: IP6 {} > {}: ICMP6".format(
                    m1_carrier_ip, m2_carrier_ip, ip1, ip2)
            ]

        pa_kwargs["grep_for"] = grep_pattern

        if ping_config.count:
            pa_kwargs["p_min"] = ping_config.count
        m2 = ping_config.destination
        pa_config = PacketAssertConf(m2, m2_carrier, **pa_kwargs)

        return pa_config

    @property
    def pause_frames_dev_list(self):
        # Pause-frame settings apply to the physical carrier NICs.
        return [self.matched.host1.eth0, self.matched.host2.eth0]

    @property
    def mtu_hw_config_dev_list(self):
        # MTU is configured on the tunnel devices.
        return [self.matched.host1.sit_tunnel,
                self.matched.host2.sit_tunnel]
class VirtualBridgeVlansOverBondRecipe(VlanPingEvaluatorMixin,
    CommonHWSubConfigMixin, OffloadSubConfigMixin,
    BaseEnrtRecipe):
    """
    Enrt recipe: each host bonds two switch-facing NICs, creates VLANs with
    ids 10 and 20 on top of the bond and bridges each VLAN with a tap device
    leading to one of two guests.  Guests on the same VLAN id across hosts
    (guest1/guest3 and guest2/guest4) are expected to reach each other;
    cross-VLAN pairs are expected unreachable (see generate_ping_endpoints).
    """
    # Two bond slaves and two guest-facing taps per host.
    host1 = HostReq()
    host1.eth0 = DeviceReq(label="to_switch", driver=RecipeParam("driver"))
    host1.eth1 = DeviceReq(label="to_switch", driver=RecipeParam("driver"))
    host1.tap0 = DeviceReq(label="to_guest1")
    host1.tap1 = DeviceReq(label="to_guest2")

    host2 = HostReq()
    host2.eth0 = DeviceReq(label="to_switch", driver=RecipeParam("driver"))
    host2.eth1 = DeviceReq(label="to_switch", driver=RecipeParam("driver"))
    host2.tap0 = DeviceReq(label="to_guest3")
    host2.tap1 = DeviceReq(label="to_guest4")

    guest1 = HostReq()
    guest1.eth0 = DeviceReq(label="to_guest1")

    guest2 = HostReq()
    guest2.eth0 = DeviceReq(label="to_guest2")

    guest3 = HostReq()
    guest3.eth0 = DeviceReq(label="to_guest3")

    guest4 = HostReq()
    guest4.eth0 = DeviceReq(label="to_guest4")

    # Offload settings cycled through by OffloadSubConfigMixin.
    offload_combinations = Param(
        default=(dict(gro="on", gso="on", tso="on", tx="on"),
            dict(gro="off", gso="on", tso="on", tx="on"),
            dict(gro="on", gso="off", tso="off", tx="on"),
            dict(gro="on", gso="on", tso="off", tx="off")))

    # Bonding parameters; both must be supplied by the caller.
    bonding_mode = StrParam(mandatory=True)
    miimon_value = IntParam(mandatory=True)

    def test_wide_configuration(self):
        """
        Build bond0 (eth0+eth1) per host, VLANs 10/20 on each bond enslaved
        to bridges br0/br1 which also contain the guest taps.  IPv4: bridges
        get 192.168.{10,20}.{1,3}/24, guests .{2,4}; guests additionally get
        IPv6 addresses from fc00:0:0:{1,2}::/64.
        """
        host1, host2, guest1, guest2, guest3, guest4 = (self.matched.host1,
            self.matched.host2, self.matched.guest1, self.matched.guest2,
            self.matched.guest3, self.matched.guest4)

        for host in [host1, host2]:
            # Devices are brought down before they are enslaved.
            for dev in [host.eth0, host.eth1, host.tap0, host.tap1]:
                dev.down()
            host.bond0 = BondDevice(mode=self.params.bonding_mode,
                miimon=self.params.miimon_value)
            host.bond0.slave_add(host.eth0)
            host.bond0.slave_add(host.eth1)
            host.br0 = BridgeDevice()
            host.br0.slave_add(host.tap0)
            host.br1 = BridgeDevice()
            host.br1.slave_add(host.tap1)

        for guest in (guest1, guest2, guest3, guest4):
            guest.eth0.down()

        # VLANs over the bonds; 'master' enslaves them to the bridges.
        host1.vlan0 = VlanDevice(realdev=host1.bond0, vlan_id=10,
            master=host1.br0)
        host1.vlan1 = VlanDevice(realdev=host1.bond0, vlan_id=20,
            master=host1.br1)
        host2.vlan0 = VlanDevice(realdev=host2.bond0, vlan_id=10,
            master=host2.br0)
        host2.vlan1 = VlanDevice(realdev=host2.bond0, vlan_id=20,
            master=host2.br1)

        configuration = super().test_wide_configuration()
        configuration.test_wide_devices = [
            host1.br0, host2.br0, guest1.eth0,
            guest2.eth0, guest3.eth0, guest4.eth0
        ]

        net_addr = "192.168"
        net_addr6 = "fc00:0:0"

        # host1 side uses host octet 1 (its guests get 2); host2 side uses
        # 3 (its guests get 4).
        for host, (guest_a, guest_b), n in [(host1, (guest1, guest2), 1),
            (host2, (guest3, guest4), 3)]:
            host.br0.ip_add(ipaddress(net_addr + ".10." + str(n) + "/24"))
            host.br1.ip_add(ipaddress(net_addr + ".20." + str(n) + "/24"))
            guest_a.eth0.ip_add(ipaddress(net_addr + ".10." + str(n + 1) +
                "/24"))
            guest_a.eth0.ip_add(ipaddress(net_addr6 + ":1::" + str(n + 1) +
                "/64"))
            guest_b.eth0.ip_add(ipaddress(net_addr + ".20." + str(n + 1) +
                "/24"))
            guest_b.eth0.ip_add(ipaddress(net_addr6 + ":2::" + str(n + 1) +
                "/64"))

        for host in [host1, host2]:
            for dev in [host.eth0, host.eth1, host.tap0, host.tap1,
                host.bond0, host.vlan0, host.vlan1, host.br0, host.br1]:
                dev.up()
        for guest in [guest1, guest2, guest3, guest4]:
            guest.eth0.up()

        # NOTE(review): perf-tool CPU pinning is deliberately disabled for
        # this scenario -- the parameter is logged and then cleared.
        if "perf_tool_cpu" in self.params:
            logging.info("'perf_tool_cpu' param (%d) to be set to None" %
                self.params.perf_tool_cpu)
            self.params.perf_tool_cpu = None

        self.wait_tentative_ips(configuration.test_wide_devices)

        return configuration

    def generate_test_wide_description(self, config):
        """Extend the description with IPs, slaves, bond and VLAN settings."""
        host1, host2 = self.matched.host1, self.matched.host2
        desc = super().generate_test_wide_description(config)
        desc += [
            "\n".join([
                "Configured {}.{}.ips = {}".format(dev.host.hostid, dev.name,
                    dev.ips)
                for dev in config.test_wide_devices
            ]),
            "\n".join([
                "Configured {}.{}.slaves = {}".format(
                    dev.host.hostid, dev.name,
                    ['.'.join([dev.host.hostid, slave.name])
                        for slave in dev.slaves])
                for dev in [host1.bond0, host2.bond0, host1.br0, host1.br1,
                    host2.br0, host2.br1]
            ]),
            "\n".join([
                "Configured {}.{}.mode = {}".format(dev.host.hostid,
                    dev.name, dev.mode)
                for dev in [host1.bond0, host2.bond0]
            ]),
            "\n".join([
                "Configured {}.{}.miimon = {}".format(dev.host.hostid,
                    dev.name, dev.miimon)
                for dev in [host1.bond0, host2.bond0]
            ]),
            "\n".join([
                "Configured {}.{}.vlan_id = {}".format(dev.host.hostid,
                    dev.name, dev.vlan_id)
                for dev in [host1.vlan0, host1.vlan1, host2.vlan0,
                    host2.vlan1]
            ]),
            "\n".join([
                "Configured {}.{}.realdev = {}".format(
                    dev.host.hostid, dev.name,
                    '.'.join([dev.host.hostid, dev.realdev.name]))
                for dev in [host1.vlan0, host1.vlan1, host2.vlan0,
                    host2.vlan1]
            ])
        ]
        return desc

    def test_wide_deconfiguration(self, config):
        # Devices are cleaned up by the parent class; drop our references.
        del config.test_wide_devices

        super().test_wide_deconfiguration(config)

    def generate_ping_endpoints(self, config):
        """
        Ping every host1 guest against every host2 guest; only the pairs
        sharing a VLAN id -- (guest1, guest3) and (guest2, guest4) -- are
        marked reachable.
        """
        guest1, guest2, guest3, guest4 = (self.matched.guest1,
            self.matched.guest2, self.matched.guest3, self.matched.guest4)
        dev_combinations = product([guest1.eth0, guest2.eth0],
            [guest3.eth0, guest4.eth0])

        return [
            PingEndpoints(comb[0], comb[1],
                reachable=((comb[0].host, comb[1].host) in [(guest1, guest3),
                    (guest2, guest4)]))
            for comb in dev_combinations
        ]

    def generate_perf_endpoints(self, config):
        """Performance is measured between guest1 and guest3 (same VLAN)."""
        return [(self.matched.guest1.eth0, self.matched.guest3.eth0)]

    @property
    def offload_nics(self):
        # Every machine's primary NIC, plus the hosts' second bond slaves.
        host1, host2, guest1, guest2, guest3, guest4 = (self.matched.host1,
            self.matched.host2, self.matched.guest1, self.matched.guest2,
            self.matched.guest3, self.matched.guest4)
        result = []
        for machine in host1, host2, guest1, guest2, guest3, guest4:
            result.append(machine.eth0)
        result.extend([host1.eth1, host2.eth1])
        return result

    @property
    def mtu_hw_config_dev_list(self):
        # All software devices in the data path plus the guest NICs.
        host1, host2, guest1, guest2, guest3, guest4 = (self.matched.host1,
            self.matched.host2, self.matched.guest1, self.matched.guest2,
            self.matched.guest3, self.matched.guest4)
        result = []
        for host in [host1, host2]:
            for dev in [host.bond0, host.tap0, host.tap1, host.br0,
                host.br1, host.vlan0, host.vlan1]:
                result.append(dev)
        for guest in [guest1, guest2, guest3, guest4]:
            result.append(guest.eth0)
        return result

    @property
    def dev_interrupt_hw_config_dev_list(self):
        return [
            self.matched.host1.eth0, self.matched.host1.eth1,
            self.matched.host2.eth0, self.matched.host2.eth1
        ]

    @property
    def parallel_stream_qdisc_hw_config_dev_list(self):
        return [
            self.matched.host1.eth0, self.matched.host1.eth1,
            self.matched.host2.eth0, self.matched.host2.eth1
        ]
class TeamVsBondRecipe(PerfReversibleFlowMixin, CommonHWSubConfigMixin,
    OffloadSubConfigMixin, BaremetalEnrtRecipe):
    """
    Enrt recipe comparing link aggregation implementations: host1 aggregates
    its two NICs with a team device (runner taken from the ``runner_name``
    parameter), host2 with a bond device (``bonding_mode``/``miimon_value``).
    """
    host1 = HostReq()
    host1.eth0 = DeviceReq(label="tnet", driver=RecipeParam("driver"))
    host1.eth1 = DeviceReq(label="tnet", driver=RecipeParam("driver"))

    host2 = HostReq()
    host2.eth0 = DeviceReq(label="tnet", driver=RecipeParam("driver"))
    host2.eth1 = DeviceReq(label="tnet", driver=RecipeParam("driver"))

    # Offload settings cycled through by OffloadSubConfigMixin.
    offload_combinations = Param(default=(
        dict(gro="on", gso="on", tso="on", tx="on"),
        dict(gro="off", gso="on", tso="on", tx="on"),
        dict(gro="on", gso="off", tso="off", tx="on"),
        dict(gro="on", gso="on", tso="off", tx="off")))

    # All three aggregation parameters must be supplied by the caller.
    runner_name = StrParam(mandatory = True)
    bonding_mode = StrParam(mandatory = True)
    miimon_value = IntParam(mandatory = True)

    def test_wide_configuration(self):
        """
        Create team0 on host1 and bond0 on host2, each over eth0+eth1, and
        address them from 192.168.10.0/24 and fc00:0:0:1::/64 (host octet
        1 for host1, 2 for host2).
        """
        host1, host2 = self.matched.host1, self.matched.host2

        host1.team0 = TeamDevice(config={'runner': {'name':
            self.params.runner_name}})
        host2.bond0 = BondDevice(mode=self.params.bonding_mode,
            miimon=self.params.miimon_value)

        configuration = super().test_wide_configuration()
        configuration.test_wide_devices = [host1.team0, host2.bond0]

        net_addr_1 = "192.168.10"
        net_addr6_1 = "fc00:0:0:1"

        for i, (host, dev) in enumerate([(host1, host1.team0),
            (host2, host2.bond0)]):
            # Slaves must be down before they can be enslaved.
            host.eth0.down()
            host.eth1.down()
            dev.slave_add(host.eth0)
            dev.slave_add(host.eth1)
            dev.ip_add(ipaddress(net_addr_1 + "." + str(i+1) + "/24"))
            dev.ip_add(ipaddress(net_addr6_1 + "::" + str(i+1) + "/64"))

        for host, dev in [(host1, host1.team0), (host2, host2.bond0)]:
            host.eth0.up()
            host.eth1.up()
            dev.up()

        self.wait_tentative_ips(configuration.test_wide_devices)

        return configuration

    def generate_test_wide_description(self, config):
        """Extend the description with IPs, slaves and aggregation settings."""
        host1, host2 = self.matched.host1, self.matched.host2
        desc = super().generate_test_wide_description(config)
        desc += [
            "\n".join([
                "Configured {}.{}.ips = {}".format(
                    dev.host.hostid, dev.name, dev.ips
                )
                for dev in config.test_wide_devices
            ]),
            "\n".join([
                "Configured {}.{}.slaves = {}".format(
                    dev.host.hostid, dev.name,
                    ['.'.join([dev.host.hostid, slave.name])
                        for slave in dev.slaves]
                )
                for dev in config.test_wide_devices
            ]),
            "Configured {}.{}.runner_name = {}".format(
                host1.hostid, host1.team0.name,
                host1.team0.config
            ),
            "Configured {}.{}.mode = {}".format(
                host2.hostid, host2.bond0.name,
                host2.bond0.mode
            ),
            "Configured {}.{}.miimon = {}".format(
                host2.hostid, host2.bond0.name,
                host2.bond0.miimon
            )
        ]
        return desc

    def test_wide_deconfiguration(self, config):
        # Devices are cleaned up by the parent class; drop our references.
        del config.test_wide_devices

        super().test_wide_deconfiguration(config)

    def generate_ping_endpoints(self, config):
        """Ping in both directions between the aggregated devices."""
        return [
            PingEndpoints(self.matched.host1.team0, self.matched.host2.bond0),
            PingEndpoints(self.matched.host2.bond0, self.matched.host1.team0)
        ]

    def generate_perf_endpoints(self, config):
        """Perf flows run between team0 and bond0."""
        return [(self.matched.host1.team0, self.matched.host2.bond0)]

    @property
    def offload_nics(self):
        return [self.matched.host1.team0, self.matched.host2.bond0]

    @property
    def mtu_hw_config_dev_list(self):
        return [self.matched.host1.team0, self.matched.host2.bond0]

    @property
    def coalescing_hw_config_dev_list(self):
        # Hardware sub-configurations apply to the physical slave NICs.
        host1, host2 = self.matched.host1, self.matched.host2
        return [host1.eth0, host1.eth1, host2.eth0, host2.eth1]

    @property
    def dev_interrupt_hw_config_dev_list(self):
        host1, host2 = self.matched.host1, self.matched.host2
        return [host1.eth0, host1.eth1, host2.eth0, host2.eth1]

    @property
    def parallel_stream_qdisc_hw_config_dev_list(self):
        host1, host2 = self.matched.host1, self.matched.host2
        return [host1.eth0, host1.eth1, host2.eth0, host2.eth1]
class IpsecEspAhCompRecipe(CommonHWSubConfigMixin, BaremetalEnrtRecipe,
    PacketAssertTestAndEvaluate):
    """
    Enrt recipe testing IPsec (ESP + AH, and IPComp in non-transport modes)
    between two directly routed hosts, for every combination of the
    parametrized ciphers and hashes over the configured IP versions.
    """
    host1 = HostReq()
    host1.eth0 = DeviceReq(label="to_switch", driver=RecipeParam("driver"))

    host2 = HostReq()
    host2.eth0 = DeviceReq(label="to_switch", driver=RecipeParam("driver"))

    # (algorithm, key length in bits) combinations to iterate over.
    ciphers = Param(default=[('aes', 128), ('aes', 256)])
    hashes = Param(default=[('hmac(md5)', 128), ('sha256', 256)])
    ipsec_mode = StrParam(default="transport")
    # Fixed SPIs passed to configure_ipsec_esp_ah_comp(); indexes [2] and
    # [1] are also grepped for in the packet assert (see ping_test).
    spi_values = ["0x00000001", "0x00000002", "0x00000003", "0x00000004"]

    def test_wide_configuration(self):
        """
        Address each NIC from its own IPv4 and IPv6 network
        (192.168.{99,100}.1/24 and fc00:{1,2}::1/64) and add host routes to
        the peer's addresses so traffic flows over the test NICs.
        """
        host1, host2 = self.matched.host1, self.matched.host2

        configuration = super().test_wide_configuration()
        configuration.test_wide_devices = [host1.eth0, host2.eth0]

        net_addr = "192.168."
        net_addr6 = "fc00:"
        for i, host in enumerate([host1, host2]):
            host.eth0.down()
            host.eth0.ip_add(ipaddress(net_addr + str(i + 99) + ".1/24"))
            host.eth0.ip_add(ipaddress(net_addr6 + str(i + 1) + "::1/64"))
            host.eth0.up()
        self.wait_tentative_ips(configuration.test_wide_devices)

        if self.params.ping_parallel or self.params.ping_bidirect:
            # Fixed missing space between the concatenated literals (the
            # original logged "for thisrecipe").
            logging.debug(
                "Parallelism in pings is not supported for this "
                "recipe, ping_parallel/ping_bidirect will be ignored.")

        # Host routes to the peer's addresses, one per address family.
        for host, dst in [(host1, host2), (host2, host1)]:
            for family in [AF_INET, AF_INET6]:
                host.run("ip route add %s dev %s" %
                    (dst.eth0.ips_filter(family=family)[0],
                        host.eth0.name))

        configuration.endpoint1 = host1.eth0
        configuration.endpoint2 = host2.eth0

        return configuration

    def generate_test_wide_description(self, config):
        """Extend the description with the configured device IPs."""
        host1, host2 = self.matched.host1, self.matched.host2
        desc = super().generate_test_wide_description(config)
        desc += [
            "\n".join([
                "Configured {}.{}.ips = {}".format(dev.host.hostid,
                    dev.name, dev.ips)
                for dev in config.test_wide_devices
            ])
        ]
        return desc

    def test_wide_deconfiguration(self, config):
        # Devices are cleaned up by the parent class; drop our references.
        del config.test_wide_devices

        super().test_wide_deconfiguration(config)

    def generate_sub_configurations(self, config):
        """
        Yield one sub-configuration per (ip version, cipher, hash)
        combination; each carries the endpoint addresses and the ipsec
        settings with freshly generated random keys.
        """
        ipsec_mode = self.params.ipsec_mode
        spi_values = self.spi_values
        for subconf in ConfMixin.generate_sub_configurations(self, config):
            for ipv in self.params.ip_versions:
                if ipv == "ipv4":
                    family = AF_INET
                elif ipv == "ipv6":
                    family = AF_INET6

                ip1 = config.endpoint1.ips_filter(family=family)[0]
                ip2 = config.endpoint2.ips_filter(family=family)[0]

                for ciph_alg, ciph_len in self.params.ciphers:
                    for hash_alg, hash_len in self.params.hashes:
                        ciph_key = generate_key(ciph_len)
                        hash_key = generate_key(hash_len)
                        new_config = copy.copy(subconf)
                        new_config.ips = (ip1, ip2)
                        new_config.ipsec_settings = (ciph_alg, ciph_key,
                            hash_alg, hash_key, ipsec_mode, spi_values)
                        yield new_config

    def apply_sub_configuration(self, config):
        """Install the xfrm policies/states for the current combination."""
        super().apply_sub_configuration(config)
        ns1, ns2 = config.endpoint1.netns, config.endpoint2.netns
        ip1, ip2 = config.ips
        ipsec_sets = config.ipsec_settings
        configure_ipsec_esp_ah_comp(ns1, ip1, ns2, ip2, *ipsec_sets)

    def remove_sub_configuration(self, config):
        """Flush all xfrm policies and states on both hosts."""
        ns1, ns2 = config.endpoint1.netns, config.endpoint2.netns
        for ns in (ns1, ns2):
            ns.run("ip xfrm policy flush")
            ns.run("ip xfrm state flush")
        super().remove_sub_configuration(config)

    def generate_ping_configurations(self, config):
        """Yield a single host1 -> host2 ping configuration."""
        ns1, ns2 = config.endpoint1.netns, config.endpoint2.netns
        ip1, ip2 = config.ips
        count = self.params.ping_count
        interval = self.params.ping_interval
        size = self.params.ping_psize
        common_args = {'count': count, 'interval': interval, 'size': size}
        ping_conf = PingConf(client=ns1,
            client_bind=ip1,
            destination=ns2,
            destination_address=ip2,
            **common_args)
        yield [ping_conf]

    def generate_flow_combinations(self, config):
        """Yield perf flows (plus reversed flows if perf_reverse is set)."""
        ns1, ns2 = config.endpoint1.netns, config.endpoint2.netns
        ip1, ip2 = config.ips
        for perf_test in self.params.perf_tests:
            for size in self.params.perf_msg_sizes:
                flow = PerfFlow(
                    type=perf_test,
                    generator=ns1,
                    generator_bind=ip1,
                    generator_nic=config.endpoint1,
                    receiver=ns2,
                    receiver_bind=ip2,
                    receiver_nic=config.endpoint2,
                    msg_size=size,
                    duration=self.params.perf_duration,
                    parallel_streams=self.params.perf_parallel_streams,
                    cpupin=self.params.perf_tool_cpu if (
                        "perf_tool_cpu" in self.params) else None)
                yield [flow]

                if ("perf_reverse" in self.params and
                        self.params.perf_reverse):
                    reverse_flow = self._create_reverse_flow(flow)
                    yield [reverse_flow]

    def ping_test(self, ping_configs):
        """
        Run the ping test twice while capturing on the client's NIC:

        1. with the configured packet size, asserting that AH and ESP
           encapsulated packets (matched by their SPIs) are observed;
        2. with 1500-byte pings; in any mode other than transport,
           additionally assert that IPComp packets appear.

        Returns a tuple of (ping_result, pa_config, pa_result) triples.
        """
        m1, m2 = ping_configs[0].client, ping_configs[0].destination
        ip1, ip2 = (ping_configs[0].client_bind,
            ping_configs[0].destination_address)
        if1_name = self.get_dev_by_ip(m1, ip1).name
        if2 = self.get_dev_by_ip(m2, ip2)

        pa_kwargs = {}
        pa_kwargs["p_filter"] = "ah"
        # Raw strings: '\(' must reach grep as an escaped parenthesis; in a
        # plain literal it is an invalid escape sequence (SyntaxWarning).
        pa_kwargs["grep_for"] = [r"AH\(spi=" + self.spi_values[2],
            r"ESP\(spi=" + self.spi_values[1]]
        if ping_configs[0].count:
            # Each echo request/reply pair is both AH- and ESP-wrapped.
            pa_kwargs["p_min"] = 2 * ping_configs[0].count
        pa_config = PacketAssertConf(m2, if2, **pa_kwargs)

        dump = m1.run("tcpdump -i %s -nn -vv" % if1_name, bg=True)
        self.packet_assert_test_start(pa_config)
        self.ctl.wait(2)
        ping_result = super().ping_test(ping_configs)
        self.ctl.wait(2)
        pa_result = self.packet_assert_test_stop()
        dump.kill(signal=signal.SIGINT)

        # Dump xfrm state/policy counters for debugging.
        m1.run("ip -s xfrm pol")
        m1.run("ip -s xfrm state")

        dump2 = m1.run("tcpdump -i %s -nn -vv" % if1_name, bg=True)
        # IPComp only kicks in for large packets in non-transport modes.
        no_trans = self.params.ipsec_mode != 'transport'
        ping_configs2 = copy.copy(ping_configs)
        ping_configs2[0].size = 1500
        if no_trans:
            pa_kwargs2 = copy.copy(pa_kwargs)
            pa_kwargs2["p_filter"] = ''
            pa_kwargs2["grep_for"] = ["IPComp"]
            if ping_configs2[0].count:
                pa_kwargs2["p_min"] = ping_configs2[0].count
            pa_config2 = PacketAssertConf(m2, if2, **pa_kwargs2)
            self.packet_assert_test_start(pa_config2)
        self.ctl.wait(2)
        ping_result2 = super().ping_test(ping_configs2)
        self.ctl.wait(2)
        if no_trans:
            pa_result2 = self.packet_assert_test_stop()
        dump2.kill(signal=signal.SIGINT)

        result = ((ping_result, pa_config, pa_result), )
        if no_trans:
            result += ((ping_result2, pa_config2, pa_result2), )
        return result

    def ping_report_and_evaluate(self, results):
        """Evaluate each (ping, packet-assert) result pair."""
        for res in results:
            super().ping_report_and_evaluate(res[0])
            self.packet_assert_evaluate_and_report(res[1], res[2])

    def get_dev_by_ip(self, netns, ip):
        """Return the device in *netns* owning *ip*; raise LnstError if none."""
        for dev in netns.device_database:
            if ip in dev.ips:
                return dev
        raise LnstError("Could not match ip %s to any device of %s." %
            (ip, netns.name))

    @property
    def mtu_hw_config_dev_list(self):
        return [self.matched.host1.eth0, self.matched.host2.eth0]

    @property
    def dev_interrupt_hw_config_dev_list(self):
        return [self.matched.host1.eth0, self.matched.host2.eth0]

    @property
    def parallel_stream_qdisc_hw_config_dev_list(self):
        return [self.matched.host1.eth0, self.matched.host2.eth0]
class VlansRecipe(VlanPingEvaluatorMixin,
    CommonHWSubConfigMixin, OffloadSubConfigMixin,
    BaremetalEnrtRecipe):
    """
    This recipe implements Enrt testing for a network scenario that looks
    as follows

    .. code-block:: none

                                 .--------.
                        .--------+ switch +-------.
                        |        '--------'       |
                    .---'--.                   .--'---.
            .-------| eth0 |------.    .-------| eth0 |------.
            |       '------'      |    |       '------'      |
            |      /   |    \     |    |      /   |    \     |
            |  vlan0 vlan1  vlan2 |    |  vlan0 vlan1  vlan2 |
            |  id=10 id=20  id=30 |    |  id=10 id=20  id=30 |
            |                     |    |                     |
            |        host1        |    |        host2        |
            '---------------------'    '---------------------'

    All sub configurations are included via Mixin classes.

    The actual test machinery is implemented in the :any:`BaseEnrtRecipe` class.
    """
    host1 = HostReq()
    host1.eth0 = DeviceReq(label="net1", driver=RecipeParam("driver"))

    host2 = HostReq()
    host2.eth0 = DeviceReq(label="net1", driver=RecipeParam("driver"))

    # VLAN ids for the three tunnels on each host.
    vlan0_id = IntParam(default=10)
    vlan1_id = IntParam(default=20)
    vlan2_id = IntParam(default=30)

    # Offload settings cycled through by OffloadSubConfigMixin.
    offload_combinations = Param(
        default=(dict(gro="on", gso="on", tso="on", tx="on", rx="on"),
            dict(gro="off", gso="on", tso="on", tx="on", rx="on"),
            dict(gro="on", gso="off", tso="off", tx="on", rx="on"),
            dict(gro="on", gso="on", tso="off", tx="off", rx="on"),
            dict(gro="on", gso="on", tso="on", tx="on", rx="off")))

    def test_wide_configuration(self):
        """
        Test wide configuration for this recipe involves creating three
        VLAN (802.1Q) tunnels on top of the matched host's NIC with vlan
        ids from parameters vlan0_id, vlan1_id and vlan2_id (by default:
        10, 20, 30). The same tunnels are configured on the second host.

        An IPv4 and IPv6 address is configured on each tunnel endpoint.

        | host1.vlan0 = 192.168.10.1/24 and fc00:0:0:1::1/64
        | host1.vlan1 = 192.168.20.1/24 and fc00:0:0:2::1/64
        | host1.vlan2 = 192.168.30.1/24 and fc00:0:0:3::1/64

        | host2.vlan0 = 192.168.10.2/24 and fc00:0:0:1::2/64
        | host2.vlan1 = 192.168.20.2/24 and fc00:0:0:2::2/64
        | host2.vlan2 = 192.168.30.2/24 and fc00:0:0:3::2/64
        """
        host1, host2 = self.matched.host1, self.matched.host2

        host1.eth0.down()
        host2.eth0.down()

        # Three VLANs per host, all on top of the same physical NIC.
        host1.vlan0 = VlanDevice(realdev=host1.eth0,
            vlan_id=self.params.vlan0_id)
        host1.vlan1 = VlanDevice(realdev=host1.eth0,
            vlan_id=self.params.vlan1_id)
        host1.vlan2 = VlanDevice(realdev=host1.eth0,
            vlan_id=self.params.vlan2_id)
        host2.vlan0 = VlanDevice(realdev=host2.eth0,
            vlan_id=self.params.vlan0_id)
        host2.vlan1 = VlanDevice(realdev=host2.eth0,
            vlan_id=self.params.vlan1_id)
        host2.vlan2 = VlanDevice(realdev=host2.eth0,
            vlan_id=self.params.vlan2_id)

        configuration = super().test_wide_configuration()
        configuration.test_wide_devices = []
        for host in [host1, host2]:
            configuration.test_wide_devices.extend(
                [host.vlan0, host.vlan1, host.vlan2])

        net_addr = "192.168"
        net_addr6 = "fc00:0:0"

        # Host octet / suffix is 1 for host1, 2 for host2.
        for i, host in enumerate([host1, host2]):
            host.vlan0.ip_add(ipaddress('{}.10.{}/24'.format(net_addr,
                i + 1)))
            host.vlan1.ip_add(ipaddress('{}.20.{}/24'.format(net_addr,
                i + 1)))
            host.vlan2.ip_add(ipaddress('{}.30.{}/24'.format(net_addr,
                i + 1)))
            host.vlan0.ip_add(ipaddress('{}:1::{}/64'.format(net_addr6,
                i + 1)))
            host.vlan1.ip_add(ipaddress('{}:2::{}/64'.format(net_addr6,
                i + 1)))
            host.vlan2.ip_add(ipaddress('{}:3::{}/64'.format(net_addr6,
                i + 1)))
            for dev in [host.eth0, host.vlan0, host.vlan1, host.vlan2]:
                dev.up()

        self.wait_tentative_ips(configuration.test_wide_devices)

        return configuration

    def generate_test_wide_description(self, config):
        """
        Test wide description is extended with the configured VLAN tunnels
        and their IP addresses
        """
        host1, host2 = self.matched.host1, self.matched.host2
        desc = super().generate_test_wide_description(config)
        desc += [
            "\n".join([
                "Configured {}.{}.ips = {}".format(dev.host.hostid,
                    dev.name, dev.ips)
                for dev in config.test_wide_devices
            ]),
            "\n".join([
                "Configured {}.{}.vlan_id = {}".format(dev.host.hostid,
                    dev.name, dev.vlan_id)
                for dev in config.test_wide_devices
            ]),
            "\n".join([
                "Configured {}.{}.realdev = {}".format(
                    dev.host.hostid, dev.name,
                    '.'.join([dev.host.hostid, dev.realdev.name]))
                for dev in config.test_wide_devices
            ])
        ]
        return desc

    def test_wide_deconfiguration(self, config):
        ""  # overriding the parent docstring
        del config.test_wide_devices

        super().test_wide_deconfiguration(config)

    def generate_ping_endpoints(self, config):
        """
        The ping endpoints for this recipe are the matching VLAN tunnel
        endpoints of the hosts.

        Returned as::

            [PingEndpoints(host1.vlan0, host2.vlan0),
             PingEndpoints(host1.vlan1, host2.vlan1),
             PingEndpoints(host1.vlan2, host2.vlan2)]
        """
        host1, host2 = self.matched.host1, self.matched.host2

        return [
            PingEndpoints(host1.vlan0, host2.vlan0),
            PingEndpoints(host1.vlan1, host2.vlan1),
            PingEndpoints(host1.vlan2, host2.vlan2)
        ]

    def generate_perf_endpoints(self, config):
        """
        The perf endpoints for this recipe are the VLAN tunnel endpoints
        with VLAN id from parameter vlan0_id (by default: 10):

        host1.vlan0 and host2.vlan0

        Returned as::

            [(self.matched.host1.vlan0, self.matched.host2.vlan0)]
        """
        return [(self.matched.host1.vlan0, self.matched.host2.vlan0)]

    @property
    def offload_nics(self):
        """
        The `offload_nics` property value for this scenario is a list of
        the physical devices carrying data of the configured VLAN tunnels:

        host1.eth0 and host2.eth0

        For detailed explanation of this property see
        :any:`OffloadSubConfigMixin` class and
        :any:`OffloadSubConfigMixin.offload_nics`.
        """
        return [self.matched.host1.eth0, self.matched.host2.eth0]

    @property
    def mtu_hw_config_dev_list(self):
        """
        The `mtu_hw_config_dev_list` property value for this scenario is a
        list of all configured VLAN tunnel devices and the underlying
        physical devices:

        | host1.eth0, host1.vlan0, host1.vlan1, host1.vlan2
        | host2.eth0, host2.vlan0, host2.vlan1, host2.vlan2

        For detailed explanation of this property see :any:`MTUHWConfigMixin`
        class and :any:`MTUHWConfigMixin.mtu_hw_config_dev_list`.
        """
        result = []
        for host in [self.matched.host1, self.matched.host2]:
            for dev in [host.eth0, host.vlan0, host.vlan1, host.vlan2]:
                result.append(dev)
        return result

    @property
    def coalescing_hw_config_dev_list(self):
        """
        The `coalescing_hw_config_dev_list` property value for this scenario
        is a list of the physical devices carrying data of the configured
        VLAN tunnels:

        host1.eth0 and host2.eth0

        For detailed explanation of this property see
        :any:`CoalescingHWConfigMixin` class and
        :any:`CoalescingHWConfigMixin.coalescing_hw_config_dev_list`.
        """
        return [self.matched.host1.eth0, self.matched.host2.eth0]

    @property
    def dev_interrupt_hw_config_dev_list(self):
        """
        The `dev_interrupt_hw_config_dev_list` property value for this
        scenario is a list of the physical devices carrying data of the
        configured VLAN tunnels:

        host1.eth0 and host2.eth0

        For detailed explanation of this property see
        :any:`DevInterruptHWConfigMixin` class and
        :any:`DevInterruptHWConfigMixin.dev_interrupt_hw_config_dev_list`.
        """
        return [self.matched.host1.eth0, self.matched.host2.eth0]

    @property
    def parallel_stream_qdisc_hw_config_dev_list(self):
        """
        The `parallel_stream_qdisc_hw_config_dev_list` property value for
        this scenario is a list of the physical devices carrying data of the
        configured VLAN tunnels:

        host1.eth0 and host2.eth0

        For detailed explanation of this property see
        :any:`ParallelStreamQDiscHWConfigMixin` class and
        :any:`ParallelStreamQDiscHWConfigMixin.parallel_stream_qdisc_hw_config_dev_list`.
        """
        return [self.matched.host1.eth0, self.matched.host2.eth0]

    @property
    def pause_frames_dev_list(self):
        """
        The `pause_frames_dev_list` property value for this scenario is a
        list of the physical devices carrying data of the configured VLAN
        tunnels:

        host1.eth0 and host2.eth0

        For detailed explanation of this property see
        :any:`PauseFramesHWConfigMixin` and
        :any:`PauseFramesHWConfigMixin.pause_frames_dev_list`.
        """
        return [self.matched.host1.eth0, self.matched.host2.eth0]
class GreTunnelOverVlanRecipe(MTUHWConfigMixin, PauseFramesHWConfigMixin,
    OffloadSubConfigMixin, BaseTunnelRecipe):
    """
    This class implements a recipe that configures a GRE tunnel between
    two hosts that are connected through a vlan device.

    .. code-block:: none

                              .--------.
                 .------------| switch |-----------.
                 |            '--------'           |
         .-------|------.                  .-------|------.
         |    .--'-.    |                  |    .--'-.    |
         |    |eth0|    |                  |    |eth0|    |
         |    '----'    |                  |    '----'    |
         |      |       |                  |      |       |
         | vlan0(id=10) |                  | vlan0(id=10) |
         |      |       |                  |      |       |
         |  ----'  '--- |                  |  ----'  '--- |
         |  gre tunnel  |                  |  gre tunnel  |
         |  ----------  |                  |  ----------  |
         |              |                  |              |
         |    host1     |                  |    host2     |
         '--------------'                  '--------------'

    The actual test machinery is implemented in the :any:`BaseEnrtRecipe`
    class.

    The test wide configuration is implemented in the
    :any:`BaseTunnelRecipe` class.
    """
    host1 = HostReq()
    host1.eth0 = DeviceReq(label="net1", driver=RecipeParam("driver"))
    host1.eth1 = DeviceReq(label="net1", driver=RecipeParam("driver"))

    host2 = HostReq()
    host2.eth0 = DeviceReq(label="net1", driver=RecipeParam("driver"))
    host2.eth1 = DeviceReq(label="net1", driver=RecipeParam("driver"))

    # Offload settings cycled through by OffloadSubConfigMixin.
    offload_combinations = Param(default=(
        dict(gro="on", gso="on", tso="on"),
        dict(gro="off", gso="on", tso="on"),
        dict(gro="on", gso="off", tso="off"),
        dict(gro="on", gso="on", tso="off"),
    ))

    def configure_underlying_network(self, configuration):
        """
        The underlying network for the tunnel consists of two Ethernet
        devices on the matched hosts. A VLAN is configured on top of each
        device and addressed from 192.168.101.0/24; the VLAN devices become
        the tunnel endpoints.
        """
        host1, host2 = self.matched.host1, self.matched.host2
        host1.vlan10 = VlanDevice(realdev=host1.eth0, vlan_id=10)
        host2.vlan10 = VlanDevice(realdev=host2.eth0, vlan_id=10)
        for i, device in enumerate([host1.vlan10, host2.vlan10]):
            device.ip_add(ipaddress("192.168.101." + str(i + 1) + "/24"))
            configuration.test_wide_devices.append(device)

        for dev in [
            host1.eth0,
            host1.vlan10,
            host2.eth0,
            host2.vlan10,
        ]:
            dev.up()

        configuration.tunnel_endpoints = (host1.vlan10, host2.vlan10)

    def create_tunnel(self, configuration):
        """
        The GRE tunnel devices are configured with local and remote ip
        addresses matching the VLAN device IP addresses.

        The GRE tunnel devices are configured with IPv4 and IPv6 addresses
        of individual networks. Routes are configured accordingly.
        """
        endpoint1, endpoint2 = configuration.tunnel_endpoints
        m1 = endpoint1.netns
        m2 = endpoint2.netns
        # The GRE underlay runs over the VLAN devices' IPv4 addresses.
        ip_filter = {"family": AF_INET}
        endpoint1_ip = endpoint1.ips_filter(**ip_filter)[0]
        endpoint2_ip = endpoint2.ips_filter(**ip_filter)[0]

        # Overlay addressing: network "A" behind host1, "B" behind host2.
        a_ip4 = Ip4Address("192.168.6.2/24")
        a_net4 = "192.168.6.0/24"
        b_ip4 = Ip4Address("192.168.7.2/24")
        b_net4 = "192.168.7.0/24"

        a_ip6 = Ip6Address("6001:db8:ac10:fe01::2/64")
        a_net6 = "6001:db8:ac10:fe01::0/64"
        b_ip6 = Ip6Address("7001:db8:ac10:fe01::2/64")
        b_net6 = "7001:db8:ac10:fe01::0/64"

        m1.gre_tunnel = GreDevice(local=endpoint1_ip, remote=endpoint2_ip)
        m2.gre_tunnel = GreDevice(local=endpoint2_ip, remote=endpoint1_ip)

        # A (host1 side): address the tunnel, route the peer's networks
        # through it.
        m1.gre_tunnel.up()
        m1.gre_tunnel.ip_add(a_ip4)
        m1.gre_tunnel.ip_add(a_ip6)
        m1.run("ip -4 route add {} dev {}".format(b_net4, m1.gre_tunnel.name))
        m1.run("ip -6 route add {} dev {}".format(b_net6, m1.gre_tunnel.name))

        # B (host2 side): mirror of the A-side configuration.
        m2.gre_tunnel.up()
        m2.gre_tunnel.ip_add(b_ip4)
        m2.gre_tunnel.ip_add(b_ip6)
        m2.run("ip -4 route add {} dev {}".format(a_net4, m2.gre_tunnel.name))
        m2.run("ip -6 route add {} dev {}".format(a_net6, m2.gre_tunnel.name))

        configuration.tunnel_devices.extend([m1.gre_tunnel, m2.gre_tunnel])
        self.wait_tentative_ips(configuration.tunnel_devices)

    def generate_ping_endpoints(self, config):
        """
        The ping endpoints for this recipe are simply the tunnel endpoints

        Returned as::

            [PingEndpoints(self.matched.host1.gre_tunnel,
                           self.matched.host2.gre_tunnel)]
        """
        return [
            PingEndpoints(self.matched.host1.gre_tunnel,
                self.matched.host2.gre_tunnel)
        ]

    def get_packet_assert_config(self, ping_config):
        """
        The packet assert test configuration contains filter for gre
        protocol and grep patterns to match the ICMP or ICMP6 echo requests.
        """
        # Capture on the VLAN carrier devices that transport the GRE frames.
        ip_filter = {"family": AF_INET}
        m1_carrier = self.matched.host1.vlan10
        m2_carrier = self.matched.host2.vlan10

        m1_carrier_ip = m1_carrier.ips_filter(**ip_filter)[0]
        m2_carrier_ip = m2_carrier.ips_filter(**ip_filter)[0]

        ip1 = ping_config.client_bind
        ip2 = ping_config.destination_address

        pa_kwargs = {}
        pa_kwargs["p_filter"] = "proto gre"

        # Two tcpdump output variants are accepted per family.  Raw strings:
        # '\|' must reach grep as an escaped pipe; in a plain literal it is
        # an invalid escape sequence (SyntaxWarning).
        if isinstance(ip2, Ip4Address):
            pat1 = r"{} > {}: GREv0, .* IP {} > {}: ICMP echo request".format(
                m1_carrier_ip, m2_carrier_ip, ip1, ip2)
            pat2 = r"{} > {}: GREv0 \| {} > {}: ICMP echo request".format(
                m1_carrier_ip, m2_carrier_ip, ip1, ip2)
            grep_pattern = ["({})|({})".format(pat1, pat2)]
        elif isinstance(ip2, Ip6Address):
            pat1 = r"{} > {}: GREv0, .* IP6 {} > {}: ICMP6, echo request".format(
                m1_carrier_ip, m2_carrier_ip, ip1, ip2)
            pat2 = r"{} > {}: GREv0 \| {} > {}: ICMP6, echo request".format(
                m1_carrier_ip, m2_carrier_ip, ip1, ip2)
            grep_pattern = ["({})|({})".format(pat1, pat2)]
        else:
            # Grammar fix of the original message ("is nor IPv4 or IPv6").
            raise Exception(
                "The destination address is neither an IPv4 nor an IPv6 "
                "address")

        pa_kwargs["grep_for"] = grep_pattern

        if ping_config.count:
            pa_kwargs["p_min"] = ping_config.count
        m2 = ping_config.destination
        pa_config = PacketAssertConf(m2, m2_carrier, **pa_kwargs)

        return pa_config

    @property
    def offload_nics(self):
        return [self.matched.host1.eth0, self.matched.host2.eth0]

    @property
    def pause_frames_dev_list(self):
        return [self.matched.host1.eth0, self.matched.host2.eth0]

    @property
    def mtu_hw_config_dev_list(self):
        return [self.matched.host1.gre_tunnel, self.matched.host2.gre_tunnel]
class VirtualBridgeVlanInGuestRecipe(CommonHWSubConfigMixin,
    OffloadSubConfigMixin, BaseEnrtRecipe):
    """
    Enrt recipe: host1 bridges its switch-facing NIC with the tap device
    leading to guest1; guest1 and host2 each configure VLAN id 10 on their
    NIC, so tagged traffic crosses host1's bridge transparently.
    """
    host1 = HostReq()
    host1.eth0 = DeviceReq(label="to_switch", driver=RecipeParam("driver"))
    host1.tap0 = DeviceReq(label="to_guest")

    host2 = HostReq()
    host2.eth0 = DeviceReq(label="to_switch", driver=RecipeParam("driver"))

    guest1 = HostReq()
    guest1.eth0 = DeviceReq(label="to_guest")

    # Offload settings cycled through by OffloadSubConfigMixin.
    offload_combinations = Param(
        default=(dict(gro="on", gso="on", tso="on", tx="on", rx="on"),
            dict(gro="off", gso="on", tso="on", tx="on", rx="on"),
            dict(gro="on", gso="off", tso="off", tx="on", rx="on"),
            dict(gro="on", gso="on", tso="off", tx="off", rx="on"),
            dict(gro="on", gso="on", tso="on", tx="on", rx="off")))

    def test_wide_configuration(self):
        """
        host1: br0 over eth0+tap0, addressed 192.168.10.1/24.  host2 and
        guest1: VLAN id 10 on their NICs, addressed 192.168.10.{2,3}/24 and
        fc00:0:0:1::{2,3}/64 respectively.
        """
        host1, host2, guest1 = (self.matched.host1, self.matched.host2,
            self.matched.guest1)

        host1.br0 = BridgeDevice()
        # Devices must be down while being enslaved to the bridge.
        for dev in [host1.eth0, host1.tap0]:
            dev.down()
            host1.br0.slave_add(dev)

        host2.eth0.down()
        guest1.eth0.down()

        host2.vlan0 = VlanDevice(realdev=host2.eth0, vlan_id=10)
        guest1.vlan0 = VlanDevice(realdev=guest1.eth0, vlan_id=10)

        configuration = super().test_wide_configuration()
        configuration.test_wide_devices = [
            guest1.vlan0, host1.br0, host2.vlan0
        ]

        net_addr_1 = "192.168.10"
        net_addr6_1 = "fc00:0:0:1"

        host1.br0.ip_add(ipaddress(net_addr_1 + ".1/24"))
        # host2 gets host octet 2, guest1 gets 3.
        for i, machine in enumerate([host2, guest1]):
            machine.vlan0.ip_add(ipaddress(net_addr_1 + "." + str(i + 2) +
                "/24"))
            machine.vlan0.ip_add(ipaddress(net_addr6_1 + "::" + str(i + 2) +
                "/64"))

        for dev in [host1.eth0, host1.tap0, host1.br0, host2.eth0,
            host2.vlan0, guest1.eth0, guest1.vlan0]:
            dev.up()

        self.wait_tentative_ips(configuration.test_wide_devices)

        return configuration

    def generate_test_wide_description(self, config):
        """Extend the description with IPs, VLAN settings and bridge slaves."""
        host1, host2 = self.matched.host1, self.matched.host2
        desc = super().generate_test_wide_description(config)
        desc += [
            "\n".join([
                "Configured {}.{}.ips = {}".format(dev.host.hostid,
                    dev.name, dev.ips)
                for dev in config.test_wide_devices
            ]),
            "\n".join([
                "Configured {}.{}.vlan_id = {}".format(dev.host.hostid,
                    dev.name, dev.vlan_id)
                for dev in config.test_wide_devices if isinstance(dev,
                    Vlan)
            ]),
            "\n".join([
                "Configured {}.{}.realdev = {}".format(
                    dev.host.hostid, dev.name,
                    '.'.join([dev.host.hostid, dev.realdev.name]))
                for dev in config.test_wide_devices if isinstance(dev,
                    Vlan)
            ]),
            "Configured {}.{}.slaves = {}".format(
                host1.hostid, host1.br0.name,
                ['.'.join([host1.hostid, slave.name])
                    for slave in host1.br0.slaves])
        ]
        return desc

    def test_wide_deconfiguration(self, config):
        # Devices are cleaned up by the parent class; drop our references.
        del config.test_wide_devices

        super().test_wide_deconfiguration(config)

    def generate_ping_endpoints(self, config):
        # NOTE(review): returns bare (device, device) tuples while the other
        # recipes in this file wrap endpoints in PingEndpoints -- confirm
        # whether the consumer accepts both forms.
        return [(self.matched.guest1.vlan0, self.matched.host2.vlan0)]

    def generate_perf_endpoints(self, config):
        return [(self.matched.guest1.vlan0, self.matched.host2.vlan0)]

    def wait_tentative_ips(self, devices):
        # Local override: poll until no address on the given devices is in
        # the IPv6 tentative (DAD) state, for at most 5 seconds.
        def condition():
            return all(
                [not ip.is_tentative for dev in devices for ip in dev.ips])

        self.ctl.wait_for_condition(condition, timeout=5)

    @property
    def offload_nics(self):
        return [
            self.matched.host1.eth0, self.matched.host2.eth0,
            self.matched.guest1.eth0
        ]

    @property
    def mtu_hw_config_dev_list(self):
        # Every device in the data path, physical and virtual.
        host1, host2, guest1 = (self.matched.host1, self.matched.host2,
            self.matched.guest1)
        return [
            host1.eth0, host1.tap0, host1.br0, guest1.eth0, host2.eth0,
            host2.vlan0, guest1.vlan0
        ]

    @property
    def dev_interrupt_hw_config_dev_list(self):
        return [self.matched.host1.eth0, self.matched.host2.eth0]

    @property
    def parallel_stream_qdisc_hw_config_dev_list(self):
        return [self.matched.host1.eth0, self.matched.host2.eth0]
class SimpleMacsecRecipe(CommonHWSubConfigMixin, BaremetalEnrtRecipe):
    """Recipe that measures ping/perf over a MACsec link between two hosts.

    Each host contributes one NIC on the same switch segment; a ``msec0``
    MACsec device is stacked on top of each NIC per sub-configuration,
    once for every value in :any:`macsec_encryption`.
    """

    host1 = HostReq()
    host1.eth0 = DeviceReq(label="to_switch", driver=RecipeParam("driver"))

    host2 = HostReq()
    host2.eth0 = DeviceReq(label="to_switch", driver=RecipeParam("driver"))

    # Sub-configurations are generated for each listed encryption mode.
    macsec_encryption = Param(default=['on', 'off'])

    # Secure association ids and the matching pre-shared keys; host1 and
    # host2 consume this list in opposite orders (see hosts_and_keys in
    # apply_sub_configuration) so each side's TX key is the peer's RX key.
    ids = ['00', '01']
    keys = [
        "7a16780284000775d4f0a3c0f0e092c0",
        "3212ef5c4cc5d0e4210b17208e88779e"
    ]

    def test_wide_configuration(self):
        """Assign carrier-network IPv4 addresses (192.168.0.0/24) to both NICs.

        The NICs are left *down* here; they are brought up together with the
        MACsec devices in :any:`apply_sub_configuration`.
        """
        host1, host2 = self.matched.host1, self.matched.host2
        configuration = super().test_wide_configuration()
        configuration.test_wide_devices = [host1.eth0, host2.eth0]
        net_addr = "192.168.0"
        for i, host in enumerate([host1, host2]):
            host.eth0.down()
            host.eth0.ip_add(ipaddress(net_addr + '.' + str(i + 1) + "/24"))
        self.wait_tentative_ips(configuration.test_wide_devices)
        # Remember endpoints/hosts on the configuration object so the
        # sub-configuration hooks can reach them without re-matching.
        configuration.endpoint1 = host1.eth0
        configuration.endpoint2 = host2.eth0
        configuration.host1 = host1
        configuration.host2 = host2
        return configuration

    def generate_test_wide_description(self, config):
        """Extend the inherited description with the configured device IPs."""
        host1, host2 = self.matched.host1, self.matched.host2
        desc = super().generate_test_wide_description(config)
        desc += [
            "\n".join([
                "Configured {}.{}.ips = {}".format(dev.host.hostid, dev.name,
                                                   dev.ips)
                for dev in config.test_wide_devices
            ])
        ]
        return desc

    def test_wide_deconfiguration(self, config):
        """Drop the recorded device list, then run the inherited teardown."""
        del config.test_wide_devices
        super().test_wide_deconfiguration(config)

    def generate_sub_configurations(self, config):
        """Yield one sub-configuration per requested encryption mode.

        NOTE(review): calls ConfMixin.generate_sub_configurations directly
        instead of super() — presumably to bypass intermediate mixins in the
        MRO; confirm against the class hierarchy before changing.
        """
        for subconf in ConfMixin.generate_sub_configurations(self, config):
            for encryption in self.params.macsec_encryption:
                new_config = copy.copy(subconf)
                new_config.encrypt = encryption
                new_config.ip_vers = self.params.ip_versions
                yield new_config

    def apply_sub_configuration(self, config):
        """Create and address the MACsec devices for this sub-configuration.

        Each host gets a ``msec0`` device on top of its NIC with an RX channel
        for the peer's MAC address plus one TX and one RX secure association.
        The (id, key) list is reversed for host2 so TX/RX keys pair up across
        the link.  Tunneled-network addresses come from 192.168.100.0/24 and
        fc00::/64.
        """
        super().apply_sub_configuration(config)
        net_addr = "192.168.100"
        net_addr6 = "fc00:0:0:0"
        host1, host2 = config.host1, config.host2
        k_ids = list(zip(self.ids, self.keys))
        hosts_and_keys = [(host1, host2, k_ids), (host2, host1, k_ids[::-1])]
        for host_a, host_b, k_ids in hosts_and_keys:
            host_a.msec0 = MacsecDevice(realdev=host_a.eth0,
                                        encrypt=config.encrypt)
            rx_kwargs = dict(port=1, address=host_b.eth0.hwaddr)
            tx_sa_kwargs = dict(sa=0, pn=1, enable='on', id=k_ids[0][0],
                                key=k_ids[0][1])
            # RX SA reuses the TX SA arguments plus the peer's channel
            # identification, but with the second (id, key) pair.
            rx_sa_kwargs = rx_kwargs.copy()
            rx_sa_kwargs.update(tx_sa_kwargs)
            rx_sa_kwargs['id'] = k_ids[1][0]
            rx_sa_kwargs['key'] = k_ids[1][1]
            host_a.msec0.rx('add', **rx_kwargs)
            host_a.msec0.tx_sa('add', **tx_sa_kwargs)
            host_a.msec0.rx_sa('add', **rx_sa_kwargs)
        for i, host in enumerate([host1, host2]):
            host.msec0.ip_add(ipaddress(net_addr + "." + str(i + 1) + "/24"))
            host.msec0.ip_add(ipaddress(net_addr6 + "::" + str(i + 1) +
                                        "/64"))
            host.eth0.up()
            host.msec0.up()
            self.wait_tentative_ips([host.eth0, host.msec0])

    def remove_sub_configuration(self, config):
        """Destroy the MACsec devices and down the carrier NICs."""
        host1, host2 = config.host1, config.host2
        for host in (host1, host2):
            host.msec0.destroy()
            del host.msec0
        config.endpoint1.down()
        config.endpoint2.down()
        super().remove_sub_configuration(config)

    def generate_ping_configurations(self, config):
        """Yield PingConf lists pairing host1's and host2's msec0 addresses.

        For each requested IP version the device addresses are filtered by
        family (link-local excluded for IPv6) and zipped pairwise; IPv6 lists
        are reversed — presumably so the most recently added address is used
        first (TODO confirm).  Raises LnstError when the two address lists
        differ in length or either is empty.
        """
        client_nic = config.host1.msec0
        server_nic = config.host2.msec0
        ip_vers = self.params.ip_versions
        count = self.params.ping_count
        interval = self.params.ping_interval
        size = self.params.ping_psize
        common_args = {'count': count, 'interval': interval, 'size': size}
        for ipv in ip_vers:
            kwargs = {}
            if ipv == "ipv4":
                kwargs.update(family=AF_INET)
            elif ipv == "ipv6":
                kwargs.update(family=AF_INET6)
                kwargs.update(is_link_local=False)
            client_ips = client_nic.ips_filter(**kwargs)
            server_ips = server_nic.ips_filter(**kwargs)
            if ipv == "ipv6":
                client_ips = client_ips[::-1]
                server_ips = server_ips[::-1]
            if len(client_ips) != len(server_ips) or (len(client_ips) *
                                                      len(server_ips) == 0):
                raise LnstError("Source/destination ip lists are of "
                                "different size or empty.")
            for src_addr, dst_addr in zip(client_ips, server_ips):
                pconf = PingConf(client=client_nic.netns,
                                 client_bind=src_addr,
                                 destination=server_nic.netns,
                                 destination_address=dst_addr,
                                 **common_args)
                yield [pconf]

    def generate_perf_endpoints(self, config):
        """Performance traffic runs over the MACsec devices themselves."""
        return [(self.matched.host1.msec0, self.matched.host2.msec0)]

    @property
    def mtu_hw_config_dev_list(self):
        # MTU tweaks apply to the underlying NICs, not the msec0 devices.
        return [self.matched.host1.eth0, self.matched.host2.eth0]

    @property
    def dev_interrupt_hw_config_dev_list(self):
        return [self.matched.host1.eth0, self.matched.host2.eth0]
class VxlanOvsTunnelRecipe(
    PauseFramesHWConfigMixin, OffloadSubConfigMixin, BaseTunnelRecipe
):
    """
    This class implements a recipe that configures a simple Vxlan tunnel
    using OpenVSwitch between two hosts.

    .. code-block:: none

                        .--------.
             .----------| switch |-------.
             |          '--------'       |
             |                           |
        .----|-----------.      .--------|--------.
        |  .-'--.        |      |      .-'--.     |
        |  |eth0|        |      |      |eth0|     |
        |  '----'        |      |      '----'     |
        |    |   OvS     |      |        |  OvS   |
        |  vxlan tunnel  |      |  vxlan tunnel   |
        |    host1       |      |     host2       |
        '----------------'      '-----------------'

    The actual test machinery is implemented in the :any:`BaseEnrtRecipe`
    class. The test wide configuration is implemented in the
    :any:`BaseTunnelRecipe` class.
    """

    host1 = HostReq()
    host1.eth0 = DeviceReq(label="net1", driver=RecipeParam("driver"))

    host2 = HostReq()
    host2.eth0 = DeviceReq(label="net1", driver=RecipeParam("driver"))

    # Offload sub-configurations exercised by OffloadSubConfigMixin.
    offload_combinations = Param(
        default=(
            dict(gro="on", gso="on", tso="on"),
            dict(gro="off", gso="on", tso="on"),
            dict(gro="on", gso="off", tso="off"),
            dict(gro="on", gso="on", tso="off"),
        )
    )

    def configure_underlying_network(self, configuration):
        """
        The underlying (carrier) network for the tunnel consists of the
        Ethernet devices on the matched hosts, addressed from
        192.168.101.0/24.
        """
        host1, host2 = self.matched.host1, self.matched.host2
        for i, device in enumerate([host1.eth0, host2.eth0]):
            device.ip_add(ipaddress("192.168.101." + str(i + 1) + "/24"))
            device.up()
            configuration.test_wide_devices.append(device)

        self.wait_tentative_ips(configuration.test_wide_devices)
        configuration.tunnel_endpoints = (host1.eth0, host2.eth0)

    def create_tunnel(self, configuration):
        """
        OvS bridges are created on each of the matched hosts with two ports.
        One port as an integration port and another port of type VXLAN
        acting as a tunnel interface connecting tunneled networks.

        Integration ports are configured with IPv4 and IPv6 addresses of
        the tunneled networks (192.168.200.0/24 and fc00::/64).
        """
        endpoint1, endpoint2 = configuration.tunnel_endpoints
        m1 = endpoint1.netns
        m2 = endpoint2.netns
        ip_filter = {"family": AF_INET}

        # Each host's VXLAN port points at the *other* host's carrier IP.
        for i, (host, endpoint) in enumerate([(m1, endpoint2), (m2, endpoint1)]):
            remote_ip = endpoint.ips_filter(**ip_filter)[0]
            host.br0 = OvsBridgeDevice()
            # Integration port (OpenFlow port 5) carries the tunneled traffic.
            host.int0 = host.br0.port_add(
                interface_options={"type": "internal", "ofport_request": 5}
            )
            configuration.tunnel_devices.append(host.int0)
            host.int0.ip_add(ipaddress("192.168.200." + str(i + 1) + "/24"))
            host.int0.ip_add(ipaddress("fc00::" + str(i + 1) + "/64"))

            # VXLAN port (OpenFlow port 10); key=flow lets the flow rules
            # below choose the VNI.
            host.br0.tunnel_add(
                "vxlan",
                {
                    "options:remote_ip": remote_ip,
                    "options:key": "flow",
                    "ofport_request": 10,
                },
            )
            # Tag traffic from the integration port with VNI 1234, accept
            # only VNI 1234 from the tunnel, drop everything else.
            host.br0.flows_add(
                [
                    "table=0,in_port=5,actions=set_field:1234->tun_id,output:10",
                    "table=0,in_port=10,tun_id=1234,actions=output:5",
                    "table=0,priority=100,actions=drop",
                ]
            )
            host.br0.up()
            host.int0.up()

        self.wait_tentative_ips(configuration.tunnel_devices)

    def generate_ping_endpoints(self, config):
        """
        The ping endpoints for this recipe are simply the tunnel endpoints

        Returned as::

            [PingEndpoints(self.matched.host1.int0, self.matched.host2.int0)]
        """
        return [PingEndpoints(self.matched.host1.int0, self.matched.host2.int0)]

    def get_packet_assert_config(self, ping_config):
        """
        The packet assert test configuration contains filter for source
        and destination addresses matching the carrier network with udp
        header bits specific to VXLAN tunneling. The grep patterns match
        the ICMP or ICMP6 echo replies encapsulated by Vxlan.
        """
        ip_filter = {"family": AF_INET}
        m1_carrier = self.matched.host1.eth0
        m2_carrier = self.matched.host2.eth0
        m1_carrier_ip = m1_carrier.ips_filter(**ip_filter)[0]
        m2_carrier_ip = m2_carrier.ips_filter(**ip_filter)[0]

        ip1 = ping_config.client_bind
        ip2 = ping_config.destination_address

        pa_kwargs = {}
        # udp[8:2] checks the VXLAN flags word for the valid-VNI bit
        # (0x0800); udp[11:4] masked with 0x00FFFFFF extracts the 24-bit
        # VNI, which must be 1234 as programmed by create_tunnel's flows.
        pa_kwargs["p_filter"] = (
            "src {} and dst {} "
            "and udp[8:2] = 0x0800 & 0x0800 "
            "and udp[11:4] = 1234 & 0x00FFFFFF".format(m2_carrier_ip, m1_carrier_ip)
        )

        if isinstance(ip2, Ip4Address):
            grep_pattern = "IP {} > {}: ICMP echo reply".format(ip2, ip1)
        elif isinstance(ip2, Ip6Address):
            grep_pattern = "IP6 {} > {}: ICMP6, echo reply".format(ip2, ip1)
        else:
            # Fixed error message (was: "is nor IPv4 or IPv6 address").
            raise Exception(
                "The destination address is neither an IPv4 nor an IPv6 address"
            )

        pa_kwargs["grep_for"] = [grep_pattern]

        if ping_config.count:
            pa_kwargs["p_min"] = ping_config.count
        m2 = ping_config.destination
        pa_config = PacketAssertConf(m2, m2_carrier, **pa_kwargs)

        return pa_config

    @property
    def offload_nics(self):
        return [self.matched.host1.eth0, self.matched.host2.eth0]

    @property
    def pause_frames_dev_list(self):
        return [self.matched.host1.eth0, self.matched.host2.eth0]