def verify_traffic(duthost, connection, route, is_duthost_active=True, is_route_existed=True):
    """Send traffic towards a host inside the route's prefix and verify the path.

    Packets are injected from a random T1 PTF port towards a destination in
    route["prefix"]; the monitors then check where they surface:
    - IPinIP tunnel traffic is expected only when the route exists and this
      ToR is standby;
    - server-facing traffic is expected only when the route exists and this
      ToR is active;
    - neither is expected when the route does not exist.

    Args:
        duthost: DUT (ToR) host under test.
        connection: dict describing the server connection; "test_intf" is the
            DUT interface monitored for server-side traffic.
        route: dict with the route under test; "prefix" is an IPv4/IPv6 CIDR.
        is_duthost_active: whether duthost is the active ToR.
        is_route_existed: whether the route is expected to be programmed.
    """
    prefix = ipaddress.ip_network(route["prefix"])
    # BUGFIX: the original did random.choice(list(prefix.hosts())), which
    # materializes every host address in the prefix - for a large prefix
    # (e.g. an IPv6 /64) that hangs the test and exhausts memory. Keep the
    # exact hosts() semantics for small prefixes (covers /31, /32 and the
    # IPv6 edge cases); for large ones sample by offset without building
    # the list.
    if prefix.num_addresses <= 256:
        dst_host = str(random.choice(list(prefix.hosts())))
    else:
        # exclude the network address and the last (IPv4 broadcast) address
        dst_host = str(prefix.network_address + random.randrange(1, prefix.num_addresses - 1))
    pkt, exp_pkt = build_packet_to_server(duthost, ptfadapter, dst_host)
    ptf_t1_intf = random.choice(get_t1_ptf_ports(duthost, tbinfo))
    # NOTE: str.strip("eth") removes any of 'e'/'t'/'h' from both ends; it
    # relies on interface names of the form "ethN".
    ptf_t1_intf_index = int(ptf_t1_intf.strip("eth"))
    is_tunnel_traffic_existed = is_route_existed and not is_duthost_active
    is_server_traffic_existed = is_route_existed and is_duthost_active
    tunnel_monitor = tunnel_traffic_monitor(
        duthost, existing=is_tunnel_traffic_existed)
    server_traffic_monitor = ServerTrafficMonitor(
        duthost, ptfhost, vmhost, tbinfo, connection["test_intf"],
        conn_graph_facts, exp_pkt, existing=is_server_traffic_existed)
    with tunnel_monitor, server_traffic_monitor:
        testutils.send(ptfadapter, ptf_t1_intf_index, pkt, count=10)
def verify_traffic(duthost, connection, route, is_duthost_active=True, is_route_existed=True):
    """Send traffic towards the first host in the route's prefix and verify the path.

    Injects packets from a random T1 PTF port and checks where they surface:
    - IPinIP tunnel traffic (with the expected decremented-TTL inner packet)
      is expected only when the route exists and this ToR is standby;
    - server-facing traffic is expected only when the route exists and this
      ToR is active;
    - neither is expected when the route does not exist.

    Args:
        duthost: DUT (ToR) host under test.
        connection: dict describing the server connection; "test_intf" is the
            DUT interface monitored for server-side traffic.
        route: dict with the route under test; "prefix" is an IPv4/IPv6 CIDR.
        is_duthost_active: whether duthost is the active ToR.
        is_route_existed: whether the route is expected to be programmed.
    """
    prefix = ipaddress.ip_network(route["prefix"])
    dst_host = str(next(prefix.hosts()))
    pkt, exp_pkt = build_packet_to_server(duthost, ptfadapter, dst_host)
    ptf_t1_intf = random.choice(get_t1_ptf_ports(duthost, tbinfo))
    ptf_t1_intf_index = int(ptf_t1_intf.strip("eth"))
    is_tunnel_traffic_existed = is_route_existed and not is_duthost_active
    is_server_traffic_existed = is_route_existed and is_duthost_active
    # The encapsulated (inner) packet observed in the tunnel is the injected
    # packet after one routing hop, i.e. with TTL (IPv4) / hop-limit (IPv6)
    # decremented by one.
    # FIX: renamed the misspelled local "tunnel_innner_pkt" -> "tunnel_inner_pkt".
    if isinstance(prefix, ipaddress.IPv4Network):
        tunnel_inner_pkt = pkt[scapyall.IP].copy()
        tunnel_inner_pkt[scapyall.IP].ttl -= 1
    else:
        tunnel_inner_pkt = pkt[scapyall.IPv6].copy()
        tunnel_inner_pkt[scapyall.IPv6].hlim -= 1
    tunnel_monitor = tunnel_traffic_monitor(
        duthost, existing=is_tunnel_traffic_existed, inner_packet=tunnel_inner_pkt)
    server_traffic_monitor = ServerTrafficMonitor(
        duthost, ptfhost, vmhost, tbinfo, connection["test_intf"],
        conn_graph_facts, exp_pkt, existing=is_server_traffic_existed)
    with tunnel_monitor, server_traffic_monitor:
        testutils.send(ptfadapter, ptf_t1_intf_index, pkt, count=10)
def test_downstream_standby_mux_toggle_active(
    conn_graph_facts, ptfadapter, ptfhost,
    rand_selected_dut, rand_unselected_dut, tbinfo,
    require_mocked_dualtor, tunnel_traffic_monitor,
    vmhost, toggle_all_simulator_ports, tor_mux_intfs
):
    """Verify downstream forwarding to a static-route destination across mux toggles.

    Adds a static route whose nexthop is a mux server, then cycles the
    selected ToR standby -> active -> standby and verifies at each stage that
    traffic to the route destination is either tunneled to the peer ToR
    (standby) or forwarded directly to the server (active).
    """
    # set rand_selected_dut as standby and rand_unselected_dut to active tor
    test_params = dualtor_info(ptfhost, rand_selected_dut, rand_unselected_dut, tbinfo)
    server_ipv4 = test_params["target_server_ip"]
    random_dst_ip = "1.1.1.2"
    pkt, exp_pkt = build_packet_to_server(rand_selected_dut, ptfadapter, random_dst_ip)
    ptf_t1_intf = random.choice(get_t1_ptf_ports(rand_selected_dut, tbinfo))

    def monitor_tunnel_and_server_traffic(torhost, expect_tunnel_traffic=True,
                                          expect_server_traffic=True):
        # Send 10 packets towards the static-route dst and assert on which
        # path (IPinIP tunnel vs. directly attached server) they appear.
        tunnel_monitor = tunnel_traffic_monitor(rand_selected_dut, existing=True)
        server_traffic_monitor = ServerTrafficMonitor(
            torhost, ptfhost, vmhost, tbinfo, test_params["selected_port"],
            conn_graph_facts, exp_pkt, existing=False,
            is_mocked=is_mocked_dualtor(tbinfo)
        )
        tunnel_monitor.existing = expect_tunnel_traffic
        server_traffic_monitor.existing = expect_server_traffic
        with tunnel_monitor, server_traffic_monitor:
            testutils.send(ptfadapter, int(ptf_t1_intf.strip("eth")), pkt, count=10)

    logger.info("Stage 1: Verify Standby Forwarding")
    logger.info("Step 1.1: Add route to a nexthop which is a standby Neighbor")
    set_mux_state(rand_selected_dut, tbinfo, 'standby', tor_mux_intfs, toggle_all_simulator_ports)
    add_nexthop_routes(rand_selected_dut, random_dst_ip, nexthops=[server_ipv4])
    # BUGFIX: the static route used to be removed only on the success path,
    # leaking it into subsequent tests whenever any stage failed. Guarantee
    # cleanup with try/finally.
    try:
        logger.info("Step 1.2: Verify traffic to this route dst is forwarded to Active ToR and equally distributed")
        check_tunnel_balance(**test_params)
        monitor_tunnel_and_server_traffic(rand_selected_dut, expect_server_traffic=False, expect_tunnel_traffic=True)

        logger.info("Stage 2: Verify Active Forwarding")
        logger.info("Step 2.1: Simulate Mux state change to active")
        set_mux_state(rand_selected_dut, tbinfo, 'active', tor_mux_intfs, toggle_all_simulator_ports)
        logger.info("Step 2.2: Verify traffic to this route dst is forwarded directly to server")
        monitor_tunnel_and_server_traffic(rand_selected_dut, expect_server_traffic=True, expect_tunnel_traffic=False)

        logger.info("Stage 3: Verify Standby Forwarding Again")
        logger.info("Step 3.1: Simulate Mux state change to standby")
        set_mux_state(rand_selected_dut, tbinfo, 'standby', tor_mux_intfs, toggle_all_simulator_ports)
        logger.info("Step 3.2: Verify traffic to this route dst is now redirected back to Active ToR and equally distributed")
        monitor_tunnel_and_server_traffic(rand_selected_dut, expect_server_traffic=False, expect_tunnel_traffic=True)
        check_tunnel_balance(**test_params)
    finally:
        remove_static_routes(rand_selected_dut, random_dst_ip)
def test_standby_tor_remove_neighbor_downstream_standby(
    conn_graph_facts, ptfadapter, ptfhost,
    rand_selected_dut, rand_unselected_dut, tbinfo,
    require_mocked_dualtor, set_crm_polling_interval,
    tunnel_traffic_monitor, vmhost
):
    """
    @summary: Verify that after removing neighbor entry for a server over standby
    ToR, the packets sent to the server will be dropped(neither passed to the
    server or redirected to the active ToR).
    """
    @contextlib.contextmanager
    def stop_garp(ptfhost):
        """Temporarily stop garp service."""
        ptfhost.shell("supervisorctl stop garp_service")
        yield
        ptfhost.shell("supervisorctl start garp_service")

    tor = rand_selected_dut
    test_params = dualtor_info(ptfhost, rand_selected_dut, rand_unselected_dut, tbinfo)
    server_ipv4 = test_params["target_server_ip"]
    pkt, exp_pkt = build_packet_to_server(tor, ptfadapter, server_ipv4)
    ptf_t1_intf = random.choice(get_t1_ptf_ports(tor, tbinfo))
    logging.info("send traffic to server %s from ptf t1 interface %s", server_ipv4, ptf_t1_intf)
    # Baseline: neighbor present, this ToR standby -> traffic must be tunneled
    # to the active ToR.
    tunnel_monitor = tunnel_traffic_monitor(tor, existing=True)
    with tunnel_monitor:
        testutils.send(ptfadapter, int(ptf_t1_intf.strip("eth")), pkt, count=10)

    logging.info("send traffic to server %s after removing neighbor entry", server_ipv4)
    # With the neighbor flushed, neither tunnel traffic nor server-facing
    # traffic is expected; the same tunnel_monitor object is reused with its
    # expectation flipped.
    tunnel_monitor.existing = False
    server_traffic_monitor = ServerTrafficMonitor(
        tor, ptfhost, vmhost, tbinfo, test_params["selected_port"],
        conn_graph_facts, exp_pkt, existing=False,
        is_mocked=is_mocked_dualtor(tbinfo)
    )
    # for real dualtor testbed, leave the neighbor restoration to garp service
    # (garp is stopped for the duration of this check so it cannot re-add the
    # neighbor entry mid-verification)
    flush_neighbor_ct = flush_neighbor(tor, server_ipv4, restore=is_t0_mocked_dualtor)
    with crm_neighbor_checker(tor), stop_garp(ptfhost), flush_neighbor_ct, tunnel_monitor, server_traffic_monitor:
        testutils.send(ptfadapter, int(ptf_t1_intf.strip("eth")), pkt, count=10)

    logging.info("send traffic to server %s after neighbor entry is restored", server_ipv4)
    # Once the neighbor entry is back, standby forwarding (tunnel) resumes.
    tunnel_monitor.existing = True
    with crm_neighbor_checker(tor), tunnel_monitor:
        testutils.send(ptfadapter, int(ptf_t1_intf.strip("eth")), pkt, count=10)
def test_tunnel_memory_leak(toggle_all_simulator_ports_to_upper_tor, upper_tor_host, lower_tor_host,
                            ptfhost, ptfadapter, conn_graph_facts, tbinfo, vmhost):
    """
    Test if there is memory leak for service tunnel_packet_handler.
    Send ip packets from standby TOR T1 to Server, standby TOR will
    forward the packets to active TOR with tunnel, active TOR will
    decapsulate the IPinIP packets, but there is no neighbor for destination
    as we remove neighbor before test, tunnel_packet_handler will be
    triggered and neighbor will be added. Server will receive the packets.
    Check if memory usage is increased after tunnel_packet_handler's
    operation. Since tunnel_packet_handler is only triggered by the
    first packet, loop the process for all severs to trigger it as much
    as possible.
    """
    @contextlib.contextmanager
    def prepare_services(ptfhost):
        """
        Temporarily start arp and icmp service. Make sure to stop garp service,
        otherwise, it will add neighbor entry back automatically.
        It has to stop garp_service for triggering tunnel_packet_handler.
        It has to start arp and icmp service for receiving packets at server side.
        """
        ptfhost.shell("supervisorctl stop garp_service")
        ptfhost.shell("supervisorctl start arp_responder")
        ptfhost.shell("supervisorctl start icmp_responder")
        yield
        # NOTE: garp_service is intentionally left stopped here; presumably a
        # fixture/teardown elsewhere restarts it - TODO confirm.
        ptfhost.shell("supervisorctl stop arp_responder")
        ptfhost.shell("supervisorctl stop icmp_responder")

    @contextlib.contextmanager
    def remove_neighbor(duthost, server_ip):
        """
        Remove ip neighbor before test for triggering tunnel_packet_handler,
        restore it after test
        """
        flush_neighbor_ct = flush_neighbor(duthost, server_ip, True)
        with flush_neighbor_ct:
            # Verify the flush actually emptied the kernel neighbor table for
            # this address before yielding to the traffic-sending body.
            command = "ip neighbor show %s" % server_ip
            output = [_.strip() for _ in duthost.shell(command)["stdout_lines"]]
            pytest_assert(not output, "server ip {} isn't flushed in neighbor table.".format(server_ip))
            yield

    pytest_assert(is_tunnel_packet_handler_running(upper_tor_host),
                  "tunnel_packet_handler is not running in SWSS conainter.")
    ptf_t1_intf = random.choice(get_t1_ptf_ports(lower_tor_host, tbinfo))
    all_servers_ips = mux_cable_server_ip(upper_tor_host)
    with prepare_services(ptfhost):
        # Get the original memeory percent before test
        check_memory_leak(upper_tor_host)
        # Loop over every mux server so tunnel_packet_handler (triggered only
        # by the first packet towards a missing neighbor) fires once per server.
        for iface, server_ips in all_servers_ips.items():
            server_ipv4 = server_ips["server_ipv4"].split("/")[0]
            logging.info("Select DUT interface {} and server IP {} to test.".format(iface, server_ipv4))
            pkt, exp_pkt = build_packet_to_server(lower_tor_host, ptfadapter, server_ipv4)
            rm_neighbor = remove_neighbor(upper_tor_host, server_ipv4)
            server_traffic_monitor = ServerTrafficMonitor(
                upper_tor_host, ptfhost, vmhost, tbinfo, iface,
                conn_graph_facts, exp_pkt, existing=True, is_mocked=True
            )
            with rm_neighbor, server_traffic_monitor:
                testutils.send(ptfadapter, int(ptf_t1_intf.strip("eth")), pkt, count=PACKET_COUNT)
                logging.info("Sent {} packets from ptf t1 interface {} on standby TOR {}"
                             .format(PACKET_COUNT, ptf_t1_intf, lower_tor_host.hostname))
            # Check memory usage for every operation, used for debugging if test failed
            check_memory_leak(upper_tor_host)
            # The neighbor entry reappearing proves tunnel_packet_handler ran.
            pytest_assert(validate_neighbor_entry_exist(upper_tor_host, server_ipv4),
                          "The server ip {} doesn't exist in neighbor table on dut {}. \
                          tunnel_packet_handler isn't triggered.".format(server_ipv4, upper_tor_host.hostname))
            # Allow some loss while the neighbor is being resolved, but not >50%.
            pytest_assert(len(server_traffic_monitor.matched_packets) > PACKET_COUNT / 2,
                          "Received {} expected packets for server {}, drop more than 50%."
                          .format(len(server_traffic_monitor.matched_packets), server_ipv4))
        # sleep 10s to wait memory usage stable, check if there is memory leak
        time.sleep(10)
        check_result = check_memory_leak(upper_tor_host)
        # NOTE(review): "== False" (not "not check_result") also rejects a None
        # return from check_memory_leak - presumably intentional; confirm.
        pytest_assert(check_result == False, "Test failed because there is memory leak on {}"
                      .format(upper_tor_host.hostname))
def test_mac_move(require_mocked_dualtor, announce_new_neighbor, apply_active_state_to_orchagent,
                  conn_graph_facts, ptfadapter, ptfhost, rand_selected_dut,
                  set_crm_polling_interval, tbinfo, tunnel_traffic_monitor, vmhost):
    """Verify downstream forwarding as a neighbor's MAC moves between ports.

    Uses the announce_new_neighbor generator fixture: each next() yields the
    port the neighbor will be announced on, and send(...) supplies an optional
    callback to run before the announcement (e.g. forcing the port standby).
    At each stage, 10 packets towards NEW_NEIGHBOR_IPV4_ADDR are sent from the
    same T1 PTF port and the tunnel/server monitors assert the expected path.
    """
    tor = rand_selected_dut
    ptf_t1_intf = random.choice(get_t1_ptf_ports(tor, tbinfo))
    ptf_t1_intf_index = int(ptf_t1_intf.strip("eth"))

    # new neighbor learnt on an active port
    test_port = next(announce_new_neighbor)
    announce_new_neighbor.send(None)
    logging.info("let new neighbor learnt on active port %s", test_port)
    pkt, exp_pkt = build_packet_to_server(tor, ptfadapter, NEW_NEIGHBOR_IPV4_ADDR)
    # Active port: expect direct server delivery, no tunnel traffic.
    tunnel_monitor = tunnel_traffic_monitor(tor, existing=False)
    server_traffic_monitor = ServerTrafficMonitor(
        tor, ptfhost, vmhost, tbinfo, test_port,
        conn_graph_facts, exp_pkt, existing=True, is_mocked=is_mocked_dualtor(tbinfo))
    with crm_neighbor_checker(tor), tunnel_monitor, server_traffic_monitor:
        testutils.send(ptfadapter, ptf_t1_intf_index, pkt, count=10)

    # mac move to a standby port
    test_port = next(announce_new_neighbor)
    announce_new_neighbor.send(
        lambda iface: set_dual_tor_state_to_orchagent(tor, "standby", [iface]))
    logging.info("mac move to a standby port %s", test_port)
    pkt, exp_pkt = build_packet_to_server(tor, ptfadapter, NEW_NEIGHBOR_IPV4_ADDR)
    # Standby port: expect tunnel traffic, nothing towards the server port.
    tunnel_monitor = tunnel_traffic_monitor(tor, existing=True)
    server_traffic_monitor = ServerTrafficMonitor(
        tor, ptfhost, vmhost, tbinfo, test_port,
        conn_graph_facts, exp_pkt, existing=False, is_mocked=is_mocked_dualtor(tbinfo))
    with crm_neighbor_checker(tor), tunnel_monitor, server_traffic_monitor:
        testutils.send(ptfadapter, ptf_t1_intf_index, pkt, count=10)

    # standby forwarding check after fdb ageout/flush
    tor.shell("fdbclear")
    # tunnel_monitor (existing=True) is reused from the standby stage above.
    server_traffic_monitor = ServerTrafficMonitor(
        tor, ptfhost, vmhost, tbinfo, test_port,
        conn_graph_facts, exp_pkt, existing=False, is_mocked=is_mocked_dualtor(tbinfo))
    with crm_neighbor_checker(tor), tunnel_monitor, server_traffic_monitor:
        testutils.send(ptfadapter, ptf_t1_intf_index, pkt, count=10)

    # mac move to another active port
    test_port = next(announce_new_neighbor)
    announce_new_neighbor.send(None)
    logging.info("mac move to another active port %s", test_port)
    pkt, exp_pkt = build_packet_to_server(tor, ptfadapter, NEW_NEIGHBOR_IPV4_ADDR)
    tunnel_monitor = tunnel_traffic_monitor(tor, existing=False)
    server_traffic_monitor = ServerTrafficMonitor(
        tor, ptfhost, vmhost, tbinfo, test_port,
        conn_graph_facts, exp_pkt, existing=True, is_mocked=is_mocked_dualtor(tbinfo))
    with crm_neighbor_checker(tor), tunnel_monitor, server_traffic_monitor:
        testutils.send(ptfadapter, ptf_t1_intf_index, pkt, count=10)

    # active forwarding check after fdb ageout/flush
    tor.shell("fdbclear")
    # NOTE(review): existing=False here despite the "active forwarding" label -
    # after fdbclear no server-port delivery is expected (and the reused
    # tunnel_monitor still expects no tunnel traffic); confirm intent.
    server_traffic_monitor = ServerTrafficMonitor(
        tor, ptfhost, vmhost, tbinfo, test_port,
        conn_graph_facts, exp_pkt, existing=False, is_mocked=is_mocked_dualtor(tbinfo))
    with crm_neighbor_checker(tor), tunnel_monitor, server_traffic_monitor:
        testutils.send(ptfadapter, ptf_t1_intf_index, pkt, count=10)
def test_active_tor_remove_neighbor_downstream_active(
    conn_graph_facts, ptfadapter, ptfhost,
    rand_selected_dut, rand_unselected_dut, tbinfo,
    set_crm_polling_interval, tunnel_traffic_monitor, vmhost):
    """
    @Verify those two scenarios:
    If the neighbor entry of a server is present on active ToR,
    all traffic to server should be directly forwarded.
    If the neighbor entry of a server is removed, all traffic to server
    should be dropped and no tunnel traffic.
    """
    @contextlib.contextmanager
    def stop_garp(ptfhost):
        """Temporarily stop garp service."""
        ptfhost.shell("supervisorctl stop garp_service")
        yield
        ptfhost.shell("supervisorctl start garp_service")

    tor = rand_selected_dut
    test_params = dualtor_info(ptfhost, rand_selected_dut, rand_unselected_dut, tbinfo)
    server_ipv4 = test_params["target_server_ip"]
    pkt, exp_pkt = build_packet_to_server(tor, ptfadapter, server_ipv4)
    ptf_t1_intf = random.choice(get_t1_ptf_ports(tor, tbinfo))
    logging.info("send traffic to server %s from ptf t1 interface %s", server_ipv4, ptf_t1_intf)
    # Baseline: neighbor present on the active ToR -> direct server delivery,
    # no tunnel traffic.
    server_traffic_monitor = ServerTrafficMonitor(
        tor, ptfhost, vmhost, tbinfo, test_params["selected_port"],
        conn_graph_facts, exp_pkt, existing=True, is_mocked=is_mocked_dualtor(tbinfo))
    tunnel_monitor = tunnel_traffic_monitor(tor, existing=False)
    with crm_neighbor_checker(tor), tunnel_monitor, server_traffic_monitor:
        testutils.send(ptfadapter, int(ptf_t1_intf.strip("eth")), pkt, count=10)

    logging.info("send traffic to server %s after removing neighbor entry", server_ipv4)
    # Neighbor flushed: traffic should be dropped entirely - neither tunneled
    # nor delivered to the server. tunnel_monitor (existing=False) is reused.
    server_traffic_monitor = ServerTrafficMonitor(
        tor, ptfhost, vmhost, tbinfo, test_params["selected_port"],
        conn_graph_facts, exp_pkt, existing=False,
        is_mocked=is_mocked_dualtor(tbinfo)
    )
    # for real dualtor testbed, leave the neighbor restoration to garp service
    # (garp is stopped during the check so it cannot re-add the entry early)
    flush_neighbor_ct = flush_neighbor(tor, server_ipv4, restore=is_t0_mocked_dualtor)
    with crm_neighbor_checker(tor), stop_garp(
        ptfhost
    ), flush_neighbor_ct, tunnel_monitor, server_traffic_monitor:
        testutils.send(ptfadapter, int(ptf_t1_intf.strip("eth")), pkt, count=10)

    logging.info("send traffic to server %s after neighbor entry is restored", server_ipv4)
    # Neighbor restored: direct delivery resumes, still no tunnel traffic.
    server_traffic_monitor = ServerTrafficMonitor(
        tor, ptfhost, vmhost, tbinfo, test_params["selected_port"],
        conn_graph_facts, exp_pkt, existing=True, is_mocked=is_mocked_dualtor(tbinfo))
    with crm_neighbor_checker(tor), tunnel_monitor, server_traffic_monitor:
        testutils.send(ptfadapter, int(ptf_t1_intf.strip("eth")), pkt, count=10)
def test_active_tor_remove_neighbor_downstream_active(
    conn_graph_facts, ptfadapter, ptfhost, testbed_setup,
    rand_selected_dut, tbinfo, require_mocked_dualtor,
    set_crm_polling_interval, tunnel_traffic_monitor, vmhost):
    """
    @Verify those two scenarios:
    If the neighbor entry of a server is present on active ToR,
    all traffic to server should be directly forwarded.
    If the neighbor entry of a server is removed, all traffic to server
    should be dropped and no tunnel traffic.

    Parameterized over IP version via the testbed_setup fixture, which
    supplies (test_port, server_ip, ip_version).
    """
    @contextlib.contextmanager
    def remove_neighbor(ptfhost, duthost, server_ip, ip_version):
        """Flush the server's neighbor entry with its advertiser paused.

        Stops the relevant PTF neighbor-advertisement service so the entry
        cannot be re-learnt mid-check, flushes the entry, yields, then
        restarts the service (in the finally, so it always comes back).
        """
        # restore ipv4 neighbor since it is statically configured
        if ip_version == "ipv4":
            restore = True
            neighbor_advertise_process = "garp_service"
        elif ip_version == "ipv6":
            restore = False
            neighbor_advertise_process = "arp_responder"
        else:
            raise ValueError("Unknown IP version '%s'" % ip_version)
        flush_neighbor_ct = flush_neighbor(duthost, server_ip, restore=restore)
        try:
            ptfhost.shell("supervisorctl stop %s" % neighbor_advertise_process)
            with flush_neighbor_ct:
                yield
        finally:
            ptfhost.shell("supervisorctl start %s" % neighbor_advertise_process)

    tor = rand_selected_dut
    test_port, server_ip, ip_version = testbed_setup
    pkt, exp_pkt = build_packet_to_server(tor, ptfadapter, server_ip)
    ptf_t1_intf = random.choice(get_t1_ptf_ports(tor, tbinfo))
    logging.info("send traffic to server %s from ptf t1 interface %s", server_ip, ptf_t1_intf)
    # Baseline: neighbor present on the active ToR -> direct server delivery,
    # no tunnel traffic.
    server_traffic_monitor = ServerTrafficMonitor(
        tor, ptfhost, vmhost, tbinfo, test_port,
        conn_graph_facts, exp_pkt, existing=True, is_mocked=is_mocked_dualtor(tbinfo))
    tunnel_monitor = tunnel_traffic_monitor(tor, existing=False)
    with crm_neighbor_checker(tor), tunnel_monitor, server_traffic_monitor:
        testutils.send(ptfadapter, int(ptf_t1_intf.strip("eth")), pkt, count=10)

    logging.info("send traffic to server %s after removing neighbor entry", server_ip)
    # Neighbor removed: expect total drop - no tunnel, no server delivery.
    # tunnel_monitor (existing=False) is reused from the baseline stage.
    server_traffic_monitor = ServerTrafficMonitor(
        tor, ptfhost, vmhost, tbinfo, test_port,
        conn_graph_facts, exp_pkt, existing=False,
        is_mocked=is_mocked_dualtor(tbinfo))
    remove_neighbor_ct = remove_neighbor(ptfhost, tor, server_ip, ip_version)
    with crm_neighbor_checker(
        tor), remove_neighbor_ct, tunnel_monitor, server_traffic_monitor:
        testutils.send(ptfadapter, int(ptf_t1_intf.strip("eth")), pkt, count=10)

    logging.info("send traffic to server %s after neighbor entry is restored", server_ip)
    # Neighbor restored: direct delivery resumes, still no tunnel traffic.
    server_traffic_monitor = ServerTrafficMonitor(
        tor, ptfhost, vmhost, tbinfo, test_port,
        conn_graph_facts, exp_pkt, existing=True, is_mocked=is_mocked_dualtor(tbinfo))
    with crm_neighbor_checker(tor), tunnel_monitor, server_traffic_monitor:
        testutils.send(ptfadapter, int(ptf_t1_intf.strip("eth")), pkt, count=10)