Example #1
def test_hash(add_default_route_to_dut, duthosts, fib_info_files_per_function, setup_vlan, hash_keys, ptfhost, ipver,
              toggle_all_simulator_ports_to_rand_selected_tor_m,
              tbinfo, mux_server_url, router_macs,
              ignore_ttl, single_fib_for_duts):

    if 'dualtor' in tbinfo['topo']['name']:
        wait(30, 'Wait some time for mux active/standby state to be stable after toggled mux state')

    timestamp = datetime.now().strftime('%Y-%m-%d-%H:%M:%S')
    log_file = "/tmp/hash_test.HashTest.{}.{}.log".format(ipver, timestamp)
    logging.info("PTF log file: %s" % log_file)
    if ipver == "ipv4":
        src_ip_range = SRC_IP_RANGE
        dst_ip_range = DST_IP_RANGE
    else:
        src_ip_range = SRC_IPV6_RANGE
        dst_ip_range = DST_IPV6_RANGE
    ptf_runner(ptfhost,
            "ptftests",
            "hash_test.HashTest",
            platform_dir="ptftests",
            params={"fib_info_files": fib_info_files_per_function[:3],   # Test at most 3 DUTs
                    "ptf_test_port_map": ptf_test_port_map(ptfhost, tbinfo, duthosts, mux_server_url),
                    "hash_keys": hash_keys,
                    "src_ip_range": ",".join(src_ip_range),
                    "dst_ip_range": ",".join(dst_ip_range),
                    "router_macs": router_macs,
                    "vlan_ids": VLANIDS,
                    "ignore_ttl":ignore_ttl,
                    "single_fib_for_duts": single_fib_for_duts
                   },
            log_file=log_file,
            qlen=PTF_QLEN,
            socket_recv_size=16384)
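Every example on this page relies on a shared wait(seconds, msg) helper whose definition is not shown here. As a rough mental model only (an assumption, not the actual sonic-mgmt implementation), it can be thought of as a logged sleep:

import logging
import time

logger = logging.getLogger(__name__)


def wait(seconds, msg=""):
    """Sleep for the given number of seconds, optionally logging the reason for waiting."""
    if msg:
        logger.info("Waiting %s seconds: %s", seconds, msg)
    time.sleep(seconds)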
Example #2
    def _check_monit_on_dut(*args, **kwargs):
        dut = kwargs['node']
        results = kwargs['results']

        logger.info("Checking status of each Monit service...")
        networking_uptime = dut.get_networking_uptime().seconds
        timeout = max((MONIT_STABILIZE_MAX_TIME - networking_uptime), 0)
        interval = 20
        logger.info("networking_uptime = {} seconds, timeout = {} seconds, interval = {} seconds" \
                    .format(networking_uptime, timeout, interval))

        check_result = {
            "failed": False,
            "check_item": "monit",
            "host": dut.hostname
        }

        if timeout == 0:
            monit_services_status = dut.get_monit_services_status()
            if not monit_services_status:
                logger.info("Monit was not running.")
                check_result["failed"] = True
                check_result["failed_reason"] = "Monit was not running"
                logger.info("Checking status of each Monit service was done!")
                return check_result

            check_result = _check_monit_services_status(
                check_result, monit_services_status)
        else:
            start = time.time()
            elapsed = 0
            is_monit_running = False
            while elapsed < timeout:
                check_result["failed"] = False
                monit_services_status = dut.get_monit_services_status()
                if not monit_services_status:
                    wait(interval, msg="Monit was not started and wait {} seconds to retry. Remaining time: {}." \
                         .format(interval, timeout - elapsed))
                    elapsed = time.time() - start
                    continue

                is_monit_running = True
                check_result = _check_monit_services_status(
                    check_result, monit_services_status)
                if check_result["failed"]:
                    wait(interval,
                         msg="Services were not monitored and wait {} seconds to retry. Remaining time: {}. Services status: {}" \
                         .format(interval, timeout - elapsed, str(check_result["services_status"])))
                    elapsed = time.time() - start
                else:
                    break

            if not is_monit_running:
                logger.info("Monit was not running.")
                check_result["failed"] = True
                check_result["failed_reason"] = "Monit was not running"

        logger.info("Checking status of each Monit service was done on %s" %
                    dut.hostname)
        results[dut.hostname] = check_result
Example #3
def check_services(dut):
    logger.info("Checking services status on %s..." % dut.hostname)

    networking_uptime = dut.get_networking_uptime().seconds
    timeout = max((SYSTEM_STABILIZE_MAX_TIME - networking_uptime), 0)
    interval = 20
    logger.info("networking_uptime=%d seconds, timeout=%d seconds, interval=%d seconds" % \
                (networking_uptime, timeout, interval))

    check_result = {"failed": True, "check_item": "services"}
    if timeout == 0:    # Check services status, do not retry.
        services_status = dut.critical_services_status()
        check_result["failed"] = False if all(services_status.values()) else True
        check_result["services_status"] = services_status
    else:               # Retry checking service status
        start = time.time()
        elapsed = 0
        while elapsed < timeout:
            services_status = dut.critical_services_status()
            check_result["failed"] = False if all(services_status.values()) else True
            check_result["services_status"] = services_status

            if check_result["failed"]:
                wait(interval, msg="Not all services are started, wait %d seconds to retry. Remaining time: %d %s" % \
                     (interval, int(timeout - elapsed), str(check_result["services_status"])))
                elapsed = time.time() - start
            else:
                break

    logger.info("Done checking services status.")
    return check_result
Example #4
def do_reboot(duthost, localhost, dutip, rw_user, rw_pass):
    # occasionally reboot command fails with some kernel error messages
    # Hence retry if needed.
    #
    wait_time = 120
    retries = 3
    rebooted = False
    for i in range(retries):
        # Regular reboot command would not work, as it would try to
        # collect show tech, which will fail in RO state.
        #
        chk_ssh_remote_run(localhost, dutip, rw_user, rw_pass,
                           "sudo /sbin/reboot")
        try:
            localhost.wait_for(host=duthost.mgmt_ip,
                               port=22,
                               state="stopped",
                               delay=5,
                               timeout=60)
            rebooted = True
            break
        except RunAnsibleModuleFail as e:
            logger.error(
                "DUT did not go down, exception: {} attempt:{}/{}".format(
                    repr(e), i, retries))
    assert rebooted, "Failed to reboot"
    localhost.wait_for(host=duthost.mgmt_ip,
                       port=22,
                       state="started",
                       delay=10,
                       timeout=300)
    wait(wait_time,
         msg="Wait {} seconds for system to be stable.".format(wait_time))
Example #5
def test_flap_neighbor_entry_standby(
        require_mocked_dualtor,
        apply_mock_dual_tor_tables,
        apply_mock_dual_tor_kernel_configs,
        rand_selected_dut,
        tbinfo,
        request,
        mock_server_ip_mac_map):

    dut = rand_selected_dut

    vlan_interface_name = list(dut.get_extended_minigraph_facts(tbinfo)['minigraph_vlans'].keys())[0]

    # Apply mux standby state
    load_swss_config(dut, _swss_path(SWSS_MUX_STATE_STANDBY_CONFIG_FILE))

    wait(3, 'extra wait for initial CRMs to be updated')

    crm_facts1 = dut.get_crm_facts()
    logger.info(json.dumps(crm_facts1, indent=4))

    for _ in range(request.config.getoption('--mux-stress-count')):
        remove_neighbors(dut, mock_server_ip_mac_map, vlan_interface_name)
        add_neighbors(dut, mock_server_ip_mac_map, vlan_interface_name)

    wait(3, 'extra wait for CRMs to be updated')

    crm_facts2 = dut.get_crm_facts()
    logger.info(json.dumps(crm_facts2, indent=4))

    unmatched_crm_facts = compare_crm_facts(crm_facts1, crm_facts2)
    pytest_assert(len(unmatched_crm_facts) == 0, 'Unmatched CRM facts: {}'.format(json.dumps(unmatched_crm_facts, indent=4)))
Example #6
def test_change_mux_state(
        require_mocked_dualtor,
        apply_mock_dual_tor_tables,
        apply_mock_dual_tor_kernel_configs,
        rand_selected_dut,
        request):

    dut = rand_selected_dut

    # Apply mux active state
    load_swss_config(dut, _swss_path(SWSS_MUX_STATE_ACTIVE_CONFIG_FILE))
    load_swss_config(dut, _swss_path(SWSS_MUX_STATE_STANDBY_CONFIG_FILE))
    load_swss_config(dut, _swss_path(SWSS_MUX_STATE_ACTIVE_CONFIG_FILE))

    wait(10, 'extra wait for initial CRMs to be updated')

    crm_facts1 = dut.get_crm_facts()
    logger.info(json.dumps(crm_facts1, indent=4))

    # Set all mux state to 'standby'/'active' N times.
    for _ in range(request.config.getoption('--mux-stress-count')):
        load_swss_config(dut, _swss_path(SWSS_MUX_STATE_STANDBY_CONFIG_FILE))
        load_swss_config(dut, _swss_path(SWSS_MUX_STATE_ACTIVE_CONFIG_FILE))

    wait(10, 'extra wait for CRMs to be updated')

    crm_facts2 = dut.get_crm_facts()
    logger.info(json.dumps(crm_facts2, indent=4))

    # Check CRM values for leak
    unmatched_crm_facts = compare_crm_facts(crm_facts1, crm_facts2)
    pytest_assert(len(unmatched_crm_facts) == 0, 'Unmatched CRM facts: {}'.format(json.dumps(unmatched_crm_facts, indent=4)))
Example #7
def reboot_dut(dut, localhost, cmd, wait_time):
    logger.info("Reboot dut using cmd='%s'" % cmd)
    reboot_task, reboot_res = dut.command(cmd, module_async=True)

    logger.info("Wait for DUT to go down")
    try:
        localhost.wait_for(host=dut.mgmt_ip,
                           port=22,
                           state="stopped",
                           delay=10,
                           timeout=300)
    except RunAnsibleModuleFail as e:
        logger.error("DUT did not go down, exception: " + repr(e))
        if reboot_task.is_alive():
            logger.error("Rebooting is not completed")
            reboot_task.terminate()
        logger.error("reboot result %s" % str(reboot_res.get()))
        assert False, "Failed to reboot the DUT"

    localhost.wait_for(host=dut.mgmt_ip,
                       port=22,
                       state="started",
                       delay=10,
                       timeout=300)
    wait(wait_time,
         msg="Wait {} seconds for system to be stable.".format(wait_time))
Example #8
    def test_config_interface_ip(self, setup_config_mode, sample_intf):
        """
        Checks whether 'config interface ip add/remove <intf> <ip>'
        adds/removes the ip on the test interface when its interface
        alias/name is provided as per the configured naming mode
        """
        if sample_intf['ip'] is None:
            pytest.skip('No L3 physical interface present')

        dutHostGuest, mode, ifmode = setup_config_mode
        test_intf = sample_intf[mode]
        test_intf_ip = sample_intf['ip']
        cli_ns_option = sample_intf['cli_ns_option']

        out = dutHostGuest.shell('SONIC_CLI_IFACE_MODE={} sudo config interface {} ip remove {} {}'.format(
            ifmode, cli_ns_option, test_intf, test_intf_ip))
        if out['rc'] != 0:
            pytest.fail()

        wait(3)
        show_ip_intf = dutHostGuest.shell('SONIC_CLI_IFACE_MODE={} show ip interface'.format(ifmode))['stdout']
        logger.info('show_ip_intf:\n{}'.format(show_ip_intf))

        assert re.search(r'{}\s+{}'.format(test_intf, test_intf_ip), show_ip_intf) is None

        out = dutHostGuest.shell('SONIC_CLI_IFACE_MODE={} sudo config interface {} ip add {} {}'.format(
            ifmode, cli_ns_option, test_intf, test_intf_ip))
        if out['rc'] != 0:
            pytest.fail()

        wait(3)
        show_ip_intf = dutHostGuest.shell('SONIC_CLI_IFACE_MODE={} show ip interface'.format(ifmode))['stdout']
        logger.info('show_ip_intf:\n{}'.format(show_ip_intf))

        assert re.search(r'{}\s+{}'.format(test_intf, test_intf_ip), show_ip_intf) is not None
Example #9
def test_decap(tbinfo, duthosts, mux_server_url, setup_teardown, ptfhost, set_mux_random):

    setup_info = setup_teardown

    if 'dualtor' in tbinfo['topo']['name']:
        wait(30, 'Wait some time for mux active/standby state to be stable after toggled mux state')

    log_file = "/tmp/decap.{}.log".format(datetime.now().strftime('%Y-%m-%d-%H:%M:%S'))
    ptf_runner(ptfhost,
               "ptftests",
               "IP_decap_test.DecapPacketTest",
                platform_dir="ptftests",
                params={"outer_ipv4": setup_info["outer_ipv4"],
                        "outer_ipv6": setup_info["outer_ipv6"],
                        "inner_ipv4": setup_info["inner_ipv4"],
                        "inner_ipv6": setup_info["inner_ipv6"],
                        "lo_ips": setup_info["lo_ips"],
                        "lo_ipv6s": setup_info["lo_ipv6s"],
                        "router_macs": setup_info["router_macs"],
                        "dscp_mode": setup_info["dscp_mode"],
                        "ttl_mode": setup_info["ttl_mode"],
                        "ignore_ttl": setup_info["ignore_ttl"],
                        "max_internal_hops": setup_info["max_internal_hops"],
                        "fib_info_files": setup_info["fib_info_files"],
                        "ptf_test_port_map": ptf_test_port_map(ptfhost, tbinfo, duthosts, mux_server_url)
                        },
                qlen=PTFRUNNER_QLEN,
                log_file=log_file)
Example #10
def test_portstat_clear(duthosts, rand_one_dut_hostname, command):
    duthost = duthosts[rand_one_dut_hostname]
    wait(30, 'Wait for DUT to receive/send some packets')
    before_portstat = parse_portstat(
        duthost.command('portstat')['stdout_lines'])
    pytest_assert(before_portstat, 'No parsed command output')

    duthost.command(command)
    wait(1, 'Wait for portstat counters to refresh')

    after_portstat = parse_portstat(
        duthost.command('portstat')['stdout_lines'])
    pytest_assert(after_portstat, 'No parsed command output')
    # Only assert when the rx/tx count before clearing is at least COUNT_THRES,
    # because the DUT may send or receive some packets during the test after the
    # port counters are cleared.
    COUNT_THRES = 10
    for intf in before_portstat:
        rx_ok_before = int(before_portstat[intf]['rx_ok'].replace(',', ''))
        rx_ok_after = int(after_portstat[intf]['rx_ok'].replace(',', ''))
        tx_ok_before = int(before_portstat[intf]['tx_ok'].replace(',', ''))
        tx_ok_after = int(after_portstat[intf]['tx_ok'].replace(',', ''))
        if rx_ok_before >= COUNT_THRES:
            pytest_assert(rx_ok_before >= rx_ok_after,
                          'RX_OK after clear should not exceed RX_OK before clear')
        if tx_ok_before >= COUNT_THRES:
            pytest_assert(tx_ok_before >= tx_ok_after,
                          'TX_OK after clear should not exceed TX_OK before clear')
Example #11
        def restart_ptf_nn_agent(self):
            cmd_list = []
            cmd_list.append('supervisorctl restart ptf_nn_agent')

            cmd = "\n".join(cmd_list)
            self._exec(cmd)
            wait(10)
Example #12
    def vrf_vni_map_set(self, duthost, setup_dut):
        duthost.shell("config vrf add Vrf1")
        duthost.shell("config vrf add_vrf_vni_map Vrf1 10000")
        yield
        duthost.shell("config vrf del_vrf_vni_map Vrf1")
        duthost.shell("config vrf del Vrf1")
        wait(3)
        duthost.shell("vtysh -c 'configure' -c 'no vrf {}'".format("Vrf1"))
Example #13
def set_polling_interval(duthost):
    wait_time = 2
    duthost.command("crm config polling interval {}".format(CRM_POLLING_INTERVAL))
    wait(wait_time, "Waiting {} sec for CRM counters to become updated".format(wait_time))

    yield

    duthost.command("crm config polling interval {}".format(CRM_DEFAULT_POLL_INTERVAL))
    wait(wait_time, "Waiting {} sec for CRM counters to become updated".format(wait_time))
Example #14
def check_monit(dut):
    """
    @summary: Check whether Monit is running and whether the services monitored by Monit
              are in the correct state.
    @return: A dictionary containing the test result (failed or not) and the status of each service.
    """
    logger.info("Checking status of each Monit service...")
    networking_uptime = dut.get_networking_uptime().seconds
    timeout = max((MONIT_STABILIZE_MAX_TIME - networking_uptime), 0)
    interval = 20
    logger.info("networking_uptime = {} seconds, timeout = {} seconds, interval = {} seconds" \
                .format(networking_uptime, timeout, interval))

    check_result = {"failed": False, "check_item": "monit"}

    if timeout == 0:
        monit_services_status = dut.get_monit_services_status()
        if not monit_services_status:
            logger.info("Monit was not running.")
            check_result["failed"] = True
            check_result["failed_reason"] = "Monit was not running"
            logger.info("Checking status of each Monit service was done!")
            return check_result

        check_result = check_monit_services_status(check_result,
                                                   monit_services_status)
    else:
        start = time.time()
        elapsed = 0
        is_monit_running = False
        while elapsed < timeout:
            check_result["failed"] = False
            monit_services_status = dut.get_monit_services_status()
            if not monit_services_status:
                wait(interval, msg="Monit was not started and wait {} seconds to retry. Remaining time: {}." \
                    .format(interval, timeout - elapsed))
                elapsed = time.time() - start
                continue

            is_monit_running = True
            check_result = check_monit_services_status(check_result,
                                                       monit_services_status)
            if check_result["failed"]:
                wait(interval, msg="Services were not monitored and wait {} seconds to retry. Remaining time: {}. Services status: {}" \
                    .format(interval, timeout - elapsed, str(check_result["services_status"])))
                elapsed = time.time() - start
            else:
                break

        if not is_monit_running:
            logger.info("Monit was not running.")
            check_result["failed"] = True
            check_result["failed_reason"] = "Monit was not running"

    logger.info("Checking status of each Monit service was done!")
    return check_result
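The dictionary returned by check_monit is meant to be evaluated by the caller. The snippet below is a hypothetical consumer (not code from the test suite) showing how the result could drive a pass/fail decision:

import pytest

check_result = check_monit(duthost)
if check_result["failed"]:
    # Surface the reason recorded by the checker, falling back to the full result dict.
    pytest.fail("Monit check failed: {}".format(check_result.get("failed_reason", check_result)))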
Example #15
def load_swss_config(dut, config_file):
    """Load swss config file specified by 'config_file' in swss docker using command 'swssconfig'

    Args:
        dut (obj): Object for interacting with DUT.
        config_file (str): Path and filename of the config file in swss docker container.
    """
    logger.info('Loading swss config {} ...'.format(config_file))
    dut.shell('docker exec swss sh -c "swssconfig {}"'.format(config_file))
    wait(2, 'for CRMs to be updated')
    logger.info('Loading swss config {} done'.format(config_file))
Example #16
    def get_avg_dpdp_convergence_time(port_name):
        """
        Args:
            port_name: Name of the port
        """

        table, avg, tx_frate, rx_frate, avg_delta = [], [], [], [], []
        for i in range(0, iteration):
            logger.info('|---- {} Link Flap Iteration : {} ----|'.format(port_name, i+1))

            """ Starting Traffic """
            logger.info('Starting Traffic')
            cs = cvg_api.convergence_state()
            cs.transmit.state = cs.transmit.START
            cvg_api.set_state(cs)
            wait(TIMEOUT, "For Traffic To start")
            flow_stats = get_flow_stats(cvg_api)
            tx_frame_rate = flow_stats[0].frames_tx_rate
            assert tx_frame_rate != 0, "Traffic has not started"
            """ Flapping Link """
            logger.info('Simulating Link Failure on {} link'.format(port_name))
            cs = cvg_api.convergence_state()
            cs.link.port_names = [port_name]
            cs.link.state = cs.link.DOWN
            cvg_api.set_state(cs)
            wait(TIMEOUT, "For Link to go down")
            flows = get_flow_stats(cvg_api)
            for flow in flows:
                tx_frate.append(flow.frames_tx_rate)
                rx_frate.append(flow.frames_rx_rate)
            assert sum(tx_frate) == sum(rx_frate), "Traffic has not converged after link flap: TxFrameRate:{},RxFrameRate:{}".format(sum(tx_frate), sum(rx_frate))
            logger.info("Traffic has converged after link flap")
            """ Get control plane to data plane convergence value """
            request = cvg_api.convergence_request()
            request.convergence.flow_names = []
            convergence_metrics = cvg_api.get_results(request).flow_convergence
            for metrics in convergence_metrics:
                logger.info('CP/DP Convergence Time (ms): {}'.format(metrics.control_plane_data_plane_convergence_us/1000))
            avg.append(int(metrics.control_plane_data_plane_convergence_us/1000))
            avg_delta.append(int(flows[0].frames_tx)-int(flows[0].frames_rx))
            """ Performing link up at the end of iteration """
            logger.info('Simulating Link Up on {} at the end of iteration {}'.format(port_name, i+1))
            cs = cvg_api.convergence_state()
            cs.link.port_names = [port_name]
            cs.link.state = cs.link.UP
            cvg_api.set_state(cs)
        table.append('%s Link Failure' % port_name)
        table.append(route_type)
        table.append(number_of_routes)
        table.append(iteration)
        table.append(mean(avg_delta))
        table.append(mean(avg))
        return table
Example #17
def test_basic_fib(duthosts, ptfhost, ipv4, ipv6, mtu,
                   toggle_all_simulator_ports_to_random_side,
                   fib_info_files_per_function, tbinfo, mux_server_url,
                   router_macs, ignore_ttl, single_fib_for_duts):

    if 'dualtor' in tbinfo['topo']['name']:
        wait(
            30,
            'Wait some time for mux active/standby state to be stable after toggled mux state'
        )

    timestamp = datetime.now().strftime('%Y-%m-%d-%H:%M:%S')

    # do not test load balancing for vs platform as kernel 4.9
    # can only do load balance base on L3
    if duthosts[0].facts['asic_type'] in ["vs"]:
        test_balancing = False
    else:
        test_balancing = True

    logging.info("run ptf test")
    log_file = "/tmp/fib_test.FibTest.ipv4.{}.ipv6.{}.{}.log".format(
        ipv4, ipv6, timestamp)
    logging.info("PTF log file: %s" % log_file)
    ptf_runner(
        ptfhost,
        "ptftests",
        "fib_test.FibTest",
        platform_dir="ptftests",
        params={
            "fib_info_files": fib_info_files_per_function[:3],  # Test at most 3 DUTs
            "ptf_test_port_map": ptf_test_port_map(ptfhost, tbinfo, duthosts, mux_server_url),
            "router_macs": router_macs,
            "ipv4": ipv4,
            "ipv6": ipv6,
            "testbed_mtu": mtu,
            "test_balancing": test_balancing,
            "ignore_ttl": ignore_ttl,
            "single_fib_for_duts": single_fib_for_duts
        },
        log_file=log_file,
        qlen=PTF_QLEN,
        socket_recv_size=16384)
Example #18
    def _check(*args, **kwargs):
        check_results = []
        for dut in duthosts:
            logger.info("Checking process status on %s..." % dut.hostname)

            networking_uptime = dut.get_networking_uptime().seconds
            timeout = max((SYSTEM_STABILIZE_MAX_TIME - networking_uptime), 0)
            interval = 20
            logger.info("networking_uptime=%d seconds, timeout=%d seconds, interval=%d seconds" % \
                        (networking_uptime, timeout, interval))

            check_result = {
                "failed": False,
                "check_item": "processes",
                "host": dut.hostname
            }
            if timeout == 0:  # Check processes status, do not retry.
                processes_status = dut.all_critical_process_status()
                check_result["processes_status"] = processes_status
                check_result["services_status"] = {}
                for k, v in processes_status.items():
                    if v['status'] == False or len(
                            v['exited_critical_process']) > 0:
                        check_result['failed'] = True
                    check_result["services_status"].update({k: v['status']})
            else:  # Retry checking processes status
                start = time.time()
                elapsed = 0
                while elapsed < timeout:
                    check_result["failed"] = False
                    processes_status = dut.all_critical_process_status()
                    check_result["processes_status"] = processes_status
                    check_result["services_status"] = {}
                    for k, v in processes_status.items():
                        if v['status'] == False or len(
                                v['exited_critical_process']) > 0:
                            check_result['failed'] = True
                        check_result["services_status"].update(
                            {k: v['status']})

                    if check_result["failed"]:
                        wait(interval, msg="Not all processes are started, wait %d seconds to retry. Remaining time: %d %s" % \
                            (interval, int(timeout - elapsed), str(check_result["processes_status"])))
                        elapsed = time.time() - start
                    else:
                        break

            logger.info("Done checking processes status on %s" % dut.hostname)
            check_results.append(check_result)

        return check_results
Example #19
    def test_config_interface_state(self, setup_config_mode, sample_intf):
        """
        Checks whether 'config interface startup/shutdown <intf>'
        changes the admin state of the test interface to up/down when
        its interface alias/name is provided as per the configured
        naming mode
        """
        dutHostGuest, mode, ifmode = setup_config_mode
        test_intf = sample_intf[mode]
        interface = sample_intf['default']
        regex_int = re.compile(
            r'(\S+)\s+[\d,N\/A]+\s+(\w+)\s+(\d+)\s+[\w\/]+\s+([\w\/]+)\s+(\w+)\s+(\w+)\s+(\w+)'
        )

        out = dutHostGuest.shell(
            'SONIC_CLI_IFACE_MODE={} sudo config interface shutdown {}'.format(
                ifmode, test_intf))
        if out['rc'] != 0:
            pytest.fail()

        wait(3)
        show_intf_status = dutHostGuest.shell(
            'SONIC_CLI_IFACE_MODE={0} show interfaces status {1} | grep -w {1}'
            .format(ifmode, test_intf))
        logger.info('show_intf_status:\n{}'.format(show_intf_status['stdout']))

        line = show_intf_status['stdout'].strip()
        if regex_int.match(line) and interface == regex_int.match(line).group(
                1):
            admin_state = regex_int.match(line).group(7)

        assert admin_state == 'down'

        out = dutHostGuest.shell(
            'SONIC_CLI_IFACE_MODE={} sudo config interface startup {}'.format(
                ifmode, test_intf))
        if out['rc'] != 0:
            pytest.fail()

        wait(3)
        show_intf_status = dutHostGuest.shell(
            'SONIC_CLI_IFACE_MODE={0} show interfaces status {1} | grep -w {1}'
            .format(ifmode, test_intf))
        logger.info('show_intf_status:\n{}'.format(show_intf_status['stdout']))

        line = show_intf_status['stdout'].strip()
        if regex_int.match(line) and interface == regex_int.match(line).group(
                1):
            admin_state = regex_int.match(line).group(7)

        assert admin_state == 'up'
Example #20
    def _check_interfaces_on_dut(*args, **kwargs):
        dut = kwargs['node']
        results = kwargs['results']
        logger.info("Checking interfaces status on %s..." % dut.hostname)

        networking_uptime = dut.get_networking_uptime().seconds
        timeout = max((SYSTEM_STABILIZE_MAX_TIME - networking_uptime), 0)
        interval = 20
        logger.info("networking_uptime=%d seconds, timeout=%d seconds, interval=%d seconds" % \
                    (networking_uptime, timeout, interval))

        down_ports = []
        check_result = {"failed": True, "check_item": "interfaces", "host": dut.hostname}
        for asic in dut.asics:
            ip_interfaces = []
            cfg_facts = asic.config_facts(host=dut.hostname,
                                          source="persistent", verbose=False)['ansible_facts']
            phy_interfaces = [k for k, v in cfg_facts["PORT"].items() if
                              "admin_status" in v and v["admin_status"] == "up"]
            if "PORTCHANNEL_INTERFACE" in cfg_facts:
                ip_interfaces = list(cfg_facts["PORTCHANNEL_INTERFACE"].keys())
            if "VLAN_INTERFACE" in cfg_facts:
                ip_interfaces += list(cfg_facts["VLAN_INTERFACE"].keys())

            logger.info(json.dumps(phy_interfaces, indent=4))
            logger.info(json.dumps(ip_interfaces, indent=4))

            if timeout == 0:  # Check interfaces status, do not retry.
                down_ports += _find_down_ports(asic, phy_interfaces, ip_interfaces)
                check_result["failed"] = True if len(down_ports) > 0 else False
                check_result["down_ports"] = down_ports
            else:  # Retry checking interface status
                start = time.time()
                elapsed = 0
                while elapsed < timeout:
                    down_ports = _find_down_ports(asic, phy_interfaces, ip_interfaces)
                    check_result["failed"] = True if len(down_ports) > 0 else False
                    check_result["down_ports"] = down_ports

                    if check_result["failed"]:
                        wait(interval,
                             msg="Found down ports, wait %d seconds to retry. Remaining time: %d, down_ports=%s" % \
                                 (interval, int(timeout - elapsed), str(check_result["down_ports"])))
                        elapsed = time.time() - start
                    else:
                        break

        logger.info("Done checking interfaces status on %s" % dut.hostname)
        check_result["failed"] = True if len(down_ports) > 0 else False
        check_result["down_ports"] = down_ports
        results[dut.hostname] = check_result
Example #21
def add_neighbors(dut, neighbors, interface):
    """Helper function for adding the specified neighbors.

    Args:
        dut (obj): Object for interacting with DUT.
        neighbors (dict): Dict of neighbors, key is neighbor IP address, value is neighbor MAC address.
        interface (str): Name of the interface to which the neighbors are added.
    """
    logger.info('Adding neighbors...')
    cmds = []
    for ip, mac in neighbors.items():
        cmds.append('ip -4 neigh replace {} lladdr {} dev {}'.format(ip, mac, interface))
    dut.shell_cmds(cmds=cmds)
    wait(2, 'for CRMs to be updated')
    logger.info('Adding neighbors done')
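Example #5 above also calls a remove_neighbors helper that is not listed on this page. A minimal sketch of what a symmetric counterpart could look like, assuming it simply deletes the same entries with 'ip neigh del' (an illustration, not the verified sonic-mgmt code):

def remove_neighbors(dut, neighbors, interface):
    """Remove the specified neighbor entries from 'interface' on the DUT."""
    logger.info('Removing neighbors...')
    cmds = []
    for ip in neighbors:
        # Delete the neighbor entry for each IP on the given interface.
        cmds.append('ip -4 neigh del {} dev {}'.format(ip, interface))
    dut.shell_cmds(cmds=cmds)
    wait(2, 'for CRMs to be updated')
    logger.info('Removing neighbors done')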
Example #22
    def run_traffic(routes):
        logger.info('|-------------------- RIB-IN Capacity test, No.of Routes : {} ----|'.format(routes))
        conv_config = tgen_capacity(routes)
        cvg_api.set_config(conv_config)
        """ Starting Protocols """
        logger.info("Starting all protocols ...")
        cs = cvg_api.convergence_state()
        cs.protocol.state = cs.protocol.START
        cvg_api.set_state(cs)
        wait(TIMEOUT, "For Protocols To start")
        """ Starting Traffic """
        logger.info('Starting Traffic')
        cs = cvg_api.convergence_state()
        cs.transmit.state = cs.transmit.START
        cvg_api.set_state(cs)
        wait(TIMEOUT, "For Traffic To start")
Example #23
def __recover_interfaces(dut, fanouthosts, result, wait_time):
    action = None
    for port in result['down_ports']:
        logging.warning("Restoring port: {}".format(port))

        pn = str(port).lower()
        if 'portchannel' in pn or 'vlan' in pn:
            action = 'config_reload'
            continue

        fanout, fanout_port = fanout_switch_port_lookup(fanouthosts, port)
        if fanout and fanout_port:
            fanout.no_shutdown(fanout_port)
        dut.no_shutdown(port)
    wait(wait_time, msg="Wait {} seconds for interface(s) to restore.".format(wait_time))
    return action
Example #24
def do_reboot(duthost, localhost, duthosts, rw_user="", rw_pass=""):
    # occasionally reboot command fails with some kernel error messages
    # Hence retry if needed.
    #
    wait_time = 20
    retries = 3
    rebooted = False

    for i in range(retries):
        #
        try:
            # Reboot DUT using reboot function instead of using ssh_remote_run.
            # ssh_remote_run gets blocked due to console messages from reboot on DUT
            # Do not wait for ssh, as the next step checks that ssh is stopped to
            # ensure the DUT is rebooting.
            reboot(duthost, localhost, wait_for_ssh=False)
            localhost.wait_for(host=duthost.mgmt_ip,
                               port=22,
                               state="stopped",
                               delay=5,
                               timeout=60)
            rebooted = True
            break
        except RunAnsibleModuleFail as e:
            logger.error(
                "DUT did not go down, exception: {} attempt:{}/{}".format(
                    repr(e), i, retries))

    assert rebooted, "Failed to reboot"
    localhost.wait_for(host=duthost.mgmt_ip,
                       port=22,
                       state="started",
                       delay=10,
                       timeout=300)
    wait(wait_time,
         msg="Wait {} seconds for system to be stable.".format(wait_time))
    assert wait_until(300, 20, 0, duthost.critical_services_fully_started), \
            "All critical services should be fully started!"
    # If supervisor node is rebooted in chassis, linecards also will reboot.
    # Check if all linecards are back up.
    if duthost.is_supervisor_node():
        for host in duthosts:
            if host != duthost:
                logger.info("checking if {} critical services are up".format(
                    host.hostname))
                assert wait_until(300, 20, 0, host.critical_services_fully_started), \
                        "All critical services of {} should be fully started!".format(host.hostname)
Example #25
def toggle_all_simulator_ports_to_rand_selected_tor_m(duthosts, mux_server_url,
                                                      tbinfo,
                                                      rand_one_dut_hostname):
    """
    A function-level fixture to toggle all ports to the randomly selected ToR.

    Before toggling, this fixture first sets all muxcables to 'manual' mode on all ToRs.
    After the test is done, it restores all muxcables to 'auto' mode on all ToRs in the teardown phase.
    """
    # Skip on non dualtor testbed
    if 'dualtor' not in tbinfo['topo']['name']:
        yield
        return

    logger.info('Set all muxcable to manual mode on all ToRs')
    duthosts.shell('config muxcable mode manual all')

    logger.info("Toggling mux cable to {}".format(rand_one_dut_hostname))
    duthost = duthosts[rand_one_dut_hostname]
    dut_index = tbinfo['duts'].index(rand_one_dut_hostname)
    if dut_index == 0:
        data = {"active_side": UPPER_TOR}
    else:
        data = {"active_side": LOWER_TOR}

    # Allow retry for mux cable toggling
    for attempt in range(1, 4):
        logger.info(
            'attempt={}, toggle active side of all muxcables to {} from mux simulator'
            .format(attempt, data['active_side']))
        _post(mux_server_url, data)
        utilities.wait(
            5,
            'Wait for DUT muxcable status to update after toggled from mux simulator'
        )
        if _are_muxcables_active(duthost):
            break
    else:
        pytest_assert(
            False,
            "Failed to toggle all ports to {} from mux simulator".format(
                rand_one_dut_hostname))

    yield

    logger.info('Set all muxcable to auto mode on all ToRs')
    duthosts.shell('config muxcable mode auto all')
Example #26
def check_interfaces(dut):
    logger.info("Checking interfaces status on %s..." % dut.hostname)

    networking_uptime = dut.get_networking_uptime().seconds
    timeout = max((SYSTEM_STABILIZE_MAX_TIME - networking_uptime), 0)
    interval = 20
    logger.info("networking_uptime=%d seconds, timeout=%d seconds, interval=%d seconds" % \
                (networking_uptime, timeout, interval))

    cfg_facts = dut.config_facts(host=dut.hostname,
                                 source="persistent")['ansible_facts']
    interfaces = [
        k for k, v in cfg_facts["PORT"].items()
        if "admin_status" in v and v["admin_status"] == "up"
    ]
    if "PORTCHANNEL_INTERFACE" in cfg_facts:
        interfaces += cfg_facts["PORTCHANNEL_INTERFACE"].keys()
    if "VLAN_INTERFACE" in cfg_facts:
        interfaces += cfg_facts["VLAN_INTERFACE"].keys()

    logger.info(json.dumps(interfaces, indent=4))

    check_result = {"failed": True, "check_item": "interfaces"}
    if timeout == 0:  # Check interfaces status, do not retry.
        down_ports = _find_down_ports(dut, interfaces)
        check_result["failed"] = True if len(down_ports) > 0 else False
        check_result["down_ports"] = down_ports
    else:  # Retry checking interface status
        start = time.time()
        elapsed = 0
        while elapsed < timeout:
            down_ports = _find_down_ports(dut, interfaces)
            check_result["failed"] = True if len(down_ports) > 0 else False
            check_result["down_ports"] = down_ports

            if check_result["failed"]:
                wait(interval, msg="Found down ports, wait %d seconds to retry. Remaining time: %d, down_ports=%s" % \
                     (interval, int(timeout - elapsed), str(check_result["down_ports"])))
                elapsed = time.time() - start
            else:
                break

    logger.info("Done checking interfaces status.")
    return check_result
Example #27
def test_decap(tbinfo, duthosts, ptfhost, setup_teardown, mux_server_url, set_mux_random, supported_ttl_dscp_params, ip_ver, loopback_ips,
               duts_running_config_facts, duts_minigraph_facts):
    setup_info = setup_teardown

    ecn_mode = "copy_from_outer"
    ttl_mode = supported_ttl_dscp_params['ttl']
    dscp_mode = supported_ttl_dscp_params['dscp']
    if duthosts[0].facts['asic_type'] in ['mellanox']:
        ecn_mode = 'standard'

    try:
        apply_decap_cfg(duthosts, ip_ver, loopback_ips, ttl_mode, dscp_mode, ecn_mode, 'SET')

        if 'dualtor' in tbinfo['topo']['name']:
            wait(30, 'Wait some time for mux active/standby state to be stable after toggled mux state')

        log_file = "/tmp/decap.{}.log".format(datetime.now().strftime('%Y-%m-%d-%H:%M:%S'))
        ptf_runner(ptfhost,
                   "ptftests",
                   "IP_decap_test.DecapPacketTest",
                    platform_dir="ptftests",
                    params={"outer_ipv4": setup_info["outer_ipv4"],
                            "outer_ipv6": setup_info["outer_ipv6"],
                            "inner_ipv4": setup_info["inner_ipv4"],
                            "inner_ipv6": setup_info["inner_ipv6"],
                            "lo_ips": setup_info["lo_ips"],
                            "lo_ipv6s": setup_info["lo_ipv6s"],
                            "ttl_mode": ttl_mode,
                            "dscp_mode": dscp_mode,
                            "ignore_ttl": setup_info["ignore_ttl"],
                            "max_internal_hops": setup_info["max_internal_hops"],
                            "fib_info_files": setup_info["fib_info_files"],
                            "single_fib_for_duts": setup_info["single_fib_for_duts"],
                            "ptf_test_port_map": ptf_test_port_map(ptfhost, tbinfo, duthosts, mux_server_url, duts_running_config_facts, duts_minigraph_facts)
                            },
                    qlen=PTFRUNNER_QLEN,
                    log_file=log_file)
    except Exception as detail:
        raise Exception(detail)
    finally:
        # Remove test decap configuration
        apply_decap_cfg(duthosts, ip_ver, loopback_ips, ttl_mode, dscp_mode, ecn_mode, 'DEL')
Example #28
def do_reboot(duthost, localhost, dutip="", rw_user="", rw_pass=""):
    # occasionally reboot command fails with some kernel error messages
    # Hence retry if needed.
    #
    wait_time = 20
    retries = 3
    rebooted = False

    for i in range(retries):
        # Regular reboot command would not work, as it would try to
        # collect show tech, which will fail in RO state.
        #
        try:
            if dutip:
                chk_ssh_remote_run(localhost, dutip, rw_user, rw_pass,
                                   "sudo /sbin/reboot")
            else:
                duthost.shell("/sbin/reboot")

            localhost.wait_for(host=duthost.mgmt_ip,
                               port=22,
                               state="stopped",
                               delay=5,
                               timeout=60)
            rebooted = True
            break
        except RunAnsibleModuleFail as e:
            logger.error(
                "DUT did not go down, exception: {} attempt:{}/{}".format(
                    repr(e), i, retries))

    assert rebooted, "Failed to reboot"
    localhost.wait_for(host=duthost.mgmt_ip,
                       port=22,
                       state="started",
                       delay=10,
                       timeout=300)
    wait(wait_time,
         msg="Wait {} seconds for system to be stable.".format(wait_time))
    assert wait_until(300, 20, 0, duthost.critical_services_fully_started), \
            "All critical services should be fully started!"
Example #29
    def test_config_interface_ip(self, setup_config_mode, sample_intf):
        """
        Checks whether 'config interface ip add/remove <intf> <ip>'
        adds/removes the ip on the test interface when its interface
        alias/name is provided as per the configured naming mode
        """
        dutHostGuest, mode, ifmode = setup_config_mode
        test_intf = sample_intf[mode]
        out = dutHostGuest.shell(
            'SONIC_CLI_IFACE_MODE={} sudo config interface ip remove {} 10.0.0.0/31'
            .format(ifmode, test_intf))
        if out['rc'] != 0:
            pytest.fail()

        wait(3)
        show_ip_intf = dutHostGuest.shell(
            'SONIC_CLI_IFACE_MODE={} show ip interface'.format(
                ifmode))['stdout']
        logger.info('show_ip_intf:\n{}'.format(show_ip_intf))

        assert re.search(r'{}\s+10.0.0.0/31'.format(test_intf),
                         show_ip_intf) is None

        out = dutHostGuest.shell(
            'SONIC_CLI_IFACE_MODE={} sudo config interface ip add {} 10.0.0.0/31'
            .format(ifmode, test_intf))
        if out['rc'] != 0:
            pytest.fail()

        wait(3)
        show_ip_intf = dutHostGuest.shell(
            'SONIC_CLI_IFACE_MODE={} show ip interface'.format(
                ifmode))['stdout']
        logger.info('show_ip_intf:\n{}'.format(show_ip_intf))

        assert re.search(r'{}\s+10.0.0.0/31'.format(test_intf),
                         show_ip_intf) is not None
Example #30
def test_cont_warm_reboot(duthost, ptfhost, localhost, conn_graph_facts, continuous_reboot_count, \
    continuous_reboot_delay, enable_continuous_io, get_advanced_reboot):
    """
    @summary: This test case performs multiple warm reboots in a row
    """
    asic_type = duthost.facts["asic_type"]
    if asic_type in ["mellanox"]:
        issu_capability = duthost.command("show platform mlnx issu")["stdout"]
        if "disabled" in issu_capability:
            pytest.skip(
                "ISSU is not supported on this DUT, skip this test case")

    # Start advancedReboot script on the ptf host to enable continuous I/O
    advancedReboot = get_advanced_reboot(
        rebootType='warm-reboot', enableContinuousIO=enable_continuous_io)
    thr = threading.Thread(target=advancedReboot.runRebootTestcase)
    thr.daemon = True
    thr.start()

    # Start continuous warm reboot on the DUT
    for count in range(continuous_reboot_count):
        logging.info("==================== Continuous warm reboot iteration: {}/{} ====================".format \
            (count + 1, continuous_reboot_count))
        reboot_and_check(localhost,
                         duthost,
                         conn_graph_facts["device_conn"],
                         reboot_type=REBOOT_TYPE_WARM)
        wait(continuous_reboot_delay,
             msg="Wait {}s before next warm-reboot".format(
                 continuous_reboot_delay))

    # Find the pid of continuous I/O script inside ptf container and send a stop signal
    pid_res = ptfhost.command("cat /tmp/advanced-reboot-pid.log")
    ptfhost.command("kill -SIGUSR1 {}".format(pid_res['stdout']))
    thr.join()
    logging.info("Continuous warm-reboot test completed")