Example 1
    def test_bgp(self, duthost, ctrl_links, upstream_links, profile_name):
        '''Verify BGP neighbourship
        '''
        bgp_config = list(duthost.get_running_config_facts()["BGP_NEIGHBOR"].values())[0]
        BGP_KEEPALIVE = int(bgp_config["keepalive"])
        BGP_HOLDTIME = int(bgp_config["holdtime"])

        def check_bgp_established(up_link):
            command = "sonic-db-cli STATE_DB HGETALL 'NEIGH_STATE_TABLE|{}'".format(
                up_link["local_ipv4_addr"])
            fact = sonic_db_cli(duthost, command)
            logger.info("bgp state {}".format(fact))
            return fact["state"] == "Established"

        # Ensure the BGP sessions have been established
        for ctrl_port in ctrl_links.keys():
            assert wait_until(30, 5, 0, check_bgp_established,
                              upstream_links[ctrl_port])

        # Check the BGP sessions are present after port macsec disabled
        for ctrl_port, nbr in ctrl_links.items():
            disable_macsec_port(duthost, ctrl_port)
            disable_macsec_port(nbr["host"], nbr["port"])
            wait_until(
                20, 3, 0, lambda: not duthost.iface_macsec_ok(ctrl_port) and
                not nbr["host"].iface_macsec_ok(nbr["port"]))
            # BGP session should keep established even after holdtime
            assert wait_until(BGP_HOLDTIME * 2, BGP_KEEPALIVE, BGP_HOLDTIME,
                              check_bgp_established, upstream_links[ctrl_port])

        # Check the BGP sessions are present after port macsec enabled
        for ctrl_port, nbr in ctrl_links.items():
            enable_macsec_port(duthost, ctrl_port, profile_name)
            enable_macsec_port(nbr["host"], nbr["port"], profile_name)
            wait_until(
                20, 3, 0, lambda: duthost.iface_macsec_ok(ctrl_port) and nbr[
                    "host"].iface_macsec_ok(nbr["port"]))
            # Wait for the PortChannel to come up; it might flap if it has only one member port
            wait_until(
                20, 5, 5, lambda: find_portchannel_from_member(
                    ctrl_port, get_portchannel(duthost))["status"] == "Up")
            # BGP session should keep established even after holdtime
            assert wait_until(BGP_HOLDTIME * 2, BGP_KEEPALIVE, BGP_HOLDTIME,
                              check_bgp_established, upstream_links[ctrl_port])
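Every example in this listing polls some condition through wait_until. For reference, here is a minimal sketch of such a polling helper, assuming the (timeout, interval, delay, condition, *args) argument order used in the calls above; the real sonic-mgmt implementation may differ, and some call sites below use an older form without the delay argument.

import logging
import time

logger = logging.getLogger(__name__)


def wait_until(timeout, interval, delay, condition, *args, **kwargs):
    """Poll condition(*args, **kwargs) until it returns a truthy value or timeout expires.

    Sleep `delay` seconds before the first check, then retry every `interval`
    seconds. Return True on success, False if the timeout is reached.
    """
    if delay:
        time.sleep(delay)
    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            if condition(*args, **kwargs):
                return True
        except Exception as err:
            # A condition that raises transiently is treated as "not ready yet"
            logger.debug("wait_until condition raised: %r", err)
        time.sleep(interval)
    return False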
Example 2
def test_server_down(duthosts, tbinfo, rand_selected_interface, simulator_flap_counter, simulator_server_down, toggle_simulator_port_to_upper_tor, loganalyzer):
    """
    Verify that mux cable is not toggled excessively.
    """

    for analyzer in list(loganalyzer.values()):
        analyzer.ignore_regex.append(r".*ERR swss#orchagent: :- setState: State transition from active to active is not-handled")
        
    upper_tor = duthosts[tbinfo['duts'][0]]
    lower_tor = duthosts[tbinfo['duts'][1]]
    
    def upper_tor_mux_state_verification(state, health):
        mux_state_upper_tor = show_muxcable_status(upper_tor)
        return mux_state_upper_tor[itfs]['status'] == state and mux_state_upper_tor[itfs]['health'] == health
    
    def lower_tor_mux_state_verification(state, health):
        mux_state_lower_tor = show_muxcable_status(lower_tor)
        return mux_state_lower_tor[itfs]['status'] == state and mux_state_lower_tor[itfs]['health'] == health

    itfs, _ = rand_selected_interface
    # Set upper_tor as active
    toggle_simulator_port_to_upper_tor(itfs)
    pytest_assert(wait_until(30, 1, 0, upper_tor_mux_state_verification, 'active', 'healthy'), 
                    "mux_cable status is unexpected. Should be (active, healthy). Test can't proceed. ")
    mux_flap_counter_0 = simulator_flap_counter(itfs)
    # Server down
    simulator_server_down(itfs)
    # Verify mux_cable state on upper_tor is active
    pytest_assert(wait_until(20, 1, 0, upper_tor_mux_state_verification, 'active', 'unhealthy'), 
                    "mux_cable status is unexpected. Should be (active, unhealthy)")
    # Verify mux_cable state on lower_tor is standby
    pytest_assert(wait_until(20, 1, 0, lower_tor_mux_state_verification, 'standby', 'unhealthy'),
                    "mux_cable status is unexpected. Should be (standby, unhealthy)")
    # Verify that mux_cable flap_counter should be no larger than 3
    # lower_tor(standby) -> active -> standby
    # upper_tor(active) -> active
    # The toggle from both tor may be overlapped and invisible 
    mux_flap_counter_1 = simulator_flap_counter(itfs)
    pytest_assert(mux_flap_counter_1 - mux_flap_counter_0 <= 3, 
                    "The mux_cable flap count should be no larger than 3 ({})".format(mux_flap_counter_1 - mux_flap_counter_0)) 
Example 3
def verify_autorestart_with_critical_process(duthost, container_name,
                                             program_name, program_status,
                                             program_pid):
    """
    @summary: Kill a critical process in a container to verify whether the container
              is stopped and restarted correctly
    """
    if program_status == "RUNNING":
        kill_process_by_pid(duthost, container_name, program_name, program_pid)
    elif program_status in ["EXITED", "STOPPED", "STARTING"]:
        pytest.fail(
            "Program '{}' in container '{}' is in the '{}' state, expected 'RUNNING'"
            .format(program_name, container_name, program_status))
    else:
        pytest.fail("Failed to find program '{}' in container '{}'".format(
            program_name, container_name))

    logger.info(
        "Waiting until container '{}' is stopped...".format(container_name))
    stopped = wait_until(CONTAINER_STOP_THRESHOLD_SECS,
                         CONTAINER_CHECK_INTERVAL_SECS, check_container_state,
                         duthost, container_name, False)
    pytest_assert(stopped,
                  "Failed to stop container '{}'".format(container_name))
    logger.info("Container '{}' was stopped".format(container_name))

    logger.info(
        "Waiting until container '{}' is restarted...".format(container_name))
    restarted = wait_until(CONTAINER_RESTART_THRESHOLD_SECS,
                           CONTAINER_CHECK_INTERVAL_SECS,
                           check_container_state, duthost, container_name,
                           True)
    if not restarted:
        if is_hiting_start_limit(duthost, container_name):
            clear_failed_flag_and_restart(duthost, container_name)
        else:
            pytest.fail(
                "Failed to restart container '{}'".format(container_name))

    logger.info("Container '{}' was restarted".format(container_name))
Example 4
    def test_lldp(self, duthost, ctrl_links, profile_name):
        '''Verify lldp
        '''
        LLDP_ADVERTISEMENT_INTERVAL = 30  # default interval in seconds
        LLDP_HOLD_MULTIPLIER = 4  # default multiplier number
        LLDP_TIMEOUT = LLDP_ADVERTISEMENT_INTERVAL * LLDP_HOLD_MULTIPLIER

        # select one macsec link
        for ctrl_port, nbr in ctrl_links.items():
            # TODO: vsonic vm has issue on lldp
            if not isinstance(nbr["host"], EosHost):
                pytest.skip("test_lldp has issue with vsonic neighbor")
            assert nbr["name"] in get_lldp_list(duthost)

            disable_macsec_port(duthost, ctrl_port)
            disable_macsec_port(nbr["host"], nbr["port"])
            wait_until(
                20, 3, 0, lambda: not duthost.iface_macsec_ok(ctrl_port) and
                not nbr["host"].iface_macsec_ok(nbr["port"]))
            assert wait_until(LLDP_TIMEOUT, LLDP_ADVERTISEMENT_INTERVAL, 0,
                              lambda: nbr["name"] in get_lldp_list(duthost))

            enable_macsec_port(duthost, ctrl_port, profile_name)
            enable_macsec_port(nbr["host"], nbr["port"], profile_name)
            wait_until(
                20, 3, 0, lambda: duthost.iface_macsec_ok(ctrl_port) and nbr[
                    "host"].iface_macsec_ok(nbr["port"]))
            assert wait_until(1, 1, LLDP_TIMEOUT,
                              lambda: nbr["name"] in get_lldp_list(duthost))
Example 5
def test_auto_negotiation_dut_advertises_each_speed(
        enum_dut_portname_module_fixture):
    """Test all candidate ports to advertised all supported speeds one by one and verify
       that the port operational status is up after auto negotiation
    """
    dutname, portname = enum_dut_portname_module_fixture
    duthost, dut_port, fanout, fanout_port = cadidate_test_ports[dutname][
        portname]

    logger.info('Start test for DUT port {} and fanout port {}'.format(
        dut_port, fanout_port))
    # Enable auto negotiation on fanout port
    success = fanout.set_auto_negotiation_mode(fanout_port, True)
    pytest_require(
        success,
        'Failed to set port autoneg on fanout port {}'.format(fanout_port))

    # Advertise all supported speeds in fanout port
    success = fanout.set_speed(fanout_port, None)
    pytest_require(
        success,
        'Failed to advertise all supported speeds on fanout port {}'.format(
            fanout_port))

    logger.info(
        'Trying to get a common supported speed set among dut port, fanout port and cable'
    )
    supported_speeds = get_supported_speeds_for_port(duthost, dut_port, fanout,
                                                     fanout_port)
    pytest_require(
        supported_speeds,
        'Skipping test for port {} because its supported speeds could not be determined'.
        format(dut_port))

    logger.info(
        'Run test based on supported speeds: {}'.format(supported_speeds))
    duthost.shell('config interface autoneg {} enabled'.format(dut_port))
    for speed in supported_speeds:
        duthost.shell('config interface advertised-speeds {} {}'.format(
            dut_port, speed))
        logger.info(
            'Wait until the port status is up, expected speed: {}'.format(
                speed))
        wait_result = wait_until(SINGLE_PORT_WAIT_TIME,
                                 PORT_STATUS_CHECK_INTERVAL, 0, check_ports_up,
                                 duthost, [dut_port], speed)
        pytest_assert(wait_result, 'Port {} is still down'.format(dut_port))
        fanout_actual_speed = fanout.get_speed(fanout_port)
        pytest_assert(
            fanout_actual_speed == speed,
            'expect fanout speed: {}, but got {}'.format(
                speed, fanout_actual_speed))
Example 6
def test_pfcwd_basic_multi_lossless_prio_restart_service(
        ixia_api, ixia_testbed_config, conn_graph_facts, fanout_graph_facts,
        duthosts, rand_one_dut_hostname, rand_one_dut_portname_oper_up,
        lossless_prio_list, prio_dscp_map, restart_service, trigger_pfcwd):
    """
    Verify PFC watchdog basic test works on multiple lossless priorities after various service restarts

    Args:
        ixia_api (pytest fixture): IXIA session
        ixia_testbed_config (pytest fixture): testbed configuration information
        conn_graph_facts (pytest fixture): connection graph
        fanout_graph_facts (pytest fixture): fanout graph
        duthosts (pytest fixture): list of DUTs
        rand_one_dut_hostname (str): hostname of DUT
        rand_one_dut_portname_oper_up (str): name of port to test, e.g., 's6100-1|Ethernet0'
        lossless_prio_list (pytest fixture): list of all the lossless priorities
        prio_dscp_map (pytest fixture): priority vs. DSCP map (key = priority)
        restart_service (str): service to restart on the DUT. Only 'swss' affects pfcwd currently
        trigger_pfcwd (bool): if PFC watchdog is expected to be triggered

    Returns:
        N/A
    """
    dut_hostname, dut_port = rand_one_dut_portname_oper_up.split('|')
    pytest_require(rand_one_dut_hostname == dut_hostname,
                   "Port is not mapped to the expected DUT")

    duthost = duthosts[rand_one_dut_hostname]
    skip_pfcwd_test(duthost=duthost, trigger_pfcwd=trigger_pfcwd)

    testbed_config, port_config_list = ixia_testbed_config

    logger.info("Issuing a restart of service {} on the dut {}".format(
        restart_service, duthost.hostname))
    services_to_reset = DEPENDENT_SERVICES + [restart_service]
    for service in services_to_reset:
        duthost.command("systemctl reset-failed {}".format(service))
    duthost.command("systemctl restart {}".format(restart_service))
    logger.info("Wait until the system is stable")
    pytest_assert(wait_until(300, 20, duthost.critical_services_fully_started),
                  "Not all critical services are fully started")

    run_pfcwd_basic_test(api=ixia_api,
                         testbed_config=testbed_config,
                         port_config_list=port_config_list,
                         conn_data=conn_graph_facts,
                         fanout_data=fanout_graph_facts,
                         duthost=duthost,
                         dut_port=dut_port,
                         prio_list=lossless_prio_list,
                         prio_dscp_map=prio_dscp_map,
                         trigger_pfcwd=trigger_pfcwd)
Example 7
def test_pfcwd_basic_multi_lossless_prio_reboot(
        ixia_api, ixia_testbed_config, conn_graph_facts, fanout_graph_facts,
        localhost, duthosts, rand_one_dut_hostname,
        rand_one_dut_portname_oper_up, lossless_prio_list, prio_dscp_map,
        reboot_type, trigger_pfcwd):
    """
    Verify PFC watchdog basic test works on multiple lossless priorities after various kinds of reboots

    Args:
        ixia_api (pytest fixture): IXIA session
        ixia_testbed_config (pytest fixture): testbed configuration information
        conn_graph_facts (pytest fixture): connection graph
        fanout_graph_facts (pytest fixture): fanout graph
        localhost (pytest fixture): localhost handle
        duthosts (pytest fixture): list of DUTs
        rand_one_dut_hostname (str): hostname of DUT
        rand_one_dut_portname_oper_up (str): name of port to test, e.g., 's6100-1|Ethernet0'
        lossless_prio_list (pytest fixture): list of all the lossless priorities
        prio_dscp_map (pytest fixture): priority vs. DSCP map (key = priority)
        reboot_type (str): reboot type to be issued on the DUT
        trigger_pfcwd (bool): if PFC watchdog is expected to be triggered

    Returns:
        N/A
    """
    dut_hostname, dut_port = rand_one_dut_portname_oper_up.split('|')
    pytest_require(rand_one_dut_hostname == dut_hostname,
                   "Port is not mapped to the expected DUT")

    duthost = duthosts[rand_one_dut_hostname]
    skip_pfcwd_test(duthost=duthost, trigger_pfcwd=trigger_pfcwd)
    skip_warm_reboot(duthost, reboot_type)

    testbed_config, port_config_list = ixia_testbed_config

    logger.info("Issuing a {} reboot on the dut {}".format(
        reboot_type, duthost.hostname))
    reboot(duthost, localhost, reboot_type=reboot_type)
    logger.info("Wait until the system is stable")
    pytest_assert(wait_until(300, 20, duthost.critical_services_fully_started),
                  "Not all critical services are fully started")

    run_pfcwd_basic_test(api=ixia_api,
                         testbed_config=testbed_config,
                         port_config_list=port_config_list,
                         conn_data=conn_graph_facts,
                         fanout_data=fanout_graph_facts,
                         duthost=duthost,
                         dut_port=dut_port,
                         prio_list=lossless_prio_list,
                         prio_dscp_map=prio_dscp_map,
                         trigger_pfcwd=trigger_pfcwd)
Example 8
def setup_tacacs_server(ptfhost, creds_all_duts, duthost):
    """setup tacacs server"""

    # configure tacacs server
    extra_vars = {'tacacs_passkey': creds_all_duts[duthost]['tacacs_passkey'],
                  'tacacs_rw_user': creds_all_duts[duthost]['tacacs_rw_user'],
                  'tacacs_rw_user_passwd': crypt.crypt(creds_all_duts[duthost]['tacacs_rw_user_passwd'], 'abc'),
                  'tacacs_ro_user': creds_all_duts[duthost]['tacacs_ro_user'],
                  'tacacs_ro_user_passwd': crypt.crypt(creds_all_duts[duthost]['tacacs_ro_user_passwd'], 'abc'),
                  'tacacs_jit_user': creds_all_duts[duthost]['tacacs_jit_user'],
                  'tacacs_jit_user_passwd': crypt.crypt(creds_all_duts[duthost]['tacacs_jit_user_passwd'], 'abc'),
                  'tacacs_jit_user_membership': creds_all_duts[duthost]['tacacs_jit_user_membership']}

    ptfhost.host.options['variable_manager'].extra_vars.update(extra_vars)
    ptfhost.template(src="tacacs/tac_plus.conf.j2", dest="/etc/tacacs+/tac_plus.conf")
    ptfhost.lineinfile(path="/etc/default/tacacs+", line="DAEMON_OPTS=\"-d 10 -l /var/log/tac_plus.log -C /etc/tacacs+/tac_plus.conf\"", regexp='^DAEMON_OPTS=.*')
    check_all_services_status(ptfhost)

    # FIXME: This is a short term mitigation, we need to figure out why the tacacs+ server does not start
    # reliably all of a sudden.
    wait_until(5, 1, 0, start_tacacs_server, ptfhost)
    check_all_services_status(ptfhost)
Example 9
def restart_bgp(duthost, asic_index=DEFAULT_ASIC_ID):
    """
    Restart bgp services on the DUT

    Args:
        duthost: DUT host object
        asic_index: ASIC instance on which to restart BGP (defaults to DEFAULT_ASIC_ID)
    """
    duthost.asic_instance(asic_index).reset_service("bgp")
    duthost.asic_instance(asic_index).restart_service("bgp")
    docker_name = duthost.asic_instance(asic_index).get_docker_name("bgp")
    pytest_assert(
        wait_until(100, 10, duthost.is_service_fully_started, docker_name),
        "BGP not started.")
Example 10
    def test_ntp(self, duthosts, rand_one_dut_hostname, ptfhost,
                 check_ntp_sync, ntp_servers):
        duthost = duthosts[rand_one_dut_hostname]
        # Check if ntp was not in sync with ntp server before enabling mvrf, if yes then setup ntp server on ptf
        if check_ntp_sync:
            setup_ntp(ptfhost, duthost, ntp_servers)
        force_ntp = "ntpd -gq"
        duthost.service(name="ntp", state="stopped")
        logger.info("Ntp restart in mgmt vrf")
        execute_dut_command(duthost, force_ntp)
        duthost.service(name="ntp", state="restarted")
        pytest_assert(wait_until(400, 10, check_ntp_status, duthost),
                      "Ntp not started")
Example 11
def test_interface_binding(duthosts, rand_one_dut_hostname, dut_dhcp_relay_data):
    duthost = duthosts[rand_one_dut_hostname]
    skip_release(duthost, ["201811", "201911", "202106"])
    if not check_interface_status(duthost):
        config_reload(duthost)
        wait_critical_processes(duthost)
        pytest_assert(wait_until(120, 5, 0, check_interface_status, duthost))
    output = duthost.shell("docker exec -it dhcp_relay ss -nlp | grep dhcrelay", module_ignore_errors=True)["stdout"].encode("utf-8")
    logger.info(output)
    for dhcp_relay in dut_dhcp_relay_data:
        assert "{}:67".format(dhcp_relay['downlink_vlan_iface']['name']) in output, "{} is not found in {}".format("{}:67".format(dhcp_relay['downlink_vlan_iface']['name']), output)
        for iface in dhcp_relay['uplink_interfaces']:
            assert "{}:67".format(iface) in output, "{} is not found in {}".format("{}:67".format(iface), output)
Example 12
def test_po_cleanup(duthosts, rand_one_dut_hostname, enum_asic_index):
    """
    Test that port channels are cleaned up correctly and that the teammgrd and
    teamsyncd processes handle SIGTERM gracefully
    """
    duthost = duthosts[rand_one_dut_hostname]
    logging.info("Disable swss/teamd Feature")
    duthost.asic_instance(enum_asic_index).stop_service("swss")
    # Check if Linux Kernel Portchannel Interface teamdev are clean up
    if not wait_until(10, 1, check_kernel_po_interface_cleaned, duthost,
                      enum_asic_index):
        fail_msg = "PortChannel interface still exists in kernel"
        pytest.fail(fail_msg)
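A hypothetical sketch of the check_kernel_po_interface_cleaned predicate polled above, with multi-ASIC namespace handling omitted for brevity; the real helper may differ.

def check_kernel_po_interface_cleaned(duthost, asic_index):
    """Return True if no PortChannel (team) interfaces remain in the kernel."""
    # Note: selecting the network namespace for asic_index is omitted in this sketch
    result = duthost.shell("ip -br link show type team", module_ignore_errors=True)
    return "PortChannel" not in result["stdout"]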
Example 13
def _perform_swap_syncd_shutdown_check(duthost):
    def ready_for_swap():
        if any([
            duthost.is_container_present("syncd"),
            duthost.is_container_present("swss"),
            not duthost.is_bgp_state_idle()
        ]):
            return False

        return True

    shutdown_check = wait_until(30, 3, ready_for_swap)
    pytest_assert(shutdown_check, "Docker and/or BGP failed to shut down in 30s")
Example 14
def clear_failed_flag_and_restart(duthost, container_name):
    """
    @summary: If a container hits the restart limitation, then we clear the failed flag and
              restart it.
    """
    logger.info("{} hits start limit and clear reset-failed flag".format(container_name))
    duthost.shell("sudo systemctl reset-failed {}.service".format(container_name))
    duthost.shell("sudo systemctl start {}.service".format(container_name))
    restarted = wait_until(CONTAINER_RESTART_THRESHOLD_SECS,
                           CONTAINER_CHECK_INTERVAL_SECS,
                           0,
                           check_container_state, duthost, container_name, True)
    pytest_assert(restarted, "Failed to restart container '{}' after reset-failed was cleared".format(container_name))
Example 15
def test_toggle_mux_from_simulator(duthosts, active_side,
                                   toggle_all_simulator_ports, get_mux_status,
                                   restore_mux_auto_mode):
    logger.info('Set all muxcable to manual mode on all ToRs')
    duthosts.shell('config muxcable mode manual all')

    logger.info('Toggle mux active side from mux simulator')
    toggle_all_simulator_ports(active_side)

    check_result = wait_until(10, 2, 2, check_mux_status, duthosts,
                              active_side)

    validate_check_result(check_result, duthosts)
Example 16
def test_service_checker(duthosts, enum_rand_one_per_hwsku_hostname):
    duthost = duthosts[enum_rand_one_per_hwsku_hostname]
    wait_system_health_boot_up(duthost)
    with ConfigFileContext(duthost, os.path.join(FILES_DIR, IGNORE_DEVICE_CHECK_CONFIG_FILE)):
        processes_status = duthost.all_critical_process_status()
        expect_error_dict = {}
        for container_name, processes in processes_status.items():
            if processes["status"] is False or len(processes["exited_critical_process"]) > 0:
                for process_name in processes["exited_critical_process"]:
                    expect_error_dict[process_name] = '{}:{} is not running'.format(container_name, process_name)

        if expect_error_dict:
            logger.info('Verify data in redis')
            for name, error in expect_error_dict.items():
                result = wait_until(WAIT_TIMEOUT, 10, 2, check_system_health_info, duthost, name, error)
                value = redis_get_field_value(duthost, STATE_DB, HEALTH_TABLE_NAME, name)
                assert result == True, 'Expect error {}, got {}'.format(error, value)

        expect_summary = SUMMARY_OK if not expect_error_dict else SUMMARY_NOT_OK
        result = wait_until(WAIT_TIMEOUT, 10, 2, check_system_health_info, duthost, 'summary', expect_summary)
        summary = redis_get_field_value(duthost, STATE_DB, HEALTH_TABLE_NAME, 'summary')
        assert result == True, 'Expect summary {}, got {}'.format(expect_summary, summary)
Example 17
    def set_mtu(cls, mtu, iface):
        cls.mtu = duthost.command("redis-cli -n 4 hget \"PORTCHANNEL|{}\" mtu".format(iface))["stdout"]
        if not cls.mtu:
            cls.mtu = cls.default_mtu
        if "PortChannel" in iface:
            duthost.command("redis-cli -n 4 hset \"PORTCHANNEL|{}\" mtu {}".format(iface, mtu))["stdout"]
        elif "Ethernet" in iface:
            duthost.command("redis-cli -n 4 hset \"PORT|{}\" mtu {}".format(iface, mtu))["stdout"]
        else:
            raise Exception("Unsupported interface parameter - {}".format(iface))
        cls.iface = iface
        check_mtu = lambda: get_intf_mtu(duthost, iface) == mtu
        pytest_assert(wait_until(5, 1, check_mtu), "MTU on interface {} not updated".format(iface))
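A hypothetical sketch of the get_intf_mtu helper behind the check_mtu lambda above: it reads the interface MTU from the kernel and returns it as a string, so the comparison assumes mtu is passed as a string; the real helper may differ.

def get_intf_mtu(duthost, intf):
    """Return the interface MTU as reported by the kernel (string)."""
    return duthost.command("cat /sys/class/net/{}/mtu".format(intf))["stdout"].strip()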
Example 18
def shutdown_port(duthost, interface):
    """
    Shutdown port on the DUT

    Args:
        duthost: DUT host object
        interface: Interface of DUT
    """
    duthost.shutdown(interface)
    pytest_assert(
        wait_until(3, 1, 0, __check_interface_state, duthost, interface,
                   'down'),
        "DUT's port {} didn't go down as expected".format(interface))
Example 19
def start_sai_test_conatiner_with_retry(duthost, container_name):
    """
    Attempts to start a sai test container with retry.

    Args:
        duthost (SonicHost): The target device.
        container_name: The container name for sai testing on DUT.
    """

    dut_ip = duthost.host.options['inventory_manager'].get_host(duthost.hostname).vars['ansible_host']
    logger.info("Checking the PRC connection before starting the {}.".format(container_name))
    rpc_ready = wait_until(1, 1, _is_rpc_server_ready, dut_ip)
    
    if not rpc_ready:
        logger.info("Attempting to start {}.".format(container_name))
        sai_ready = wait_until(SAI_TEST_CTNR_CHECK_TIMEOUT_IN_SEC, SAI_TEST_CTNR_RESTART_INTERVAL_IN_SEC, _is_sai_test_container_restarted, duthost, container_name)
        pt_assert(sai_ready, "[{}] sai test container failed to start in {}s".format(container_name, SAI_TEST_CTNR_CHECK_TIMEOUT_IN_SEC))
        logger.info("Waiting for another {} second for sai test container warm up.".format(SAI_TEST_CONTAINER_WARM_UP_IN_SEC))
        time.sleep(SAI_TEST_CONTAINER_WARM_UP_IN_SEC)
        logger.info("Successful in starting {} at : {}:{}".format(container_name, dut_ip, SAI_PRC_PORT))
    else:
        logger.info("PRC connection already set up before starting the {}.".format(container_name))
Example 20
def remove_member_from_vlan(duthost, vlan_id, vlan_member):
    """
    Remove members of VLAN on DUT

    Args:
        duthost: DUT host object
        vlan_id: VLAN id
        vlan_member: VLAN member
    """
    if __check_vlan_member(duthost, vlan_id, vlan_member):
        duthost.shell('config vlan member del {} {}'.format(vlan_id, vlan_member))
        pytest_assert(wait_until(3, 1, __check_vlan_member, duthost, vlan_id, vlan_member, True),
                      "VLAN RIF Vlan{} have {} member".format(vlan_id, vlan_member))
Example 21
def add_member_to_vlan(duthost, vlan_id, vlan_member):
    """
    Add members of VLAN on DUT

    Args:
        duthost: DUT host object
        vlan_id: VLAN id
        vlan_member: VLAN member
    """
    if not __check_vlan_member(duthost, vlan_id, vlan_member):
        duthost.shell('config vlan member add {} {}'.format(vlan_id, vlan_member))
        pytest_assert(wait_until(3, 1, __check_vlan_member, duthost, vlan_id, vlan_member),
                      "VLAN RIF Vlan{} doesn't have {} member".format(vlan_id, vlan_member))
Example 22
def pre_condition_install_trap(ptfhost, duthost, copp_testbed, trap_id,
                               feature_name):
    copp_utils.install_trap(duthost, feature_name)
    logger.info("Set always_enabled of {} to false".format(trap_id))
    copp_utils.configure_always_enabled_for_trap(duthost, trap_id, "false")

    logger.info(
        "Verify {} trap status is installed by sending traffic in pre_condition"
        .format(trap_id))
    pytest_assert(
        wait_until(100, 20, 0, _copp_runner, duthost, ptfhost, trap_id.upper(),
                   copp_testbed, dut_type),
        "Installing {} trap fail".format(trap_id))
Example 23
def test_techsupport(request, config, duthost, testbed):
    """
    test the "show techsupport" command in a loop
    :param config: fixture to configure additional setups_list on dut.
    :param duthost: DUT host
    :param testbed: testbed
    """
    loop_range = request.config.getoption("--loop_num") or DEFAULT_LOOP_RANGE
    loop_delay = request.config.getoption("--loop_delay") or DEFAULT_LOOP_DELAY
    since = request.config.getoption("--logs_since") or str(randint(
        1, 23)) + " minute ago"

    logger.debug("Loop_range is {} and loop_delay is {}".format(
        loop_range, loop_delay))

    for i in range(loop_range):
        logger.debug("Running show techsupport ... ")
        wait_until(300, 20, execute_command, duthost, str(since))
        tar_file = [j for j in pytest.tar_stdout.split('\n') if j != ''][-1]
        stdout = duthost.command("rm -rf {}".format(tar_file))
        logger.debug("Sleeping for {} seconds".format(loop_delay))
        time.sleep(loop_delay)
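A hypothetical sketch of the execute_command predicate retried above: it runs show techsupport with the given --since value and stashes the output on the pytest module so the test can recover the generated tarball path. The exact CLI invocation is an assumption; the real helper may differ.

import pytest


def execute_command(duthost, since):
    """Run show techsupport and return True if a dump tarball was produced."""
    result = duthost.shell(
        "show techsupport --since='{}'".format(since), module_ignore_errors=True)
    # The test above parses the tarball path out of pytest.tar_stdout
    pytest.tar_stdout = result["stdout"]
    return result["rc"] == 0 and ".tar.gz" in result["stdout"]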
Example 24
def check_interfaces_and_services(dut, interfaces, reboot_type = None):
    """
    Perform further checks after a reboot, including reboot cause, transceiver status and interface status
    @param dut: The AnsibleHost object of DUT.
    @param interfaces: DUT's interfaces defined by minigraph
    @param reboot_type: The type of reboot that was performed, or None to skip the reboot-cause check
    """
    logging.info("Wait until all critical services are fully started")
    check_critical_services(dut)

    if reboot_type is not None:
        logging.info("Check reboot cause")
        assert wait_until(MAX_WAIT_TIME_FOR_REBOOT_CAUSE, 20, check_reboot_cause, dut, reboot_type), \
            "got reboot-cause failed after rebooted by %s" % reboot_type

        if reboot_ctrl_dict[reboot_type]["test_reboot_cause_only"]:
            logging.info("Further checking skipped for %s test which intends to verify reboot-cause only" % reboot_type)
            return

    logging.info("Wait %d seconds for all the transceivers to be detected" % MAX_WAIT_TIME_FOR_INTERFACES)
    assert wait_until(MAX_WAIT_TIME_FOR_INTERFACES, 20, check_interface_information, dut, interfaces), \
        "Not all transceivers are detected or interfaces are up in %d seconds" % MAX_WAIT_TIME_FOR_INTERFACES

    logging.info("Check transceiver status")
    check_transceiver_basic(dut, interfaces)

    logging.info("Check pmon daemon status")
    assert check_pmon_daemon_status(dut), "Not all pmon daemons running."

    if dut.facts["asic_type"] in ["mellanox"]:

        from .mellanox.check_hw_mgmt_service import check_hw_management_service
        from .mellanox.check_sysfs import check_sysfs

        logging.info("Check the hw-management service")
        check_hw_management_service(dut)

        logging.info("Check sysfs")
        check_sysfs(dut)
Example 25
def test_auto_negotiation_advertised_speeds_all():
    """Test all candidate ports to advertised all supported speeds and verify:
        1. All ports are up after auto negotiation
        2. All ports are negotiated to its highest supported speeds
    """
    for dutname, candidates in cadidate_test_ports.items():
        if not candidates:
            continue

        for duthost, dut_port, fanout, fanout_port in candidates.values():
            logger.info('Start test for DUT port {} and fanout port {}'.format(dut_port, fanout_port))
            # Enable auto negotiation on fanout port
            success = fanout.set_auto_negotiation_mode(fanout_port, True)
            if not success:
                # Fanout does not support setting auto-negotiation mode for this port
                logger.info('Skipping port {} because fanout port {} does not support setting auto-negotiation mode'.format(dut_port, fanout_port))
                continue

            # Advertise all supported speeds in fanout port
            success = fanout.set_speed(fanout_port, None)
            if not success:
                # Fanout does not support setting advertised speeds for this port
                logger.info('Skipping port {} because fanout port {} does not support setting advertised speeds'.format(dut_port, fanout_port))
                continue

            duthost.shell('config interface autoneg {} enabled'.format(dut_port))
            duthost.shell('config interface advertised-speeds {} all'.format(dut_port))

        logger.info('Wait until all ports are up')
        wait_result = wait_until(ALL_PORT_WAIT_TIME, 
                                 PORT_STATUS_CHECK_INTERVAL, 
                                 0, 
                                 check_ports_up, 
                                 duthost, 
                                 [item[1] for item in candidates.values()])
        pytest_assert(wait_result, 'Some ports are still down')

        # Make sure all ports are negotiated to the highest speed
        logger.info('Checking the actual speed is equal to highest speed')
        int_status = duthost.show_interface(command="status")["ansible_facts"]['int_status']
        for _, dut_port, fanout, fanout_port in candidates.values():
            supported_speeds = get_supported_speeds_for_port(duthost, dut_port, fanout, fanout_port)
            logger.info('DUT port = {}, fanout port = {}, supported speeds = {}, actual speed = {}'.format(
                dut_port,
                fanout_port,
                supported_speeds,
                int_status[dut_port]['speed']
            ))
            highest_speed = supported_speeds[-1]
            actual_speed = int_status[dut_port]['speed'][:-1] + '000'
            pytest_assert(actual_speed == highest_speed, 'Actual speed is not the highest speed')
Example 26
def test_reboot_system(duthosts, localhost, all_cfg_facts, nbrhosts, nbr_macs):
    """
    Tests the system after all cards are explicitly reset, interfaces/neighbors should be in sync across the system.

    Args:
        duthosts: duthosts fixture
        localhost: localhost fixture
        all_cfg_facts: all_cfg_facts fixture
        nbrhosts: nbrhosts fixture
        nbr_macs: nbr_macs fixture
    """
    @reset_ansible_local_tmp
    def reboot_node(lh, node=None, results=None):
        node_results = []
        node_results.append(reboot(node, lh, wait=600))
        results[node.hostname] = node_results

    logger.info("=" * 80)
    logger.info("Precheck")
    logger.info("-" * 80)

    check_intfs_and_nbrs(duthosts, all_cfg_facts, nbrhosts, nbr_macs)
    check_ip_fwd(duthosts, all_cfg_facts, nbrhosts)

    logger.info("=" * 80)
    logger.info("Coldboot on all nodes")
    logger.info("-" * 80)

    t0 = time.time()

    parallel_run(reboot_node, [localhost], {}, duthosts.nodes, timeout=1000)

    for node in duthosts.nodes:
        assert wait_until(300, 20, node.critical_services_fully_started
                          ), "Not all critical services are fully started"

    poll_bgp_restored(duthosts)

    t1 = time.time()
    elapsed = t1 - t0

    logger.info("-" * 80)
    logger.info("Time to reboot and recover: %s seconds.", str(elapsed))
    logger.info("-" * 80)

    logger.info("=" * 80)
    logger.info("Postcheck")
    logger.info("-" * 80)

    check_intfs_and_nbrs(duthosts, all_cfg_facts, nbrhosts, nbr_macs)
    check_ip_fwd(duthosts, all_cfg_facts, nbrhosts)
Example 27
    def test_config_interface_state(self, setup_config_mode, sample_intf):
        """
        Checks whether 'config interface startup/shutdown <intf>'
        changes the admin state of the test interface to up/down when
        its interface alias/name is provided as per the configured
        naming mode
        """
        dutHostGuest, mode, ifmode = setup_config_mode
        test_intf = sample_intf[mode]
        interface = sample_intf['default']
        cli_ns_option = sample_intf['cli_ns_option']

        regex_int = re.compile(r'(\S+)\s+[\d,N\/A]+\s+(\w+)\s+(\d+)\s+[\w\/]+\s+([\w\/]+)\s+(\w+)\s+(\w+)\s+(\w+)')
        
        def _port_status(expected_state):
            admin_state = ""
            show_intf_status = dutHostGuest.shell('SONIC_CLI_IFACE_MODE={0} show interfaces status {1} | grep -w {1}'.format(ifmode, test_intf))
            logger.info('show_intf_status:\n{}'.format(show_intf_status['stdout']))

            line = show_intf_status['stdout'].strip()
            if regex_int.match(line) and interface == regex_int.match(line).group(1):
                admin_state = regex_int.match(line).group(7)

            return admin_state == expected_state

        out = dutHostGuest.shell('SONIC_CLI_IFACE_MODE={} sudo config interface {} shutdown {}'.format(
            ifmode, cli_ns_option, test_intf))
        if out['rc'] != 0:
            pytest.fail("Failed to shut down interface {}".format(test_intf))
        pytest_assert(wait_until(PORT_TOGGLE_TIMEOUT, 2, _port_status, 'down'),
                        "Interface {} should be admin down".format(test_intf))

        out = dutHostGuest.shell('SONIC_CLI_IFACE_MODE={} sudo config interface {} startup {}'.format(
            ifmode, cli_ns_option, test_intf))
        if out['rc'] != 0:
            pytest.fail("Failed to start up interface {}".format(test_intf))
        pytest_assert(wait_until(PORT_TOGGLE_TIMEOUT, 2, _port_status, 'up'),
                        "Interface {} should be admin up".format(test_intf))
Example 28
def test_turn_off_pdu_and_check_psu_info(duthost, localhost, creds,
                                         pdu_controller):
    """
    Turn off one PDU outlet and check that all PSU sensor entities are removed because their values can no longer be retrieved
    :param duthost: DUT host object
    :param localhost: localhost object
    :param creds: Credential for snmp
    :param pdu_controller: PDU controller
    :return:
    """
    if not pdu_controller:
        pytest.skip('pdu_controller is None, skipping this test')
    outlet_status = pdu_controller.get_outlet_status()
    if len(outlet_status) < 2:
        pytest.skip(
            'At least 2 outlets required for rest of the testing in this case')

    # turn on all PSU
    for outlet in outlet_status:
        if not outlet['outlet_on']:
            pdu_controller.turn_on_outlet(outlet)
    time.sleep(5)

    outlet_status = pdu_controller.get_outlet_status()
    for outlet in outlet_status:
        if not outlet['outlet_on']:
            pytest.skip(
                'Not all outlet are powered on, skip rest of the testing in this case'
            )

    # turn off the first PSU
    first_outlet = outlet_status[0]
    pdu_controller.turn_off_outlet(first_outlet)
    assert wait_until(30, 5, check_outlet_status, pdu_controller, first_outlet,
                      False)
    # wait for psud update the database
    assert wait_until(120, 20, _check_psu_status_after_power_off, duthost,
                      localhost, creds)
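A hypothetical sketch of the check_outlet_status predicate polled above; it assumes pdu_controller.get_outlet_status can be queried for a single outlet and that each entry carries the outlet_on flag seen earlier in this test. The real helper may differ.

def check_outlet_status(pdu_controller, outlet, expect_status=True):
    """Return True if the outlet's power state matches expect_status."""
    status = pdu_controller.get_outlet_status(outlet)
    return bool(status) and status[0]["outlet_on"] == expect_status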
Example 29
def test_turn_off_psu_and_check_psu_info(duthost, localhost, creds,
                                         psu_controller):
    """
    Turn off one PSU and check that all PSU sensor entities are removed because their values can no longer be retrieved
    :param duthost: DUT host object
    :param localhost: localhost object
    :param creds: Credential for snmp
    :param psu_controller: PSU controller
    :return:
    """
    if not psu_controller:
        pytest.skip('psu_controller is None, skipping this test')
    psu_status = psu_controller.get_psu_status()
    if len(psu_status) < 2:
        pytest.skip(
            'At least 2 PSUs required for rest of the testing in this case')

    # turn on all PSU
    for item in psu_status:
        if not item['psu_on']:
            psu_controller.turn_on_psu(item["psu_id"])
    time.sleep(5)

    psu_status = psu_controller.get_psu_status()
    for item in psu_status:
        if not item['psu_on']:
            pytest.skip(
                'Not all PSU are powered on, skip rest of the testing in this case'
            )

    # turn off the first PSU
    first_psu_id = psu_status[0]['psu_id']
    psu_controller.turn_off_psu(first_psu_id)
    assert wait_until(30, 5, check_psu_status, psu_controller, first_psu_id,
                      False)
    # wait for psud update the database
    assert wait_until(120, 20, _check_psu_status_after_power_off, duthost,
                      localhost, creds)
Example 30
    def teardown_dut_base(self, input_list=None):
        if input_list is None:
            input_list = self.vtep_param_list

        for item in input_list:
            self.frr_helper.unset_neighbor(neighbor_ip=str(item.ip_ptf.ip),
                                           as_number=item.as_number_ptf)
            self.dut_helper.unset_ip(iface=item.if_index,
                                     ip_mask=str(item.ip_dut))
        self.frr_helper.unset_advertise_all_vni()
        self.dut_helper.del_vxlan(vlanid="1000", vni="10000")
        pt_assert(
            wait_until(10, 2, self.check_interface_status, self.vtep_if,
                       False))