Example #1
def restart_thermal_control_daemon(dut):
    """
    Restart the thermal control daemon by killing it and waiting for supervisord to
    restart it automatically.
    :param dut: DUT object representing a SONiC switch under test.
    :return:
    """
    logging.info('Restarting thermal control daemon...')
    find_thermalctld_pid_cmd = 'docker exec -i pmon bash -c \'pgrep -f thermalctld\' | sort'
    output = dut.shell(find_thermalctld_pid_cmd)
    assert output["rc"] == 0, "Run command '%s' failed" % find_thermalctld_pid_cmd
    # Usually there should be 2 thermalctld processes, but there is a chance that the
    # sonic platform API uses subprocess, which creates an extra thermalctld process.
    # For example, chassis.get_all_sfps calls the sfp constructor, and the sfp constructor
    # may use subprocess to call ethtool during initialization.
    # So we only check that thermalctld has at least 2 processes.
    assert len(output["stdout_lines"]) >= 2, "There should be at least 2 thermalctld processes"
    pid_0 = int(output["stdout_lines"][0].strip())
    pid_1 = int(output["stdout_lines"][1].strip())
    # find and kill the parent process
    pid_to_kill = pid_0 if pid_0 < pid_1 else pid_1
    logging.info(
        'Killing old thermal control daemon with pid: {}'.format(pid_to_kill))
    kill_thermalctld_cmd = 'docker exec -i pmon bash -c \'kill {}\''.format(
        pid_to_kill)
    # kill thermalctld and wait for supervisord to restart it automatically
    output = dut.command(kill_thermalctld_cmd)
    assert output["rc"] == 0, "Run command '%s' failed" % kill_thermalctld_cmd

    # make sure thermalctld has restarted
    max_wait_time = 30
    while max_wait_time > 0:
        max_wait_time -= 1
        output = dut.shell(find_thermalctld_pid_cmd)
        assert output["rc"] == 0, "Run command '%s' failed" % find_thermalctld_pid_cmd
        if len(output["stdout_lines"]) != 2:
            time.sleep(1)
            continue

        new_pid_0 = int(output["stdout_lines"][0].strip())
        new_pid_1 = int(output["stdout_lines"][1].strip())
        parent_pid = new_pid_0 if new_pid_0 < new_pid_1 else new_pid_1

        if parent_pid == pid_to_kill:
            logging.info(
                'Old thermal control daemon is still alive, waiting...')
            time.sleep(1)
            continue
        else:
            logging.info(
                'New pid of thermal control daemon is {}'.format(parent_pid))
            return

    # try restore by config reload...
    config_reload(dut)
    assert 0, 'Timed out waiting for the thermal control daemon to restart'
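
A minimal usage sketch for the helper above, assuming the standard sonic-mgmt duthosts / rand_one_dut_hostname fixtures; the test name and the follow-up check are illustrative additions, not part of the original example.

def test_restart_thermal_control_daemon(duthosts, rand_one_dut_hostname):
    """Illustrative only: restart thermalctld and confirm it is running again."""
    duthost = duthosts[rand_one_dut_hostname]
    restart_thermal_control_daemon(duthost)
    # The helper only returns once supervisord has respawned thermalctld,
    # so a simple pgrep is enough to confirm the daemon is back.
    output = duthost.shell("docker exec -i pmon bash -c 'pgrep -f thermalctld'")
    assert output["rc"] == 0 and output["stdout_lines"], "thermalctld is not running"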
Example #2
def load_minigraph_after_test(rand_selected_dut):
    """
    Restore config_db, since the VNET warm-reboot test writes its test config into
    config_db.json
    """
    yield
    config_reload(rand_selected_dut, config_source='minigraph')
    def setupDutConfig(self, duthosts,
                       enum_rand_one_per_hwsku_frontend_hostname):
        """
            Disables BGP to reduce load on the switch and restores the DUT configuration after the test completes

            Args:
                duthost (AnsibleHost): Device Under Test (DUT)

            Returns:
                None
        """
        duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname]
        if not duthost.get_facts().get("modular_chassis"):
            duthost.command("sudo config bgp shutdown all")
            if not wait_until(120, 2.0, 0, self._check_no_bgp_routes, duthost):
                pytest.fail(
                    'BGP Shutdown Timeout: BGP route removal exceeded 120 seconds.'
                )

        yield

        logger.info("Reload Config DB")
        config_reload(duthost,
                      config_source='config_db',
                      safe_reload=True,
                      check_intf_up_ports=True)
def update_mux_configs_and_config_reload(dut, state):
    """
    @summary: Update config_db.json, and then load it with 'config reload'.
            Please note that this is a general method; the caller must
            back up config_db.json and restore it at the end.
    @param dut: The DUT we are testing against
    @param state: A str, auto|active|standby
    """
    STATE_LIST = ['auto', 'active', 'standby']
    pytest_assert(state in STATE_LIST, "state should be one of {}".format(STATE_LIST))

    mux_cable_config = dut.shell("sonic-cfggen -d  --var-json 'MUX_CABLE'")['stdout']
    pytest_assert(len(mux_cable_config.strip()) != 0, "No mux_cable configuration is found in config_db")

    # Update mux_cable state and dump to a temp file
    mux_cable_config_json = json.loads(mux_cable_config)
    for _, config in mux_cable_config_json.items():
        config['state'] = state
    mux_cable_config_json = {"MUX_CABLE": mux_cable_config_json}
    TMP_FILE = "/tmp/mux_config.json"
    with open(TMP_FILE, "w") as f:
        json.dump(mux_cable_config_json, f)

    dut.copy(src=TMP_FILE, dest=TMP_FILE)

    # Load updated mux_cable config with sonic-cfggen
    cmds = [
        "sonic-cfggen -j {} -w".format(TMP_FILE),
        "config save -y"
    ]
    dut.shell_cmds(cmds=cmds)
    config_reload(dut)
    dut.file(path=TMP_FILE, state='absent')
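
The docstring above notes that the caller must back up config_db.json and restore it afterwards. A minimal sketch of that caller-side contract, assuming the usual sonic-mgmt pytest environment (duthost fixture, config_reload helper); the fixture name and backup path are illustrative.

import pytest

@pytest.fixture
def backup_and_restore_config_db(duthost):
    """Hypothetical caller-side fixture for update_mux_configs_and_config_reload:
    back up config_db.json before the test and restore it afterwards."""
    backup_path = "/tmp/config_db.json.mux_backup"  # illustrative location
    duthost.shell("sudo cp /etc/sonic/config_db.json {}".format(backup_path))
    yield
    duthost.shell("sudo cp {} /etc/sonic/config_db.json".format(backup_path))
    duthost.file(path=backup_path, state="absent")
    config_reload(duthost)  # apply the restored config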
Example #5
def setup_env(duthosts, rand_one_dut_hostname, cfg_facts):
    """
    Setup/teardown fixture for acl config
    Args:
        duthosts: list of DUTs.
        rand_one_dut_hostname: hostname of the randomly selected DUT.
        cfg_facts: config facts for the selected DUT
    """
    duthost = duthosts[rand_one_dut_hostname]

    config_tmpfile = generate_tmpfile(duthost)
    logger.info(
        "config_tmpfile {} Backing up config_db.json".format(config_tmpfile))
    duthost.shell(
        "sudo cp /etc/sonic/config_db.json {}".format(config_tmpfile))

    # Cleanup acl config
    duthost.shell(
        'sonic-db-cli CONFIG_DB keys "ACL_RULE|*" | xargs --no-run-if-empty sonic-db-cli CONFIG_DB del'
    )
    duthost.shell(
        'sonic-db-cli CONFIG_DB keys "ACL_TABLE|*" | xargs --no-run-if-empty sonic-db-cli CONFIG_DB del'
    )

    yield

    logger.info("Restoring config_db.json")
    duthost.shell(
        "sudo cp {} /etc/sonic/config_db.json".format(config_tmpfile))
    delete_tmpfile(duthost, config_tmpfile)

    # Reload the restored config to clear any remaining ACL state
    config_reload(duthost)
Example #6
def setup_env(duthosts, rand_one_dut_hostname, cfg_facts):
    """
    Setup/teardown fixture for syslog config
    Args:
        duthosts: list of DUTs.
        rand_one_dut_hostname: hostname of the randomly selected DUT.
        cfg_facts: config facts for the selected DUT
    """
    duthost = duthosts[rand_one_dut_hostname]

    config_tmpfile = generate_tmpfile(duthost)
    logger.info(
        "config_tmpfile {} Backing up config_db.json".format(config_tmpfile))
    duthost.shell(
        "sudo cp /etc/sonic/config_db.json {}".format(config_tmpfile))

    # Cleanup syslog server config
    syslog_servers = cfg_facts.get('SYSLOG_SERVER', {})
    for syslog_server in syslog_servers:
        del_syslog_server = duthost.shell(
            "sudo config syslog del {}".format(syslog_server),
            module_ignore_errors=True)
        pytest_assert(
            not del_syslog_server['rc'],
            "syslog server '{}' is not deleted successfully".format(
                syslog_server))

    yield

    logger.info("Restoring config_db.json")
    duthost.shell(
        "sudo cp {} /etc/sonic/config_db.json".format(config_tmpfile))
    delete_tmpfile(duthost, config_tmpfile)
    config_reload(duthost)
Example #7
def tearDown(vlan_ports_list, duthost, ptfhost, vlan_intfs_list,
             portchannel_interfaces):

    logger.info("VLAN test ending ...")
    logger.info("Stop arp_responder")
    ptfhost.command('supervisorctl stop arp_responder')

    logger.info("Delete VLAN intf")
    try:
        for item in vlan_ports_list:
            for i in item['permit_vlanid']:
                duthost.command('ip route flush {}'.format(
                    item['permit_vlanid'][i]['remote_ip']))

        for vlan_port in vlan_ports_list:
            for permit_vlanid in vlan_port["permit_vlanid"].keys():
                if int(permit_vlanid) != vlan_port["pvid"]:
                    ptfhost.command("ip link delete eth{idx}.{pvid}".format(
                        idx=vlan_port["port_index"][0], pvid=permit_vlanid))
    except RunAnsibleModuleFail as e:
        logger.error(e)

    config_reload(duthost)
    # make sure Portchannels go up for post-test link sanity
    time.sleep(90)
Example #8
def test_config_reload_toggle_reset(duthost, k8scluster):
    """
    Test case to ensure that when DUT is joined to master (disable=false, unsaved) but config is saved with disable=true, DUT resets from master after config reload

    Saves config with disable=true

    Joins master, which sets disable=false unsaved

    Performs config reload

    Ensures that DUT has reset from the master after config reload, as disable=true was saved 
    
    Args:
        duthost: DUT host object
        k8scluster: shortcut fixture for getting cluster of Kubernetes master hosts
    """
    dut_cmds = ['sudo config kube server disable on',
                'sudo config save -y']
    duthost.shell_cmds(cmds=dut_cmds)

    ku.join_master(duthost, k8scluster.vip) 

    config_reload(duthost)
    wait_critical_processes(duthost)

    server_connect_exp_status = False
    server_connect_act_status = ku.check_connected(duthost)
    server_connect_status_updated = ku.poll_for_status_change(duthost, server_connect_exp_status)
    pytest_assert(server_connect_status_updated, "Unexpected k8s server connection status after config reload, Expected server connected status: {}, Found server connected status: {}".format(server_connect_exp_status, server_connect_act_status))
def setup_env(duthosts, rand_one_dut_hostname, golden_config_exists_on_dut):
    """
    Setup/teardown
    Args:
        duthosts: list of DUTs.
        rand_one_dut_hostname: hostname of the randomly selected DUT.
        golden_config_exists_on_dut: whether a Golden Config file already exists on the DUT.
    """
    duthost = duthosts[rand_one_dut_hostname]

    # Backup configDB
    backup_config(duthost, CONFIG_DB, CONFIG_DB_BACKUP)
    # Backup Golden Config if exists.
    if golden_config_exists_on_dut:
        backup_config(duthost, GOLDEN_CONFIG, GOLDEN_CONFIG_BACKUP)

    # Reload test env with minigraph
    config_reload(duthost, config_source="minigraph", safe_reload=True)
    running_config = get_running_config(duthost)

    yield running_config

    # Restore configDB after test.
    restore_config(duthost, CONFIG_DB, CONFIG_DB_BACKUP)
    # Restore Golden Config after test, else cleanup test file.
    if golden_config_exists_on_dut:
        restore_config(duthost, GOLDEN_CONFIG, GOLDEN_CONFIG_BACKUP)
    else:
        duthost.file(path=GOLDEN_CONFIG, state='absent')

    # Reload the restored pre-test config
    config_reload(duthost)
def restart_thermal_control_daemon(dut):
    """
    Restart the thermal control daemon via supervisorctl and verify it is running
    again; fall back to a config reload if the restart fails.
    :param dut: DUT object representing a SONiC switch under test.
    :return:
    """
    logging.info('Restarting thermal control daemon on {}...'.format(dut.hostname))
    find_thermalctld_pid_cmd = 'docker exec -i pmon bash -c \'pgrep -f thermalctld\' | sort'
    output = dut.shell(find_thermalctld_pid_cmd)
    assert output["rc"] == 0, "Run command '%s' failed" % find_thermalctld_pid_cmd
    # Usually there should be 2 thermalctld processes, but there is a chance that the
    # sonic platform API uses subprocess, which creates an extra thermalctld process.
    # For example, chassis.get_all_sfps calls the sfp constructor, and the sfp constructor
    # may use subprocess to call ethtool during initialization.
    # So we only check that thermalctld has at least 2 processes.
    assert len(output["stdout_lines"]) >= 2, "There should be at least 2 thermalctld processes"

    restart_thermalctl_cmd = "docker exec -i pmon bash -c 'supervisorctl restart thermalctld'"
    output = dut.shell(restart_thermalctl_cmd)
    if output["rc"] == 0:
        output = dut.shell(find_thermalctld_pid_cmd)
        assert output["rc"] == 0, "Run command '{}' failed after restart of thermalctld on {}".format(find_thermalctld_pid_cmd, dut.hostname)
        assert len(output["stdout_lines"]) >= 2, "There should be at least 2 thermalctld processes"
        logging.info("thermalctld processes restarted successfully on {}".format(dut.hostname))
        return
    # try restore by config reload...
    config_reload(dut)
    assert 0, 'Timed out waiting for the thermal control daemon to restart'
Example #11
def test_config_reload_toggle_join(duthost, k8scluster):
    """
    Test case to ensure that when DUT is not joined to the master due to (unsaved) disable=true, but config is saved with disable=false, DUT joins after config reload

    Saves config with configured VIP and disable=false

    Sets disable=true without saving config, and ensure that DUT resets from master

    Performs config reload

    Ensures that DUT is joined to master after config reload

    Args:
        duthost: DUT host object
        k8scluster: shortcut fixture for getting cluster of Kubernetes master hosts
    """
    dut_cmds = ['sudo config kube server ip {}'.format(k8scluster.vip),
                'sudo config kube server disable off',
                'sudo config save -y']
    duthost.shell_cmds(cmds=dut_cmds)
  
    duthost.shell('sudo config kube server disable on')
    server_connect_exp_status = False
    server_connect_act_status = ku.check_connected(duthost)
    server_connect_status_updated = ku.poll_for_status_change(duthost, server_connect_exp_status)
    pytest_assert(server_connect_status_updated, "Unexpected k8s server connection status after setting disable=true, Expected server connected status: {}, Found server connected status: {}".format(server_connect_exp_status, server_connect_act_status))
    
    config_reload(duthost)
    wait_critical_processes(duthost)

    server_connect_exp_status = True
    server_connect_act_status = ku.check_connected(duthost)
    server_connect_status_updated = ku.poll_for_status_change(duthost, server_connect_exp_status)
    pytest_assert(server_connect_status_updated, "Unexpected k8s server connection status after config reload, Expected server connected status: {}, Found server connected status: {}".format(server_connect_exp_status, server_connect_act_status))
Example #12
def common_setup_teardown(ptfhost, intfs_for_test):
    intf1, intf1_indice, intf2, intf2_index, intf_facts, mg_facts, duthost = intfs_for_test

    po1 = get_po(mg_facts, intf1)
    po2 = get_po(mg_facts, intf2)

    try:
        # Make sure selected interfaces are not in portchannel
        if po1 is not None:
            duthost.shell('config portchannel member del {0} {1}'.format(po1, intf1))
            collect_info(duthost)
            duthost.shell('config interface startup {0}'.format(intf1))
            collect_info(duthost)

        if po2 is not None:
            duthost.shell('config portchannel member del {0} {1}'.format(po2, intf2))
            collect_info(duthost)
            duthost.shell('config interface startup {0}'.format(intf2))
            collect_info(duthost)

        # Change SONiC DUT interface IP to test IP address
        duthost.shell('config interface ip add {0} 10.10.1.2/28'.format(intf1))
        collect_info(duthost)
        duthost.shell('config interface ip add {0} 10.10.1.20/28'.format(intf2))
        collect_info(duthost)

        if (po1 is not None) or (po2 is not None):
            time.sleep(40)

        yield duthost, ptfhost, intf_facts, intf1, intf2, intf1_indice, intf2_index
    finally:
        # Recover DUT interface IP address
        config_reload(duthost, config_source='config_db', wait=120)
Example #13
def remove_dataacl_table(duthosts):
    """
    Remove DATAACL to free TCAM resources.
    The change is written to config_db because we don't want DATAACL recovered after a reboot.
    """
    TABLE_NAME = "DATAACL"
    for duthost in duthosts:
        lines = duthost.shell(
            cmd="show acl table {}".format(TABLE_NAME))['stdout_lines']
        data_acl_existing = False
        for line in lines:
            if TABLE_NAME in line:
                data_acl_existing = True
                break
        if not data_acl_existing:
            yield
            return
        # Remove DATAACL
        logger.info("Removing ACL table {}".format(TABLE_NAME))
        cmds = [
            "config acl remove table {}".format(TABLE_NAME), "config save -y"
        ]
        duthost.shell_cmds(cmds=cmds)
    yield
    # Recover DATAACL by reloading minigraph
    for duthost in duthosts:
        config_reload(duthost, config_source="minigraph")
Example #14
def restore_config_db(dut):
    """
    Restore config db
    Args:
        dut (SonicHost): The target device
    """
    dut.command("sudo cp {} {}".format(_TEMP_CONFIG_DB, _CONFIG_DB))
    dut.command("sudo rm -f {}".format(_TEMP_CONFIG_DB))
    config_reload(dut)
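
restore_config_db assumes an earlier step copied the running config to _TEMP_CONFIG_DB. A minimal sketch of that counterpart, reusing the same module constants; the helper name is illustrative and not part of the original example.

def backup_config_db(dut):
    """Hypothetical counterpart of restore_config_db: stash the running
    config_db.json so it can be restored after the test."""
    dut.command("sudo cp {} {}".format(_CONFIG_DB, _TEMP_CONFIG_DB))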
Example #15
def restart_thermal_control_daemon(dut):
    """
    Restart the thermal control daemon by killing it and waiting for supervisord to
    restart it automatically.
    :param dut: DUT object representing a SONiC switch under test.
    :return:
    """
    logging.info('Restarting thermal control daemon...')
    find_thermalctld_pid_cmd = 'docker exec -i pmon bash -c \'pgrep -f thermalctld\' | sort'
    output = dut.shell(find_thermalctld_pid_cmd)
    assert output["rc"] == 0, "Run command '%s' failed" % find_thermalctld_pid_cmd
    assert len(output["stdout_lines"]) == 2, "There should be 2 thermalctld processes"
    pid_0 = int(output["stdout_lines"][0].strip())
    pid_1 = int(output["stdout_lines"][1].strip())
    # find and kill the parent process
    pid_to_kill = pid_0 if pid_0 < pid_1 else pid_1
    logging.info(
        'Killing old thermal control daemon with pid: {}'.format(pid_to_kill))
    kill_thermalctld_cmd = 'docker exec -i pmon bash -c \'kill {}\''.format(
        pid_to_kill)
    # kill thermalctld and wait for supervisord to restart it automatically
    output = dut.command(kill_thermalctld_cmd)
    assert output["rc"] == 0, "Run command '%s' failed" % kill_thermalctld_cmd

    # make sure thermalctld has restarted
    max_wait_time = 30
    while max_wait_time > 0:
        max_wait_time -= 1
        output = dut.shell(find_thermalctld_pid_cmd)
        assert output["rc"] == 0, "Run command '%s' failed" % find_thermalctld_pid_cmd
        if len(output["stdout_lines"]) != 2:
            time.sleep(1)
            continue

        new_pid_0 = int(output["stdout_lines"][0].strip())
        new_pid_1 = int(output["stdout_lines"][1].strip())
        parent_pid = new_pid_0 if new_pid_0 < new_pid_1 else new_pid_1

        if parent_pid == pid_to_kill:
            logging.info(
                'Old thermal control daemon is still alive, waiting...')
            time.sleep(1)
            continue
        else:
            logging.info(
                'New pid of thermal control daemon is {}'.format(parent_pid))
            return

    # try restore by config reload...
    config_reload(dut)
    assert 0, 'Timed out waiting for the thermal control daemon to restart'
Example #16
def recover_ports(duthosts, fanouthosts):
    """Module level fixture that automatically do following job:
        1. Build global candidate test ports 
        2. Save fanout port state before the test
        3. Restore fanout and DUT after test

    Args:
        duthosts: DUT object
        enum_dut_portname_module_fixture (str): DUT port name
        fanouthosts: Fanout objects
    """
    global cadidate_test_ports
    fanout_original_port_states = {}
    logger.info('Collecting existing port configuration for DUT and fanout...')
    for duthost in duthosts:
        # Only do the sampling when there are no candidates
        if duthost.hostname in cadidate_test_ports.keys():
            continue
        all_ports = build_test_candidates(duthost, fanouthosts, 'all_ports')

        # Testing all ports takes too much time (sometimes more than an hour),
        # so we choose 3 ports randomly as the candidate ports
        candidates = random.sample(all_ports, min(3, len(all_ports)))
        cadidate_test_ports[duthost.hostname] = {}
        for dut_port, fanout, fanout_port in candidates:
            auto_neg_mode = fanout.get_auto_negotiation_mode(fanout_port)
            if auto_neg_mode is not None:
                cadidate_test_ports[duthost.hostname][dut_port] = (duthost,
                                                                   dut_port,
                                                                   fanout,
                                                                   fanout_port)
        pytest_require(
            len(cadidate_test_ports) > 0,
            "Skip test because no fanout port supports setting the auto-negotiation mode"
        )

        for _, _, fanout, fanout_port in cadidate_test_ports[
                duthost.hostname].values():
            # Query the original state per port; reusing auto_neg_mode from the
            # candidate-selection loop above would record the wrong value for
            # every port except the last one.
            auto_neg_mode = fanout.get_auto_negotiation_mode(fanout_port)
            speed = fanout.get_speed(fanout_port)
            if fanout not in fanout_original_port_states:
                fanout_original_port_states[fanout] = {}
            fanout_original_port_states[fanout][fanout_port] = (auto_neg_mode,
                                                                speed)

    yield

    logger.info('Recovering port configuration for fanout...')
    for fanout, port_data in fanout_original_port_states.items():
        for port, state in port_data.items():
            fanout.set_auto_negotiation_mode(port, state[0])
            fanout.set_speed(port, state[1])

    logger.info('Recovering port configuration for DUT...')
    for duthost in duthosts:
        config_reload(duthost)
Example #17
def rollback_or_reload(duthost, cp=DEFAULT_CHECKPOINT_NAME):
    """Run rollback on target duthost. config_reload if rollback failed.

    Args:
        duthost: Device Under Test (DUT)
    """
    output = rollback(duthost, cp)

    if output['rc'] or "Config rolled back successfull" not in output['stdout']:
        config_reload(duthost)
        pytest.fail("config rollback failed. Restored by config_reload")
def teardown(duthosts, rand_one_dut_hostname):
    """
    Teardown fixture to clean up DUT to initial state

    Args:
        duthosts: All DUTs objects belonging to the testbed
        rand_one_dut_hostname: Hostname of a random chosen dut to run test
    """
    yield
    duthost = duthosts[rand_one_dut_hostname]
    config_reload(duthost, safe_reload=True, check_intf_up_ports=True)
Example #19
def common_setup_teardown(duthosts, ptfhost, enum_rand_one_per_hwsku_frontend_hostname, enum_frontend_asic_index, tbinfo):
    try:
        duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname]
        config_facts = duthost.asic_instance(enum_frontend_asic_index).config_facts(host=duthost.hostname, source="running")['ansible_facts']
        router_mac = config_facts['DEVICE_METADATA']['localhost']['mac'].lower()

        # Copy test files
        ptfhost.copy(src="ptftests", dest="/root")
        logging.info("router_mac {}".format(router_mac))
        yield duthost, ptfhost, router_mac
    finally:
        #Recover DUT interface IP address
        config_reload(duthost, config_source='config_db', wait=120)
def setup_host_vlan_intf_mac(duthosts, rand_one_dut_hostname, testbed_params,
                             verify_host_port_vlan_membership):
    vlan_intf, _ = testbed_params
    duthost = duthosts[rand_one_dut_hostname]
    duthost.shell('redis-cli -n 4 hmset "VLAN|%s" mac %s' %
                  (vlan_intf["attachto"], DUT_VLAN_INTF_MAC))
    wait_until(
        10, 2, 2, lambda: duthost.get_dut_iface_mac(vlan_intf["attachto"]) ==
        DUT_VLAN_INTF_MAC)

    yield

    config_reload(duthost)
Example #21
def common_setup_teardown(duthosts, ptfhost,
                          enum_rand_one_per_hwsku_frontend_hostname):
    try:
        duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname]
        router_mac = duthost.shell(
            'sonic-cfggen -d -v \'DEVICE_METADATA.localhost.mac\''
        )["stdout_lines"][0].decode("utf-8")
        # Copy test files
        ptfhost.copy(src="ptftests", dest="/root")
        logging.info("router_mac {}".format(router_mac))
        yield duthost, ptfhost, router_mac
    finally:
        #Recover DUT interface IP address
        config_reload(duthost, config_source='config_db', wait=120)
    def restoreDutConfig(self, duthost):
        """
            Restores DUT configuration after test completes

            Args:
                duthost (AnsibleHost): Device Under Test (DUT)

            Returns:
                None
        """
        yield

        logger.info("Reload Config DB")
        config_reload(duthost, config_source='config_db', wait=120)
Example #23
def apply_global_nat_config(duthost, config_nat_feature_enabled):
    """
    applies the DUT's global NAT configuration;
    after the test run, cleans up the DUT's NAT configuration
    :param duthost: DUT host object
    """
    status, _ = duthost.get_feature_status()
    if 'nat' not in status or status['nat'] == 'disabled':
        pytest.skip('nat feature is not enabled with image version {}'.format(duthost.os_version))

    nat_global_config(duthost)
    yield
    # reload config on teardown
    config_reload(duthost, config_source='minigraph')
Example #24
def common_setup_teardown(duthosts, ptfhost,
                          enum_rand_one_per_hwsku_frontend_hostname,
                          enum_frontend_asic_index, tbinfo):
    try:
        duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname]
        router_mac = duthost.asic_instance(
            enum_frontend_asic_index).get_router_mac()

        # Copy test files
        ptfhost.copy(src="ptftests", dest="/root")
        logging.info("router_mac {}".format(router_mac))
        yield duthost, ptfhost, router_mac
    finally:
        #Recover DUT interface IP address
        config_reload(duthost, config_source='config_db', wait=120)
Example #25
def tearDown(work_vlan_ports_list, duthost, ptfhost):

    logger.info("VLAN test ending ...")

    logger.info("Delete VLAN intf")
    for vlan_port in work_vlan_ports_list:
        for permit_vlanid in vlan_port["permit_vlanid"]:
            if int(permit_vlanid) != vlan_port["pvid"]:
                try:
                    ptfhost.command("ip link delete eth{idx}.{pvid}".format(
                        idx=vlan_port["port_index"][0], pvid=permit_vlanid))
                except RunAnsibleModuleFail as e:
                    logger.error(e)

    config_reload(duthost)
    def restoreDutConfig(self, duthosts,
                         enum_rand_one_per_hwsku_frontend_hostname):
        """
            Restores DUT configuration after test completes

            Args:
                duthost (AnsibleHost): Device Under Test (DUT)

            Returns:
                None
        """
        duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname]
        yield

        logger.info("Reload Config DB")
        config_reload(duthost, config_source='config_db', wait=120)
def configure_dut(minigraph_facts, duthosts, rand_one_dut_hostname,
                  vnet_config, vnet_test_params):
    """
    Setup/teardown fixture for VNET route leak test

    During the setup portion, generates VNET VxLAN configurations and applies them to the DUT
    During the teardown portion, removes all previously pushed VNET VxLAN information from the DUT

    Args:
        minigraph_facts: Minigraph information
        duthosts: list of DUTs
        rand_one_dut_hostname: hostname of the randomly selected DUT
        vnet_config: Dictionary containing VNET configuration information
        vnet_test_params: Dictionary containing VNET test parameters
    """
    duthost = duthosts[rand_one_dut_hostname]

    logger.info("Backing up config_db.json")
    duthost.shell(BACKUP_CONFIG_DB_CMD)

    duthost.shell("sonic-clear fdb all")
    generate_dut_config_files(duthost, minigraph_facts, vnet_test_params,
                              vnet_config)
    apply_dut_config_files(duthost, vnet_test_params)

    # In this case yield is used only to separate this fixture into setup and teardown portions
    yield

    if vnet_test_params[CLEANUP_KEY]:
        logger.info("Restoring config_db.json")
        duthost.shell(RESTORE_CONFIG_DB_CMD)
        duthost.shell(DELETE_BACKUP_CONFIG_DB_CMD)

        cleanup_vnet_routes(duthost, vnet_test_params)
        cleanup_dut_vnets(duthost, minigraph_facts, vnet_config)
        cleanup_vxlan_tunnels(duthost, vnet_test_params)

        logger.info("Restarting BGP and waiting for BGP sessions")
        duthost.shell(RESTART_BGP_CMD)

        if not wait_until(BGP_WAIT_TIMEOUT, BGP_POLL_RATE, bgp_connected,
                          duthost):
            logger.warning(
                "BGP sessions not up {} seconds after BGP restart, restoring with `config_reload`"
                .format(BGP_WAIT_TIMEOUT))
            config_reload(duthost)
    else:
        logger.info("Skipping cleanup")
Example #28
def reload_dut_config(request, duthost, setup_test_env):
    """
    DUT's configuration reload on teardown
    :param request: pytest request object
    :param duthost: DUT host object
    """
    yield
    interface_type, setup_info = setup_test_env
    setup_data = copy.deepcopy(setup_info)
    dut_iface = setup_data[interface_type]["vrf_conf"]["red"]["dut_iface"]
    gw_ip = setup_data[interface_type]["vrf_conf"]["red"]["gw"]
    mask = setup_data[interface_type]["vrf_conf"]["red"]["mask"]
    config_reload(duthost, config_source='minigraph')
    pch_ip = setup_info["pch_ips"][dut_iface]
    duthost.shell("sudo config interface ip remove {} {}/31".format(dut_iface, pch_ip))
    duthost.shell("sudo config interface ip add {} {}/{}".format(dut_iface, gw_ip, mask))
    nat_global_config(duthost)
def test_vnet_route_leak(configure_dut, duthosts, rand_one_dut_hostname):
    """
    Test case for VNET route leak check

    Gets a list of all VNET routes programmed to the DUT, and a list of all BGP neighbors
    Verifies that no VNET routes are being advertised to BGP neighbors

    Restarts the BGP service and checks for leaked routes again

    Performs `config reload` and checks for leaked routes again

    Args:
        configure_dut: Pytest fixture to prepare DUT for testing
        duthosts: list of DUTs
        rand_one_dut_hostname: hostname of the randomly selected DUT
    """
    duthost = duthosts[rand_one_dut_hostname]

    leaked_routes = get_leaked_routes(duthost)
    pytest_assert(not leaked_routes,
                  LEAKED_ROUTES_TEMPLATE.format(leaked_routes))

    logger.info("Restarting BGP")
    duthost.shell(RESTART_BGP_CMD)

    pytest_assert(
        wait_until(BGP_WAIT_TIMEOUT, BGP_POLL_RATE, bgp_connected, duthost),
        BGP_ERROR_TEMPLATE.format(BGP_WAIT_TIMEOUT))

    leaked_routes = get_leaked_routes(duthost)
    pytest_assert(not leaked_routes,
                  LEAKED_ROUTES_TEMPLATE.format(leaked_routes))

    logger.info("Saving and reloading CONFIG_DB")
    duthost.shell(CONFIG_SAVE_CMD)
    config_reload(duthost)

    pytest_assert(
        wait_until(BGP_WAIT_TIMEOUT, BGP_POLL_RATE, bgp_connected, duthost),
        BGP_ERROR_TEMPLATE.format(BGP_WAIT_TIMEOUT))

    leaked_routes = get_leaked_routes(duthost)
    pytest_assert(not leaked_routes,
                  LEAKED_ROUTES_TEMPLATE.format(leaked_routes))
    def test_default_cfg_after_load_mg(self, duthosts, rand_one_dut_hostname):
        """
        Tests that pfcwd gets started after load_minigraph

        Args:
            duthosts: list of DUTs
            rand_one_dut_hostname: hostname of the randomly selected DUT

        Returns:
            None
        """
        duthost = duthosts[rand_one_dut_hostname]
        config_reload(duthost, config_source='minigraph')
        # sleep 20 seconds to make sure configuration is loaded
        time.sleep(20)
        res = duthost.command('pfcwd show config')
        for line in res['stdout_lines']:
            if "ethernet" in line.lower():
                return
        # If no Ethernet port appears in the pfcwd config output, fail the test.
        pytest.fail("Failed to start pfcwd after load_minigraph")