def test_pfc_pause_single_lossy_prio_reboot(ixia_api,
                                            ixia_testbed_config,
                                            conn_graph_facts,
                                            fanout_graph_facts,
                                            localhost,
                                            duthosts,
                                            rand_one_dut_hostname,
                                            rand_one_dut_portname_oper_up,
                                            rand_lossy_prio,
                                            all_prio_list,
                                            prio_dscp_map,
                                            reboot_type):
    """
    Test if PFC will impact a single lossy priority after various kinds of reboots

    Args:
        ixia_api (pytest fixture): IXIA session
        ixia_testbed_config (pytest fixture): testbed configuration information
        conn_graph_facts (pytest fixture): connection graph
        fanout_graph_facts (pytest fixture): fanout graph
        localhost (pytest fixture): localhost handle
        duthosts (pytest fixture): list of DUTs
        rand_one_dut_hostname (str): hostname of DUT
        rand_one_dut_portname_oper_up (str): port to test, e.g., 's6100-1|Ethernet0'
        rand_lossy_prio (str): lossy priority to test, e.g., 's6100-1|2'
        all_prio_list (pytest fixture): list of all the priorities
        prio_dscp_map (pytest fixture): priority vs. DSCP map (key = priority).
        reboot_type (str): reboot type to be issued on the DUT

    Returns:
        N/A
    """
    dut_hostname, dut_port = rand_one_dut_portname_oper_up.split('|')
    dut_hostname2, lossy_prio = rand_lossy_prio.split('|')
    pytest_require(rand_one_dut_hostname == dut_hostname == dut_hostname2,
                   "Priority and port are not mapped to the expected DUT")

    testbed_config, port_config_list = ixia_testbed_config
    duthost = duthosts[rand_one_dut_hostname]

    lossy_prio = int(lossy_prio)
    pause_prio_list = [lossy_prio]
    test_prio_list = [lossy_prio]
    # Background traffic uses every priority except the one under test.
    # (Idiom fix: build the filtered list in one pass instead of copy + remove.)
    bg_prio_list = [p for p in all_prio_list if p != lossy_prio]

    logger.info("Issuing a {} reboot on the dut {}".format(
        reboot_type, duthost.hostname))
    reboot(duthost, localhost, reboot_type=reboot_type)
    logger.info("Wait until the system is stable")
    pytest_assert(wait_until(300, 20, duthost.critical_services_fully_started),
                  "Not all critical services are fully started")

    run_pfc_test(api=ixia_api,
                 testbed_config=testbed_config,
                 port_config_list=port_config_list,
                 conn_data=conn_graph_facts,
                 fanout_data=fanout_graph_facts,
                 duthost=duthost,
                 dut_port=dut_port,
                 global_pause=False,
                 pause_prio_list=pause_prio_list,
                 test_prio_list=test_prio_list,
                 bg_prio_list=bg_prio_list,
                 prio_dscp_map=prio_dscp_map,
                 test_traffic_pause=False)
def reboot_and_check(localhost, dut, interfaces, reboot_type=REBOOT_TYPE_WARM, reboot_kwargs=None):
    """
    Reboot the DUT with the requested method, then verify platform health.

    @param localhost: The Localhost object.
    @param dut: The AnsibleHost object of DUT.
    @param interfaces: DUT's interfaces defined by minigraph
    @param reboot_type: The reboot type, pre-defined const that has name convention
                        of REBOOT_TYPE_XXX.
    @param reboot_kwargs: The argument used by reboot_helper
    """
    logging.info("Run %s reboot on DUT" % reboot_type)
    reboot(dut, localhost, reboot_type=reboot_type,
           reboot_helper=None, reboot_kwargs=reboot_kwargs)

    # Post-reboot health checks: services, reboot cause, ports/transceivers,
    # and neighbor sessions.
    check_services(dut)
    check_reboot_type(dut, reboot_type)
    check_interfaces_and_transceivers(dut, interfaces)
    check_neighbors(dut)
def test_trap_config_save_after_reboot(self, duthosts, localhost, enum_rand_one_per_hwsku_frontend_hostname, ptfhost,
                                       check_image_version, copp_testbed, dut_type, backup_restore_config_db, request):
    """
    Verify the trap configuration survives a reboot (reboot/fast-reboot/warm-reboot/soft-reboot).

    Steps:
        1. Set always_enabled of a trap (e.g. bgp) to true
        2. config save -y
        3. Reboot with the type given by the --copp_reboot_type option
        4. Check the saved configuration is still present in config_db
        5. Check the trap is installed by sending traffic
    """
    duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname]
    trap = self.trap_id

    logger.info("Set always_enabled of {} to true".format(trap))
    copp_utils.configure_always_enabled_for_trap(duthost, trap, "true")

    logger.info("Config save")
    duthost.command("sudo config save -y")

    reboot_type = request.config.getoption("--copp_reboot_type")
    logger.info("Do {}".format(reboot_type))
    reboot(duthost, localhost, reboot_type=reboot_type, reboot_helper=None, reboot_kwargs=None)

    logger.info("Verify always_enable of {} == {} in config_db".format(trap, "true"))
    copp_utils.verify_always_enable_value(duthost, trap, "true")

    logger.info("Verify {} trap status is installed by sending traffic".format(trap))
    pytest_assert(
        wait_until(100, 20, 0, _copp_runner, duthost, ptfhost, trap.upper(), copp_testbed, dut_type),
        "Installing {} trap fail".format(trap))
def test_features_state(duthosts, enum_dut_hostname, localhost):
    """Checks whether the state of each feature is valid or not.

    Args:
        duthosts: Fixture returns a list of Ansible object DuT.
        enum_dut_hostname: Fixture returns name of DuT.
        localhost: localhost handle, used to drive the recovery reboot.

    Returns:
        None.
    """
    duthost = duthosts[enum_dut_hostname]
    logger.info("Checking the state of each feature in 'CONFIG_DB' ...")
    if not wait_until(180, FEATURE_STATE_VERIFYING_INTERVAL_SECS, 0,
                      verify_features_state, duthost):
        # Fix: Logger.warn is a deprecated alias of Logger.warning.
        logger.warning(
            "Not all states of features in 'CONFIG_DB' are valid, rebooting DUT {}"
            .format(duthost.hostname))
        reboot(duthost, localhost)
        # Some services are not ready immediately after reboot
        wait_critical_processes(duthost)

    pytest_assert(
        wait_until(FEATURE_STATE_VERIFYING_THRESHOLD_SECS,
                   FEATURE_STATE_VERIFYING_INTERVAL_SECS, 0,
                   verify_features_state, duthost),
        "Not all service states are valid!")

    logger.info("The states of features in 'CONFIG_DB' are all valid.")
def cleanup_read_mac(duthosts, enum_rand_one_per_hwsku_frontend_hostname, localhost):
    """Fixture: restore the pre-test image, delete temp binaries and re-apply any
    backed-up minigraph after the test finishes."""
    duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname]
    list_current_cmd = 'sonic-installer list | grep Current | cut -f2 -d " "'
    initImage = duthost.shell(list_current_cmd)['stdout']

    yield

    # Recover the image to image2 which is the image before doing this case.
    currentImage = duthost.shell(list_current_cmd)['stdout']
    if initImage != currentImage:
        logger.info("Re-install the image: {} to DUT".format(initImage))
        duthost.copy(src=BINARY_FILE_ON_LOCALHOST_2, dest=BINARY_FILE_ON_DUTHOST)
        duthost.shell(
            "sonic-installer install -y {}".format(BINARY_FILE_ON_DUTHOST))
        reboot(duthost, localhost, wait=120)

    logger.info('Remove temporary images')
    duthost.shell("rm -rf {}".format(BINARY_FILE_ON_DUTHOST))
    for local_file in (BINARY_FILE_ON_LOCALHOST_1, BINARY_FILE_ON_LOCALHOST_2):
        localhost.shell("rm -rf {}".format(local_file))

    # If the test backed up the minigraph, put it back and reload from it.
    if duthost.stat(path="/etc/sonic/minigraph.xml.backup")["stat"]["exists"]:
        logger.info("Apply minigraph from backup")
        duthost.shell(
            "mv /etc/sonic/minigraph.xml.backup /etc/sonic/minigraph.xml")
        config_reload(duthost, config_source='minigraph')
def test_pfc_pause_multi_lossy_prio_reboot(ixia_api, ixia_testbed, conn_graph_facts, fanout_graph_facts,
                                           localhost, duthosts, rand_one_dut_hostname,
                                           rand_one_dut_portname_oper_up, lossless_prio_list,
                                           lossy_prio_list, prio_dscp_map, reboot_type):
    """
    Test if PFC will impact multiple lossy priorities after various kinds of reboots

    Args:
        ixia_api (pytest fixture): IXIA session
        ixia_testbed (pytest fixture): L2/L3 config of a T0 testbed
        conn_graph_facts (pytest fixture): connection graph
        fanout_graph_facts (pytest fixture): fanout graph
        localhost (pytest fixture): localhost handle
        duthosts (pytest fixture): list of DUTs
        rand_one_dut_hostname (str): hostname of DUT
        rand_one_dut_portname_oper_up (str): port to test, e.g., 's6100-1|Ethernet0'
        lossless_prio_list (pytest fixture): list of all the lossless priorities
        lossy_prio_list (pytest fixture): list of all the lossy priorities
        prio_dscp_map (pytest fixture): priority vs. DSCP map (key = priority).
        reboot_type (str): reboot type to be issued on the DUT

    Returns:
        N/A
    """
    expected_dut, dut_port = rand_one_dut_portname_oper_up.split('|')
    pytest_require(rand_one_dut_hostname == expected_dut,
                   "Port is not mapped to the expected DUT")

    duthost = duthosts[rand_one_dut_hostname]

    logger.info("Issuing a {} reboot on the dut {}".format(reboot_type, duthost.hostname))
    reboot(duthost, localhost, reboot_type=reboot_type)
    logger.info("Wait until the system is stable")
    pytest_assert(wait_until(300, 20, duthost.critical_services_fully_started),
                  "Not all critical services are fully started")

    # Pause and test all lossy priorities; lossless priorities run as background.
    run_pfc_test(api=ixia_api,
                 testbed_config=ixia_testbed,
                 conn_data=conn_graph_facts,
                 fanout_data=fanout_graph_facts,
                 duthost=duthost,
                 dut_port=dut_port,
                 global_pause=False,
                 pause_prio_list=lossy_prio_list,
                 test_prio_list=lossy_prio_list,
                 bg_prio_list=lossless_prio_list,
                 prio_dscp_map=prio_dscp_map,
                 test_traffic_pause=False)
def check_reset_status_after_reboot(reboot_type, pre_reboot_status, post_reboot_status,
                                    duthost, localhost, construct_url):
    """Verify the RESTAPI reset_status field transitions correctly across a reboot."""
    logger.info("Checking for RESTAPI reset status after " + reboot_type + " reboot")

    r = restapi.post_reset_status(construct_url, '{"reset_status":"false"}')
    pytest_assert(r.status_code == 200)

    r = restapi.get_reset_status(construct_url)
    pytest_assert(r.status_code == 200)
    logger.info(r.json())
    response = r.json()
    pytest_assert(response['reset_status'] == pre_reboot_status)

    # Add extra wait for warm-reboot to ensure warmboot-finalizer is done
    # Otherwise, the warmboot-finalizer will write the testing vnet and vlan config
    # into config_db.json and cause unrecoverable errors
    wait_warmboot_finalizer = reboot_type == 'warm'
    reboot(duthost, localhost, reboot_type,
           wait_warmboot_finalizer=wait_warmboot_finalizer)
    apply_cert_config(duthost)

    r = restapi.get_reset_status(construct_url)
    pytest_assert(r.status_code == 200)
    logger.info(r.json())
    response = r.json()
    pytest_assert(response['reset_status'] == post_reboot_status)
def add_fail_step_to_reboot(localhost, duthosts, rand_one_dut_hostname):
    """Fixture: patch the DUT's warm/fast reboot script so it exits before
    'set +e' (negative test), then revert the script and cold-reboot the DUT
    to restore a clean state.

    Yields:
        add_exit_to_script: callable taking the reboot type string; patches
        the matching reboot script in place on the DUT.
    """
    duthost = duthosts[rand_one_dut_hostname]

    def add_exit_to_script(reboot_type):
        add_exit_to_script.params = tuple()
        if "warm" in reboot_type:
            reboot_script = "warm-reboot"
        elif "fast" in reboot_type:
            reboot_script = "fast-reboot"
        else:
            # Fix: the original fell through with 'reboot_script' unbound,
            # producing an opaque NameError. Fail with a clear message instead.
            raise ValueError("Unsupported reboot type: {}".format(reboot_type))

        cmd_format = "sed -i 's/{}/{}/' {}"
        reboot_script_path = duthost.shell(
            'which {}'.format(reboot_script))['stdout']
        original_line = 'set +e'
        replaced_line = 'exit -1; set +e'
        replace_cmd = cmd_format.format(original_line, replaced_line,
                                        reboot_script_path)
        logging.info(
            "Modify {} to exit before set +e".format(reboot_script_path))
        duthost.shell(replace_cmd)
        # Fix: the original tuple stored reboot_script_path twice; four items
        # carry all the information needed for the revert below.
        add_exit_to_script.params = (cmd_format, replaced_line, original_line,
                                     reboot_script_path)

    yield add_exit_to_script

    if add_exit_to_script.params:
        cmd_format, replaced_line, original_line, reboot_script_path = add_exit_to_script.params
        # Swap the lines back to undo the sed substitution made above.
        replace_cmd = cmd_format.format(replaced_line, original_line,
                                        reboot_script_path)
        logging.info("Revert {} script to original".format(reboot_script_path))
        duthost.shell(replace_cmd)

    # cold reboot DUT to restore any bad state caused by negative test
    reboot(duthost, localhost, reboot_type=REBOOT_TYPE_COLD)
def test_pfcwd_basic_single_lossless_prio_reboot(snappi_api,
                                                 snappi_testbed_config,
                                                 conn_graph_facts,
                                                 fanout_graph_facts,
                                                 localhost,
                                                 duthosts,
                                                 rand_one_dut_hostname,
                                                 rand_one_dut_portname_oper_up,
                                                 rand_one_dut_lossless_prio,
                                                 prio_dscp_map,
                                                 reboot_type,
                                                 trigger_pfcwd):
    """
    Verify PFC watchdog basic test works on a single lossless priority after various types of reboot

    Args:
        snappi_api (pytest fixture): SNAPPI session
        snappi_testbed_config (pytest fixture): testbed configuration information
        conn_graph_facts (pytest fixture): connection graph
        fanout_graph_facts (pytest fixture): fanout graph
        localhost (pytest fixture): localhost handle
        duthosts (pytest fixture): list of DUTs
        rand_one_dut_hostname (str): hostname of DUT
        rand_one_dut_portname_oper_up (str): name of port to test, e.g., 's6100-1|Ethernet0'
        rand_one_dut_lossless_prio (str): name of lossless priority to test, e.g., 's6100-1|3'
        prio_dscp_map (pytest fixture): priority vs. DSCP map (key = priority)
        reboot_type (str): reboot type to be issued on the DUT
        trigger_pfcwd (bool): if PFC watchdog is expected to be triggered

    Returns:
        N/A
    """
    port_dut, dut_port = rand_one_dut_portname_oper_up.split('|')
    prio_dut, lossless_prio = rand_one_dut_lossless_prio.split('|')
    pytest_require(rand_one_dut_hostname == port_dut == prio_dut,
                   "Priority and port are not mapped to the expected DUT")

    duthost = duthosts[rand_one_dut_hostname]
    skip_pfcwd_test(duthost=duthost, trigger_pfcwd=trigger_pfcwd)

    testbed_config, port_config_list = snappi_testbed_config
    lossless_prio = int(lossless_prio)

    logger.info("Issuing a {} reboot on the dut {}".format(reboot_type, duthost.hostname))
    reboot(duthost, localhost, reboot_type=reboot_type)
    logger.info("Wait until the system is stable")
    pytest_assert(wait_until(300, 20, 0, duthost.critical_services_fully_started),
                  "Not all critical services are fully started")

    run_pfcwd_basic_test(api=snappi_api,
                         testbed_config=testbed_config,
                         port_config_list=port_config_list,
                         conn_data=conn_graph_facts,
                         fanout_data=fanout_graph_facts,
                         duthost=duthost,
                         dut_port=dut_port,
                         prio_list=[lossless_prio],
                         prio_dscp_map=prio_dscp_map,
                         trigger_pfcwd=trigger_pfcwd)
def run_reboot_testcase(self):
    """Run the advanced reboot test once; if it failed because the DUT never
    rebooted, reboot externally so the installed image is still exercised,
    then surface the failure.

    Raises:
        ContinuousRebootError: always, when the reboot test did not return True.
    """
    result = self.advancedReboot.runRebootTest()
    if result is not True:
        # Create a failure report
        error = result.get("stderr")
        if error and "DUT is not ready for test" in error:
            # reboot test did not reboot the DUT, reboot externally to verify for image installation
            # Fix: logging.warn is a deprecated alias of logging.warning.
            logging.warning("Reboot test failed to reboot the DUT. Trying again..")
            reboot(self.duthost, self.localhost, reboot_type=self.reboot_type,
                   reboot_helper=None, reboot_kwargs=None)
        raise ContinuousRebootError("Reboot test failed with error: {}".format(error))
def reboot_dut(dut, localhost, cmd):
    """Recover the DUT with a reboot whose type is inferred from *cmd*:
    'warm' or 'fast' substrings select the matching type, anything else
    falls back to a cold reboot."""
    logging.info('Reboot DUT to recover')

    selected_type = (REBOOT_TYPE_WARM if 'warm' in cmd
                     else REBOOT_TYPE_FAST if 'fast' in cmd
                     else REBOOT_TYPE_COLD)

    reboot(dut, localhost, reboot_type=selected_type)
def test_setup_teardown(duthost, localhost):
    """Fixture: disable the SSH timeout for the duration of the test and reboot
    the DUT on both sides of it to guarantee a clean login/connection."""
    disable_ssh_timout(duthost)

    # There must be a better way to do this.
    # Reboot the DUT so that we guaranteed to login without ssh timeout.
    reboot(duthost, localhost, wait=120)

    yield

    enable_ssh_timout(duthost)

    # This test could leave DUT in a failed state or have syslog contaminations.
    # We should be able to cleanup with config reload, but reboot to make sure
    # we reset the connection on duthost for now.
    reboot(duthost, localhost, wait=120)
def do_reboot(duthost, localhost, duthosts, rw_user="", rw_pass=""):
    """Reboot the DUT with retries, then wait for SSH and all critical services.

    On a supervisor node, also waits for every linecard's critical services,
    since rebooting the supervisor reboots the linecards too.
    NOTE(review): rw_user/rw_pass are accepted but unused in this body —
    presumably kept for caller compatibility; confirm.
    """
    # occasionally reboot command fails with some kernel error messages
    # Hence retry if needed.
    #
    wait_time = 20
    retries = 3
    rebooted = False

    for i in range(retries):
        #
        try:
            # Reboot DUT using reboot function instead of using ssh_remote_run.
            # ssh_remote_run gets blocked due to console messages from reboot on DUT
            # Do not wait for ssh as next step checks if ssh is stopped to ensure DUT is
            # is rebooting.
            reboot(duthost, localhost, wait_for_ssh=False)
            # SSH going down confirms the reboot actually started.
            localhost.wait_for(host=duthost.mgmt_ip, port=22, state="stopped",
                               delay=5, timeout=60)
            rebooted = True
            break
        except RunAnsibleModuleFail as e:
            logger.error(
                "DUT did not go down, exception: {} attempt:{}/{}".format(
                    repr(e), i, retries))

    assert rebooted, "Failed to reboot"
    # Wait for SSH to come back, then give the system time to settle.
    localhost.wait_for(host=duthost.mgmt_ip, port=22, state="started",
                       delay=10, timeout=300)
    wait(wait_time, msg="Wait {} seconds for system to be stable.".format(wait_time))
    assert wait_until(300, 20, 0, duthost.critical_services_fully_started), \
        "All critical services should fully started!"

    # If supervisor node is rebooted in chassis, linecards also will reboot.
    # Check if all linecards are back up.
    if duthost.is_supervisor_node():
        for host in duthosts:
            if host != duthost:
                logger.info("checking if {} critical services are up".format(
                    host.hostname))
                assert wait_until(300, 20, 0, host.critical_services_fully_started), \
                    "All critical services of {} should fully started!".format(host.hostname)
def check_reset_status_after_reboot(reboot_type, pre_reboot_status, post_reboot_status,
                                    duthost, localhost, construct_url):
    """Verify the RESTAPI reset_status field transitions correctly across a reboot.

    Consistency fix: like the sibling implementation of this check, wait for the
    warmboot-finalizer on warm reboots; otherwise the finalizer can later write
    the testing vnet/vlan config into config_db.json and cause unrecoverable
    errors.
    """
    logger.info("Checking for RESTAPI reset status after "+reboot_type+" reboot")
    params = '{"reset_status":"false"}'
    r = restapi.post_reset_status(construct_url, params)
    pytest_assert(r.status_code == 200)

    r = restapi.get_reset_status(construct_url)
    pytest_assert(r.status_code == 200)
    logger.info(r.json())
    response = r.json()
    pytest_assert(response['reset_status'] == pre_reboot_status)

    # Only warm reboot needs the extra finalizer wait.
    wait_warmboot_finalizer = reboot_type == 'warm'
    reboot(duthost, localhost, reboot_type,
           wait_warmboot_finalizer=wait_warmboot_finalizer)
    apply_cert_config(duthost)

    r = restapi.get_reset_status(construct_url)
    pytest_assert(r.status_code == 200)
    logger.info(r.json())
    response = r.json()
    pytest_assert(response['reset_status'] == post_reboot_status)
def test_reload_configuration_checks(duthosts, rand_one_dut_hostname, localhost,
                                     conn_graph_facts, xcvr_skip_list):
    """
    @summary: This test case is to test various system checks in config reload
    """
    duthost = duthosts[rand_one_dut_hostname]

    # Only meaningful on images where 'config reload -f' exists.
    if not config_force_option_supported(duthost):
        return

    # Short wait: we want to hit the window where the system is still booting.
    reboot(duthost, localhost, reboot_type="cold", wait=5)

    logging.info("Reload configuration check")
    out = duthost.shell("sudo config reload -y",
                        executable="/bin/bash")
    # config reload command shouldn't work immediately after system reboot
    assert "Retry later" in out['stdout']
    assert wait_until(300, 20, 0, config_system_checks_passed, duthost)

    # After the system checks succeed the config reload command should not throw error
    out = duthost.shell("sudo config reload -y",
                        executable="/bin/bash")
    assert "Retry later" not in out['stdout']

    # Immediately after one config reload command, another shouldn't execute and wait for system checks
    logging.info("Checking config reload after system is up")
    out = duthost.shell("sudo config reload -y",
                        executable="/bin/bash")
    assert "Retry later" in out['stdout']
    assert wait_until(300, 20, 0, config_system_checks_passed, duthost)

    logging.info("Stopping swss docker and checking config reload")
    duthost.shell("sudo service swss stop")

    # Without swss running config reload option should not proceed
    out = duthost.shell("sudo config reload -y",
                        executable="/bin/bash")
    assert "Retry later" in out['stdout']

    # However with force option config reload should proceed
    logging.info("Performing force config reload")
    out = duthost.shell("sudo config reload -y -f",
                        executable="/bin/bash")
    assert "Retry later" not in out['stdout']

    assert wait_until(300, 20, 0, config_system_checks_passed, duthost)
def run_test_in_reinstall_loop(self):
    """Repeatedly reinstall the image (alternating minigraphs between
    iterations) and verify no "can't parse mac address 'None'" error is
    logged, then check MTU and interfaces after each boot."""
    logger.info("Verify MAC in image reinstall loop")
    duthost = self.request.getfixturevalue('duthost')
    localhost = self.request.getfixturevalue('localhost')

    for counter in range(1, self.iteration + 1):
        # Alternate between the two minigraphs on odd/even iterations.
        current_minigraph = self.minigraph1 if counter % 2 == 1 else self.minigraph2

        logger.info("Iteration #{}".format(counter))
        if current_minigraph:
            logger.info(
                "Copy specified minigraph {} to the /etc/sonic folder".
                format(current_minigraph))
            duthost.copy(src=current_minigraph, dest="/etc/sonic/minigraph.xml")

        # Fail the iteration if the MAC-parse error shows up in syslog
        # while the new image is deployed and booted.
        loganalyzer = LogAnalyzer(ansible_host=duthost,
                                  marker_prefix="read_mac_metadata")
        loganalyzer.match_regex = [".*can't parse mac address 'None'*"]

        with loganalyzer:
            self.deploy_image_to_duthost(duthost, counter)
            reboot(duthost, localhost, wait=120)
            logger.info("Wait until system is stable")
            pytest_assert(
                wait_until(300, 20, duthost.critical_services_fully_started),
                "Not all critical services are fully started")

        if current_minigraph:
            logger.info(
                "Execute cli 'config load_minigraph -y' to apply new minigraph"
            )
            duthost.shell("config load_minigraph -y")
            duthost.shell("config save -y")

        logger.info("Remove old (not current) sonic image")
        duthost.reduce_and_add_sonic_images(disk_used_pcent=1)
        self.check_mtu_and_interfaces(duthost)
def tear_down(duthost1, duthost2, ptfhost, localhost, collect):
    """
    Performs tear down of all configuration on PTF and DUTs

    Args:
        duthost1: DUT host object
        duthost2: DUT host object
        ptfhost: PTF host object
        localhost: localhost object
        collect: Fixture which collects main info about link connection
    """
    yield

    # Drop the MCLAG interfaces and any addresses left on the PTF.
    lag_names = collect[duthost1.hostname]['mclag_interfaces']
    ptfhost.shell_cmds(cmds=['ip link del {}'.format(name) for name in lag_names])
    ptfhost.remove_ip_addresses()

    # Restore each DUT's original config_db and reboot to apply it.
    for dut in (duthost1, duthost2):
        dut.shell("mv {} {}".format(CONFIG_DB_BACKUP, CONFIG_DB_TEMP))
        reboot(dut, localhost)
def reboot_and_check(self, interfaces, reboot_type=REBOOT_TYPE_WARM, reboot_kwargs=None):
    """
    Perform the specified type of reboot and check platform status.
    @param interfaces: DUT's interfaces defined by minigraph
    @param reboot_type: The reboot type, pre-defined const that has name convention
                        of REBOOT_TYPE_XXX.
    @param reboot_kwargs: The argument used by reboot_helper
    """
    logging.info("Run %s reboot on DUT" % reboot_type)
    reboot(self.duthost, self.localhost, reboot_type=reboot_type,
           reboot_helper=None, reboot_kwargs=reboot_kwargs)

    # Health checks once the reboot has completed: services, reboot cause,
    # ports/transceivers and neighbor sessions.
    self.check_services()
    self.check_reboot_type(reboot_type)
    self.check_interfaces_and_transceivers(interfaces)
    self.check_neighbors()
def mocker_factory(localhost, duthosts, rand_one_dut_hostname):
    """
    Fixture for thermal control data mocker factory.

    On teardown, deinitializes every created mocker; if that fails, reboots
    the DUT to recover from the mocked state and fails the test.

    :return: A function for creating thermal control related data mocker.
    """
    mockers = []
    duthost = duthosts[rand_one_dut_hostname]

    def _create_mocker(dut, mocker_name):
        """
        Create vendor specified mocker object by mocker name.
        :param dut: DUT object representing a SONiC switch under test.
        :param mocker_name: Name of a mocker type.
        :return: Created mocker instance.
        """
        platform = dut.facts['platform']
        mocker_object = None

        if 'mlnx' in platform:
            from tests.platform_tests.mellanox import mellanox_thermal_control_test_helper
            mocker_type = BaseMocker.get_mocker_type(mocker_name)
            if mocker_type:
                mocker_object = mocker_type(dut)
                mockers.append(mocker_object)
        else:
            # Fix: the original skip message contained a literal, unfilled '%s';
            # interpolate the platform so the skip reason is informative.
            pytest.skip("No mocker defined for this platform %s" % platform)
        return mocker_object

    yield _create_mocker

    try:
        for m in mockers:
            m.deinit()
    except Exception as e:
        # Best-effort recovery: reboot clears whatever the mock left behind.
        reboot(duthost, localhost)
        assert 0, "Caught exception while recovering from mock - {}".format(
            repr(e))
def reboot_tor(duthost, reboot_type=REBOOT_TYPE_COLD):
    # Reboot the given ToR without waiting for SSH to return, and record it so
    # the caller can track/recover it later.
    # NOTE(review): 'torhost' and 'localhost' are not parameters — they must come
    # from the enclosing/module scope; confirm they are in scope where this is used.
    torhost.append(duthost)
    logger.info("Issuing reboot of type {} on {}".format(
        reboot_type, duthost.hostname))
    # Fire-and-forget: do not block on the DUT coming back up.
    reboot(duthost, localhost, reboot_type=reboot_type, wait_for_ssh=False)
def pfcwd_wb_helper(self, fake_storm, testcase_actions, setup_pfc_test, fanout_graph_facts, ptfhost,
                    duthost, localhost, fanouthosts, two_queues):
    """
    Helper method that initializes the vars and starts the test execution

    Args:
        fake_storm(bool): if fake storm is enabled or disabled
        testcase_actions(list): list of actions that the test will go through
        setup_pfc_test(fixture): module scoped autouse fixture
        fanout_graph_facts(fixture): fanout info
        ptfhost(AnsibleHost): PTF instance
        duthost(AnsibleHost): DUT instance
        localhost(AnsibleHost): local instance
        fanouthosts(AnsibleHost): fanout instance
    """
    setup_info = setup_pfc_test
    self.fanout_info = fanout_graph_facts
    self.ptf = ptfhost
    self.dut = duthost
    self.fanout = fanouthosts
    self.timers = setup_info['pfc_timers']
    self.ports = setup_info['selected_test_ports']
    self.neighbors = setup_info['neighbors']
    dut_facts = self.dut.facts
    self.peer_dev_list = dict()
    # Seed varies per calendar day so runs differ day to day but stay
    # reproducible within a day.
    self.seed = int(datetime.datetime.today().day)
    self.two_queues = two_queues
    self.storm_handle = dict()
    # bitmask encodes the previous action via (1 << ACTIONS[action]);
    # bits checked below: 1 = detect, 2 = storm restore, 4 = storm defer.
    bitmask = 0
    storm_deferred = 0
    storm_restored = 0
    self.max_wait = 0
    self.fake_storm = fake_storm
    self.oid_map = dict()
    self.storm_threads = []

    for t_idx, test_action in enumerate(testcase_actions):
        # A warm-reboot action reboots the DUT and moves to the next action;
        # bitmask intentionally keeps the pre-reboot action's bits.
        if 'warm-reboot' in test_action:
            reboot(self.dut, localhost, reboot_type="warm")
            continue

        # one of the factors to decide if the storm needs to be started
        storm_restored = bitmask and (bitmask & 2)
        # if the action prior to the warm-reboot was a 'storm_defer', ensure that all the storms are
        # stopped
        storm_deferred = bitmask and (bitmask & 4)
        if storm_deferred:
            logger.info("Wait for all the deferred storms to start and stop ...")
            join_all(self.storm_threads, self.max_wait)
            self.storm_threads = []
            self.storm_handle = dict()

        bitmask = (1 << ACTIONS[test_action])
        for p_idx, port in enumerate(self.ports):
            logger.info("")
            logger.info("--- Testing on {} ---".format(port))
            self.setup_test_params(port, setup_info['vlan'], p_idx)
            for q_idx, queue in enumerate(self.pfc_wd['queue_indices']):
                # (Re)create storms on the very first action or right after a
                # deferred-storm cycle completed.
                if not t_idx or storm_deferred:
                    if not q_idx:
                        self.storm_handle[port] = dict()
                    self.storm_handle[port][queue] = None

                    # setup the defer parameters if the storm is deferred currently
                    if (bitmask & 4):
                        self.storm_defer_setup()

                    if not self.pfc_wd['fake_storm']:
                        self.storm_setup(port, queue, storm_defer=(bitmask & 4))
                    else:
                        # Fake storm writes counters directly; needs the queue OID.
                        self.oid_map[(port, queue)] = \
                            PfcCmd.get_queue_oid(self.dut, port, queue)

                self.traffic_inst = SendVerifyTraffic(
                    self.ptf, dut_facts['router_mac'], self.pfc_wd, queue)
                self.run_test(port, queue, detect=(bitmask & 1),
                              storm_start=not t_idx or storm_deferred or storm_restored,
                              first_detect_after_wb=(t_idx == 2 and not p_idx
                                                     and not q_idx and not storm_deferred),
                              storm_defer=(bitmask & 4))
def reboot_duts(localhost, node=None, results=None):
    """Reboot *node* via the localhost handle, waiting 60 seconds afterwards.

    'results' is accepted for caller compatibility but unused here.
    """
    reboot(node, localhost, wait=60)
def warm_reboot(duthost, localhost):
    """Issue a SAI warm reboot on the DUT."""
    reboot(duthost, localhost, reboot_type=REBOOT_TYPE_SAI_WARM)