def acl_rules(self, duthost, testbed_devices, setup, acl_table):
    """
    Setup/teardown ACL rules based on test class requirements.

    Generator fixture body: applies the rules (verifying the expected
    "rule created" syslog via LogAnalyzer), yields to the test, then removes
    the rules (verifying the expected "rule removed" syslog).

    :param duthost: DUT host object
    :param testbed_devices: testbed devices dict; only the 'localhost' entry is used here
    :param setup: setup information
    :param acl_table: table creating fixture
    :return: yields once rules are applied; teardown runs after the test
    """
    localhost = testbed_devices['localhost']
    loganalyzer = LogAnalyzer(ansible_host=duthost, marker_prefix='acl_rules')
    loganalyzer.load_common_config()

    try:
        loganalyzer.expect_regex = [LOG_EXPECT_ACL_RULE_CREATE_RE]
        with loganalyzer:
            self.setup_rules(duthost, setup, acl_table)
            self.post_setup_hook(duthost, localhost)
    except LogAnalyzerError as err:
        # cleanup config DB in case of log analysis error
        self.teardown_rules(duthost, setup)
        raise err

    try:
        yield
    finally:
        # teardown always runs, even if the test body raised
        loganalyzer.expect_regex = [LOG_EXPECT_ACL_RULE_REMOVE_RE]
        with loganalyzer:
            self.teardown_rules(duthost, setup)
def acl_table(duthost, acl_table_config):
    """
    Apply the ACL table configuration on the DUT and remove it after tests.

    :param duthost: DUT object
    :param acl_table_config: ACL table configuration dictionary
    :return: forwards acl_table_config to the test
    """
    table_name = acl_table_config['name']
    config_file = acl_table_config['config_file']

    analyzer = LogAnalyzer(ansible_host=duthost, marker_prefix='acl')
    analyzer.load_common_config()

    try:
        analyzer.expect_regex = [LOG_EXPECT_ACL_TABLE_CREATE_RE]
        with analyzer:
            logger.info('creating ACL table: applying {}'.format(config_file))
            # TODO: use sonic config CLI
            duthost.command('sonic-cfggen -j {} --write-to-db'.format(config_file))
    except LogAnalyzerError as err:
        # creation failed -- leave config DB clean before propagating
        duthost.command('config acl remove table {}'.format(table_name))
        raise err

    try:
        yield acl_table_config
    finally:
        analyzer.expect_regex = [LOG_EXPECT_ACL_TABLE_REMOVE_RE]
        with analyzer:
            logger.info('removing ACL table {}'.format(table_name))
            duthost.command('config acl remove table {}'.format(table_name))
        # save cleaned configuration
        duthost.command('config save -y')
def acl(duthost, acl_setup):
    """
    Setup/teardown ACL rules based on test class requirements.

    :param duthost: DUT host object
    :param acl_setup: setup information
    :return: yields once rules are in place; rules removed on teardown
    """
    analyzer = LogAnalyzer(ansible_host=duthost, marker_prefix='acl')
    analyzer.load_common_config()

    try:
        analyzer.expect_regex = [LOG_EXPECT_ACL_RULE_CREATE_RE]
        with analyzer:
            setup_acl_rules(duthost, acl_setup)
    except LogAnalyzerError as err:
        # log analysis failed -- roll back config DB before propagating
        teardown_acl(duthost, acl_setup)
        raise err

    try:
        yield
    finally:
        # always remove the rules, expecting the removal syslog
        analyzer.expect_regex = [LOG_EXPECT_ACL_RULE_REMOVE_RE]
        with analyzer:
            teardown_acl(duthost, acl_setup)
def run_test(self, duthost, storm_hndle, expect_regex, syslog_marker, action):
    """
    Storm generation/restoration on all ports and verification.

    Args:
        duthost (AnsibleHost): DUT instance
        storm_hndle (PFCMultiStorm): class PFCMultiStorm instance
        expect_regex (list): list of expect regexs to be matched in the syslog
        syslog_marker (string): marker prefix written to the syslog
        action (string): storm/restore action
    """
    analyzer = LogAnalyzer(ansible_host=duthost, marker_prefix=syslog_marker)

    # benign PFC watchdog messages should not fail log analysis
    ignore_file = os.path.join(TEMPLATES_DIR, "ignore_pfc_wd_messages")
    analyzer.ignore_regex.extend(analyzer.parse_regexp_file(src=ignore_file))

    analyzer.expect_regex = list(expect_regex)
    analyzer.match_regex = []

    with analyzer:
        if action == "storm":
            storm_hndle.start_pfc_storm()
        elif action == "restore":
            storm_hndle.stop_pfc_storm()
        # give the syslog time to receive the expected messages
        time.sleep(5)
def test_thermal_control_fan_status(testbed_devices, mocker_factory):
    """
    @summary: Make FAN absence, over speed and under speed, check logs and LED color.
    """
    dut = testbed_devices["dut"]
    loganalyzer = LogAnalyzer(ansible_host=dut, marker_prefix='thermal_control')
    loganalyzer.load_common_config()

    # run the whole scenario with a valid thermal policy file installed
    with ThermalPolicyFileContext(dut, THERMAL_POLICY_VALID_FILE):
        fan_mocker = mocker_factory(dut, 'FanStatusMocker')
        if fan_mocker is None:
            pytest.skip("No FanStatusMocker for %s, skip rest of the testing in this case" % dut.facts['asic_type'])

        logging.info('Mock FAN status data...')
        fan_mocker.mock_data()  # make data random
        # daemon restart is needed so it picks up the mocked FAN data
        restart_thermal_control_daemon(dut)
        wait_until(THERMAL_CONTROL_TEST_WAIT_TIME, THERMAL_CONTROL_TEST_CHECK_INTERVAL, fan_mocker.check_all_fan_speed, 60)
        check_thermal_algorithm_status(dut, mocker_factory, False)

        single_fan_mocker = mocker_factory(dut, 'SingleFanMocker')
        time.sleep(THERMAL_CONTROL_TEST_WAIT_TIME)

        # absence/presence checks only make sense for removable FANs
        if single_fan_mocker.is_fan_removable():
            loganalyzer.expect_regex = [LOG_EXPECT_FAN_REMOVE_RE]
            with loganalyzer:
                logging.info('Mocking an absence FAN...')
                single_fan_mocker.mock_absence()
                check_cli_output_with_mocker(dut, single_fan_mocker, CMD_PLATFORM_FANSTATUS, THERMAL_CONTROL_TEST_WAIT_TIME)

            loganalyzer.expect_regex = [LOG_EXPECT_FAN_REMOVE_CLEAR_RE]
            with loganalyzer:
                logging.info('Make the absence FAN back to presence...')
                single_fan_mocker.mock_presence()
                check_cli_output_with_mocker(dut, single_fan_mocker, CMD_PLATFORM_FANSTATUS, THERMAL_CONTROL_TEST_WAIT_TIME)

        # over-speed: expect warning log, then the clear log after recovery
        loganalyzer.expect_regex = [LOG_EXPECT_FAN_OVER_SPEED_RE]
        with loganalyzer:
            logging.info('Mocking an over speed FAN...')
            single_fan_mocker.mock_over_speed()
            check_cli_output_with_mocker(dut, single_fan_mocker, CMD_PLATFORM_FANSTATUS, THERMAL_CONTROL_TEST_WAIT_TIME)

        loganalyzer.expect_regex = [LOG_EXPECT_FAN_OVER_SPEED_CLEAR_RE]
        with loganalyzer:
            logging.info('Make the over speed FAN back to normal...')
            single_fan_mocker.mock_normal_speed()
            check_cli_output_with_mocker(dut, single_fan_mocker, CMD_PLATFORM_FANSTATUS, THERMAL_CONTROL_TEST_WAIT_TIME)

        # under-speed: same pattern as over-speed
        loganalyzer.expect_regex = [LOG_EXPECT_FAN_UNDER_SPEED_RE]
        with loganalyzer:
            logging.info('Mocking an under speed FAN...')
            single_fan_mocker.mock_under_speed()
            check_cli_output_with_mocker(dut, single_fan_mocker, CMD_PLATFORM_FANSTATUS, THERMAL_CONTROL_TEST_WAIT_TIME)

        loganalyzer.expect_regex = [LOG_EXPECT_FAN_UNDER_SPEED_CLEAR_RE]
        with loganalyzer:
            logging.info('Make the under speed FAN back to normal...')
            single_fan_mocker.mock_normal_speed()
            check_cli_output_with_mocker(dut, single_fan_mocker, CMD_PLATFORM_FANSTATUS, THERMAL_CONTROL_TEST_WAIT_TIME)
def check_thermal_control_load_invalid_file(duthost, file_name):
    """
    Load an invalid thermal policy file; verify the daemon restarts and the
    expected "policy file invalid" error is written to syslog.

    :param duthost: DUT host object
    :param file_name: thermal policy file to install for the duration of the check
    """
    analyzer = LogAnalyzer(ansible_host=duthost, marker_prefix='thermal_control')
    with ThermalPolicyFileContext(duthost, file_name):
        analyzer.expect_regex = [LOG_EXPECT_POLICY_FILE_INVALID]
        with analyzer:
            restart_thermal_control_daemon(duthost)
def test_dynamic_minimum_table(duthost, mocker_factory):
    """
    Exercise the dynamic minimum cooling-level table: for each air flow
    direction (random order), mock a random temperature with a random trust
    state, then the opposite trust state, and expect the corresponding
    "change minimum cooling level" syslog each time.
    """
    directions = ['p2c', 'c2p', 'unk']
    max_temperature = 45000  # 45 C

    cooling_cur_state = get_cooling_cur_state(duthost)
    if cooling_cur_state >= COOLING_CUR_STATE_THRESHOLD:
        pytest.skip('The cooling level {} is higher than threshold {}.'.format(cooling_cur_state, COOLING_CUR_STATE_THRESHOLD))

    mocker = mocker_factory(duthost, 'MinTableMocker')
    analyzer = LogAnalyzer(ansible_host=duthost, marker_prefix='thermal_control')
    analyzer.load_common_config()

    for _ in range(len(directions)):
        # pick a remaining direction at random (without replacement)
        air_flow_dir = directions.pop(random.randint(0, len(directions) - 1))

        temperature = random.randint(0, max_temperature)
        trust_state = random.randint(0, 1) == 1

        logging.info('Testing with air_flow_dir={}, temperature={}, trust_state={}'.format(air_flow_dir, temperature, trust_state))
        expected_level = mocker.get_expect_cooling_level(air_flow_dir, temperature, trust_state)
        analyzer.expect_regex = [LOG_EXPECT_CHANGE_MIN_COOLING_LEVEL_RE.format(expected_level)]
        with analyzer:
            mocker.mock_min_table(air_flow_dir, temperature, trust_state)
            time.sleep(THERMAL_CONTROL_TEST_WAIT_TIME)

        # repeat with a fresh temperature and the opposite trust state
        temperature = random.randint(0, max_temperature)
        logging.info('Testing with air_flow_dir={}, temperature={}, trust_state={}'.format(air_flow_dir, temperature, not trust_state))
        expected_level = mocker.get_expect_cooling_level(air_flow_dir, temperature, not trust_state)
        analyzer.expect_regex = [LOG_EXPECT_CHANGE_MIN_COOLING_LEVEL_RE.format(expected_level)]
        with analyzer:
            mocker.mock_min_table(air_flow_dir, temperature, not trust_state)
            time.sleep(THERMAL_CONTROL_TEST_WAIT_TIME)
def mirroring(duthost, neighbor_ip, mirror_setup, gre_version):
    """
    Configure an everflow mirror session plus its ACL rules on the DUT,
    yield to the test, then tear the mirroring config down (expecting the
    mirror-session-removed syslog).

    :param duthost: DUT host
    :param neighbor_ip: mirror session destination IP
    :param mirror_setup: mirror_setup fixture
    :param gre_version: GRE version selector (unused in this body -- presumably
        consumed by a dependent fixture; verify against callers)
    """
    logger.info("Adding mirror_session to DUT")
    acl_rule_file = os.path.join(mirror_setup['dut_tmp_dir'], ACL_RULE_PERSISTENT_FILE)

    extra_vars = {
        'acl_table_name': EVERFLOW_TABLE_NAME,
    }
    logger.info('Extra variables for MIRROR table:\n{}'.format(pprint.pformat(extra_vars)))
    duthost.host.options['variable_manager'].extra_vars.update(extra_vars)

    # render the persistent ACL rule template onto the DUT
    duthost.template(src=os.path.join(TEMPLATE_DIR, ACL_RULE_PERSISTENT_TEMPLATE), dest=acl_rule_file)

    session_args = (SESSION_INFO['name'], SESSION_INFO['src_ip'], neighbor_ip,
                    SESSION_INFO['dscp'], SESSION_INFO['ttl'],
                    SESSION_INFO['gre'], SESSION_INFO['queue'])
    duthost.command('config mirror_session add {} {} {} {} {} {} {}'.format(*session_args))

    logger.info('Loading acl mirror rules ...')
    load_rule_cmd = "acl-loader update full {} --session_name={}".format(acl_rule_file, SESSION_INFO['name'])
    duthost.command(load_rule_cmd)

    try:
        yield
    finally:
        # tear down even if the test failed; expect the session-removed syslog
        analyzer = LogAnalyzer(ansible_host=duthost, marker_prefix='acl')
        analyzer.load_common_config()
        analyzer.expect_regex = [LOG_EXCEPT_MIRROR_SESSION_REMOVE]
        with analyzer:
            teardown_mirroring(duthost, mirror_setup['dut_tmp_dir'])
def execute_test(self, duthost, syslog_marker, ignore_regex=None, expect_regex=None, expect_errors=False):
    """
    Load the template named after *syslog_marker* on the DUT and verify the
    expected syslog behavior.

    Args:
        duthost (AnsibleHost): instance
        syslog_marker (string): marker prefix name to be inserted in the syslog
        ignore_regex (string): file containing regexs to be ignored by loganalyzer
        expect_regex (string): regex pattern that is expected to be present in the syslog
        expect_errors (bool): if the test expects an error msg in the syslog or not. Default: False

    Returns:
        None
    """
    analyzer = LogAnalyzer(ansible_host=duthost, marker_prefix=syslog_marker)

    if ignore_regex:
        ignore_file = os.path.join(TEMPLATES_DIR, ignore_regex)
        analyzer.ignore_regex.extend(analyzer.parse_regexp_file(src=ignore_file))

    if expect_regex:
        analyzer.expect_regex = list(expect_regex)
        analyzer.match_regex = []

    # fail log analysis only when errors are NOT expected
    with analyzer(fail=not expect_errors):
        cmd = "sonic-cfggen -j {}/{}.json --write-to-db".format(DUT_RUN_DIR, syslog_marker)
        out = duthost.command(cmd)
        pytest_assert(out["rc"] == 0, "Failed to execute cmd {}: Error: {}".format(cmd, out["stderr"]))
def test_check_sfp_status_and_configure_sfp(testbed_devices, conn_graph_facts):
    """
    @summary: Check SFP status and configure SFP

    This case is to use the sfputil tool and show command to check SFP status and configure SFP. Currently the
    only configuration is to reset SFP. Commands to be tested:
    * sfputil show presence
    * show interface transceiver presence
    * sfputil show eeprom
    * show interface transceiver eeprom
    * sfputil reset <interface name>
    """
    ans_host = testbed_devices["dut"]

    # On Mellanox, SFP resets emit kernel EEPROM query errors; mark the syslog
    # now and analyze at the end with that message ignored.
    # NOTE(review): trailing '*' in "failed*" makes the regex 'faile' + 'd*';
    # it still matches "failed" but '.*' was probably intended -- confirm.
    if ans_host.facts["asic_type"] in ["mellanox"]:
        loganalyzer = LogAnalyzer(ansible_host=ans_host, marker_prefix='sfp_cfg')
        loganalyzer.load_common_config()
        loganalyzer.ignore_regex.append("kernel.*Eeprom query failed*")
        marker = loganalyzer.init()

    cmd_sfp_presence = "sudo sfputil show presence"
    cmd_sfp_eeprom = "sudo sfputil show eeprom"
    cmd_sfp_reset = "sudo sfputil reset"
    cmd_xcvr_presence = "show interface transceiver presence"
    cmd_xcvr_eeprom = "show interface transceiver eeprom"

    # logical-port -> physical-port map; used to avoid resetting the same
    # physical port more than once below
    portmap = get_port_map(testbed_devices)
    logging.info("Got portmap {}".format(portmap))

    logging.info("Check output of '%s'" % cmd_sfp_presence)
    sfp_presence = ans_host.command(cmd_sfp_presence)
    parsed_presence = parse_output(sfp_presence["stdout_lines"][2:])  # skip 2 header lines
    for intf in conn_graph_facts["device_conn"]:
        assert intf in parsed_presence, "Interface is not in output of '%s'" % cmd_sfp_presence
        assert parsed_presence[intf] == "Present", "Interface presence is not 'Present'"

    logging.info("Check output of '%s'" % cmd_xcvr_presence)
    xcvr_presence = ans_host.command(cmd_xcvr_presence)
    parsed_presence = parse_output(xcvr_presence["stdout_lines"][2:])
    for intf in conn_graph_facts["device_conn"]:
        assert intf in parsed_presence, "Interface is not in output of '%s'" % cmd_xcvr_presence
        assert parsed_presence[intf] == "Present", "Interface presence is not 'Present'"

    logging.info("Check output of '%s'" % cmd_sfp_eeprom)
    sfp_eeprom = ans_host.command(cmd_sfp_eeprom)
    parsed_eeprom = parse_eeprom(sfp_eeprom["stdout_lines"])
    for intf in conn_graph_facts["device_conn"]:
        assert intf in parsed_eeprom, "Interface is not in output of 'sfputil show eeprom'"
        assert parsed_eeprom[intf] == "SFP EEPROM detected"

    logging.info("Check output of '%s'" % cmd_xcvr_eeprom)
    xcvr_eeprom = ans_host.command(cmd_xcvr_eeprom)
    parsed_eeprom = parse_eeprom(xcvr_eeprom["stdout_lines"])
    for intf in conn_graph_facts["device_conn"]:
        assert intf in parsed_eeprom, "Interface is not in output of '%s'" % cmd_xcvr_eeprom
        assert parsed_eeprom[intf] == "SFP EEPROM detected"

    # reset each physical SFP exactly once
    logging.info("Test '%s <interface name>'" % cmd_sfp_reset)
    tested_physical_ports = set()
    for intf in conn_graph_facts["device_conn"]:
        phy_intf = portmap[intf][0]
        if phy_intf in tested_physical_ports:
            logging.info("skip tested SFPs {} to avoid repeating operating physical interface {}".format(intf, phy_intf))
            continue
        tested_physical_ports.add(phy_intf)
        logging.info("resetting {} physical interface {}".format(intf, phy_intf))
        reset_result = ans_host.command("%s %s" % (cmd_sfp_reset, intf))
        assert reset_result["rc"] == 0, "'%s %s' failed" % (cmd_sfp_reset, intf)
        time.sleep(5)

    logging.info("Wait some time for SFP to fully recover after reset")
    time.sleep(60)

    logging.info("Check sfp presence again after reset")
    sfp_presence = ans_host.command(cmd_sfp_presence)
    parsed_presence = parse_output(sfp_presence["stdout_lines"][2:])
    for intf in conn_graph_facts["device_conn"]:
        assert intf in parsed_presence, "Interface is not in output of '%s'" % cmd_sfp_presence
        assert parsed_presence[intf] == "Present", "Interface presence is not 'Present'"

    logging.info("Check interface status")
    mg_facts = ans_host.minigraph_facts(host=ans_host.hostname)["ansible_facts"]
    intf_facts = ans_host.interface_facts(up_ports=mg_facts["minigraph_ports"])["ansible_facts"]
    assert len(intf_facts["ansible_interface_link_down_ports"]) == 0, \
        "Some interfaces are down: %s" % str(intf_facts["ansible_interface_link_down_ports"])

    # analyze syslog gathered since the marker (known EEPROM errors ignored)
    if ans_host.facts["asic_type"] in ["mellanox"]:
        loganalyzer.analyze(marker)
def test_check_sfp_low_power_mode(testbed_devices, conn_graph_facts):
    """
    @summary: Check SFP low power mode

    This case is to use the sfputil tool command to check and set SFP low power mode
    * sfputil show lpmode
    * sfputil lpmode off
    * sfputil lpmode on
    """
    ans_host = testbed_devices["dut"]

    # On Mellanox, lpmode changes can emit benign EEPROM query errors; mark
    # the syslog now and analyze at the end with that message ignored.
    if ans_host.facts["asic_type"] in ["mellanox"]:
        loganalyzer = LogAnalyzer(ansible_host=ans_host, marker_prefix='sfp_lpm')
        loganalyzer.load_common_config()
        loganalyzer.ignore_regex.append("Eeprom query failed")
        marker = loganalyzer.init()

    cmd_sfp_presence = "sudo sfputil show presence"
    cmd_sfp_show_lpmode = "sudo sfputil show lpmode"
    cmd_sfp_set_lpmode = "sudo sfputil lpmode"

    # logical-port -> physical-port map; used to touch each physical port once
    portmap = get_port_map(testbed_devices)
    logging.info("Got portmap {}".format(portmap))

    logging.info("Check output of '%s'" % cmd_sfp_show_lpmode)
    lpmode_show = ans_host.command(cmd_sfp_show_lpmode)
    parsed_lpmode = parse_output(lpmode_show["stdout_lines"][2:])  # skip 2 header lines
    original_lpmode = copy.deepcopy(parsed_lpmode)  # snapshot so we can restore at the end
    for intf in conn_graph_facts["device_conn"]:
        assert intf in parsed_lpmode, "Interface is not in output of '%s'" % cmd_sfp_show_lpmode
        assert parsed_lpmode[intf].lower() == "on" or parsed_lpmode[intf].lower() == "off", "Unexpected SFP lpmode"

    # toggle lpmode on each physical port once
    logging.info("Try to change SFP lpmode")
    tested_physical_ports = set()
    for intf in conn_graph_facts["device_conn"]:
        phy_intf = portmap[intf][0]
        if phy_intf in tested_physical_ports:
            logging.info("skip tested SFPs {} to avoid repeating operating physical interface {}".format(intf, phy_intf))
            continue
        tested_physical_ports.add(phy_intf)
        logging.info("setting {} physical interface {}".format(intf, phy_intf))
        new_lpmode = "off" if original_lpmode[intf].lower() == "on" else "on"  # flip current mode
        lpmode_set_result = ans_host.command("%s %s %s" % (cmd_sfp_set_lpmode, new_lpmode, intf))
        assert lpmode_set_result["rc"] == 0, "'%s %s %s' failed" % (cmd_sfp_set_lpmode, new_lpmode, intf)
        time.sleep(10)

    logging.info("Check SFP lower power mode again after changing SFP lpmode")
    lpmode_show = ans_host.command(cmd_sfp_show_lpmode)
    parsed_lpmode = parse_output(lpmode_show["stdout_lines"][2:])
    for intf in conn_graph_facts["device_conn"]:
        assert intf in parsed_lpmode, "Interface is not in output of '%s'" % cmd_sfp_show_lpmode
        assert parsed_lpmode[intf].lower() == "on" or parsed_lpmode[intf].lower() == "off", "Unexpected SFP lpmode"

    # restore the original lpmode on each physical port
    logging.info("Try to change SFP lpmode")
    tested_physical_ports = set()
    for intf in conn_graph_facts["device_conn"]:
        phy_intf = portmap[intf][0]
        if phy_intf in tested_physical_ports:
            logging.info("skip tested SFPs {} to avoid repeating operating physical interface {}".format(intf, phy_intf))
            continue
        tested_physical_ports.add(phy_intf)
        logging.info("restoring {} physical interface {}".format(intf, phy_intf))
        new_lpmode = original_lpmode[intf].lower()
        lpmode_set_result = ans_host.command("%s %s %s" % (cmd_sfp_set_lpmode, new_lpmode, intf))
        assert lpmode_set_result["rc"] == 0, "'%s %s %s' failed" % (cmd_sfp_set_lpmode, new_lpmode, intf)
        time.sleep(10)

    logging.info("Check SFP lower power mode again after changing SFP lpmode")
    lpmode_show = ans_host.command(cmd_sfp_show_lpmode)
    parsed_lpmode = parse_output(lpmode_show["stdout_lines"][2:])
    for intf in conn_graph_facts["device_conn"]:
        assert intf in parsed_lpmode, "Interface is not in output of '%s'" % cmd_sfp_show_lpmode
        assert parsed_lpmode[intf].lower() == "on" or parsed_lpmode[intf].lower() == "off", "Unexpected SFP lpmode"

    logging.info("Check sfp presence again after setting lpmode")
    sfp_presence = ans_host.command(cmd_sfp_presence)
    parsed_presence = parse_output(sfp_presence["stdout_lines"][2:])
    for intf in conn_graph_facts["device_conn"]:
        assert intf in parsed_presence, "Interface is not in output of '%s'" % cmd_sfp_presence
        assert parsed_presence[intf] == "Present", "Interface presence is not 'Present'"

    logging.info("Check interface status")
    mg_facts = ans_host.minigraph_facts(host=ans_host.hostname)["ansible_facts"]
    intf_facts = ans_host.interface_facts(up_ports=mg_facts["minigraph_ports"])["ansible_facts"]
    assert len(intf_facts["ansible_interface_link_down_ports"]) == 0, \
        "Some interfaces are down: %s" % str(intf_facts["ansible_interface_link_down_ports"])

    # analyze syslog gathered since the marker (known EEPROM errors ignored)
    if ans_host.facts["asic_type"] in ["mellanox"]:
        loganalyzer.analyze(marker)
def test_turn_on_off_psu_and_check_psustatus(duthost, psu_controller):
    """
    @summary: Turn off/on PSU and check PSU status using 'show platform psustatus'
    """
    loganalyzer = LogAnalyzer(
        ansible_host=duthost, marker_prefix='turn_on_off_psu_and_check_psustatus')
    loganalyzer.load_common_config()
    # dps460 sensor read errors are expected while a PSU is powered off
    loganalyzer.ignore_regex.append(
        "Error getting sensor data: dps460.*Kernel interface error")
    marker = loganalyzer.init()

    # CLI status lines look like "PSU 1  OK" / "PSU 2  NOT PRESENT"
    psu_line_pattern = re.compile(r"PSU\s+\d+\s+(OK|NOT OK|NOT PRESENT)")

    psu_num = get_psu_num(duthost)
    if psu_num < 2:
        pytest.skip(
            "At least 2 PSUs required for rest of the testing in this case")

    logging.info("Create PSU controller for testing")
    psu_ctrl = psu_controller
    if psu_ctrl is None:
        pytest.skip(
            "No PSU controller for %s, skip rest of the testing in this case"
            % duthost.hostname)

    logging.info(
        "To avoid DUT being shutdown, need to turn on PSUs that are not powered"
    )
    turn_all_psu_on(psu_ctrl)

    logging.info("Initialize test results")
    psu_test_results = {}
    if not check_all_psu_on(duthost, psu_test_results):
        pytest.skip(
            "Some PSU are still down, skip rest of the testing in this case")

    # BUGFIX: the old message read "In consistent ..." and interpolated the
    # undefined name `cmd_num_psu`, so a failing assert raised NameError
    # instead of a readable AssertionError.
    assert len(psu_test_results.keys()) == psu_num, \
        "Inconsistent PSU number: '%s' reports %d PSUs, expected %d" % \
        (CMD_PLATFORM_PSUSTATUS, len(psu_test_results.keys()), psu_num)

    logging.info("Start testing turn off/on PSUs")
    all_psu_status = psu_ctrl.get_psu_status()
    for psu in all_psu_status:
        psu_under_test = None

        logging.info("Turn off PSU %s" % str(psu["psu_id"]))
        psu_ctrl.turn_off_psu(psu["psu_id"])
        time.sleep(5)  # give the platform time to report the new state

        # exactly one PSU should now show a non-OK status
        cli_psu_status = duthost.command(CMD_PLATFORM_PSUSTATUS)
        for line in cli_psu_status["stdout_lines"][2:]:
            assert psu_line_pattern.match(line), "Unexpected PSU status output"
            fields = line.split()
            if fields[2] != "OK":
                psu_under_test = fields[1]  # fields: ["PSU", <id>, <status>...]
            check_vendor_specific_psustatus(duthost, line)
        assert psu_under_test is not None, "No PSU is turned off"

        logging.info("Turn on PSU %s" % str(psu["psu_id"]))
        psu_ctrl.turn_on_psu(psu["psu_id"])
        time.sleep(5)

        # the PSU we powered off must be back to OK
        cli_psu_status = duthost.command(CMD_PLATFORM_PSUSTATUS)
        for line in cli_psu_status["stdout_lines"][2:]:
            assert psu_line_pattern.match(line), "Unexpected PSU status output"
            fields = line.split()
            if fields[1] == psu_under_test:
                assert fields[2] == "OK", "Unexpected PSU status after turned it on"
            check_vendor_specific_psustatus(duthost, line)

        psu_test_results[psu_under_test] = True

    for psu in psu_test_results:
        assert psu_test_results[psu], "Test psu status of PSU %s failed" % psu

    # verify no unexpected syslog errors were emitted during the test
    loganalyzer.analyze(marker)