def parse_equipment_catalog(self):
    """
    Parse the equipment catalog XML file into a dictionary.

    Reads the catalog file located at the configured path, validates the
    expected 'Equipment_Catalog' root node, loads every <EquipmentType>
    entry and stores the resulting dictionary on the global configuration.

    :raise AcsConfigException: if the catalog file is missing, cannot be
        parsed as XML, or lacks the expected root node.
    """
    catalog_path = os.path.join(self._equipment_catalog_path,
                                self._equipment_catalog_name)

    # Fail early when the catalog file is not present on disk
    if not os.path.isfile(catalog_path):
        raise AcsConfigException(
            AcsConfigException.FILE_NOT_FOUND,
            "Equipment catalog file : %s does not exist" % catalog_path)

    try:
        catalog_doc = et.parse(catalog_path)
    except et.XMLSyntaxError:
        _, error_msg, _ = Utils.get_exception_info()
        raise AcsConfigException(AcsConfigException.XML_PARSING_ERROR,
                                 "{}; {}".format(catalog_path, error_msg))

    root_node = catalog_doc.xpath('/Equipment_Catalog')
    if not root_node:
        raise AcsConfigException(AcsConfigException.FILE_NOT_FOUND,
                                 "Wrong XML: could not find expected document root node: "
                                 "'Equipment_Catalog'")

    # Build the equipment type dictionary from every <EquipmentType> node
    equipment_types = {}
    for eq_type in root_node[0].xpath('./EquipmentType'):
        equipment_types.update(self._load_equipment_type(eq_type))

    self._global_config.equipmentCatalog = equipment_types.copy()
def _load_bench_conf(self):
    """
    Load the Bench Configuration from its XML representation into a dict.

    Parses the bench configuration file into ``self._bench_tree_node``,
    then extracts the bench device node parameters into
    ``self._bench_conf``. The device model name is taken from the device
    configuration when available; when unset or generic ('multi'), it
    falls back on the ``deviceModel`` attribute of the bench device node.
    """
    self._bench_conf = AttributeDict()
    # New way of defining device parameters == as defined in the device catalog
    try:
        self._bench_tree_node = etree.parse(self.bench_conf_filename)
    except XMLParsingErrors:
        _error("Corrupted file {0}: {1}".format(
            self.bench_conf_filename, Utils.get_exception_info()[1]))

    node = self.bench_device_node
    if node is not None and len(node) > 0:
        # To load proper device config, need to get device model name
        device_conf = self.device_conf
        if device_conf:
            self._device_model_name = device_conf.get("DeviceModel")
        # Fallback when the model name is unset or generic.
        # (The original tuple listed the empty string twice, once as ''
        # and once as ""; the redundant duplicate literal was removed —
        # same membership behavior.)
        if self._device_model_name in ('', None, 'multi'):
            self._device_model_name = node[0].get("deviceModel")
        conf = self._parse_bench_node(node[0])
        if conf:
            self._bench_conf.update(conf)
def _parse_campaign_file_path(self, campaign_file_path):
    """
    Parse the campaign configuration XML file at the given path.

    :param campaign_file_path: path of the campaign XML file to parse
    :return: the parsed XML document
    :raise AcsConfigException: when the file cannot be parsed as XML
    """
    # XML parser parses the campaign config file
    try:
        parsed_doc = et.parse(campaign_file_path)
    except et.XMLSyntaxError:
        _, error_msg, _ = Utils.get_exception_info()
        self._logger.error(
            "Campaign file is corrupted : parsing-reading issue in {0}".
            format(campaign_file_path))
        raise AcsConfigException(AcsConfigException.XML_PARSING_ERROR,
                                 error_msg)
    return parsed_doc
def _load_device_conf(self):
    """
    Load the Device Configuration from its XML representation into a dict.

    Parses the device configuration file into ``self._device_tree_node``,
    processes the device node, then extracts the schema associated with
    the device models catalog into ``self._device_schema``.
    """
    try:
        self._device_tree_node = etree.parse(self.device_conf_filename)
    except XMLParsingErrors:
        corruption_msg = "Corrupted file {0}: {1}".format(
            self.device_conf_filename, Utils.get_exception_info()[1])
        _error(corruption_msg)

    self._parse_device_node()

    # Resolve the schema describing the device model catalog entries
    self._device_schema = self.extract_schema(
        self.device_root_node,
        schema_folder=Paths.FWK_DEVICE_MODELS_CATALOG,
        file_only=False)
def execute(self, is_arg_checking=True, **kwargs):
    """
    This function is the entry point of ACS solution when called by Test Runner.
    It parses the arguments given to CampaignEngine,
    parses XML files associated & read the campaign content
    for the TestCaseManager to execute.

    :param is_arg_checking: Whether or not ACS arguments are checked
    :type is_arg_checking: bool

    :param kwargs: ACS arguments
    :type kwargs: dict

    :return: the campaign global results (verdict, status, dut state)
    """
    error = None
    global_results = Util.ACSResult(verdict=Util.ExitCode.FAILURE)
    execution_iteration = 1
    # Index of test case inside loop on campaign
    tc_order = 1
    stop_execution = False
    verdicts = {}
    acs_outcome_verdicts = {}
    acs_outcome_status = False
    self.__campaign_metrics.campaign_start_datetime = datetime.now()
    try:
        # Validate the incoming ACS arguments before anything else
        arg_checker = ArgChecker(**kwargs)
        if is_arg_checking:
            error = arg_checker.check_args(False)
            if error:
                raise AcsBaseException("INVALID_PARAMETER", error)

        params = arg_checker.args
        campaign_name = params["campaign_name"]
        params["campaign_relative_path"] = os.path.dirname(campaign_name)
        execution_request_nb = params["execution_request_nb"]
        random_mode = params["random_mode"]
        device_parameters = params["device_parameter_list"]
        Paths.FLASH_FILES = params["flash_file_path"]

        # Log acs param
        self.__log_acs_param(params)

        # Check if device parameters is a list
        if not isinstance(device_parameters, list):
            device_parameters = []

        # Set test campaign status : campaign is in setup phase
        global_results.status = Util.Status.INIT
        setup_status = self._setup(**params)

        # setup successfully completed (a None status means success)
        if setup_status is None:
            # Cap the number of test cases to execute at the framework limit
            total_tc_to_execute = execution_request_nb * len(self.__test_case_conf_list)
            if total_tc_to_execute > MAX_TC_NB_AUTHORIZED:
                self.__logger.warning("Total number of TCs ({0}) exceeds maximum number authorized ({1})."
                                      .format(total_tc_to_execute, MAX_TC_NB_AUTHORIZED))
                self.__logger.warning("Only first {0} TCs will be executed".format(MAX_TC_NB_AUTHORIZED))
                total_tc_to_execute = MAX_TC_NB_AUTHORIZED
            self.__campaign_metrics.total_tc_count = total_tc_to_execute

            # Send live report if enabled
            self._send_create_testcase_info(execution_request_nb)

            # Log extra acs param for metrics
            self._log_acs_param_extra(params)

            # Execute test cases of campaign
            # Set test campaign status : campaign is starting
            global_results.status = Util.Status.ONGOING
            # Repeat the whole test case list execution_request_nb times,
            # unless a test case execution requests a stop
            while execution_iteration <= execution_request_nb and not stop_execution:
                stop_execution, tc_order = self._execute_test_cases(verdicts,
                                                                    tc_order,
                                                                    acs_outcome_verdicts)
                execution_iteration += 1
                if random_mode:
                    self.__test_case_conf_list = self.__randomize_test_cases(self.__test_case_conf_list)
                if tc_order > MAX_TC_NB_AUTHORIZED:
                    break

            if not stop_execution:
                LOGGER_FWK_STATS.info("event=STOP_ON_EOC")
                # Set test campaign status : campaign is completed
                global_results.status = Util.Status.COMPLETED
            else:
                # Set test campaign status : campaign has been interrupted during test suite execution
                global_results.status = Util.Status.ABORTED
        # Exception occurred during setup
        else:
            self.__log_stop_campaign(setup_status)
            # Set test campaign status
            global_results.status = Util.Status.ABORTED

        (status, acs_outcome_status) = self._all_tests_succeed(verdicts, acs_outcome_verdicts)
        if status:
            global_results.verdict = Util.ExitCode.SUCCESS
    except (KeyboardInterrupt):
        LOGGER_FWK_STATS.info("event=STOP_ON_USER_INTERRUPT")
        self.__log_stop_campaign("USER INTERRUPTION")
        # Set test campaign status
        global_results.status = Util.Status.ABORTED
    except (SystemExit):
        LOGGER_FWK_STATS.info("event=STOP_ON_SYSTEM INTERRUPT")
        self.__log_stop_campaign("SYSTEM INTERRUPTION")
        # Set test campaign status
        global_results.status = Util.Status.ABORTED
    except Exception as exception:
        # ACS-specific exceptions carry a user-presentable message;
        # generic exceptions are decoded via the framework helper.
        if isinstance(exception, AcsBaseException):
            error = str(exception)
            LOGGER_FWK_STATS.info("event=STOP_ON_EXCEPTION; error={0}".format(error))
            if self.__logger is not None:
                self.__logger.error(error)
            else:
                print(error)
        else:
            ex_code, ex_msg, ex_tb = Util.get_exception_info(exception)
            LOGGER_FWK_STATS.info("event=STOP_ON_EXCEPTION; error={0}".format(ex_msg))
            if self.__logger is not None:
                self.__logger.error(ex_msg)
                self.__logger.debug("Traceback: {0}".format(ex_tb))
                self.__logger.debug("return code is {0}".format(ex_code))
            else:
                print (ex_msg)
                print ("Traceback: {0}".format(ex_tb))
                print ("return code is {0}".format(ex_code))

        # add an explicit message in the last executed TC's comment
        if self.__test_report is not None:
            self.__test_report.add_comment(tc_order, str(exception))
            self.__test_report.add_comment(tc_order,
                                           ("Fatal exception : Test Campaign will be stopped. "
                                            "See log file for more information."))
        # Set test campaign status
        global_results.status = Util.Status.ABORTED
    finally:
        # Sending Campaign Stop info to remote server (for Live Reporting control)
        self._live_reporting_interface.send_stop_campaign_info(verdict=global_results.verdict,
                                                               status=global_results.status)

        if self.__test_case_manager is not None:
            # Non-zero verdict means the campaign failed; cleanup needs to know
            campaign_error = bool(global_results.verdict)
            try:
                cleanup_status, global_results.dut_state = self.__test_case_manager.cleanup(campaign_error)
            except AcsBaseException as e:
                cleanup_status = False
                global_results.dut_state = Util.DeviceState.UNKNOWN
                error = str(e)
            if self.__logger is not None:
                if error:
                    self.__logger.error(error)
                self.__logger.info("FINAL DEVICE STATE : %s" % (global_results.dut_state,))
            else:
                if error:
                    print error
                print ("FINAL DEVICE STATE : %s" % (global_results.dut_state,))
        else:
            # No test case manager was created, nothing to clean up
            cleanup_status = True

        if not cleanup_status:
            global_results.verdict = Util.ExitCode.FAILURE

        # Report every non-passing test case verdict on stderr
        for verdict in verdicts:
            if not Util.Verdict.is_pass(verdicts[verdict]):
                tc_name = str(verdict).split(self.VERDICT_SEPARATOR)[0]
                tc_verdict = verdicts[verdict]
                msg = "ISSUE: %s=%s\n" % (tc_name, tc_verdict)
                sys.stderr.write(msg)

        # Wait for last LiveReporting action requests
        self._live_reporting_interface.wait_for_finish()

        if self.__test_report:
            # write data in report files
            self.__write_report_info()

            # update the metacampaign result id in xml report file
            # this action is done at the end because the connection retry with live reporting server will done
            # throughout campaign execution
            self.__test_report.write_metacampaign_result_id(self._live_reporting_interface.campaign_id)

        if self.campaign_report_path is not None:
            # Archive test campaign XML report
            self.__logger.info("Archive test campaign report...")
            # Compute checksum
            _, archive_file = zip_folder(self.campaign_report_path, self.campaign_report_path)
            self._live_reporting_interface.send_campaign_resource(archive_file)

        # Display campaign metrics information to the user
        self._display_campaign_metrics(self.__campaign_metrics)

        # Close logger
        ACSLogging.close()

        # Final verdict: success only when all outcomes passed AND cleanup succeeded
        if acs_outcome_status and cleanup_status:
            global_results.verdict = Util.ExitCode.SUCCESS
        else:
            global_results.verdict = Util.ExitCode.FAILURE

    return global_results
def _extract_test_cases(self, campaign_config_doc, parent_campaign_list, group_id):
    """
    Create a set of test case objects by parsing the campaign document and
    return the test case list plus the sub-campaign list.

    :type campaign_config_doc: object
    :param campaign_config_doc: etree Document of the campaign configuration

    :type parent_campaign_list: list
    :param parent_campaign_list: chain of parent campaign file names, used to
        resolve relative paths and to detect recursive campaign inclusion

    :type group_id: int
    :param group_id: reference number for test group list inside test case list

    :rtype: tuple
    :return: (test_cases, subcampaigns) where test_cases is a TestCaseConf
        list and subcampaigns is a CampaignConf list
    """

    def create_test_case_element(node, last_parent, random=False, group_id=None):
        # Build a TestCaseConf from an XML node; on parsing errors the element
        # is kept but flagged invalid so it is reported instead of executed.
        parse_status = True
        tcelement = TestCaseConf(node, random, group_id)
        error_msg = None
        try:
            tcelement = self._unify_path(tcelement, last_parent)
        except AcsConfigException as ex_msg:
            error_msg = "Error while reading-parsing TC item in " + \
                str(last_parent) + " file => ignore TestCase item (exception = " + \
                str(ex_msg) + ")"
            self._logger.warning(error_msg)
            self._logger.error(
                "Test case not found, it will not be executed")
            parse_status = False
        try:
            tcelement = self._load_usecase_of_testcase(tcelement)
        except AcsConfigException as ex_msg:
            error_msg = "Error while reading-parsing TC item in " + \
                str(last_parent) + " file => ignore TestCase item (exception = " + \
                str(ex_msg) + ")"
            self._logger.warning(error_msg)
            self._logger.error("Test case based on unknown usecase (%s)," %
                               (tcelement.get_name(), ) + " it will not be executed")
            parse_status = False
        tcelement.add_message(error_msg)
        tcelement.set_valid(parse_status)
        return tcelement

    # Get the list of Test cases to execute
    test_case_list = []
    sub_campaign_list = []

    tcs_node = campaign_config_doc.xpath('//TestCases')
    if not tcs_node:
        # Inform the user that the campaign config template is bad
        error_msg = "Error while reading-parsing campaign item " + \
            str(parent_campaign_list[-1]) + " file => no <TestCases> ... " \
            "</TestCases> node found "
        self._logger.warning(error_msg)

    tc_nodes = campaign_config_doc.xpath('//TestCases/*')
    if tc_nodes:
        for node in tc_nodes:
            last_parent = parent_campaign_list[
                -1] if parent_campaign_list else ""
            if node.tag == "TestCase":
                tcelement = create_test_case_element(node, last_parent)
                if tcelement is not None:
                    test_case_list.append(tcelement)
            elif node.tag == "RANDOM":
                # NOTE: a dead "if True:  # subnode.nodeType == ..." wrapper
                # (leftover from a DOM-based implementation) was removed here;
                # the loop body is unchanged.
                for subnode in node:
                    if subnode.tag == "TestCase":
                        tcelement = create_test_case_element(
                            subnode, last_parent, random=True)
                        if tcelement is not None:
                            test_case_list.append(tcelement)
                    elif subnode.tag == "GROUP":
                        group_id += 1
                        for group_node in subnode:
                            if group_node.tag == "TestCase":
                                tcelement = create_test_case_element(
                                    group_node, last_parent, random=True, group_id=group_id)
                                if tcelement is not None:
                                    test_case_list.append(tcelement)
            elif node.tag == "SubCampaign":
                # Parse sub campaign config and check arguments
                # Check also that we do not fall into infinite loop by calling again a parent campaign name
                try:
                    sub_campaign_config = CampaignConf(
                        node, parent_campaign_list)
                    # unify path according to the closest parent campaign
                    sub_campaign_config = self._unify_path(
                        sub_campaign_config, last_parent)
                    sub_campaign_config.check_campaign_sanity()
                except AcsConfigException as ex_msg:
                    error_msg = "Error while reading-parsing campaign item in " + \
                        str(last_parent) + " file => ignore SubCampaign item (exception = " + \
                        str(ex_msg) + ")"
                    self._logger.warning(error_msg)
                    continue

                # Compose relative file path for sub campaign config file
                sub_campaign_file_path = sub_campaign_config.get_name()
                campaign_path = os.path.join(
                    Paths.EXECUTION_CONFIG,
                    sub_campaign_file_path + self._file_extension)
                if not os.path.isfile(campaign_path):
                    error_msg = "Campaign file not found %s !" % (
                        campaign_path, )
                    raise AcsConfigException(
                        AcsConfigException.FILE_NOT_FOUND, error_msg)
                try:
                    sub_campaign_config_doc = et.parse(
                        os.path.abspath(campaign_path))
                except et.XMLSyntaxError:
                    _, error_msg, _ = Utils.get_exception_info()
                    raise AcsConfigException(
                        AcsConfigException.XML_PARSING_ERROR, error_msg)

                # Parse of the Sub Campaign node is OK
                # in parent campaign file + Parse of the file sub Campaign is OK
                # add sub campaign item to sub campaign list
                # (for debug purpose - configuration file copy in AWR)
                sub_campaign_list.append(sub_campaign_config)

                # After parsing sub Campaign node, we shall update the sub campaign parent campaign list
                parent_sub_campaign_list = sub_campaign_config.get_parent_campaign_list(
                )[:]
                parent_sub_campaign_list.append(sub_campaign_file_path)

                # we call a sub campaign, parse it by a recursive call to _extract_test_cases() method
                try:
                    test_case_subset_list, sub_campaign_subset_list = self._extract_test_cases(
                        sub_campaign_config_doc, parent_sub_campaign_list, group_id)
                except Exception:  # pylint: disable=W0703
                    _, error_msg, _ = Utils.get_exception_info()
                    self._logger.warning(error_msg)
                    continue

                # Repeat test case subset list runNumber of time in the current test case list
                if test_case_subset_list:
                    exec_number = int(sub_campaign_config.get_run_number())
                    test_case_list.extend(test_case_subset_list * exec_number)

                # add sub campaign subset list to sub campaign list (for debug purpose -
                # configuration file copy in AWR)
                if sub_campaign_subset_list:
                    sub_campaign_list.extend(sub_campaign_subset_list)
            else:
                # other case of parsing error continue the campaign execution
                error_msg = "Error while reading-parsing campaign item in " + \
                    str(last_parent) + " file => node <" + node.tag + \
                    "> is not parsed according to campaign config template "
                self._logger.warning(error_msg)
    else:
        # Inform the user that the campaign config template is bad
        error_msg = "Campaign item " + str(
            parent_campaign_list[-1]) + " is empty"
        self._logger.warning(error_msg)

    return test_case_list, sub_campaign_list
def parse_bench_config(self):
    """
    This function parses the bench config XML file into a dictionary.

    :rtype: Utils.BenchConfigParameters
    :return: bench configuration parameters built from the parsed file
    :raise AcsConfigException: if the bench config file does not exist or
        cannot be parsed as XML
    """
    def __parse_node(node):
        """
        This private function parse a node from bench_config parsing.

        :rtype: dict
        :return: Data stocked into a dictionnary.
        """
        dico = {}
        name = node.get('name', "")
        if name:
            # store all keys (except 'name')/value in a dict
            for key in [x for x in node.attrib if x != "name"]:
                dico[key] = node.attrib[key]
        # Recurse into named child nodes, keyed by their 'name' attribute.
        # NOTE(review): nesting level reconstructed from a collapsed source;
        # callers only ever pass named nodes here, so placing this inside or
        # outside the 'if name:' guard is behaviorally equivalent — confirm
        # against the original file if it resurfaces.
        node_list = node.xpath('./*')
        if node_list:
            for node_item in node_list:
                name = node_item.get('name', "")
                if name:
                    dico[name] = __parse_node(node_item)
        return dico

    def __parse_bench_config(document):
        """
        Last version of function parsing bench_config adapted for Multiphone.

        :type document: object
        :param document: xml document parsed by etree

        :rtype: dict
        :return: Data stocked into a dictionary.
        """
        # parse bench_config (dom method)
        bench_config = {}
        # Only grandchildren of the BenchConfig root carrying a 'name'
        # attribute become top-level entries of the result dict
        node_list = document.xpath('/BenchConfig/*/*')
        for node in node_list:
            name = node.get('name', "")
            if name:
                bench_config[name] = __parse_node(node)
        return bench_config

    # body of the parse_bench_config() function.
    if not os.path.isfile(self._bench_config_name):
        error_msg = "Bench config file : %s does not exist" % self._bench_config_name
        raise AcsConfigException(AcsConfigException.FILE_NOT_FOUND, error_msg)

    try:
        document = et.parse(self._bench_config_name)
    except et.XMLSyntaxError:
        _, error_msg, _ = Utils.get_exception_info()
        error_msg = "{}; {}".format(self._bench_config_name, error_msg)
        raise AcsConfigException(AcsConfigException.XML_PARSING_ERROR, error_msg)

    result = __parse_bench_config(document)
    # Wrap the raw dict so callers get the BenchConfigParameters accessor API
    bench_config_parameters = Utils.BenchConfigParameters(dictionnary=result,
                                                          bench_config_file=self._bench_config_name)
    return bench_config_parameters