def __log_stop_campaign(self, msg, tc_order=None):
    """
    Report that the campaign was stopped.

    Emits a STOP_ON_* event to the framework stats logger, prints a
    banner to the campaign logger, and, when a test case order is
    supplied, records the banner as a comment on that test case.

    :type msg: str
    :param msg: reason the campaign stopped (e.g. "USER INTERRUPTION")
    :type tc_order: int
    :param tc_order: index of the last executed test case, or None
    """
    stop_event = "event=STOP_ON_{0}".format(msg.replace(" ", "_").upper())
    LOGGER_FWK_STATS.info(stop_event)

    banner = "CAMPAIGN STOPPED ON %s !" % str(msg).upper()
    # Blank lines around the banner make it stand out in the log.
    for line in ("", banner, ""):
        self.__logger.info(line)

    if tc_order is not None:
        self.__test_report.add_comment(tc_order, banner)
def _log_acs_param_extra(self, acs_params):
    """
    Log all ACS input parameters plus host environment details
    (OS release, Python version/architecture, hostname, ACS version,
    user name) as a START event on the framework stats logger.

    :type acs_params: dict
    :param acs_params: ACS input parameters to log
    """
    params = self.__log_acs_param(acs_params, False)

    # BUGFIX: default release to "" so the os= entry below is still
    # well-formed on non-Windows hosts where platform.dist() returns
    # empty values (previously 'release' was unbound -> NameError).
    release = ""
    if platform.system() == "Windows":
        release = platform.release()
    elif platform.dist():
        # NOTE(review): platform.dist() was removed in Python 3.8;
        # this branch assumes a Python 2 / early Python 3 runtime.
        release = "{0}_{1}".format(platform.dist()[0], platform.dist()[1])

    params += "; os={0}_{1}".format(platform.system(), release)
    params += "; python_vers={0}_{1}".format(platform.python_version(), platform.architecture()[0])
    params += "; hostname={0}".format(socket.getfqdn())
    params += "; version={0}".format(Util.get_acs_release_version())

    # Last path component of the home directory is the user name.
    user_home = os.path.split(os.path.expanduser('~'))
    if user_home:
        params += "; user={0}".format(user_home[-1])

    LOGGER_FWK_STATS.info("event=START; {0}".format(params))
def _display_campaign_metrics(self, campaign_metrics):
    """
    Displays Campaign metrics information to Info log level

    :type campaign_metrics: CampaignMetrics
    :param campaign_metrics: campaign metrics from where to get the information

    :return: None
    """
    if self.__logger is None:
        return

    def _log(text):
        # All metrics lines go out at the same (minimal) level.
        self.__logger.log(ACSLogging.MINIMAL_LEVEL, text)

    _log("---EXECUTION METRICS---")
    _log("Tests Number = {0}".format(campaign_metrics.total_tc_count))

    # Verdict counters are only reported when non-zero.
    verdict_counters = (
        ("Passed = {0}", campaign_metrics.pass_verdict_count),
        ("Failed = {0}", campaign_metrics.fail_verdict_count),
        ("Blocked = {0}", campaign_metrics.blocked_verdict_count),
        ("Valid = {0}", campaign_metrics.valid_verdict_count),
        ("Invalid = {0}", campaign_metrics.invalid_verdict_count),
        ("Inconclusive = {0}", campaign_metrics.inconclusive_verdict_count),
        ("Not Executed = {0}", campaign_metrics.tc_not_executed_count),
    )
    for template, counter in verdict_counters:
        if counter:
            _log(template.format(counter))

    if campaign_metrics.total_boot_count:
        _log("DUT Boot(s) = {0}".format(int(campaign_metrics.total_boot_count)))
    if campaign_metrics.unexpected_reboot_count:
        _log("DUT Unexpected Reboots = {0}".format(int(campaign_metrics.unexpected_reboot_count)))

    elapsed = datetime.now() - campaign_metrics.campaign_start_datetime
    if hasattr(elapsed, "total_seconds"):
        campaign_duration = elapsed.total_seconds()
    else:
        # timedelta.total_seconds() is unavailable before Python 2.7;
        # compute the equivalent value by hand.
        campaign_duration = (
            (elapsed.microseconds +
             (elapsed.seconds + elapsed.days * 24 * 3600) * 10 ** 6) / 10 ** 6)

    run_time = "%.2d:%.2d:%.2d" % (campaign_duration // 3600,
                                   (campaign_duration // 60) % 60,
                                   campaign_duration % 60)
    _log("Exec Time = {0}".format(run_time))
    _log("Local Report = {0}".format(self._get_campaign_report_filename_path()))

    # Mirror the summary to the framework stats logger as one STOP event.
    event_msg = ["event=STOP",
                 "test_number={0}".format(campaign_metrics.total_tc_count),
                 "test_passed={0}".format(campaign_metrics.pass_verdict_count),
                 "test_failed={0}".format(campaign_metrics.fail_verdict_count),
                 "test_blocked={0}".format(campaign_metrics.blocked_verdict_count),
                 "test_valid={0}".format(campaign_metrics.valid_verdict_count),
                 "test_invalid={0}".format(campaign_metrics.invalid_verdict_count),
                 "test_inconclusive={0}".format(campaign_metrics.inconclusive_verdict_count),
                 "test_na={0}".format(campaign_metrics.tc_not_executed_count),
                 "test_time={0}".format(run_time)]
    LOGGER_FWK_STATS.info(";".join(event_msg))
def execute(self, is_arg_checking=True, **kwargs):
    """
    This function is the entry point of ACS solution when called by Test Runner.
    It parses the arguments given to CampaignEngine,
    parses XML files associated & read the campaign content
    for the TestCaseManager to execute.

    :param is_arg_checking: Whether or not ACS arguments are checked
    :type is_arg_checking: bool

    :param kwargs: ACS arguments
    :type kwargs: dict

    :return: the overall campaign result
    :rtype: Util.ACSResult
    """
    error = None
    global_results = Util.ACSResult(verdict=Util.ExitCode.FAILURE)
    execution_iteration = 1
    # Index of test case inside loop on campaign
    tc_order = 1
    stop_execution = False
    verdicts = {}
    acs_outcome_verdicts = {}
    acs_outcome_status = False
    self.__campaign_metrics.campaign_start_datetime = datetime.now()

    try:
        arg_checker = ArgChecker(**kwargs)

        if is_arg_checking:
            error = arg_checker.check_args(False)
            if error:
                raise AcsBaseException("INVALID_PARAMETER", error)

        params = arg_checker.args
        campaign_name = params["campaign_name"]
        params["campaign_relative_path"] = os.path.dirname(campaign_name)
        execution_request_nb = params["execution_request_nb"]
        random_mode = params["random_mode"]
        device_parameters = params["device_parameter_list"]
        Paths.FLASH_FILES = params["flash_file_path"]

        # Log acs param
        self.__log_acs_param(params)

        # Check if device parameters is a list
        if not isinstance(device_parameters, list):
            device_parameters = []

        # Set test campaign status : campaign is in setup phase
        global_results.status = Util.Status.INIT

        setup_status = self._setup(**params)

        # setup successfully completed (None means no setup error)
        if setup_status is None:
            total_tc_to_execute = execution_request_nb * len(self.__test_case_conf_list)
            if total_tc_to_execute > MAX_TC_NB_AUTHORIZED:
                self.__logger.warning("Total number of TCs ({0}) exceeds maximum number authorized ({1})."
                                      .format(total_tc_to_execute, MAX_TC_NB_AUTHORIZED))
                self.__logger.warning("Only first {0} TCs will be executed".format(MAX_TC_NB_AUTHORIZED))
                total_tc_to_execute = MAX_TC_NB_AUTHORIZED

            self.__campaign_metrics.total_tc_count = total_tc_to_execute

            # Send live report if enabled
            self._send_create_testcase_info(execution_request_nb)

            # Log extra acs param for metrics
            self._log_acs_param_extra(params)

            # Execute test cases of campaign
            # Set test campaign status : campaign is starting
            global_results.status = Util.Status.ONGOING
            while execution_iteration <= execution_request_nb and not stop_execution:
                stop_execution, tc_order = self._execute_test_cases(verdicts, tc_order,
                                                                    acs_outcome_verdicts)
                execution_iteration += 1
                if random_mode:
                    self.__test_case_conf_list = self.__randomize_test_cases(self.__test_case_conf_list)
                if tc_order > MAX_TC_NB_AUTHORIZED:
                    break

            if not stop_execution:
                LOGGER_FWK_STATS.info("event=STOP_ON_EOC")
                # Set test campaign status : campaign is completed
                global_results.status = Util.Status.COMPLETED
            else:
                # Set test campaign status : campaign has been interrupted during test suite execution
                global_results.status = Util.Status.ABORTED
        # Exception occurred during setup
        else:
            self.__log_stop_campaign(setup_status)
            # Set test campaign status
            global_results.status = Util.Status.ABORTED

        (status, acs_outcome_status) = self._all_tests_succeed(verdicts, acs_outcome_verdicts)
        if status:
            global_results.verdict = Util.ExitCode.SUCCESS
    except KeyboardInterrupt:
        LOGGER_FWK_STATS.info("event=STOP_ON_USER_INTERRUPT")
        self.__log_stop_campaign("USER INTERRUPTION")
        # Set test campaign status
        global_results.status = Util.Status.ABORTED
    except SystemExit:
        # BUGFIX: event key previously contained a space
        # ("STOP_ON_SYSTEM INTERRUPT"), breaking the STOP_ON_* convention.
        LOGGER_FWK_STATS.info("event=STOP_ON_SYSTEM_INTERRUPT")
        self.__log_stop_campaign("SYSTEM INTERRUPTION")
        # Set test campaign status
        global_results.status = Util.Status.ABORTED
    except Exception as exception:
        if isinstance(exception, AcsBaseException):
            error = str(exception)
            LOGGER_FWK_STATS.info("event=STOP_ON_EXCEPTION; error={0}".format(error))
            if self.__logger is not None:
                self.__logger.error(error)
            else:
                print(error)
        else:
            ex_code, ex_msg, ex_tb = Util.get_exception_info(exception)
            LOGGER_FWK_STATS.info("event=STOP_ON_EXCEPTION; error={0}".format(ex_msg))
            if self.__logger is not None:
                self.__logger.error(ex_msg)
                self.__logger.debug("Traceback: {0}".format(ex_tb))
                self.__logger.debug("return code is {0}".format(ex_code))
            else:
                print(ex_msg)
                print("Traceback: {0}".format(ex_tb))
                print("return code is {0}".format(ex_code))

        # add an explicit message in the last executed TC's comment
        if self.__test_report is not None:
            self.__test_report.add_comment(tc_order, str(exception))
            self.__test_report.add_comment(tc_order,
                                           ("Fatal exception : Test Campaign will be stopped. "
                                            "See log file for more information."))
        # Set test campaign status
        global_results.status = Util.Status.ABORTED
    finally:
        # Sending Campaign Stop info to remote server (for Live Reporting control)
        self._live_reporting_interface.send_stop_campaign_info(verdict=global_results.verdict,
                                                               status=global_results.status)

        if self.__test_case_manager is not None:
            # A non-zero verdict (FAILURE) flags the cleanup as an error path.
            campaign_error = bool(global_results.verdict)
            try:
                cleanup_status, global_results.dut_state = self.__test_case_manager.cleanup(campaign_error)
            except AcsBaseException as e:
                cleanup_status = False
                global_results.dut_state = Util.DeviceState.UNKNOWN
                error = str(e)
            if self.__logger is not None:
                if error:
                    self.__logger.error(error)
                self.__logger.info("FINAL DEVICE STATE : %s" % (global_results.dut_state,))
            else:
                if error:
                    # BUGFIX: was a Python-2 print statement (`print error`);
                    # the rest of this method uses the call form.
                    print(error)
                print("FINAL DEVICE STATE : %s" % (global_results.dut_state,))
        else:
            cleanup_status = True

        if not cleanup_status:
            global_results.verdict = Util.ExitCode.FAILURE

        # Summarize every non-passing verdict on stderr.
        for verdict in verdicts:
            if not Util.Verdict.is_pass(verdicts[verdict]):
                tc_name = str(verdict).split(self.VERDICT_SEPARATOR)[0]
                tc_verdict = verdicts[verdict]
                msg = "ISSUE: %s=%s\n" % (tc_name, tc_verdict)
                sys.stderr.write(msg)

        # Wait for last LiveReporting action requests
        self._live_reporting_interface.wait_for_finish()

        if self.__test_report:
            # write data in report files
            self.__write_report_info()
            # update the metacampaign result id in xml report file
            # this action is done at the end because the connection retry with live reporting server will done
            # throughout campaign execution
            self.__test_report.write_metacampaign_result_id(self._live_reporting_interface.campaign_id)

        if self.campaign_report_path is not None:
            # Archive test campaign XML report
            self.__logger.info("Archive test campaign report...")
            # Compute checksum
            _, archive_file = zip_folder(self.campaign_report_path, self.campaign_report_path)
            self._live_reporting_interface.send_campaign_resource(archive_file)

        # Display campaign metrics information to the user
        self._display_campaign_metrics(self.__campaign_metrics)

        # Close logger
        ACSLogging.close()

        if acs_outcome_status and cleanup_status:
            global_results.verdict = Util.ExitCode.SUCCESS
        else:
            global_results.verdict = Util.ExitCode.FAILURE

    return global_results
def _create_test_step_instance(self, pars, ts_name=None):
    """
    Create test step instance given its class name

    :type pars: dict
    :param pars: the test step's parameters

    :type ts_name: str
    :param ts_name: the test step's parent name

    :rtype: :py:class:`~acs.Core.TestStep.TestStepBase`
    :return: the new instance of the test step.

    :raise AcsConfigException: if the test step id is missing, unknown,
        or the test step cannot be instantiated.
    """
    teststep_instance = None
    exception_code = None
    exception_msg = ""

    # Gets the test name and its class name
    teststep_name = pars.get(TestStepConstants.STR_TS_ID, "")
    if not teststep_name:
        raise AcsConfigException(
            AcsConfigException.INSTANTIATION_ERROR,
            "'Id' attribute is mandatory to identify the test step in the test step catalogs."
        )

    if teststep_name in self._teststep_dictionary:
        try:
            LOGGER_FWK_STATS.info(
                "Create test_step={0}".format(teststep_name))
            cls_name = self._teststep_dictionary[teststep_name][
                "ClassName"]
            teststep_instance = get_class(cls_name)(self._conf, self._global_conf, pars, self._factory)
            teststep_instance.name = "{0}.{1}".format(
                ts_name, teststep_name) if ts_name else teststep_name
        except KeyError:
            exception_code = AcsConfigException.INVALID_PARAMETER
            exception_msg = "Unable to find class name of '{0}' test step.".format(
                teststep_name)
            exception_msg += " Check that the 'ClassName' of the test step, is not empty in the test step catalogs."
        # BUGFIX: this handler must come BEFORE the generic Exception one;
        # it previously followed it and was therefore unreachable
        # (AcsBaseException is caught by `except Exception`).
        except AcsBaseException:
            raise
        except Exception as generic_exception:
            exception_code = AcsConfigException.INSTANTIATION_ERROR
            exception_msg = "Unable to instantiate '{0}' test step.".format(
                teststep_name)
            exception_msg += " Following error occurred : {0}".format(
                generic_exception)
    else:
        exception_code = AcsConfigException.INVALID_PARAMETER
        exception_msg = \
            "Unable to find '{0}' test step in any test step catalogs (official or external).".format(teststep_name)
        exception_msg += " Check that it is declared in the test step catalogs"

    if teststep_instance:
        teststep_instance.call_by_engine()
    else:
        raise AcsConfigException(exception_code, exception_msg)

    return teststep_instance