    def __init__(self, report_folder=None):

        ACSLogging.initialize()

        self.__test_case_manager = None
        self.__file_parsing_manager = None
        self.__test_case_conf_list = []
        self.__logger = LOGGER_FWK
        self.__global_config = None
        self.__test_report = None
        self.__debug_report = None
        self.__sub_campaigns_list = []

        # Local Files
        self.__equipment_catalog = "Equipment_Catalog"

        # Local paths
        self.__campaign_report_tree = None
        self._campaign_report_path = report_folder

        self._campaign_elements = {}
        self._live_reporting_interface = LiveReporting.instance()
        self.__campaign_metrics = CampaignMetrics.instance()
        self.__stop_on_critical_failure = False
        self.__stop_on_first_failure = False
        self._credentials = ""

    def __init_logger(self, hw_variant_name, serial_number, campaign_report_path, session_id):
        # Initialize the logger
        log_file_name = '{0}_{1}{2}.log'.format(Util.get_timestamp(), hw_variant_name, str(serial_number))
        logfile = os.path.join(campaign_report_path, log_file_name)
        Files.acs_output_name = logfile[:-4]
        ACSLogging.set_session_id(session_id)
        ACSLogging.set_output_path(logfile)

    def execute(self, is_arg_checking=True, **kwargs):
        """
            This function is the entry point of the ACS solution when called by the Test Runner.
            It parses the arguments given to the CampaignEngine, parses the associated XML files,
            and reads the campaign content for the TestCaseManager to execute.

            :param is_arg_checking: Whether or not ACS arguments are checked
            :type is_arg_checking: bool

            :param kwargs: ACS arguments
            :type kwargs: dict
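
            Illustrative call (a sketch only: the class name follows the CampaignEngine
            reference above, the keyword names follow the parameters read in ``_setup``,
            and the exact argument set is validated by ``ArgChecker``)::

                engine = CampaignEngine()
                results = engine.execute(is_arg_checking=True,
                                         campaign_name="MY_CAMPAIGN",
                                         device_name="MY_DEVICE",
                                         serial_number="0123456789")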

        """

        error = None
        global_results = Util.ACSResult(verdict=Util.ExitCode.FAILURE)
        execution_iteration = 1
        # Index of the test case inside the campaign loop
        tc_order = 1
        stop_execution = False
        verdicts = {}
        acs_outcome_verdicts = {}
        acs_outcome_status = False
        self.__campaign_metrics.campaign_start_datetime = datetime.now()

        try:

            arg_checker = ArgChecker(**kwargs)

            if is_arg_checking:
                error = arg_checker.check_args(False)
                if error:
                    raise AcsBaseException("INVALID_PARAMETER", error)

            params = arg_checker.args

            campaign_name = params["campaign_name"]
            params["campaign_relative_path"] = os.path.dirname(campaign_name)
            execution_request_nb = params["execution_request_nb"]
            random_mode = params["random_mode"]
            device_parameters = params["device_parameter_list"]
            Paths.FLASH_FILES = params["flash_file_path"]

            # Log ACS parameters
            self.__log_acs_param(params)

            # Check if device parameters is a list
            if not isinstance(device_parameters, list):
                device_parameters = []

            # Set test campaign status : campaign is in setup phase
            global_results.status = Util.Status.INIT
            setup_status = self._setup(**params)
            # setup successfully completed
            if setup_status is None:
                total_tc_to_execute = execution_request_nb * len(self.__test_case_conf_list)
                if total_tc_to_execute > MAX_TC_NB_AUTHORIZED:
                    self.__logger.warning("Total number of TCs ({0}) exceeds maximum number authorized ({1})."
                                          .format(total_tc_to_execute, MAX_TC_NB_AUTHORIZED))
                    self.__logger.warning("Only first {0} TCs will be executed".format(MAX_TC_NB_AUTHORIZED))
                    total_tc_to_execute = MAX_TC_NB_AUTHORIZED

                self.__campaign_metrics.total_tc_count = total_tc_to_execute
                # Send live report if enabled
                self._send_create_testcase_info(execution_request_nb)
                # Log extra ACS parameters for metrics
                self._log_acs_param_extra(params)

                # Execute test cases of campaign
                # Set test campaign status : campaign is starting
                global_results.status = Util.Status.ONGOING
                while execution_iteration <= execution_request_nb and not stop_execution:
                    stop_execution, tc_order = self._execute_test_cases(verdicts, tc_order, acs_outcome_verdicts)
                    execution_iteration += 1
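                    # In random mode, re-shuffle the test case list between iterations
                    # so that each campaign pass runs in a new random order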
                    if random_mode:
                        self.__test_case_conf_list = self.__randomize_test_cases(self.__test_case_conf_list)
                    if tc_order > MAX_TC_NB_AUTHORIZED:
                        break
                if not stop_execution:
                    LOGGER_FWK_STATS.info("event=STOP_ON_EOC")
                    # Set test campaign status : campaign is completed
                    global_results.status = Util.Status.COMPLETED
                else:
                    # Set test campaign status : campaign has been interrupted during test suite execution
                    global_results.status = Util.Status.ABORTED
            # Setup returned an error status
            else:
                self.__log_stop_campaign(setup_status)
                # Set test campaign status
                global_results.status = Util.Status.ABORTED

            (status, acs_outcome_status) = self._all_tests_succeed(verdicts, acs_outcome_verdicts)
            if status:
                global_results.verdict = Util.ExitCode.SUCCESS
        except KeyboardInterrupt:
            LOGGER_FWK_STATS.info("event=STOP_ON_USER_INTERRUPT")
            self.__log_stop_campaign("USER INTERRUPTION")
            # Set test campaign status
            global_results.status = Util.Status.ABORTED
        except SystemExit:
            LOGGER_FWK_STATS.info("event=STOP_ON_SYSTEM_INTERRUPT")
            self.__log_stop_campaign("SYSTEM INTERRUPTION")
            # Set test campaign status
            global_results.status = Util.Status.ABORTED
        except Exception as exception:
            if isinstance(exception, AcsBaseException):
                error = str(exception)
                LOGGER_FWK_STATS.info("event=STOP_ON_EXCEPTION; error={0}".format(error))
                if self.__logger is not None:
                    self.__logger.error(error)
                else:
                    print(error)
            else:
                ex_code, ex_msg, ex_tb = Util.get_exception_info(exception)
                LOGGER_FWK_STATS.info("event=STOP_ON_EXCEPTION; error={0}".format(ex_msg))
                if self.__logger is not None:
                    self.__logger.error(ex_msg)
                    self.__logger.debug("Traceback: {0}".format(ex_tb))
                    self.__logger.debug("return code is {0}".format(ex_code))
                else:
                    print(ex_msg)
                    print("Traceback: {0}".format(ex_tb))
                    print("return code is {0}".format(ex_code))

            # add an explicit message in the last executed TC's comment
            if self.__test_report is not None:
                self.__test_report.add_comment(tc_order, str(exception))
                self.__test_report.add_comment(tc_order,
                                               ("Fatal exception : Test Campaign will be stopped. "
                                                "See log file for more information."))
            # Set test campaign status
            global_results.status = Util.Status.ABORTED
        finally:
            # Sending Campaign Stop info to remote server (for Live Reporting control)
            self._live_reporting_interface.send_stop_campaign_info(verdict=global_results.verdict,
                                                                   status=global_results.status)

            if self.__test_case_manager is not None:
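                # Any non-SUCCESS verdict is treated as a campaign error for cleanup
                # (assuming Util.ExitCode.SUCCESS evaluates to a falsy value)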
                campaign_error = bool(global_results.verdict)
                try:
                    cleanup_status, global_results.dut_state = self.__test_case_manager.cleanup(campaign_error)
                except AcsBaseException as e:
                    cleanup_status = False
                    global_results.dut_state = Util.DeviceState.UNKNOWN
                    error = str(e)
                if self.__logger is not None:
                    if error:
                        self.__logger.error(error)
                    self.__logger.info("FINAL DEVICE STATE : %s" % (global_results.dut_state,))
                else:
                    if error:
                        print(error)
                    print("FINAL DEVICE STATE : %s" % (global_results.dut_state,))
            else:
                cleanup_status = True

            if not cleanup_status:
                global_results.verdict = Util.ExitCode.FAILURE

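            # Echo every non-passing verdict on stderr so failures remain visible
            # even without opening the report files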
            for verdict in verdicts:
                if not Util.Verdict.is_pass(verdicts[verdict]):
                    tc_name = str(verdict).split(self.VERDICT_SEPARATOR)[0]
                    tc_verdict = verdicts[verdict]
                    msg = "ISSUE: %s=%s\n" % (tc_name, tc_verdict)
                    sys.stderr.write(msg)

            # Wait for last LiveReporting action requests
            self._live_reporting_interface.wait_for_finish()

            if self.__test_report:
                # Write data in the report files
                self.__write_report_info()

                # Update the metacampaign result id in the XML report file.
                # This action is done at the end because connection retries with the live reporting
                # server may occur throughout the campaign execution.
                self.__test_report.write_metacampaign_result_id(self._live_reporting_interface.campaign_id)

            if self.campaign_report_path is not None:
                # Archive test campaign XML report
                self.__logger.info("Archive test campaign report...")
                # Compute checksum
                _, archive_file = zip_folder(self.campaign_report_path, self.campaign_report_path)
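                # Send the archive to the live reporting server as a campaign resource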
                self._live_reporting_interface.send_campaign_resource(archive_file)

            # Display campaign metrics information to the user
            self._display_campaign_metrics(self.__campaign_metrics)

            # Close logger
            ACSLogging.close()

            if acs_outcome_status and cleanup_status:
                global_results.verdict = Util.ExitCode.SUCCESS
            else:
                global_results.verdict = Util.ExitCode.FAILURE

        return global_results

    def _setup(self, **kwargs):
        """
            This function initializes all global variables used in ACS execution.
            It parses the arguments given to the CampaignEngine, parses the associated XML files,
            and reads the campaign content for the TestCaseManager to execute.

            :param device_name: Device model under test.
            :type device_name: str

            :param serial_number: Device id or serial number of the DUT.
            :type serial_number: str

            :param campaign_name: Campaign xml file to execute.
            :type campaign_name: str

            :param campaign_relative_path: Campaign relative path.
            :type campaign_relative_path: str

            :param bench_config: Bench Config file to use.
            :type bench_config: str

            :param device_parameter_list: List of device parameters to override default values in Device_Catalog.
            :type device_parameter_list: list

            :param flash_file_path: Flash file full path.
            :type flash_file_path: str

            :param random_mode: Enable random mode if your campaign is configured to run random TC.
            :type random_mode: bool

            :param user_email: Valid user email.
            :type user_email: str

            :param credentials: Credentials in User:Password format.
            :type credentials: str

            :param log_level: Logging level to use; when set, it overrides the campaign configuration value.
            :type log_level: str

            :param metacampaign_uuid: Metacampaign UUID used for reporting; regenerated if empty or invalid.
            :type metacampaign_uuid: str

            :rtype: object
            :return: None if the setup completed successfully, else an error status
                     (e.g. ``AcsBaseException.NO_TEST`` or the TestCaseManager setup status).
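
            Illustrative keyword set (a sketch only: key names follow the kwargs read by
            this method; values are placeholders, and additional optional keys such as
            ``live_reporting_plugin`` may be supplied)::

                self._setup(device_name="MY_DEVICE",
                            serial_number="0123456789",
                            campaign_name="MY_CAMPAIGN",
                            campaign_relative_path="",
                            device_parameter_list=[],
                            random_mode=False,
                            user_email="user@example.com",
                            credentials="",
                            log_level="DEBUG",
                            metacampaign_uuid="")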
        """

        status = None

        device_name = kwargs["device_name"]
        serial_number = kwargs["serial_number"]
        campaign_name = kwargs["campaign_name"]
        campaign_relative_path = kwargs["campaign_relative_path"]
        device_parameters = kwargs["device_parameter_list"]
        random_mode = kwargs["random_mode"]
        user_email = kwargs["user_email"]
        credentials = kwargs["credentials"]
        log_level_param = kwargs["log_level"]

        # In case the UUID is not set, generate one to ensure that the campaign has an id.
        # This id is used for reporting purposes.
        self.__logger.info('Checking metacampaign UUID integrity...')
        metacampaign_uuid = kwargs["metacampaign_uuid"]
        valid_uuid = is_uuid4(metacampaign_uuid)
        if not valid_uuid:
            self.__logger.warning("Metacampaign UUID is empty or not a valid UUID4; a new one is generated ...")
        metacampaign_uuid = metacampaign_uuid if valid_uuid else str(uuid.uuid4())
        self.__logger.info("Metacampaign UUID is {0}".format(metacampaign_uuid))

        self.__init_configuration(**kwargs)

        # Init Campaign report path
        self.__init_report_path(campaign_name)
        # Instantiate a live reporting interface
        campaign_name = os.path.splitext(os.path.basename(campaign_name))[0]
        self.__init_live_reporting(campaign_name,
                                   metacampaign_uuid,
                                   user_email,
                                   kwargs.get("live_reporting_plugin"))

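        # Campaign-level stop conditions, read from the campaign configuration (default: False)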
        self.__stop_on_critical_failure = Util.str_to_bool(
            self.__global_config.campaignConfig.get("stopCampaignOnCriticalFailure", "False"))
        self.__stop_on_first_failure = Util.str_to_bool(
            self.__global_config.campaignConfig.get("stopCampaignOnFirstFailure", "False"))

        # Provide the global configuration for equipment manager and device manager
        # They will use it to retrieve or set values in it.
        EquipmentManager().set_global_config(self.__global_config)
        DeviceManager().set_global_config(self.__global_config)

        # Initialize the equipment necessary to control the DUT (IO card, power supply, USB hub)
        EquipmentManager().initialize()

        # Use the serial number if given on the ACS command line
        if serial_number not in ["", None]:
            # Priority to serialNumber from --sr parameter
            device_parameters.append("serialNumber=%s" % str(serial_number))
        # Load the device
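        # (load() returns the loaded device instances indexed by name; keep the default DUT entry)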
        device = DeviceManager().load(device_name, device_parameters)[Util.AcsConstants.DEFAULT_DEVICE_NAME]
        # Store the device config files
        device_conf_list = []
        for dev in DeviceManager().get_all_devices():
            device_config_file = dev.get_config("DeviceConfigPath")
            if device_config_file:
                device_conf_list.append(device_config_file)
        self._campaign_elements.update({"devices": device_conf_list})

        # Init the logger
        self.__init_logger(device.hw_variant_name, serial_number, self.campaign_report_path, metacampaign_uuid)

        self.__logger.info('Checking acs version : %s' % str(Util.get_acs_release_version()))

        if self.__test_case_conf_list:
            if random_mode:
                self.__test_case_conf_list = self.__randomize_test_cases(self.__test_case_conf_list)
            # Parse parameter catalog
            parameter_catalog_parser = ParameterCatalogParser()
            self.__global_config.__setattr__("parameterConfig", parameter_catalog_parser.parse_catalog_folder())

            # Retrieve MTBF custom parameter to align logging level between the console and the log file
            is_logging_level_aligned = Util.str_to_bool(
                self.__global_config.campaignConfig.get("isLoggingLevelAligned", "False"))
            # Set log level according to global_config file content
            if log_level_param:
                logging_level = log_level_param
            else:
                logging_level = self.__global_config.campaignConfig.get("loggingLevel", "DEBUG")
            ACSLogging.set_log_level(logging_level, is_logging_level_aligned)

            # Set campaign_type when it exists
            campaign_type = self.__global_config.campaignConfig.get("CampaignType")

            # Set credentials
            self.__global_config.__setattr__("credentials", credentials)

            # Init reports
            self.__init_reports(self.campaign_report_path,
                                device_name, campaign_name, campaign_relative_path,
                                campaign_type, user_email, metacampaign_uuid)

            # Create the TestCaseManager object
            self.__test_case_manager = TestCaseManager(self.__test_report,
                                                       live_reporting_interface=self._live_reporting_interface)

            # Setup Test Case Manager
            tcm_stop_execution = self.__test_case_manager.setup(self.__global_config,
                                                                self.__debug_report,
                                                                self.__test_case_conf_list[0].do_device_connection)
            status = tcm_stop_execution
        else:
            status = AcsBaseException.NO_TEST

        return status