def create_md5sum_file(self, file_path):
    """
    Create md5sum file of input file

    :param file_path: The full file path
    :type file_path: str

    :rtype: tuple
    :return: status and output log (The md5sum file path and hash value)
    """
    try:
        # Propagate a md5 computation failure instead of silently writing
        # the error message into the .md5sum file as if it were a hash.
        status, md5hash = self._get_md5sum(file_path)
        if status != Global.SUCCESS:
            return status, md5hash
        md5sum_file_name = file_path + ".md5sum"
        file_name = os.path.basename(file_path)
        # Context manager guarantees the handle is closed even on error
        with open(md5sum_file_name, 'w') as md5sum_file:
            # Same byte layout as before: "<hash> <name>\r\n"
            md5sum_file.write("{0} {1}\r\n".format(md5hash, file_name))
    except Exception as ex:  # pylint: disable=W0703
        error_msg = "CREATE_MD5_FILE: Fail, " + str(ex)
        LOGGER.error(error_msg)
        return Global.FAILURE, error_msg

    LOGGER.info("MD5SUM FILE: %s" % md5sum_file_name)
    return Global.SUCCESS, md5sum_file_name
def load(self):
    """
    Public method which acts as device configuration loader.

    Loads, in order: CLI configuration, bench configuration, then the
    device configuration from the device catalog; overrides catalog
    values with bench then CLI parameters, and finally validates the
    resulting device model.

    :rtype: dict
    :return: A dict containing all device parameters
    :raise AcsConfigException.INVALID_PARAMETER: when no device config
        is found in the catalog for the requested device model
    """
    self._load_cli_conf()
    self._load_bench_conf()
    # Load the device config from the device catalog
    self._load_device_conf()
    if not self.device_conf:
        _error(
            "ACS single mode : No device config found for device {0} !".
            format(self._device_model_name),
            AcsConfigException.INVALID_PARAMETER)

    # Override Current DUT parameters from Bench ...
    self._override_parameters_bench()
    # ... and CLI
    self._override_parameters_cli()

    LOGGER_FWK.info('Checking device model integrity...')
    # Asserts All Device Model is valid (Parameters)
    self._validate()

    return self.device_only_params
def _get_md5sum(cls, file_path, block_size=256 * 128):
    """
    Compute the md5 checksum of a file.

    :param file_path: The full file path
    :type file_path: str

    :param block_size: size of the chunks read from the file; reading
        chunk by chunk is mandatory for huge files
    :type block_size: int

    :rtype: tuple
    :return: status and output log (The md5 hash)
    """
    hex_value = None
    try:
        digest = hashlib.md5()  # pylint: disable=E1101
        with open(file_path, 'rb') as stream:
            # Read in fixed-size chunks to keep memory usage flat
            chunk = stream.read(block_size)
            while chunk:
                digest.update(chunk)
                chunk = stream.read(block_size)
            hex_value = digest.hexdigest()
    except Exception as ex:  # pylint: disable=W0703
        error_msg = "GET_MD5: Fail, %s" % str(ex)
        LOGGER.error(error_msg)
        return Global.FAILURE, error_msg

    LOGGER.info("MD5 HASH: " + hex_value)
    return Global.SUCCESS, hex_value
def start_campaign(self, header, payload):
    """
    Create or update the campaign entry on the reporting server.

    Probes the REST API for the campaign id; sends a start event when it
    is unknown (404), an update event otherwise, then logs the test
    report URL when the server acknowledged the request.

    :param dict header: request header containing the campaign 'requestId'
    :param dict payload: campaign data to push
    :return: the server response
    """
    # Build the test report link to display in the logs
    campaign_id = header['requestId']
    user_agent = self.__print_user_agent(header)
    rest_api_url = "{0}{1}".format(self._api_url, '/campaigns')
    headers = {'content-type': 'application/json',
               'User-Agent': "{}".format(user_agent)}

    # first check if campaign exists
    campaign_url = rest_api_url + '/' + campaign_id
    response = self.send_get_request(url=campaign_url, headers=headers)
    campaign_missing = ('errorCode' in response
                        and '404' in response.get('errorCode'))
    if campaign_missing:
        # Not found, create it
        response = self.send_start_event(url=rest_api_url,
                                         payload=payload,
                                         timeout=PUSH_EVENT_TIMEOUT,
                                         headers=headers)
    else:
        # already exists, just update
        response = self.send_update_event(url=campaign_url,
                                          payload=payload,
                                          timeout=PUSH_EVENT_TIMEOUT,
                                          headers=headers)

    # Display the campaign url if the request is correctly sent
    if response and response.get('id') == campaign_id:
        self.campaign_url = "{0}/campaigns/{1}/detail".format(
            self._web_reporting_url, campaign_id)
        LOGGER_FWK.info("Meta Campaign UUID will be: {0}".format(campaign_id))
        LOGGER_FWK.info("TEST_REPORT_URL: {0}".format(self.campaign_url))
    return response
def load_json_info(self, json_path):
    """
    Load all public class attributes from a json file.

    :type json_path: str
    :param json_path: path to a json file

    :rtype: None or bool
    :return: None when the json file cannot be parsed, True otherwise
        (including when the file does not exist)
    """

    def remove_protected_and_private_attrib(obj):
        # Drop protected/private keys so they cannot clobber internal
        # attributes of the instance. ``.items()`` (not ``.iteritems()``)
        # works on both python 2 and python 3 dicts.
        filtered_dict = dict((key, value) for key, value in obj.items()
                             if not key.startswith("_"))
        return filtered_dict

    if os.path.exists(json_path):
        with open(json_path, 'r') as json_file:
            try:
                self.__dict__.update(
                    json.load(
                        json_file,
                        object_hook=remove_protected_and_private_attrib))
            except Exception as ex:
                # Log the path, not the repr of the open file object
                LOGGER.error("Cannot load the json file %s (%s)" %
                             (json_path, str(ex)))
                return None
    return True
def add_file_to_zip(zip_file, file_name, file_path):
    """
    Add a single file to an open zip archive.

    The entry is stored with its path relative to ``file_path``.

    :param zip_file: open ``zipfile.ZipFile`` instance (write mode)
    :param str file_name: name of the file to archive
    :param str file_path: directory containing the file
    """
    absolute_source = os.path.abspath(os.path.join(file_path, file_name))
    archive_name = os.path.relpath(absolute_source,
                                   os.path.abspath(file_path))
    LOGGER.info('File added: {0} as {1}'.format(absolute_source,
                                                archive_name))
    zip_file.write(absolute_source, archive_name)
def __zip_analysis(self, zip_status, zip_output):
    """
    Post-process archive creation: log the outcome and, on success,
    generate the companion md5sum file of the archive.

    :param zip_status: Global.SUCCESS/FAILURE returned by the zip step
    :param str zip_output: path of the created zip file
    :rtype: tuple
    :return: status and readiness message
    """
    status = Global.FAILURE
    if zip_status == Global.SUCCESS:
        LOGGER.info("Archiving file OK, zip file size is {}".format(
            self._get_file_size(zip_output)))
        # Create md5sum file and retrieve md5sum file path
        LOGGER.info("Create md5sum file from previously created archive ...")
        status, output = self.create_md5sum_file(zip_output)
        if status == Global.SUCCESS:
            LOGGER.info("Md5sum file creation OK")
            self.md5_acs_report_tcr = output
        else:
            LOGGER.error(
                "Md5sum file creation FAIL, please check log file for more details"
            )
            status = Global.FAILURE
    else:
        LOGGER.error(
            "Archiving file FAIL, please check log file for more details")

    if status == Global.SUCCESS:
        msg = "File are ready to upload!"
    else:
        msg = "File are not ready to upload!"
    return status, msg
def send_cmd(self, cmd, args=None):
    """
    Send a command to embedded server via socket

    :type cmd: string
    :param cmd: command name

    :type args: dict
    :param args: command arguments (defaults to an empty dict)

    :rtype: tuple
    :return: the status and the result of the command
    :raise DeviceException: on any communication failure
    """
    # Avoid the shared mutable default argument pitfall
    if args is None:
        args = {}
    request = json.dumps([cmd, [], args])
    LOGGER_FWK.info("Sending command %s %s" % (cmd, str(args)))
    conn = None
    try:
        conn = self._proto_factory.create()
        conn.connect()
        conn.send(request)
        status, result = json.loads(conn.receive())
        self._check_cmd_status(status, result)
    except DeviceException:
        # Keep the specific error raised by _check_cmd_status instead of
        # re-wrapping it into a generic communication failure
        raise
    except Exception:
        msg = "Failed to communicate with embedded command server"
        raise DeviceException(DeviceException.OPERATION_FAILED, msg)
    finally:
        # conn stays None when the factory call itself failed
        if conn is not None:
            conn.disconnect()

    if status == CommandServerApi.SRV_CMD_DEFERRED:
        LOGGER_FWK.debug("Command ID: %d" % result)
    return status, result
def zip_tcr_campaign_data(original_folder, dest_folder, folders_and_files):
    """
    Zip the campaign data to push on TCR.

    This archive contains: dut log, acs logs

    :param str original_folder: folder holding the campaign results
    :param str dest_folder: destination path (the ``.zip`` suffix is appended)
    :param dict folders_and_files: names of the log files/folders to archive
    :rtype: tuple
    :return: status and the zip absolute path (empty string on failure)
    """
    import zipfile
    filename = "{0}.zip".format(dest_folder)
    try:
        acs_logfile_name = folders_and_files["acs_logfile_name"]
        tcr_live_reporting_logfile = folders_and_files["TCR_LIVE_REPORTING"]
        aplog_folder = folders_and_files["AP_LOGS"]
        # NOTE: these two variables used to be read from swapped keys
        # (bplog <- LOGCAT_LOGS, logcat <- BP_LOGS); both folders were
        # archived either way, the names are simply consistent now.
        bplog_folder = folders_and_files["BP_LOGS"]
        logcat_folder = folders_and_files["LOGCAT_LOGS"]
        dbglog_folder = folders_and_files["DEBUG_LOGS"]
        pti_folder = folders_and_files["PTI_LOGS"]
        serial_folder = folders_and_files["SERIAL_LOGS"]
        root_dut_name = folders_and_files["ROOT_DUT_NAME"]
        report_style_filename = folders_and_files["REPORT_STYLE_FILENAME"]
        LOGGER.info('Create TCR campaign zip file: {0}'.format(filename))
        # Context manager guarantees the archive is closed even if a
        # file cannot be added midway
        with zipfile.ZipFile(filename, 'w',
                             compression=zipfile.ZIP_DEFLATED,
                             allowZip64=True) as zip_file:
            ZipFolderUtilities.add_file_to_zip(
                zip_file,
                os.path.basename("{0}.log".format(acs_logfile_name)),
                original_folder)
            ZipFolderUtilities.add_file_to_zip(
                zip_file,
                os.path.basename("{0}.xml".format(acs_logfile_name)),
                original_folder)
            if os.path.exists(tcr_live_reporting_logfile):
                ZipFolderUtilities.add_file_to_zip(zip_file,
                                                   tcr_live_reporting_logfile,
                                                   original_folder)
            ZipFolderUtilities.add_file_to_zip(zip_file,
                                               report_style_filename,
                                               original_folder)
            # Archive every DUT sub-folder (ROOT_DUT_NAME prefix) with all
            # its per-kind log folders
            dut_folders = [f for f in get_subdirectories(original_folder)
                           if f.startswith(root_dut_name)]
            for dut in dut_folders:
                for folder in (aplog_folder, bplog_folder, logcat_folder,
                               dbglog_folder, pti_folder, serial_folder):
                    local_path = "{0}{2}{1}".format(dut, folder, os.path.sep)
                    ZipFolderUtilities.add_folder_to_zip(
                        zip_file,
                        os.path.join(original_folder, local_path),
                        local_path)
        status = Global.SUCCESS
        out_file = os.path.abspath(filename)
    except IOError as error:
        LOGGER.error('Cannot create zip file: {0} - {1}'.format(
            filename, error))
        status = Global.FAILURE
        out_file = ""
    return status, out_file
def create_url_shortcut(self, campaign_url):
    """
    Create a shortcut to open given url

    Writes a small html redirect page into the reports folder unless
    one already exists.

    :rtype: tuple
    :return: Status and output log
    """
    try:
        if os.path.exists(Folders.REPORTS):
            output_path = os.path.join(Folders.REPORTS,
                                       self._report_name + ".html")
            if not os.path.isfile(output_path):
                LOGGER_FWK.info(
                    "CREATE_URL_SHORTCUT: Creating url shortcut to campaign result"
                )
                # Minimal page that immediately redirects to the campaign
                page = [
                    "<html>\n",
                    "<head>\n",
                    "<meta http-equiv=\"refresh\" content=\"0; URL=%s\">\n" % campaign_url,
                    "</head>\n",
                    "<body></body>\n",
                    "</html>",
                ]
                with open(output_path, "w") as fd:
                    fd.write("".join(page))
    except Exception as ex:  # pylint: disable=W0703
        error_msg = "CREATE_URL_SHORTCUT: Fail, " + str(ex)
        LOGGER_FWK.error(error_msg)
        return Global.FAILURE, error_msg

    msg = "CREATE_URL_SHORTCUT: Created link to %s" % str(campaign_url)
    return Global.SUCCESS, msg
def addFolderToZip(zip_file, folder):
    """
    Recursively add every file under ``folder`` to an open zip archive.

    Entries are stored with their paths relative to ``folder``.

    :param zip_file: open ``zipfile.ZipFile`` instance (write mode)
    :param str folder: root folder to archive
    """
    base = os.path.abspath(folder)
    for root, _, names in os.walk(folder):
        for name in names:
            source = os.path.abspath(os.path.join(root, name))
            arc_name = os.path.relpath(source, base)
            LOGGER.info('File added: {0} as {1}'.format(source, arc_name))
            zip_file.write(source, arc_name)
def add_folder_to_zip(zip_file, folder, path_inside_zip):
    """
    Recursively add every file under ``folder`` to an open zip archive,
    storing entries under the ``path_inside_zip`` prefix.

    :param zip_file: open ``zipfile.ZipFile`` instance (write mode)
    :param str folder: root folder to archive
    :param str path_inside_zip: folder prefix used inside the archive;
        assumed to appear in the absolute path of each archived file
        (the rsplit below raises IndexError otherwise) -- TODO confirm
        callers guarantee this
    """
    for root, _, files in os.walk(folder):
        for f in files:
            full_path = os.path.abspath(os.path.join(root, f))
            # Keep only the path fragment located after the last
            # occurrence of path_inside_zip, stripped of separators
            f_loc = full_path.rsplit(path_inside_zip, 1)[1].strip(os.path.sep)
            file_path = os.path.join(path_inside_zip, f_loc)
            LOGGER.info('File added: {0} as {1}'.format(
                full_path, file_path))
            zip_file.write(full_path, file_path)
def _parse_bench_node(self, node):
    """
    Parses XML `Phone` node(s) from Bench Catalog file and maps it into
    a python structure (dict)

    :return: A dict mapping XML configuration
    :rtype: AttributeDict

    :raise AcsConfigException.INVALID_BENCH_CONFIG: If a (or more) deprecated parameter(s)
        is/are found in a Phone node
    """
    LOGGER_FWK.info(
        'Loading optional device model parameters from CLI and/or BenchConfig '
        'for {0} ({1})...'.format(self._device_name,
                                  self._device_model_name))
    buf_params = AttributeDict()
    # Storing value to avoid recomputing each call
    device_model_name = self.device_model_name
    if device_model_name:
        buf_params["Name"] = device_model_name
    if self.device_conf_filename:
        buf_params["DeviceConfigPath"] = self.device_conf_filename

    # Get phone properties
    for attrs in node.xpath(".//Parameter"):
        name, value, description = attrs.get("name"), attrs.get(
            "value"), attrs.get("description")
        if name in self.PROHIBITIVE_KEYS:
            # Do not allow to override internal keys
            # as it would lead to nasty & unexpected behavior !!
            continue
        # Deprecated name/value parameter style: not supported anymore.
        # Collected here, then reported as a single
        # AcsConfigException.INVALID_BENCH_CONFIG below.
        if name and value:
            buf_params[name] = value
            self._bench_conf_deprecated[name] = (value, description)
        else:
            # Current style: copy all XML attributes as-is
            buf_params.update(attrs.attrib)

    # Report Errors if so
    if self.bench_contains_errors:
        LOGGER_FWK.error(self._report_errors())
        _error(
            'Invalid Bench Parameters format found! {0}'.format(', '.join(
                self._bench_conf_deprecated.keys())),
            AcsConfigException.INVALID_BENCH_CONFIG)

    # Extracting device modules if so
    buf_params.device_modules = self.extract_device_modules(node)
    return buf_params
def _check_cmd_status(self, status, result):
    """
    Check server command status.

    :param status: status code returned by the embedded command server
    :param result: command result, or the error message on failure
    :raise DeviceException: when the server output is unknown
    """
    if status == CommandServerApi.SRV_CMD_UNKNWON:
        msg = "Wrong embedded agent output"
        LOGGER_FWK.error(msg)
        raise DeviceException(DeviceException.OPERATION_FAILED, msg)
    if status == CommandServerApi.SRV_CMD_FAILURE:
        # Just log the error now, let UEcmd decide of what to do in case of failure
        # In this case, result contains the error message to log
        LOGGER_FWK.error(result)
def setup(self): """ Setup files which will be push to TCR server :rtype: tuple :return: Status and output log """ # Check original acs report path status = Global.FAILURE msg = "" if not os.path.exists(self.original_report_path): msg = "Cannot retrieve original ACS results: %s" % self.original_report_path LOGGER.error(msg) status = Global.FAILURE else: # compute the report folder name (used to generate html file) if not self.report_name: # If folder ending character is present, should remove it before treatment if self.original_report_path[-1:] in ('\\\\', '\\', '/'): self.report_name = str( os.path.basename(self.original_report_path[:-1])) else: self.report_name = str( os.path.basename(self.original_report_path)) if not self.new_report_path: (status, output) = self.build_file_name(self.original_report_path) if status == Global.SUCCESS: LOGGER.info("Building file name OK") # each push must have its own dedicated directory # it will be easier to manage reports cache like this : each folder # contains zip + md5 file + json info sub_folder_push = os.path.join( self.cache_reports, "%s_%s" % (CACHE_PUSH_BASE_FOLDER, time.strftime("%Y-%m-%d_%Hh%M.%S"))) self._lock_file = os.path.join(sub_folder_push, LOCK) if not os.path.isdir(sub_folder_push): os.makedirs(sub_folder_push) with open(self._lock_file, 'w') as lock: lock.write("locked") # output contains computed report dirname self.new_report_path = os.path.join( sub_folder_push, output) # create report zip file & md5 file associated status, msg = self.prepare_files() else: msg = "Building file name FAIL: %s" % output status = Global.FAILURE if status == Global.SUCCESS: msg = "TCR push SETUP : OK" return status, msg
def prepare_files(self):
    """
    Create report zip file and md5 files

    :rtype: tuple
    :return: Status and output log
    """
    # Zip folder and retrieve zip file path
    LOGGER.info("Create TCR archive")
    zip_status, zip_path = ZipFolderUtilities.zip_tcr_campaign_data(
        self.original_report_path,
        self.new_report_path,
        self._log_folders_and_files)
    self.zip_acs_report_tcr = zip_path
    # Delegate md5 generation and final status computation
    return self.__zip_analysis(zip_status, zip_path)
def send_test_case_chart(self, chart_info, iteration=False):
    """
    Attach a chart to the current test case

    :param dict chart_info: data to build the chart on the server (title, series, axis ...)
    :param bool iteration (optional): True if the test case has at least two iterations
    """
    if not self._tcr_instance:
        LOGGER_FWK.warning('Only TCR offers REST API for charts!')
        return
    # Push a resource to TCR Reporting tool via REST API interface (Test case level)
    self._tcr_instance.send_testcase_chart(chart_info, iteration=iteration)
def start(self):
    """
    Upload md5 & report zip files to specified url
    Generate in report dir a html file which redirects to the TCR campaign result

    :rtype: tuple
    :return: Status and output log
    """
    upload_status, _ = self.upload_all_files()
    if upload_status == Global.SUCCESS:
        return upload_status, ""
    # Upload failed: normalize the status and report it
    msg = "Could not upload files to TCR website"
    LOGGER.info(msg)
    return Global.FAILURE, msg
def get_config_value(config_dict, config_dict_name, config_name,
                     default_value=None, default_cast_type=str):
    """
    Return the value of the given config name

    The type of the value can be checked before assignment
    A default value can be given in case the config name does not exist

    :type config_name: string
    :param config_name: name of the property value to retrieve

    :type default_value: string
    :param default_value: default_value of the property

    :type default_cast_type: type object
    :param default_cast_type: type to cast (int, str, list ...)
        By default cast into str type.

    :rtype: string or type of default_cast_type
    :return: config value
    """
    # Read the config value from dut config dictionary
    raw_value = config_dict.get(config_name, default_value)
    if raw_value is None:
        return raw_value

    # Cast the value to the requested type; stripping suppresses leading
    # and trailing spaces. On conversion error, fall back to the default.
    try:
        if default_cast_type == "str_to_bool":
            return str_to_bool(str(raw_value).strip())
        if default_cast_type == "str_to_dict":
            return str_to_dict(str(raw_value).strip())
        return default_cast_type(raw_value)
    except ValueError:
        debug_msg = "Wrong value used for dictionary %s entry: '%s'. Returning default value '%s' !" \
            % (str(config_dict_name), str(config_name), str(default_value))
        LOGGER_FWK.debug(debug_msg)
        return default_value
def timezone():
    """
    Return host timezone

    :rtype: str
    :return: Timezone (i.e: 'Europe/Paris')
    """
    try:
        # tzlocal is optional, hence imported lazily on purpose
        import tzlocal
        return str(tzlocal.get_localzone())
    except Exception as tzlocal_exception:
        # Fall back on the framework default timezone
        host_localtimezone = DEFAULT_TIMEZONE
        LOGGER_FWK.warning("Cannot get host timezone ! "
                           "Use default timezone ('{0}') => {1}".format(
                               host_localtimezone, str(tzlocal_exception)))
        return host_localtimezone
def get_value(self, key, default_value=None, default_cast_type=str):
    """
    Return the value of the given device config name

    The type of the value can be checked before assignment
    A default value can be given in case the config name does not exist

    :type key: string
    :param key: name of the property value to retrieve

    :type default_value: object
    :param default_value: default_value of the property

    :type default_cast_type: type object
    :param default_cast_type: type to cast (int, str, list ...)
        By default cast into str type.

    :rtype: string or type of default_cast_type
    :return: config value
    """
    value = self.get(key, default_value)
    if value is None:
        return value

    # Cast the value; on conversion error, fall back to the default.
    try:
        if default_cast_type == "str_to_bool":
            # Stripping suppresses end and start spaces of values
            return str_to_bool(str(value).strip())
        if default_cast_type == "str_to_dict":
            return str_to_dict(str(value))
        return default_cast_type(value)
    except ValueError:
        LOGGER_FWK.debug(
            "Cannot convert {0} to {1}, return {2}".format(
                key, default_cast_type, default_value))
        return default_value
    # TODO: do not store the converted value back into self for now
    # because of side effects; need to see if it's expected behavior
def check_keys(dictionary, keys):
    """
    Check if keys are in given dictionary; log an error for each one missing.

    :type dictionary: dict
    :param dictionary: dict to test

    :type keys: string
    :param keys: keys to check

    :rtype: list
    :return: list of missing keys
    """
    missing = []
    for key in keys:
        if key in dictionary:
            continue
        LOGGER_FWK.error("KEY %s missing on your dictionary" % key)
        missing.append(key)
    return missing
def update_report_file(self):
    """
    Update the xml report file

    Writes the styled XML report into a temporary file first, then moves
    it over the final report path (so readers never observe a partially
    written report) and copies the XSL stylesheet next to it.
    """
    try:
        temp_test_report_file = os.path.join(tempfile.gettempdir(),
                                             "Temporary_TestReport.xml")
        # Stylesheet processing instruction so browsers render the XML
        processing_instruction = etree.ProcessingInstruction(
            "xml-stylesheet", "type=\"text/xsl\" href=\"report.xsl\"")
        with open(temp_test_report_file, 'w') as f_test_report:
            f_test_report.write(etree.tostring(processing_instruction,
                                               pretty_print=True,
                                               xml_declaration=True))
            f_test_report.write(etree.tostring(self.document,
                                               pretty_print=True))
        # Copy the temporary file into the test report
        shutil.move(temp_test_report_file, self.filename)
        # copy the XSL file in the same folder as the XML file
        shutil.copy(self._xsl_path, self._base)
    except Exception as report_exception:
        # Best-effort: a report update failure must not abort the campaign
        LOGGER_FWK.warning("Fail to update test report '%s' ! (%s)" %
                           (str(self.filename), str(report_exception)))
def _override_parameters_bench(self):
    """
    Override device config with device parameters available in bench config
    if applicable.

    For the default device (PHONE1) the override only happens when the
    bench config either names no device model or names the same model as
    the one selected on the command line; for every other phone the
    override is unconditional.
    """
    device_model_name = self.device_model_name
    device_name = self._device_name
    if self.bench_conf:
        do_the_override = False
        if device_name == AcsConstants.DEFAULT_DEVICE_NAME:
            if "Name" not in self.bench_conf:
                # No device specified in the bench config for PHONE1
                # Do the override then
                do_the_override = True
            elif self.bench_conf.Name == device_model_name:
                # Same device specified on the command line then in the bench config
                # Do the override
                do_the_override = True
            else:
                # Conflicting models: keep the command-line choice and
                # ignore the bench config parameters for this device
                warning_msg = (
                    "Different device model specified on the command line ({0}) "
                    "then in the bench config ({1}) for {2}! Related parameters specified "
                    "in bench config will be ignored !").format(
                        device_model_name, self.bench_conf.Name,
                        AcsConstants.DEFAULT_DEVICE_NAME)
                LOGGER_FWK.warning(warning_msg)
        else:
            # For other phones (PHONE2, ...) we do the override every time
            do_the_override = True

        if do_the_override:
            for key, value in self.bench_conf.iteritems():
                if key == "device_modules":
                    # Module configurations are merged per module, not
                    # replaced wholesale
                    for module, module_conf in value.iteritems():
                        self.device_conf.device_modules[
                            module] = module_conf
                else:
                    self._override_through_sections(
                        self.device_conf, key, value,
                        self._bench_unknown_parameters)
def get_result(self, cmd_id):
    """
    Get result of deferred command

    :type cmd_id: int
    :param cmd_id: ID of the command

    :rtype: tuple
    :return: the status and the result of the command
    :raise DeviceException: on any communication failure
    """
    request = json.dumps(["join", [cmd_id], {}])
    LOGGER_FWK.debug("Joining command server %d" % cmd_id)
    conn = None
    try:
        conn = self._proto_factory.create()
        conn.connect()
        conn.send(request)
        status, result = json.loads(conn.receive())
        self._check_cmd_status(status, result)
    except socket.error as E:
        # E.args is (errno, strerror) for socket errors, and works on
        # both python 2 and python 3 (unlike tuple-unpacking E itself)
        err, err_msg = E.args
        msg = "Socket error (%d): %s" % (err, err_msg)
        LOGGER_FWK.error(msg)
        raise DeviceException(DeviceException.OPERATION_FAILED, msg)
    except DeviceException:
        # Keep the specific error raised by _check_cmd_status instead of
        # re-wrapping it into a generic communication failure
        raise
    except Exception:
        msg = "Failed to communicate with embedded command server"
        LOGGER_FWK.error(msg)
        raise DeviceException(DeviceException.OPERATION_FAILED, msg)
    finally:
        # conn stays None when the factory call itself failed; calling
        # disconnect() on it would raise NameError in the original code
        if conn is not None:
            conn.disconnect()
    return status, result
def safe_remove_file(file_path, max_retry=5):
    """
    Removes the file.

    .. note:: Due to windows limitation, sometimes, the file cannot be removed
        immediately so implements retry loop.

    :type file_path: str
    :param file_path: The path to the folder/file to remove.

    :type max_retry: int
    :param max_retry: Max remove retry.

    :rtype: tuple
    :return: Status and output log
    """
    status = Global.FAILURE
    output = ''
    file_name = os.path.basename(file_path)
    for attempt in range(1, max_retry + 1):
        try:
            if os.path.isfile(file_path):
                os.remove(file_path)
            elif os.path.isdir(file_path):
                shutil.rmtree(file_path)
            status = Global.SUCCESS
            output = 'Remove {0} OK after {1}/{2} tries'.format(
                file_name, str(attempt), str(max_retry))
            LOGGER.info(output)
            break
        except Exception as ex:  # pylint: disable=W0703
            # Windows may keep a handle open briefly; wait and retry
            time.sleep(1)
            output = 'Fail to remove {0} after {1}/{2} tries ({ex})'.format(
                file_name, str(attempt + 1), str(max_retry), ex=ex)
            LOGGER.error(output)
    return status, output
def send_campaign_resource(self, campaign_uuid, resource_info):
    """
    Push a resource onto (TCR Only) REST API for a given campaign.

    Builds a zip archive of the campaign report folder (via TCRpush)
    and sends it to the TCR instance; only warns when no TCR instance
    is configured.

    :param str campaign_uuid: id of the campaign.
    :param dict resource_info: Local resource to be pushed onto TCR;
        expected keys: "campaign_report_path", "user_email",
        "dev_campaign" -- TODO confirm against callers
    """
    if self._tcr_instance:
        # Names of the log files/folders to include in the archive
        context_dict = {
            "acs_logfile_name": Files.acs_output_name,
            "TCR_LIVE_REPORTING": REPORT_FILE_NAME,
            "ROOT_DUT_NAME": Folders.ROOT_DUT_NAME,
            "AP_LOGS": Folders.AP_LOGS,
            "BP_LOGS": Folders.BP_LOGS,
            "PTI_LOGS": Folders.PTI_LOGS,
            "SERIAL_LOGS": Folders.SERIAL_LOGS,
            "LOGCAT_LOGS": Folders.LOGCAT_LOGS,
            "DEBUG_LOGS": Folders.DEBUG_LOGS,
            "REPORT_STYLE_FILENAME": Files.REPORT_STYLE
        }
        tcr_push = TCRpush(
            original_acs_report=resource_info["campaign_report_path"],
            metacampaign_uuid=campaign_uuid,
            user_mail=resource_info["user_email"],
            cache_reports=Paths.CACHE_PUSH_REPORTS,
            dev_campaign=resource_info["dev_campaign"],
            log_folders_and_files=context_dict)
        # Build archive to push
        status, msg = tcr_push.setup()
        if status == Global.SUCCESS:
            # Push a resource to TCR Reporting tool via REST API interface (Test case level)
            self._tcr_instance.send_campaign_resource(
                resource=tcr_push.zip_acs_report_tcr)
        else:
            LOGGER_FWK.error(
                "Error when building campaign result archive! {}".format(
                    msg))
    else:
        LOGGER_FWK.warning('Only TCR offers REST API for resources!')
def send_test_case_resource(self, resource, display_name=None, retention="SHORT", iteration=False):
    """
    Push a resource onto (TCR Only) REST API for a given Test Case.

    :param str resource: Local resource to be pushed onto TCR at Test Case level.
    :param str display_name: (optional) Filename to be displayed in the UI
    :param str retention: (optional) "SHORT" or "LONG"
    :param bool iteration: (optional) True if the test case has at least two iterations
    """
    if not self._tcr_instance:
        LOGGER_FWK.warning('Only TCR offers REST API for resources!')
        return
    # Forward to the TCR REST API client (test case level)
    self._tcr_instance.send_testcase_resource(resource,
                                              display_name=display_name,
                                              retention=retention,
                                              iteration=iteration)
def compute_timeout_from_file_size(file_path, min_timeout=0):
    """
    Compute a timeout depending the file's size.

    :type file_path: str
    :param file_path: File from which a timeout will be computed

    :type min_timeout: int
    :param min_timeout: Minimum timeout (in sec) to set if the file size is too small

    :rtype: int
    :return: timeout (in sec) computed from the file size
    """
    timeout = min_timeout
    if os.path.isfile(file_path):
        app_size = os.path.getsize(file_path)
        # Heuristic: roughly 4 KiB of payload per second
        computed = int(app_size / 1024 / 4)
        if computed > timeout:
            timeout = computed
        LOGGER_FWK.debug("app size: %dB, timeout: %ds" % (app_size, timeout))
    return timeout
def create(module_name, device, global_conf):
    """
    Create and return list of device module.

    :type module_name: str
    :param module_name: name of the module to be created

    :type device: py:class:`~src.Device.Model.IDevice.py`
    :param device: device instance that request the module creation

    :type global_conf: dict
    :param global_conf: ACS global configuration

    :rtype: list of module
    :return: instantiated and configured modules (empty list when the
        device declares no configuration for ``module_name``)

    :raise AcsConfigException: when the module is declared by the device
        but has no configuration
    """
    modules = []
    if device.config.device_modules and module_name in device.config.device_modules:
        module_configurations = device.config.device_modules[module_name]
        if not module_configurations:
            raise AcsConfigException(
                "Cannot load \"{0}\" device module".format(module_name),
                "Cannot find module configuration.")
        # One module instance is created per declared configuration
        for module_configuration in module_configurations:
            module = DeviceModuleFactory._instantiate_module(
                module_name, module_configuration)
            # Wire the module into the framework context
            module.device = device
            module.logger = logging.getLogger(
                "%s.%s" % (ACS_LOGGER_NAME, module_name.upper()))
            module.global_conf = global_conf
            module.name = module_name
            module.load_conf()
            DeviceModuleFactory._update_parameter(module,
                                                  module_configuration,
                                                  module_name)
            LOGGER_FWK.debug("Create Module '{0}' based on : {1}".format(
                module_name, module_configuration.class_name))
            LOGGER_FWK.debug("Module default parameters values are:")
            for key, value in module.configuration.items():
                LOGGER_FWK.debug("\t {0} : {1}".format(key, value))
            LOGGER_DEVICE_STATS.info(
                "Create device_module={0}; device_module_class={1}; device_module_conf={2}"
                .format(module_name, module_configuration.class_name,
                        module_configuration.config))
            modules.append(module)
    return modules