class DbTestRunDelete(object):
    """ Db TestRun Data Delete Utility Class

    Command-line utility that selects testruns (by name, checkpoint and
    project) marked for deletion in a validation result database (SQLite
    file or Oracle master DB), deletes them, verifies the deletion and
    only then commits.
    """

    def __init__(self):
        # connection settings: either a SQLite file OR the Oracle
        # user/password set is used, decided in __initialize()
        self.__dbfile = None
        self.__masterdbdsn = None
        self.__masterdbdbq = None
        self.__masterdbuser = None
        self.__masterdbpassword = None
        self.__masterdbschemaprefix = None
        self.__db_connector = None
        self.__masterdbdrv = None
        # testrun selection criteria (filled by __parse_arguements)
        self.__trname = None
        self.__checkpoint = None
        self.__projname = None
        # max number of parent testruns to delete; -1 means all (see -l option)
        self.__limit = 10
        # ids of the testruns selected for deletion
        self.__trids = []
        self.__logger = Logger(self.__class__.__name__, level=MODE)
        # db interfaces, created in __initialize(): global db (project lookup)
        # and validation result db
        self._dbgbl = None
        self._dbval = None

    def __initialize(self, line=None):
        """ Initialize Export process with Establishing connection and parsing argument

        :param line: optional command line string to parse instead of sys.argv
        """
        self.__parse_arguements(line)

        # no SQLite file given -> connect to Oracle master db with user/password
        if self.__dbfile is None:
            self._dbval = BaseValResDB(
                "uid=%s;pwd=%s" % (self.__masterdbuser, self.__masterdbpassword),
                table_prefix="%s." % (self.__masterdbuser),
                error_tolerance=ERROR_TOLERANCE_NONE)
            self._dbgbl = BaseGblDB(
                "uid=%s;pwd=%s" % (self.__masterdbuser, self.__masterdbpassword),
                table_prefix="%s." % (self.__masterdbuser),
                error_tolerance=ERROR_TOLERANCE_NONE)
        else:
            self._dbval = BaseValResDB(self.__dbfile,
                                       error_tolerance=ERROR_TOLERANCE_NONE)
            self._dbgbl = BaseGblDB(self.__dbfile,
                                    table_prefix="%s." % (self.__masterdbuser),
                                    error_tolerance=ERROR_TOLERANCE_NONE)

    def __terminate(self):
        """ Terminating method with closing database connections """
        self._dbval.close()
        self._dbgbl.close()

    def __parse_arguements(self, line=None):
        """ Parsing commandline arguements

        NOTE(review): method name keeps the historic typo 'arguements'.

        :param line: optional command line string; if None sys.argv is parsed
        """
        optparser = OptionParser(usage="usage: %prog [options] command")
        optparser.add_option("-f", "--dbfile", dest="dbfile",
                             help="The name of the Sqlite database file. ")
        optparser.add_option("-b", "--master-db-dsn", dest="masterdbdsn",
                             help="The name of the DSN.")
        optparser.add_option("-q", "--master-db-dbq", dest="masterdbdbq",
                             help="The name of the DBQ.")
        optparser.add_option("-u", "--master-db-user", dest="masterdbuser",
                             help="The name of the oracle database user.")
        optparser.add_option("-p", "--master-db-password", dest="masterdbpassword",
                             help="The name of the oracle database password.")
        optparser.add_option("-c", "--master-db-schema-prefix", dest="masterdbschemaprefix",
                             help="The name of the oracle database schema prefix.")
        optparser.add_option("-l", "--limit", dest="limit",
                             help="MAX no. of parent testrun deleted e.g. default:10, -1 all deleted testrun")
        optparser.add_option("-t", "--trname", dest="trname",
                             help="Testrun to import export")
        optparser.add_option("-v", "--checkpoint", dest="checkpoint",
                             help="Checkpoint")
        optparser.add_option("-n", "--prname", dest="prname",
                             help="Project Name e.g. ARS400_PR")

        if line is not None:
            cmd_options = optparser.parse_args(line.split())
        else:
            cmd_options = optparser.parse_args()

        # cmd_options[0] is the optparse Values object
        self.__dbfile = cmd_options[0].dbfile
        self.__masterdbdsn = cmd_options[0].masterdbdsn
        self.__masterdbdbq = cmd_options[0].masterdbdbq
        self.__masterdbuser = cmd_options[0].masterdbuser
        self.__masterdbpassword = cmd_options[0].masterdbpassword
        self.__masterdbschemaprefix = cmd_options[0].masterdbschemaprefix
        if cmd_options[0].limit is not None:
            self.__limit = int(cmd_options[0].limit)
        self.__trname = cmd_options[0].trname
        self.__checkpoint = cmd_options[0].checkpoint
        self.__projname = cmd_options[0].prname

    def delete_test_run_data(self, line=None):
        """ Main function of DB Delete Testrun

        Selects the testruns to delete, deletes them, verifies each delete
        and commits only if all succeeded; raises StandardError (without
        commit) if any testrun is still readable afterwards.

        :param line: optional command line string to parse instead of sys.argv
        :raise StandardError: if any deleted testrun can still be read back
        """
        start_date = datetime.now()
        self.__logger.info("Starting TestRun Delete at %s"
                           % start_date.strftime("%d-%m-%Y %H:%M:%S"))
        self.__initialize(line)

        # resolve project name to its db id if given
        if self.__projname is not None:
            pid = self._dbgbl.GetProjectId(self.__projname.upper())
        else:
            pid = None

        self.__trids = self._dbval.get_deleted_testrun_ids(name=self.__trname,
                                                           checkpoint=self.__checkpoint,
                                                           pid=pid, limit=self.__limit,
                                                           distinct=False)
        for trid in self.__trids:
            self._dbval.delete_testrun(tr_id=trid)

        # verify deletion (in reverse order): if any testrun is still
        # readable the delete failed -> abort WITHOUT committing
        for trid in reversed(self.__trids):
            tr_rec = self._dbval.get_testrun(tr_id=trid)
            if len(tr_rec) > 0:
                self.__logger.error("Testrun with Id = %d delete attempt failed" % trid)
                self.__logger.error("Delete operation Aborted with no Commit Changes in Database")
                raise StandardError("Operation Aborted")

        end_date = datetime.now()
        duration = end_date - start_date
        # all deletes verified -> make them permanent
        self._dbval.commit()
        print str(tuple(self.__trids))
        self.__logger.info("Delete Finshed with Total Duration = %s " % str(duration))
        self.__logger.info("Total Testrun deleted = %s " % str(len(self.__trids)))
        print "exit"
class ProcessManager(object):
    r"""
    valf internal class to provide essential processing for observers

    - initialize

        - start logger
        - initialize data_manager
        - search classes based on class BaseComponentInterface

    - load configuration

        - import declared observer modules
        - set data ports

    - run validation

        - call all methods of all observers sequentially
        - use bpl_reader or similar to run through all recordings

    This class also is responsible to read out configuration and interpretation from config file.

    general used ports on bus ``Global``:

        - set "ConfigFileVersions"
            dict with file name as key and version as value for each loaded config file
        - read "FileCount"
            to show progress bar
        - read "IsFinished"
            to continue with next state when all sections of a recording are validated (set by `SignalExtractor`)

    Also setting ports as defined in ``InputData`` for the named bus.
    """

    def __init__(self, plugin_dir, fail_on_error=False):
        """init essencials

        :param plugin_dir: path or list of paths where to start search for observers
        :type plugin_dir: string or list of strings
        :param fail_on_error: flag to break immediately if an exception is found
        :type fail_on_error: boolean
        """
        self._logger = Logger(self.__class__.__name__)
        self._logger.debug()

        # observer instances ordered by their config "Order" value
        self._component_list = []

        self._version = "$Revision: 1.11 $"

        self._progressbar = None
        self._file_count = 0
        # raw {Order, ComponentName, ClsObj} dicts collected during config load
        self._object_map_list = []
        self._config_file_loaded = False
        self._fail_on_error = fail_on_error
        self._configfiles = []  # used as stack to load configs recursively
        self._config_file_versions = {}

        self._uncrepl = UncRepl()
        # extend the search path by the default observer dirs (no duplicates)
        plugin_dir.extend([
            self._uncrepl(dir_) for dir_ in OBS_DIRS if dir_ not in plugin_dir
        ])

        self._logger.info("Searching for plug-ins. Please wait...")
        class_map_list, self._plugin_error_list = find_class(
            bci, plugin_dir, with_error_list=True)
        if class_map_list is None:
            self._logger.error("No plug-ins found.")
            return

        self._logger.debug("%d plug-ins found: %s."
                           % (len(class_map_list), ", ".join(
                               [i['name'] for i in class_map_list])))
        # map observer class name -> class object, for instantiation on load
        self._plugin_map = {
            plugin['name']: plugin["type"] for plugin in class_map_list
        }

        # Create data manager object
        try:
            self._data_manager = DataManager()
        except:
            self._logger.exception("Couldn't instantiate 'DataManager' class.")
            if self._fail_on_error:
                raise
            sexit(bci.RET_VAL_ERROR)

    def _initialize(self):
        """calls initialize and post_initialize of ordered observers

        :return: bci.RET_VAL_OK on success, bci.RET_VAL_ERROR otherwise
        """
        self._logger.debug()

        # Calls Initialize for each component in the list
        for component in self._component_list:
            try:
                if component.Initialize() != bci.RET_VAL_OK:
                    self._logger.error(
                        "Class '%s' returned with error from Initialize() method."
                        % component.__class__.__name__)
                    return bci.RET_VAL_ERROR
            except:
                self._logger.exception(
                    'EXCEPTION during Initialize of %s:\n%s'
                    % (component.__class__.__name__, format_exc()))
                if self._fail_on_error:
                    raise
                return bci.RET_VAL_ERROR

        # Calls PostInitialize for each component in the list
        for component in self._component_list:
            try:
                if component.PostInitialize() != bci.RET_VAL_OK:
                    self._logger.error(
                        "Class '%s' returned with error from PostInitialize() method."
                        % component.__class__.__name__)
                    return bci.RET_VAL_ERROR
            except:
                self._logger.exception(
                    'EXCEPTION during PostInitialize of %s:\n%s'
                    % (component.__class__.__name__, format_exc()))
                if self._fail_on_error:
                    raise
                return bci.RET_VAL_ERROR

        # FileCount port drives the progress bar during _process_data
        self._file_count = self.get_data_port("FileCount")
        if self._file_count > 0:
            self._progressbar = ProgressBar(0, self._file_count, multiline=True)
        else:
            self._file_count = 0

        self._logger.debug("all components ready to run!")
        self._logger.mem_usage()
        return bci.RET_VAL_OK

    def _process_data(self):
        """calls load_data, process_data as well as post_process_data of ordered observers

        Loops until the "IsFinished" port is set; a failing observer skips
        the current sim file (or re-raises when fail_on_error is set).

        :return: bci.RET_VAL_OK if at least one file was processed completely,
                 bci.RET_VAL_ERROR otherwise
        """
        self._logger.debug()

        if self._file_count == 0:
            self._logger.debug(
                str(_getframe().f_code.co_name) + "No files to process.")
            return RET_VAL_OK

        ret = bci.RET_VAL_ERROR
        counter = 0

        while not self.get_data_port("IsFinished"):
            # update progressbar position
            self._progressbar(counter)

            counter += 1

            # Calls LoadData for each component in the list
            for component in self._component_list:
                try:
                    ret = component.LoadData()
                    if ret is bci.RET_VAL_ERROR:
                        self._logger.error(
                            "Class '%s' returned with error from LoadData() method, "
                            "continue with next sim file."
                            % component.__class__.__name__)
                        break
                except:
                    self._logger.exception(
                        'exception raised during LoadData of %s:\n%s, '
                        'continue with next sim file.'
                        % (component.__class__.__name__, format_exc()))
                    ret = bci.RET_VAL_ERROR
                    if self._fail_on_error:
                        raise
                    break

            if ret is bci.RET_VAL_ERROR:
                continue

            # Calls ProcessData for each component in the list
            for component in self._component_list:
                try:
                    ret = component.ProcessData()
                    if ret is bci.RET_VAL_ERROR:
                        self._logger.error(
                            "Class '%s' returned with error from ProcessData() method, "
                            "continue with next sim file."
                            % component.__class__.__name__)
                        break
                except:
                    self._logger.exception(
                        'EXCEPTION during ProcessData of %s:\n%s, '
                        'continue with next sim file.'
                        % (component.__class__.__name__, format_exc()))
                    ret = bci.RET_VAL_ERROR
                    if self._fail_on_error:
                        raise
                    break

            if ret is bci.RET_VAL_ERROR:
                continue

            # Calls PostProcessData for each component in the list
            for component in self._component_list:
                try:
                    ret = component.PostProcessData()
                    if ret is bci.RET_VAL_ERROR:
                        self._logger.error(
                            "Class '%s' returned with error from PostProcessData() method, "
                            "continue with next sim file."
                            % component.__class__.__name__)
                        break
                except:
                    self._logger.exception(
                        'EXCEPTION during PostProcessData of %s:\n%s, '
                        'continue with next sim file.'
                        % (component.__class__.__name__, format_exc()))
                    ret = bci.RET_VAL_ERROR
                    if self._fail_on_error:
                        raise
                    break

            if ret is bci.RET_VAL_ERROR:
                continue

            # we have processed correctly at least a file,
            # set _process_data return value to OK in order to finish it's process
            self._logger.mem_usage()
            ret = bci.RET_VAL_OK

        if counter > 0:
            self._progressbar(counter)

        return ret

    def _terminate(self):
        """calls pre_terminate and terminate of ordered observers

        :return: bci.RET_VAL_OK on success, bci.RET_VAL_ERROR otherwise
        """
        self._logger.debug()

        # Calls PreTerminate for each component in the list
        for component in self._component_list:
            try:
                if component.PreTerminate() != bci.RET_VAL_OK:
                    self._logger.error(
                        "Class '%s' returned with error from PreTerminate() method."
                        % component.__class__.__name__)
                    return bci.RET_VAL_ERROR
            except Exception:
                self._logger.exception(
                    'EXCEPTION during PreTerminate of observer %s:\n%s'
                    % (component.__class__.__name__, format_exc()))
                if self._fail_on_error:
                    raise
                return bci.RET_VAL_ERROR

        # Calls Terminate for each component in the list
        for component in self._component_list:
            try:
                if component.Terminate() != bci.RET_VAL_OK:
                    self._logger.exception(
                        "Class '%s' returned with error from Terminate() method."
                        % component.__class__.__name__)
                    return bci.RET_VAL_ERROR
            except:
                self._logger.exception(
                    'EXCEPTION during Terminate of observer %s:\n%s'
                    % (component.__class__.__name__, format_exc()))
                if self._fail_on_error:
                    raise
                return bci.RET_VAL_ERROR

        return bci.RET_VAL_OK

    def get_data_port(self, port_name, bus_name="Global"):
        """gets data from a bus/port

        :param port_name: port name to use
        :param bus_name: bus name to use
        :return: data from bus/port
        """
        return self._data_manager.get_data_port(port_name, bus_name)

    def set_data_port(self, port_name, port_value, bus_name="Global"):
        """sets data to a bus/port

        :param port_name: port name to use
        :param port_value: data value to be set
        :param bus_name: bus name to use
        :return: data from bus/port
        """
        self._data_manager.set_data_port(port_name, port_value, bus_name)

    def _get_err_trace(self):
        """returns error trace from error list

        :return: formatted multi-line string of plug-in import errors,
                 or a placeholder when no errors were recorded
        """
        if self._plugin_error_list:
            err_trace = '\n'.join('++ file: {0}.py -- {1}\n'.format(
                e[0], e[1].replace('\n', '\n--> '))
                for e in self._plugin_error_list)
        else:
            err_trace = 'no detailed info about failure'
        return err_trace

    def load_configuration(self, configfile):
        """loads configuration from cfg-file

        see more details in `Valf.LoadConfig`

        :param configfile: path/to/file.cfg
        :return: success (bool)
        :raise ValfError: if the config file does not exist or an observer
                          class cannot be found
        """
        configfile = self._uncrepl(configfile)
        cls_obj = None

        if not opath.exists(configfile):
            raise ValfError(
                "Configuration file '%s' doesn't exist or is invalid."
                % configfile)

        self.set_data_port(CFG_FILE_VERSION_PORT_NAME,
                           self._config_file_versions)
        # -1 seed so the first auto-assigned Order becomes 0
        autoorder = [-1]
        component_map = self._read_config(configfile)
        self._logger.info(
            "loading version: '%s' of config file '%s'"
            % (self._config_file_versions.get(configfile, ""), configfile))
        for componentname in component_map:
            try:
                # retrieve details
                # NOTE(review): config values are eval'd -- config files must
                # be trusted input
                class_name = eval(component_map[componentname].get(
                    "ClassName", "None"))
                port_out_list = eval(component_map[componentname].get(
                    "PortOut", "[]"))
                input_data_list = eval(component_map[componentname].get(
                    "InputData", "[]"))
                connect_bus_list = eval(component_map[componentname].get(
                    "ConnectBus", "Bus#1"))

                # missing "Order" gets the next free auto order value
                order = component_map[componentname].get(
                    "Order", max(autoorder) + 1)
                if order in autoorder:
                    self._logger.info(
                        "order %d for component %s already in use!"
                        % (order, componentname))
                autoorder.append(order)
                # check them, they should be there all!
                if (componentname != "Global" and
                        (class_name is None or port_out_list is None or
                         input_data_list is None or connect_bus_list is None)):
                    msg = "Invalid port value or syntax wrong on component: '%s' with parsed settings\n" \
                          "ClassName: %s, PortOut: %s,\n" \
                          "InputData: %s, \n" \
                          "ConnectBus: %s\n"\
                          "  only ClassName for 'Global' can be None, compare parsed settings with defines in config." \
                          % (componentname, class_name, port_out_list,
                             input_data_list, connect_bus_list)
                    raise ValueError(msg)
            except Exception, err:
                self._logger.error(err)
                if self._fail_on_error:
                    raise
                continue

            if type(connect_bus_list) not in (list, tuple):
                connect_bus_list = [connect_bus_list]

            if class_name in self._plugin_map:
                # Observer can be loaded -> Everything fine.
                cls_obj = self._plugin_map[class_name](self._data_manager,
                                                       componentname,
                                                       connect_bus_list)
            elif componentname != "Global":
                # Observer can NOT be loaded -> Create Log Entry and raise Exception !
                err_trace = self._get_err_trace()

                # Create Log Entry
                self._logger.error('some python modules have coding errors')
                self._logger.error(
                    'Please check following list for more details:')
                self._logger.error(err_trace)

                msg = "Observer with ClassName %s not found, please check log for more info!" % class_name
                self._logger.error(msg)
                self._logger.error("File: \"valf.log\"")
                raise ValfError(msg, ValfError.ERR_OBSERVER_CLASS_NOT_FOUND)

            # register every declared output port (initially None) on its buses
            for port_out in port_out_list:
                for bus_name in connect_bus_list:
                    tmp = "Register port: Provider="
                    tmp += "'%s', PortName='%s', Bus='%s'." % (
                        componentname, port_out, bus_name)
                    self._logger.debug(tmp)
                    self.set_data_port(port_out, None, bus_name)

            if type(input_data_list) == list:  # do it the usual way
                for input_data in input_data_list:
                    param_name = input_data[0]
                    param_value = input_data[1]
                    for bus_name in connect_bus_list:
                        tmp = "Setting input data.[Component='%s', " % componentname
                        tmp += "Bus='%s', PortName='%s', " % (bus_name,
                                                              param_name)
                        tmp += "PortValue=%s]" % str(param_value)
                        self._logger.debug(tmp)
                        self.set_data_port(param_name, param_value, bus_name)
            elif type(input_data_list) == dict:  # we've got key value pairs already
                for param_name, param_value in input_data_list.iteritems():
                    for bus_name in connect_bus_list:
                        tmp = "Setting input data.[Component='%s', " % componentname
                        tmp += "Bus='%s', PortName='%s', " % (bus_name,
                                                              param_name)
                        tmp += "PortValue=%s]" % str(param_value)
                        self._logger.debug(tmp)
                        self.set_data_port(param_name, param_value, bus_name)

            if componentname != "Global":
                self._object_map_list.append({
                    "Order": order,
                    "ComponentName": componentname,
                    "ClsObj": cls_obj
                })

        # If whole Observer loading is done successfully,
        # we write anyway all found coding errors into the Log File as warnings
        if self._plugin_error_list:
            err_trace = self._get_err_trace()
            self._logger.warning('some python modules have coding errors')
            self._logger.warning(
                'Please check following list for more details:')
            self._logger.warning(err_trace)

        # sort the observers by their "Order" value into the component list
        self._component_list = []
        if len(self._object_map_list):
            self._object_map_list.sort(key=lambda x: x["Order"])

            for object_map in self._object_map_list:
                self._component_list.append(object_map["ClsObj"])

        if not self._component_list:
            self._logger.error(
                "No component loaded. Please check config file '%s'."
                % str(configfile))
            return False

        self._config_file_loaded = True
        return True
class MtsCfgSection(object):
    """
    MTS config section class to model a measurement object (MO)
    """

    def __init__(self, string):
        """Init instance of MtsCfgSection

        :param string: MTS config-like multi-line string containing the MO definition
        :type string: str|unicode
        """
        super(MtsCfgSection, self).__init__()

        self._params = OrderedDict()
        self._tag = None
        self._logger = Logger(self.__class__.__name__)
        self._parse(string)

    def _parse(self, string):
        """Parse the given section string and store tag and parameters.

        :param string: MTS config-like multi-line string containing the MO definition
        :type string: str|unicode
        :raise MtsSectionError: if the first line is not a valid ``[TAG]`` header
        """
        # section header, e.g. "[SIM VFB]" -> tag "SIM VFB"
        header = match(r'\[(.+)\]\s*\n', string)
        if header is None:
            raise MtsSectionError("The given string to be parsed does not specify a correct tag for the section.")
        self._tag = header.group(1)

        # body: drop the header line and join continuation lines ("\" + newline)
        body = resub(r'\\\s*\n\s*', '', resub(r'.+\]\s*\n', '', string))

        def _unquote(text):
            # strip one surrounding pair of double quotes, if present
            return resub(r'^"', '', resub(r'"$', '', text))

        # each non-empty body line is one "name=value[, value...]" parameter
        for entry in split(r'\s*\n\s*', body):
            if match(r'\s*$', entry):
                continue
            key, raw_values = match(r'^(.+?)=(.+)$', entry).groups()
            self._add_param(key, [_unquote(v) for v in split(r',\s*', raw_values)])

    def _add_param(self, var, values_list):
        """Add a new parameter to the instance

        A single-element list is stored as a scalar value.

        :param var: Name of the parameter
        :type var: str|unicode
        :param values_list: List of values for the given parameter
        :type values_list: list
        """
        if len(values_list) > 1:
            self._params[var] = values_list
        else:
            self._params[var] = values_list[0]

    @property
    def tag(self):
        """MO name
        """
        return self._tag

    @property
    def params(self):
        """Dict of parameters
        """
        return self._params

    @property
    def mo_class(self):
        """Class name of the MO. None if not known.
        """
        if "Class" in self._params:
            return self._params["Class"]
        self._logger.info("Section {tag} does not provide 'Class' info".format(tag=self.tag))
        return None

    def __getitem__(self, item):
        return self.params[item]

    def __len__(self):
        return len(self.params)

    def __iter__(self):
        return self.params.__iter__()

    def __str__(self):
        return self.tag + ": " + str(self.params)

    def __ne__(self, other):
        # sections are equal iff both tag and parameters match
        return not (self.tag == other.tag and self.params == other.params)

    def __eq__(self, other):
        return not self.__ne__(other)
def main():
    """main function"""
    logger = Logger(str(sys._getframe().f_code.co_name), INFO)

    # Parse command line parameters
    usage = 'usage: %prog [options] <cfg_files_in> \n with <cfg_files_in> = '
    usage += '"<path\\filename>, <path\\filename>, ..."'
    optparser = OptionParser(usage=usage)
    optparser.add_option("-o", "--out-file", dest="outfiles",
                         help="The output files to write. [default=<cfg_file_in>_sorted.cfg]")
    optparser.add_option("-m", "--mode", dest="mode", default='0',
                         help="The sort mode to use. [0 = default = only sections, 1 = sections + properties]")
    opts, pos_args = optparser.parse_args()

    # no input files given -> just print usage
    if not pos_args:
        optparser.print_help()
        return

    # prepare infiles
    infiles = split_strip_string(pos_args[0], ',')

    if opts.mode not in list(MODES.keys()):
        logger.error("Sort mode %s unknown, possible modes: \n %s!" % (opts.mode, MODES))
        return

    # prepare outfiles
    outfiles = [] if opts.outfiles is None else split_strip_string(opts.outfiles, ',')

    # sort every input file in turn
    for idx, infile in enumerate(infiles):
        logger.info("Start sorting file %d: %s\n ..." % (idx, infile))

        # derive outfile name when none was supplied for this position
        if len(outfiles) < idx + 1:
            base, ext = infile.rsplit('.', 1)
            outfiles.append(base + '_sorted.' + ext)

        if outfiles[idx] in infiles:
            # never overwrite infiles
            logger.error('Overwrite existing infile is not allowed: %s.' % infile)
            logger.error('The original problem occured here: %s' % str(sys.exc_info()))
            raise IOError('Overwrite existing infile is not allowed: %s.' % infile)

        if isfile(outfiles[idx]):
            # ask to overwrite if oufile already exists
            print(' You are going to overwrite the file %s.' % outfiles[idx])
            print(' Do you really want to continue?')
            answer = str(input(' press Enter to continue or any key to break\n'))
            if answer:
                print('stopped by user')
                continue

        # sorting
        MtsConfig(infile, outfiles[idx], logger).sort(opts.mode)

    # done
    logger.info("Done.")
class GenReport(object):
    """
    generate pdf report and excel table

    Loads a TestRun (and optionally a reference TestRun for regression
    reports) from the validation result db and writes either a pdf report
    or an excel table, depending on the output file extension.
    """

    def __init__(self):
        # report configuration, filled by __parse_arguments()
        self.__report_level = None  # management / detailed / developer
        self.__report_type = None   # EXCEL_REPORT or PDF_REPORT (from outfile ext.)
        self.__testrun_id = None
        self.__reftest_id = None    # optional reference testrun for regression report
        self.__outfile = None
        # db interfaces, created in _initialize()
        self.__db_gbl = None
        self.__db_val = None
        self.__db_cat = None
        # db connection settings: SQLite file, tech schema name or Oracle master
        self.__dbfile = None
        self.__dbtech = None
        self.__masterdbdrv = None
        self.__masterdbdsn = None
        self.__masterdbdbq = None
        self.__masterdbuser = None
        self.__masterdbpassword = None
        self.__masterdbschemaprefix = None
        self.__db_connector = None
        self.__logger = Logger(self.__class__.__name__, level=MODE)
        self.excel_header = []
        # addon for testing this script:
        self.__val_obs_name = "UNIT_TEST_DEMO_TYPE"
        self.__coll_id = 0
        self.__coll_name = 'TestResultsAPIDemo'
        self.__meas_ids = []

    def __parse_arguments(self):
        """
        get user options

        usage: gen_report.py [-h] [-m | -d | -a] [-f DBFILE | -t SENSOR_TECH | -u MASTERDB_USER]
                             [-p MASTERDB_PASSWORD] [-c MASTERDB_SCHEMAPREFIX]
                             [-b MASTERDB_DSN | -q MASTERDB_DBQ]
                             testrun_id out_file
        """
        opts = ArgumentParser(description=__doc__,
                              formatter_class=RawDescriptionHelpFormatter)
        # mandatory settings:
        opts.add_argument('testrun_id', type=str,
                          help='testrun id as stored in val-db')
        opts.add_argument('out_file', type=str,
                          help='path/name of report file to generate (*.xls or *.pdf)')
        opts.add_argument('-r', '--reftest_id', dest='reftest_id', type=str,
                          help='reference test id as in val-db for regression report')
        # optional: set report type: level of details [-m|-d|-f]
        sel_type = opts.add_mutually_exclusive_group()
        sel_type.add_argument('-m', '--management', dest='rep_type',
                              action='store_const',
                              const=AlgoTestReport.REP_MANAGEMENT,
                              help='generate management report (no details, no errorlists)')
        sel_type.add_argument('-d', '--detailed', dest='rep_type',
                              action='store_const',
                              const=AlgoTestReport.REP_DETAILED,
                              help='generate detailed report (default: details, errorlists)')
        sel_type.add_argument('-a', '--all', dest='rep_type',
                              action='store_const',
                              const=AlgoTestReport.REP_DEVELOPER,
                              help='generate all chapters for developer report')
        # database settings - [-f|[-u,-p,-c,[-b|-q]]
        db_set = opts.add_argument_group('db settings',
                                         'select either SqLite or Oracle')
        db_opts = db_set.add_mutually_exclusive_group()
        db_opts.add_argument("-f", "--dbfile", dest="dbfile",
                             help="The name of the SQlite database file. ")
        db_opts.add_argument("-t", "--techname", dest="dbtech",
                             help="Oracle sensor tech schema name like ARS4XX, MFC4XX or VGA")
        db_opts.add_argument("-u", "--master-db-user", dest="masterdb_user",
                             type=str,
                             help="The name of the oracle database user.")
        db_conn = opts.add_argument_group('oracle db', '')
        db_conn.add_argument("-p", "--master-db-password",
                             dest="masterdb_password", type=str,
                             help="The name of the oracle database password.")
        db_conn.add_argument("-c", "--master-db-schema-prefix",
                             dest="masterdb_schemaprefix", type=str,
                             default=DB_MASTER_SCHEMA_PREFIX,
                             help="The name of the oracle database schema prefix.")
        dbtype = db_conn.add_mutually_exclusive_group()
        dbtype.add_argument("-b", "--master-db-dsn", dest="masterdb_dsn",
                            help="The name of the DSN, opt.")
        dbtype.add_argument("-q", "--master-db-dbq", dest="masterdb_dbq",
                            help="The name of the DBQ, default: %s" % DB_MASTER_DBQ)
        args = opts.parse_args()

        # default report type: detailed
        if args.rep_type is None:
            args.rep_type = AlgoTestReport.REP_DETAILED
        self.__report_level = args.rep_type

        self.__testrun_id = args.testrun_id
        self.__reftest_id = args.reftest_id

        # report format is derived from the output file extension
        self.__outfile = args.out_file
        ext = opath.splitext(args.out_file)
        if '.xlsx' == ext[1]:
            self.__report_type = EXCEL_REPORT
        elif ext[1] == '.pdf':
            self.__report_type = PDF_REPORT
        else:
            self.__logger.error('wrong output file extension! Use "*.xlsx" or ".pdf" only!')
            sexit(ERROR)

        # db settings
        if not args.masterdb_dsn and not args.masterdb_dbq:
            args.masterdb_dbq = DB_MASTER_DBQ
        if args.dbfile is not None:
            self.__dbfile = args.dbfile
        elif args.dbtech is not None:
            self.__dbtech = args.dbtech
        elif args.masterdb_user is not None:
            self.__masterdbdsn = args.masterdb_dsn
            self.__masterdbdbq = args.masterdb_dbq
            self.__masterdbuser = args.masterdb_user
            self.__masterdbpassword = args.masterdb_password
            self.__masterdbschemaprefix = args.masterdb_schemaprefix
        else:
            self.__logger.error(
                'no connection to Result DB specified,'
                ' enter either sqlite file or DB connection settings (-u -p -c)!'
            )
            sexit(ERROR)

        if args.reftest_id:
            self.__logger.info(
                'generate Regression Test report with reference test id %s'
                % args.reftest_id)
        return

    def _initialize(self):
        """ parse arguments, establish connection """
        self.__parse_arguments()

        # select odbc driver depending on windows release
        if release() == "XP":
            self.__masterdbdrv = db_common.DEFAULT_MASTER_DRV_XP
        else:
            self.__masterdbdrv = db_common.DEFAULT_MASTER_DRV

        # build connection string: Oracle master, tech schema or SQLite file
        if self.__dbfile is None and self.__dbtech is None:
            conn_str = "DBQ={};Uid={};Pwd={}".format(self.__masterdbdbq,
                                                     self.__masterdbuser,
                                                     self.__masterdbpassword)
        elif self.__dbtech is not None:
            conn_str = self.__dbtech
        else:
            conn_str = self.__dbfile

        # val/cat/gbl db interfaces share one db connection
        self.__db_val = val.BaseValResDB(conn_str)
        self.__db_cat = cat.BaseRecCatalogDB(self.__db_val.db_connection)
        self.__db_gbl = gbl.BaseGblDB(self.__db_val.db_connection)

    def _terminate(self):
        """ close database connections """
        self.__db_val.close()
        self.__db_cat.close()
        self.__db_gbl.close()

    def gererate_report(self):
        """ generate the pdf and excel report, main method

        NOTE(review): method name keeps the historic typo 'gererate' -- it is
        the public entry point, renaming would break callers.
        """
        self._initialize()

        if self.__report_level == AlgoTestReport.REP_MANAGEMENT:
            self.__logger.info('generate management report for TestRun %s'
                               % self.__testrun_id)
        if self.__report_level == AlgoTestReport.REP_DETAILED:
            self.__logger.info('generate detailed report for TestRun %s'
                               % self.__testrun_id)
        if self.__report_level == AlgoTestReport.REP_DEVELOPER:
            self.__logger.info(
                'generate full developer report for TestRun %s, all chapters'
                % self.__testrun_id)

        testrun = TestRun()

        testrun.Load(self.__db_val, self.__db_gbl, self.__db_cat,
                     self.__testrun_id)
        # for testing CR 220008 before saving of RuntimeJob is implemented
        # testrun.AddRuntimeJob(3988)  # 3988: 5/0/0  3445:66/66/67
        # testrun.AddRuntimeJob(3445)
        # optional reference testrun for a regression report; fall back to a
        # normal report when it can't be loaded
        reftest = None
        if self.__reftest_id:
            reftest = TestRun()
            if reftest.Load(self.__db_val, self.__db_gbl, self.__db_cat,
                            self.__reftest_id) is False:
                self.__logger.error(
                    '!! Reference Testrun not found with id: %s !!'
                    % self.__reftest_id)
                self.__logger.error(
                    'Generating normal report instead Regression Test Report!')
                reftest = None

        if testrun is not None:
            # load all testcases and runtime incidents of the testrun
            for testcase in testrun.GetTestcases():
                testcase.Load(self.__db_val, self.__db_gbl, self.__db_cat,
                              testrun.GetId(),
                              level=ValSaveLoadLevel.VAL_DB_LEVEL_ALL)
            for job in testrun.runtime_details:
                job.LoadHpcIncidents()
            if reftest:
                for testcase in reftest.GetTestcases():
                    testcase.Load(self.__db_val, self.__db_gbl, self.__db_cat,
                                  reftest.GetId(),
                                  level=ValSaveLoadLevel.VAL_DB_LEVEL_ALL)

            if self.__report_type == PDF_REPORT:
                self.generate_pdf(testrun, self.__outfile, reftest)
            elif self.__report_type == EXCEL_REPORT:
                self.generate_excel(testrun, self.__outfile)
        return

    def generate_pdf(self, testrun, outfile, reftest=None):
        """ generate pdf report as specified in call options

        :param testrun: testrun as loaded from ResultDB
        :type testrun: `TestRun`
        :param outfile: path and filename of the report file
        :type outfile: str
        :param reftest: optional reference testrun for regression reports
        :type reftest: `TestRun`
        """
        report = AlgoTestReport(testrun, reftest)
        report.build(outfile, level=self.__report_level)

    def generate_excel(self, testrun, outfile):
        """ generate excel report table as specified in call options

        :param testrun: TestRun Id as in resultDb
        :type testrun: int
        :param outfile: path and filename of the report file
        :type outfile: str
        """
        # init the excel stuff; remove a stale output file first (best effort)
        try:
            unlink(outfile)
        except Exception:  # pylint: disable=W0703
            pass
        xls = Excel()
        xls.create_workbook()
        xls.create_worksheet('testruns')  # str(testrun.name))

        # insert table header
        self.excel_header = [
            '_TestID', '_TestDate', '_TesterName', '_TestResult',
            '_test_status', '_FR_Number'
        ]
        column_widths = [30, 15, 15, 30, 15, 30]
        for iheader in xrange(len(self.excel_header)):
            xls.set_cell_value(1, iheader + 1, self.excel_header[iheader])
            xls.set_cell_font_style(1, iheader + 1, bold=True)
            xls.set_column_width(1, iheader + 1, column_widths[iheader])

        row = 2
        row = self.__add_excel_testrun_rows(xls, row, testrun)

        xls.set_format(1, 1, row_to=row, col_to=len(self.excel_header),
                       wrap_text=True)
        try:
            xls.save_workbook(outfile)
            print("Test run successfully exported to '%s'" % outfile)
        except Exception:  # pylint: disable=W0703
            print(":-( couldn't save the workbook to '%s'" % outfile)
        xls.close_workbook()

    def __add_excel_testrun_rows(self, xls, row, testrun):
        """ fill rows for a test run, can be called recursively for child testruns

        template: ['_TestID', '_Test Date', '_TesterName', '_Test Result', '_test_status', '_FR_Number']

        :param xls: Excel workbook
        :type xls: `Excel` as in stk.rep.excel
        :param row: start row for this test run
        :type row: int
        :param testrun: (child-) test run
        :type testrun: `TestRun`
        :returns: current (next empty) row
        :rtype: int
        """
        # test run line:
        xls.set_data([testrun.name], row, 1)
        xls.set_cell_color(row, 1, row, len(self.excel_header), 'Light Orange')
        row += 1
        # go through test run childs
        for trun in testrun.GetChildTestRuns():
            row = self.__add_excel_testrun_rows(xls, row, trun)
        # go through test cases
        for tcase in testrun.GetTestcases(inc_child_tr=False):
            tc_result = tcase.test_result
            xls.set_data([tcase.id, '', '', '', tc_result], row, 1)
            xls.set_cell_color(row, 1, row, len(self.excel_header),
                               'Light Yellow')
            row += 1
            # go trough test steps
            for tstep in tcase.GetTestSteps():
                # todo: JHo add tstep.asmt.userid, tstep.asmt.date and
                #       change last col to asmt.info
                xls.set_data([
                    str(tstep.id), tstep.date, tstep.user_account,
                    str(tstep.meas_result), tstep.test_result, tstep.issue
                ], row, 1)
                row += 1
        return row
class Valf(object):
    """
    class defining methods to easily start validation suites
    by calling a python script without additional option settings (double click in win)

    mandatory settings:

    - outputpath (as instantiation parameter)
    - config file with `LoadConfig`
    - sw version of sw under test with `SetSwVersion`

    see `__init__` for additional options

    returns error level::

      RET_VAL_OK = 0       suite returned without error
      RET_GEN_ERROR = -1   general error
      RET_SYS_EXIT = -2    sys.exit called
      RET_CFG_ERROR = -3   error in direct settings or configuration file

    **Example:**

    .. python::

        # Import valf module
        from stk.valf import valf

        # set output path for logging ect., logging level and directory of plugins (if not subdir of current HEADDIR):
        vsuite = valf.Valf(getenv('HPCTaskDataFolder'), 10)  # logging level DEBUG, default level: INFO

        # mandatory: set config file and version of sw under test
        vsuite.LoadConfig(r'demo\\cfg\\bpl_demo.cfg')
        vsuite.SetSwVersion('AL_STK_V02.00.06')

        # additional defines not already set in config files or to be overwritten:
        vsuite.SetBplFile(r'cfg\\bpl.ini')
        vsuite.SetSimPath(r'\\\\Lifs010.cw01.contiwan.com\\data\\MFC310\\SOD_Development')

        # start validation:
        vsuite.Run()

    :author:        Joachim Hospes
    :date:          29.05.2013
    """

    def __init__(self, outpath, *args, **kwargs):
        """
        initialise all needed variables and settings

          - creates/cleans output folder
          - start process manager
          - start logging of all events, therefore the output path must be given

        :param outpath: path to output directory, can be relative to calling script
        :type outpath: str

        :param args: additional argument list which are also covered by keywords in order of occurrence

        :keyword logging_level: level of details to be displayed. default: info
                                (10=debug, 20=info, 30=warning, 40=error, 50=critical, 60=exception)
        :type logging_level: int [10|20|30|40|50]

        :keyword plugin_search_path: default: parent dir of stk folder, normally parallel to validation scripts
        :type plugin_search_path: str

        :keyword clean_folder: default ``True``, set to ``False`` if the files in output folder should not be deleted
                               during instantiation of Valf
        :type clean_folder: bool

        :keyword logger_name: name of logger is used for logfile name and printed in log file as base name,
                              if not set name/filename of calling function/module is used
        :type logger_name: str

        :keyword fail_on_error: Switch to control exception behaviour, if set
                                exceptions will be re-thrown rather than omitted or logged.
        :type fail_on_error: bool

        :keyword deprecations: set me to False to remove any deprecation warning outputs inside log
        :type deprecations: bool
        """
        self.__version = "$Revision: 1.6 $"
        self._uncrepl = UncRepl()
        self.__data_bus_names = []  # store all names of generated data busses like bus#0
        self.__process_mgr = None

        opts = arg_trans([['logging_level', INFO], ['plugin_search_path', None],
                          ['clean_folder', True], ['logger_name', None],
                          ['fail_on_error', False], ['deprecations', True]],
                         *args, **kwargs)

        self._fail_on_error = opts['fail_on_error']

        # prep output directory: create or clear content
        outpath = self._uncrepl(opath.abspath(outpath))
        clear_folder(outpath, opts['clean_folder'])

        logger_name = opts['logger_name']
        if logger_name is None:
            # get name of calling module
            frm = currentframe().f_back  # : disable=W0212
            if frm.f_code.co_filename:
                logger_name = opath.splitext(opath.basename(frm.f_code.co_filename))[0]
            else:
                logger_name = 'Valf'
        # start logger, first with default level, idea for extension: can be changed later
        self.__logger = Logger(logger_name, opts['logging_level'],
                               filename=opath.join(outpath, logger_name + ".log"))
        self.__logger.info("Validation started at %s." % strftime('%H:%M:%S', localtime(time())))
        self.__logger.info("Validation based on %s STK %s-%s of %s, CP: %s."
                           % ("original" if stk_checksum(True) else "adapted",
                              RELEASE, INTVERS, RELDATE, MKS_CP))
        self.__logger.info("Logging level is set to %s."
                           % next(i for i, k in LEVEL_CALL_MAP.items() if k == opts['logging_level']))
        self.__logger.info("Validation arguments have been:")
        # fix: items() instead of iteritems() - same iteration on Py2, also valid on Py3
        for k, v in opts.items():
            self.__logger.info(" %s: %s" % (k, str(v)))

        if not opts['deprecations']:
            self.__logger.warning("Deprecation warnings have been switched off!")
            DeprecationUsage().status = False

        # find all observers down current path
        plugin_search_path = opts['plugin_search_path']
        plugin_folder_list = []
        if plugin_search_path is None:
            plugin_search_path = [HEAD_DIR]

        # take care of fast connections
        plugin_search_path = [self._uncrepl(i) for i in plugin_search_path]
        for spath in plugin_search_path:
            plugin_folder_list.extend([dirPath for dirPath in list_folders(spath)
                                       if "\\stk\\" not in dirPath])
            # left over from testing??? found in vers.1.14, introduced in 1.6
            # else:
            #     print folder_path
            self.__logger.info('added to plugin search path:' + spath)
        # and add all observers down calling script's path
        stk_plugins = [opath.join(HEAD_DIR, "stk", "valf"),
                       opath.join(HEAD_DIR, "stk", "valf", "obs"),
                       opath.join(HEAD_DIR, "stk", "val")]

        plugin_folder_list.extend(plugin_search_path)

        for spath in stk_plugins:
            plugin_folder_list.append(spath)
            self.__logger.debug('added to plugin search path:' + spath)

        # start process manager
        try:
            self.__process_mgr = ProcessManager(plugin_folder_list, self._fail_on_error)
        except:  # pylint: disable=W0702
            self.__logger.exception("Couldn't instantiate 'ProcessManager' class.")
            if self._fail_on_error:
                raise
            sys.exit(RET_GEN_ERROR)

        self.__process_mgr.set_data_port(OUTPUTDIRPATH_PORT_NAME, outpath)
        self.__logger.debug("OutputDirPath: '%s'" % outpath)

        # set still needed default settings as have been in valf.main
        self.SetMasterDbPrefix(DEFAULT_MASTER_SCHEMA_PREFIX)
        self.SetErrorTolerance(ERROR_TOLERANCE_NONE)

        # should be activated some day, for now not all validation suites can be parallelised
        # if set on default we should invent a method DeactivateHpcAutoSplit to run the remaining or old suites
        # self.SetDataPort("HpcAutoSplit", True, "Global")

    def _check_mandatory_settings(self):
        """ private method check if additional mandatory settings are done

            does not run complete sanity check for config, here we just check additional mandatory settings
            that do not prevent the validation to run if they are missing
            e.g. no test if db connection is defined for cat reader, if not set
            cat reader will stop the initialisation

        :return: number of missing settings, 0 if settings completed
        :rtype: integer
        """
        error_cnt = 0

        if self.GetDataPort("SWVersion", "Global") is None:
            self.__logger.error("version of test sw not defined!")
            error_cnt += 1

        if (self.GetDataPort("HpcAutoSplit", "Global") is True and
                self.GetDataPort("SimSelection", "Global") is not None):
            self.__logger.error("DataPort 'SimSelection' used by HPC, not available if 'HpcAutoSplit' is active!")
            self.__logger.error("Set either 'HpcAutoSplit' to False or don't set 'SimSelection'!")
            error_cnt += 1

        return error_cnt

    def _set_hpc_selection(self):
        """ private method

        if the start script is running as HPC task on an HPC machine then
        set SimSelection to use only the entry given by the task number.

        e.g. for HPC task003: set SimSelection to [2]
        """
        # check HPC usage
        if self.GetDataPort("HpcAutoSplit", "Global") is True:
            task_name = getenv("TaskName")
            try:
                # T0000x task ids start with 1, bpl list index with 0
                task_id = int(match(r'T(\d+)', str(task_name)).group(1)) - 1
            except AttributeError:
                self.__logger.exception("can't set Hpc Auto Split value as HPC environment variable Task Id"
                                        " is empty or not valid: %s" % task_name)
                if self._fail_on_error:
                    raise
                sys.exit(RET_CFG_ERROR)
            self.__logger.info("HpcAutoSplit: using entry %d of the sim collection" % task_id)
            self.SetDataPort("SimSelection", "[%d]" % task_id, "Global")

    def LoadConfig(self, filepath):  # pylint: disable=C0103
        """
        load configuration from path/filename, path can be relative to calling script

        Valid configuration properties are:

            - version: string defining version of config file, added to dict on port "ConfigFileVersions"
            - ClassName: quoted string to determine observer class to include in run (not in section "Global")
            - PortOut: list of port values (quoted strings) which should be exported to given bus name
            - InputData: pythonic list of tuples/lists which are taken and given as input for observer to be configured
            - ConnectBus: list of bus names to connect / register observer to (first one is taken actually)
            - Active: True/False value weather observer should be enabled or not
            - include: file (quoted) to include herein, chapter should be repeated there,
              if include is used within global scope, all chapters from included file are used

        config file example::

            # valf_basic.cfg
            # config for testing Valf class, based on valf_demo settings,
            [Global]
            ; version string will be added to dict on port "ConfigFileVersions":
            version="$Revision: 1.6 $"
            ;PortOut: Informs the name of the port that are set by the component
            PortOut=["ProjectName", "SWVersion", "FunctionName", "Device_Prefix"]
            ;InputData: Declares all input parameters
            InputData=[('ProjectName', 'VALF-test'),
                       ('FunctionName', 'STK_moduletest'),
                       ('SimName', 'N/A'),
                       ('Multiprocess', True ),
                       ('ValName', 'N/A')]
            ;ConnectBus: Specifies the bus connect to the component
            ConnectBus=["Global"]

            ; db connection is needed for the catalog reader only, **deactivated** here!!
            ; connection parameters passed to validation_main.py as options because it will differ for projects
            [DBConnector]
            ClassName="DBConnector"
            InputData=[("UseAllConnections", "True")]
            PortOut=[ "DataBaseObjects"]
            ConnectBus=["DBBus#1"]
            Active=False
            ;Order: Specifies the calling order
            Order=0

            ; bpl reader can be used to read simulation results, but in future the cat_reader should be used
            ;  to test the difference switch Activate setting for BPLReader and CATReader
            [VALF_BPL_test]
            ClassName="BPLReader"
            PortOut=["CurrentMeasFile", "CurrentSimFile"]
            InputData=[("SimFileExt", "bin")]
            ConnectBus=["bus#1"]
            ; read additional config file data for this section, can overwrite complete setting before
            ; so e.g. InputData needs to list all input values,
            ; the values from include-cfg are not added but replace former set!
            Include="..\\..\\..\\04_Test_Data\\01a_Input\\valf\\valf_include_VALF_BPL_test.cfg"
            Active=True
            ;Order: Specifies the calling order
            Order=1

            ; cat reader needs db connector to setup connection to catalog db!
            [VALF_CAT_REF]
            ClassName="CATReader"
            PortOut=[ "CurrentMeasFile", "CurrentSimFile"]
            InputData=[("SimFileExt", "bsig"),("SimFileBaseName", "") ]
            ConnectBus=["Bus#1"]
            Active=False
            Order=1

        general used ports on bus ``Global`` (set by `ProjectManager`):

            - set "ConfigFileVersions"
                dict with file name as key and version as value for each loaded config file
            - read "FileCount"
                to show progress bar
            - read "IsFinished"
                to continue with next state when all sections of a recording are validated (set by `SignalExtractor`)

        Also setting ports as defined in ``InputData`` for the named bus.

        usage (example):

        .. python::

          from stk.valf import Valf

          vrun = stk.valf.Valf()
          vrun.load_config(r'conf/validation.cfg')

        :param filepath: path and filename of the config file to load
        :type filepath:  string
        """
        absfile = self._uncrepl(opath.abspath(filepath))
        # preset of port ConfigFileName currently not supported!!! what was it used for??
        # config_filename = self.__process_mgr.get_data_port(CFG_FILE_PORT_NAME)
        # if config_filename is None:
        #     config_filename = absfile
        # else:
        #     config_filename += ', ' + absfile
        self.__process_mgr.set_data_port(CFG_FILE_PORT_NAME, absfile)
        if self.__logger is not None:
            self.__logger.info("Using configuration file: '%s'" % absfile)
        try:
            if not self.__process_mgr.load_configuration(absfile):
                sys.exit(RET_CFG_ERROR)
        except ValfError:
            msg = 'Validation error during configuration load'
            if self.__process_mgr.last_config is not None:
                msg += (" (%s)" % self.__process_mgr.last_config)
            self.__logger.exception(msg)
            if self._fail_on_error:
                raise
            sys.exit(RET_SYS_EXIT)
        except SystemExit:
            msg = 'system exit by one module during configuration load'
            if self.__process_mgr.last_config is not None:
                msg += (" (%s)" % self.__process_mgr.last_config)
            # fix: log once (exception() includes traceback), was logged twice before
            self.__logger.exception(msg)
            if self._fail_on_error:
                raise
            sys.exit(RET_SYS_EXIT)
        except:
            # fix: call sys.exc_info() and take the value - before, the builtin
            # function object itself was formatted into the message
            msg = "unexpected error (%s) during configuration load" % str(sys.exc_info()[1])
            if self.__process_mgr.last_config is not None:
                msg += (" (%s)" % self.__process_mgr.last_config)
            # fix: removed duplicated exception() call
            self.__logger.exception(msg)
            if self._fail_on_error:
                raise
            sys.exit(RET_GEN_ERROR)

    def SetBplFile(self, filepath):  # pylint: disable=C0103
        """
        set data port ``BplFilePath`` to path/filename of bpl file (.ini or .bpl)
        path can be relative to starting script, checks existence of file and stops in case of errors

        :param filepath: path/filename of batch play list
        :type filepath:  string
        """
        absfilepath = self._uncrepl(opath.abspath(filepath))
        self.__logger.debug("BplFilePath: '%s'" % absfilepath)
        if filepath is not None and opath.isfile(absfilepath):
            self.__process_mgr.set_data_port(PLAY_LIST_FILE_PORT_NAME, absfilepath)
        else:
            self.__logger.error("Missing mts batch play list: can not open bpl file '%s'" % absfilepath)
            sys.exit(RET_CFG_ERROR)

    def SetCollectionName(self, collection_name):  # pylint: disable=C0103
        """
        set data port ``RecCatCollectionName`` giving the collection name of rec files in catalog db
        used by the cat reader to select the recording list for a project

        :param collection_name: name of the collection
        :type collection_name:  string
        """
        self.__process_mgr.set_data_port(COLLECTION_NAME_PORT_NAME, collection_name)
        self.__logger.debug("Rec file cataloge collection name is: '%s'" % collection_name)

    def SetDataPort(self, port_name, value, bus_name='Global'):  # pylint: disable=C0103
        """
        set named valf data port at named bus with given value,
        can be repeated for different ports and bus names

        in general these ports should be set using the config file ``InputData`` entry!

        :param port_name: valf data port name, not case sensitiv
        :type port_name:  string
        :param value: port value, type depends on port usage
        :type value:  user defined
        :param bus_name: valf data bus name, default: ``Global``, not case sensitiv
        :type bus_name:  string
        """
        self.__process_mgr.set_data_port(port_name, value, bus_name)
        self.__logger.debug('valf script setting port "%s" :' % port_name + str(value))

    def SetDbFile(self, filepath):  # pylint: disable=C0103
        """
        set data port ``dbfile`` to define name of sqlite data base file to be used instead of oracle db
        checks existence of the file and raises an error if it's not readable

        :param filepath: path/name of the database file
        :type filepath:  string
        """
        database_filename = self._uncrepl(opath.abspath(filepath))
        if not opath.exists(database_filename):
            self.__logger.error("defined db file '%s' not found" % database_filename)
            sys.exit(RET_CFG_ERROR)
        self.__process_mgr.set_data_port(DB_FILE_PORT_NAME, database_filename, 'DBBus#1')

    def SetErrorTolerance(self, tolerance):  # pylint: disable=C0103
        """
        set data port ``ErrorTolerance`` to a value as defined in `db_commmon`

        :param tolerance: error tolerance value
        :type tolerance:  integer
        """
        self.__process_mgr.set_data_port(ERROR_TOLERANCE_PORT_NAME, tolerance, "Bus#1")

    @deprecated()
    def SetMasterDbDbq(self, dbq):  # pylint: disable=C0103
        """
        set data port "masterdbdbq" (name defined in `valf.db_connector`) to given name
        default value defined in db.db_common by DEFAULT_MASTER_DBQ

        :param dbq: data base qualifier for oracle data bases
        :type dbq:  string
        :note:      don't use together with DSN setting
        """
        self.__process_mgr.set_data_port(MASTER_DB_DBQ_PORT_NAME, dbq, "DBBus#1")

    @deprecated()
    def SetMasterDbDsn(self, dsn):  # pylint: disable=C0103
        """
        set data port ``masterdbdsn`` (name defined in `valf.db_connector`) to given name
        default value defined in db.db_common by DEFAULT_MASTER_DSN

        :param dsn: data source name for odbc interface connections
        :type dsn:  string
        :note:      don't use together with DBQ setting
        """
        self.__process_mgr.set_data_port(MASTER_DB_DSN_PORT_NAME, dsn, "DBBus#1")

    def SetMasterDbUser(self, user):  # pylint: disable=C0103
        """
        set data port ``masterdbuser`` (name defined in `valf.db_connector`) to given name

        :param user: name of data base user
        :type user:  string
        """
        self.__process_mgr.set_data_port(MASTER_DB_USR_PORT_NAME, user, "DBBus#1")

    def SetMasterDbPwd(self, passwd):  # pylint: disable=C0103
        """
        set data port ``masterdbpassword`` (name defined in `valf.db_connector`) to given name

        :param passwd: password for data base user
        :type passwd:  string
        """
        self.__process_mgr.set_data_port(MASTER_DB_PW_PORT_NAME, passwd, "DBBus#1")

    def SetMasterDbPrefix(self, prefix):  # pylint: disable=C0103
        """
        set data port ``masterdbschemaprefix`` (name defined in `valf.db_connector`) to given name

        :param prefix: schema prefix for data base table
        :type prefix:  string
        """
        self.__process_mgr.set_data_port(MASTER_DB_SPX_PORT_NAME, prefix, "DBBus#1")

    def SetSimPath(self, pathname, bus_name="Bus#1"):  # pylint: disable=C0103
        """
        set data port ``SimOutputPath`` at named bus (default:``Bus#0``) to given path
        where measurement files are stored

        checks if path exists and raises an `ValfError` if not

        for historical reasons the bus_name is set as default to ``bus#0``
        make sure your config sets the similar busses for bpl/cat reader(s)!

        :param pathname: absolute path where simulation result files are stored
        :type pathname:  string
        :param bus_name: data bus name of the bpl/cat reader, default ``bus#0``, not case sensitiv
        :type bus_name:  string
        """
        pathname = self._uncrepl(pathname)
        if opath.exists(pathname):
            self.__process_mgr.set_data_port(SIM_PATH_PORT_NAME, pathname, bus_name)
            self.__logger.debug("Setting input data. [ Bus='{0}', "
                                "PortName='SimOutputPath', PortValue={1}]".format(bus_name, pathname))
            if bus_name not in self.__data_bus_names:
                self.__data_bus_names.append(bus_name)
                self.__process_mgr.set_data_port(DATA_BUS_NAMES, self.__data_bus_names)
        else:
            exception_msg = "Sim Output folder providing bsig/csv files does not exist:\n" +\
                            "{}\nPlease check your setup".format(pathname)
            self.__logger.exception(exception_msg)
            raise ValfError(exception_msg)

    def SetSwVersion(self, version):  # pylint: disable=C0103
        """
        set data port ``SWVersion`` to given value
        currently mandatory setting!!

        :param version: sw version of sw under test
        :type version:  string
        """
        self.__process_mgr.set_data_port(SWVERSION_PORT_NAME, version)

    def SetRefSwVersion(self, version):  # pylint: disable=C0103
        """
        set data port ``SWVersion_REG`` to given value (optional)

        :param version: sw version of regression sw under test
        :type version:  string
        """
        self.__process_mgr.set_data_port(SWVERSION_REG_PORT_NAME, version)

    def SetSaveResults(self, saveit=True):  # pylint: disable=C0103
        """
        set data port ``SaveResultInDB`` to given value (optional)

        :param saveit: Save the results into the database, default = True
        :type saveit:  boolean
        """
        self.__process_mgr.set_data_port(SAVE_RESULT_IN_DB, saveit)

    def GetDataPort(self, port_name, bus_name='Global'):  # pylint: disable=C0103
        """
        get named valf data port at named bus,
        can be repeated for different ports and bus names

        :param port_name: valf data port name, not case sensitiv
        :type port_name:  string
        :param bus_name: valf data bus name, default: ``Global``, not case sensitiv
        :type bus_name:  string

        :return: port data
        :rtype:  undefined
        """
        return self.__process_mgr.get_data_port(port_name, bus_name)

    def ActivateHpcAutoSplit(self):  # pylint: disable=C0103
        r"""
        activate auto splitting of bpl/cat list on HPC

        Running on HPC a validation can run in parallel on several tasks. This method sets data port
        ``HpcAutoSplit`` to ``True`` so each validation suite running on one task/machine only reads
        the sim results of one recording::

              bpl / cat list       HPC TaskID
            ----------------------  ----------
            recording_entry_0.rec   T00001
            recording_entry_1.rec   T00002
            recording_entry_2.rec   T00003
            ...                     ...

        **The tasks must be created during job submit,** this is not done by Valf!!

        Example to create an own task for each bpl entry:

        .. python::

            # Create the Validation Tasks
            reclist = bpl.Bpl(BPL_FILE).read()
            task = hpc.TaskFactory(job)
            for rec in reclist:
                task.create_task(r"D:\data\%JobName%\1_Input\valf_tests\custom\demo\run_valf_demo_bpl.py")
        """
        self.SetDataPort(HPC_AUTO_SPLIT_PORT_NAME, True, 'global')

    def Run(self):
        """ start the validation after all needed preparations

        :return:  success or error value during validation run
        :rtype:   error codes:
          RET_VAL_OK = 0
          RET_GEN_ERROR = -1
          RET_SYS_EXIT = -2
          RET_CFG_ERROR = -3
        """
        if LooseVersion(sqlite_version) <= LooseVersion(MIN_SQLITE_VERSION):
            self.__logger.error("error in setup: please update your sqlite3.dll!\n"
                                "Just call batch script listed on Validation wiki -> needed tools.")
            sys.exit(RET_CFG_ERROR)

        # fix: value comparison instead of identity check ('is not 0')
        if self._check_mandatory_settings() != 0:
            self.__logger.error("error in setup: mandatory settings missing")
            sys.exit(RET_CFG_ERROR)
        tstart = time()
        self._set_hpc_selection()
        try:
            ret_val = self.__process_mgr.run()
        except Exception:
            self.__logger.exception("unexpected runtime error")
            if self._fail_on_error:
                raise
            sys.exit(RET_GEN_ERROR)

        # fix: value comparison instead of identity check ('is not RET_VAL_OK')
        if ret_val != RET_VAL_OK:
            self.__logger.error("runtime error in validation suite, error level %d" % ret_val)

        self.__logger.info("Test duration(hh:mm:ss): " + strftime('%H:%M:%S', gmtime(time() - tstart)))

        self.__logger.info("Logging statistics: " +
                           ", ".join(["%s: %d" % (k, v)
                                      for k, v in self.__logger.get_statistics().items() if v > 0]))

        print('val run ended with result', ret_val)
        return ret_val
class BplUpdate(object):
    r"""
    **Update existing bpl files with changes in catalog db collections**

    Class provides methods to
      - read a config,
      - find all bpl files in the subfolders
      - compare the bpl files with collections
      - create a new bpl file if needed
      - check in the changed files
      - update member revisions for changed files

    It returns an error code to be executed as scheduled task, error code '0' shows execution without problems.
    Additionally the status is logged to the file ``bpl_update.log`` in same path as the config file.

    see more details in module description `bpl_update.py`

    **usage example** (see also function `main`):

    .. python::

        bpl_upd = BplUpdate(config_file)
        result = bpl_upd.update_directories()
    """

    def __init__(self, config_file):
        """ read config and prepare update

        :param config_file: path/file name of config file
        :type config_file:  string
        """
        self.error_status = ERR_OK
        self.bpl_top_dir = dirname(config_file)
        self._logger = Logger('BplUpdate', INFO, join(self.bpl_top_dir, 'bpl_update.log'))
        self._config = self._read_config(config_file)
        self.db_conn = None
        self.cat_db = None
        # setup db connection,
        # explicitly set default values for parameters that don't set None as default in DBconnect
        # unused for now: error_tolerance=ERROR_TOLERANCE_NONE, use_cx_oracle=False
        if self._config.get('connection') is None:
            self._logger.error('No parameter "connection" in section "[db_connection]" of %s' % config_file)
            self.error_status = ERR_DB_CONNECTION_CONFIG
        else:
            try:
                connection = str(self._config.get('connection'))
                if connection.endswith('.sqlite'):
                    connection = join(self.bpl_top_dir, connection)
                self.cat_db = BaseRecCatalogDB(connection)  # self.db_conn.Connect(cat)
            except Exception as err:
                self.error_status = ERR_DB_CONNECTION_CONFIG
                self._logger.error('can not setup db connection with configured settings: %s\n%s'
                                   % (connection, err))
        # get all bpl files in the top dir and all sub dirs
        self.bpl_dict = self.get_bpl_files()

    def _read_config(self, config_file, incl_sect=None):
        """ private method to read config, check some requirements and return dict with config

        :param config_file: path/file name to read
        :type config_file:  string
        :param incl_sect: section name to include from other config file, for recursive calls
        :type incl_sect:  string
        :return: dict with the parsed configuration values
        """
        raw_config = RawConfigParser()
        try:
            raw_config.read(abspath(config_file))
        except ParseError as err:
            self.error_status = ERR_CONFIG_FILE_READ
            self._logger.error(err)
            return {}

        section_names_list = raw_config.sections()
        if not len(section_names_list):
            self.error_status = ERR_CONFIG_FILE_CONTENT
            self._logger.error('No sections defined in config file %s - min: [db_connection] and [collections].'
                               % config_file)
            return {}

        include_section = section_names_list if incl_sect is None else incl_sect

        include_config = []
        sections_list = OrderedDict()
        try:
            for section_name in section_names_list:
                # don't import if not inside specific chapter
                if section_name not in include_section:
                    continue

                # sections_list[section_name] = {}
                try:
                    include = raw_config.get(section_name, "include").strip('"\' ')
                    if len(include):
                        include_config.append([include, section_name])
                except ParseError:
                    pass

                # NOTE(security): eval() of config values executes arbitrary code;
                # only run this tool on trusted config files
                if section_name == "db_connection":
                    sections_list["connection"] = eval(raw_config.get(section_name, "connection"))
                elif section_name == 'collections':
                    sections_list["update_list"] = eval(raw_config.get(section_name, 'update_list'))
                elif section_name == 'mks_settings':
                    if raw_config.has_option('mks_settings', 'task_id'):
                        sections_list['task_id'] = raw_config.get(section_name, 'task_id')

            # iterate through additional configs from includes now
            for inc in include_config:
                if not isabs(inc[0]):
                    inc[0] = join(dirname(config_file), inc[0])
                incl_lst = self._read_config(inc[0], inc[1])
                for incl_sct in incl_lst:
                    if incl_sct not in sections_list:
                        sections_list[incl_sct] = incl_lst[incl_sct]
                    else:
                        sections_list[incl_sct].update(incl_lst[incl_sct])

        except ParseError as err:
            self.error_status = ERR_CONFIG_FILE_CONTENT
            self._logger.error('Parse error during config file reading:\n %s' % err)

        return sections_list

    def get_bpl_files(self):
        """ find all bpl files starting from set directory

        :return: dict { 'basename': {'path': relpath, 'status': 'old'}}
        """
        bpl_files = {}
        for root, _, files in walk(self.bpl_top_dir):
            for bpl_file in files:
                if splitext(bpl_file)[1] != '.bpl':
                    continue
                bpl_path = relpath(root, self.bpl_top_dir)
                # print r'found file %s\%s' % (bpl_path, bpl_file)
                bpl_file_name = str(splitext(bpl_file)[0]).lower()
                bpl_files[bpl_file_name] = {'path': bpl_path,
                                            'filename': join(root, bpl_file),
                                            'status': 'old'}
        return bpl_files

    @staticmethod
    def compare_col_bpl(col_recs, bpl_list):
        """ compare rec files in passed lists

        :param col_recs: all recording names of a collection
        :type col_recs:  list of names
        :param bpl_list: all rec files in batch play list
        :type bpl_list:  `BplList` - list of `BplListEntries` with 'filepath' and sectionlist
        :return: True if similar lists
        """
        # first check length
        if len(col_recs) != len(bpl_list):
            return False
        # then check if all bpl entries have matching collection entry
        bpl_rec_names = [r.filepath for r in bpl_list]
        for rec in bpl_rec_names:
            if rec not in col_recs:
                return False
        return True

    def create_fct_dir(self, col_name):
        """ create the directory for the function named in the collection
        based on the current dir bpl_top_dir

        :param col_name: name of the collection
        :type col_name:  string
        :return: name of function
        """
        if len(col_name.split('_')) > 1:
            funct = col_name.split('_')[1]
        else:
            funct = ''
        # prep: create path if needed
        bpl_path = join(self.bpl_top_dir, funct)
        if not exists(bpl_path):
            makedirs(bpl_path)
        return funct

    def generate_bpl_file(self, col_name, rec_list):
        """ generate a bpl file for a given collection

        uses existing connection to cat db and creates a bpl file with:

          - file name like collection name
          - in a folder named after the function coded in collection name <project>_<function>_<param>

        a missing folder is also generated starting at current bpl_top_dir

        :param col_name: name of collection listing the recordings
        :type col_name:  string
        :param rec_list: list of recordings
        :type rec_list:  list
        :return: path/file name of generated file
        """
        dir_name = self.create_fct_dir(col_name)
        bpl_file_name = join(self.bpl_top_dir, dir_name, col_name + '.bpl')
        # make sure this file is not locked by mks or whatever
        if isfile(bpl_file_name):
            chmod(bpl_file_name, S_IWUSR)
        bpl_writer = Bpl(str(bpl_file_name))
        for rec in rec_list:
            bpl_writer.append(BplListEntry(rec))
        bpl_writer.write()
        return bpl_file_name

    def update_directories(self):
        """run through all subfolders and update existing bpl files

        :return: accumulated error status (ERR_OK if everything worked)
        """
        # get all collections to update
        # for each collection:
        collections = self._config.get('update_list')
        for col_name in collections:
            # print 'search for collection "%s"' % col_name
            try:
                _ = self.cat_db.get_collection_id(col_name)
            except AdasDBError as db_err:
                self._logger.warning(db_err)
                self.error_status = ERR_DB_COLL_MISSING
                continue
            # get directory for function
            fct_name = self.create_fct_dir(col_name)

            # create the new bpl file
            bpl_file_name_new = join(self.bpl_top_dir, fct_name, col_name + '_new.bpl')
            try:
                self.cat_db.export_bpl_for_collection(col_name, bpl_file_name_new, True, True)
            except AdasDBError as err:
                self._logger.error('problems writing bpl file %s:\n%s' % (bpl_file_name_new, err))
                self.error_status = ERR_BPL_FILE_CREATION
                continue

            # compare the new bpl file with an existing one (if there is one)
            bpl_file_name = join(self.bpl_top_dir, fct_name, col_name + '.bpl')
            if isfile(bpl_file_name):
                same = fcmp(bpl_file_name, bpl_file_name_new)
                if not same:
                    self._logger.info('update bpl file %s for collection %s' % (bpl_file_name, col_name))
                    chmod(bpl_file_name, S_IWUSR)
                    remove(bpl_file_name)
                    rename(bpl_file_name_new, bpl_file_name)
                    self.bpl_dict[col_name.lower()]['status'] = 'updated'
                else:
                    self._logger.info('bpl for collection "%s" up to date' % col_name)
                    remove(bpl_file_name_new)
                    self.bpl_dict[col_name.lower()]['status'] = 'match'
            else:
                # bpl file didn't exist before
                # fix: store the path the file is actually renamed to below;
                # before, a wrong path (top dir + '.bsig' extension) was recorded,
                # breaking the status report and mks check-in for new files
                self.bpl_dict[col_name.lower()] = {'status': 'new',
                                                   'filename': bpl_file_name}
                rename(bpl_file_name_new, bpl_file_name)
                self._logger.info('created new bpl file "%s" for collection %s' % (bpl_file_name, col_name))

        # check if collections are removed but bpl files exist for that collection
        # and list bpl files that have no matching collections
        all_col_names = self.cat_db.get_all_collection_names()
        for bpl_name in [b.lower() for b in self.bpl_dict if self.bpl_dict[b]['status'] == 'old']:
            bpl_file_name = relpath(self.bpl_dict[bpl_name]['filename'], self.bpl_top_dir)
            if bpl_name in all_col_names:
                self.bpl_dict[bpl_name]['status'] = 'rem_col?'
                self._logger.warning('collection removed from config? - file %s has matching collection "%s"'
                                     % (bpl_file_name, bpl_name))
            else:
                self.bpl_dict[bpl_name]['status'] = 'junk'
                self._logger.warning('found bpl file with no matching collection: %s' % bpl_file_name)

        # create table with all bpl update results
        with open(join(self.bpl_top_dir, 'bpl_update_result.csv'), 'w') as res_file:
            res_file.write('collection; status; bpl file\n')
            for bpl_name in self.bpl_dict:
                res_file.write(bpl_name + '; ' + self.bpl_dict[bpl_name]['status'] + '; ' +
                               relpath(self.bpl_dict[bpl_name]['filename'], self.bpl_top_dir) + '\n')

        return self.error_status

    def checkin_updated_files(self):
        """ use internal bpl dict to check in all updated files

        :TODO: currently stk.mks.si does not return sufficient error messages
               checkin_updated_files() does not recognize errors during checkin/checkout
        """
        # first check if bpl top dir contains a mks project file, make sure we have a sandbox
        error = ERR_OK
        task_id = self._config.get('task_id')
        if not task_id:
            self._logger.warning('no mks task configured, if the updates should be checked in define the "task_id" '
                                 'string in a config section "[mks_settings]"')
            return ERR_OK
        if not exists(join(self.bpl_top_dir, 'project.pj')):
            self._logger.error('bpl files not in a sandbox, can not find file project.pj with mks information.')
            return ERR_NO_SANDBOX
        mks = mks_si.Si()
        mks.setChangePackageId(task_id)
        for name in [b.lower() for b in self.bpl_dict if self.bpl_dict[b]['status'] == 'updated']:
            # fix: print as single-argument call - identical output on Py2,
            # also parseable by Py3 (was a Py2-only print statement)
            print('checking in %s' % self.bpl_dict[name]['filename'])
            try:
                if mks.co(self.bpl_dict[name]['filename']):
                    error = ERR_CO_ERROR
                    self._logger.error('can not check out %s: returned error %s'
                                       % (self.bpl_dict[name]['filename'], error))
                    continue
            except mks_si.SiException as err:
                self._logger.error('can not check out %s:%s' % (self.bpl_dict[name]['filename'], err))
                error = ERR_CO_ERROR
                continue
            try:
                if mks.ci(self.bpl_dict[name]['filename'], 'modified by bpl_update tool'):
                    error = ERR_CO_ERROR
                    self._logger.error('check in problems with %s - returned error %s'
                                       % (self.bpl_dict[name]['filename'], error))
                    continue
            except mks_si.SiException as err:
                self._logger.error('check in problems with %s:%s' % (self.bpl_dict[name]['filename'], err))
                error = ERR_CO_ERROR
                continue
            self._logger.info('update in mks for %s' % self.bpl_dict[name]['filename'])
        return error