class PluginManager(object):
    """
    class to search for pluging classes based on 'BaseComponentInterface'
    to be used as observer components

    can check for dublicated class names to throw an error if it finds one
    """
    def __init__(self, folder_path_list, cls):
        """
        initialise a new object, adds folders of folder_path_list to sys.path if not already present

        :param folder_path_list: list [] of folders to check recursively
        :param cls: base class of which to find subclasses
        """
        self._uncrepl = UncRepl()
        self.__folder_path_list = [
            self._uncrepl(fpl) for fpl in folder_path_list
        ]
        self.__cls = cls

        self.__logger = Logger(self.__class__.__name__)

        for folder_path in self.__folder_path_list:
            if folder_path not in sys.path:
                sys.path.append(folder_path)

    def __get_plugin_list(self, module_name_list):
        """
        returns a list of found plugin classes

        :param module_name_list: list of modules to search in
        :return: list of plugin classes, or None if none were found

        """
        plugin_list = []

        for module_name in module_name_list:
            self.__logger.debug("Checking: %s.py..." % module_name)
            try:
                # use relative or absolute (for all stk modules) import method
                if isinstance(module_name, (list, tuple)):
                    module = __import__(module_name[0], globals(), locals(),
                                        module_name[1], 0)
                else:
                    module = __import__(module_name)
            except Exception as msg:
                self.__logger.warning(
                    "Couldn't import module '%s' due to '%s'" %
                    (str(module_name), str(msg)))
                continue

            # look through this dictionary for classes
            # that are subclass of PluginInterface but are not PluginInterface itself
            module_candidates = list(module.__dict__.items())

            for class_name, entry in module_candidates:
                if class_name == self.__cls.__name__:
                    continue

                if entry is None:
                    continue

                if str(entry).find("PyQt4") > -1:
                    continue

                try:
                    if issubclass(entry, self.__cls):
                        self.__logger.debug(
                            "Found plugin.[Module: '%s', Class: '%s']." %
                            (module_name, class_name))
                        plugin_list.append({"type": entry, "name": class_name})
                except TypeError:
                    # this happens when a non-type is passed in to issubclass. We
                    # don't care as it can't be a subclass of PluginInterface if
                    # it isn't a type
                    continue

        if len(plugin_list) > 0:
            return plugin_list

        return None

    def get_plugin_class_list(self, remove_duplicates=False):
        """searches stk path to find classes

        :param remove_duplicates: wether duplicates should be removed
        :return: list of classes
        """
        module_name_list = []
        for folder_path in self.__folder_path_list:
            try:
                file_list = os.listdir(folder_path)
            except OSError:
                continue

            # For all modules within the stk use absolute module path to
            # avoid problems with duplicate package names
            lst = []
            stk_found = False
            path = folder_path
            module_path = ""
            while stk_found is False:
                head, tail = os.path.split(path)

                if tail == '':
                    if head != '':
                        lst.insert(0, head)
                    break
                else:
                    lst.insert(0, tail)
                    path = head
                    if tail == 'stk':
                        stk_found = True
                        for p_k in lst:
                            module_path += p_k + "."

            for file_name in file_list:
                if file_name.endswith(".py") and not file_name.startswith(
                        "__") and not file_name.startswith("stk"):
                    module_name = file_name.rsplit('.', 1)[0]
                    if module_path == "":
                        module_name_list.append(module_name)
                    else:
                        # add stk path to module name
                        module_name_list.append(
                            [module_path + module_name, module_name])

        plugin_list = self.__get_plugin_list(module_name_list)
        # __get_plugin_list returns None when nothing was found
        if plugin_list is not None and len(plugin_list) > 0:
            check_duplicates = self.__check_for_duplicate_classes(plugin_list)
            if check_duplicates == -1 and remove_duplicates is True:
                plugin_list = self.__remove_duplicate_classes(plugin_list)
                return plugin_list
            elif check_duplicates == 0:
                return plugin_list

        return None

    def __check_for_duplicate_classes(self, plugin_list):
        """ Check if there are any duplicates in the class list and throw an error if found.
        @param plugin_list: A list of the plugins found.
        @return: 0 for success and -1 if duplicate is found.
        """
        num_modules = len(plugin_list)
        for idx, module_name in enumerate(plugin_list):
            for i in range(idx + 1, num_modules):
                if module_name["name"] == plugin_list[i]["name"]:
                    self.__logger.error("Duplicate class name found: %s" %
                                        (module_name["name"]))
                    return -1
        return 0

    @staticmethod
    def __remove_duplicate_classes(plugin_list):
        """removes duplicate classes form plugin list
        """
        temp_mem = []
        copy_plugin_list = []

        for idx, module_name in enumerate(plugin_list):
            if module_name['name'] not in temp_mem:
                copy_plugin_list.append(plugin_list[idx])
                temp_mem.append(module_name['name'])

        return copy_plugin_list
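

# Minimal usage sketch (hedged): BaseComponentInterface is the base class named
# in the docstring above; the folder path is illustrative only, not part of the
# original API docs.
def _plugin_manager_usage_example():
    manager = PluginManager([r"./plugins"], BaseComponentInterface)
    plugins = manager.get_plugin_class_list(remove_duplicates=True)
    if plugins is not None:
        for plugin in plugins:
            # each entry is a dict with the class object and its name
            print("%s -> %s" % (plugin["name"], plugin["type"]))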
# Example 2
class ValEventList(object):
    """
    ValEventLoader Class - loads Event details from Database
    """
    def __init__(self, plugin_folder_list=None, ev_filter=None):
        """class for loading events form database

        :param plugin_folder_list: list of plugin folders, i.e. locations where event class definitions are stored.
                               If folders are not provided or definitions are not found by the plugin manager,
                               a typed class inheriting from `ValBaseEvent` will be generated at runtime.
                               **Pass this argument only if you have defined additional methods.**
        :type plugin_folder_list: list
        :param ev_filter: Instance of Event Filter
        :type ev_filter: `ValEventFilter`
        """
        self._log = Logger(self.__class__.__name__)

        if plugin_folder_list is not None:
            self.__plugin_folders = plugin_folder_list
        else:
            self.__plugin_folders = None  # EVENT_PLUGIN_FOLDER_LIST
        self.__plugin_manager = None
        self.__event_types_list = None
        self.__event_list = []
        self.__event_inst_created = []
        self.__filter = ev_filter

    def __del__(self):
        """clean up
        """
        self.__event_list = []

    def _init_event_types(self, plugin_folders=None):
        """ Init the Plugin """
        new_plugin = False

        if plugin_folders is not None:
            new_plugin = True
            self.__plugin_folders = plugin_folders
        if self.__plugin_manager is None or new_plugin:
            if self.__plugin_folders is not None:
                self.__plugin_manager = PluginManager(self.__plugin_folders, ValBaseEvent)

        if self.__event_types_list is None and self.__plugin_folders is not None:
            # get_plugin_class_list may return None if nothing was found
            self.__event_types_list = self.__plugin_manager.get_plugin_class_list(remove_duplicates=True) or []
        elif self.__event_types_list is None:
            self.__event_types_list = []

    def Load(self, dbi_val, dbi_gbl, testrun_id, coll_id=None, meas_id=None,  # pylint: disable=C0103
             rd_id=None, obs_name=None, level=ValSaveLoadLevel.VAL_DB_LEVEL_BASIC,
             beginabsts=None, endabsts=None, asmt_state=None, filter_cond=None, plugin_folders=None, cons_key=None):
        """
        Load Events

        :param dbi_val: Validation Result Database interface
        :type dbi_val: `OracleValResDB` or `SQLite3ValResDB`
        :param dbi_gbl: Validation Global Database interface
        :type dbi_gbl: `OracleGblDB` or `SQLite3GblDB`
        :param testrun_id: Testrun Id as mandatory field
        :type testrun_id: Integer
        :param coll_id: not used; this information is taken from the database using rd_id
        :type coll_id: Integer
        :param meas_id: Measurement Id load event only for specific recording
        :type meas_id: Integer
        :param rd_id: Result Descriptor Id as mandatory field
        :type rd_id: Integer or List
        :param obs_name: not used; this information is taken from the database using testrun_id
        :type obs_name: String
        :param level: load level specifying to which detail level the event data should be loaded,
                      with the following possibilities::

                        VAL_DB_LEVEL_STRUCT = Events
                        VAL_DB_LEVEL_BASIC = Events + Assessment
                        VAL_DB_LEVEL_INFO = Events + Assessment + Attribute
                        VAL_DB_LEVEL_ALL = Events + Assessment + Attribute + Image

        :type level: `ValSaveLoadLevel`
        :param beginabsts: Basic filter. Begin Absolute Time stamp i.e. Start of the events
        :type beginabsts: Integer
        :param endabsts: End Absolute Time stamp i.e. End of the events
        :type endabsts: Integer
        :param asmt_state: Assessment State
        :type asmt_state: String
        :param filter_cond: advanced filter feature which can filter events based on event attributes;
                            the filter map name is specified in the XML config file of custom filters.
                            Please read the documentation of `ValEventFilter` for more detail
        :param plugin_folders: not used; the value passed in the constructor takes precedence
        :type plugin_folders: list
        :param cons_key: constraint key, not used
        :type cons_key: NoneType
        """
        _ = coll_id
        _ = obs_name
        _ = asmt_state
        _ = plugin_folders
        _ = cons_key

        inc_asmt = False
        inc_attrib = False
        inc_images = False
        self.__event_list = []
        self.__event_inst_created = []
        unit_map = {}

        statement = None
        if filter_cond is not None:
            if self.__filter is not None:
                statement = self.__filter.Load(dbi_val, filtermap_name=filter_cond)
                if statement is None:
                    self._log.error("The map filter was invalid. Events will be loaded without filter")
                elif type(statement) is list:
                    self._log.debug("The map filter was found. Events will be loaded with filter")

        if rd_id is not None:
            rd_list = dbi_val.get_resuls_descriptor_child_list(rd_id)
            if len(rd_list) == 0:
                rd_list = [rd_id]
        else:
            return True

        if level & ValSaveLoadLevel.VAL_DB_LEVEL_2:
            inc_asmt = True

        if level & ValSaveLoadLevel.VAL_DB_LEVEL_3:
            inc_attrib = True
            unit_records = dbi_gbl.get_unit()
            for unit_entry in unit_records:
                unit_map[str(unit_entry[COL_NAME_UNIT_ID])] = unit_entry[COL_NAME_UNIT_NAME]

        if level & ValSaveLoadLevel.VAL_DB_LEVEL_4:
            inc_images = True

        records, image_attribs = dbi_val.get_event_for_testrun(testrun_id, measid=meas_id, beginabsts=beginabsts,
                                                               endabsts=endabsts, rdid=rd_list, cond=None,
                                                               filt_stat=statement,
                                                               inc_asmt=inc_asmt, inc_attrib=inc_attrib,
                                                               inc_images=inc_images)
        col_list = records[0]
        records = records[1]
        self.__event_inst_created = {}
        self._init_event_types()

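        # process the records in chunks of at most 10000 to keep memory usage bounded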
        while True:
            if len(records) <= 10000:
                self._prepare_events(dbi_val, records, col_list, image_attribs, unit_map,
                                     inc_asmt=inc_asmt, inc_attrib=inc_attrib, inc_images=inc_images)
                records = []
                break
            else:
                self._prepare_events(dbi_val, records[:10000], col_list, image_attribs, unit_map,
                                     inc_asmt=inc_asmt, inc_attrib=inc_attrib, inc_images=inc_images)
                del records[:10000]

        self.__event_inst_created = {}
        return True

    def _prepare_events(self, dbi_val, records, col_list, image_attribs, unit_map,
                        inc_asmt=True, inc_attrib=True, inc_images=True):
        """
        Prepare the event object list by processing chunks of records from the database

        :param dbi_val: DB interface to Validation Database
        :type dbi_val: OracleValResDB or  SQLite3ValResDB
        :param records: List of records as list of dict
        :type records: list
        :param col_list: Column List in records
        :type col_list: list
        :param image_attribs: Event Image attribute Id
        :type image_attribs: list
        :param unit_map: map of unit id to unit name
        :type unit_map: Dict
        :param inc_asmt: Flag to include Assessment in Event. Default True
        :type inc_asmt: Bool
        :param inc_attrib: Flag to include Event Attributes. Default True
        :type inc_attrib: Bool
        :param inc_images: Flag to include Event Attribute Images. Default True
        :type inc_images: Bool
        """
        event = ValBaseEvent()  # fix pylint problem, event will be set properly later
        if len(records) > 0:
            seid_eventlistmap = self.__event_inst_created
            sed_idx = col_list.index(COL_NAME_EVENTS_SEID)
            cls_name_idx = col_list.index(COL_NAME_EVENTS_VIEW_CLASSNAME)
            begin_idx = col_list.index(COL_NAME_EVENTS_VIEW_BEGINABSTS)
            start_idx = col_list.index(COL_NAME_EVENTS_VIEW_START_IDX)
            end_idx = col_list.index(COL_NAME_EVENTS_VIEW_ENDABSTS)
            stop_idx = col_list.index(COL_NAME_EVENTS_VIEW_STOP_IDX)
            measid_idx = col_list.index(COL_NAME_EVENTS_MEASID)

            if inc_asmt:
                usr_idx = col_list.index(COL_NAME_ASS_USER_ID)
                wf_idx = col_list.index("WF" + COL_NAME_WORKFLOW_NAME)
                asmtst_idx = col_list.index("ST" + COL_NAME_ASSESSMENT_STATE_NAME)
                comm_idx = col_list.index(COL_NAME_ASS_COMMENT)
                asmt_date_idx = col_list.index(COL_NAME_ASS_DATE)
                issue_idx = col_list.index(COL_NAME_ASS_TRACKING_ID)
                resassid_idx = col_list.index(COL_NAME_EVENTS_RESASSID)
            if inc_attrib:
                unitid_idx = col_list.index(COL_NAME_EVENT_ATTR_TYPES_UNITID)
                arribid_idx = col_list.index(COL_NAME_EVENT_ATTR_ATTRID)
                atrtypeid_idx = col_list.index(COL_NAME_EVENT_ATTR_TYPES_NAME)
                value_idx = col_list.index(COL_NAME_EVENT_ATTR_VALUE)

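        # each SEID is turned into an event instance only once; further records
        # with the same SEID only add attributes to the existing instance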
        for record in records:
            if str(int(record[sed_idx])) not in seid_eventlistmap:

                cls = None
                for etype in self.__event_types_list:
                    if etype['name'] == record[cls_name_idx]:
                        cls = etype['type']
                        break

                if cls is None:
                    e_type = type(record[cls_name_idx], (ValBaseEvent,), {})
                    event = e_type(start_time=record[begin_idx], start_index=record[start_idx],
                                   stop_time=record[end_idx], stop_index=record[stop_idx], seid=record[sed_idx])
                else:
                    event = object.__new__(cls)
                    event.__init__(start_time=record[begin_idx], start_index=record[start_idx],
                                   stop_time=record[end_idx], stop_index=record[stop_idx], seid=record[sed_idx])

                event.SetMeasId(record[measid_idx])

                if inc_asmt:
                    asmt = ValAssessment(user_id=record[usr_idx], wf_state=record[wf_idx],
                                         ass_state=record[asmtst_idx], ass_comment=record[comm_idx],
                                         date_time=record[asmt_date_idx], issue=record[issue_idx])
                    asmt.ass_id = record[resassid_idx]
                    event.AddAssessment(asmt)

                self.__event_list.append(event)
                seid_eventlistmap[str(int(record[sed_idx]))] = len(self.__event_list) - 1

            else:
                event = self.__event_list[seid_eventlistmap[str(int(record[sed_idx]))]]

            if inc_attrib:
                if record[unitid_idx] is not None:
                    unit = unit_map[str(record[unitid_idx])]
                else:
                    unit = str(record[unitid_idx])

                if inc_images and record[arribid_idx] in image_attribs:
                    image = dbi_val.get_event_image(record[arribid_idx])[COL_NAME_EVENT_IMG_IMAGE]
                else:
                    image = None
                event.AddAttribute(record[atrtypeid_idx], value=record[value_idx], unit=unit, image=image)

    def Save(self, dbi_val, dbi_gbl, testrun_id, coll_id, obs_name=None, parent_id=None,  # pylint: disable=C0103
             level=ValSaveLoadLevel.VAL_DB_LEVEL_BASIC, cons_key=None):
        """
        Save Events

        :param dbi_val: Validation Result Database interface
        :type dbi_val: `OracleValResDB` or `SQLite3ValResDB`
        :param dbi_gbl: Validation Global Database interface
        :type dbi_gbl: `OracleGblDB` or `SQLite3GblDB`
        :param testrun_id: Testrun Id
        :type testrun_id: Integer
        :param coll_id: Collection ID
        :type coll_id: Integer
        :param obs_name: Observer Name registered in Global Database
        :type obs_name: String
        :param parent_id: Parent Result Descriptor Id
        :type parent_id: Integer
        :param level: Save level::

                            - VAL_DB_LEVEL_STRUCT: Result Descriptor only,
                            - VAL_DB_LEVEL_BASIC: Result Descriptor and result,
                            - VAL_DB_LEVEL_INFO: Result Descriptor, Result and Assessment
                            - VAL_DB_LEVEL_ALL: Result with images and all messages

        :param cons_key: constraint key -- for future use
        :type cons_key: NoneType
        """
        res = False

        if dbi_val.get_testrun_lock(tr_id=testrun_id) == 1:
            self._log.error("No Event is saved due to locked testrun ")
            return res
        for evt in self.__event_list:
            try:
                res = evt.Save(dbi_val, dbi_gbl, testrun_id, coll_id, evt.GetMeasId(),
                               obs_name, parent_id, level, cons_key)
            except ValEventError as ex:
                self._log.warning("Event %s could not be stored. See details: %s " % (str(evt), ex))
                res = False

            if res is False:
                break

        if res is True:
            pass
            # dbi_val.commit()
            # dbi_gbl.commit()

        return res
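

# Minimal usage sketch (hedged): dbi_val/dbi_gbl stand for already opened
# validation and global DB interfaces; the testrun, result descriptor and
# collection ids are illustrative values only.
def _val_event_list_usage_example(dbi_val, dbi_gbl):
    events = ValEventList()
    events.Load(dbi_val, dbi_gbl, testrun_id=42, rd_id=7,
                level=ValSaveLoadLevel.VAL_DB_LEVEL_INFO)
    # ... work with the loaded events, then store them again:
    return events.Save(dbi_val, dbi_gbl, testrun_id=42, coll_id=1)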
# Example 3
class AdditionalObjectList(object):
    """
    AdditionalObjectList maps a smaller object list to a larger one, e.g.:

    OBJ_signals: EmGenObjectList (62 elements) -> AOJ_signals: EMPublicObjData (100 elements)

    via a mapping signal AOJ_mapping: ``SIM VFB ALL.DataProcCycle.EmGenObjectList.aObject[%].General.uiID``
    ``EmGenObjectList.aObject[3].General.uiID = 17 -> EMPublicObjData.Objects[17].Private.u_RadarBasisClassInternal``

    the output port is same as for OBJList:
    ``objects = self._data_manager.GetDataPort(OBJECT_PORT_NAME, self.bus_name)``

    Config file example::

           ("OBJ_min_lifetime",        50),
           ("OBJ_number_of_objects",  62),
           ("OOI_number_of_objects",   6),
           ("AOJ_list_size", 100), <--- mandatory
           ("AOJ_mapping", "SIM VFB ALL.DataProcCycle.EmGenObjectList.aObject[%].General.uiID"), <--- mandatory
           ("OBJ_prefix",                 ""),
           ("AOJ_prefix",             ""), <--- optional (needs not be necessarily present in the config, if yes,
                                                    the prefix is added to AOJ_mapping and AOJ_signals)
           ("OBJ_signals", [
           {'SignalName':"SIM VFB ALL.DataProcCycle.EmGenObjectList.aObject[%].General.eMaintenanceState",
           'PortName':'eObjMaintenanceState'},
           {'SignalName':"SIM VFB ALL.DataProcCycle.EmGenObjectList.aObject[%].Kinematic.fDistX",
           'PortName':'DistX'}]),
           ("AOJ_signals", [
           {'SignalName':"SIM VFB ALL.DataProcCycle.EMPublicObjData.Objects[%].Private.u_RadarBasisClassInternal",
           'PortName':'Object_RadarBasisClass'}])
    """
    AOJ_INIT_ERROR = 'mandatory init parameters of AdditionalObjectList are None'

    def __init__(self, sig_read, add_obj_mapping_rule,
                 add_obj_port_and_signal_names, add_obj_list_size,
                 add_obj_prefix):
        """init

        :param sig_read: former binary signal reader, not needed anymore
        :type  sig_read: None
        :param add_obj_mapping_rule: AOJ_mapping signal name
        :type  add_obj_mapping_rule: string
        :param add_obj_port_and_signal_names: AOJ_signals
        :type  add_obj_port_and_signal_names: list of strings
        :param add_obj_list_size: AOJ_list_size
        :type  add_obj_list_size: integer
        :param add_obj_prefix: AOJ_prefix for signal and mapping name
        :type  add_obj_prefix: string
        """
        if (add_obj_mapping_rule is None
                or add_obj_port_and_signal_names is None
                or add_obj_list_size is None):
            raise ValueError(AdditionalObjectList.AOJ_INIT_ERROR)
        self.__sig_read = sig_read
        self.__add_obj_port_and_signal_names = add_obj_port_and_signal_names
        self.__add_obj_mapping_rule = add_obj_mapping_rule
        self.__add_obj_list_size = add_obj_list_size
        if add_obj_prefix is None:
            self.__add_obj_prefix = ""
        else:
            self.__add_obj_prefix = add_obj_prefix.strip()
        # my object list
        self.__my_object_index = None
        self.__mapping_signal = None
        # all obj cache
        self.__object_list = {}
        self.__log = Logger(self.__class__.__name__)

    def clear_cache(self):
        """
        call this in SignalExtractor when the file has been already processed e.g. in PostProcessData
        """
        self.__my_object_index = None
        if self.__mapping_signal is not None:
            del self.__mapping_signal
            self.__mapping_signal = None
        if self.__object_list is not None:
            del self.__object_list
            self.__object_list = {}

    def __get_other_obj(self, object_index):
        """
        Builds an on demand cache with all signals of the object with the given index. All addressed objects are kept.

        :param object_index: object index
        """
        ret_obj = self.__object_list.get(object_index)
        if ret_obj is not None:
            return ret_obj
        else:
            new_obj = {}
            for list_item in self.__add_obj_port_and_signal_names:
                signal_name = self.__add_obj_prefix + list_item[
                    SIGNAL_NAME].replace('%', str(object_index))
                port_name = list_item[PORT_NAME]
                new_obj[port_name] = self.__sig_read[signal_name]
            self.__object_list[object_index] = new_obj
            return new_obj

    def __get_mapping_signal(self, object_index):
        """
        Builds an on demand cache with the mapping signal of the object with the given index.

        New index deletes previous signal.

        :param object_index: object index
        """
        if self.__my_object_index is None:
            # mapping:
            mapping_signal_name = self.__add_obj_prefix + self.__add_obj_mapping_rule.replace(
                '%', str(object_index))
            self.__mapping_signal = self.__sig_read[mapping_signal_name]
            self.__my_object_index = object_index
        elif self.__my_object_index != object_index:
            del self.__mapping_signal[:]
            mapping_signal_name = self.__add_obj_prefix + self.__add_obj_mapping_rule.replace(
                '%', str(object_index))
            self.__mapping_signal = self.__sig_read[mapping_signal_name]
            self.__my_object_index = object_index
        else:
            # self.__my_object_index == object_index
            pass
        return self.__mapping_signal

    def add_additional_object_signals(self, new_obj, object_index,
                                      start_position, end_position, sig_read):
        """
        Extends the passed object ``new_obj`` with the signals of the object it
        maps to in the larger list; the mapping is resolved via the mapping
        signal value at ``start_position``.

        :param new_obj: object represented as a dictionary of lists (see SignalExtractor), keyed by port name;
                        **it extends the passed object new_obj with the other list's signals**
        :type  new_obj: dict
        :param object_index: current object index
        :type  object_index: integer
        :param start_position: start position in the meas file (cycle)
        :type  start_position: integer
        :param end_position: end position in the meas file (cycle)
        :type  end_position: integer
        :param sig_read: signal reader instance to extract a signal
        :type  sig_read: SignalReader
        :return: success (means: mapping object found, otherwise do not use 'new_obj' in signal extractor)
        :rtype:  boolean
        """
        if sig_read is None:
            raise ValueError(AdditionalObjectList.AOJ_INIT_ERROR)
        self.__sig_read = sig_read
        ret_success = False
        mapping_signal = self.__get_mapping_signal(object_index)
        # mapping assumes that the corresponding object id pointing to the other object list
        # is constant between start and end:
        idx_larger_list = mapping_signal[
            start_position] if mapping_signal is not None else -1
        if 0 <= idx_larger_list < self.__add_obj_list_size:
            self.__log.debug("MYOID: " + str(object_index) + " OOID: " +
                             str(idx_larger_list) + " START: " +
                             str(start_position) + " END: " +
                             str(end_position))
            other_obj = self.__get_other_obj(idx_larger_list)
            for sig_name, sig_val in other_obj.iteritems():
                new_obj[sig_name] = sig_val[start_position:end_position]
            ret_success = True
        else:
            self.__log.warning("obj idx: " + str(object_index) + " pos: " +
                               str(start_position) + '..' + str(end_position) +
                               " index: " +
                               str(mapping_signal[start_position]) +
                               " is out of range [0.." +
                               str(self.__add_obj_list_size - 1) + "]: " +
                               ", obj period will be omitted")
        return ret_success
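

# Minimal usage sketch (hedged): the mapping rule and signal list mirror the
# config example in the class docstring; sig_read stands for a SignalReader
# instance and new_obj for the object dict built in SignalExtractor.
def _additional_object_list_usage_example(sig_read, new_obj):
    aoj = AdditionalObjectList(
        None,  # former binary signal reader, no longer needed
        "SIM VFB ALL.DataProcCycle.EmGenObjectList.aObject[%].General.uiID",
        [{'SignalName': "SIM VFB ALL.DataProcCycle.EMPublicObjData"
                        ".Objects[%].Private.u_RadarBasisClassInternal",
          'PortName': 'Object_RadarBasisClass'}],
        100,  # AOJ_list_size
        "")   # AOJ_prefix
    # extend new_obj with the mapped object's signals for cycles 0..49
    ok = aoj.add_additional_object_signals(new_obj, 3, 0, 50, sig_read)
    aoj.clear_cache()  # call once the measurement file is fully processed
    return ok  # False means no valid mapping, do not use new_obj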
# Example 4
class Valf(object):
    """
    class defining methods to easily start validation suites
    by calling a python script without additional option settings (double click in Windows)

    mandatory settings:

    - outputpath (as instantiation parameter)
    - config file with `LoadConfig`
    - sw version of sw under test with `SetSwVersion`

    see `__init__` for additional options

    returns error level::

      RET_VAL_OK = 0       suite returned without error
      RET_GEN_ERROR = -1   general error
      RET_SYS_EXIT = -2    sys.exit called
      RET_CFG_ERROR = -3   error in direct settings or configuration file

    **Example:**

    .. python::

        # Import valf module
        from stk.valf import valf

        # set output path for logging etc., logging level and directory of plugins (if not subdir of current HEADDIR):
        vsuite = valf.Valf(getenv('HPCTaskDataFolder'), 10)  # logging level DEBUG, default level: INFO

        # mandatory: set config file and version of sw under test
        vsuite.LoadConfig(r'demo\\cfg\\bpl_demo.cfg')
        vsuite.SetSwVersion('AL_STK_V02.00.06')

        # additional defines not already set in config files or to be overwritten:
        vsuite.SetBplFile(r'cfg\\bpl.ini')
        vsuite.SetSimPath(r'\\\\Lifs010.cw01.contiwan.com\\data\\MFC310\\SOD_Development')

        # start validation:
        vsuite.Run()

    :author:        Joachim Hospes
    :date:          29.05.2013

    """
    def __init__(self, outpath, *args, **kwargs):
        """
        initialise all needed variables and settings

          - creates/cleans output folder
          - start process manager
          - start logging of all events, therefore the output path must be given

        :param outpath: path to output directory, can be relative to calling script
        :type outpath: str

        :param args: additional positional arguments, mapped onto the keywords below in order of occurrence

        :keyword logging_level: level of details to be displayed. default: info
                                (10=debug, 20=info, 30=warning, 40=error, 50=critical, 60=exception)
        :type logging_level: int [10|20|30|40|50]

        :keyword plugin_search_path: default: parent dir of stk folder, normally parallel to validation scripts
        :type plugin_search_path: str

        :keyword clean_folder:  default ``True``, set to ``False`` if the files in output folder should not be deleted
                                during instantiation of Valf
        :type clean_folder: bool

        :keyword logger_name:   name of logger is used for logfile name and printed in log file as base name,
                                if not set name/filename of calling function/module is used
        :type logger_name: str

        :keyword fail_on_error: Switch to control exception behaviour, if set
                                exceptions will be re-thrown rather than omitted or logged.
        :type fail_on_error: bool

        :keyword deprecations: set me to False to remove any deprecation warning outputs inside log
        :type deprecations: bool
        """
        self.__version = "$Revision: 1.6 $"
        self._uncrepl = UncRepl()

        self.__data_bus_names = [
        ]  # store all names of generated data busses like bus#0
        self.__process_mgr = None

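        # arg_trans maps the positional args onto the listed keyword names and applies defaults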
        opts = arg_trans(
            [['logging_level', INFO], ['plugin_search_path', None],
             ['clean_folder', True], ['logger_name', None],
             ['fail_on_error', False], ['deprecations', True]], *args,
            **kwargs)

        self._fail_on_error = opts['fail_on_error']

        # prep output directory: create or clear content
        outpath = self._uncrepl(opath.abspath(outpath))
        clear_folder(outpath, opts['clean_folder'])

        logger_name = opts['logger_name']
        if logger_name is None:
            # get name of calling module
            frm = currentframe().f_back  # pylint: disable=W0212
            if frm.f_code.co_filename:
                logger_name = opath.splitext(
                    opath.basename(frm.f_code.co_filename))[0]
            else:
                logger_name = 'Valf'
        # start logger, first with default level, idea for extension: can be changed later
        self.__logger = Logger(logger_name,
                               opts['logging_level'],
                               filename=opath.join(outpath,
                                                   logger_name + ".log"))
        self.__logger.info("Validation started at %s." %
                           strftime('%H:%M:%S', localtime(time())))
        self.__logger.info("Validation based on %s STK %s-%s of %s, CP: %s." %
                           ("original" if stk_checksum(True) else "adapted",
                            RELEASE, INTVERS, RELDATE, MKS_CP))
        self.__logger.info("Logging level is set to %s." % next(
            i
            for i, k in LEVEL_CALL_MAP.items() if k == opts['logging_level']))
        self.__logger.info("Validation arguments have been:")
        for k, v in opts.iteritems():
            self.__logger.info("    %s: %s" % (k, str(v)))

        if not opts['deprecations']:
            self.__logger.warning(
                "Deprecation warnings have been switched off!")
            DeprecationUsage().status = False

        # find all observers down current path
        plugin_search_path = opts['plugin_search_path']
        plugin_folder_list = []
        if plugin_search_path is None:
            plugin_search_path = [HEAD_DIR]
        # take care of fast connections
        plugin_search_path = [self._uncrepl(i) for i in plugin_search_path]
        for spath in plugin_search_path:
            plugin_folder_list.extend([
                dirPath for dirPath in list_folders(spath)
                if "\\stk\\" not in dirPath
            ])

            self.__logger.info('added to plugin search path:' + spath)
        # and add all observers down calling script's path
        stk_plugins = [
            opath.join(HEAD_DIR, "stk", "valf"),
            opath.join(HEAD_DIR, "stk", "valf", "obs"),
            opath.join(HEAD_DIR, "stk", "val")
        ]

        plugin_folder_list.extend(plugin_search_path)

        for spath in stk_plugins:
            plugin_folder_list.append(spath)
            self.__logger.debug('added to plugin search path:' + spath)

        # start process manager
        try:
            self.__process_mgr = ProcessManager(plugin_folder_list,
                                                self._fail_on_error)
        except:  # pylint: disable=W0702
            self.__logger.exception(
                "Couldn't instantiate 'ProcessManager' class.")
            if self._fail_on_error:
                raise
            sys.exit(RET_GEN_ERROR)

        self.__process_mgr.set_data_port(OUTPUTDIRPATH_PORT_NAME, outpath)
        self.__logger.debug("OutputDirPath: '%s'" % outpath)

        # set still needed default settings as have been in valf.main
        self.SetMasterDbPrefix(DEFAULT_MASTER_SCHEMA_PREFIX)
        self.SetErrorTolerance(ERROR_TOLERANCE_NONE)

        # should be activated some day, for now not all validation suites can be parallelised
        # if set on default we should invent a method DeactivateHpcAutoSplit to run the remaining or old suites
        # self.SetDataPort("HpcAutoSplit", True, "Global")

    def _check_mandatory_settings(self):
        """ private method

        check if additional mandatory settings are done

        does not run a complete sanity check of the config, here we just check additional mandatory settings
        whose absence would not stop the validation run by itself,
        e.g. there is no test whether a db connection is defined for the cat reader: if not set,
        the cat reader itself will stop during initialisation

        :return:   number of missing settings, 0 if settings completed
        :rtype:    integer
        """
        error_cnt = 0

        if self.GetDataPort("SWVersion", "Global") is None:
            self.__logger.error("version of test sw not defined!")
            error_cnt += 1

        if (self.GetDataPort("HpcAutoSplit", "Global") is True
                and self.GetDataPort("SimSelection", "Global") is not None):
            self.__logger.error(
                "DataPort 'SimSelection' used by HPC, not available if 'HpcAutoSplit' is active!"
            )
            self.__logger.error(
                "Set either 'HpcAutoSplit' to False or don't set 'SimSelection'!"
            )
            error_cnt += 1

        return error_cnt

    def _set_hpc_selection(self):
        """ private method

        if the start script is running as HPC task on an HPC machine then
        set SimSelection to use only the entry given by the task number.

        e.g. for HPC task003: set SimSelection to [2]
        """
        # check HPC usage
        if self.GetDataPort("HpcAutoSplit", "Global") is True:
            task_name = getenv("TaskName")
            try:
                # T0000x task ids start with 1,  bpl list index with 0
                task_id = int(match(r'T(\d+)', str(task_name)).group(1)) - 1
            except AttributeError:
                self.__logger.exception(
                    "can't set Hpc Auto Split value as HPC environment variable Task Id"
                    " is empty or not valid: %s" % task_name)
                if self._fail_on_error:
                    raise
                sys.exit(RET_CFG_ERROR)
            self.__logger.info(
                "HpcAutoSplit: using entry %d of the sim collection" % task_id)
            self.SetDataPort("SimSelection", "[%d]" % task_id, "Global")

    def LoadConfig(self, filepath):  # pylint: disable=C0103
        """
        load configuration from path/filename, path can be relative to calling script

        Valid configuration properties are:

            - version: string defining version of config file, added to dict on port "ConfigFileVersions"
            - ClassName: quoted string to determine observer class to include in run (not in section "Global")
            - PortOut: list of port values (quoted strings) which should be exported to given bus name
            - InputData: pythonic list of tuples/lists which are taken and given as input for observer to be configured
            - ConnectBus: list of bus names to connect / register observer to (first one is taken actually)
            - Active: True/False value defining whether the observer should be enabled or not
            - include: file (quoted) to include herein, chapter should be repeated there,
              if include is used within global scope, all chapters from included file are used

        config file example::

            # valf_basic.cfg
            # config for testing Valf class, based on valf_demo settings,

            [Global]
            ; version string will be added to dict on port "ConfigFileVersions":
            version="$Revision: 1.6 $"
            ;PortOut: Informs the name of the port that are set by the component
            PortOut=["ProjectName", "SWVersion", "FunctionName", "Device_Prefix"]
            ;InputData: Declares all input parameters
            InputData=[('ProjectName', 'VALF-test'),
                       ('FunctionName', 'STK_moduletest'),
                       ('SimName', 'N/A'),
                       ('Multiprocess', True ),
                       ('ValName', 'N/A')]
            ;ConnectBus: Specifies the bus connect to the component
            ConnectBus=["Global"]

            ; db connection is needed for the catalog reader only, **deactivated** here!!
            ; connection parameters passed to validation_main.py as options because it will differ for projects
            [DBConnector]
            ClassName="DBConnector"
            InputData=[("UseAllConnections", "True")]
            PortOut=[ "DataBaseObjects"]
            ConnectBus=["DBBus#1"]
            Active=False
            ;Order: Specifies the calling order
            Order=0

            ; bpl reader can be used to read simulation results, but in future the cat_reader should be used
            ;  to test the difference switch Activate setting for BPLReader and CATReader
            [VALF_BPL_test]
            ClassName="BPLReader"
            PortOut=["CurrentMeasFile", "CurrentSimFile"]
            InputData=[("SimFileExt", "bin")]
            ConnectBus=["bus#1"]
            ; read additional config file data for this section, can overwrite complete setting before
            ; so e.g. InputData needs to list all input values,
            ; the values from include-cfg are not added but replace former set!
            Include="..\..\..\04_Test_Data\01a_Input\valf\valf_include_VALF_BPL_test.cfg"
            Active=True
            ;Order: Specifies the calling order
            Order=1

            ; cat reader needs db connector to setup connection to catalog db!
            [VALF_CAT_REF]
            ClassName="CATReader"
            PortOut=[ "CurrentMeasFile", "CurrentSimFile"]
            InputData=[("SimFileExt", "bsig"),("SimFileBaseName", "") ]
            ConnectBus=["Bus#1"]
            Active=False
            Order=1

        general used ports on bus ``Global`` (set by `ProjectManager`):

            - set "ConfigFileVersions"
                dict with file name as key and version as value for each loaded config file
            - read "FileCount"
                to show progress bar
            - read "IsFinished"
                to continue with next state when all sections of a recording are validated (set by `SignalExtractor`)

        Also setting ports as defined in ``InputData``  for the named bus.


        usage (example):

        .. python::

          from stk.valf import Valf

          vrun = stk.valf.Valf()
          vrun.load_config(r'conf/validation.cfg')

        :param filepath: path and filename of the config file to load
        :type filepath:  string
        """
        absfile = self._uncrepl(opath.abspath(filepath))
        # preset of port ConfigFileName currently not supported!!! what was it used for??
        # config_filename = self.__process_mgr.get_data_port(CFG_FILE_PORT_NAME)
        # if config_filename is None:
        #     config_filename = absfile
        # else:
        #     config_filename += ', ' + absfile
        self.__process_mgr.set_data_port(CFG_FILE_PORT_NAME, absfile)
        if self.__logger is not None:
            self.__logger.info("Using configuration file: '%s'" % absfile)
            try:
                if not self.__process_mgr.load_configuration(absfile):
                    sys.exit(RET_CFG_ERROR)
            except ValfError:
                msg = 'Validation error during configuration load'
                if self.__process_mgr.last_config is not None:
                    msg += (" (%s)" % self.__process_mgr.last_config)
                self.__logger.exception(msg)
                if self._fail_on_error:
                    raise
                sys.exit(RET_SYS_EXIT)
            except SystemExit:
                msg = 'system exit by one module during configuration load'
                if self.__process_mgr.last_config is not None:
                    msg += (" (%s)" % self.__process_mgr.last_config)
                self.__logger.error(msg)
                if self._fail_on_error:
                    raise
                sys.exit(RET_SYS_EXIT)
            except:  # pylint: disable=W0702
                msg = "unexpected error (%s) during configuration load" % str(
                    sys.exc_info()[1])
                if self.__process_mgr.last_config is not None:
                    msg += (" (%s)" % self.__process_mgr.last_config)
                self.__logger.exception(msg)
                if self._fail_on_error:
                    raise
                sys.exit(RET_GEN_ERROR)

    def SetBplFile(self, filepath):  # pylint: disable=C0103
        """
        set data port ``BplFilePath`` to path/filename of bpl file (.ini or .bpl)
        path can be relative to starting script, checks existence of file and stops in case of errors

        :param filepath: path/filename of batch play list
        :type filepath:  string
        """
        absfilepath = self._uncrepl(opath.abspath(filepath))
        self.__logger.debug("BplFilePath: '%s'" % absfilepath)
        if filepath is not None and opath.isfile(absfilepath):
            self.__process_mgr.set_data_port(PLAY_LIST_FILE_PORT_NAME,
                                             absfilepath)
        else:
            self.__logger.error(
                "Missing mts batch play list: can not open bpl file '%s'" %
                absfilepath)
            sys.exit(RET_CFG_ERROR)

    def SetCollectionName(self, collection_name):  # pylint: disable=C0103
        """
        set data port ``RecCatCollectionName`` giving the collection name of rec files in catalog db
        used by the cat reader to select the recording list for a project

        :param collection_name: name of the collection
        :type collection_name:  string
        """
        self.__process_mgr.set_data_port(COLLECTION_NAME_PORT_NAME,
                                         collection_name)
        self.__logger.debug("Rec file cataloge collection name is: '%s'" %
                            collection_name)

    def SetDataPort(self, port_name, value, bus_name='Global'):  # pylint: disable=C0103
        """
        set named valf data port at named bus with given value,
        can be repeated for different ports and bus names

        in general these ports should be set using the config file ``InputData`` entry!

        :param port_name: valf data port name, not case sensitive
        :type port_name:  string
        :param value:     port value, type depends on port usage
        :type value:      user defined
        :param bus_name:  valf data bus name, default: ``Global``, not case sensitive
        :type bus_name:   string
        """
        self.__process_mgr.set_data_port(port_name, value, bus_name)
        self.__logger.debug('valf script setting port "%s" :' % port_name +
                            str(value))

    def SetDbFile(self, filepath):  # pylint: disable=C0103
        """
        set data port ``dbfile`` to define name of sqlite data base file to be used instead of oracle db
        checks existence of the file and raises an error if it's not readable

        :param filepath: path/name of the database file
        :type filepath:  string
        """
        database_filename = self._uncrepl(opath.abspath(filepath))
        if not opath.exists(database_filename):
            self.__logger.error("defined db file '%s' not found" %
                                database_filename)
            sys.exit(RET_CFG_ERROR)
        self.__process_mgr.set_data_port(DB_FILE_PORT_NAME, database_filename,
                                         'DBBus#1')

    def SetErrorTolerance(self, tolerance):  # pylint: disable=C0103
        """
        set data port ``ErrorTolerance`` to a value as defined in `db_common`

        :param tolerance: error tolerance value
        :type tolerance:  integer
        """
        self.__process_mgr.set_data_port(ERROR_TOLERANCE_PORT_NAME, tolerance,
                                         "Bus#1")

    @deprecated()
    def SetMasterDbDbq(self, dbq):  # pylint: disable=C0103
        """
        set data port "masterdbdbq" (name defined in `valf.db_connector`) to given name
        default value defined in db.db_common by DEFAULT_MASTER_DBQ

        :param dbq: data base qualifier for oracle data bases
        :type dbq:  string
        :note:      don't use together with DSN setting
        """
        self.__process_mgr.set_data_port(MASTER_DB_DBQ_PORT_NAME, dbq,
                                         "DBBus#1")

    @deprecated()
    def SetMasterDbDsn(self, dsn):  # pylint: disable=C0103
        """
        set data port ``masterdbdsn`` (name defined in `valf.db_connector`) to given name
        default value defined in db.db_common by DEFAULT_MASTER_DSN

        :param dsn: data source name for odbc interface connections
        :type dsn:  string
        :note:      don't use together with DBQ setting
        """
        self.__process_mgr.set_data_port(MASTER_DB_DSN_PORT_NAME, dsn,
                                         "DBBus#1")

    def SetMasterDbUser(self, user):  # pylint: disable=C0103
        """
        set data port ``masterdbuser`` (name defined in `valf.db_connector`) to given name

        :param user: name of data base user
        :type user:  string
        """
        self.__process_mgr.set_data_port(MASTER_DB_USR_PORT_NAME, user,
                                         "DBBus#1")

    def SetMasterDbPwd(self, passwd):  # pylint: disable=C0103
        """
        set data port ``masterdbpassword`` (name defined in `valf.db_connector`) to given name

        :param passwd: password for data base user
        :type passwd:  string
        """
        self.__process_mgr.set_data_port(MASTER_DB_PW_PORT_NAME, passwd,
                                         "DBBus#1")

    def SetMasterDbPrefix(self, prefix):  # pylint: disable=C0103
        """
        set data port ``masterdbschemaprefix`` (name defined in `valf.db_connector`) to given name

        :param prefix: schema prefix for data base table
        :type prefix:  string
        """
        self.__process_mgr.set_data_port(MASTER_DB_SPX_PORT_NAME, prefix,
                                         "DBBus#1")

    def SetSimPath(self, pathname, bus_name="Bus#1"):  # pylint: disable=C0103
        """
        set data port ``SimOutputPath`` at named bus (default: ``Bus#1``) to given path
        where measurement files are stored

        checks if path exists and raises an `ValfError` if not

        for historical reasons the default bus_name is set to ``Bus#1``,
        make sure your config sets similar busses for the bpl/cat reader(s)!

        :param pathname: absolute path where simulation result files are stored
        :type pathname:  string
        :param bus_name: data bus name of the bpl/cat reader, default ``Bus#1``, not case sensitive
        :type bus_name:  string
        """
        pathname = self._uncrepl(pathname)
        if opath.exists(pathname):
            self.__process_mgr.set_data_port(SIM_PATH_PORT_NAME, pathname,
                                             bus_name)
            self.__logger.debug(
                "Setting input data. [ Bus='{0}', "
                "PortName='SimOutputPath', PortValue={1}]".format(
                    bus_name, pathname))
            if bus_name not in self.__data_bus_names:
                self.__data_bus_names.append(bus_name)
                self.__process_mgr.set_data_port(DATA_BUS_NAMES,
                                                 self.__data_bus_names)
        else:
            exception_msg = "Sim Output folder providing bsig/csv files does not exist:\n" +\
                            "{}\nPlease check your setup".format(pathname)
            self.__logger.exception(exception_msg)
            raise ValfError(exception_msg)

    def SetSwVersion(self, version):  # pylint: disable=C0103
        """
        set data port ``SWVersion`` to given value
        currently mandatory setting!!

        :param version: sw version of sw under test
        :type version:  string
        """
        self.__process_mgr.set_data_port(SWVERSION_PORT_NAME, version)

    def SetRefSwVersion(self, version):  # pylint: disable=C0103
        """
        set data port ``SWVersion_REG`` to given value (optional)

        :param version: sw version of regression sw under test
        :type version:  string
        """
        self.__process_mgr.set_data_port(SWVERSION_REG_PORT_NAME, version)

    def SetSaveResults(self, saveit=True):  # pylint: disable=C0103
        """
        set data port ``SaveResultInDB`` to given value (optional)

        :param saveit: Save the results into the database, default = True
        :type saveit:  boolean
        """
        self.__process_mgr.set_data_port(SAVE_RESULT_IN_DB, saveit)

    def GetDataPort(self, port_name, bus_name='Global'):  # pylint: disable=C0103
        """
        get named valf data port at named bus,
        can be repeated for different ports and bus names

        :param port_name: valf data port name, not case sensitive
        :type port_name:  string

        :param bus_name: valf data bus name, default: ``Global``, not case sensitive
        :type bus_name:  string

        :return: port data
        :rtype:  undefined
        """
        return self.__process_mgr.get_data_port(port_name, bus_name)

    def ActivateHpcAutoSplit(self):  # pylint: disable=C0103
        r"""
        activate auto splitting of bpl/cat list on HPC

        Running on HPC a validation can run in parallel on several tasks. This method sets data port ``HpcAutoSplit``
        to ``True`` so each validation suite running on one task/machine only reads the sim results of one recording::

              bpl / cat list       HPC TaskID
            ---------------------- ----------
            recording_entry_0.rec    T00001
            recording_entry_1.rec    T00002
            recording_entry_2.rec    T00003
            ...                      ...

        **The tasks must be created during job submit,** this is not done by Valf!!

        Example to create an own task for each bpl entry:

        .. python::

            # Create the Validation Tasks
            reclist = bpl.Bpl(BPL_FILE).read()
            task = hpc.TaskFactory(job)
            for rec in reclist:
                task.create_task(r"D:\data\%JobName%\1_Input\valf_tests\custom\demo\run_valf_demo_bpl.py")

        """
        self.SetDataPort(HPC_AUTO_SPLIT_PORT_NAME, True, 'global')

    def Run(self):
        """ start the validation after all needed preparations

        :return:  success or error value during validation run
        :rtype:   error codes:
          RET_VAL_OK = 0
          RET_GEN_ERROR = -1
          RET_SYS_EXIT = -2
          RET_CFG_ERROR = -3

        """
        if LooseVersion(sqlite_version) <= LooseVersion(MIN_SQLITE_VERSION):
            self.__logger.error(
                "error in setup: please update your sqlite3.dll!\n"
                "Just call batch script listed on Validation wiki -> needed tools."
            )
            sys.exit(RET_CFG_ERROR)

        if self._check_mandatory_settings() != 0:
            self.__logger.error("error in setup: mandatory settings missing")
            sys.exit(RET_CFG_ERROR)
        tstart = time()
        self._set_hpc_selection()
        try:
            ret_val = self.__process_mgr.run()
        except Exception:
            self.__logger.exception("unexpected runtime error")
            if self._fail_on_error:
                raise
            sys.exit(RET_GEN_ERROR)

        if ret_val != RET_VAL_OK:
            self.__logger.error(
                "runtime error in validation suite, error level %d" % ret_val)

        self.__logger.info("Test duration(hh:mm:ss): " +
                           strftime('%H:%M:%S', gmtime(time() - tstart)))

        self.__logger.info("Logging statistics: " + ", ".join([
            "%s: %d" % (k, v)
            for k, v in self.__logger.get_statistics().items() if v > 0
        ]))

        print("val run ended with result %d" % ret_val)
        return ret_val
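

# Minimal run-script sketch (hedged), following the example in the class
# docstring; the path and version strings are illustrative placeholders.
def _valf_usage_example():
    vsuite = Valf(getenv('HPCTaskDataFolder'), 10)  # logging level DEBUG
    vsuite.LoadConfig(r'demo\cfg\bpl_demo.cfg')     # mandatory setting
    vsuite.SetSwVersion('AL_STK_V02.00.06')         # mandatory setting
    sys.exit(vsuite.Run())                          # propagate the error level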
# Example 5
class BplUpdate(object):
    r"""
    **Update existing bpl files with changes in catalog db collections**

    Class provides methods to
      - read a config,
      - find all bpl files in the subfolders
      - compare the bpl files with collections
      - create a new bpl file if needed
      - check in the changed files
      - update member revisions for changed files

    It returns an error code so it can be run as a scheduled task; error code '0' shows execution without problems.
    Additionally the status is logged to the file ``bpl_update.log`` in same path as the config file.

    see more details in module description `bpl_update.py`

    **usage example** (see also function `main`):

    .. python::

        bpl_upd = BplUpdate(config_file)
        result = bpl_upd.update_directories()

    """
    def __init__(self, config_file):
        """
        read config and prepare update

        :param config_file: path/file name of config file
        :type  config_file: string
        """
        self.error_status = ERR_OK
        self.bpl_top_dir = dirname(config_file)
        self._logger = Logger('BplUpdate', INFO,
                              join(self.bpl_top_dir, 'bpl_update.log'))
        self._config = self._read_config(config_file)
        self.db_conn = None
        self.cat_db = None

        # set up the db connection;
        # explicitly set default values for parameters whose default is not None in DBConnect
        # unused for now: error_tolerance=ERROR_TOLERANCE_NONE, use_cx_oracle=False
        if self._config.get('connection') is None:
            self._logger.error(
                'No parameter "connection" in section "[db_connection]" of %s'
                % config_file)
            self.error_status = ERR_DB_CONNECTION_CONFIG
        else:
            try:
                connection = str(self._config.get('connection'))
                if connection.endswith('.sqlite'):
                    connection = join(self.bpl_top_dir, connection)
                self.cat_db = BaseRecCatalogDB(
                    connection)  # self.db_conn.Connect(cat)
            except Exception as err:
                self.error_status = ERR_DB_CONNECTION_CONFIG
                self._logger.error(
                    'cannot set up db connection with configured settings: %s\n%s'
                    % (connection, err))
        # get all bpl files in the top dir and all sub dirs
        self.bpl_dict = self.get_bpl_files()

    def _read_config(self, config_file, incl_sect=None):
        """
        private method to read config, check some requirements and return dict with config

        :param config_file: path/file name to read
        :type  config_file: string
        :param incl_sect : section name to include from other config file, for recursive calls
        :type  incl_sect : string
        """
        raw_config = RawConfigParser()
        try:
            raw_config.read(abspath(config_file))
        except ParseError as err:
            self.error_status = ERR_CONFIG_FILE_READ
            self._logger.error(err)
            return {}

        section_names_list = raw_config.sections()
        if not section_names_list:
            self.error_status = ERR_CONFIG_FILE_CONTENT
            self._logger.error(
                'No sections defined in config file %s - min: [db_connection] and [collections].'
                % config_file)
            return {}

        # include only the requested section on recursive calls (wrapped in a list for the 'in' check)
        include_section = section_names_list if incl_sect is None else [incl_sect]

        include_config = []
        sections_list = OrderedDict()
        try:
            for section_name in section_names_list:
                # skip sections that are not in the include list
                if section_name not in include_section:
                    continue
                # sections_list[section_name] = {}

                try:
                    include = raw_config.get(section_name,
                                             "include").strip('"\' ')
                    if include:
                        include_config.append([include, section_name])
                except ParseError:
                    pass

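                # note: option values are read with eval(), so they must be valid
                # python literals (e.g. quoted strings or lists)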
                if section_name == "db_connection":
                    sections_list["connection"] = eval(
                        raw_config.get(section_name, "connection"))
                elif section_name == 'collections':
                    sections_list["update_list"] = eval(
                        raw_config.get(section_name, 'update_list'))
                elif section_name == 'mks_settings':
                    if raw_config.has_option('mks_settings', 'task_id'):
                        sections_list['task_id'] = raw_config.get(
                            section_name, 'task_id')

            # iterate through additional configs from includes now
            for inc in include_config:
                if not isabs(inc[0]):
                    inc[0] = join(dirname(config_file), inc[0])
                incl_lst = self._read_config(inc[0], inc[1])
                for incl_sct in incl_lst:
                    if incl_sct not in sections_list:
                        sections_list[incl_sct] = incl_lst[incl_sct]
                    else:
                        sections_list[incl_sct].update(incl_lst[incl_sct])

        except ParseError as err:
            self.error_status = ERR_CONFIG_FILE_CONTENT
            self._logger.error('Parse error during config file reading:\n %s' %
                               err)

        return sections_list

    def get_bpl_files(self):
        """
        find all bpl files starting from set directory

        :return: dict {'basename': {'path': relpath, 'filename': path/file name, 'status': 'old'}}
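
        example result (hypothetical entry)::

            {'acc_drop_out': {'path': 'acc',
                              'filename': r'D:\bpl\acc\acc_drop_out.bpl',
                              'status': 'old'}}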
        """
        bpl_files = {}
        for root, _, files in walk(self.bpl_top_dir):
            for bpl_file in files:
                if splitext(bpl_file)[1] != '.bpl':
                    continue
                bpl_path = relpath(root, self.bpl_top_dir)
                # print r'found file %s\%s' % (bpl_path, bpl_file)
                bpl_file_name = str(splitext(bpl_file)[0]).lower()
                bpl_files[bpl_file_name] = {
                    'path': bpl_path,
                    'filename': join(root, bpl_file),
                    'status': 'old'
                }
        return bpl_files

    @staticmethod
    def compare_col_bpl(col_recs, bpl_list):
        """
        compare rec files in passed lists

        :param col_recs: all recording names of a collection
        :type  col_recs: list of names
        :param bpl_list: all rec files in batch play list
        :type  bpl_list: `BplList` - list of `BplListEntries` with 'filepath' and sectionlist
        :return: True if both lists contain the same recordings
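
        **usage example** (hypothetical values, ``bpl_list`` read via `Bpl`):

        .. python::

            col_recs = ['rec_a.rec', 'rec_b.rec']
            bpl_list = Bpl(bpl_file_name).read()
            if not BplUpdate.compare_col_bpl(col_recs, bpl_list):
                print('bpl file needs to be updated')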
        """
        # first check length
        if len(col_recs) != len(bpl_list):
            return False
        # then check if all bpl entries have matching collection entry
        bpl_rec_names = [r.filepath for r in bpl_list]
        for rec in bpl_rec_names:
            if rec not in col_recs:
                return False
        return True

    def create_fct_dir(self, col_name):
        """
        create the directory for the function named in the collection
        based on the current dir bpl_top_dir

        :param col_name: name of the collection
        :type  col_name: string
        :return: name of function
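
        example (hypothetical collection name, ``bpl_upd`` as in the class example):

        .. python::

            funct = bpl_upd.create_fct_dir('ARS400_sr_ped')
            # creates <bpl_top_dir>\sr if missing and returns 'sr'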
        """
        if len(col_name.split('_')) > 1:
            funct = col_name.split('_')[1]
        else:
            funct = ''
        # prep: create path if needed
        bpl_path = join(self.bpl_top_dir, funct)
        if not exists(bpl_path):
            makedirs(bpl_path)

        return funct

    def generate_bpl_file(self, col_name, rec_list):
        """
        generate a bpl file for a given collection

        uses existing connection to cat db and creates a bpl file with:
          - file name like collection name
          - in a folder named after the function coded in collection name <project>_<function>_<param>

        a missing folder is also generated starting at current bpl_top_dir

        :param col_name: name of collection listing the recordings
        :type  col_name: string
        :param rec_list: list of recordings
        :type rec_list: list
        :return: path/file name of generated file
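
        **usage example** (hypothetical collection name, ``bpl_upd`` as in the class example):

        .. python::

            bpl_file = bpl_upd.generate_bpl_file('ARS400_sr_ped', rec_list)
            # -> <bpl_top_dir>\sr\ARS400_sr_ped.bpl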
        """
        dir_name = self.create_fct_dir(col_name)
        bpl_file_name = join(self.bpl_top_dir, dir_name, col_name + '.bpl')
        # make sure this file is not locked by mks or whatever
        if isfile(bpl_file_name):
            chmod(bpl_file_name, S_IWUSR)
        bpl_writer = Bpl(str(bpl_file_name))
        for rec in rec_list:
            bpl_writer.append(BplListEntry(rec))
        bpl_writer.write()

        return bpl_file_name

    def update_directories(self):
        """run through all subfolders and update existing bpl files
        """

        # get all collections to update
        # for each collection:
        collections = self._config.get('update_list')
        for col_name in collections:
            # print 'search for collection "%s"' % col_name
            try:
                _ = self.cat_db.get_collection_id(col_name)
            except AdasDBError as db_err:
                self._logger.warning(db_err)
                self.error_status = ERR_DB_COLL_MISSING
                continue
            # get directory for function
            fct_name = self.create_fct_dir(col_name)
            # create the new bpl file
            bpl_file_name_new = join(self.bpl_top_dir, fct_name,
                                     col_name + '_new.bpl')
            try:
                self.cat_db.export_bpl_for_collection(col_name,
                                                      bpl_file_name_new, True,
                                                      True)
            except AdasDBError as err:
                self._logger.error('problems writing bpl file %s:\n%s' %
                                   (bpl_file_name_new, err))
                self.error_status = ERR_BPL_FILE_CREATION
                continue
            # compare the new bpl file with an existing one (if there is one)
            bpl_file_name = join(self.bpl_top_dir, fct_name, col_name + '.bpl')
            if isfile(bpl_file_name):
                same = fcmp(bpl_file_name, bpl_file_name_new)
                if not same:
                    self._logger.info('update bpl file %s for collection %s' %
                                      (bpl_file_name, col_name))
                    chmod(bpl_file_name, S_IWUSR)
                    remove(bpl_file_name)
                    rename(bpl_file_name_new, bpl_file_name)
                    self.bpl_dict[col_name.lower()]['status'] = 'updated'
                else:
                    self._logger.info('bpl for collection "%s" up to date' %
                                      col_name)
                    remove(bpl_file_name_new)
                    self.bpl_dict[col_name.lower()]['status'] = 'match'
            else:
                # bpl file didn't exist before
                self.bpl_dict[col_name.lower()] = {
                    'status': 'new',
                    # store the final bpl file path, the '_new' file is renamed to it below
                    'filename': bpl_file_name
                }
                rename(bpl_file_name_new, bpl_file_name)
                self._logger.info(
                    'created new bpl file "%s" for collection %s' %
                    (bpl_file_name, col_name))

        # check if collections are removed but bpl files exist for that collection
        # and list bpl files that have no matching collections
        all_col_names = self.cat_db.get_all_collection_names()
        for bpl_name in [
                b.lower() for b in self.bpl_dict
                if self.bpl_dict[b]['status'] == 'old'
        ]:
            bpl_file_name = relpath(self.bpl_dict[bpl_name]['filename'],
                                    self.bpl_top_dir)
            if bpl_name in all_col_names:
                self.bpl_dict[bpl_name]['status'] = 'rem_col?'
                self._logger.warning(
                    'collection removed from config? - file %s has matching collection "%s"'
                    % (bpl_file_name, bpl_name))
            else:
                self.bpl_dict[bpl_name]['status'] = 'junk'
                self._logger.warning(
                    'found bpl file with no matching collection: %s' %
                    bpl_file_name)

        # create table with all bpl update results
        with open(join(self.bpl_top_dir, 'bpl_update_result.csv'),
                  'w') as res_file:
            res_file.write('collection; status; bpl file\n')
            for bpl_name in self.bpl_dict:
                res_file.write(bpl_name + '; ' +
                               self.bpl_dict[bpl_name]['status'] + '; ' +
                               relpath(self.bpl_dict[bpl_name]['filename'],
                                       self.bpl_top_dir) + '\n')

        return self.error_status

    def checkin_updated_files(self):
        """
        use internal bpl dict to check in all updated files

        :TODO: currently stk.mks.si does not return sufficient error messages
               checkin_updated_files() does not recognize errors during checkin/checkout
        """
        # first check if bpl top dir contains a mks project file, make sure we have a sandbox
        error = ERR_OK
        task_id = self._config.get('task_id')
        if not task_id:
            self._logger.warning(
                'no mks task configured; to check in the updates define the "task_id" '
                'string in a config section "[mks_settings]"')
            return ERR_OK
        if not exists(join(self.bpl_top_dir, 'project.pj')):
            self._logger.error(
                'bpl files not in a sandbox, cannot find file project.pj with mks information.'
            )
            return ERR_NO_SANDBOX
        mks = mks_si.Si()
        mks.setChangePackageId(task_id)
        for name in [
                b.lower() for b in self.bpl_dict
                if self.bpl_dict[b]['status'] == 'updated'
        ]:
            print('checking in %s' % self.bpl_dict[name]['filename'])
            try:
                if mks.co(self.bpl_dict[name]['filename']):
                    error = ERR_CO_ERROR
                    self._logger.error(
                        'cannot check out %s: returned error %s' %
                        (self.bpl_dict[name]['filename'], error))
                    continue
            except mks_si.SiException as err:
                self._logger.error('cannot check out %s:%s' %
                                   (self.bpl_dict[name]['filename'], err))
                error = ERR_CO_ERROR
                continue
            try:
                if mks.ci(self.bpl_dict[name]['filename'],
                          'modified by bpl_update tool'):
                    error = ERR_CO_ERROR
                    self._logger.error(
                        'check in problems with %s - returned error %s' %
                        (self.bpl_dict[name]['filename'], error))
                    continue
            except mks_si.SiException as err:
                self._logger.error('check in problems with %s:%s' %
                                   (self.bpl_dict[name]['filename'], err))
                error = ERR_CO_ERROR
                continue
            self._logger.info('update in mks for %s' %
                              self.bpl_dict[name]['filename'])

        return error


def merge_recfile_sequences(lbl_db_conn,
                            recfile_name,
                            project_name=None,
                            function=None,
                            department=None):
    """
    **read and merge all label sequences from label Db for given recfile**

    filtered by project, function and department.

    During labelling process the original label sequences will be completely or partly overwritten
    or extended by additional sections::

        recording:  +------------------------------------------------------+
        1st order:      +--S1-----+       +--------S2-------+  +--S3--+
        revision1:           +-S1.1--+       +--S2.1--+     +--+S3.1
        merged   :      +------------+    +---------------------------+

    This method returns the merged list of all combined sections stored in LabelDb without overlap,
    so the number of returned sections can be smaller than returned by `GetMeasurementSequences`
    directly from LabelDb which provides the unchanged list of sections for a recfile.

    **Attention**: the time stamps in the returned sections do not have to be
    valid time stamps of a recording frame, they might be time values between two adjacent frames!
    *Use the returned values as lower or upper border when filtering recording frames.*

    **Example:**

    .. python::
        recfile_name = 'Continuous_2014.04.15_at_08.24.17.rec'  # or Port "CurrentFile", or ...

        bpl_list_entry = dmt.lbl.merge_recfile_sequences(lbl_db_conn, recfile_name, "MFC300", "sr", "eva")

        # get simple list of start/end tuples:
        sect_list = [(s.start_ts, s.end_ts) for s in bpl_list_entry]

    See `merge_bpl_sections` example how to get sections if no LabelDb connection is available.

    :param lbl_db_conn:  connection to label db
    :type lbl_db_conn:   instance of `DBConnect`
    :param recfile_name: name of recording, leading path will be removed to check Label Db entry
    :type recfile_name:  string
    :param project_name: Project name to filter label sequences
    :type project_name:  string
    :param function:     Function name to filter label sequences
    :type function:      string
    :param department:   department (process) name to filter sequences
    :type department:    string, currently used ['dev'|'eva']

    :returns: bpl list entry with according section list giving start_ts and end_ts of the labelled sections,
              all sections will have absolute time stamps (BplListEntry.rel = False)
    :rtype:  `BplListEntry`

    :author: Joachim Hospes
    :date:   17.07.2014
    """
    logger = Logger(__name__)
    # get labeled sections from db and merge them
    sections, _ = lbl_db_conn.get_measurement_sequences(
        recfile_name, project_name, function, department)
    if not sections:
        logger.warning(
            'no sections stored in LabelDb for %s filtered by %s, %s and %s' %
            (recfile_name, project_name, function, department))
    sections.sort()
    logger.debug('LabelDb returned for %s: %s' % (recfile_name, sections))
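    # merge overlapping sections: the list is sorted by start time, so while the
    # next section starts before the current one ends, extend the current section
    # to the later end time and drop the absorbed entry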
    i = 0
    while i < len(sections) - 1:
        while i < len(sections) - 1 and sections[i][1] >= sections[i + 1][0]:
            sections[i] = (sections[i][0],
                           max(sections[i][1], sections[i + 1][1]))
            sections.pop(i + 1)
        i += 1

    bpl_list_entry = BplListEntry(recfile_name)
    for sect in sections:
        bpl_list_entry.append(sect[0], sect[1], False)

    return bpl_list_entry