# Example #1
# 0
class GenericRectObject(BaseAdasObject):
    """
    Generic rectangular object from a binary file
    """
    def __init__(self,
                 global_obj_id,
                 obj_id,
                 startts,
                 stopts,
                 data_source,
                 bus,
                 signal_names=None,
                 signals=None,
                 ignore_error=False,
                 obj=None):
        """
        Constructor creating a rectangular object either from data_source
        or from signals if specified

        :param global_obj_id: global object id from bin file
        :param obj_id: object id from bin file
        :param startts: absolute start time stamp
        :type startts: long
        :param stopts: absolute stop time stamp
        :type stopts: long
        :param data_source: dictionary from the bin file containing data
        :type data_source: DataManager
        :param bus: bus pertaining to DataManager GetDataPort
                    for one single obj, e.g. [{'Index': 2, 'VrelX': [-37.20 etc.
        :param signal_names: list of names of signals, default is GENERIC_OBJECT_SIGNAL_NAMES
        :param signals: if this is specified, signals are directly filled with it;
                        data source is not used for filling
        :param ignore_error: if True, signal/timestamp vectors of mismatching
                             length are truncated to the common length instead
                             of raising AdasObjectLoadError
        :param obj: Raw object data as dict, as put on the bus by classic signal extractor
        """
        if signal_names is None:
            signal_names = GENERIC_OBJECT_SIGNAL_NAMES
        BaseAdasObject.__init__(self, global_obj_id, data_source, signal_names)
        self.__ignore_error = ignore_error
        self.__obj_id = obj_id
        self.__bus = bus
        self._logger = Logger(self.__class__.__name__)
        if signals is not None:
            self._signals = signals
        else:
            self.__fill_object_data(global_obj_id, startts, stopts, bus, obj)

    def get_subset(self, startts=None, stopts=None):
        """
        Makes a subset of the signals within the time interval

        :param startts: start time stamp
        :param stopts: stop time stamp
        :return: new GenericRectObject containing only the signal subset
        """
        return GenericRectObject(self.get_id(), self.__obj_id, startts, stopts,
                                 self._data_source, self.__bus,
                                 self._signal_names,
                                 self._get_subset_of_signals(startts, stopts))

    def get_object_id(self):
        """
        Get Object Id

        :return: object id from the bin file
        """
        return self.__obj_id

    @staticmethod
    def __vector_length(vec):
        """Length of a signal vector as a string; 'None' for a missing vector.

        Keeps error logging safe even when a vector is None.
        """
        return "None" if vec is None else str(len(vec))

    def __fill_object_data(self, obj_id, startts, stopts, bus, obj=None):
        """Fills in signals from bin file within the time interval

        :param obj_id: global object id to look up
        :param startts: start time slot
        :param stopts: stop time slot
        :param bus: name of the bus
        :param obj: optional raw object dict; when given, the data-source lookup is skipped
        :raise AdasObjectLoadError: if the object or a required signal is missing,
                                    or vector lengths cannot be reconciled
        """
        self._signals = {}
        if obj:
            # Used when loaded through signal extractor gen obj loader
            myobj = obj
        else:
            objects = self._data_source.GetDataPort(OBJECT_PORT_NAME, bus)
            myobj = None
            # loop variable deliberately not named 'obj': the original shadowed
            # the 'obj' parameter here
            for candidate in objects:
                if candidate[OBJ_GLOBAL_ID] == obj_id:
                    myobj = candidate
                    break
        if myobj is None:
            raise AdasObjectLoadError("Binary file does not contain object id " +
                                      str(obj_id))
        tstamp = myobj[OBJ_TIME_STAMPS]

        for sig in self._signal_names:
            if sig not in myobj:
                raise AdasObjectLoadError(
                    "Required Signal " + sig +
                    " not found. Please check the configuration")
            sig_vec = myobj[sig]
            time_stamp = tstamp
            # best-effort mode: truncate both vectors to the common length
            # (None vectors are left for the error branch below)
            if (self.__ignore_error and sig_vec is not None and
                    tstamp is not None and len(sig_vec) != len(tstamp)):
                self._logger.error("Fixing signal: " + sig +
                                   " length of timestamp vector: " +
                                   str(len(tstamp)) +
                                   " length of signal value vector: " +
                                   str(len(sig_vec)))
                min_length = min(len(tstamp), len(sig_vec))
                sig_vec = sig_vec[0:min_length]
                time_stamp = tstamp[0:min_length]
                self._logger.error("Fixed signal: " + sig +
                                   " length of timestamp vector: " +
                                   str(len(time_stamp)) +
                                   " length of signal value vector: " +
                                   str(len(sig_vec)))

            if time_stamp is not None and sig_vec is not None and len(
                    sig_vec) == len(time_stamp):
                complete_signal = Signal(sig, None, sig_vec, time_stamp,
                                         min(sig_vec), max(sig_vec))
                self._signals[
                    sig] = complete_signal.GetSubsetForTimeInterval(
                        startts, stopts)
            else:
                # use the None-safe length helper: calling len() directly on a
                # None vector raised TypeError and masked the real load error
                self._logger.error("Signal: " + sig +
                                   " length of timestamp vector: " +
                                   self.__vector_length(time_stamp) +
                                   " length of signal value vector: " +
                                   self.__vector_length(sig_vec))
                raise AdasObjectLoadError(
                    "Length of signal values and time_stamp is not equal")

        return True

    @deprecated('get_subset')
    def GetSubset(self, startts=None, stopts=None):  # pylint: disable=C0103
        """
        :deprecated: use `get_subset` instead
        """
        return self.get_subset(startts, stopts)

    @deprecated('get_object_id')
    def GetObjectId(self):  # pylint: disable=C0103
        """
        :deprecated: use `get_object_id` instead
        """
        return self.get_object_id()

    @deprecated('__fill_object_data')
    def __FillObjectData(self, obj_id, startts, stopts, bus, obj=None):  # pylint: disable=C0103
        """
        :deprecated: use `__fill_object_data` instead
        """
        return self.__fill_object_data(obj_id, startts, stopts, bus, obj)
class ValEventList(object):
    """
    ValEventLoader Class - loads Event details from Database
    """
    def __init__(self, plugin_folder_list=None, ev_filter=None):
        """class for loading events form database

        :param plugin_folder_list: list of Plugin folders i.e. location where event class definition are located.
                               If folders are not provided or definition were not found by plugin manager
                               then typed class will be generated runtime inherited from `ValBaseEvent`.
                               **Pass this argument only if you have defined additional method.**
        :type plugin_folder_list: list
        :param ev_filter: Instance of Event Filter
        :type ev_filter: `ValEventFilter`
        """
        self._log = Logger(self.__class__.__name__)

        if plugin_folder_list is not None:
            self.__plugin_folders = plugin_folder_list
        else:
            self.__plugin_folders = None  # EVENT_PLUGIN_FOLDER_LIST
        self.__plugin_manager = None
        self.__event_types_list = None
        self.__event_list = []
        self.__event_inst_created = []
        self.__filter = ev_filter

    def __del__(self):
        """clean up
        """
        self.__event_list = []

    def _init_event_types(self, plugin_folders=None):
        """Init the Plugin manager and the list of event type classes.

        :param plugin_folders: optional new plugin folders; when given, the
                               plugin manager is re-created for them
        """
        new_plugin = False

        if plugin_folders is not None:
            new_plugin = True
            self.__plugin_folders = plugin_folders
        if self.__plugin_manager is None or new_plugin:
            if self.__plugin_folders is not None:
                self.__plugin_manager = PluginManager(self.__plugin_folders, ValBaseEvent)

        # only (re)load the type list when it is not available yet: the
        # previous implementation wiped an already loaded list back to []
        # on every call after the first one
        if self.__event_types_list is None:
            if self.__plugin_folders is not None:
                self.__event_types_list = self.__plugin_manager.get_plugin_class_list(remove_duplicates=True)
            else:
                self.__event_types_list = []

    def Load(self, dbi_val, dbi_gbl, testrun_id, coll_id=None, meas_id=None,  # pylint: disable=C0103
             rd_id=None, obs_name=None, level=ValSaveLoadLevel.VAL_DB_LEVEL_BASIC,
             beginabsts=None, endabsts=None, asmt_state=None, filter_cond=None, plugin_folders=None, cons_key=None):
        """
        Load Events

        :param dbi_val: Validation Result Database interface
        :type dbi_val: `OracleValResDB` or `SQLite3ValResDB`
        :param dbi_gbl: Validation Global Database interface
        :type dbi_gbl: `OracleGblDB` or `SQLite3GblDB`
        :param testrun_id: Testrun Id as mandatory field
        :type testrun_id: Integer
        :param coll_id:  Not Used. It is useless to pass any values. This information is taken
                        from database using rd_id
        :type coll_id: Integer
        :param meas_id: Measurement Id load event only for specific recording
        :type meas_id: Integer
        :param rd_id: Result Descriptor Id as mandatory field
        :type rd_id: Integer or List
        :param obs_name: Not Used. It is useless to pass any values.
                        This information is taken from database with testrun_id
        :type obs_name: String
        :param level: Load Level to specify to which level the event data should be level
                      with following possibilities::

                        VAL_DB_LEVEL_STRUCT = Events
                        VAL_DB_LEVEL_BASIC = Events + Assessment
                        VAL_DB_LEVEL_INFO = Events + Assessment + Attribute
                        VAL_DB_LEVEL_ALL = Events + Assessment + Attribute + Image

        :type level: `ValSaveLoadLevel`
        :param beginabsts: Basic filter. Begin Absolute Time stamp i.e. Start of the events
        :type beginabsts: Integer
        :param endabsts: End Absolute Time stamp i.e. End of the events
        :type endabsts: Integer
        :param asmt_state: Assessment State
        :type asmt_state: String
        :param filter_cond: Advance filter feature which can filter events based on event attributes;
                            filter map name specified in XML config file of custom filters.
                            Please read documentation of `ValEventFilter` for more detail
        :param plugin_folders: The value passed in constructor overrules. It is useless to pass value
        :type plugin_folders: list
        :param cons_key: Constrain Key. Not used
        :type cons_key: NoneType
        :return: True on success (also when rd_id is None and nothing is loaded)
        """
        _ = coll_id
        _ = obs_name
        _ = asmt_state
        _ = plugin_folders
        _ = cons_key

        inc_asmt = False
        inc_attrib = False
        inc_images = False
        self.__event_list = []
        self.__event_inst_created = []
        unit_map = {}

        statement = None
        if filter_cond is not None:
            if self.__filter is not None:
                statement = self.__filter.Load(dbi_val, filtermap_name=filter_cond)
                if statement is None:
                    self._log.error("The map filter was invalid. Events will be loaded without filter")
                elif isinstance(statement, list):
                    self._log.debug("The map filter was found. Events will be loaded with filter")

        if rd_id is not None:
            # (sic) method name as declared by the DB interface
            rd_list = dbi_val.get_resuls_descriptor_child_list(rd_id)
            if len(rd_list) == 0:
                rd_list = [rd_id]
        else:
            return True

        if level & ValSaveLoadLevel.VAL_DB_LEVEL_2:
            inc_asmt = True

        if level & ValSaveLoadLevel.VAL_DB_LEVEL_3:
            inc_attrib = True
            unit_records = dbi_gbl.get_unit()
            for unit_entry in unit_records:
                unit_map[str(unit_entry[COL_NAME_UNIT_ID])] = unit_entry[COL_NAME_UNIT_NAME]

        if level & ValSaveLoadLevel.VAL_DB_LEVEL_4:
            inc_images = True

        records, image_attribs = dbi_val.get_event_for_testrun(testrun_id, measid=meas_id, beginabsts=beginabsts,
                                                               endabsts=endabsts, rdid=rd_list, cond=None,
                                                               filt_stat=statement,
                                                               inc_asmt=inc_asmt, inc_attrib=inc_attrib,
                                                               inc_images=inc_images)
        col_list = records[0]
        records = records[1]
        self.__event_inst_created = {}
        self._init_event_types()

        # process the records in chunks of at most 10000 to bound memory use
        while True:
            if len(records) <= 10000:
                self._prepare_events(dbi_val, records, col_list, image_attribs, unit_map,
                                     inc_asmt=inc_asmt, inc_attrib=inc_attrib, inc_images=inc_images)
                records = []
                break
            else:
                self._prepare_events(dbi_val, records[:10000], col_list, image_attribs, unit_map,
                                     inc_asmt=inc_asmt, inc_attrib=inc_attrib, inc_images=inc_images)
                del records[:10000]

        self.__event_inst_created = {}
        return True

    def _prepare_events(self, dbi_val, records, col_list, image_attribs, unit_map,
                        inc_asmt=True, inc_attrib=True, inc_images=True):
        """
        Prepare Event Object list by taking chunks for records from database

        :param dbi_val: DB interface to Validation Database
        :type dbi_val: OracleValResDB or  SQLite3ValResDB
        :param records: List of records as list of dict
        :type records: list
        :param col_list: Column List in records
        :type col_list: list
        :param image_attribs: Event Image attribute Id
        :type image_attribs: list
        :param unit_map: Unit map of Unit Id VS Unit Name
        :type unit_map: Dict
        :param inc_asmt: Flag to include Assessment in Event. Default True
        :type inc_asmt: Bool
        :param inc_attrib: Flag to include Event Attributes. Default True
        :type inc_attrib: Bool
        :param inc_images: Flag to include Event Attribute Images. Default True
        :type inc_images: Bool
        """
        event = ValBaseEvent()  # fix pylint problem, event will be set properly later
        if len(records) > 0:
            seid_eventlistmap = self.__event_inst_created
            sed_idx = col_list.index(COL_NAME_EVENTS_SEID)
            cls_name_idx = col_list.index(COL_NAME_EVENTS_VIEW_CLASSNAME)
            begin_idx = col_list.index(COL_NAME_EVENTS_VIEW_BEGINABSTS)
            start_idx = col_list.index(COL_NAME_EVENTS_VIEW_START_IDX)
            end_idx = col_list.index(COL_NAME_EVENTS_VIEW_ENDABSTS)
            stop_idx = col_list.index(COL_NAME_EVENTS_VIEW_STOP_IDX)
            measid_idx = col_list.index(COL_NAME_EVENTS_MEASID)

            if inc_asmt:
                usr_idx = col_list.index(COL_NAME_ASS_USER_ID)
                wf_idx = col_list.index("WF" + COL_NAME_WORKFLOW_NAME)
                asmtst_idx = col_list.index("ST" + COL_NAME_ASSESSMENT_STATE_NAME)
                comm_idx = col_list.index(COL_NAME_ASS_COMMENT)
                asmt_date_idx = col_list.index(COL_NAME_ASS_DATE)
                issue_idx = col_list.index(COL_NAME_ASS_TRACKING_ID)
                resassid_idx = col_list.index(COL_NAME_EVENTS_RESASSID)
            if inc_attrib:
                unitid_idx = col_list.index(COL_NAME_EVENT_ATTR_TYPES_UNITID)
                arribid_idx = col_list.index(COL_NAME_EVENT_ATTR_ATTRID)
                atrtypeid_idx = col_list.index(COL_NAME_EVENT_ATTR_TYPES_NAME)
                value_idx = col_list.index(COL_NAME_EVENT_ATTR_VALUE)

        for record in records:
            if str(int(record[sed_idx])) not in seid_eventlistmap:

                # resolve the event class from the plugin type list; fall back
                # to a runtime-generated subclass of ValBaseEvent
                cls = None
                for etype in self.__event_types_list:
                    if etype['name'] == record[cls_name_idx]:
                        cls = etype['type']
                        break

                if cls is None:
                    e_type = type(record[cls_name_idx], (ValBaseEvent,), {})
                    event = e_type(start_time=record[begin_idx], start_index=record[start_idx],
                                   stop_time=record[end_idx], stop_index=record[stop_idx], seid=record[sed_idx])
                else:
                    # two-step construction kept on purpose: mirrors the
                    # original instantiation of plugin-provided classes
                    event = object.__new__(cls)
                    event.__init__(start_time=record[begin_idx], start_index=record[start_idx],
                                   stop_time=record[end_idx], stop_index=record[stop_idx], seid=record[sed_idx])

                event.SetMeasId(record[measid_idx])

                if inc_asmt:
                    asmt = ValAssessment(user_id=record[usr_idx], wf_state=record[wf_idx],
                                         ass_state=record[asmtst_idx], ass_comment=record[comm_idx],
                                         date_time=record[asmt_date_idx], issue=record[issue_idx])
                    asmt.ass_id = record[resassid_idx]
                    event.AddAssessment(asmt)

                self.__event_list.append(event)
                seid_eventlistmap[str(int(record[sed_idx]))] = len(self.__event_list) - 1

            else:
                # further records for an already created event only add attributes
                event = self.__event_list[seid_eventlistmap[str(int(record[sed_idx]))]]

            if inc_attrib:
                if record[unitid_idx] is not None:
                    unit = unit_map[str(record[unitid_idx])]
                else:
                    unit = str(record[unitid_idx])

                if inc_images and record[arribid_idx] in image_attribs:
                    image = dbi_val.get_event_image(record[arribid_idx])[COL_NAME_EVENT_IMG_IMAGE]
                else:
                    image = None
                event.AddAttribute(record[atrtypeid_idx], value=record[value_idx], unit=unit, image=image)

    def Save(self, dbi_val, dbi_gbl, testrun_id, coll_id, obs_name=None, parent_id=None,  # pylint: disable=C0103
             level=ValSaveLoadLevel.VAL_DB_LEVEL_BASIC, cons_key=None):
        """
        Save Events

        :param dbi_val: Validation Result Database interface
        :type dbi_val: `OracleValResDB` or `SQLite3ValResDB`
        :param dbi_gbl: Validation Global Database interface
        :type dbi_gbl: `OracleGblDB` or `SQLite3GblDB`
        :param testrun_id: Testrun Id
        :type testrun_id: Integer
        :param coll_id: Collection ID
        :type coll_id: Integer
        :param obs_name: Observer Name registered in Global Database
        :type obs_name: String
        :param parent_id: Parent Result Descriptor Id
        :type parent_id: Integer
        :param level: Save level::

                            - VAL_DB_LEVEL_STRUCT: Result Descriptor only,
                            - VAL_DB_LEVEL_BASIC: Result Descriptor and result,
                            - VAL_DB_LEVEL_INFO: Result Descriptor, Result and Assessment
                            - VAL_DB_LEVEL_ALL: Result with images and all messages

        :param cons_key: constraint key -- for future use
        :type cons_key: NoneType
        :return: True if all events were saved, False otherwise
        """
        res = False

        if dbi_val.get_testrun_lock(tr_id=testrun_id) == 1:
            self._log.error("No Event is saved due to locked testrun ")
            return res
        for evt in self.__event_list:
            try:
                res = evt.Save(dbi_val, dbi_gbl, testrun_id, coll_id, evt.GetMeasId(),
                               obs_name, parent_id, level, cons_key)
            except ValEventError as ex:
                # 'except E as ex' replaces the Python-2-only 'except E, ex'
                self._log.warning("Events %s could not be stored. See details: %s " % (str(evt), ex))
                res = False

            if res is False:
                break

        if res is True:
            pass
            # dbi_val.commit()
            # dbi_gbl.commit()

        return res
# Example #3
# 0
class GenObjList(object):
    """
    GenObjList is lightweight version of GenericObjectList for fast object matching

    E.g.::
        generic_object_list = GenObjList(data_manager, bus_name, sig_names=MY_BIN_SIGNALS)
        best_obj = generic_object_list.get_best_tracked_object(ref_obj)
    """
    def __init__(self,
                 data_manager,
                 bus_name,
                 sig_names=None,
                 distx_sig_name=None,
                 disty_sig_name=None,
                 velx_sig_name=None):
        """
        :param data_manager: data_manager
        :param bus_name: bus_name
        :param sig_names: names of the signals to be extracted, default: [OBJ_DISTX, OBJ_DISTY, OBJ_VELX]
        :param distx_sig_name: distx_sig_name, default: OBJ_DISTX
        :param disty_sig_name: disty_sig_name, default: OBJ_DISTY
        :param velx_sig_name: velx_sig_name, default: OBJ_VELX
        """
        self.objects = []
        self.data_manager = data_manager
        self.bus_name = bus_name
        self.logger = Logger(self.__class__.__name__)

        if sig_names is None:
            self.sig_names = [OBJ_DISTX, OBJ_DISTY, OBJ_VELX]
        else:
            self.sig_names = sig_names
        if distx_sig_name is None:
            self.distx_sig_name = OBJ_DISTX
        else:
            self.distx_sig_name = distx_sig_name
        if disty_sig_name is None:
            self.disty_sig_name = OBJ_DISTY
        else:
            self.disty_sig_name = disty_sig_name
        if velx_sig_name is None:
            self.velx_sig_name = OBJ_VELX
        else:
            self.velx_sig_name = velx_sig_name

        # BUGFIX: the original re-assigned disty_sig_name/velx_sig_name from the
        # raw constructor arguments here, silently clobbering the defaults set
        # above with None. Those two assignments are removed.
        self.load()

    def load(self):
        """
        loads objects from signal extractor objects port
        """
        objects = self.data_manager.GetDataPort(OBJECT_PORT_NAME,
                                                self.bus_name)
        for idx, obj_dict in enumerate(objects):
            self.objects.append(
                GenObj(obj_dict[OBJ_OBJECT_ID], obj_dict[OBJ_GLOBAL_ID], idx,
                       obj_dict[OBJ_TIME_STAMPS][0],
                       obj_dict[OBJ_TIME_STAMPS][-1]))

    @staticmethod
    def get_overlap(ref_startts, ref_stopts, my_startts, my_stopts):
        """
        Gets the overlapping time interval between reference and candidate object

        :param ref_startts: reference object start time stamp
        :param ref_stopts: reference object stop time stamp
        :param my_startts: candidate object start time stamp
        :param my_stopts: candidate object stop time stamp
        :return: (startts, stopts) of the overlap, or (None, None) if the
                 intervals do not overlap
        """
        if my_startts <= ref_startts:
            if my_stopts >= ref_startts:
                startts = ref_startts
                stopts = min(my_stopts, ref_stopts)
                return startts, stopts
        else:
            if my_startts <= ref_stopts:
                startts = my_startts
                stopts = min(my_stopts, ref_stopts)
                return startts, stopts
        return None, None

    def get_best_tracked_object(self,
                                ref_obj,
                                min_nr_ts=50,
                                min_nr_lifetime_full_overlap=50,
                                max_norm=1.0,
                                get_lightweight_obj=False,
                                get_all_objects=False):
        """
        gets a GenericRectObject (,GenObj) with the best track based on best norm an min number of timestamps

        :param ref_obj: ref_oid from the object DB
        :param min_nr_ts: minimum number of overlapping time slots considered for matching
        :param min_nr_lifetime_full_overlap: objects having a full overlap during their whole lifetime are selected.
                                             this parameter limit the minimum required lifetime for this kind of selection
        :param max_norm: maximum norm (root mean square deviation of distance and velocity) considered for matching
        :param get_lightweight_obj: return also lightweight GenObj
        :param get_all_objects: returns all objects which fulfill minimum criteria
        :return: best obj as GenericRectObject/None or if get_lightweight_obj: GenericRectObject, GenObj or None, None
                 if get_all_objects: [(GenericRectObject1, GenObj1), (GenericRectObject2, GenObj2)]
        """
        # The typical accuracy of the sensor may be taken from the OD requirement specification:
        # doors://rbgs854a:40000/?version=2&prodID=0&view=00000001&urn=urn:telelogic::1-503e822e5ec3651e-O-352-000221c5
        std_err_x_off = 0.15
        std_err_y_off = 0.23
        std_err_y_prop_x = 0.0044
        std_err_v_off = 0.2

        ret_objects = []
        rts = ref_obj.get_signal(OBJ_DISTX).GetTimestamps()
        rdx = ref_obj.get_signal(OBJ_DISTX).GetValue()
        rdy = ref_obj.get_signal(OBJ_DISTY).GetValue()
        rvx = ref_obj.get_signal(OBJ_VELX).GetValue()
        # note: np.float was removed in NumPy >= 1.24; the builtin float is the
        # exact equivalent of the old alias
        ref_timestamp = np.fromiter(rts, float)
        ref_distx = np.fromiter(rdx, float)
        ref_disty = np.fromiter(rdy, float)
        ref_velx = np.fromiter(rvx, float)
        ref_startts = ref_obj.get_start_time()
        ref_stopts = ref_obj.get_end_time()

        # compute cycle time from the first 2 timestamps difference;
        # two samples are enough for one difference (original required three)
        if len(rts) >= 2:
            cycle_time = rts[1] - rts[0]
        else:
            cycle_time = 60000

        min_length_ts = cycle_time * min_nr_ts

        best_obj = None
        best_norm = None
        best_ol_startts = None
        best_ol_stopts = None
        sig_length_error = False
        for co in self.objects:
            ol_starts, ol_stopts = self.get_overlap(ref_startts, ref_stopts,
                                                    co.startts, co.stopts)
            # Reduce the minimum overlapping time for objects which spent their whole life in the label
            min_time_in_label = max(cycle_time * min_nr_lifetime_full_overlap,
                                    (co.stopts - co.startts) - 1)
            # For other objects a minimum overlapping time is required
            min_time_in_label = min(min_time_in_label, min_length_ts)
            if ol_starts is not None and ol_stopts is not None and (
                    ol_stopts - ol_starts) > min_time_in_label:
                # determine start and stop indexes of reference and candidate objects
                cots, codx, cody, covx = co.get_ts_distx_disty_velx(
                    self.data_manager, self.bus_name)
                obj_timestamp = np.fromiter(cots, float)
                r_start_idx = np.where(ref_timestamp == ol_starts)[0]
                r_stop_idx = np.where(ref_timestamp == ol_stopts)[0]
                co_start_idx = np.where(obj_timestamp == ol_starts)[0]
                co_stop_idx = np.where(obj_timestamp == ol_stopts)[0]
                # if indexes were found:
                if r_start_idx.size != 0 and r_stop_idx.size != 0 and co_start_idx.size != 0 and co_stop_idx.size != 0:
                    r_start_idx = r_start_idx[0]
                    r_stop_idx = r_stop_idx[0]
                    co_start_idx = co_start_idx[0]
                    co_stop_idx = co_stop_idx[0]
                    sig_length_ref = r_stop_idx - r_start_idx + 1
                    sig_length_co = co_stop_idx - co_start_idx + 1
                    # if index lengths are the same:
                    if sig_length_ref == sig_length_co:
                        # candidate object signals
                        obj_timestamp = obj_timestamp[
                            co_start_idx:co_stop_idx + 1]
                        co_distx = np.fromiter(
                            codx, float)[co_start_idx:co_stop_idx + 1]
                        co_disty = np.fromiter(
                            cody, float)[co_start_idx:co_stop_idx + 1]
                        co_velx = np.fromiter(
                            covx, float)[co_start_idx:co_stop_idx + 1]
                        # reference object signals
                        r_distx = ref_distx[r_start_idx:r_stop_idx + 1]
                        r_disty = ref_disty[r_start_idx:r_stop_idx + 1]
                        r_velx = ref_velx[r_start_idx:r_stop_idx + 1]
                        if (len(co_distx) != len(r_distx)
                                or len(co_disty) != len(r_disty)
                                or len(co_velx) != len(r_velx)):
                            self.logger.error(
                                "signal length check failed for global oid: " +
                                str(co.global_oid))
                        else:
                            # see formula definition in EM/OD Design specification
                            std_err_x = np.array([std_err_x_off] *
                                                 sig_length_ref)
                            std_err_y = np.array(
                                [std_err_y_off] *
                                sig_length_ref) + std_err_y_prop_x * co_distx
                            std_err_v = np.array([std_err_v_off] *
                                                 sig_length_ref)
                            norm = np.linalg.norm([
                                (co_distx - r_distx) / std_err_x,
                                (co_disty - r_disty) / std_err_y,
                                (co_velx - r_velx) / std_err_v
                            ])
                            norm_norm = norm / float(sig_length_ref)
                            is_norm_ok = norm_norm < max_norm
                            if get_all_objects:
                                if is_norm_ok:
                                    if get_lightweight_obj:
                                        ret_objects.append(
                                            (co.get(self.sig_names, ol_starts,
                                                    ol_stopts,
                                                    self.data_manager,
                                                    self.bus_name), co))
                                    else:
                                        ret_objects.append(
                                            co.get(self.sig_names, ol_starts,
                                                   ol_stopts,
                                                   self.data_manager,
                                                   self.bus_name))
                            else:
                                if (best_norm is None or
                                        norm_norm < best_norm) and is_norm_ok:
                                    best_norm = norm_norm
                                    best_obj = co
                                    best_ol_startts = ol_starts
                                    best_ol_stopts = ol_stopts
                    else:
                        # index windows of reference and candidate differ;
                        # reported once after the loop
                        sig_length_error = True
        if sig_length_error:
            self.logger.error(
                "length of reference object signals were not equal to the measurement object signals"
                + " use DbObjectList:interpolate_to_time_system() to have" +
                " the same time stamps for the reference objects that the measurement has"
            )
        # return only the best
        if not get_all_objects:
            if best_obj is None:
                if get_lightweight_obj:
                    return None, None
                else:
                    return None
            else:
                if get_lightweight_obj:
                    return best_obj.get(self.sig_names, best_ol_startts,
                                        best_ol_stopts, self.data_manager,
                                        self.bus_name), best_obj
                else:
                    return best_obj.get(self.sig_names, best_ol_startts,
                                        best_ol_stopts, self.data_manager,
                                        self.bus_name)
        # return all
        else:
            return ret_objects
# Example #4
# 0
class GenericObjectList(BaseObjectList):
    """
    Generic object list loaded from a binary file
    """
    def __init__(self,
                 data_source,
                 sensor,
                 list_name,
                 object_filter_if,
                 bus="Bus#1",
                 signal_names=None,
                 objects=None):
        """
        :param data_source: data_manager initialized with binary data.
                            must have e.g. GetDataPort("objects" , "Bus#1")
        :param sensor: name of the sensor
        :param list_name: name of the list
        :param object_filter_if: ObjectFilterIf, e.g. ObjectByGateFilter
        :param bus: bus pertaining to DataManager GetDataPort
        :param signal_names: list of names of signals to be loaded,
                             default is GENERIC_OBJECT_SIGNAL_NAMES
        :param objects: optional pre-built object list; a fresh empty list
                        is created when not given (avoids a shared mutable
                        default)
        """
        if signal_names is None:
            signal_names = GENERIC_OBJECT_SIGNAL_NAMES

        BaseObjectList.__init__(self, data_source, sensor, list_name,
                                object_filter_if, signal_names)

        self._logger = Logger(self.__class__.__name__, level=INFO)

        self._objects = [] if objects is None else objects

        self.__bus = bus

    @staticmethod
    def __load_needed(ref_startts, ref_stopts, my_startts, my_stopts):
        """
        Check whether the reference and candidate object time intervals
        overlap.  A ``None`` reference boundary means the reference interval
        is open on that side, so the corresponding condition always holds.

        :param ref_startts: reference start time stamp (None = open start)
        :param ref_stopts: reference stop time stamp (None = open end)
        :param my_startts: candidate object start time stamp
        :param my_stopts: candidate object stop time stamp
        :return: True if the intervals overlap, False otherwise
        """
        starts_early_enough = ref_stopts is None or my_startts <= ref_stopts
        stops_late_enough = ref_startts is None or my_stopts >= ref_startts
        return starts_early_enough and stops_late_enough

    def load_objects(self, startts=None, stopts=None, ignore_error=False):
        """
        Load objects into GenericObjectList. It may raise AdasObjectLoadError

        :param startts: absolute start time stamp
        :type startts: long
        :param stopts: absolute stop time stamp
        :type stopts: long
        :param ignore_error: passed through to GenericRectObject to tolerate
                             signal extraction problems for single objects
        :type ignore_error: bool
        :return: True on success
        """
        # clear objects: empty the old list in place (in case it is aliased
        # elsewhere) before rebinding to a fresh one
        del self._objects[:]
        self._objects = []

        # raw object dicts as put on the bus by the classic signal extractor
        objects = self._data_source.GetDataPort(OBJECT_PORT_NAME, self.__bus)

        if objects is None:
            raise AdasObjectLoadError("Binary file query returned None")

        for obj_dict in objects:
            try:
                my_startts = obj_dict[OBJ_TIME_STAMPS][0]
                my_stopts = obj_dict[OBJ_TIME_STAMPS][-1]
                if self.__load_needed(startts, stopts, my_startts, my_stopts):
                    self._objects.append(
                        GenericRectObject(obj_dict[OBJ_GLOBAL_ID],
                                          obj_dict[OBJ_OBJECT_ID], startts,
                                          stopts, self._data_source,
                                          self.__bus, self._signal_names, None,
                                          ignore_error, obj_dict))
            # 'as' form works on Python 2.6+ and Python 3, unlike 'except X, e'
            except AdasObjectLoadError as ex:
                # keep loading the remaining objects, just log the failure
                msg = "Object %s could not be loaded from binary. EX:" % str(
                    obj_dict[OBJ_GLOBAL_ID])
                msg += str(ex)
                self._logger.error(msg)

        return True
Example #5
0
class ValAssessmentStates(object):
    """ Base class for assessment states of one observer type
    """
    # predefined assessment state names
    PASSED = "Passed"
    FAILED = "Failed"
    INVESTIGATE = "Investigate"
    NOT_ASSESSED = "Not Assessed"

    def __init__(self, obs_type):
        """Constructor for Assessment class

        :param obs_type: Name of the observer type
        """
        # list of state records (dicts with name, description and db ids)
        self.__states = []
        self.__type = obs_type
        self.__type_id = None
        self._logger = Logger(self.__class__.__name__)
        # db id of the NOT_ASSESSED state, returned when a lookup fails
        self.__default_stateid = None

    def load(self, dbi_gbl):
        """Load the assessment states from the database

        :param dbi_gbl: global db interface
        :return: True on passed, False on Error
        """
        if not issubclass(dbi_gbl.__class__, db_gbl.BaseGblDB):
            self._logger.error("GBL Database interface undefined")
            return False
        self.__type_id = dbi_gbl.get_val_observer_type_id(self.__type)
        self.__states = dbi_gbl.get_assessment_state(
            observer_type_id=self.__type_id)
        self.__default_stateid = dbi_gbl.get_assessment_state_id(
            self.NOT_ASSESSED)
        return True

    def save(self, dbi_gbl):
        """Save the assessment states

        :param dbi_gbl: GBL Database interface
        :return: True on passed, False on Error
        """
        if not issubclass(dbi_gbl.__class__, db_gbl.BaseGblDB):
            self._logger.error("GBL Database interface undefined")
            return False

        self.__type_id = dbi_gbl.get_val_observer_type_id(self.__type)
        for state in self.__states:
            # only states without an assessment id are new and must be added
            if db_gbl.COL_NAME_ASSESSMENT_STATE_ASSID not in state:
                state[db_gbl.
                      COL_NAME_ASSESSMENT_STATE_VALOBS_TYPEID] = self.__type_id
                dbi_gbl.add_assessment_state(state)

        return True

    def add_state(self, name, desc):
        """ Add a new assessment state unless it already exists

        :param name: name of assessment state
        :param desc: description of assessment state
        :return: True if the state was added, False if already known
        """
        for state in self.__states:
            # case-insensitive comparison on BOTH sides, consistent with
            # get_state_id(); comparing against the raw stored name never
            # matched capitalized names like "Passed"
            if name.lower() == \
                    state[db_gbl.COL_NAME_ASSESSMENT_STATE_NAME].lower():
                return False
        self.__states.append({
            db_gbl.COL_NAME_ASSESSMENT_STATE_NAME: name,
            db_gbl.COL_NAME_ASSESSMENT_STATE_DESCRIPTION: desc
        })
        return True

    @property
    def type(self):
        """ Get the observer type name
        """
        return self.__type

    def get_states(self, with_id=False):
        """ Return the list of assessment states or a key / value dictionary with ID and Name

        :param with_id: if True return an {id: name} dict, otherwise a plain
                        list of state names
        """
        if with_id is False:
            state_list = []
            for state in self.__states:
                state_list.append(state[db_gbl.COL_NAME_ASSESSMENT_STATE_NAME])

        else:
            state_list = {}
            for state in self.__states:
                state_list[state[db_gbl.COL_NAME_ASSESSMENT_STATE_ASSID]] = \
                    state[db_gbl.COL_NAME_ASSESSMENT_STATE_NAME]

        return state_list

    def get_state_id(self, state_name):
        """ Get State Identifier of the given Assessment

        :param state_name: Assessment State name
        :return: matching state id, or the default (NOT_ASSESSED) id
        """
        # states without an observer type id are valid for any observer
        obs_typeids = [None]
        if self.__type_id is not None:
            obs_typeids.append(self.__type_id)
        for state in self.__states:
            if (state_name.lower()
                    == state[db_gbl.COL_NAME_ASSESSMENT_STATE_NAME].lower()
                    and state[db_gbl.COL_NAME_ASSESSMENT_STATE_VALOBS_TYPEID]
                    in obs_typeids):
                return state[db_gbl.COL_NAME_ASSESSMENT_STATE_ASSID]
        return self.__default_stateid

    def get_state_name(self, state_id):
        """ Get Assessment State name by given Identifier

        :param state_id: Assessment State Identifier
        :return: state name, or None when the id is unknown
        """
        for state in self.__states:
            if state_id == state[db_gbl.COL_NAME_ASSESSMENT_STATE_ASSID]:
                return state[db_gbl.COL_NAME_ASSESSMENT_STATE_NAME]

    @deprecated('load')
    def Load(self, dbi_gbl):  # pylint: disable=C0103
        """deprecated"""
        return self.load(dbi_gbl)

    @deprecated('save')
    def Save(self, dbi_gbl):  # pylint: disable=C0103
        """deprecated"""
        return self.save(dbi_gbl)

    @deprecated('add_state')
    def AddState(self, name, desc):  # pylint: disable=C0103
        """deprecated"""
        return self.add_state(name, desc)

    @deprecated('type (property)')
    def GetType(self):  # pylint: disable=C0103
        """deprecated"""
        return self.type

    @deprecated('get_states')
    def GetStates(self, with_id=False):  # pylint: disable=C0103
        """deprecated"""
        return self.get_states(with_id)

    @deprecated('get_state_id')
    def GetStateId(self, state_name):  # pylint: disable=C0103
        """deprecated"""
        return self.get_state_id(state_name)

    @deprecated('get_state_name')
    def GetStateName(self, state_id):  # pylint: disable=C0103
        """deprecated"""
        return self.get_state_name(state_id)
Example #6
0
class ValAssessment(object):
    """ Base class for assessments
    """
    def __init__(self, *args, **kwargs):
        """(init)

        :keyword user_id: User Id
        :keyword wf_state: Workflow State
        :keyword ass_state: Assessment State
        :keyword ass_comment: Assessment Comment
        :keyword date_time: Assessment date/time of last modification
        :keyword issue: Issue name from MKS
        """
        opts = arg_trans([
            'user_id', 'wf_state', 'ass_state', 'ass_comment', 'date_time',
            'issue'
        ], *args, **kwargs)
        self.__user_id = opts[0]
        self.__wf_state = opts[1]
        self.__ass_state = opts[2]
        self.__ass_comment = opts[3]
        self.__date_time = opts[4]
        self.__issue = opts[5]
        # db assessment id, set on load()/save()
        self.__id = None
        # lazily loaded helper objects, see __load_states()
        self.__ass_states = None
        self.__ass_wf = None
        self.__user_account = None
        self._logger = Logger(self.__class__.__name__)

    def __str__(self):
        """ Return the Assessment as String
        """
        txt = "ValAssessment:\n"
        if self.__id is not None:
            txt += str(" ID: %s" % self.__id)
        else:
            txt += str(" ID: -")

        txt += str(" Status: '%s'" % self.__wf_state)
        txt += str(" Result: '%s'" % self.__ass_state)
        if self.__issue is not None:
            txt += str(" Issue: %s" % self.__issue)

        txt += str(" Date: %s" % self.__date_time)
        txt += str(" Info: '%s'" % self.__ass_comment)
        return txt

    def load(self, ass_id, dbi_val, dbi_gbl, val_obs_name):
        """ Load the Assessment from DB

        :param ass_id: Assessment ID
        :param dbi_val: VAL Database interface
        :param dbi_gbl:  GBL Database interface
        :param val_obs_name: name of observer
        :return: True on success, False on error
        """
        if not issubclass(dbi_val.__class__, db_val.BaseValResDB):
            self._logger.error("VAL Database interface undefined")
            return False

        if not issubclass(dbi_gbl.__class__, db_gbl.BaseGblDB):
            self._logger.error("GBL Database interface undefined")
            return False

        self.__load_states(dbi_gbl, val_obs_name)

        entries = dbi_val.get_assessment(ass_id)
        if len(entries) == 0:
            # fixed garbled double-negative message
            self._logger.error("No result found for Assessment ID %s" %
                               str(ass_id))
            return False
        elif len(entries) == 1:
            record = entries[0]
            self.__id = record[db_val.COL_NAME_ASS_ID]
            self.__user_id = record[db_val.COL_NAME_ASS_USER_ID]
            self.__ass_comment = record[db_val.COL_NAME_ASS_COMMENT]
            self.__date_time = record[db_val.COL_NAME_ASS_DATE]
            self.__issue = record[db_val.COL_NAME_ASS_TRACKING_ID]
            wf_id = record[db_val.COL_NAME_ASS_WFID]
            self.__wf_state = self.__ass_wf.get_state_name(wf_id)

            self.__user_account = dbi_gbl.get_user(
                user_id=self.__user_id)[db_gbl.COL_NAME_USER_LOGIN]

            assst_id = record[db_val.COL_NAME_ASS_ASSSTID]
            self.__ass_state = self.__ass_states.get_state_name(assst_id)
            return True

        # more than one entry for the same id: treat as error
        return False

    def save(self, dbi_val, dbi_gbl, val_obs_name):
        """ Save the Assessment

        :param dbi_val: validation DB connection
        :param dbi_gbl: global DB connection
        :param val_obs_name: name of observer
        :return: True on success, False on error
        """
        record = {}
        if not issubclass(dbi_val.__class__, db_val.BaseValResDB):
            self._logger.error("VAL Database interface undefined")
            return False

        if not issubclass(dbi_gbl.__class__, db_gbl.BaseGblDB):
            self._logger.error("GBL Database interface undefined")
            return False

        self.__load_states(dbi_gbl, val_obs_name)
        if self.__user_id is None:
            self.__user_id = dbi_gbl.current_gbluserid

        record[db_val.COL_NAME_ASS_USER_ID] = self.__user_id
        record[db_val.COL_NAME_ASS_COMMENT] = self.__ass_comment
        record[db_val.COL_NAME_ASS_TRACKING_ID] = self.__issue
        wf_id = self.__ass_wf.get_state_id(self.__wf_state)
        record[db_val.COL_NAME_ASS_WFID] = wf_id

        record[db_val.COL_NAME_ASS_DATE] = self.__date_time
        assst_id = self.__ass_states.get_state_id(self.__ass_state)
        record[db_val.COL_NAME_ASS_ASSSTID] = assst_id

        self.__id = dbi_val.add_assessment(record)
        # by default db sets current db date to assessment date entries if nothing is passed
        # so setting it for further work with the assessment (e.g. in report) has to be done after adding
        # because the db returns date time in different format as it expects for setting
        if self.__date_time is None:
            self.__date_time = dbi_gbl.curr_date_time()

        return True

    def update(self, dbi_val, dbi_gbl, val_obs_name):
        """ Update the Assessment

        :param dbi_val: validation db connection
        :param dbi_gbl: global db connection
        :param val_obs_name: observer name
        :return: True on success, False on error
        """
        if not issubclass(dbi_val.__class__, db_val.BaseValResDB):
            self._logger.error("VAL Database interface undefined")
            return False

        if not issubclass(dbi_gbl.__class__, db_gbl.BaseGblDB):
            self._logger.error("GBL Database interface undefined")
            return False
        if self.__id is None:
            self._logger.error("Cannot Update Unsaved/Unloaded Assessment")
            return False
        else:
            if dbi_val.is_assessment_locked(self.__id):
                self._logger.error(
                    "Cannot Update Assessment due to Locked Testrun")
                return False
            record = {}
            self.__load_states(dbi_gbl, val_obs_name)
            user = dbi_gbl.get_user(login=environ["USERNAME"])
            self.__user_id = user[db_gbl.COL_NAME_USER_ID]
            record[db_val.COL_NAME_ASS_ID] = self.__id
            record[db_val.COL_NAME_ASS_USER_ID] = self.__user_id
            record[db_val.COL_NAME_ASS_COMMENT] = self.__ass_comment
            record[db_val.COL_NAME_ASS_TRACKING_ID] = self.__issue
            assst_id = self.__ass_states.get_state_id(self.__ass_state)
            record[db_val.COL_NAME_ASS_ASSSTID] = assst_id
            self.__date_time = dbi_gbl.curr_date_time()

            record[db_val.COL_NAME_ASS_DATE] = self.__date_time
            # an update is always a manual workflow step
            wf_id = self.__ass_wf.get_state_id(
                ValAssessmentWorkFlows.ASS_WF_MANUAL)
            record[db_val.COL_NAME_ASS_WFID] = wf_id
            dbi_val.update_assessment(record)
            # reload to pick up db-side changes; consistent True/False return
            # like load()/save() (previously returned None on success)
            return self.load(self.__id, dbi_val, dbi_gbl, val_obs_name)

    def __load_states(self, dbi_gbl, val_obs_name):
        """ Load the assessment states and workflows once (lazy init) """

        if self.__ass_states is None:
            self.__ass_states = ValAssessmentStates(val_obs_name)
            self.__ass_states.load(dbi_gbl)

        if self.__ass_wf is None:
            self.__ass_wf = ValAssessmentWorkFlows()
            self.__ass_wf.load(dbi_gbl)

    @property
    def user_id(self):
        """ Get the User Name
        """
        return self.__user_id

    @property
    def user_account(self):
        """ return the account name of the user
        """
        return self.__user_account

    @property
    def ass_id(self):
        """Get the Assessment Identifier
        """
        return self.__id

    @ass_id.setter
    def ass_id(self, value):
        """setter property for Assessment ID

        :param value: id of assessment
        """
        self.__id = value

    # note: a duplicate read-only `wf_state` property definition used to
    # precede this one and was dead code (immediately shadowed); only the
    # read/write property below is kept
    @property
    def wf_state(self):
        """Get the Assessment WorkFlow State
        """
        return self.__wf_state

    @wf_state.setter
    def wf_state(self, value):
        """setter property for workflow state

        :param value: workflow state of assessment
        """
        self.__wf_state = value

    @property
    def comment(self):
        """ getter for property `comment` """
        return self.__ass_comment

    @comment.setter
    def comment(self, value):
        """ setter for property `comment`

        :param value: comment of assessment
        """
        self.__ass_comment = value

    @property
    def ass_state(self):
        """ getter for property `ass_state` """
        return self.__ass_state

    @ass_state.setter
    def ass_state(self, value):
        """ setter for property `ass_state`

        :param value: state of assessment
        """
        self.__ass_state = value

    @property
    def issue(self):
        """ getter for property `issue` """
        return self.__issue

    @issue.setter
    def issue(self, value):
        """ setter for property `issue`

        :param value: MKS issue of assessment
        """
        self.__issue = value

    @property
    def date(self):
        """ Get Assessment Date when last time it was inserted/modified
        """
        return self.__date_time

    @deprecated('date (property)')
    def GetDate(self):  # pylint: disable=C0103
        """deprecated"""
        return self.date

    @deprecated('load')
    def Load(self, ass_id, dbi_val, dbi_gbl, val_obs_name):  # pylint: disable=C0103
        """deprecated"""
        return self.load(ass_id, dbi_val, dbi_gbl, val_obs_name)

    @deprecated('save')
    def Save(self, dbi_val, dbi_gbl, val_obs_name):  # pylint: disable=C0103
        """deprecated"""
        return self.save(dbi_val, dbi_gbl, val_obs_name)

    @deprecated('update')
    def Update(self, dbi_val, dbi_gbl, val_obs_name):  # pylint: disable=C0103
        """deprecated"""
        return self.update(dbi_val, dbi_gbl, val_obs_name)

    @deprecated('user_id (property)')
    def GetUserId(self):  # pylint: disable=C0103
        """deprecated"""
        return self.user_id

    @deprecated('user_account (property)')
    def GetUserAccount(self):  # pylint: disable=C0103
        """deprecated"""
        return self.user_account

    @deprecated('ass_id (property)')
    def GetId(self):  # pylint: disable=C0103
        """deprecated"""
        return self.ass_id

    @deprecated('comment (property)')
    def GetComment(self):  # pylint: disable=C0103
        """deprecated"""
        return self.comment

    @deprecated('comment (property)')
    def SetComment(self, comment):  # pylint: disable=C0103
        """deprecated"""
        self.comment = comment

    @deprecated('ass_state (property)')
    def GetAssesmentState(self):  # pylint: disable=C0103
        """deprecated"""
        return self.ass_state

    @deprecated('ass_state (property)')
    def SetAssesmentState(self, ass_state):  # pylint: disable=C0103
        """deprecated"""
        self.ass_state = ass_state

    @deprecated('issue (property)')
    def GetIssue(self):  # pylint: disable=C0103
        """deprecated"""
        return self.issue

    @deprecated('issue (property)')
    def SetIssue(self, issue):  # pylint: disable=C0103
        """deprecated"""
        self.issue = issue
class DbTestRunDelete(object):
    """
        Db TestRun Data Delete Utility Class
    """
    def __init__(self):

        # sqlite db file path; when None the oracle master db settings are used
        self.__dbfile = None
        self.__masterdbdsn = None
        self.__masterdbdbq = None
        self.__masterdbuser = None
        self.__masterdbpassword = None
        self.__masterdbschemaprefix = None
        self.__db_connector = None
        self.__masterdbdrv = None
        self.__trname = None
        self.__checkpoint = None
        self.__projname = None
        # max no. of parent testruns deleted per run; -1 means delete all
        self.__limit = 10
        # ids of testruns marked for deletion, filled in delete_test_run_data()
        self.__trids = []
        self.__logger = Logger(self.__class__.__name__, level=MODE)
        self._dbgbl = None
        self._dbval = None

    def __initialize(self, line=None):
        """
        Initialize Delete process: parse arguments and establish db connections
        """
        self.__parse_arguments(line)

        if self.__dbfile is None:
            # oracle master db connection
            self._dbval = BaseValResDB(
                "uid=%s;pwd=%s" %
                (self.__masterdbuser, self.__masterdbpassword),
                table_prefix="%s." % (self.__masterdbuser),
                error_tolerance=ERROR_TOLERANCE_NONE)
            self._dbgbl = BaseGblDB(
                "uid=%s;pwd=%s" %
                (self.__masterdbuser, self.__masterdbpassword),
                table_prefix="%s." % (self.__masterdbuser),
                error_tolerance=ERROR_TOLERANCE_NONE)
        else:
            # local sqlite file connection
            self._dbval = BaseValResDB(self.__dbfile,
                                       error_tolerance=ERROR_TOLERANCE_NONE)
            self._dbgbl = BaseGblDB(self.__dbfile,
                                    table_prefix="%s." % (self.__masterdbuser),
                                    error_tolerance=ERROR_TOLERANCE_NONE)

    def __terminate(self):
        """
        Terminating method with closing database connections
        """
        self._dbval.close()
        self._dbgbl.close()

    def __parse_arguments(self, line=None):
        """
        Parse command line arguments

        :param line: optional argument string; when None sys.argv is parsed
        """
        optparser = OptionParser(usage="usage: %prog [options] command")
        optparser.add_option("-f",
                             "--dbfile",
                             dest="dbfile",
                             help="The name of the Sqlite database file. ")
        optparser.add_option("-b",
                             "--master-db-dsn",
                             dest="masterdbdsn",
                             help="The name of the DSN.")
        optparser.add_option("-q",
                             "--master-db-dbq",
                             dest="masterdbdbq",
                             help="The name of the DBQ.")
        optparser.add_option("-u",
                             "--master-db-user",
                             dest="masterdbuser",
                             help="The name of the oracle database user.")
        optparser.add_option("-p",
                             "--master-db-password",
                             dest="masterdbpassword",
                             help="The name of the oracle database password.")
        optparser.add_option(
            "-c",
            "--master-db-schema-prefix",
            dest="masterdbschemaprefix",
            help="The name of the oracle database schema prefix.")
        optparser.add_option(
            "-l",
            "--limit",
            dest="limit",
            help=
            "MAX no. of parent testrun deleted e.g. default:10, -1 all deleted testrun"
        )
        optparser.add_option("-t",
                             "--trname",
                             dest="trname",
                             help="Testrun to import export")
        optparser.add_option("-v",
                             "--checkpoint",
                             dest="checkpoint",
                             help="Checkpoint")
        optparser.add_option("-n",
                             "--prname",
                             dest="prname",
                             help="Project Name e.g. ARS400_PR")

        if line is not None:
            cmd_options = optparser.parse_args(line.split())
        else:
            cmd_options = optparser.parse_args()

        self.__dbfile = cmd_options[0].dbfile
        self.__masterdbdsn = cmd_options[0].masterdbdsn
        self.__masterdbdbq = cmd_options[0].masterdbdbq
        self.__masterdbuser = cmd_options[0].masterdbuser
        self.__masterdbpassword = cmd_options[0].masterdbpassword
        self.__masterdbschemaprefix = cmd_options[0].masterdbschemaprefix
        if cmd_options[0].limit is not None:
            self.__limit = int(cmd_options[0].limit)
        self.__trname = cmd_options[0].trname
        self.__checkpoint = cmd_options[0].checkpoint
        self.__projname = cmd_options[0].prname

    def delete_test_run_data(self, line=None):
        """
        Main function of DB Delete Testrun

        :param line: optional argument string forwarded to the arg parser
        :raises RuntimeError: when a testrun could not be deleted; nothing is
                              committed in that case
        """
        start_date = datetime.now()
        self.__logger.info("Starting TestRun Delete at %s" %
                           start_date.strftime("%d-%m-%Y %H:%M:%S"))
        self.__initialize(line)
        if self.__projname is not None:
            pid = self._dbgbl.GetProjectId(self.__projname.upper())
        else:
            pid = None
        self.__trids = self._dbval.get_deleted_testrun_ids(
            name=self.__trname,
            checkpoint=self.__checkpoint,
            pid=pid,
            limit=self.__limit,
            distinct=False)
        for trid in self.__trids:
            self._dbval.delete_testrun(tr_id=trid)

        # verify every testrun is really gone before committing anything
        for trid in reversed(self.__trids):
            tr_rec = self._dbval.get_testrun(tr_id=trid)
            if len(tr_rec) > 0:
                self.__logger.error(
                    "Testrun with Id = %d delete attempt failed" % trid)
                self.__logger.error(
                    "Delete operation Aborted with no Commit Changes in Database"
                )
                # RuntimeError is a StandardError subclass on Python 2, so
                # existing handlers still catch it, and it exists on Python 3
                raise RuntimeError("Operation Aborted")

        end_date = datetime.now()
        duration = end_date - start_date
        self._dbval.commit()
        # function-call form works on Python 2 (single parenthesized arg) and 3
        print(str(tuple(self.__trids)))
        self.__logger.info("Delete Finished with Total Duration = %s " %
                           str(duration))
        self.__logger.info("Total Testrun deleted = %s " %
                           str(len(self.__trids)))
        print("exit")
Example #8
0
class BplUpdate(object):
    r"""
    **Update existing bpl files with changes in catalog db collections**

    Class provides methods to
      - read a config,
      - find all bpl files in the subfolders
      - compare the bpl files with collections
      - create a new bpl file if needed
      - check in the changed files
      - update member revisions for changed files

    It returns an error code to be executed as scheduled task, error code '0' shows execution without problems.
    Additionally the status is logged to the file ``bpl_update.log`` in same path as the config file.

    see more details in module description `bpl_update.py`

    **usage example** (see also function `main`):

    .. python::

        bpl_upd = BplUpdate(config_file)
        result = bpl_upd.update_directories()

    """
    def __init__(self, config_file):
        """
        read config and prepare update

        Any setup problem is not raised but recorded in ``self.error_status``
        so the caller can run this as a scheduled task and check the code.

        :param config_file: path/file name of config file
        :type  config_file: string
        """
        self.error_status = ERR_OK
        # all bpl handling is rooted at the directory of the config file
        self.bpl_top_dir = dirname(config_file)
        self._logger = Logger('BplUpdate', INFO,
                              join(self.bpl_top_dir, 'bpl_update.log'))
        self._config = self._read_config(config_file)
        self.db_conn = None
        self.cat_db = None

        # setup db connection,
        # explicitly set default values for parameters that don't set None as default in DBconnect
        # unused for now: error_tolerance=ERROR_TOLERANCE_NONE, use_cx_oracle=False
        if self._config.get('connection') is None:
            self._logger.error(
                'No parameter "connection" in section "[db_connection]" of %s'
                % config_file)
            self.error_status = ERR_DB_CONNECTION_CONFIG
        else:
            try:
                connection = str(self._config.get('connection'))
                # relative sqlite file names are resolved against the top dir
                if connection.endswith('.sqlite'):
                    connection = join(self.bpl_top_dir, connection)
                self.cat_db = BaseRecCatalogDB(
                    connection)  # self.db_conn.Connect(cat)
            except Exception as err:
                self.error_status = ERR_DB_CONNECTION_CONFIG
                self._logger.error(
                    'can not setup db connection with configured settings: %s\n%s'
                    % (connection, err))
        # get all bpl files in the top dir and all sub dirs
        self.bpl_dict = self.get_bpl_files()

    def _read_config(self, config_file, incl_sect=None):
        """
        private method to read config, check some requirements and return dict with config

        Supports recursive "include" options inside sections: included files
        are parsed with only the including section name filtered in.

        :param config_file: path/file name to read
        :type  config_file: string
        :param incl_sect : section name to include from other config file, for recursive calls
        :type  incl_sect : string
        """
        raw_config = RawConfigParser()
        try:
            raw_config.read(abspath(config_file))
        except ParseError as err:
            self.error_status = ERR_CONFIG_FILE_READ
            self._logger.error(err)
            return {}

        section_names_list = raw_config.sections()
        if not len(section_names_list):
            self.error_status = ERR_CONFIG_FILE_CONTENT
            self._logger.error(
                'No sections defined in config file %s - min: [db_connection] and [collections].'
                % config_file)
            return {}

        # top-level call imports all sections, recursive calls only the one
        # section that pulled in the include
        include_section = section_names_list if incl_sect is None else incl_sect

        include_config = []
        sections_list = OrderedDict()
        try:
            for section_name in section_names_list:
                # don't import if not inside specific chapter
                if section_name not in include_section:
                    continue
                # sections_list[section_name] = {}

                try:
                    include = raw_config.get(section_name,
                                             "include").strip('"\' ')
                    if len(include):
                        include_config.append([include, section_name])
                except ParseError:
                    pass

                # NOTE(review): eval() executes arbitrary expressions from the
                # config file - config files must be fully trusted; consider
                # ast.literal_eval for plain literals
                if section_name == "db_connection":
                    sections_list["connection"] = eval(
                        raw_config.get(section_name, "connection"))
                elif section_name == 'collections':
                    sections_list["update_list"] = eval(
                        raw_config.get(section_name, 'update_list'))
                elif section_name == 'mks_settings':
                    if raw_config.has_option('mks_settings', 'task_id'):
                        sections_list['task_id'] = raw_config.get(
                            section_name, 'task_id')

            # iterate through additional configs from includes now
            for inc in include_config:
                if not isabs(inc[0]):
                    inc[0] = join(dirname(config_file), inc[0])
                incl_lst = self._read_config(inc[0], inc[1])
                # merge included settings, existing keys win on conflicts
                for incl_sct in incl_lst:
                    if incl_sct not in sections_list:
                        sections_list[incl_sct] = incl_lst[incl_sct]
                    else:
                        sections_list[incl_sct].update(incl_lst[incl_sct])

        except ParseError as err:
            self.error_status = ERR_CONFIG_FILE_CONTENT
            self._logger.error('Parse error during config file reading:\n %s' %
                               err)

        return sections_list

    def get_bpl_files(self):
        """
        find all bpl files starting from set directory

        File basenames are lowercased to serve as dict keys; every file
        starts with status 'old' until update_directories reclassifies it.

        :return: dict { 'basename': {'path': relpath, 'status': 'old'}}
        """
        bpl_files = {}
        for root, _, files in walk(self.bpl_top_dir):
            for bpl_file in files:
                if splitext(bpl_file)[1] != '.bpl':
                    continue
                bpl_path = relpath(root, self.bpl_top_dir)
                # print r'found file %s\%s' % (bpl_path, bpl_file)
                bpl_file_name = str(splitext(bpl_file)[0]).lower()
                bpl_files[bpl_file_name] = {
                    'path': bpl_path,
                    'filename': join(root, bpl_file),
                    'status': 'old'
                }
        return bpl_files

    @staticmethod
    def compare_col_bpl(col_recs, bpl_list):
        """
        compare rec files in passed lists

        NOTE: only the file paths are compared; section lists of the bpl
        entries are ignored here.

        :param col_recs: all recording names of a collection
        :type  col_recs: list of names
        :param bpl_list: all rec files in batch play list
        :type  bpl_list: `BplList` - list of `BplListEntries` with 'filepath' and sectionlist
        :return: True if similar lists
        """
        # first check length
        if len(col_recs) != len(bpl_list):
            return False
        # then check if all bpl entries have matching collection entry
        bpl_rec_names = [r.filepath for r in bpl_list]
        for rec in bpl_rec_names:
            if rec not in col_recs:
                return False
        return True

    def create_fct_dir(self, col_name):
        """
        create the directory for the function named in the collection
        based on the current dir bpl_top_dir

        The function name is the second '_'-separated token of the collection
        name (convention: <project>_<function>_<param>); without such a token
        the files land directly in bpl_top_dir.

        :param col_name: name of the collection
        :type  col_name: string
        :return: name of function
        """
        if len(col_name.split('_')) > 1:
            funct = col_name.split('_')[1]
        else:
            funct = ''
        # prep: create path if needed
        bpl_path = join(self.bpl_top_dir, funct)
        if not exists(bpl_path):
            makedirs(bpl_path)

        return funct

    def generate_bpl_file(self, col_name, rec_list):
        """
        generate a bpl file for a given collection

        uses existing connection to cat db and creates a bpl file with:
          - file name like collection name
          - in a folder named after the function coded in collection name <project>_<function>_<param>

        a missing folder is also generated starting at current bpl_top_dir

        :param col_name: name of collection listing the recordings
        :type  col_name: string
        :param rec_list: list of recordings
        :type rec_list: list
        :return: path/file name of generated file
        """
        dir_name = self.create_fct_dir(col_name)
        bpl_file_name = join(self.bpl_top_dir, dir_name, col_name + '.bpl')
        # make sure this file is not locked by mks or whatever
        if isfile(bpl_file_name):
            chmod(bpl_file_name, S_IWUSR)
        bpl_writer = Bpl(str(bpl_file_name))
        for rec in rec_list:
            bpl_writer.append(BplListEntry(rec))
        bpl_writer.write()

        return bpl_file_name

    def update_directories(self):
        """run through all subfolders and update existing bpl files

        For every configured collection a fresh '<name>_new.bpl' is exported
        and compared against the existing file; the bpl_dict status is set to
        'updated', 'match' or 'new' accordingly.  Remaining 'old' files are
        classified as 'rem_col?' or 'junk', and a result csv is written.

        :return: accumulated self.error_status (ERR_OK when clean)
        """

        # get all collections to update
        # for each collection:
        collections = self._config.get('update_list')
        for col_name in collections:
            # print 'search for collection "%s"' % col_name
            try:
                _ = self.cat_db.get_collection_id(col_name)
            except AdasDBError as db_err:
                self._logger.warning(db_err)
                self.error_status = ERR_DB_COLL_MISSING
                continue
            # get directory for function
            fct_name = self.create_fct_dir(col_name)
            # create the new bpl file
            bpl_file_name_new = join(self.bpl_top_dir, fct_name,
                                     col_name + '_new.bpl')
            try:
                self.cat_db.export_bpl_for_collection(col_name,
                                                      bpl_file_name_new, True,
                                                      True)
            except AdasDBError as err:
                self._logger.error('problems writing bpl file %s:\n%s' %
                                   (bpl_file_name_new, err))
                self.error_status = ERR_BPL_FILE_CREATION
                continue
            # compare the new bpl file with an existing one (if there is one)
            bpl_file_name = join(self.bpl_top_dir, fct_name, col_name + '.bpl')
            if isfile(bpl_file_name):
                same = fcmp(bpl_file_name, bpl_file_name_new)
                if not same:
                    self._logger.info('update bpl file %s for collection %s' %
                                      (bpl_file_name, col_name))
                    chmod(bpl_file_name, S_IWUSR)
                    remove(bpl_file_name)
                    rename(bpl_file_name_new, bpl_file_name)
                    self.bpl_dict[col_name.lower()]['status'] = 'updated'
                else:
                    self._logger.info('bpl for collection "%s" up to date' %
                                      col_name)
                    remove(bpl_file_name_new)
                    self.bpl_dict[col_name.lower()]['status'] = 'match'
            else:
                # bpl file didn't exist before
                # NOTE(review): suspected typo - 'filename' is stored with a
                # '.bsig' extension and without the fct_name subfolder, while
                # the file actually created below is bpl_file_name
                # ('<fct>/<col>.bpl'); verify before relying on the csv paths
                self.bpl_dict[col_name.lower()] = {
                    'status': 'new',
                    'filename': join(self.bpl_top_dir, col_name + '.bsig')
                }
                rename(bpl_file_name_new, bpl_file_name)
                self._logger.info(
                    'created new bpl file "%s" for collection %s' %
                    (bpl_file_name, col_name))

        # check if collections are removed but bpl files exist for that collection
        # and list bpl files that have no matching collections
        all_col_names = self.cat_db.get_all_collection_names()
        for bpl_name in [
                b.lower() for b in self.bpl_dict
                if self.bpl_dict[b]['status'] == 'old'
        ]:
            bpl_file_name = relpath(self.bpl_dict[bpl_name]['filename'],
                                    self.bpl_top_dir)
            if bpl_name in all_col_names:
                self.bpl_dict[bpl_name]['status'] = 'rem_col?'
                self._logger.warning(
                    'collection removed from config? - file %s has matching collection "%s"'
                    % (bpl_file_name, bpl_name))
            else:
                self.bpl_dict[bpl_name]['status'] = 'junk'
                self._logger.warning(
                    'found bpl file with no matching collection: %s' %
                    bpl_file_name)

        # create table with all bpl update results
        with open(join(self.bpl_top_dir, 'bpl_update_result.csv'),
                  'w') as res_file:
            res_file.write('collection; status; bpl file\n')
            for bpl_name in self.bpl_dict:
                res_file.write(bpl_name + '; ' +
                               self.bpl_dict[bpl_name]['status'] + '; ' +
                               relpath(self.bpl_dict[bpl_name]['filename'],
                                       self.bpl_top_dir) + '\n')

        return self.error_status

    def checkin_updated_files(self):
        """
        use internal bpl dict to check in all updated files

        Only files with status 'updated' are processed; newly created files
        ('new') are not checked in here.  Requires a configured mks task_id
        and an existing sandbox (project.pj in bpl_top_dir).

        :TODO: currently stk.mks.si does not return sufficient error messages
               checkin_updated_files() does not recognize errors during checkin/checkout
        """
        # first check if bpl top dir contains a mks project file, make sure we have a sandbox
        error = ERR_OK
        task_id = self._config.get('task_id')
        if not task_id:
            self._logger.warning(
                'no mks task configured, if the updates should be checked in define the "task_id" '
                'string in a config section "[mks_settings]"')
            return ERR_OK
        if not exists(join(self.bpl_top_dir, 'project.pj')):
            self._logger.error(
                'bpl files not in a sandbox, can not find file project.pj with mks information.'
            )
            return ERR_NO_SANDBOX
        mks = mks_si.Si()
        mks.setChangePackageId(task_id)
        for name in [
                b.lower() for b in self.bpl_dict
                if self.bpl_dict[b]['status'] == 'updated'
        ]:
            # Python 2 print statement (file uses Py2 syntax throughout)
            print 'checking in %s' % self.bpl_dict[name]['filename']
            try:
                # mks.co/ci return truthy on failure; on any error continue
                # with the next file but remember ERR_CO_ERROR
                if mks.co(self.bpl_dict[name]['filename']):
                    error = ERR_CO_ERROR
                    self._logger.error(
                        'can not check out %s: returned error %s' %
                        (self.bpl_dict[name]['filename'], error))
                    continue
            except mks_si.SiException as err:
                self._logger.error('can not check out %s:%s' %
                                   (self.bpl_dict[name]['filename'], err))
                error = ERR_CO_ERROR
                continue
            try:
                if mks.ci(self.bpl_dict[name]['filename'],
                          'modified by bpl_update tool'):
                    error = ERR_CO_ERROR
                    self._logger.error(
                        'check in problems with %s - returned error %s' %
                        (self.bpl_dict[name]['filename'], error))
                    continue
            except mks_si.SiException as err:
                self._logger.error('check in problems with %s:%s' %
                                   (self.bpl_dict[name]['filename'], err))
                error = ERR_CO_ERROR
                continue
            self._logger.info('update in mks for %s' %
                              self.bpl_dict[name]['filename'])

        return error
class GenReport(object):
    """
    generate pdf report and excel table

    Parses command line options, connects to the validation/catalog/global
    databases, loads the requested test run (optionally with a reference run
    for regression reports) and writes either a pdf or an xlsx report.
    """
    def __init__(self):
        # report configuration, filled by __parse_arguments
        self.__report_level = None
        self.__report_type = None
        self.__testrun_id = None
        self.__reftest_id = None
        self.__outfile = None
        # db connections, created in _initialize / closed in _terminate
        self.__db_gbl = None
        self.__db_val = None
        self.__db_cat = None
        self.__dbfile = None
        self.__dbtech = None
        self.__masterdbdrv = None
        self.__masterdbdsn = None
        self.__masterdbdbq = None
        self.__masterdbuser = None
        self.__masterdbpassword = None
        self.__masterdbschemaprefix = None
        self.__db_connector = None
        self.__logger = Logger(self.__class__.__name__, level=MODE)
        self.excel_header = []

        # addon for testing this script:
        self.__val_obs_name = "UNIT_TEST_DEMO_TYPE"
        self.__coll_id = 0
        self.__coll_name = 'TestResultsAPIDemo'
        self.__meas_ids = []

    def __parse_arguments(self):
        """
        get user options

        usage: gen_report.py [-h] [-m | -d | -a] [-f DBFILE | -t SENSOR_TECH | -u MASTERDB_USER]
                             [-p MASTERDB_PASSWORD] [-c MASTERDB_SCHEMAPREFIX]
                             [-b MASTERDB_DSN | -q MASTERDB_DBQ]
                             testrun_id out_file
        """
        opts = ArgumentParser(description=__doc__,
                              formatter_class=RawDescriptionHelpFormatter)
        # mandatory settings:
        opts.add_argument('testrun_id',
                          type=str,
                          help='testrun id as stored in val-db')
        opts.add_argument(
            'out_file',
            type=str,
            help='path/name of report file to generate (*.xls or *.pdf)')
        opts.add_argument(
            '-r',
            '--reftest_id',
            dest='reftest_id',
            type=str,
            help='reference test id as in val-db for regression report')
        # optional: set report type: level of details [-m|-d|-f]
        sel_type = opts.add_mutually_exclusive_group()
        sel_type.add_argument(
            '-m',
            '--management',
            dest='rep_type',
            action='store_const',
            const=AlgoTestReport.REP_MANAGEMENT,
            help='generate management report (no details, no errorlists)')
        sel_type.add_argument(
            '-d',
            '--detailed',
            dest='rep_type',
            action='store_const',
            const=AlgoTestReport.REP_DETAILED,
            help='generate detailed report (default: details, errorlists)')
        sel_type.add_argument(
            '-a',
            '--all',
            dest='rep_type',
            action='store_const',
            const=AlgoTestReport.REP_DEVELOPER,
            help='generate all chapters for developer report')
        # database settings - [-f|[-u,-p,-c,[-b|-q]]
        db_set = opts.add_argument_group('db settings',
                                         'select either SqLite or Oracle')
        db_opts = db_set.add_mutually_exclusive_group()
        db_opts.add_argument("-f",
                             "--dbfile",
                             dest="dbfile",
                             help="The name of the SQlite database file. ")
        db_opts.add_argument(
            "-t",
            "--techname",
            dest="dbtech",
            help="Oracle sensor tech schema name like ARS4XX, MFC4XX or VGA")
        db_opts.add_argument("-u",
                             "--master-db-user",
                             dest="masterdb_user",
                             type=str,
                             help="The name of the oracle database user.")
        db_conn = opts.add_argument_group('oracle db', '')
        db_conn.add_argument("-p",
                             "--master-db-password",
                             dest="masterdb_password",
                             type=str,
                             help="The name of the oracle database password.")
        db_conn.add_argument(
            "-c",
            "--master-db-schema-prefix",
            dest="masterdb_schemaprefix",
            type=str,
            default=DB_MASTER_SCHEMA_PREFIX,
            help="The name of the oracle database schema prefix.")
        dbtype = db_conn.add_mutually_exclusive_group()
        dbtype.add_argument("-b",
                            "--master-db-dsn",
                            dest="masterdb_dsn",
                            help="The name of the DSN, opt.")
        dbtype.add_argument("-q",
                            "--master-db-dbq",
                            dest="masterdb_dbq",
                            help="The name of the DBQ, default: %s" %
                            DB_MASTER_DBQ)
        args = opts.parse_args()
        # default report type: detailed
        if args.rep_type is None:
            args.rep_type = AlgoTestReport.REP_DETAILED
        self.__report_level = args.rep_type
        self.__testrun_id = args.testrun_id
        self.__reftest_id = args.reftest_id
        self.__outfile = args.out_file
        # report format is derived from the output file extension
        ext = opath.splitext(args.out_file)
        if '.xlsx' == ext[1]:
            self.__report_type = EXCEL_REPORT
        elif ext[1] == '.pdf':
            self.__report_type = PDF_REPORT
        else:
            self.__logger.error(
                'wrong output file extension! Use "*.xlsx" or ".pdf" only!')
            sexit(ERROR)
        # db settings
        if not args.masterdb_dsn and not args.masterdb_dbq:
            args.masterdb_dbq = DB_MASTER_DBQ
        # precedence: sqlite file > sensor tech schema > oracle user settings
        if args.dbfile is not None:
            self.__dbfile = args.dbfile
        elif args.dbtech is not None:
            self.__dbtech = args.dbtech
        elif args.masterdb_user is not None:
            self.__masterdbdsn = args.masterdb_dsn
            self.__masterdbdbq = args.masterdb_dbq
            self.__masterdbuser = args.masterdb_user
            self.__masterdbpassword = args.masterdb_password
            self.__masterdbschemaprefix = args.masterdb_schemaprefix
        else:
            self.__logger.error(
                'no connection to Result DB specified,'
                ' enter either sqlite file or DB connection settings (-u -p -c)!'
            )
            sexit(ERROR)

        if args.reftest_id:
            self.__logger.info(
                'generate Regression Test report with reference test id %s' %
                args.reftest_id)

        return

    def _initialize(self):
        """
        parse arguments,  establish connection

        Builds the connection string from whichever db option was given and
        opens the val, cat and gbl database interfaces on one connection.
        """
        self.__parse_arguments()

        if release() == "XP":
            self.__masterdbdrv = db_common.DEFAULT_MASTER_DRV_XP
        else:
            self.__masterdbdrv = db_common.DEFAULT_MASTER_DRV

        if self.__dbfile is None and self.__dbtech is None:
            conn_str = "DBQ={};Uid={};Pwd={}".format(self.__masterdbdbq,
                                                     self.__masterdbuser,
                                                     self.__masterdbpassword)
        elif self.__dbtech is not None:
            conn_str = self.__dbtech
        else:
            conn_str = self.__dbfile

        self.__db_val = val.BaseValResDB(conn_str)
        self.__db_cat = cat.BaseRecCatalogDB(self.__db_val.db_connection)
        self.__db_gbl = gbl.BaseGblDB(self.__db_val.db_connection)

    def _terminate(self):
        """
        close database connections
        """
        self.__db_val.close()
        self.__db_cat.close()
        self.__db_gbl.close()

    # sic: method name typo ('gererate') kept - renaming would break callers
    def gererate_report(self):
        """
        generate the pdf and excel report, main method
        """
        self._initialize()

        if self.__report_level == AlgoTestReport.REP_MANAGEMENT:
            self.__logger.info('generate management report for TestRun %s' %
                               self.__testrun_id)
        if self.__report_level == AlgoTestReport.REP_DETAILED:
            self.__logger.info('generate detailed report for TestRun %s' %
                               self.__testrun_id)
        if self.__report_level == AlgoTestReport.REP_DEVELOPER:
            self.__logger.info(
                'generate full developer report for TestRun %s, all chapters' %
                self.__testrun_id)

        testrun = TestRun()
        testrun.Load(self.__db_val, self.__db_gbl, self.__db_cat,
                     self.__testrun_id)

        # for testing CR 220008 before saving of RuntimeJob is implemented
        # testrun.AddRuntimeJob(3988)  # 3988: 5/0/0  3445:66/66/67
        # testrun.AddRuntimeJob(3445)

        # a reference run turns this into a regression report; fall back to a
        # normal report if the reference id can not be loaded
        reftest = None
        if self.__reftest_id:
            reftest = TestRun()
            if reftest.Load(self.__db_val, self.__db_gbl, self.__db_cat,
                            self.__reftest_id) is False:
                self.__logger.error(
                    '!! Reference Testrun not found with id: %s !!' %
                    self.__reftest_id)
                self.__logger.error(
                    'Generating normal report instead Regression Test Report!')
                reftest = None

        if testrun is not None:
            for testcase in testrun.GetTestcases():
                testcase.Load(self.__db_val,
                              self.__db_gbl,
                              self.__db_cat,
                              testrun.GetId(),
                              level=ValSaveLoadLevel.VAL_DB_LEVEL_ALL)
            for job in testrun.runtime_details:
                job.LoadHpcIncidents()
            if reftest:
                for testcase in reftest.GetTestcases():
                    testcase.Load(self.__db_val,
                                  self.__db_gbl,
                                  self.__db_cat,
                                  reftest.GetId(),
                                  level=ValSaveLoadLevel.VAL_DB_LEVEL_ALL)

            if self.__report_type == PDF_REPORT:
                self.generate_pdf(testrun, self.__outfile, reftest)
            elif self.__report_type == EXCEL_REPORT:
                self.generate_excel(testrun, self.__outfile)
        return

    def generate_pdf(self, testrun, outfile, reftest=None):
        """
        generate pdf report as specified in call options

        :param testrun: testrun as loaded from ResultDB
        :type testrun:  `TestRun`
        :param outfile: path/name of the pdf file to build
        :type outfile:  str
        :param reftest: optional reference testrun for regression reports
        :type reftest:  `TestRun` or None
        """
        report = AlgoTestReport(testrun, reftest)

        report.build(outfile, level=self.__report_level)

    def generate_excel(self, testrun, outfile):
        """
        generate excel report table as specified in call options

        :param testrun: TestRun Id as in resultDb
        :type testrun:  int
        :param outfile: path and filename of the report file
        :type outfile:  str
        """
        # init the excel stuff
        # remove a stale output file first; best effort, ignore if missing
        try:
            unlink(outfile)
        except Exception:  # pylint: disable=W0703
            pass
        xls = Excel()
        xls.create_workbook()
        xls.create_worksheet('testruns')  # str(testrun.name))

        # insert table header
        self.excel_header = [
            '_TestID', '_TestDate', '_TesterName', '_TestResult',
            '_test_status', '_FR_Number'
        ]
        column_widths = [30, 15, 15, 30, 15, 30]
        # xrange: Python 2 only (module uses Py2 syntax)
        for iheader in xrange(len(self.excel_header)):
            xls.set_cell_value(1, iheader + 1, self.excel_header[iheader])
            xls.set_cell_font_style(1, iheader + 1, bold=True)
            xls.set_column_width(1, iheader + 1, column_widths[iheader])

        row = 2
        row = self.__add_excel_testrun_rows(xls, row, testrun)
        xls.set_format(1,
                       1,
                       row_to=row,
                       col_to=len(self.excel_header),
                       wrap_text=True)

        try:
            xls.save_workbook(outfile)
            print("Test run successfully exported to '%s'" % outfile)
        except Exception:  # pylint: disable=W0703
            print(":-( couldn't save the workbook to '%s'" % outfile)
        xls.close_workbook()

    def __add_excel_testrun_rows(self, xls, row, testrun):
        """
        fill rows for a test run, can be called recursively for child testruns

        template: ['_TestID', '_Test Date', '_TesterName',
                   '_Test Result', '_test_status', '_FR_Number']

        :param xls: Excel workbook
        :type xls:  `Excel` as in stk.rep.excel
        :param row: start row for this test run
        :type row:  int
        :param testrun: (child-) test run
        :type testrun:  `TestRun`
        :returns: current (next empty) row
        :rtype:   int
        """
        # test run line:
        xls.set_data([testrun.name], row, 1)
        xls.set_cell_color(row, 1, row, len(self.excel_header), 'Light Orange')
        row += 1
        # go through test run childs
        for trun in testrun.GetChildTestRuns():
            row = self.__add_excel_testrun_rows(xls, row, trun)
        # go through test cases
        for tcase in testrun.GetTestcases(inc_child_tr=False):
            tc_result = tcase.test_result
            xls.set_data([tcase.id, '', '', '', tc_result], row, 1)
            xls.set_cell_color(row, 1, row, len(self.excel_header),
                               'Light Yellow')
            row += 1
            # go trough test steps
            for tstep in tcase.GetTestSteps():
                # todo: JHo add tstep.asmt.userid, tstep.asmt.date and
                # change last col to asmt.info
                xls.set_data([
                    str(tstep.id), tstep.date, tstep.user_account,
                    str(tstep.meas_result), tstep.test_result, tstep.issue
                ], row, 1)
                row += 1
        return row
Example #10
0
def main():
    """main function

    Parses command line options and sorts one or more MTS config files.
    For each input file an output name is derived (``<name>_sorted.<ext>``
    unless given via -o); overwriting an input file is refused, overwriting
    any other existing file requires interactive confirmation.
    """
    logger = Logger(str(sys._getframe().f_code.co_name), INFO)

    # Parse command line parameters
    tmp = 'usage: %prog [options] <cfg_files_in> \n   with <cfg_files_in> = '
    tmp += '"<path\\filename>, <path\\filename>, ..."'
    optparser = OptionParser(usage=tmp)
    tmp = "The output files to write. [default=<cfg_file_in>_sorted.cfg]"
    optparser.add_option("-o", "--out-file", dest="outfiles", help=tmp)
    tmp = "The sort mode to use. [0 = default = only sections, 1 = sections + properties]"
    optparser.add_option("-m", "--mode", dest="mode", default='0', help=tmp)

    # cmd_options is the (options, args) tuple returned by optparse
    cmd_options = optparser.parse_args()

    if not cmd_options[1]:
        # call help
        optparser.print_help()
    else:
        # prepare infiles
        infiles = split_strip_string(cmd_options[1][0], ',')

        if cmd_options[0].mode not in list(MODES.keys()):
            logger.error("Sort mode %s unknown, possible modes: \n %s!" %
                         (cmd_options[0].mode, MODES))
        else:

            # prepare outfiles
            if cmd_options[0].outfiles is None:
                outfiles = []
            else:
                outfiles = split_strip_string(cmd_options[0].outfiles, ',')

            # start
            for filecount in range(len(infiles)):
                logger.info("Start sorting file %d: %s\n   ..." %
                            (filecount, infiles[filecount]))
                # outfile name: derive '<name>_sorted.<ext>' when not given
                if not outfiles or (len(outfiles) < filecount + 1):
                    split_result = infiles[filecount].rsplit('.', 1)
                    outfiles.append(split_result[0] + '_sorted.' +
                                    split_result[1])

                # check outfile name
                if outfiles[filecount] in infiles:
                    # never overwrite infiles
                    logger.error(
                        'Overwrite existing infile is not allowed: %s.' %
                        infiles[filecount])
                    # exc_type, exc_value, exc_traceback = sys.exc_info()
                    logger.error('The original problem occured here: %s' %
                                 str(sys.exc_info()))
                    raise IOError(
                        'Overwrite existing infile is not allowed: %s.' %
                        infiles[filecount])

                elif isfile(outfiles[filecount]):
                    # ask to overwrite if oufile already exists
                    print('   You are going to overwrite the file %s.' %
                          outfiles[filecount])
                    print('   Do you really want to continue?')
                    # NOTE(review): under Python 2 input() evaluates the entry
                    # and raises SyntaxError on a bare Enter; raw_input() may
                    # be intended - confirm the target interpreter version
                    go_on = str(
                        input(
                            '   press Enter to continue or any key to break\n')
                    )
                    # any non-empty entry skips this file
                    if go_on:
                        print('stopped by user')
                        continue

                # sorting
                mts_cfg = MtsConfig(infiles[filecount], outfiles[filecount],
                                    logger)
                mts_cfg.sort(cmd_options[0].mode)

            # done
            logger.info("Done.")
class DbObjectList(BaseObjectList):
    """
    DbObjectList

    List of labeled rectangular objects (`LabelRectObject`) for one
    measurement, either loaded from the object database (`load_objects`)
    or taken over from an already extracted `GenericObjectList`.
    """
    def __init__(self,
                 data_source,
                 meas_id,
                 sensor,
                 list_name,
                 object_filter_if,
                 signal_names=None,
                 generic_objects=None):
        """
        :param data_source: = (stk.db.connect.DBConnect()).Connect(stk.db.obj.objdata)
        :param meas_id: measurement identifier
        :type meas_id: integer
        :param sensor: name of the sensor
        :param list_name: name of the list
        :param object_filter_if: ObjectFilterIf, e.g. ObjectByGateFilter
        :param signal_names: list of names of signals to be loaded, default is LABEL_OBJECT_SIGNAL_NAMES
        :param generic_objects: optional GenericObjectList; when given, the label
                                objects are created from it right away instead of
                                being loaded from the db via `load_objects`
        """
        if signal_names is None:
            signal_names = LABEL_OBJECT_SIGNAL_NAMES
        BaseObjectList.__init__(self, data_source, sensor, list_name,
                                object_filter_if, signal_names)

        self.__meas_id = meas_id
        if generic_objects is not None:
            self.__fill_objects_from_generic_object_list(
                data_source, generic_objects)
        # NOTE(review): the logger is created only after the optional fill
        # above, so __fill_objects_from_generic_object_list must not log
        self._log = Logger(self.__class__.__name__)

    def load_objects(self,
                     startts=None,
                     stopts=None,
                     load_manual_labels_only=False,
                     testcase_scenarios=None):
        """
        Load all tracked Objects in the given timespan for given measid

        The previously held objects are discarded; the loaded objects are
        stored in this list itself (retrieve them with ``get_objects()``).

        :param startts: absolute start time stamp
        :type startts: long
        :param stopts: absolute stop time stamp
        :type stopts: long
        :param load_manual_labels_only: load only manually labeled or reviewed objects
        :param testcase_scenarios: list of test case scenarios e.g. ['following', 'oncoming']
        :return: None; objects are stored in the list itself
        """

        # clear objects:
        self._objects = []

        # get all manual labeled or reviewed rectangular objects for the given meas id
        if load_manual_labels_only:
            object_id_list = self._data_source.get_rect_object_ids(
                self.__meas_id, incl_deleted=False, cls_lblstateids=[2, 3])
        # get all existing rectangular objects for the given meas id
        else:
            object_id_list = self._data_source.get_rect_object_ids(
                self.__meas_id)

        # create LabelRectObject:
        for obj_id in object_id_list:
            oid = obj_id[OBJ_2_DB_NAME[OBJ_RECTOBJECT_ID]]
            if testcase_scenarios is None or not testcase_scenarios:
                self._objects.append(
                    LabelRectObject(oid, startts, stopts, self._data_source,
                                    self._signal_names))
            else:
                # one object (section) per matching test case interval;
                # startts/stopts are overwritten by the section boundaries
                for scenario in testcase_scenarios:
                    testcase_obj_sections = self._data_source.get_object_test_cases(
                        oid, scenario)
                    for tc_sec in testcase_obj_sections:
                        startts = tc_sec[COL_NAME_TEST_CASES_BEGINABSTS]
                        stopts = tc_sec[COL_NAME_TEST_CASES_ENDABSTS]
                        # tc_type = tc_sec[COL_NAME_TEST_CASES_TYPEID]
                        self._objects.append(
                            LabelRectObject(oid,
                                            startts,
                                            stopts,
                                            self._data_source,
                                            self._signal_names,
                                            signals=None,
                                            labeling_type=None,
                                            scenario=scenario))

    def filter_labels(self,
                      fov_bin_func=None,
                      obj_algo_gate=None,
                      ego_speed_per_ts=None):
        """
        Filters the list of label objects so that only the field of view (fov) is considered

        If there are more than one sections in the label which are valid separated by invalid gaps,
        it splits the objects to objects sections accordingly, e.g.:

        label for obj#1, * means: out of fov::

          |****------**--------|

        after filtering:

        obj#1, section#1, obj#1, section#2::

              |------| |--------|

        If you get the objects from db object list with get_objects(), it will have 2 obj#1 in the list,
        and the list can be iterated trough as before, and both sections can be used e.g. for object matching.

        Usage example:

        .. python::

            ADMA_LATENCY = 120#ms
            ego_speed_per_ts = OrderedDict()
            timestamps = self._data_manager.GetDataPort(val_gd.TIMESTAMP_PORT_NAME, self._bus_name)
            vdy_data = self._data_manager.GetDataPort(val_gd.VDYDATA_PORT_NAME, self._bus_name)
            ego_speed = vdy_data[sd.PORT_VDY_VEHICLE_SPEED]
            for idx, ts in enumerate(timestamps):
                ego_speed_per_ts[ts] = ego_speed[idx]
            fov = FieldOfView(FieldOfView.PREMIUM_SENSOR)
            db_object_list.interpolate_to_time_system(timestamps, ADMA_LATENCY)
            db_obj_list.filter_labels(fov.is_point_in_fov, fov.is_object_detecteble, ego_speed_per_ts)

            or

            def is_obj_in(x, y):
                FOV_ANGLE = 60.0
                fova = math.radians(FOV_ANGLE)
                if x <= 0 or x > distance:
                    return False
                elif math.fabs(float(y) / float(x)) < math.tan(fova):
                    return True
                else:
                    return False
            db_obj_list.filter_labels(is_obj_in)

        :param fov_bin_func: binary function with distx, disty and a boolean return value
        :type fov_bin_func: function pointer (x,y), return True/False e.g. see below
        :param obj_algo_gate: binary function with ego speed, distx and a boolean return value
        :type obj_algo_gate: function pointer (ego_speed, distx), return True/False e.g. see below
        :param ego_speed_per_ts: ego_speed
        :type ego_speed_per_ts: OrderedDict()


        """

        # nothing to filter with: keep the object list unchanged
        if fov_bin_func is None and obj_algo_gate is None:
            return

        def default_fov_bin_func(dummy_distx, dummy_disty):
            """default function if no fov_bin_func is passed"""
            return True

        def default_obj_algo_gate(dummy_ego_speed, dummy_distx):
            """default function if no obj_algo_gate is passed"""
            return True

        if fov_bin_func is None:
            fov_bin_func = default_fov_bin_func
        if obj_algo_gate is None or ego_speed_per_ts is None or len(
                ego_speed_per_ts) == 0:
            obj_algo_gate = default_obj_algo_gate

        new_objects = []
        for lab_obj in self._objects:
            # collect (open_ts, close_ts) pairs of consecutive valid samples
            sections = []
            section_open_ts = None
            section_close_ts = None
            distxl = lab_obj.get_signal(OBJ_DISTX).GetValue()
            distyl = lab_obj.get_signal(OBJ_DISTY).GetValue()
            timestampl = lab_obj.get_signal(OBJ_DISTX).GetTimestamps()
            max_idx = len(timestampl) - 1
            for idx, timestamp in enumerate(timestampl):
                distx = distxl[idx]
                disty = distyl[idx]
                ego_speed = None
                if ego_speed_per_ts is not None:
                    ego_speed = ego_speed_per_ts.get(timestamp)
                    if ego_speed is None:
                        self._log.error(
                            "time stamp not found, ts of label differs from ts of bsig. "
                            "please use interpolate_to_time_system before filter_labels"
                        )
                if fov_bin_func(distx, disty) and obj_algo_gate(
                        ego_speed, distx):
                    if section_open_ts is None:
                        # open section
                        section_open_ts = timestamp
                    else:
                        # do nothing
                        pass
                    # extend section
                    section_close_ts = timestamp
                else:
                    if section_open_ts is None:
                        # do nothing
                        pass
                    else:
                        # close section
                        sections.append((section_open_ts, section_close_ts))
                        section_open_ts = None
                        section_close_ts = None
                if idx == max_idx and section_open_ts is not None:
                    # close section still open at the last sample
                    sections.append((section_open_ts, section_close_ts))
                    section_open_ts = None
                    section_close_ts = None

            # create new sections
            for section in sections:
                new_objects.append(
                    lab_obj.get_subset(startts=section[0], stopts=section[1]))

        # replace objects with the new object section list
        del self._objects
        self._objects = new_objects

    def adjust_label_distance(self,
                              correction_func=utils.adjust_distance_adma):
        """
        the reflection point needs to be determined for e.g. ADMA objects
        and the distance x and y is updated to that reflection point (instead of location of GPS device)


        :param correction_func: corr_distx, corr_disty = correction_func(distx, disty, length, width, orient)
        """
        for lab_obj in self._objects:
            lab_obj.adjust_label_distance(correction_func)

    def interpolate_to_time_system(self, new_timestamps, latency=0.0):
        """
        the timestamps need to be interpolated to the measurement for e.g. ADMA objects
        all signals incl values and timestamps are updated accordingly

        :param new_timestamps: new_timestamps from the measurement
        :param latency: time in milliseconds, it will be subtracted from reference object time
        :type latency: float
        """
        for lab_obj in self._objects:
            lab_obj.interpolate_to_time_system(new_timestamps, latency)

    def get_adma_objects(self):
        """
        returns only the adma objects

        :return: objects whose labeling type matches the ADMA association type id
        :rtype: list
        """

        all_obj = self.get_objects()
        adma = []
        for obj in all_obj:
            if obj.get_labeling_type(
            ) == self._data_source.get_adma_associated_type_id():
                adma.append(obj)
        return adma

    def write_objects_into_db(self,
                              assoc_type=DEFAULT_ASSOC_TYPE_ADMA,
                              rect_obj_record_template=None,
                              obj_kinematics_template=None):
        """
        Writes the objects into the DB / data source

        :param assoc_type: association type from DB ASSOCTYPEID, default is DEFAULT_ASSOC_TYPE_ADMA
        :param rect_obj_record_template: rect_obj_record_template from DB, default is DEFAULT_RECT_OBJ_RECORD_TEMPLATE
        :param obj_kinematics_template: obj_kinematics_template from DB, default is DEFAULT_OBJ_KINEMATICS_TEMPLATE
        """
        if rect_obj_record_template is None:
            rect_obj_record_template = DEFAULT_RECT_OBJ_RECORD_TEMPLATE
        if obj_kinematics_template is None:
            obj_kinematics_template = DEFAULT_OBJ_KINEMATICS_TEMPLATE
        for obj in self._objects:
            obj.write_object_into_db(self.__meas_id, assoc_type,
                                     rect_obj_record_template,
                                     obj_kinematics_template)

    def __fill_objects_from_generic_object_list(self, data_source,
                                                generic_object_list):
        """
        Load Objects from GenericObjectList

        Replaces the current object list with one LabelRectObject per
        generic object, reusing its id and already extracted signals.

        :param data_source: object database interface (see `__init__`)
        :param generic_object_list: GenericObjectList
        """
        # clear objects:
        self._objects = []
        for gen_obj in generic_object_list:
            self._objects.append(
                LabelRectObject(gen_obj.get_id(), None, None,
                                data_source, self._signal_names,
                                gen_obj.get_signals(), DEFAULT_LABELING_TYPE))

    @deprecated('write_objects_into_db')
    def WriteObjectsIntoDB(
            self,
            assoc_type=DEFAULT_ASSOC_TYPE_ADMA,  # pylint: disable=C0103
            rect_obj_record_template=None,
            obj_kinematics_template=None):
        """
        :deprecated: use `write_objects_into_db` instead
        """
        if rect_obj_record_template is None:
            rect_obj_record_template = DEFAULT_RECT_OBJ_RECORD_TEMPLATE
        if obj_kinematics_template is None:
            obj_kinematics_template = DEFAULT_OBJ_KINEMATICS_TEMPLATE
        return self.write_objects_into_db(assoc_type, rect_obj_record_template,
                                          obj_kinematics_template)
class RuntimeJob(object):
    """
    **job details for runtime class**

    A Job is a sequence of tasks executed to get simulation or validation results,
    for one complete testrun several jobs might be needed.
    Inside the ResultDb the RuntimeJobs are linked to the according `TestRun`.

    From Jobs executed on HPC cloud we'll get some runtime results of its tasks
    together with the reported incidents using a copy method.

    incidents provided by `HpcErrorDB` interface with
      - COL_NAME_RTL_JOBID:  jobid,
      - COL_NAME_RTL_TASKID: taskid,
      - COL_NAME_RTL_TYPE:   errtype,
      - COL_NAME_RTL_CODE:   errcode,
      - COL_NAME_RTL_DESCRIPTION: desc,
      - COL_NAME_RTL_SOURCE: src
      - COL_NAME_RTL_NODE: node

    methods to get filtered extracts
    """
    def __init__(self, node, jobid):
        """ initialize the incident

        :param node: name of the HPC node the job was executed on
        :type node:  str
        :param jobid: JobId of the HPC job run for the TestRun
        :type jobid:  integer
        """
        self.__node = node
        self.__jobid = jobid
        # cached counters, refreshed whenever the incident list changes
        self.__error_count = 0
        self.__exception_count = 0
        self.__crash_count = 0
        self.__incidents = []
        self._log = Logger(self.__class__.__name__)

    def __refresh_counts(self):
        """recompute the cached error/exception/crash counters from the incident list"""
        self.__error_count = self.CountIncidents(TYPE_ERROR)
        self.__exception_count = self.CountIncidents(TYPE_EXCEPTION)
        self.__crash_count = self.CountIncidents(TYPE_CRASH)

    def LoadHpcIncidents(self):  # pylint: disable=C0103
        """Load all incidents with given JobId from HPC error Db
        """
        # Connect to the Hpc Error DB
        with BaseDB('HPC') as hpc:
            for i in hpc.execute(
                    "SELECT HPCTASKID, TYPENAME, CODE, DESCR, SRC "
                    "FROM HPC_NODE INNER JOIN HPC_JOB USING(NODEID) "
                    "INNER JOIN HPC_TASK USING(JOBID) "
                    "INNER JOIN HPC_ERRORS USING(TASKID) "
                    "INNER JOIN HPC_ERRTYPE USING(TYPEID) "
                    "WHERE NODENAME = :node AND HPCJOBID = :job",
                    node=self.__node,
                    job=self.__jobid):
                self.__incidents.append(
                    RuntimeIncident(self.__node, self.__jobid, i[0], i[1],
                                    i[2], i[3], i[4]))

        self.__refresh_counts()

    def AddIncidents(self, incident_list):  # pylint: disable=C0103
        """
        **add list of incidents to the runtime job**
        and count occurrence of errors, exceptions and crashes

        job id needs to be equal to runtime job id for all incidents;
        if any incident carries a different job id nothing is added at all

        :param incident_list: list of incident dicts as returned by `HpcErrorDB`
        :type incident_list: [`RuntimeIncident`,...]
        :return: True if all incidents were added, False on job id mismatch
        :rtype: bool
        """
        # validate the complete list first so a foreign job id cannot leave
        # the incident list partially extended
        for incident in incident_list:
            if incident.job_id != self.__jobid:
                self._log.error(
                    'RuntimeJob list inconsistent, trying to add incident with different job id!! \n'
                    ' expct JobId: %s  added: %s' %
                    (self.__jobid, incident.job_id))
                return False

        for incident in incident_list:
            self.__incidents.append(
                RuntimeIncident(incident.node, incident.job_id,
                                incident.task_id, incident.type,
                                incident.code, incident.desc,
                                incident.src))

        self.__refresh_counts()
        return True

    def GetAllIncidents(self, itype=None):  # pylint: disable=C0103
        """
        return list of all incidents for given type

        :param itype: type of incident like 'Error', 'Crash',...
        :type itype:  str
        :return: all incidents of a given type, or all for no type, in insertion order
        :rtype:  list(`RuntimeIncident`)
        """
        rlist = self.__incidents
        if itype is not None:
            rlist = [x for x in rlist if x.type == itype]

        return rlist

    def CountIncidents(self, itype=None):  # pylint: disable=C0103
        """
        count the incidents for a given job id and opt. type

        :param itype: type of incident like 'Error', 'Crash',...
        :type itype: str
        :return: number of incidents
        :rtype: int
        """
        return len(self.GetAllIncidents(itype))

    @property
    def node(self):
        """AlgoTestReport Interface overloaded attribute, returns name of HPC node as string.
        """
        return self.__node

    @property
    def jobid(self):
        """AlgoTestReport Interface overloaded attribute, returns id of this job as provided by HPC as int.
        """
        return self.__jobid

    @property
    def error_count(self):
        """AlgoTestReport Interface overloaded attribute, returns number of Errors reported for this job as int.
        """
        return self.__error_count

    @property
    def exception_count(self):
        """AlgoTestReport Interface overloaded attribute, returns number of Exceptions reported for this job as int.
        """
        return self.__exception_count

    @property
    def crash_count(self):
        """AlgoTestReport Interface overloaded attribute, returns number of Crashes reported for this job as int.
        """
        return self.__crash_count

    @property
    def incidents(self):
        """AlgoTestReport Interface overloaded attribute, returns list of all incidents reported for this job.
        """
        return self.__incidents
Example #13
0
class Valf(object):
    """
    class defining methods to easily start validation suites
    by calling a python script without additional option settings (double click in win)

    mandatory settings:

    - outputpath (as instantiation parameter)
    - config file with `LoadConfig`
    - sw version of sw under test with `SetSwVersion`

    see `__init__` for additional options

    returns error level::

      RET_VAL_OK = 0       suite returned without error
      RET_GEN_ERROR = -1   general error
      RET_SYS_EXIT = -2    sys.exit called
      RET_CFG_ERROR = -3   error in direct settings or configuration file

    **Example:**

    .. python::

        # Import valf module
        from stk.valf import valf

        # set output path for logging ect., logging level and directory of plugins (if not subdir of current HEADDIR):
        vsuite = valf.Valf(getenv('HPCTaskDataFolder'), 10)  # logging level DEBUG, default level: INFO

        # mandatory: set config file and version of sw under test
        vsuite.LoadConfig(r'demo\\cfg\\bpl_demo.cfg')
        vsuite.SetSwVersion('AL_STK_V02.00.06')

        # additional defines not already set in config files or to be overwritten:
        vsuite.SetBplFile(r'cfg\\bpl.ini')
        vsuite.SetSimPath(r'\\\\Lifs010.cw01.contiwan.com\\data\\MFC310\\SOD_Development')

        # start validation:
        vsuite.Run()

    :author:        Joachim Hospes
    :date:          29.05.2013

    """
    def __init__(self, outpath, *args, **kwargs):
        """
        initialise all needed variables and settings

          - creates/cleans output folder
          - start process manager
          - start logging of all events, therefore the output path must be given

        :param outpath: path to output directory, can be relative to calling script
        :type outpath: str

        :param args: additional argument list which are also covered by keywords in order of occurrence

        :keyword logging_level: level of details to be displayed. default: info
                                (10=debug, 20=info, 30=warning, 40=error, 50=critical, 60=exception)
        :type logging_level: int [10|20|30|40|50]

        :keyword plugin_search_path: default: parent dir of stk folder, normally parallel to validation scripts
        :type plugin_search_path: str

        :keyword clean_folder:  default ``True``, set to ``False`` if the files in output folder should not be deleted
                                during instantiation of Valf
        :type clean_folder: bool

        :keyword logger_name:   name of logger is used for logfile name and printed in log file as base name,
                                if not set name/filename of calling function/module is used
        :type logger_name: str

        :keyword fail_on_error: Switch to control exception behaviour, if set
                                exceptions will be re-thrown rather than omitted or logged.
        :type fail_on_error: bool

        :keyword deprecations: set me to False to remove any deprecation warning outputs inside log
        :type deprecations: bool
        """
        self.__version = "$Revision: 1.6 $"
        self._uncrepl = UncRepl()

        # store all names of generated data busses like bus#0
        self.__data_bus_names = []
        self.__process_mgr = None

        # merge positional and keyword arguments into one options dict
        opts = arg_trans(
            [['logging_level', INFO], ['plugin_search_path', None],
             ['clean_folder', True], ['logger_name', None],
             ['fail_on_error', False], ['deprecations', True]], *args,
            **kwargs)

        self._fail_on_error = opts['fail_on_error']

        # prep output directory: create or clear content
        outpath = self._uncrepl(opath.abspath(outpath))
        clear_folder(outpath, opts['clean_folder'])

        logger_name = opts['logger_name']
        if logger_name is None:
            # get name of calling module
            frm = currentframe().f_back  # : disable=W0212
            if frm.f_code.co_filename:
                logger_name = opath.splitext(
                    opath.basename(frm.f_code.co_filename))[0]
            else:
                logger_name = 'Valf'
        # start logger, first with default level, idea for extension: can be changed later
        self.__logger = Logger(logger_name,
                               opts['logging_level'],
                               filename=opath.join(outpath,
                                                   logger_name + ".log"))
        self.__logger.info("Validation started at %s." %
                           strftime('%H:%M:%S', localtime(time())))
        self.__logger.info("Validation based on %s STK %s-%s of %s, CP: %s." %
                           ("original" if stk_checksum(True) else "adapted",
                            RELEASE, INTVERS, RELDATE, MKS_CP))
        self.__logger.info("Logging level is set to %s." % next(
            i
            for i, k in LEVEL_CALL_MAP.items() if k == opts['logging_level']))
        self.__logger.info("Validation arguments have been:")
        # bugfix: dict.iteritems() does not exist on Python 3; use items()
        # (consistent with the LEVEL_CALL_MAP.items() call just above)
        for k, v in opts.items():
            self.__logger.info("    %s: %s" % (k, str(v)))

        if not opts['deprecations']:
            self.__logger.warning(
                "Deprecation warnings have been switched off!")
            DeprecationUsage().status = False

        # find all observers down current path
        plugin_search_path = opts['plugin_search_path']
        plugin_folder_list = []
        if plugin_search_path is None:
            plugin_search_path = [HEAD_DIR]
        # take care of fast connections
        plugin_search_path = [self._uncrepl(i) for i in plugin_search_path]
        for spath in plugin_search_path:
            # skip folders inside the stk package itself
            plugin_folder_list.extend([
                dirPath for dirPath in list_folders(spath)
                if "\\stk\\" not in dirPath
            ])
            self.__logger.info('added to plugin search path:' + spath)
        # and add all observers down calling script's path
        stk_plugins = [
            opath.join(HEAD_DIR, "stk", "valf"),
            opath.join(HEAD_DIR, "stk", "valf", "obs"),
            opath.join(HEAD_DIR, "stk", "val")
        ]

        plugin_folder_list.extend(plugin_search_path)

        for spath in stk_plugins:
            plugin_folder_list.append(spath)
            self.__logger.debug('added to plugin search path:' + spath)

        # start process manager
        try:
            self.__process_mgr = ProcessManager(plugin_folder_list,
                                                self._fail_on_error)
        except:  # pylint: disable=W0702
            self.__logger.exception(
                "Couldn't instantiate 'ProcessManager' class.")
            if self._fail_on_error:
                raise
            sys.exit(RET_GEN_ERROR)

        self.__process_mgr.set_data_port(OUTPUTDIRPATH_PORT_NAME, outpath)
        self.__logger.debug("OutputDirPath: '%s'" % outpath)

        # set still needed default settings as have been in valf.main
        self.SetMasterDbPrefix(DEFAULT_MASTER_SCHEMA_PREFIX)
        self.SetErrorTolerance(ERROR_TOLERANCE_NONE)

        # should be activated some day, for now not all validation suites can be parallelised
        # if set on default we should invent a method DeactivateHpcAutoSplit to run the remaining or old suites
        # self.SetDataPort("HpcAutoSplit", True, "Global")

    def _check_mandatory_settings(self):
        """ private method

        check if additional mandatory settings are done

        does not run complete sanity check for config, here we just check additional mandatory settings
        that do not prevent the validation to run if they are missing
        e.g. no test if db connection is defined for cat reader, if not set cat reader will stop the initialisation

        :return:   number of missing settings, 0 if settings completed
        :rtype:    integer
        """
        error_cnt = 0

        if self.GetDataPort("SWVersion", "Global") is None:
            self.__logger.error("version of test sw not defined!")
            error_cnt += 1

        if (self.GetDataPort("HpcAutoSplit", "Global") is True
                and self.GetDataPort("SimSelection", "Global") is not None):
            self.__logger.error(
                "DataPort 'SimSelection' used by HPC, not available if 'HpcAutoSplit' is active!"
            )
            self.__logger.error(
                "Set either 'HpcAutoSplit' to False or don't set 'SimSelection'!"
            )
            error_cnt += 1

        return error_cnt

    def _set_hpc_selection(self):
        """ private method

        if the start script is running as HPC task on an HPC machine then
        set SimSelection to use only the entry given by the task number.

        e.g. for HPC task003: set SimSelection to [2]
        """
        # check HPC usage
        if self.GetDataPort("HpcAutoSplit", "Global") is True:
            task_name = getenv("TaskName")
            try:
                # T0000x task ids start with 1,  bpl list index with 0
                task_id = int(match(r'T(\d+)', str(task_name)).group(1)) - 1
            except AttributeError:
                self.__logger.exception(
                    "can't set Hpc Auto Split value as HPC environment variable Task Id"
                    " is empty or not valid: %s" % task_name)
                if self._fail_on_error:
                    raise
                sys.exit(RET_CFG_ERROR)
            self.__logger.info(
                "HpcAutoSplit: using entry %d of the sim collection" % task_id)
            self.SetDataPort("SimSelection", "[%d]" % task_id, "Global")

    def LoadConfig(self, filepath):  # pylint: disable=C0103
        """
        load configuration from path/filename, path can be relative to calling script

        Valid configuration properties are:

            - version: string defining version of config file, added to dict on port "ConfigFileVersions"
            - ClassName: quoted string to determine observer class to include in run (not in section "Global")
            - PortOut: list of port values (quoted strings) which should be exported to given bus name
            - InputData: pythonic list of tuples/lists which are taken and given as input for observer to be configured
            - ConnectBus: list of bus names to connect / register observer to (first one is taken actually)
            - Active: True/False value weather observer should be enabled or not
            - include: file (quoted) to include herein, chapter should be repeated there,
              if include is used within global scope, all chapters from included file are used

        config file example::

            # valf_basic.cfg
            # config for testing Valf class, based on valf_demo settings,

            [Global]
            ; version string will be added to dict on port "ConfigFileVersions":
            version="$Revision: 1.6 $"
            ;PortOut: Informs the name of the port that are set by the component
            PortOut=["ProjectName", "SWVersion", "FunctionName", "Device_Prefix"]
            ;InputData: Declares all input parameters
            InputData=[('ProjectName', 'VALF-test'),
                       ('FunctionName', 'STK_moduletest'),
                       ('SimName', 'N/A'),
                       ('Multiprocess', True ),
                       ('ValName', 'N/A')]
            ;ConnectBus: Specifies the bus connect to the component
            ConnectBus=["Global"]

            ; db connection is needed for the catalog reader only, **deactivated** here!!
            ; connection parameters passed to validation_main.py as options because it will differ for projects
            [DBConnector]
            ClassName="DBConnector"
            InputData=[("UseAllConnections", "True")]
            PortOut=[ "DataBaseObjects"]
            ConnectBus=["DBBus#1"]
            Active=False
            ;Order: Specifies the calling order
            Order=0

            ; bpl reader can be used to read simulation results, but in future the cat_reader should be used
            ;  to test the difference switch Activate setting for BPLReader and CATReader
            [VALF_BPL_test]
            ClassName="BPLReader"
            PortOut=["CurrentMeasFile", "CurrentSimFile"]
            InputData=[("SimFileExt", "bin")]
            ConnectBus=["bus#1"]
            ; read additional config file data for this section, can overwrite complete setting before
            ; so e.g. InputData needs to list all input values,
            ; the values from include-cfg are not added but replace former set!
            Include="..\..\..\04_Test_Data\01a_Input\valf\valf_include_VALF_BPL_test.cfg"
            Active=True
            ;Order: Specifies the calling order
            Order=1

            ; cat reader needs db connector to setup connection to catalog db!
            [VALF_CAT_REF]
            ClassName="CATReader"
            PortOut=[ "CurrentMeasFile", "CurrentSimFile"]
            InputData=[("SimFileExt", "bsig"),("SimFileBaseName", "") ]
            ConnectBus=["Bus#1"]
            Active=False
            Order=1

        general used ports on bus ``Global`` (set by `ProjectManager`):

            - set "ConfigFileVersions"
                dict with file name as key and version as value for each loaded config file
            - read "FileCount"
                to show progress bar
            - read "IsFinished"
                to continue with next state when all sections of a recording are validated (set by `SignalExtractor`)

        Also setting ports as defined in ``InputData``  for the named bus.


        usage (example):

        .. python::

          from stk.valf import Valf

          vrun = stk.valf.Valf()
          vrun.load_config(r'conf/validation.cfg')

        :param filepath: path and filename of the config file to load
        :type filepath:  string
        """
        absfile = self._uncrepl(opath.abspath(filepath))
        # preset of port ConfigFileName currently not supported!!! what was it used for??
        # config_filename = self.__process_mgr.get_data_port(CFG_FILE_PORT_NAME)
        # if config_filename is None:
        #     config_filename = absfile
        # else:
        #     config_filename += ', ' + absfile
        self.__process_mgr.set_data_port(CFG_FILE_PORT_NAME, absfile)
        if self.__logger is not None:
            self.__logger.info("Using configuration file: '%s'" % absfile)
            try:
                if not self.__process_mgr.load_configuration(absfile):
                    sys.exit(RET_CFG_ERROR)
            except ValfError:
                msg = 'Validation error during configuration load'
                if self.__process_mgr.last_config is not None:
                    msg += (" (%s)" % self.__process_mgr.last_config)
                self.__logger.exception(msg)
                if self._fail_on_error:
                    raise
                sys.exit(RET_SYS_EXIT)
            except SystemExit:
                msg = 'system exit by one module during configuration load'
                if self.__process_mgr.last_config is not None:
                    msg += (" (%s)" % self.__process_mgr.last_config)
                    self.__logger.exception(msg)
                self.__logger.error(msg)
                if self._fail_on_error:
                    raise
                sys.exit(RET_SYS_EXIT)
            except:
                msg = "unexpected error (%s) during configuration load" % str(
                    sys.exc_info)
                if self.__process_mgr.last_config is not None:
                    msg += (" (%s)" % self.__process_mgr.last_config)
                    self.__logger.exception(msg)
                self.__logger.exception(msg)
                if self._fail_on_error:
                    raise
                sys.exit(RET_GEN_ERROR)

    def SetBplFile(self, filepath):  # pylint: disable=C0103
        """
        store path/filename of the bpl file (.ini or .bpl) on data port ``BplFilePath``;
        the path can be relative to the starting script,
        existence of the file is checked and the run stops in case of errors

        :param filepath: path/filename of batch play list
        :type filepath:  string
        """
        bpl_path = self._uncrepl(opath.abspath(filepath))
        self.__logger.debug("BplFilePath: '%s'" % bpl_path)
        # guard clause: refuse to continue without a readable bpl file
        if filepath is None or not opath.isfile(bpl_path):
            self.__logger.error(
                "Missing mts batch play list: can not open bpl file '%s'" %
                bpl_path)
            sys.exit(RET_CFG_ERROR)
        self.__process_mgr.set_data_port(PLAY_LIST_FILE_PORT_NAME, bpl_path)

    def SetCollectionName(self, collection_name):  # pylint: disable=C0103
        """
        store the collection name of rec files in the catalog db on data port
        ``RecCatCollectionName``; the cat reader uses it to select the
        recording list of a project

        :param collection_name: name of the collection
        :type collection_name:  string
        """
        self.__process_mgr.set_data_port(COLLECTION_NAME_PORT_NAME,
                                         collection_name)
        msg = "Rec file cataloge collection name is: '%s'" % collection_name
        self.__logger.debug(msg)

    def SetDataPort(self, port_name, value, bus_name='Global'):  # pylint: disable=C0103
        """
        write a value to the named valf data port on the named bus;
        may be called repeatedly for different ports and bus names

        in general these ports should be set using the config file ``InputData`` entry!

        :param port_name: valf data port name, not case sensitiv
        :type port_name:  string
        :param value:     port value, type depends on port usage
        :type value:      user defined
        :param bus_name:  valf data bus name, default: ``Global``, not case sensitiv
        :type bus_name:   string
        """
        self.__process_mgr.set_data_port(port_name, value, bus_name)
        log_msg = 'valf script setting port "%s" :' % port_name
        self.__logger.debug(log_msg + str(value))

    def SetDbFile(self, filepath):  # pylint: disable=C0103
        """
        store the name of the sqlite database file (used instead of an oracle db)
        on data port ``dbfile`` of bus ``DBBus#1``;
        existence of the file is checked, a missing file stops the run

        :param filepath: path/name of the database file
        :type filepath:  string
        """
        db_file = self._uncrepl(opath.abspath(filepath))
        if opath.exists(db_file):
            self.__process_mgr.set_data_port(DB_FILE_PORT_NAME, db_file,
                                             'DBBus#1')
        else:
            self.__logger.error("defined db file '%s' not found" % db_file)
            sys.exit(RET_CFG_ERROR)

    def SetErrorTolerance(self, tolerance):  # pylint: disable=C0103
        """
        store the error tolerance on data port ``ErrorTolerance`` of bus ``Bus#1``,
        value as defined in `db_commmon`

        :param tolerance: error tolerance value
        :type tolerance:  integer
        """
        self.__process_mgr.set_data_port(ERROR_TOLERANCE_PORT_NAME,
                                         tolerance, "Bus#1")

    @deprecated()
    def SetMasterDbDbq(self, dbq):  # pylint: disable=C0103
        """
        store the oracle data base qualifier on data port "masterdbdbq"
        (name defined in `valf.db_connector`) of bus ``DBBus#1``;
        default value defined in db.db_common by DEFAULT_MASTER_DBQ

        :param dbq: data base qualifier for oracle data bases
        :type dbq:  string
        :note:      don't use together with DSN setting
        """
        self.__process_mgr.set_data_port(MASTER_DB_DBQ_PORT_NAME,
                                         dbq, "DBBus#1")

    @deprecated()
    def SetMasterDbDsn(self, dsn):  # pylint: disable=C0103
        """
        store the data source name on data port ``masterdbdsn``
        (name defined in `valf.db_connector`) of bus ``DBBus#1``;
        default value defined in db.db_common by DEFAULT_MASTER_DSN

        :param dsn: data source name for odbc interface connections
        :type dsn:  string
        :note:      don't use together with DBQ setting
        """
        self.__process_mgr.set_data_port(MASTER_DB_DSN_PORT_NAME,
                                         dsn, "DBBus#1")

    def SetMasterDbUser(self, user):  # pylint: disable=C0103
        """
        store the data base user name on data port ``masterdbuser``
        (name defined in `valf.db_connector`) of bus ``DBBus#1``

        :param user: name of data base user
        :type user:  string
        """
        self.__process_mgr.set_data_port(MASTER_DB_USR_PORT_NAME,
                                         user, "DBBus#1")

    def SetMasterDbPwd(self, passwd):  # pylint: disable=C0103
        """
        store the data base password on data port ``masterdbpassword``
        (name defined in `valf.db_connector`) of bus ``DBBus#1``

        :param passwd: password for data base user
        :type passwd:  string
        """
        self.__process_mgr.set_data_port(MASTER_DB_PW_PORT_NAME,
                                         passwd, "DBBus#1")

    def SetMasterDbPrefix(self, prefix):  # pylint: disable=C0103
        """
        store the schema prefix on data port ``masterdbschemaprefix``
        (name defined in `valf.db_connector`) of bus ``DBBus#1``

        :param prefix: schema prefix for data base table
        :type prefix:  string
        """
        self.__process_mgr.set_data_port(MASTER_DB_SPX_PORT_NAME,
                                         prefix, "DBBus#1")

    def SetSimPath(self, pathname, bus_name="Bus#1"):  # pylint: disable=C0103
        """
        store the folder with the measurement/simulation output files on data
        port ``SimOutputPath`` of the named bus (default: ``Bus#1``)

        checks if the path exists and raises a `ValfError` if not

        make sure your config uses matching bus names for the bpl/cat reader(s)!

        :param pathname: absolute path where simulation result files are stored
        :type pathname:  string
        :param bus_name: data bus name of the bpl/cat reader, default ``Bus#1``, not case sensitiv
        :type bus_name:  string
        """
        pathname = self._uncrepl(pathname)
        # guard clause: a missing sim output folder is a setup error
        if not opath.exists(pathname):
            exception_msg = "Sim Output folder providing bsig/csv files does not exist:\n" +\
                            "{}\nPlease check your setup".format(pathname)
            self.__logger.exception(exception_msg)
            raise ValfError(exception_msg)
        self.__process_mgr.set_data_port(SIM_PATH_PORT_NAME, pathname,
                                         bus_name)
        self.__logger.debug(
            "Setting input data. [ Bus='{0}', "
            "PortName='SimOutputPath', PortValue={1}]".format(
                bus_name, pathname))
        # register the bus once so later processing knows all used buses
        if bus_name not in self.__data_bus_names:
            self.__data_bus_names.append(bus_name)
            self.__process_mgr.set_data_port(DATA_BUS_NAMES,
                                             self.__data_bus_names)

    def SetSwVersion(self, version):  # pylint: disable=C0103
        """
        store the version of the software under test on data port ``SWVersion``,
        currently a mandatory setting!!

        :param version: sw version of sw under test
        :type version:  string
        """
        self.__process_mgr.set_data_port(SWVERSION_PORT_NAME, version)

    def SetRefSwVersion(self, version):  # pylint: disable=C0103
        """
        store the version of the regression software under test on data port
        ``SWVersion_REG`` (optional)

        :param version: sw version of regression sw under test
        :type version:  string
        """
        self.__process_mgr.set_data_port(SWVERSION_REG_PORT_NAME, version)

    def SetSaveResults(self, saveit=True):  # pylint: disable=C0103
        """
        store whether results should be written to the database on data port
        ``SaveResultInDB`` (optional)

        :param saveit: Save the results into the database, default = True
        :type saveit:  boolean
        """
        self.__process_mgr.set_data_port(SAVE_RESULT_IN_DB, saveit)

    def GetDataPort(self, port_name, bus_name='Global'):  # pylint: disable=C0103
        """
        read back the value of the named valf data port on the named bus;
        may be called repeatedly for different ports and bus names

        :param port_name: valf data port name, not case sensitiv
        :type port_name:  string

        :param bus_name: valf data bus name, default: ``Global``, not case sensitiv
        :type bus_name:  string

        :return: port data
        :rtype:  undefined
        """
        value = self.__process_mgr.get_data_port(port_name, bus_name)
        return value

    def ActivateHpcAutoSplit(self):  # pylint: disable=C0103
        r"""
        activate auto splitting of bpl/cat list on HPC

        Running on HPC a validation can run in parallel on several tasks. This method sets data port ``HpcAutoSplit``
        to ``True`` so each validation suite running on one task/machine only reads the sim results of one recording::

              bpl / cat list       HPC TaskID
            ---------------------- ----------
            recording_entry_0.rec    T00001
            recording_entry_1.rec    T00002
            recording_entry_2.rec    T00003
            ...                      ...

        **The tasks must be created during job submit,** this is not done by Valf!!

        Example to create an own task for each bpl entry:

        .. python::

            # Create the Validation Tasks
            reclist = bpl.Bpl(BPL_FILE).read()
            task = hpc.TaskFactory(job)
            for rec in reclist:
                task.create_task(r"D:\data\%JobName%\1_Input\valf_tests\custom\demo\run_valf_demo_bpl.py")

        """
        # delegate to SetDataPort so the port is set and logged the usual way
        self.SetDataPort(HPC_AUTO_SPLIT_PORT_NAME, True, 'global')

    def Run(self):
        """ start the validation after all needed preparations

        :return:  success or error value during validation run
        :rtype:   error codes:
          RET_VAL_OK = 0
          RET_GEN_ERROR = -1
          RET_SYS_EXIT = -2
          RET_CFG_ERROR = -3

        """
        if LooseVersion(sqlite_version) <= LooseVersion(MIN_SQLITE_VERSION):
            self.__logger.error(
                "error in setup: please update your sqlite3.dll!\n"
                "Just call batch script listed on Validation wiki -> needed tools."
            )
            sys.exit(RET_CFG_ERROR)

        # fix: use '!=' instead of 'is not' -- identity comparison against
        # int constants is implementation dependent and raises a
        # SyntaxWarning on Python >= 3.8
        if self._check_mandatory_settings() != 0:
            self.__logger.error("error in setup: mandatory settings missing")
            sys.exit(RET_CFG_ERROR)
        tstart = time()
        self._set_hpc_selection()
        try:
            ret_val = self.__process_mgr.run()
        except Exception:
            self.__logger.exception("unexpected runtime error")
            if self._fail_on_error:
                raise
            sys.exit(RET_GEN_ERROR)

        if ret_val != RET_VAL_OK:
            self.__logger.error(
                "runtime error in validation suite, error level %d" % ret_val)

        self.__logger.info("Test duration(hh:mm:ss): " +
                           strftime('%H:%M:%S', gmtime(time() - tstart)))

        self.__logger.info("Logging statistics: " + ", ".join([
            "%s: %d" % (k, v)
            for k, v in self.__logger.get_statistics().items() if v > 0
        ]))

        print('val run ended with result', ret_val)
        return ret_val
class PluginManager(object):
    """
    class to search for plugin classes based on 'BaseComponentInterface'
    to be used as observer components

    can check for duplicated class names to throw an error if it finds one
    """
    def __init__(self, folder_path_list, cls):
        """
        initialise a new object, adds existing folders of folder_path_list to sys.path

        :param folder_path_list: list [] of folders to check recursively
        :param cls: base class of which to find subclasses
        """
        self._uncrepl = UncRepl()
        # fix: keep the UncRepl-converted paths; the original code overwrote
        # this attribute with the raw input list again, which undid the
        # UNC path replacement
        self.__folder_path_list = [
            self._uncrepl(fpl) for fpl in folder_path_list
        ]
        self.__cls = cls

        self.__logger = Logger(self.__class__.__name__)

        # make the plugin folders importable
        for folder_path in self.__folder_path_list:
            if folder_path not in sys.path:
                sys.path.append(folder_path)

    def __get_plugin_list(self, module_name_list):
        """
        import the given modules and collect classes derived from self.__cls

        :param module_name_list: list of modules to search in; entries are
            either plain module names or [absolute.module.path, module_name]
        :return: list of {"type": class, "name": class_name} dicts,
            empty list if nothing was found
        """
        plugin_list = []

        for module_name in module_name_list:
            self.__logger.debug("Checking: %s.py..." % module_name)
            try:
                # use relative or absolute (for all stk modules) import method
                if isinstance(module_name, (list, tuple)):
                    module = __import__(module_name[0], globals(), locals(),
                                        module_name[1], 0)
                else:
                    module = __import__(module_name)
            except Exception as msg:
                self.__logger.warning(
                    "Couldn't import module '%s' due to '%s'" %
                    (str(module_name), str(msg)))
                continue

            # look through this dictionary for classes
            # that are subclass of PluginInterface but are not PluginInterface itself
            module_candidates = list(module.__dict__.items())

            for class_name, entry in module_candidates:
                if class_name == self.__cls.__name__:
                    continue

                if entry is None:
                    continue

                # skip Qt artifacts pulled in by imported modules
                if str(entry).find("PyQt4") > -1:
                    continue

                try:
                    if issubclass(entry, self.__cls):
                        self.__logger.debug(
                            "Found plugin.[Module: '%s', Class: '%s']." %
                            (module_name, class_name))
                        plugin_list.append({"type": entry, "name": class_name})
                except TypeError:
                    # this happens when a non-type is passed in to issubclass. We
                    # don't care as it can't be a subclass of PluginInterface if
                    # it isn't a type
                    continue

        # fix: always return a list -- returning None here made the
        # len(plugin_list) call in get_plugin_class_list raise TypeError
        # when no plugin was found
        return plugin_list

    def get_plugin_class_list(self, remove_duplicates=False):
        """searches the configured folders to find plugin classes

        :param remove_duplicates: wether duplicates should be removed
        :return: list of {"type": class, "name": class_name} dicts,
            None if nothing was found or duplicates remain
        """
        module_name_list = []
        for folder_path in self.__folder_path_list:
            try:
                file_list = os.listdir(folder_path)
            except OSError:
                continue

            # For all modules within the stk use absolute module path to
            # avoid problems with dublicate package names
            lst = []
            stk_found = False
            path = folder_path
            module_path = ""
            # walk the path upwards until an 'stk' folder (or the root) is hit
            while stk_found is False:
                head, tail = os.path.split(path)

                if tail == '':
                    if head != '':
                        lst.insert(0, head)
                    break
                else:
                    lst.insert(0, tail)
                    path = head
                    if tail == 'stk':
                        stk_found = True
                        for p_k in lst:
                            module_path += p_k + "."

            for file_name in file_list:
                if file_name.endswith(".py") and not file_name.startswith(
                        "__") and not file_name.startswith("stk"):
                    module_name = file_name.rsplit('.', 1)[0]
                    if module_path == "":
                        module_name_list.append(module_name)
                    else:
                        # add stk path to module name
                        module_name_list.append(
                            [module_path + module_name, module_name])

        plugin_list = self.__get_plugin_list(module_name_list)
        if plugin_list:
            check_duplicates = self.__check_for_duplicate_classes(plugin_list)
            if check_duplicates == -1 and remove_duplicates is True:
                plugin_list = self.__remove_duplicate_classes(plugin_list)
                return plugin_list
            elif check_duplicates == 0:
                return plugin_list

        # keep the historic contract: None signals "nothing usable found"
        return None

    def __check_for_duplicate_classes(self, plugin_list):
        """ Check if there are any duplicates in the class list and throw an error if found.

        :param plugin_list: A list of the plugins found.
        :return: 0 for success and -1 if duplicate is found.
        """
        num_modules = len(plugin_list)
        for idx, module_name in enumerate(plugin_list):
            for i in range(idx + 1, num_modules):
                if module_name["name"] == plugin_list[i]["name"]:
                    self.__logger.error("Duplicate class name found: %s" %
                                        (module_name["name"]))
                    return -1
        return 0

    @staticmethod
    def __remove_duplicate_classes(plugin_list):
        """removes duplicate classes from plugin list,
        keeping the first occurrence of each class name
        """
        temp_mem = []
        copy_plugin_list = []

        for idx, module_name in enumerate(plugin_list):
            if module_name['name'] not in temp_mem:
                copy_plugin_list.append(plugin_list[idx])
                temp_mem.append(module_name['name'])

        return copy_plugin_list
Example #15
0
class ValAssessmentWorkFlows(object):
    """ Base class for assessments workflows
    """
    # workflow state names as stored in the GBL db
    ASS_WF_AUTO = "automatic"
    ASS_WF_MANUAL = "manual"
    ASS_WF_REVIEWED = "verified"
    ASS_WF_REJECTED = "rejected"

    def __init__(self):
        """ Initialize the workflow class
        """
        #  List of Workflow States (db records, filled by load())
        self.__workflows = []
        self.__workflow_list = [
            ValAssessmentWorkFlows.ASS_WF_AUTO,
            ValAssessmentWorkFlows.ASS_WF_MANUAL,
            ValAssessmentWorkFlows.ASS_WF_REVIEWED,
            ValAssessmentWorkFlows.ASS_WF_REJECTED
        ]
        #  Observer Type
        #  NOTE(review): this stores the builtin 'type' -- looks like a
        #  leftover; kept unchanged to preserve behavior
        self.__type = type
        self._logger = Logger(self.__class__.__name__)

    def load(self, dbi_gbl):
        """ Load the assessment states

        :param dbi_gbl: global db interface
        :return: True on passed, False on Error
        """
        if not issubclass(dbi_gbl.__class__, db_gbl.BaseGblDB):
            self._logger.error("GBL Database interface undefined")
            return False

        for wf_name in self.__workflow_list:
            wflow = dbi_gbl.get_workflow(wf_name)
            if db_gbl.COL_NAME_WORKFLOW_WFID in wflow:
                self.__workflows.append(wflow)
        return True

    def get_states(self):
        """ Return the list of workflow state names

        :return: list of names of all loaded workflow states
        """
        state_list = []
        for state in self.__workflows:
            # fix: append to the result list -- the original called append()
            # on the workflow record itself, so the method raised
            # AttributeError (dict has no append) / never filled the result
            state_list.append(state[db_gbl.COL_NAME_WORKFLOW_NAME])

        return state_list

    def get_state_id(self, wf_name):
        """ Get Workflow State id for the given name

        :param wf_name: name of workflow
        :return: workflow id, None if the name is unknown
        """
        for state in self.__workflows:
            if wf_name.lower() == state[db_gbl.COL_NAME_WORKFLOW_NAME].lower():
                return state[db_gbl.COL_NAME_WORKFLOW_WFID]

        return None

    def get_state_name(self, wf_id):
        """ Get Workflow State name for the given id

        :param wf_id: id of workflow
        :return: workflow name, None if the id is unknown
        """
        for state in self.__workflows:
            if wf_id == state[db_gbl.COL_NAME_WORKFLOW_WFID]:
                return state[db_gbl.COL_NAME_WORKFLOW_NAME]

        return None

    @deprecated('load')
    def Load(self, dbi_gbl):  # pylint: disable=C0103
        """deprecated"""
        return self.load(dbi_gbl)

    @deprecated('get_states')
    def GetStates(self):  # pylint: disable=C0103
        """deprecated"""
        return self.get_states()

    @deprecated('get_state_id')
    def GetStateId(self, wf_name):  # pylint: disable=C0103
        """deprecated"""
        return self.get_state_id(wf_name)

    @deprecated('get_state_name')
    def GetStateName(self, wf_id):  # pylint: disable=C0103
        """deprecated"""
        return self.get_state_name(wf_id)
class ProcessManager(object):
    r"""
    valf internal class to provide essential processing for observers

    - initialize

        - start logger
        - initialize data_manager
        - search classes based on class BaseComponentInterface

    - load configuration

        - import declared observer modules
        - set data ports

    - run validation

        - call all methods of all observers sequentially
        - use bpl_reader or similar to run through all recordings

    This class also is responsible to read out configuration and interpretation from config file.

    general used ports on bus ``Global``:

        - set "ConfigFileVersions"
            dict with file name as key and version as value for each loaded config file
        - read "FileCount"
            to show progress bar
        - read "IsFinished"
            to continue with next state when all sections of a recording are validated (set by `SignalExtractor`)

    Also setting ports as defined in ``InputData``  for the named bus.

    """
    def __init__(self, plugin_dir, fail_on_error=False):
        """init essencials

        :param plugin_dir: path or list of paths where to start search for observers
        :type plugin_dir:  string or list of strings

        :param fail_on_error: flag to break immediately if an exception is found
        :type fail_on_error:  boolean
        """
        self._logger = Logger(self.__class__.__name__)
        self._logger.debug()

        self._component_list = []

        self._version = "$Revision: 1.11 $"

        self._progressbar = None
        self._file_count = 0
        self._object_map_list = []
        self._config_file_loaded = False
        self._fail_on_error = fail_on_error
        self._configfiles = []  # used as stack to load configs recursively
        self._config_file_versions = {}

        self._uncrepl = UncRepl()

        # fix: the docstring allows a single path string, but list.extend()
        # on a str raises AttributeError -- normalize to a list first
        if isinstance(plugin_dir, str):
            plugin_dir = [plugin_dir]
        plugin_dir.extend([
            self._uncrepl(dir_) for dir_ in OBS_DIRS if dir_ not in plugin_dir
        ])

        self._logger.info("Searching for plug-ins. Please wait...")
        class_map_list, self._plugin_error_list = find_class(
            bci, plugin_dir, with_error_list=True)
        if class_map_list is None:
            self._logger.error("No plug-ins found.")
            return

        self._logger.debug("%d plug-ins found: %s." %
                           (len(class_map_list), ", ".join(
                               [i['name'] for i in class_map_list])))
        # map plugin class name -> plugin class for later instantiation
        self._plugin_map = {
            plugin['name']: plugin["type"]
            for plugin in class_map_list
        }

        # Create data manager object
        try:
            self._data_manager = DataManager()
        except:
            self._logger.exception("Couldn't instantiate 'DataManager' class.")
            if self._fail_on_error:
                raise
            sexit(bci.RET_VAL_ERROR)

    def _initialize(self):
        """calls Initialize and PostInitialize of ordered observers

        :return: bci.RET_VAL_OK when all observers initialized,
                 bci.RET_VAL_ERROR on the first failing observer
        """
        self._logger.debug()

        # Calls Initialize for each component in the list
        for component in self._component_list:
            try:
                if component.Initialize() != bci.RET_VAL_OK:
                    self._logger.error(
                        "Class '%s' returned with error from Initialize() method."
                        % component.__class__.__name__)
                    return bci.RET_VAL_ERROR
            except:
                self._logger.exception(
                    'EXCEPTION during Initialize of %s:\n%s' %
                    (component.__class__.__name__, format_exc()))
                if self._fail_on_error:
                    raise
                return bci.RET_VAL_ERROR

        # Calls PostInitialize for each component in the list
        for component in self._component_list:
            try:
                if component.PostInitialize() != bci.RET_VAL_OK:
                    self._logger.error(
                        "Class '%s' returned with error from PostInitialize() method."
                        % component.__class__.__name__)
                    return bci.RET_VAL_ERROR
            except:
                self._logger.exception(
                    'EXCEPTION during PostInitialize of %s:\n%s' %
                    (component.__class__.__name__, format_exc()))
                if self._fail_on_error:
                    raise
                return bci.RET_VAL_ERROR

        self._file_count = self.get_data_port("FileCount")
        # fix: guard against an unset port -- 'None > 0' raises TypeError
        # on Python 3
        if self._file_count is not None and self._file_count > 0:
            self._progressbar = ProgressBar(0,
                                            self._file_count,
                                            multiline=True)
        else:
            self._file_count = 0

        self._logger.debug("all components ready to run!")
        self._logger.mem_usage()
        return bci.RET_VAL_OK

    def _process_data(self):
        """calls LoadData, ProcessData as well as PostProcessData of ordered observers
        for every sim file until port "IsFinished" is set

        :return: bci.RET_VAL_OK when at least one file was processed completely,
                 bci.RET_VAL_ERROR otherwise
        """
        self._logger.debug()

        if self._file_count == 0:
            self._logger.debug(
                str(_getframe().f_code.co_name) + "No files to process.")
            # fix: use the qualified constant for consistency with the rest
            # of this class (the bare name may not be in scope here)
            return bci.RET_VAL_OK

        ret = bci.RET_VAL_ERROR
        counter = 0

        # fix throughout: compare return codes with '=='/'!=' -- identity
        # checks ('is') against int constants are implementation dependent
        while not self.get_data_port("IsFinished"):
            # update progressbar position
            self._progressbar(counter)

            counter += 1

            # Calls LoadData for each component in the list
            for component in self._component_list:
                try:
                    ret = component.LoadData()
                    if ret == bci.RET_VAL_ERROR:
                        self._logger.error(
                            "Class '%s' returned with error from LoadData() method, "
                            "continue with next sim file." %
                            component.__class__.__name__)
                        break
                except:
                    self._logger.exception(
                        'exception raised during LoadData of %s:\n%s, '
                        'continue with next sim file.' %
                        (component.__class__.__name__, format_exc()))
                    ret = bci.RET_VAL_ERROR
                    if self._fail_on_error:
                        raise
                    break

            if ret == bci.RET_VAL_ERROR:
                continue

            # Calls ProcessData for each component in the list
            for component in self._component_list:
                try:
                    ret = component.ProcessData()
                    if ret == bci.RET_VAL_ERROR:
                        self._logger.error(
                            "Class '%s' returned with error from ProcessData() method, "
                            "continue with next sim file." %
                            component.__class__.__name__)
                        break
                except:
                    self._logger.exception(
                        'EXCEPTION during ProcessData of %s:\n%s, '
                        'continue with next sim file.' %
                        (component.__class__.__name__, format_exc()))
                    ret = bci.RET_VAL_ERROR
                    if self._fail_on_error:
                        raise
                    break

            if ret == bci.RET_VAL_ERROR:
                continue

            # Calls PostProcessData for each component in the list
            for component in self._component_list:
                try:
                    ret = component.PostProcessData()
                    if ret == bci.RET_VAL_ERROR:
                        self._logger.error(
                            "Class '%s' returned with error from PostProcessData() method, "
                            "continue with next sim file." %
                            component.__class__.__name__)
                        break
                except:
                    self._logger.exception(
                        'EXCEPTION during PostProcessData of %s:\n%s, '
                        'continue with next sim file.' %
                        (component.__class__.__name__, format_exc()))
                    ret = bci.RET_VAL_ERROR
                    if self._fail_on_error:
                        raise
                    break

            if ret == bci.RET_VAL_ERROR:
                continue

            # we have processed correctly at least a file,
            # set _process_data return value to OK in order to finish it's process

            self._logger.mem_usage()
            ret = bci.RET_VAL_OK

        if counter > 0:
            self._progressbar(counter)

        return ret

    def _terminate(self):
        """calls PreTerminate and Terminate of ordered observers

        Observers are processed in their configured order; the first
        failing observer aborts the whole sequence.

        :return: bci.RET_VAL_OK on success, bci.RET_VAL_ERROR otherwise
        :raise: re-raises observer exceptions when self._fail_on_error is set
        """
        self._logger.debug()

        # Calls PreTerminate for each component in the list
        for component in self._component_list:
            try:
                if component.PreTerminate() != bci.RET_VAL_OK:
                    self._logger.error(
                        "Class '%s' returned with error from PreTerminate() method."
                        % component.__class__.__name__)
                    return bci.RET_VAL_ERROR
            except Exception:
                self._logger.exception(
                    'EXCEPTION during PreTerminate of observer %s:\n%s' %
                    (component.__class__.__name__, format_exc()))
                if self._fail_on_error:
                    raise
                return bci.RET_VAL_ERROR

        # Calls Terminate for each component in the list
        for component in self._component_list:
            try:
                if component.Terminate() != bci.RET_VAL_OK:
                    # plain error return value, no exception is active here:
                    # use error() like the PreTerminate branch above
                    # (was logger.exception by mistake)
                    self._logger.error(
                        "Class '%s' returned with error from Terminate() method."
                        % component.__class__.__name__)
                    return bci.RET_VAL_ERROR
            # narrowed from bare 'except:' so SystemExit/KeyboardInterrupt
            # are not swallowed (matches the PreTerminate loop above)
            except Exception:
                self._logger.exception(
                    'EXCEPTION during Terminate of observer %s:\n%s' %
                    (component.__class__.__name__, format_exc()))
                if self._fail_on_error:
                    raise
                return bci.RET_VAL_ERROR

        return bci.RET_VAL_OK

    def get_data_port(self, port_name, bus_name="Global"):
        """reads a value from the named port of a bus

        Delegates to the data manager; see `DataManager.get_data_port`.

        :param port_name: port name to use
        :param bus_name: bus name to use
        :return: data from bus/port
        """
        manager = self._data_manager
        return manager.get_data_port(port_name, bus_name)

    def set_data_port(self, port_name, port_value, bus_name="Global"):
        """writes a value to the named port of a bus

        Delegates to the data manager; see `DataManager.set_data_port`.
        The port/bus is created by the manager when it doesn't exist yet.

        :param port_name: port name to use
        :param port_value: data value to be set
        :param bus_name: bus name to use
        """
        manager = self._data_manager
        manager.set_data_port(port_name, port_value, bus_name)

    def _get_err_trace(self):
        """returns error trace from error list
        """
        if self._plugin_error_list:
            err_trace = '\n'.join('++ file: {0}.py -- {1}\n'.format(
                e[0], e[1].replace('\n', '\n--> '))
                                  for e in self._plugin_error_list)
        else:
            err_trace = 'no detailed info about failure'

        return err_trace

    def load_configuration(self, configfile):
        """loads configuration from cfg-file

        see more details in `Valf.LoadConfig`

        Parses the component map from the config file, instantiates every
        configured observer plug-in, registers its output ports and input
        data on the configured buses and finally sorts the instances by
        their "Order" value into self._component_list.

        :param configfile: path/to/file.cfg
        :return: success (bool)
        :raise ValfError: when the config file doesn't exist or a
                          configured observer class is not in the plug-in map
        """
        # normalize path via project helper before any file access
        configfile = self._uncrepl(configfile)
        cls_obj = None

        if not opath.exists(configfile):
            raise ValfError(
                "Configuration file '%s' doesn't exist or is invalid." %
                configfile)
            # self._logger.error("Configuration file '%s' doesn't exist or is invalid." % configfile)
            # return False

        self.set_data_port(CFG_FILE_VERSION_PORT_NAME,
                           self._config_file_versions)
        # all "Order" values seen so far; max(autoorder) + 1 serves as the
        # default when a component does not configure its own order
        autoorder = [-1]
        component_map = self._read_config(configfile)
        self._logger.info(
            "loading version: '%s' of config file '%s'" %
            (self._config_file_versions.get(configfile, ""), configfile))
        for componentname in component_map:
            try:  # retrieve details
                # NOTE(review): eval() executes arbitrary expressions from
                # the cfg-file -- only trusted config files must be loaded
                class_name = eval(component_map[componentname].get(
                    "ClassName", "None"))
                # port_in_list = component_map[componentname].get("PortIn")
                port_out_list = eval(component_map[componentname].get(
                    "PortOut", "[]"))
                input_data_list = eval(component_map[componentname].get(
                    "InputData", "[]"))
                connect_bus_list = eval(component_map[componentname].get(
                    "ConnectBus", "Bus#1"))
                order = component_map[componentname].get(
                    "Order",
                    max(autoorder) + 1)
                # duplicate orders are only reported, not rejected
                if order in autoorder:
                    self._logger.info(
                        "order %d for component %s already in use!" %
                        (order, componentname))
                autoorder.append(order)
                # check them, they should be there all!
                if (componentname != "Global" and
                    (class_name is None or port_out_list is None
                     or input_data_list is None or connect_bus_list is None)):
                    msg = "Invalid port value or syntax wrong on component: '%s' with parsed settings\n" \
                          "ClassName: %s, PortOut: %s,\n" \
                          "InputData: %s, \n" \
                          "ConnectBus: %s\n"\
                          "  only ClassName for 'Global' can be None, compare parsed settings with defines in config." \
                          % (componentname, class_name, port_out_list, input_data_list, connect_bus_list)
                    raise ValueError(msg)
            except Exception, err:
                # broken component entry: log and skip it, or re-raise when
                # running in fail-on-error mode
                self._logger.error(err)
                if self._fail_on_error:
                    raise
                continue

            # a single bus name is allowed in the config; wrap it in a list
            if type(connect_bus_list) not in (list, tuple):
                connect_bus_list = [connect_bus_list]

            if class_name in self._plugin_map:
                # Observer can be loaded -> Everything fine.
                # self._logger.debug("Loading plug-in: '%s'." % componentname)
                cls_obj = self._plugin_map[class_name](self._data_manager,
                                                       componentname,
                                                       connect_bus_list)
            elif componentname != "Global":
                # Observer can NOT be loaded -> Create Log Entry and raise Exception !
                err_trace = self._get_err_trace()

                # Create Log Entry
                self._logger.error('some python modules have coding errors')
                self._logger.error(
                    'Please check following list for more details:')
                self._logger.error(err_trace)

                msg = "Observer with ClassName %s not found, please check log for more info!" % class_name
                self._logger.error(msg)
                self._logger.error("File: \"valf.log\"")
                raise ValfError(msg, ValfError.ERR_OBSERVER_CLASS_NOT_FOUND)

            # pre-register every declared output port (value None) on all
            # connected buses so other observers can already find them
            for port_out in port_out_list:
                for bus_name in connect_bus_list:
                    tmp = "Register port: Provider="
                    tmp += "'%s', PortName='%s', Bus='%s'." % (
                        componentname, port_out, bus_name)
                    self._logger.debug(tmp)
                    self.set_data_port(port_out, None, bus_name)

            # InputData may be a list of (name, value) pairs ...
            if type(input_data_list) == list:  # do it the usual way
                for input_data in input_data_list:
                    param_name = input_data[0]
                    param_value = input_data[1]
                    for bus_name in connect_bus_list:
                        tmp = "Setting input data.[Component='%s', " % componentname
                        tmp += "Bus='%s', PortName='%s', " % (bus_name,
                                                              param_name)
                        tmp += "PortValue=%s]" % str(param_value)
                        self._logger.debug(tmp)
                        self.set_data_port(param_name, param_value, bus_name)
            # ... or a dict mapping name -> value (Python 2: iteritems)
            elif type(input_data_list
                      ) == dict:  # we've got key value pairs already
                for param_name, param_value in input_data_list.iteritems():
                    for bus_name in connect_bus_list:
                        tmp = "Setting input data.[Component='%s', " % componentname
                        tmp += "Bus='%s', PortName='%s', " % (bus_name,
                                                              param_name)
                        tmp += "PortValue=%s]" % str(param_value)
                        self._logger.debug(tmp)
                        self.set_data_port(param_name, param_value, bus_name)

            # "Global" is only a settings section, not a real observer
            if componentname != "Global":
                self._object_map_list.append({
                    "Order": order,
                    "ComponentName": componentname,
                    "ClsObj": cls_obj
                })

        # If whole Observer loading is done successfully,
        # we write anyway all found coding errors into the Log File as warnings
        if self._plugin_error_list:
            err_trace = self._get_err_trace()
            self._logger.warning('some python modules have coding errors')
            self._logger.warning(
                'Please check following list for more details:')
            self._logger.warning(err_trace)

        # build the final component list sorted by configured order
        self._component_list = []
        if len(self._object_map_list):
            self._object_map_list.sort(key=lambda x: x["Order"])

            for object_map in self._object_map_list:
                self._component_list.append(object_map["ClsObj"])

        if not self._component_list:
            self._logger.error(
                "No component loaded. Please check config file '%s'." %
                str(configfile))
            return False

        self._config_file_loaded = True

        return True
class DataManager(DictWatch):
    """handling ports to exchange data between components

    The manager itself is a dict (`DictWatch`) of bus names, each bus
    holding another `DictWatch` mapping port names to values.
    """
    def __init__(self, default=None):
        """datamanager

        :param default: value to return when bus / port doesn't exist (via get_data_port)
        """
        self._logger = Logger(self.__class__.__name__)
        DictWatch.__init__(self)
        # fallback value returned by get_data_port for unknown bus / port
        self._default = default

    def __str__(self):
        """returns the name
        """
        return self.__class__.__name__

    def __del__(self):
        """mgr is being removed from mem, valf has finished
        """
        print("DataManager '%s' exited" % self.__class__.__name__)

    def set_data_port(self, port, value, bus='global'):
        """Registers port data with given name, value and bus

        If a bus or port is not already declared it will be defined.

        :param port: name of port
        :type port: str
        :param value: value to set port to
        :type value: object
        :param bus: opt. name of bus to use, default "global"
        :type bus: str
        """
        if bus in self:
            self[bus][port] = value
        else:
            # new bus: create its port dict with the first entry
            self[bus] = DictWatch({port: value})

    def get_data_port(self, port, bus="global"):
        """
        returns value of the named data port / bus from data manager

        If the port or bus is not defined the data manager default (see `__init__`) will be returned.
        There is no exception raised and no error in the log file.

        :param port: name of value to be returned
        :type  port: str
        :param bus: opt. name of the bus providing the port, default "global"
        :type  bus: str
        :return: object
        """
        if self.exists_data_port(port, bus):
            return self[bus][port]

        return self._default

    def exists_data_port(self, port_name, bus_name="global"):
        """checks whether port at bus exists or not

        :param port_name: port name to check
        :type  port_name: str
        :param bus_name: bus name to check
        :type  bus_name: str
        :return: whether data port is registered
        :rtype: bool
        """
        return bus_name in self and port_name in self[bus_name]

    def clear_data_ports(self, port_list, bus="global"):
        """
        deletes all ports in given list

        Note: a port missing on the bus raises a KeyError; ports listed
        before it have already been removed at that point.

        :param port_list: list [] of ports, a single port name is accepted too
        :type port_list: list | str
        :param bus: opt. bus name, default "global"
        :type bus: str
        :return: success status, False when the bus doesn't exist
        :rtype: bool
        """
        if bus not in self:
            return False

        # tolerate a single port name instead of a list
        # (isinstance instead of type comparison: also accepts str subclasses)
        if isinstance(port_list, str):
            port_list = [port_list]

        for port in port_list:
            del self[bus][port]
        return True

    def get_registered_bus_list(self):
        """
        provides list of all registered busses

        :return: list of bus names (empty when nothing is registered)
        """
        return self.keys()

    def get_registered_ports(self, bus='global'):
        """
        returns registered ports for specified bus

        :param bus: name of bus to get ports from
        :type  bus: str
        :return: list of port names, empty when the bus is unknown
        :rtype:  list(str)
        """
        if bus in self:
            return self[bus].keys()

        return []

    def port_access_stat(self):
        """
        writes statistic in logger of all unused ports (only read, only written)
        """
        # NOTE(review): assumes DictWatch maintains per-key access counters in
        # `stats` as (read_count, write_count) -- confirm against DictWatch;
        # presumably logged at error level so the report always shows up
        for bus, ports in self.items():
            self._logger.error("Status of: '%s'..." % str(bus))
            for port in ports:
                if ports.stats[port][0] == 0:
                    self._logger.error("...Port '%s' was never read from." %
                                       str(port))
                if ports.stats[port][1] == 1:
                    self._logger.error("...Port '%s' was only set once." %
                                       str(port))
        self._logger.error("End of port status.")

    # @deprecated('set_data_port')
    def RegisterDataPort(self, port_name, port_value, bus_name="Global"):  # pylint: disable=C0103
        """deprecated, use `set_data_port`"""
        self.set_data_port(port_name, port_value, bus_name)

    # @deprecated('set_data_port')
    def SetDataPort(self, port_name, port_value, bus_name="Global"):  # pylint: disable=C0103
        """deprecated, use `set_data_port`"""
        self.set_data_port(port_name, port_value, bus_name)

    # @deprecated('get_data_port')
    def GetDataPort(self, port_name, bus_name="Global"):  # pylint: disable=C0103
        """deprecated, use `get_data_port`"""
        return self.get_data_port(port_name, bus_name)

    # @deprecated('exists_data_port')
    def ExistsDataPort(self, port_name, bus_name="BUS_BASE"):  # pylint: disable=C0103
        """deprecated, use `exists_data_port`"""
        return self.exists_data_port(port_name, bus_name)

    # @deprecated('clear_data_port')
    def ClearDataPorts(self,
                       port_name_list,
                       apl_name="Global",
                       bus_name="BUS_BASE"):  # pylint: disable=C0103
        """deprecated, use `clear_data_ports`; apl_name is ignored"""
        return self.clear_data_ports(port_name_list, bus_name)

    # @deprecated()
    def GetDataPortPool(self):  # pylint: disable=C0103
        """deprecated: the manager itself is the port pool
        """
        return self