Example #1
    def _convert_to_array(self, input_data, row):
        """
        Method used to parse or read an input whose declared type is array.

        If the user set an equation for computing a model parameter, the value
        of that parameter will be a dictionary containing all the data needed
        to compute that parameter for each vertex of the surface in use.
        """
        if KEY_EQUATION in str(input_data) and KEY_FOCAL_POINTS in str(
                input_data) and KEY_SURFACE_GID in str(input_data):
            try:
                input_data = eval(str(input_data))
                # TODO: move to a different level
                equation_type = input_data.get(KEY_DTYPE)
                if equation_type is None:
                    self.log.warning(
                        "Cannot figure out type of equation from input dictionary: %s. "
                        "Returning []." % input_data)
                    return []
                eq_class = get_class_by_name(equation_type)
                equation = eq_class.from_json(input_data[KEY_EQUATION])
                focal_points = json.loads(input_data[KEY_FOCAL_POINTS])
                surface_gid = input_data[KEY_SURFACE_GID]
                surface = load_entity_by_gid(surface_gid)
                return surface.compute_equation(focal_points, equation)
            except Exception:
                self.log.exception(
                    "The parameter %s was ignored. None value was returned.",
                    row['name'])
                return None

        dtype = None
        if KEY_DTYPE in row:
            dtype = row[KEY_DTYPE]
        return string2array(str(input_data), ",", dtype)
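Note: the eval(str(input_data)) call above executes whatever string it receives, which is risky for user-submitted values. A minimal safer sketch, assuming the payload is a plain Python literal dictionary (the key names below are illustrative stand-ins for the KEY_* constants):

import ast

raw = "{'type': 'Gaussian', 'equation': '{}', 'focal_points': '[]', 'surface_gid': 'abc'}"
try:
    # literal_eval accepts Python literals only; it never executes code.
    parsed = ast.literal_eval(raw)
except (ValueError, SyntaxError):
    parsed = None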
Example #3
    def _prepare_metric_operation(self, sim_operation):
        # type: (Operation) -> Operation
        metric_algo = dao.get_algorithm_by_module(MEASURE_METRICS_MODULE, MEASURE_METRICS_CLASS)
        datatype_index = h5.REGISTRY.get_index_for_datatype(TimeSeries)
        time_series_index = dao.get_generic_entity(datatype_index, sim_operation.id, 'fk_from_operation')[0]
        ga = self.prepare_metadata(metric_algo.algorithm_category, time_series_index.fk_parent_burst)
        ga.visible = False

        view_model = get_class_by_name("{}.{}".format(MEASURE_METRICS_MODULE, MEASURE_METRICS_MODEL_CLASS))()
        view_model.time_series = time_series_index.gid
        view_model.algorithms = tuple(ALGORITHMS.keys())
        view_model.generic_attributes = ga

        parent_burst = dao.get_generic_entity(BurstConfiguration, time_series_index.fk_parent_burst, 'gid')[0]
        metric_op_group = dao.get_operationgroup_by_id(parent_burst.fk_metric_operation_group)
        metric_operation_group_id = parent_burst.fk_metric_operation_group
        range_values = sim_operation.range_values
        view_model.operation_group_gid = uuid.UUID(metric_op_group.gid)
        view_model.ranges = json.dumps(parent_burst.ranges)
        view_model.range_values = range_values
        view_model.is_metric_operation = True
        metric_operation = Operation(view_model.gid.hex, sim_operation.fk_launched_by, sim_operation.fk_launched_in,
                                     metric_algo.id, user_group=ga.operation_tag, op_group_id=metric_operation_group_id,
                                     range_values=range_values)
        metric_operation.visible = False
        metric_operation = dao.store_entity(metric_operation)

        metrics_datatype_group = dao.get_generic_entity(DataTypeGroup, metric_operation_group_id,
                                                        'fk_operation_group')[0]
        if metrics_datatype_group.fk_from_operation is None:
            metrics_datatype_group.fk_from_operation = metric_operation.id
            dao.store_entity(metrics_datatype_group)

        self.store_view_model(metric_operation, sim_operation.project, view_model)
        return metric_operation
Example #4
 def populate_option_values_for_dtype(self, project_id, type_name, filter_condition=None,
                                      category_key=None, complex_dt_attributes=None):
     '''
     Converts all datatypes that match the project_id, type_name and filter_condition
     into a list of {name, value} entries used to populate the options in the input tree UI.
     '''
     data_type_cls = get_class_by_name(type_name)
     #todo: send category instead of category_key to avoid redundant queries
     #NOTE these functions are coupled via data_list, _populate_values makes no sense without _get_available_datatypes
     data_list, total_count = get_filtered_datatypes(project_id, data_type_cls,
                                                     filter_condition)
     values = self._populate_values(data_list, data_type_cls,
                                    category_key, complex_dt_attributes)
     return values, total_count
Example #5
    def _get_backend_client(adapter_instance):
        # type: (ABCAdapter) -> BackendClient

        # For the moment run only simulations on HPC
        if TvbProfile.current.hpc.IS_HPC_RUN and type(adapter_instance) is get_class_by_name(
                "{}.{}".format(SIMULATOR_MODULE, SIMULATOR_CLASS)):
            if not TvbProfile.current.hpc.CAN_RUN_HPC:
                raise InvalidSettingsException("We can not enable HPC run. Most probably pyunicore is not installed!")
            # Return an entity capable to submit jobs to HPC.
            return HPCSchedulerClient()
        if TvbProfile.current.cluster.IS_DEPLOY:
            # Return an entity capable to submit jobs to the cluster.
            return ClusterSchedulerClient()
        # Return a thread launcher.
        return StandAloneClient()
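Every snippet on this page resolves classes at runtime through get_class_by_name. A minimal sketch of what such a helper typically does, assuming a fully qualified "module.ClassName" string (this is not the actual TVB implementation):

import importlib

def get_class_by_name(fully_qualified_name):
    # "a.b.module.ClassName" -> ("a.b.module", "ClassName")
    module_name, class_name = fully_qualified_name.rsplit('.', 1)
    return getattr(importlib.import_module(module_name), class_name)

print(get_class_by_name('collections.OrderedDict'))  # <class 'collections.OrderedDict'>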
Example #6
    def review_operation_inputs(self, parameters, flat_interface):
        """
        Find out which of the submitted parameters are actually DataTypes and
        return a list holding all the DataTypes found in parameters.
        :returns: list of DataTypes and the changed parameters.
        """
        inputs_datatypes = []
        changed_parameters = dict()

        for field_dict in flat_interface:
            eq_flat_interface_name = self._find_field_submitted_name(
                parameters, field_dict[KEY_NAME])

            if eq_flat_interface_name is not None:
                is_datatype = False
                if field_dict.get(KEY_DATATYPE):
                    eq_datatype = load_entity_by_gid(
                        parameters.get(str(eq_flat_interface_name)))
                    if eq_datatype is not None:
                        inputs_datatypes.append(eq_datatype)
                        is_datatype = True
                elif isinstance(field_dict[KEY_TYPE], basestring):
                    try:
                        class_entity = get_class_by_name(field_dict[KEY_TYPE])
                        if issubclass(class_entity, MappedType):
                            data_gid = parameters.get(str(
                                field_dict[KEY_NAME]))
                            data_type = load_entity_by_gid(data_gid)
                            if data_type:
                                inputs_datatypes.append(data_type)
                                is_datatype = True
                    except ImportError:
                        pass

                if is_datatype:
                    changed_parameters[field_dict[
                        KEY_LABEL]] = inputs_datatypes[-1].display_name
                else:
                    if field_dict[KEY_NAME] in parameters and (
                            KEY_DEFAULT not in field_dict
                            or str(field_dict[KEY_DEFAULT]) != str(
                                parameters[field_dict[KEY_NAME]])):
                        changed_parameters[field_dict[KEY_LABEL]] = str(
                            parameters[field_dict[KEY_NAME]])

        return inputs_datatypes, changed_parameters
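The elif branch above resolves a dotted type name and treats an import failure as "not a DataType" rather than an error. A self-contained sketch of that defensive pattern, using dict as a stand-in for MappedType:

import importlib

def is_subclass_of(type_name, base_cls):
    # Resolve "module.ClassName"; any bad path yields False instead of raising.
    try:
        module_name, class_name = type_name.rsplit('.', 1)
        cls = getattr(importlib.import_module(module_name), class_name)
        return issubclass(cls, base_cls)
    except (ImportError, AttributeError, ValueError):
        return False

print(is_subclass_of('collections.OrderedDict', dict))  # True
print(is_subclass_of('no.such.module.Klass', dict))     # False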
Example #7
 def populate_option_values_for_dtype(self, project_id, type_name, filter_condition=None,
                                      category_key=None):
     '''
     Converts all datatypes that match the project_id, type_name and filter_condition
     into a list of {name, value} entries used to populate the options in the input tree UI.
     '''
     # todo: normalize all itree[KEY_TYPE] to be a python type, not a str, not a None etc
     if isinstance(type_name, basestring):
         data_type_cls = get_class_by_name(type_name)
     else:
         data_type_cls = type_name
     #todo: send category instead of category_key to avoid redundant queries
     #NOTE these functions are coupled via data_list, _populate_values makes no sense without _get_available_datatypes
     data_list, total_count = get_filtered_datatypes(project_id, data_type_cls,
                                                     filter_condition)
     values = self._populate_values(data_list, data_type_cls, category_key)
     return values, total_count
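The {name, value} shape mentioned in the docstrings is the option format consumed by the input tree UI. A hedged, self-contained sketch of that conversion; the (gid, display_name) row layout is an assumption:

def to_options(data_list):
    # The UI consumes a list of {'name': ..., 'value': ...} dicts.
    return [{'name': display_name, 'value': gid} for gid, display_name in data_list]

print(to_options([('gid-1', 'Connectivity 76'), ('gid-2', 'Connectivity 192')]))
# [{'name': 'Connectivity 76', 'value': 'gid-1'}, {'name': 'Connectivity 192', 'value': 'gid-2'}]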
Example #9
    def _prepare_metric_operation(self, sim_operation):
        # type: (Operation) -> Operation
        metric_algo = dao.get_algorithm_by_module(MEASURE_METRICS_MODULE,
                                                  MEASURE_METRICS_CLASS)
        datatype_index = h5.REGISTRY.get_index_for_datatype(TimeSeries)
        time_series_index = dao.get_generic_entity(datatype_index,
                                                   sim_operation.id,
                                                   'fk_from_operation')[0]

        view_model = get_class_by_name("{}.{}".format(
            MEASURE_METRICS_MODULE, MEASURE_METRICS_MODEL_CLASS))()
        view_model.time_series = time_series_index.gid
        view_model.algorithms = tuple(choices.values())

        range_values = sim_operation.range_values
        metadata = {
            DataTypeMetaData.KEY_BURST: time_series_index.fk_parent_burst
        }
        metadata, user_group = self._prepare_metadata(
            metadata, metric_algo.algorithm_category, None, {})
        meta_str = json.dumps(metadata)

        parent_burst = dao.get_generic_entity(
            BurstConfiguration, time_series_index.fk_parent_burst, 'id')[0]
        metric_operation_group_id = parent_burst.fk_metric_operation_group
        metric_operation = Operation(sim_operation.fk_launched_by,
                                     sim_operation.fk_launched_in,
                                     metric_algo.id,
                                     json.dumps({'gid': view_model.gid.hex}),
                                     meta_str,
                                     op_group_id=metric_operation_group_id,
                                     range_values=range_values)
        metric_operation.visible = False
        stored_metric_operation = dao.store_entity(metric_operation)

        metrics_datatype_group = dao.get_generic_entity(
            DataTypeGroup, metric_operation_group_id, 'fk_operation_group')[0]
        if metrics_datatype_group.fk_from_operation is None:
            metrics_datatype_group.fk_from_operation = metric_operation.id

        self._store_view_model(stored_metric_operation, sim_operation.project,
                               view_model)
        return stored_metric_operation
Example #11
    def _load_entity(self, row, datatype_gid, kwargs, metadata_out):
        """
        Load a specific DataType entity, as specified in the DATA_TYPE table.
        Check that the GID refers to the correct DataType sub-class, otherwise raise an exception.
        Updates metadata_out with the metadata of this entity.
        """

        entity = load_entity_by_gid(datatype_gid)
        if entity is None:
            ## Validate required DT one more time, after actual retrieval from DB:
            if row.get(KEY_REQUIRED):
                raise InvalidParameterException(
                    "Empty DataType value for required parameter %s [%s]" %
                    (row[KEY_LABEL], row[KEY_NAME]))

            return None

        expected_dt_class = row[KEY_TYPE]
        if isinstance(expected_dt_class, basestring):
            expected_dt_class = get_class_by_name(expected_dt_class)
        if not isinstance(entity, expected_dt_class):
            raise InvalidParameterException(
                "Expected param %s [%s] of type %s but got type %s." %
                (row[KEY_LABEL], row[KEY_NAME], expected_dt_class.__name__,
                 entity.__class__.__name__))

        result = entity

        ## Step 2 of updating Meta-data from parent DataType.
        if entity.fk_parent_burst:
            ## Link just towards the last Burst identified.
            metadata_out[DataTypeMetaData.KEY_BURST] = entity.fk_parent_burst

        if entity.user_tag_1 and DataTypeMetaData.KEY_TAG_1 not in metadata_out:
            metadata_out[DataTypeMetaData.KEY_TAG_1] = entity.user_tag_1

        current_subject = metadata_out[DataTypeMetaData.KEY_SUBJECT]
        if current_subject == DataTypeMetaData.DEFAULT_SUBJECT:
            metadata_out[DataTypeMetaData.KEY_SUBJECT] = entity.subject
        else:
            if entity.subject != current_subject and entity.subject not in current_subject.split(
                    ','):
                metadata_out[
                    DataTypeMetaData.
                    KEY_SUBJECT] = current_subject + ',' + entity.subject
        ##  End Step 2 - Meta-data Updates

        ## Validate current entity to be compliant with specified ROW filters.
        dt_filter = row.get(KEY_CONDITION)
        if dt_filter is not None and entity is not None and not dt_filter.get_python_filter_equivalent(
                entity):
            ## If a filter is declared, check that the submitted DataType is in compliance to it.
            raise InvalidParameterException(
                "Field %s [%s] did not pass filters." %
                (row[KEY_LABEL], row[KEY_NAME]))

        # In case a specific field in entity is to be used, use it
        if KEY_FIELD in row:
            # note: this cannot be replaced by getattr(entity, row[KEY_FIELD])
            # at least BCT has 'fields' like scaled_weights()
            result = eval('entity.' + row[KEY_FIELD])
        if ATT_METHOD in row:
            # The 'shape' attribute of an array wrapper is overridden by us.
            # The following check is made only to improve performance:
            # data is found in the dictionary in O(1), otherwise in O(n).
            prefix = row[KEY_NAME] + "_" + row[ATT_PARAMETERS]
            if hasattr(entity, 'shape'):
                param_dict = {}
                for i in range(1, len(entity.shape)):
                    param_key = prefix + "_" + str(i - 1)
                    if param_key in kwargs:
                        param_dict[param_key] = kwargs[param_key]
            else:
                param_dict = dict(
                    (k, v) for k, v in kwargs.items() if k.startswith(prefix))
            result = getattr(entity, row[ATT_METHOD])(param_dict)
        return result
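The comment above on KEY_FIELD explains why plain getattr cannot replace eval('entity.' + row[KEY_FIELD]): some field specs are calls, such as "scaled_weights()". A hedged eval-free alternative, assuming each spec is either an attribute name or a no-argument call:

def resolve_field(entity, spec):
    # "scaled_weights()" -> call the method; "weights" -> plain attribute.
    if spec.endswith('()'):
        return getattr(entity, spec[:-2])()
    return getattr(entity, spec)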
Example #13
    def remove_datatype(self, project_id, datatype_gid, skip_validation=False, existing_dt_links=None):
        """
        Method used for removing a DataType. If the given DataType is a DatatypeGroup
        or a DataType from a DataTypeGroup, then this method will remove the entire group.
        The operation(s) used for creating the DataType(s) will also be removed.
        """
        datatype = dao.get_datatype_by_gid(datatype_gid)
        if datatype is None:
            self.logger.warning("Attempt to delete DT[%s] which no longer exists." % datatype_gid)
            return

        if datatype.parent_operation.fk_launched_in != int(project_id):
            self.logger.warning("Datatype with GUID [%s] has been moved to another project and does "
                                "not need to be deleted anymore." % datatype_gid)
            return

        is_datatype_group = False
        datatype_group = None
        new_dt_links = []

        # Datatype Groups were already handled when the first DatatypeMeasureIndex was found
        if dao.is_datatype_group(datatype_gid):
            is_datatype_group = True
            datatype_group = datatype
        # Found the first DatatypeMeasureIndex from a group
        elif datatype.fk_datatype_group is not None:
            is_datatype_group = True
            # We load it this way to make sure we have the 'fk_operation_group' in every case
            datatype_group_gid = dao.get_datatype_by_id(datatype.fk_datatype_group).gid
            datatype_group = h5.load_entity_by_gid(datatype_group_gid)

        operations_set = [datatype.fk_from_operation]
        correct = True

        if is_datatype_group:
            operations_set = [datatype_group.fk_from_operation]
            self.logger.debug("Removing datatype group %s" % datatype_group)

            datatypes = self.get_all_datatypes_from_data(datatype_group)
            first_datatype = datatypes[0]

            if hasattr(first_datatype, 'fk_source_gid'):
                ts = h5.load_entity_by_gid(first_datatype.fk_source_gid)
                ts_group = dao.get_datatypegroup_by_op_group_id(ts.parent_operation.fk_operation_group)
                dm_group = datatype_group
            else:
                dt_measure_index = get_class_by_name("{}.{}".format(DATATYPE_MEASURE_INDEX_MODULE,
                                                                    DATATYPE_MEASURE_INDEX_CLASS))
                dm_group = dao.get_datatype_measure_group_from_ts_from_pse(first_datatype.gid, dt_measure_index)
                ts_group = datatype_group

            links = []

            if ts_group:
                links.extend(dao.get_links_for_datatype(ts_group.id))
                correct = correct and self._remove_operation_group(ts_group.fk_operation_group, project_id,
                                                                   skip_validation, operations_set, links)

            if dm_group:
                links.extend(dao.get_links_for_datatype(dm_group.id))
                correct = correct and self._remove_operation_group(dm_group.fk_operation_group, project_id,
                                                                   skip_validation, operations_set, links)

            if len(links) > 0:
                # We want to get the links for the first TSIndex directly
                # This code works for all cases
                datatypes = dao.get_datatype_in_group(ts_group.id)
                ts = datatypes[0]

                new_dt_links = self._add_links_for_datatype_references(ts, links[0].fk_to_project, links[0].id,
                                                                       existing_dt_links)

        else:
            self.logger.debug("Removing datatype %s" % datatype)
            links = dao.get_links_for_datatype(datatype.id)

            if len(links) > 0:
                new_dt_links = self._add_links_for_datatype_references(datatype, links[0].fk_to_project, links[0].id,
                                                                       existing_dt_links)

            self._remove_project_node_files(project_id, datatype.gid, links, skip_validation)

        # Remove the Operation entity in case no other DataType needs it.
        project = dao.get_project_by_id(project_id)
        for operation_id in operations_set:
            dependent_dt = dao.get_generic_entity(DataType, operation_id, "fk_from_operation")
            if len(dependent_dt) > 0:
                # Do not remove the Operation in case a DataType referring to it still exists.
                continue
            correct = correct and dao.remove_entity(Operation, operation_id)
            # Make sure Operation folder is removed
            self.storage_interface.remove_operation_data(project.name, operation_id)

        self.storage_interface.push_folder_to_sync(project.name)
        if not correct:
            raise RemoveDataTypeException("Could not remove DataType " + str(datatype_gid))
        return new_dt_links
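The final loop removes an Operation only when no DataType still references it. A minimal sketch of that guard in isolation; dependents_of and remove stand in for the dao calls and are assumptions:

def remove_unreferenced_operations(operations_set, dependents_of, remove):
    correct = True
    for operation_id in operations_set:
        if dependents_of(operation_id):
            continue  # a DataType still refers to this Operation
        correct = remove(operation_id) and correct
    return correct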