Example no. 1
    def __check_integrity(self, result):
        """
        Check that the returned parameters for the LAUNCH operation
        are of the type specified in the adapter's interface.
        """
        entity_id = self.__module__ + '.' + self.__class__.__name__

        for result_entity in result:
            if isinstance(result_entity, list) and len(result_entity) > 0:
                #### Determine the first non-None element
                first_item = None
                for res in result_entity:
                    if res is not None:
                        first_item = res
                        break
                if first_item is None:
                    #### All list items are None, nothing to validate
                    return
                #### Now check if the first item has a supported type
                if not self.__is_data_in_supported_types(first_item):
                    msg = "Unexpected DataType %s"
                    raise InvalidParameterException(msg % type(first_item))

                first_item_type = type(first_item)
                for res in result_entity:
                    if not isinstance(res, first_item_type):
                        msg = '%s - Heterogeneous types (%s). Expected %s list.'
                        raise InvalidParameterException(
                            msg % (entity_id, type(res), first_item_type))
            else:
                if not self.__is_data_in_supported_types(result_entity):
                    msg = "Unexpected DataType %s"
                    raise InvalidParameterException(msg % type(result_entity))
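
For readability, here is a standalone sketch of the same homogeneity check outside the adapter class. The SUPPORTED_TYPES tuple and the module-level check_integrity name are placeholders for illustration only; the real adapter delegates the type decision to __is_data_in_supported_types().

# Standalone sketch of the check above (illustrative only).
# SUPPORTED_TYPES is an assumed placeholder, not part of the adapter.
SUPPORTED_TYPES = (int, float, str)


class InvalidParameterException(Exception):
    pass


def check_integrity(result):
    """Reject entries (or list elements) with unsupported or mixed types."""
    for result_entity in result:
        if isinstance(result_entity, list) and len(result_entity) > 0:
            # the first non-None element decides the expected type of the whole list
            first_item = next((res for res in result_entity if res is not None), None)
            if first_item is None:
                return
            if not isinstance(first_item, SUPPORTED_TYPES):
                raise InvalidParameterException("Unexpected DataType %s" % type(first_item))
            for res in result_entity:
                if not isinstance(res, type(first_item)):
                    raise InvalidParameterException("Heterogeneous types (%s). Expected %s list."
                                                    % (type(res), type(first_item)))
        elif not isinstance(result_entity, SUPPORTED_TYPES):
            raise InvalidParameterException("Unexpected DataType %s" % type(result_entity))


check_integrity([1.0, [2, 3, 4]])   # passes
# check_integrity([[1, "two"]])     # raises InvalidParameterException
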
Example no. 2
    def __check_integrity(self, result):
        """
        Check that the returned parameters for the LAUNCH operation
        are of the type specified in the adapter's interface.
        """
        for result_entity in result:
            if result_entity is None:
                continue
            if not self.__is_data_in_supported_types(result_entity):
                msg = "Unexpected output DataType %s"
                raise InvalidParameterException(msg % type(result_entity))
Example no. 3
    def launch(self, view_model):
        # type: (SimulatorAdapterModel) -> [TimeSeriesIndex, SimulationHistoryIndex]
        """
        Called from the GUI to launch a simulation.
          *: string class name of chosen model, etc...
          *_parameters: dictionary of parameters for chosen model, etc...
          connectivity: tvb.datatypes.connectivity.Connectivity object.
          surface: tvb.datatypes.surfaces.CorticalSurface or None.
          stimulus: tvb.datatypes.patterns.* object
        """
        result_h5 = dict()
        result_indexes = dict()
        start_time = self.algorithm.current_step * self.algorithm.integrator.dt

        self.algorithm.configure(full_configure=False)
        if self.branch_simulation_state_gid is not None:
            history = self.load_traited_by_gid(self.branch_simulation_state_gid)
            assert isinstance(history, SimulationHistory)
            history.fill_into(self.algorithm)

        region_map, region_volume_map = self._try_load_region_mapping()
        for monitor in self.algorithm.monitors:

            if monitor.period > view_model.simulation_length:
                raise InvalidParameterException("Sampling period for monitors can not be bigger "
                                                "than the simulation length!")

            m_name = type(monitor).__name__
            ts = monitor.create_time_series(self.algorithm.connectivity, self.algorithm.surface, region_map,
                                            region_volume_map)
            self.log.debug("Monitor created the TS")
            ts.start_time = start_time

            ts_index_class = h5.REGISTRY.get_index_for_datatype(type(ts))
            ts_index = ts_index_class()
            ts_index.fill_from_has_traits(ts)
            ts_index.data_ndim = 4
            ts_index.state = 'INTERMEDIATE'

            state_variable_dimension_name = ts.labels_ordering[1]
            if m_name in self.HAVE_STATE_VARIABLES:
                selected_vois = [self.algorithm.model.variables_of_interest[idx] for idx in monitor.voi]
                ts.labels_dimensions[state_variable_dimension_name] = selected_vois
                ts_index.labels_dimensions = json.dumps(ts.labels_dimensions)

            ts_h5_class = h5.REGISTRY.get_h5file_for_datatype(type(ts))
            ts_h5_path = h5.path_for(self._get_output_path(), ts_h5_class, ts.gid)
            self.log.info("Generating Timeseries at: {}".format(ts_h5_path))
            ts_h5 = ts_h5_class(ts_h5_path)
            ts_h5.store(ts, scalars_only=True, store_references=False)
            ts_h5.sample_rate.store(ts.sample_rate)
            ts_h5.nr_dimensions.store(ts_index.data_ndim)
            # Storing the generic attributes here as well is redundant, except for HPC runs
            ts_h5.store_generic_attributes(self.generic_attributes)
            ts_h5.store_references(ts)

            result_indexes[m_name] = ts_index
            result_h5[m_name] = ts_h5

        # Run simulation
        self.log.debug("Starting simulation...")
        for result in self.algorithm(simulation_length=self.algorithm.simulation_length):
            for j, monitor in enumerate(self.algorithm.monitors):
                if result[j] is not None:
                    m_name = type(monitor).__name__
                    ts_h5 = result_h5[m_name]
                    ts_h5.write_time_slice([result[j][0]])
                    ts_h5.write_data_slice([result[j][1]])

        self.log.debug("Completed simulation, starting to store simulation state ")
        # Now store simulator history, at the simulation end
        results = []
        if not self._is_group_launch():
            simulation_history = SimulationHistory()
            simulation_history.populate_from(self.algorithm)
            self.generic_attributes.visible = False
            history_index = h5.store_complete(simulation_history, self._get_output_path(), self.generic_attributes)
            self.generic_attributes.visible = True
            history_index.fixed_generic_attributes = True
            results.append(history_index)

        self.log.debug("Simulation state persisted, returning results ")
        for monitor in self.algorithm.monitors:
            m_name = type(monitor).__name__
            ts_shape = result_h5[m_name].read_data_shape()
            result_indexes[m_name].fill_shape(ts_shape)
            result_h5[m_name].close()
        self.log.debug("%s: Adapter simulation finished!!" % str(self))
        results.extend(result_indexes.values())
        return results
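
The core of launch() is the streaming loop: one writer is prepared per monitor before the simulation starts, then each per-monitor (time, data) pair yielded by the simulator generator is routed to the matching writer, skipping monitors that produced no sample at that step. Below is a minimal sketch of that pattern; SliceWriter and FakeSimulator are invented stand-ins, not the real TVB H5 writers or simulator.

# Stripped-down sketch of the per-monitor streaming pattern used by launch().
class SliceWriter:
    """Collects (time, data) slices for a single monitor."""

    def __init__(self):
        self.times, self.data = [], []

    def write(self, time_point, data_point):
        self.times.append(time_point)
        self.data.append(data_point)


class FakeSimulator:
    """Yields one entry per monitor at every step; None means 'no sample this step'."""
    monitors = ("RawMonitor", "TemporalAverageMonitor")

    def __call__(self, steps):
        for step in range(steps):
            raw = (float(step), [step * 0.1])
            # pretend the second monitor only samples every other step
            avg = (float(step), [step * 0.2]) if step % 2 == 0 else None
            yield [raw, avg]


simulator = FakeSimulator()
writers = {name: SliceWriter() for name in simulator.monitors}   # one writer per monitor, built up-front

for result in simulator(steps=4):
    for j, name in enumerate(simulator.monitors):
        if result[j] is not None:                                # this monitor produced a sample
            time_point, data_point = result[j]
            writers[name].write(time_point, data_point)

print({name: len(writer.times) for name, writer in writers.items()})
# {'RawMonitor': 4, 'TemporalAverageMonitor': 2}
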
Example no. 4
    def convert_ui_inputs(self,
                          flat_input_interface,
                          kwargs,
                          metadata_out,
                          validation_required=True):
        """
        Convert HTTP POST parameters into Python parameters.
        """
        kwa = {}
        simple_select_list, to_skip_dict_subargs = [], []
        for row in flat_input_interface:
            row_attr = row[KEY_NAME]
            row_type = row[KEY_TYPE]
            ## If a required attribute was submitted empty there is no point in continuing, so just raise an exception
            if validation_required and row.get(KEY_REQUIRED) and kwargs.get(
                    row_attr) == "":
                msg = "Parameter %s [%s] is required for %s but no value was submitted! Please relaunch with valid parameters."
                raise InvalidParameterException(
                    msg %
                    (row[KEY_LABEL], row[KEY_NAME], self.__class__.__name__))

            try:
                if row_type == TYPE_DICT:
                    kwa[row_attr], taken_keys = self._get_dictionary(
                        row, **kwargs)
                    for key in taken_keys:
                        if key in kwa:
                            del kwa[key]
                        to_skip_dict_subargs.append(key)
                    continue
                ## Dictionary subargs that were previously processed should be ignored
                if row_attr in to_skip_dict_subargs:
                    continue

                if row_attr not in kwargs:
                    ## DataType sub-attributes are not submitted with GID in their name...
                    kwa_name = self._find_field_submitted_name(
                        kwargs, row_attr, True)
                    if kwa_name is None:
                        ## Do not populate attributes not submitted
                        continue
                    kwargs[row_attr] = kwargs[kwa_name]
                    ## do not del kwargs[kwa_name] here; the original param is useful for retrieving op.input DTs
                elif self._is_parent_not_submitted(row, kwargs):
                    ## Also do not populate sub-attributes from options not selected
                    del kwargs[row_attr]
                    continue

                if row_type == TYPE_ARRAY:
                    kwa[row_attr] = self._convert_to_array(
                        kwargs[row_attr], row)
                    self._validate_range_for_array_input(kwa[row_attr], row)
                elif row_type == TYPE_LIST:
                    if isinstance(kwargs[row_attr], list):
                        kwa[row_attr] = kwargs[row_attr]
                    else:
                        kwa[row_attr] = json.loads(kwargs[row_attr])
                elif row_type == TYPE_BOOL:
                    kwa[row_attr] = bool(kwargs[row_attr])
                elif row_type == TYPE_INT:
                    if kwargs[row_attr] in [None, '', 'None']:
                        kwa[row_attr] = None
                    else:
                        kwa[row_attr] = int(kwargs[row_attr])
                        self._validate_range_for_value_input(
                            kwa[row_attr], row)
                elif row_type == TYPE_FLOAT:
                    if kwargs[row_attr] in ['', 'None']:
                        kwa[row_attr] = None
                    else:
                        kwa[row_attr] = float(kwargs[row_attr])
                        self._validate_range_for_value_input(
                            kwa[row_attr], row)
                elif row_type == TYPE_STR:
                    kwa[row_attr] = kwargs[row_attr]
                elif row_type in [TYPE_SELECT, TYPE_MULTIPLE]:
                    val = kwargs[row_attr]
                    if row_type == TYPE_MULTIPLE and not isinstance(val, list):
                        val = [val]
                    kwa[row_attr] = val
                    if row_type == TYPE_SELECT:
                        simple_select_list.append(row_attr)
                elif row_type == TYPE_UPLOAD:
                    kwa[row_attr] = kwargs[row_attr]
                else:
                    ## DataType parameter to be processed:
                    simple_select_list.append(row_attr)
                    datatype_gid = kwargs[row_attr]
                    ## Load filtered and trimmed attribute (e.g. field is applied if specified):
                    kwa[row_attr] = self._load_entity(row, datatype_gid,
                                                      kwargs, metadata_out)
                    if KEY_FIELD in row:
                        # Add entity_GID to the parameters to recognize original input
                        kwa[row_attr + '_gid'] = datatype_gid

            except TVBException:
                raise
            except Exception:
                self.log.exception('convert_ui_inputs failed')
                raise InvalidParameterException(
                    "Invalid or missing value in field %s [%s]" %
                    (row[KEY_LABEL], row[KEY_NAME]))

        return collapse_params(kwa, simple_select_list)
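
The long if/elif chain in convert_ui_inputs() is essentially a row-driven dispatch from submitted form strings to typed Python values. A much smaller converter can illustrate the idea; the 'name'/'type'/'required' row keys and the type labels below are simplified stand-ins for the KEY_* / TYPE_* constants, not the actual interface description format.

# Minimal sketch of row-driven conversion of HTTP form strings into typed values.
import json


class InvalidParameterException(Exception):
    pass


CONVERTERS = {
    'int': lambda raw: None if raw in (None, '', 'None') else int(raw),
    'float': lambda raw: None if raw in ('', 'None') else float(raw),
    'bool': lambda raw: bool(raw),
    'list': lambda raw: raw if isinstance(raw, list) else json.loads(raw),
    'str': lambda raw: raw,
}


def convert_ui_inputs(flat_input_interface, kwargs):
    converted = {}
    for row in flat_input_interface:
        name, row_type = row['name'], row['type']
        if row.get('required') and kwargs.get(name) == "":
            raise InvalidParameterException("Parameter %s is required" % name)
        if name not in kwargs:
            continue                      # do not populate attributes that were not submitted
        try:
            converted[name] = CONVERTERS[row_type](kwargs[name])
        except (ValueError, KeyError):
            raise InvalidParameterException("Invalid value in field %s" % name)
    return converted


rows = [{'name': 'steps', 'type': 'int', 'required': True},
        {'name': 'dt', 'type': 'float'},
        {'name': 'monitors', 'type': 'list'}]
print(convert_ui_inputs(rows, {'steps': '10', 'dt': '0.1', 'monitors': '["Raw"]'}))
# {'steps': 10, 'dt': 0.1, 'monitors': ['Raw']}
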
Example no. 5
    def _load_entity(self, row, datatype_gid, kwargs, metadata_out):
        """
        Load a specific DataType entity, as specified in the DATA_TYPE table.
        Check that the GID belongs to the expected DataType sub-class, otherwise raise an exception.
        Update metadata_out with the metadata of this entity.
        """

        entity = load_entity_by_gid(datatype_gid)
        if entity is None:
            ## Validate required DT one more time, after actual retrieval from DB:
            if row.get(KEY_REQUIRED):
                raise InvalidParameterException(
                    "Empty DataType value for required parameter %s [%s]" %
                    (row[KEY_LABEL], row[KEY_NAME]))

            return None

        expected_dt_class = row[KEY_TYPE]
        if isinstance(expected_dt_class, basestring):
            expected_dt_class = get_class_by_name(expected_dt_class)
        if not isinstance(entity, expected_dt_class):
            raise InvalidParameterException(
                "Expected param %s [%s] of type %s but got type %s." %
                (row[KEY_LABEL], row[KEY_NAME], expected_dt_class.__name__,
                 entity.__class__.__name__))

        result = entity

        ## Step 2 of updating Meta-data from parent DataType.
        if entity.fk_parent_burst:
            ## Link just towards the last Burst identified.
            metadata_out[DataTypeMetaData.KEY_BURST] = entity.fk_parent_burst

        if entity.user_tag_1 and DataTypeMetaData.KEY_TAG_1 not in metadata_out:
            metadata_out[DataTypeMetaData.KEY_TAG_1] = entity.user_tag_1

        current_subject = metadata_out[DataTypeMetaData.KEY_SUBJECT]
        if current_subject == DataTypeMetaData.DEFAULT_SUBJECT:
            metadata_out[DataTypeMetaData.KEY_SUBJECT] = entity.subject
        elif entity.subject != current_subject and entity.subject not in current_subject.split(','):
            metadata_out[DataTypeMetaData.KEY_SUBJECT] = current_subject + ',' + entity.subject
        ##  End Step 2 - Meta-data Updates

        ## Validate current entity to be compliant with specified ROW filters.
        dt_filter = row.get(KEY_CONDITION)
        if dt_filter is not None and not dt_filter.get_python_filter_equivalent(entity):
            ## If a filter is declared, check that the submitted DataType complies with it.
            raise InvalidParameterException("Field %s [%s] did not pass filters." %
                                            (row[KEY_LABEL], row[KEY_NAME]))

        # In case a specific field in entity is to be used, use it
        if KEY_FIELD in row:
            # note: this cannot be replaced by getattr(entity, row[KEY_FIELD])
            # at least BCT has 'fields' like scaled_weights()
            result = eval('entity.' + row[KEY_FIELD])
        if ATT_METHOD in row:
            # The 'shape' attribute of an arraywrapper is overridden by us.
            # The check below exists only to improve performance: when a shape is available,
            # the expected keys can be looked up in the dictionary in O(1); otherwise the
            # data is found by scanning all kwargs in O(n).
            prefix = row[KEY_NAME] + "_" + row[ATT_PARAMETERS]
            if hasattr(entity, 'shape'):
                param_dict = {}
                for i in range(1, len(entity.shape)):
                    param_key = prefix + "_" + str(i - 1)
                    if param_key in kwargs:
                        param_dict[param_key] = kwargs[param_key]
            else:
                param_dict = dict(
                    (k, v) for k, v in kwargs.items() if k.startswith(prefix))
            result = getattr(entity, row[ATT_METHOD])(param_dict)
        return result
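
The ATT_METHOD branch at the end collects the kwargs that belong to one input row by a shared name prefix, taking the O(1) path when the entity exposes a shape and falling back to an O(n) scan otherwise. Here is a small standalone sketch of that prefix filtering; the 'weights_parameters' prefix and the shape value are invented for illustration.

# Sketch of the prefix filtering used by the ATT_METHOD branch above.
def collect_prefixed_params(prefix, kwargs, shape=None):
    """Pick the kwargs belonging to one input row.

    When the entity exposes a shape, the expected keys can be built directly
    (dictionary lookups, O(1) each); otherwise every key is scanned (O(n)).
    """
    if shape is not None:
        expected = (prefix + "_" + str(i - 1) for i in range(1, len(shape)))
        return {key: kwargs[key] for key in expected if key in kwargs}
    return {key: value for key, value in kwargs.items() if key.startswith(prefix)}


kwargs = {'weights_parameters_0': '2', 'weights_parameters_1': '5', 'tract_lengths_0': '1'}
print(collect_prefixed_params('weights_parameters', kwargs, shape=(76, 76, 3)))
# {'weights_parameters_0': '2', 'weights_parameters_1': '5'}
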