def launch(self, view_model):
        # type: (CrossCorrelateAdapterModel) -> [CrossCorrelationIndex]
        """ 
        Launch algorithm and build results.
        Compute the node-pairwise cross-correlation of the source 4D TimeSeries represented by the index given as input.

        Return a CrossCorrelationIndex. Create a CrossCorrelationH5 that contains the cross-correlation
        sequences for all possible combinations of the nodes.

        See: http://www.scipy.org/doc/api_docs/SciPy.signal.signaltools.html#correlate

        :param view_model: the ViewModel keeping the algorithm inputs
        :return: the cross correlation index for the given time series
        :rtype: `CrossCorrelationIndex`
        """
        # --------- Prepare CrossCorrelationIndex and CrossCorrelationH5 objects for result ------------##
        cross_corr_index = CrossCorrelationIndex()
        cross_corr_h5_path = h5.path_for(self.storage_path, CrossCorrelationH5, cross_corr_index.gid)
        cross_corr_h5 = CrossCorrelationH5(cross_corr_h5_path)

        node_slice = [slice(self.input_shape[0]), None, slice(self.input_shape[2]), slice(self.input_shape[3])]
        # ---------- Iterate over slices and compose final result ------------##
        small_ts = TimeSeries()

        with h5.h5_file_for_index(self.input_time_series_index) as ts_h5:
            small_ts.sample_period = ts_h5.sample_period.load()
            small_ts.sample_period_unit = ts_h5.sample_period_unit.load()
            partial_cross_corr = None
            for var in range(self.input_shape[1]):
                node_slice[1] = slice(var, var + 1)
                small_ts.data = ts_h5.read_data_slice(tuple(node_slice))
                partial_cross_corr = self._compute_cross_correlation(small_ts, ts_h5)
                cross_corr_h5.write_data_slice(partial_cross_corr)

        partial_cross_corr.source.gid = view_model.time_series
        partial_cross_corr.gid = uuid.UUID(cross_corr_index.gid)

        cross_corr_index.fill_from_has_traits(partial_cross_corr)
        self.fill_index_from_h5(cross_corr_index, cross_corr_h5)

        cross_corr_h5.store(partial_cross_corr, scalars_only=True)
        cross_corr_h5.close()

        return cross_corr_index
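
The slicing pattern above is worth seeing in isolation: the adapter walks the 4D
time series one state variable at a time, so only a single variable's data is in
memory per iteration. A minimal numpy sketch of the same idea (array shape and
names are illustrative, not TVB API):

import numpy

# toy 4D series: (time, state variables, nodes, modes)
data = numpy.random.rand(100, 3, 16, 1)

node_slice = [slice(data.shape[0]), None, slice(data.shape[2]), slice(data.shape[3])]
for var in range(data.shape[1]):
    node_slice[1] = slice(var, var + 1)   # select one state variable
    chunk = data[tuple(node_slice)]       # shape: (100, 1, 16, 1)
    assert chunk.shape == (100, 1, 16, 1)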
Example #2
    def launch(self, view_model):
        # type: (NodeCoherenceModel) -> [CoherenceSpectrumIndex]
        """
        Launch algorithm and build results. 
        """
        # --------- Prepare a CoherenceSpectrum object for result ------------##
        coherence_spectrum_index = CoherenceSpectrumIndex()
        time_series_h5 = h5.h5_file_for_index(self.input_time_series_index)

        dest_path = h5.path_for(self.storage_path, CoherenceSpectrumH5,
                                coherence_spectrum_index.gid)
        coherence_h5 = CoherenceSpectrumH5(dest_path)
        coherence_h5.gid.store(uuid.UUID(coherence_spectrum_index.gid))
        coherence_h5.source.store(time_series_h5.gid.load())
        coherence_h5.nfft.store(self.algorithm.nfft)

        # ------------- NOTE: Assumes 4D, Simulator timeSeries. --------------##
        input_shape = time_series_h5.data.shape
        node_slice = [
            slice(input_shape[0]), None,
            slice(input_shape[2]),
            slice(input_shape[3])
        ]

        # ---------- Iterate over slices and compose final result ------------##
        small_ts = TimeSeries()
        small_ts.sample_period = time_series_h5.sample_period.load()
        partial_coh = None
        for var in range(input_shape[1]):
            node_slice[1] = slice(var, var + 1)
            small_ts.data = time_series_h5.read_data_slice(tuple(node_slice))
            self.algorithm.time_series = small_ts
            partial_coh = self.algorithm.evaluate()
            coherence_h5.write_data_slice(partial_coh)
        coherence_h5.frequency.store(partial_coh.frequency)
        # read the array shape before closing the H5 file
        coherence_spectrum_index.ndim = len(coherence_h5.array_data.shape)
        coherence_h5.close()
        time_series_h5.close()

        coherence_spectrum_index.fk_source_gid = self.input_time_series_index.gid
        coherence_spectrum_index.nfft = partial_coh.nfft
        coherence_spectrum_index.frequencies = partial_coh.frequency

        return coherence_spectrum_index
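
For intuition about what each evaluate() call above produces, a hedged stand-in
using scipy.signal.coherence on two toy signals (scipy's Welch-based coherence is
an analogue of, not identical to, the TVB NodeCoherence implementation):

import numpy
from scipy import signal

fs = 250.0                                   # assumed sampling frequency, in Hz
t = numpy.arange(0, 4, 1.0 / fs)
x = numpy.sin(2 * numpy.pi * 10 * t) + 0.5 * numpy.random.randn(t.size)
y = numpy.sin(2 * numpy.pi * 10 * t) + 0.5 * numpy.random.randn(t.size)

# nperseg plays the same role as the adapter's nfft: frequency resolution
freq, coh = signal.coherence(x, y, fs=fs, nperseg=256)
print(freq.shape, coh.max())                 # coherence peaks near 10 Hz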
Example #3
    def test_node_coherence_adapter(self, tmpdir, time_series_index_factory):
        # algorithm returns complex values instead of float
        storage_folder = str(tmpdir)
        ts_index = time_series_index_factory()

        node_coherence_adapter = NodeCoherenceAdapter()
        node_coherence_adapter.storage_path = storage_folder
        view_model = node_coherence_adapter.get_view_model_class()()
        view_model.time_series = ts_index.gid
        node_coherence_adapter.configure(view_model)

        disk = node_coherence_adapter.get_required_disk_size(view_model)
        mem = node_coherence_adapter.get_required_memory_size(view_model)

        coherence_spectrum_idx = node_coherence_adapter.launch(view_model)

        result_h5 = h5.path_for(storage_folder, CoherenceSpectrumH5,
                                coherence_spectrum_idx.gid)
        assert os.path.exists(result_h5)
Example #4
    def test_cross_correlation_adapter(self, tmpdir,
                                       time_series_index_factory):
        storage_folder = str(tmpdir)
        ts_index = time_series_index_factory()

        cross_correlation_adapter = CrossCorrelateAdapter()
        cross_correlation_adapter.storage_path = storage_folder
        view_model = cross_correlation_adapter.get_view_model_class()()
        view_model.time_series = ts_index.gid
        cross_correlation_adapter.configure(view_model)

        disk = cross_correlation_adapter.get_required_disk_size(view_model)
        mem = cross_correlation_adapter.get_required_memory_size(view_model)

        cross_correlation_idx = cross_correlation_adapter.launch(view_model)

        result_h5 = h5.path_for(storage_folder, CrossCorrelationH5,
                                cross_correlation_idx.gid)
        assert os.path.exists(result_h5)
Example #5
    def launch(self, view_model):
        # type: (ICAAdapterModel) -> [IndependentComponentsIndex]
        """
        Launch algorithm and build results.
        :param view_model: the ViewModel keeping the algorithm inputs
        :return: the ica index for the specified time series
        """
        # --------------------- Prepare result entities ---------------------##
        ica_index = IndependentComponentsIndex()
        result_path = h5.path_for(self.storage_path, IndependentComponentsH5,
                                  ica_index.gid)
        ica_h5 = IndependentComponentsH5(path=result_path)

        # ------------- NOTE: Assumes 4D, Simulator timeSeries. --------------##
        time_series_h5 = h5.h5_file_for_index(self.input_time_series_index)
        input_shape = time_series_h5.data.shape
        node_slice = [
            slice(input_shape[0]), None,
            slice(input_shape[2]),
            slice(input_shape[3])
        ]

        # ---------- Iterate over slices and compose final result ------------##
        small_ts = TimeSeries()
        partial_ica = None
        for var in range(input_shape[1]):
            node_slice[1] = slice(var, var + 1)
            small_ts.data = time_series_h5.read_data_slice(tuple(node_slice))
            partial_ica = compute_ica_decomposition(small_ts,
                                                    view_model.n_components)
            ica_h5.write_data_slice(partial_ica)

        time_series_h5.close()

        partial_ica.source.gid = view_model.time_series
        partial_ica.gid = uuid.UUID(ica_index.gid)

        ica_h5.store(partial_ica, scalars_only=True)
        ica_h5.close()

        ica_index.fill_from_has_traits(partial_ica)

        return ica_index
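
compute_ica_decomposition is a TVB helper; as a rough, hedged illustration of the
underlying operation, scikit-learn's FastICA recovers independent sources from
mixed signals (names and shapes below are illustrative only):

import numpy
from sklearn.decomposition import FastICA

rng = numpy.random.RandomState(0)
sources = rng.laplace(size=(1000, 2))            # two independent sources
mixing = numpy.array([[1.0, 0.5], [0.3, 1.0]])
mixed = numpy.dot(sources, mixing.T)             # observed, mixed signals

ica = FastICA(n_components=2, random_state=0)
recovered = ica.fit_transform(mixed)             # estimated sources
unmixing = ica.components_                       # cf. the stored unmixing_matrix

Example #6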
    def launch(self, view_model):
        # type: (ICAAdapterModel) -> [IndependentComponentsIndex]
        """ 
        Launch algorithm and build results. 
        """
        # --------- Prepare a IndependentComponents object for result ----------##
        ica_index = IndependentComponentsIndex()
        ica_index.fk_source_gid = view_model.time_series.hex

        time_series_h5 = h5.h5_file_for_index(self.input_time_series_index)

        result_path = h5.path_for(self.storage_path, IndependentComponentsH5,
                                  ica_index.gid)
        ica_h5 = IndependentComponentsH5(path=result_path)
        ica_h5.gid.store(uuid.UUID(ica_index.gid))
        ica_h5.source.store(view_model.time_series)
        ica_h5.n_components.store(self.algorithm.n_components)

        # ------------- NOTE: Assumes 4D, Simulator timeSeries. --------------##
        input_shape = time_series_h5.data.shape
        node_slice = [
            slice(input_shape[0]), None,
            slice(input_shape[2]),
            slice(input_shape[3])
        ]

        # ---------- Iterate over slices and compose final result ------------##
        small_ts = TimeSeries()
        for var in range(input_shape[1]):
            node_slice[1] = slice(var, var + 1)
            small_ts.data = time_series_h5.read_data_slice(tuple(node_slice))
            self.algorithm.time_series = small_ts
            partial_ica = self.algorithm.evaluate()
            ica_h5.write_data_slice(partial_ica)
        array_metadata = ica_h5.unmixing_matrix.get_cached_metadata()
        ica_index.array_has_complex = array_metadata.has_complex
        ica_index.shape = json.dumps(ica_h5.unmixing_matrix.shape)
        ica_index.ndim = len(ica_h5.unmixing_matrix.shape)
        ica_h5.close()
        time_series_h5.close()

        return ica_index
Example #7
    def __copy_linked_datatype_before_delete(self, op, datatype, project, fk_to_project):
        new_op = Operation(op.view_model_gid,
                           dao.get_system_user().id,
                           fk_to_project,
                           datatype.parent_operation.fk_from_algo,
                           datatype.parent_operation.status,
                           datatype.parent_operation.start_date,
                           datatype.parent_operation.completion_date,
                           datatype.parent_operation.fk_operation_group,
                           datatype.parent_operation.additional_info,
                           datatype.parent_operation.user_group,
                           datatype.parent_operation.range_values)
        new_op.visible = datatype.parent_operation.visible
        new_op = dao.store_entity(new_op)
        to_project = self.find_project(fk_to_project)
        to_project_path = self.storage_interface.get_project_folder(to_project.name)

        full_path = h5.path_for_stored_index(datatype)
        old_folder = self.storage_interface.get_project_folder(project.name, str(op.id))
        file_paths = h5.gather_references_of_view_model(op.view_model_gid, old_folder, only_view_models=True)[0]
        file_paths.append(full_path)

        # The BurstConfiguration H5 file must be moved only when we handle the time series
        # whose operation folder contains that file
        if datatype.is_ts and datatype.fk_parent_burst is not None:
            bc_path = h5.path_for(datatype.parent_operation.id, BurstConfigurationH5, datatype.fk_parent_burst,
                                  project.name)
            if os.path.exists(bc_path):
                file_paths.append(bc_path)

                bc = dao.get_burst_for_operation_id(op.id)
                bc.fk_simulation = new_op.id
                dao.store_entity(bc)

        # Move all files to the new operation folder
        self.storage_interface.move_datatype_with_sync(to_project, to_project_path, new_op.id, file_paths)

        datatype.fk_from_operation = new_op.id
        datatype.parent_operation = new_op
        dao.store_entity(datatype)

        return new_op
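Example #8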
    def launch(self, view_model):
        # type: (NodeCovarianceAdapterModel) -> [CovarianceIndex]
        """ 
        Launch algorithm and build results.

        :returns: the `CovarianceIndex` built with the given time_series index as source
        """
        # Create an index for the computed covariance.
        covariance_index = CovarianceIndex()
        covariance_h5_path = h5.path_for(self.storage_path, CovarianceH5,
                                         covariance_index.gid)
        covariance_h5 = CovarianceH5(covariance_h5_path)

        # NOTE: Assumes 4D, Simulator timeSeries.
        node_slice = [
            slice(self.input_shape[0]), None,
            slice(self.input_shape[2]), None
        ]

        with h5.h5_file_for_index(self.input_time_series_index) as ts_h5:
            for mode in range(self.input_shape[3]):
                for var in range(self.input_shape[1]):
                    small_ts = TimeSeries()
                    node_slice[1] = slice(var, var + 1)
                    node_slice[3] = slice(mode, mode + 1)
                    small_ts.data = ts_h5.read_data_slice(tuple(node_slice))
                    partial_cov = self._compute_node_covariance(
                        small_ts, ts_h5)
                    covariance_h5.write_data_slice(partial_cov.array_data)
            ts_array_metadata = covariance_h5.array_data.get_cached_metadata()

        covariance_index.source_gid = self.input_time_series_index.gid
        covariance_index.subtype = type(covariance_index).__name__
        covariance_index.array_data_min = ts_array_metadata.min
        covariance_index.array_data_max = ts_array_metadata.max
        covariance_index.array_data_mean = ts_array_metadata.mean
        covariance_index.ndim = len(covariance_h5.array_data.shape)

        covariance_h5.gid.store(uuid.UUID(covariance_index.gid))
        covariance_h5.source.store(view_model.time_series)
        covariance_h5.close()
        return covariance_index
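
_compute_node_covariance is TVB-internal; the core computation per slice is
essentially a node-by-node covariance of the demeaned series. A minimal numpy
sketch (shapes illustrative):

import numpy

series = numpy.random.rand(100, 16)              # (time points, nodes)
demeaned = series - series.mean(axis=0)
covariance = numpy.cov(demeaned, rowvar=False)   # (nodes, nodes)
assert covariance.shape == (16, 16)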
Example #9
    def launch(self, view_model):
        # type: (PCAAdapterModel) -> [PrincipalComponentsIndex]
        """ 
        Launch algorithm and build results.
        :param view_model: the ViewModel keeping the algorithm inputs
        :return: the `PrincipalComponentsIndex` object built with the given timeseries as source
        """
        # --------------------- Prepare result entities ----------------------##
        principal_components_index = PrincipalComponentsIndex()
        dest_path = h5.path_for(self.storage_path, PrincipalComponentsH5,
                                principal_components_index.gid)
        pca_h5 = PrincipalComponentsH5(path=dest_path)

        # ------------- NOTE: Assumes 4D, Simulator timeSeries. --------------##
        time_series_h5 = h5.h5_file_for_index(self.input_time_series_index)
        input_shape = time_series_h5.data.shape
        node_slice = [
            slice(input_shape[0]), None,
            slice(input_shape[2]),
            slice(input_shape[3])
        ]

        # ---------- Iterate over slices and compose final result ------------##
        small_ts = TimeSeries()
        for var in range(input_shape[1]):
            node_slice[1] = slice(var, var + 1)
            small_ts.data = time_series_h5.read_data_slice(tuple(node_slice))
            self.time_series = small_ts.gid
            partial_pca = compute_pca(small_ts)
            pca_h5.write_data_slice(partial_pca)

        time_series_h5.close()

        partial_pca.source.gid = view_model.time_series
        partial_pca.gid = uuid.UUID(principal_components_index.gid)
        principal_components_index.fill_from_has_traits(partial_pca)

        pca_h5.store(partial_pca, scalars_only=True)
        pca_h5.close()

        return principal_components_index
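
compute_pca is likewise a TVB helper; the classical computation it wraps can be
sketched with an SVD of the demeaned data (a hedged illustration, not the TVB
code path):

import numpy

data = numpy.random.rand(200, 16)                # (time points, nodes)
demeaned = data - data.mean(axis=0)
u, s, vt = numpy.linalg.svd(demeaned, full_matrices=False)
weights = vt                                     # component loadings per node
fractions = (s ** 2) / numpy.sum(s ** 2)         # variance fraction per component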
Example #10
    def launch(self, data_file, dataset_name, connectivity):
        """
        Execute import operations:
        """
        try:
            data = self.read_matlab_data(data_file, dataset_name)
            measurement_count, node_count = data.shape

            if node_count != connectivity.number_of_regions:
                raise LaunchException(
                    'The measurements are for %s nodes but the selected connectivity'
                    ' contains %s nodes' %
                    (node_count, connectivity.number_of_regions))

            measures = []
            for i in range(measurement_count):
                cm_idx = ConnectivityMeasureIndex()
                cm_idx.type = ConnectivityMeasureIndex.__name__
                cm_idx.connectivity_gid = connectivity.gid

                cm_data = data[i, :]
                cm_idx.array_data_ndim = cm_data.ndim
                cm_idx.ndim = cm_data.ndim
                cm_idx.array_data_min, cm_idx.array_data_max, cm_idx.array_data_mean = from_ndarray(
                    cm_data)

                cm_h5_path = h5.path_for(self.storage_path,
                                         ConnectivityMeasureH5, cm_idx.gid)
                with ConnectivityMeasureH5(cm_h5_path) as cm_h5:
                    cm_h5.array_data.store(data[i, :])
                    cm_h5.connectivity.store(uuid.UUID(connectivity.gid))
                    cm_h5.gid.store(uuid.UUID(cm_idx.gid))

                cm_idx.user_tag_2 = "nr.-%d" % (i + 1)
                cm_idx.user_tag_3 = "conn_%d" % node_count
                measures.append(cm_idx)
            return measures
        except ParseException as excep:
            logger = get_logger(__name__)
            logger.exception(excep)
            raise LaunchException(excep)
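Example #11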
    def launch(self, view_model):
        # type: (BalloonModelAdapterModel) -> [TimeSeriesRegionIndex]
        """
        Launch algorithm and build results.

        :param time_series: the input time-series used as neural activation in the Balloon Model
        :returns: the simulated BOLD signal
        :rtype: `TimeSeries`
        """
        input_time_series_h5 = h5.h5_file_for_index(self.input_time_series_index)
        time_line = input_time_series_h5.read_time_page(0, self.input_shape[0])

        bold_signal_index = TimeSeriesRegionIndex()
        bold_signal_h5_path = h5.path_for(self.storage_path, TimeSeriesRegionH5, bold_signal_index.gid)
        bold_signal_h5 = TimeSeriesRegionH5(bold_signal_h5_path)
        bold_signal_h5.gid.store(uuid.UUID(bold_signal_index.gid))
        self._fill_result_h5(bold_signal_h5, input_time_series_h5)

        # ---------- Iterate over slices and compose final result ------------##

        node_slice = [slice(self.input_shape[0]), slice(self.input_shape[1]), None, slice(self.input_shape[3])]
        small_ts = TimeSeries()
        small_ts.sample_period = self.input_time_series_index.sample_period
        small_ts.sample_period_unit = self.input_time_series_index.sample_period_unit
        small_ts.time = time_line

        for node in range(self.input_shape[2]):
            node_slice[2] = slice(node, node + 1)
            small_ts.data = input_time_series_h5.read_data_slice(tuple(node_slice))
            self.algorithm.time_series = small_ts
            partial_bold = self.algorithm.evaluate()
            bold_signal_h5.write_data_slice_on_grow_dimension(partial_bold.data, grow_dimension=2)

        bold_signal_h5.write_time_slice(time_line)
        bold_signal_shape = bold_signal_h5.data.shape
        bold_signal_h5.nr_dimensions.store(len(bold_signal_shape))
        bold_signal_h5.close()
        input_time_series_h5.close()

        self._fill_result_index(bold_signal_index, bold_signal_shape)
        return bold_signal_index
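
write_data_slice_on_grow_dimension appends each node's BOLD result along
dimension 2. The underlying storage trick, resizable HDF5 datasets, can be
sketched with plain h5py (a hedged illustration; the TVB H5 classes wrap this
differently):

import h5py
import numpy

with h5py.File("bold_demo.h5", "w") as f:
    dset = f.create_dataset("data", shape=(100, 1, 0, 1),
                            maxshape=(100, 1, None, 1), dtype="f8")
    for node in range(4):
        partial = numpy.random.rand(100, 1, 1, 1)   # one node's BOLD signal
        dset.resize(dset.shape[2] + 1, axis=2)      # grow along dimension 2
        dset[:, :, -1:, :] = partial                # write into the new slot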
Example #12
    def test_export(self):
        op = TestFactory.create_operation(test_user=self.test_user, test_project=self.test_project)
        burst_config = BurstConfiguration(self.test_project.id)
        burst_config.fk_simulation = op.id
        burst_config.simulator_gid = self.session_stored_simulator.gid.hex
        burst_config = dao.store_entity(burst_config)

        storage_path = FilesHelper().get_project_folder(self.test_project, str(op.id))
        h5_path = h5.path_for(storage_path, SimulatorH5, self.session_stored_simulator.gid)
        with SimulatorH5(h5_path) as h5_file:
            h5_file.store(self.session_stored_simulator)

        burst = dao.get_bursts_for_project(self.test_project.id)
        self.sess_mock['burst_id'] = str(burst[0].id)

        with patch('cherrypy.session', self.sess_mock, create=True):
            common.add2session(common.KEY_BURST_CONFIG, self.session_stored_simulator)
            common.add2session(common.KEY_BURST_CONFIG, burst_config)
            result = self.simulator_controller.export(str(burst[0].id))

        assert path.exists(result.input.name), "Simulation was not exported!"
Example #13
    def launch_operation(self, project_gid, algorithm_class, view_model, temp_folder):
        h5_file_path = h5.path_for(temp_folder, ViewModelH5, view_model.gid)

        h5_file = ViewModelH5(h5_file_path, view_model)
        h5_file.store(view_model)
        h5_file.close()

        model_file_obj = open(h5_file_path, 'rb')
        files = {RequestFileKey.LAUNCH_ANALYZERS_MODEL_FILE.value: (os.path.basename(h5_file_path), model_file_obj)}

        if issubclass(algorithm_class, ABCUploader):
            for key in algorithm_class().get_form_class().get_upload_information().keys():
                path = getattr(view_model, key)
                data_file_obj = open(path, 'rb')
                files[key] = (os.path.basename(path), data_file_obj)

        return self.secured_request().post(self.build_request_url(RestLink.LAUNCH_OPERATION.compute_url(True, {
            LinkPlaceholder.PROJECT_GID.value: project_gid,
            LinkPlaceholder.ALG_MODULE.value: algorithm_class.__module__,
            LinkPlaceholder.ALG_CLASSNAME.value: algorithm_class.__name__
        })), files=files)
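Example #14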
    def launch(self, view_model):
        # type: (NodeCovarianceAdapterModel) -> [CovarianceIndex]
        """ 
        Launch algorithm and build results.
        :param view_model: the ViewModel keeping the algorithm inputs
        :return: the `CovarianceIndex` built with the given time_series index as source
        """
        # -------------------- Prepare result entities ---------------------##
        covariance_index = CovarianceIndex()
        covariance_h5_path = h5.path_for(self.storage_path, CovarianceH5,
                                         covariance_index.gid)
        covariance_h5 = CovarianceH5(covariance_h5_path)

        # ------------ NOTE: Assumes 4D, Simulator timeSeries -------------##
        node_slice = [
            slice(self.input_shape[0]), None,
            slice(self.input_shape[2]), None
        ]
        ts_h5 = h5.h5_file_for_index(self.input_time_series_index)

        for mode in range(self.input_shape[3]):
            for var in range(self.input_shape[1]):
                small_ts = TimeSeries()
                node_slice[1] = slice(var, var + 1)
                node_slice[3] = slice(mode, mode + 1)
                small_ts.data = ts_h5.read_data_slice(tuple(node_slice))
                partial_cov = self._compute_node_covariance(small_ts, ts_h5)
                covariance_h5.write_data_slice(partial_cov.array_data)

        ts_h5.close()

        partial_cov.source.gid = view_model.time_series
        partial_cov.gid = uuid.UUID(covariance_index.gid)

        covariance_index.fill_from_has_traits(partial_cov)
        self.fill_index_from_h5(covariance_index, covariance_h5)

        covariance_h5.store(partial_cov, scalars_only=True)
        covariance_h5.close()
        return covariance_index
Example #15
    def launch(self, view_model):
        datatype = self._base_before_launch(view_model.data_file,
                                            view_model.region_volume)

        # note the streaming parsing: we do not load the whole dataset into memory at once
        tract_gen, hdr = trackvis.read(view_model.data_file, as_generator=True)

        vox2ras = _SpaceTransform(hdr)
        tract_start_indices = [0]
        tract_region = []

        with TractsH5(path_for(self.storage_path, TractsH5,
                               datatype.gid)) as tracts_h5:
            # we process tracts in bigger chunks to optimize disk write costs
            for tract_bundle in chunk_iter(tract_gen, self.READ_CHUNK):
                tract_bundle = [tr[0] for tr in tract_bundle]

                for tr in tract_bundle:
                    tract_start_indices.append(tract_start_indices[-1] +
                                               len(tr))
                    if view_model.region_volume is not None:
                        tract_region.append(self._get_tract_region(tr[0]))

                vertices = numpy.concatenate(tract_bundle)  # in voxel space
                datatype.vertices = vox2ras.transform(vertices)
                tracts_h5.write_vertices_slice(datatype.vertices)

            datatype.tract_start_idx = numpy.array(tract_start_indices)
            datatype.tract_region = numpy.array(tract_region,
                                                dtype=numpy.int16)

            tracts_h5.tract_region.store(datatype.tract_region)
            tracts_h5.tract_start_idx.store(datatype.tract_start_idx)
            tracts_h5.region_volume_map.store(view_model.region_volume)

        self.region_volume_h5.close()

        tracts_index = TractsIndex()
        tracts_index.fill_from_has_traits(datatype)
        return tracts_index
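
chunk_iter is a TVB utility; a minimal equivalent built on itertools shows the
batching pattern used above to amortize disk writes (assumed behavior: yield
lists of at most n items):

from itertools import islice

def chunk_iter(iterable, n):
    """Yield successive lists of at most n items from iterable."""
    iterator = iter(iterable)
    while True:
        chunk = list(islice(iterator, n))
        if not chunk:
            return
        yield chunk

# e.g. batches of 3: [[0, 1, 2], [3, 4, 5], [6]]
print(list(chunk_iter(range(7), 3)))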
Example #16
    def launch(self, view_model):
        # type: (PCAAdapterModel) -> [PrincipalComponentsIndex]
        """ 
        Launch algorithm and build results.

        :returns: the `PrincipalComponents` object built with the given timeseries as source
        """
        # --------- Prepare a PrincipalComponents object for result ----------##
        principal_components_index = PrincipalComponentsIndex()
        principal_components_index.fk_source_gid = view_model.time_series.hex

        time_series_h5 = h5.h5_file_for_index(self.input_time_series_index)

        dest_path = h5.path_for(self.storage_path, PrincipalComponentsH5,
                                principal_components_index.gid)
        pca_h5 = PrincipalComponentsH5(path=dest_path)
        pca_h5.source.store(time_series_h5.gid.load())
        pca_h5.gid.store(uuid.UUID(principal_components_index.gid))

        # ------------- NOTE: Assumes 4D, Simulator timeSeries. --------------##
        input_shape = time_series_h5.data.shape
        node_slice = [
            slice(input_shape[0]), None,
            slice(input_shape[2]),
            slice(input_shape[3])
        ]

        # ---------- Iterate over slices and compose final result ------------##
        small_ts = TimeSeries()
        for var in range(input_shape[1]):
            node_slice[1] = slice(var, var + 1)
            small_ts.data = time_series_h5.read_data_slice(tuple(node_slice))
            self.algorithm.time_series = small_ts
            partial_pca = self.algorithm.evaluate()
            pca_h5.write_data_slice(partial_pca)
        pca_h5.close()
        time_series_h5.close()

        return principal_components_index
Example #17
    def create_region_ts(self, data_shape, connectivity):
        if connectivity.number_of_regions != data_shape[1]:
            raise LaunchException(
                "Data has %d channels but the connectivity has %d nodes" %
                (data_shape[1], connectivity.number_of_regions))
        ts_idx = TimeSeriesRegionIndex()
        ts_idx.fk_connectivity_gid = connectivity.gid

        region_map_indexes = dao.get_generic_entity(RegionMappingIndex,
                                                    connectivity.gid,
                                                    'fk_connectivity_gid')
        ts_idx.has_surface_mapping = False
        if len(region_map_indexes) > 0:
            ts_idx.fk_region_mapping_gid = region_map_indexes[0].gid
            ts_idx.has_surface_mapping = True

        ts_h5_path = h5.path_for(self.storage_path, TimeSeriesRegionH5,
                                 ts_idx.gid)
        ts_h5 = TimeSeriesRegionH5(ts_h5_path)
        ts_h5.connectivity.store(uuid.UUID(connectivity.gid))

        return TimeSeriesRegion(), ts_idx, ts_h5
Example #18
    def test_server_launch_operation(self, mocker, time_series_index_factory):
        self._mock_user(mocker)
        algorithm_module = "tvb.adapters.analyzers.fourier_adapter"
        algorithm_class = "FourierAdapter"

        input_ts_index = time_series_index_factory()

        fft_model = FFTAdapterModel()
        fft_model.time_series = UUID(input_ts_index.gid)
        fft_model.window_function = list(SUPPORTED_WINDOWING_FUNCTIONS)[0]

        input_folder = self.files_helper.get_project_folder(self.test_project)
        view_model_h5_path = h5.path_for(input_folder, ViewModelH5,
                                         fft_model.gid)

        view_model_h5 = ViewModelH5(view_model_h5_path, fft_model)
        view_model_h5.store(fft_model)
        view_model_h5.close()

        # Mock flask.request.files to return a dictionary
        request_mock = mocker.patch.object(flask, 'request')
        fp = open(view_model_h5_path, 'rb')
        request_mock.files = {
            RequestFileKey.LAUNCH_ANALYZERS_MODEL_FILE.value:
            FileStorage(fp, os.path.basename(view_model_h5_path))
        }

        # Mock launch_operation() call and current_user
        mocker.patch.object(OperationService, 'launch_operation')

        operation_gid, status = self.launch_resource.post(
            project_gid=self.test_project.gid,
            algorithm_module=algorithm_module,
            algorithm_classname=algorithm_class)

        fp.close()

        assert type(operation_gid) is str
        assert len(operation_gid) > 0
Example #19
    def test_fmri_balloon_adapter(self, tmpdir, time_series_region_index_factory,
                                  connectivity_factory, region_mapping_factory, surface_factory):
        # To be fixed once we have the migrated importers
        storage_folder = str(tmpdir)
        connectivity = connectivity_factory()
        surface = surface_factory()
        region_mapping = region_mapping_factory(surface=surface, connectivity=connectivity)
        ts_index = time_series_region_index_factory(connectivity=connectivity, region_mapping=region_mapping)

        fmri_balloon_adapter = BalloonModelAdapter()
        fmri_balloon_adapter.storage_path = storage_folder
        view_model = fmri_balloon_adapter.get_view_model_class()()
        view_model.time_series = ts_index.gid
        fmri_balloon_adapter.configure(view_model)

        disk = fmri_balloon_adapter.get_required_disk_size(view_model)
        mem = fmri_balloon_adapter.get_required_memory_size(view_model)

        ts_index = fmri_balloon_adapter.launch(view_model)

        result_h5 = h5.path_for(storage_folder, TimeSeriesRegionH5, ts_index.gid)
        assert os.path.exists(result_h5)
Example #20
    def test_adapter_huge_memory_requirement(self, test_adapter_factory):
        """
        Test that a NoMemoryAvailableException is raised when the adapter cannot launch due to lack of memory.
        """
        # Prepare adapter
        test_adapter_factory(adapter_class=TestAdapterHugeMemoryRequired)
        adapter = TestFactory.create_adapter(
            "tvb.tests.framework.adapters.testadapter3",
            "TestAdapterHugeMemoryRequired")

        # Simulate receiving POST data
        form = TestAdapterHugeMemoryRequiredForm()
        adapter.submit_form(form)

        view_model = form.get_view_model()()
        view_model.test = 5

        # Prepare operation for launch
        operation = model_operation.Operation(
            self.test_user.id,
            self.test_project.id,
            adapter.stored_adapter.id,
            json.dumps({'gid': view_model.gid.hex}),
            json.dumps({}),
            status=model_operation.STATUS_STARTED)
        operation = dao.store_entity(operation)

        # Store ViewModel in H5
        parent_folder = FilesHelper().get_project_folder(
            self.test_project, str(operation.id))
        # h5.path_for already returns the full path inside parent_folder
        view_model_path = h5.path_for(parent_folder, ViewModelH5,
                                      view_model.gid)
        with ViewModelH5(view_model_path, view_model) as view_model_h5:
            view_model_h5.store(view_model)

        # Launch operation
        with pytest.raises(NoMemoryAvailableException):
            OperationService().initiate_prelaunch(operation, adapter)
Example #21
    def load_datatype_from_file(self,
                                storage_folder,
                                file_name,
                                op_id,
                                datatype_group=None,
                                move=True,
                                final_storage=None):
        """
        Creates an instance of datatype from storage / H5 file 
        :returns: DatatypeIndex
        """
        self.logger.debug("Loading DataType from file: %s" % file_name)
        datatype, generic_attributes = h5.load_with_references(
            os.path.join(storage_folder, file_name))
        index_class = h5.REGISTRY.get_index_for_datatype(datatype.__class__)
        datatype_index = index_class()
        datatype_index.fill_from_has_traits(datatype)
        datatype_index.fill_from_generic_attributes(generic_attributes)

        # Add all the required attributes
        if datatype_group is not None:
            datatype_index.fk_datatype_group = datatype_group.id
        datatype_index.fk_from_operation = op_id

        associated_file = h5.path_for_stored_index(datatype_index)
        if os.path.exists(associated_file):
            datatype_index.disk_size = FilesHelper.compute_size_on_disk(
                associated_file)

        # Now move storage file into correct folder if necessary
        if move and final_storage is not None:
            current_file = os.path.join(storage_folder, file_name)
            h5_type = h5.REGISTRY.get_h5file_for_datatype(datatype.__class__)
            final_path = h5.path_for(final_storage, h5_type, datatype.gid)
            if final_path != current_file:
                shutil.move(current_file, final_path)

        return datatype_index
Example #22
    def test_fcd_adapter(self, tmpdir, time_series_region_index_factory,
                         connectivity_factory, region_mapping_factory, surface_factory):
        storage_folder = str(tmpdir)
        connectivity = connectivity_factory()
        surface = surface_factory()
        region_mapping = region_mapping_factory(surface=surface, connectivity=connectivity)
        ts_index = time_series_region_index_factory(connectivity=connectivity, region_mapping=region_mapping)

        fcd_adapter = FunctionalConnectivityDynamicsAdapter()
        fcd_adapter.storage_path = storage_folder
        view_model = fcd_adapter.get_view_model_class()()
        view_model.sw = 0.5
        view_model.sp = 0.2
        view_model.time_series = ts_index.gid
        fcd_adapter.configure(view_model)

        disk = fcd_adapter.get_required_disk_size(view_model)
        mem = fcd_adapter.get_required_memory_size(view_model)

        fcd_idx = fcd_adapter.launch(view_model)

        result_h5 = h5.path_for(storage_folder, FcdH5, fcd_idx[0].gid)
        assert os.path.exists(result_h5)
Example #23
    def launch(self, view_model):
        # type: (TrackImporterModel) -> [TractsIndex]
        datatype = self._base_before_launch(view_model.data_file,
                                            view_model.region_volume)
        tracts_h5 = TractsH5(
            path_for(self.storage_path, TractsH5, datatype.gid))

        tract_start_indices = [0]
        tract_region = []

        for tractf in sorted(
                self.storage_interface.get_filenames_in_zip(
                    view_model.data_file)):  # one track per file
            if not tractf.endswith(
                    '.txt'):  # omit directories and other non track files
                continue
            vertices_file = self.storage_interface.open_tvb_zip(
                view_model.data_file, tractf)
            datatype.tract_vertices = numpy.loadtxt(vertices_file,
                                                    dtype=numpy.float32)

            tract_start_indices.append(tract_start_indices[-1] +
                                       len(datatype.tract_vertices))
            tracts_h5.write_vertices_slice(datatype.tract_vertices)

            if view_model.region_volume is not None:
                tract_region.append(
                    self._get_tract_region(datatype.tract_vertices[0]))
            vertices_file.close()

        tracts_h5.close()
        self.region_volume_h5.close()

        datatype.tract_start_idx = tract_start_indices
        datatype.tract_region = numpy.array(tract_region, dtype=numpy.int16)
        return datatype
Example #24
    def launch(self, view_model):
        # type: (FFTAdapterModel) -> [FourierSpectrumIndex]
        """
        Launch algorithm and build results.

        :param time_series: the input time series to which the fft is to be applied
        :param segment_length: the block size which determines the frequency resolution \
                               of the resulting power spectra
        :param window_function: windowing functions can be applied before the FFT is performed
        :type  window_function: None; ‘hamming’; ‘bartlett’; ‘blackman’; ‘hanning’
        :returns: the fourier spectrum for the specified time series
        :rtype: `FourierSpectrumIndex`

        """
        fft_index = FourierSpectrumIndex()
        fft_index.fk_source_gid = view_model.time_series.hex

        block_size = int(math.floor(self.input_shape[2] / self.memory_factor))
        blocks = int(math.ceil(self.input_shape[2] / block_size))

        input_time_series_h5 = h5.h5_file_for_index(
            self.input_time_series_index)

        dest_path = h5.path_for(self.storage_path, FourierSpectrumH5,
                                fft_index.gid)
        spectra_file = FourierSpectrumH5(dest_path)
        spectra_file.gid.store(uuid.UUID(fft_index.gid))
        spectra_file.source.store(uuid.UUID(self.input_time_series_index.gid))

        # ------------- NOTE: Assumes 4D, Simulator timeSeries. --------------
        node_slice = [
            slice(self.input_shape[0]),
            slice(self.input_shape[1]), None,
            slice(self.input_shape[3])
        ]

        # ---------- Iterate over slices and compose final result ------------
        small_ts = TimeSeries()
        small_ts.sample_period = input_time_series_h5.sample_period.load()

        for block in range(blocks):
            node_slice[2] = slice(
                block * block_size,
                min([(block + 1) * block_size, self.input_shape[2]]), 1)
            small_ts.data = input_time_series_h5.read_data_slice(
                tuple(node_slice))
            self.algorithm.time_series = small_ts
            partial_result = self.algorithm.evaluate()

            if blocks <= 1 and len(partial_result.array_data) == 0:
                self.add_operation_additional_info(
                    "Fourier produced empty result (most probably due to a very short input TimeSeries)."
                )
                return None
            spectra_file.write_data_slice(partial_result)
        fft_index.ndim = len(spectra_file.array_data.shape)
        input_time_series_h5.close()

        fft_index.windowing_function = self.algorithm.window_function
        fft_index.segment_length = self.algorithm.segment_length
        fft_index.detrend = self.algorithm.detrend
        fft_index.frequency_step = partial_result.freq_step
        fft_index.max_frequency = partial_result.max_freq

        spectra_file.segment_length.store(self.algorithm.segment_length)
        spectra_file.windowing_function.store(
            str(self.algorithm.window_function))
        spectra_file.close()

        self.log.debug("partial segment_length is %s" %
                       (str(partial_result.segment_length)))
        return fft_index
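
As a hedged illustration of what each evaluate() call above computes per block,
a windowed FFT power spectrum with plain numpy (segmenting and detrending in the
TVB FFT analyzer are more involved):

import numpy

fs = 250.0                                   # assumed sampling frequency, in Hz
segment = numpy.random.rand(512)             # one segment of one node's signal
window = numpy.hanning(segment.size)         # cf. the 'hanning' window_function
spectrum = numpy.fft.rfft(segment * window)
power = numpy.abs(spectrum) ** 2
freqs = numpy.fft.rfftfreq(segment.size, d=1.0 / fs)

Example #25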
    def launch(self, view_model):
        # type: (SimulatorAdapterModel) -> [TimeSeriesIndex, SimulationHistoryIndex]
        """
        Called from the GUI to launch a simulation.
          *: string class name of chosen model, etc...
          *_parameters: dictionary of parameters for chosen model, etc...
          connectivity: tvb.datatypes.connectivity.Connectivity object.
          surface: tvb.datatypes.surfaces.CorticalSurface: or None.
          stimulus: tvb.datatypes.patters.* object
        """
        result_h5 = dict()
        result_indexes = dict()
        start_time = self.algorithm.current_step * self.algorithm.integrator.dt

        self.algorithm.configure(full_configure=False)
        if self.branch_simulation_state_gid is not None:
            history_index = dao.get_datatype_by_gid(
                self.branch_simulation_state_gid.hex)
            history = h5.load_from_index(history_index)
            assert isinstance(history, SimulationHistory)
            history.fill_into(self.algorithm)

        region_map, region_volume_map = self._try_load_region_mapping()

        for monitor in self.algorithm.monitors:
            m_name = type(monitor).__name__
            ts = monitor.create_time_series(self.algorithm.connectivity,
                                            self.algorithm.surface, region_map,
                                            region_volume_map)
            self.log.debug("Monitor created the TS")
            ts.start_time = start_time

            ts_index_class = h5.REGISTRY.get_index_for_datatype(type(ts))
            ts_index = ts_index_class()
            ts_index.fill_from_has_traits(ts)
            ts_index.data_ndim = 4
            ts_index.state = 'INTERMEDIATE'

            state_variable_dimension_name = ts.labels_ordering[1]
            if m_name in self.HAVE_STATE_VARIABLES:
                selected_vois = [
                    self.algorithm.model.variables_of_interest[idx]
                    for idx in monitor.voi
                ]
                ts.labels_dimensions[
                    state_variable_dimension_name] = selected_vois
                ts_index.labels_dimensions = json.dumps(ts.labels_dimensions)

            ts_h5_class = h5.REGISTRY.get_h5file_for_datatype(type(ts))
            ts_h5_path = h5.path_for(self.storage_path, ts_h5_class, ts.gid)
            ts_h5 = ts_h5_class(ts_h5_path)
            ts_h5.store(ts, scalars_only=True, store_references=False)
            ts_h5.sample_rate.store(ts.sample_rate)
            ts_h5.nr_dimensions.store(ts_index.data_ndim)

            if self.algorithm.surface:
                ts_index.fk_surface_gid = self.algorithm.surface.region_mapping_data.surface.gid.hex
                ts_h5.surface.store(self.algorithm.surface.gid)
            else:
                ts_h5.store_references(ts)

            result_indexes[m_name] = ts_index
            result_h5[m_name] = ts_h5

        # Run simulation
        self.log.debug("Starting simulation...")
        for result in self.algorithm(
                simulation_length=self.algorithm.simulation_length):
            for j, monitor in enumerate(self.algorithm.monitors):
                if result[j] is not None:
                    m_name = type(monitor).__name__
                    ts_h5 = result_h5[m_name]
                    ts_h5.write_time_slice([result[j][0]])
                    ts_h5.write_data_slice([result[j][1]])

        self.log.debug(
            "Completed simulation, starting to store simulation state ")
        # Now store simulator history, at the simulation end
        results = []
        if not self._is_group_launch():
            simulation_history = SimulationHistory()
            simulation_history.populate_from(self.algorithm)
            history_index = h5.store_complete(simulation_history,
                                              self.storage_path)
            results.append(history_index)

        self.log.debug("Simulation state persisted, returning results ")
        for monitor in self.algorithm.monitors:
            m_name = type(monitor).__name__
            ts_shape = result_h5[m_name].read_data_shape()
            result_indexes[m_name].fill_shape(ts_shape)
            result_h5[m_name].close()
        self.log.debug("%s: Adapter simulation finished!!" % str(self))
        results.extend(result_indexes.values())
        return results
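Example #26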
"""
Demo script on how to use tvb-framework default read/write capabilities

.. moduleauthor:: Lia Domide <*****@*****.**>
"""

from tvb.core.neocom import h5
from tvb.basic.profile import TvbProfile
from tvb.datatypes.connectivity import Connectivity
from tvb.adapters.datatypes.h5.connectivity_h5 import ConnectivityH5

if __name__ == '__main__':
    TvbProfile.set_profile(TvbProfile.COMMAND_PROFILE)

    # Read from a ZIP
    conn_ht = Connectivity.from_file()
    conn_ht.configure()

    # Store in a given folder the HasTraits entity
    PATH = "."
    h5.store_complete(conn_ht, PATH)

    # Reproduce the just written file name containing GUID
    file_name = h5.path_for(PATH, ConnectivityH5, conn_ht.gid)

    # Load back from a file name a HasTraits instance
    conn_back = h5.load(file_name)

    # Check that the loaded and written entities are correct
    assert conn_ht.number_of_regions == 76
    assert conn_ht.number_of_regions == conn_back.number_of_regions
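    # A hedged extra step (assumption): load_with_references, as used by
    # load_datatype_from_file above, returns the instance plus its generic attributes
    conn_back2, generic_attributes = h5.load_with_references(file_name)
    assert conn_back2.number_of_regions == conn_ht.number_of_regions

Example #27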
    def launch(self, simulator_gid):
        """
        Called from the GUI to launch a simulation.
          *: string class name of chosen model, etc...
          *_parameters: dictionary of parameters for chosen model, etc...
          connectivity: tvb.datatypes.connectivity.Connectivity object.
          surface: tvb.datatypes.surfaces.CorticalSurface: or None.
          stimulus: tvb.datatypes.patters.* object
        """
        result_h5 = dict()
        result_indexes = dict()
        start_time = self.algorithm.current_step * self.algorithm.integrator.dt

        self.algorithm.configure(full_configure=False)
        if self.branch_simulation_state_gid is not None:
            simulation_state_index = dao.get_datatype_by_gid(
                self.branch_simulation_state_gid.hex)
            self.branch_simulation_state_path = h5.path_for_stored_index(
                simulation_state_index)

            with SimulationStateH5(self.branch_simulation_state_path
                                   ) as branch_simulation_state_h5:
                branch_simulation_state_h5.load_into(self.algorithm)

        region_map, region_volume_map = self._try_load_region_mapping()

        for monitor in self.algorithm.monitors:
            m_name = monitor.__class__.__name__
            ts = monitor.create_time_series(self.algorithm.connectivity,
                                            self.algorithm.surface, region_map,
                                            region_volume_map)
            self.log.debug("Monitor created the TS")
            ts.start_time = start_time

            ts_index_class = h5.REGISTRY.get_index_for_datatype(type(ts))
            ts_index = ts_index_class()
            ts_index.fill_from_has_traits(ts)
            ts_index.data_ndim = 4
            ts_index.state = 'INTERMEDIATE'

            # state_variable_dimension_name = ts.labels_ordering[1]
            # if ts_index.user_tag_1:
            #     ts_index.labels_dimensions[state_variable_dimension_name] = ts.user_tag_1.split(';')
            # elif m_name in self.HAVE_STATE_VARIABLES:
            #     selected_vois = [self.algorithm.model.variables_of_interest[idx] for idx in monitor.voi]
            #     ts.labels_dimensions[state_variable_dimension_name] = selected_vois

            ts_h5_class = h5.REGISTRY.get_h5file_for_datatype(type(ts))
            ts_h5_path = h5.path_for(self.storage_path, ts_h5_class, ts.gid)
            ts_h5 = ts_h5_class(ts_h5_path)
            ts_h5.store(ts, scalars_only=True, store_references=False)
            ts_h5.sample_rate.store(ts.sample_rate)
            ts_h5.nr_dimensions.store(ts_index.data_ndim)

            if self.algorithm.surface:
                ts_index.surface_gid = self.algorithm.surface.region_mapping_data.surface.gid.hex
                ts_h5.surface.store(self.algorithm.surface.gid)
            else:
                ts_index.connectivity_gid = self.algorithm.connectivity.gid.hex
                ts_h5.connectivity.store(self.algorithm.connectivity.gid)
                if region_map:
                    ts_index.region_mapping_gid = region_map.gid.hex
                    ts_h5.region_mapping.store(region_map.gid)
                if region_volume_map:
                    ts_index.region_mapping_volume_gid = region_volume_map.gid.hex
                    ts_h5.region_mapping_volume.store(region_volume_map.gid)

            result_indexes[m_name] = ts_index
            result_h5[m_name] = ts_h5

        # Run simulation
        self.log.debug("Starting simulation...")
        for result in self.algorithm(simulation_length=self.simulation_length):
            for j, monitor in enumerate(self.algorithm.monitors):
                if result[j] is not None:
                    m_name = monitor.__class__.__name__
                    ts_h5 = result_h5[m_name]
                    ts_h5.write_time_slice([result[j][0]])
                    ts_h5.write_data_slice([result[j][1]])

        self.log.debug(
            "Completed simulation, starting to store simulation state ")
        # Populate H5 file for simulator state. This step could also be done while running sim, in background.
        if not self._is_group_launch():
            simulation_state_index = SimulationStateIndex()
            simulation_state_path = h5.path_for(self.storage_path,
                                                SimulationStateH5,
                                                self.algorithm.gid)
            with SimulationStateH5(
                    simulation_state_path) as simulation_state_h5:
                simulation_state_h5.store(self.algorithm)
            self._capture_operation_results([simulation_state_index])

        self.log.debug("Simulation state persisted, returning results ")
        for monitor in self.algorithm.monitors:
            m_name = monitor.__class__.__name__
            ts_shape = result_h5[m_name].read_data_shape()
            result_indexes[m_name].fill_shape(ts_shape)
            result_h5[m_name].close()
        # self.log.info("%s: Adapter simulation finished!!" % str(self))
        return list(result_indexes.values())
Example #28
    def _update_vm_generic_operation_tag(view_model, operation):
        project = dao.get_project_by_id(operation.fk_launched_in)
        h5_path = h5.path_for(operation.id, ViewModelH5, view_model.gid, project.name, type(view_model).__name__)
        with ViewModelH5(h5_path, view_model) as vm_h5:
            vm_h5.operation_tag.store(operation.user_group)
Example #29
    def store_burst_configuration(self, burst_config, storage_path):
        bc_path = h5.path_for(storage_path, BurstConfigurationH5, burst_config.gid)
        with BurstConfigurationH5(bc_path) as bc_h5:
            bc_h5.store(burst_config)
Example #30
    def path_for(self, h5_file_class, gid, dt_class=None):
        project = dao.get_project_by_id(self.current_project_id)
        return h5.path_for(self.operation_id, h5_file_class, gid, project.name, dt_class)