def _create_time_series(self, volume, title):
    # Now create TimeSeries and fill it with data from NIFTI image
    time_series = TimeSeriesVolume()
    time_series.title = title
    time_series.labels_ordering = ["Time", "X", "Y", "Z"]
    time_series.start_time = 0.0
    time_series.volume = volume

    if len(self.parser.zooms) > 3:
        time_series.sample_period = float(self.parser.zooms[3])
    else:
        # If no time dim, set sampling to 1 sec
        time_series.sample_period = 1

    if self.parser.units is not None and len(self.parser.units) > 1:
        time_series.sample_period_unit = self.parser.units[1]

    ts_h5_path = h5.path_for(self.storage_path, TimeSeriesVolumeH5, time_series.gid)
    nifti_data = self.parser.parse()
    with TimeSeriesVolumeH5(ts_h5_path) as ts_h5:
        ts_h5.store(time_series, scalars_only=True, store_references=True)
        for i in range(self.parser.time_dim_size):
            ts_h5.write_data_slice([nifti_data[:, :, :, i, ...]])
        data_shape = ts_h5.read_data_shape()

    ts_idx = TimeSeriesVolumeIndex()
    ts_idx.fill_from_has_traits(time_series)
    ts_idx.data_ndim = len(data_shape)
    ts_idx.data_length_1d, ts_idx.data_length_2d, ts_idx.data_length_3d, ts_idx.data_length_4d = \
        prepare_array_shape_meta(data_shape)
    return ts_idx
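# --- Illustration (not part of the importer) ---------------------------------
# A minimal numpy-only sketch of the axis reordering performed by the slicing
# loop above, assuming write_data_slice appends each [X, Y, Z] chunk along the
# leading (time) dimension. The array and its shape are made up for the example.
import numpy

nifti_like = numpy.random.rand(4, 5, 6, 10)  # (X, Y, Z, Time), NIFTI ordering
time_major = numpy.array([nifti_like[:, :, :, i] for i in range(nifti_like.shape[3])])
assert time_major.shape == (10, 4, 5, 6)     # (Time, X, Y, Z), TVB ordering
assert numpy.array_equal(time_major, numpy.moveaxis(nifti_like, 3, 0))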
def launch(self, data_file, apply_corrections=False, connectivity=None):
    """
    Execute import operations.
    """
    try:
        parser = NIFTIParser(data_file)

        # Create volume DT
        volume = Volume(storage_path=self.storage_path)
        volume.set_operation_id(self.operation_id)
        volume.origin = [[0.0, 0.0, 0.0]]
        volume.voxel_size = [parser.zooms[0], parser.zooms[1], parser.zooms[2]]
        if parser.units is not None and len(parser.units) > 0:
            volume.voxel_unit = parser.units[0]

        if parser.has_time_dimension or not connectivity:
            # Now create TimeSeries and fill it with data from NIFTI image
            time_series = TimeSeriesVolume(storage_path=self.storage_path)
            time_series.set_operation_id(self.operation_id)
            time_series.volume = volume
            time_series.title = "NIFTI Import - " + os.path.split(data_file)[1]
            time_series.labels_ordering = ["Time", "X", "Y", "Z"]
            time_series.start_time = 0.0

            if len(parser.zooms) > 3:
                time_series.sample_period = float(parser.zooms[3])
            else:
                # If no time dim, set sampling to 1 sec
                time_series.sample_period = 1

            if parser.units is not None and len(parser.units) > 1:
                time_series.sample_period_unit = parser.units[1]

            parser.parse(time_series, True)
            return [volume, time_series]
        else:
            region2volume_mapping = RegionVolumeMapping(storage_path=self.storage_path)
            region2volume_mapping.set_operation_id(self.operation_id)
            region2volume_mapping.volume = volume
            region2volume_mapping.connectivity = connectivity
            region2volume_mapping.title = "NIFTI Import - " + os.path.split(data_file)[1]
            region2volume_mapping.dimensions_labels = ["X", "Y", "Z"]
            region2volume_mapping.apply_corrections = apply_corrections

            parser.parse(region2volume_mapping, False)
            return [volume, region2volume_mapping]

    except ParseException as excep:
        logger = get_logger(__name__)
        logger.exception(excep)
        raise LaunchException(excep)
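# --- Illustration (not part of the importer) ---------------------------------
# A hedged sketch of the branching in launch() above, using only nibabel: a 4-D
# image, or a call without a connectivity, is imported as a TimeSeriesVolume,
# while a 3-D image plus a connectivity becomes a RegionVolumeMapping. The
# helper name is hypothetical and only mirrors that condition.
import nibabel as nib

def expected_import_result(data_file, connectivity=None):
    shape = nib.load(data_file).header.get_data_shape()
    has_time_dimension = len(shape) > 3
    if has_time_dimension or not connectivity:
        return "TimeSeriesVolume"
    return "RegionVolumeMapping"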
def _create_time_series(self, volume):
    # Now create TimeSeries and fill it with data from NIFTI image
    time_series = TimeSeriesVolume(storage_path=self.storage_path)
    time_series.set_operation_id(self.operation_id)
    time_series.volume = volume
    time_series.title = "NIFTI Import - " + os.path.split(self.data_file)[1]
    time_series.labels_ordering = ["Time", "X", "Y", "Z"]
    time_series.start_time = 0.0

    if len(self.parser.zooms) > 3:
        time_series.sample_period = float(self.parser.zooms[3])
    else:
        # If no time dim, set sampling to 1 sec
        time_series.sample_period = 1

    if self.parser.units is not None and len(self.parser.units) > 1:
        time_series.sample_period_unit = self.parser.units[1]

    self.parser.parse(time_series, True)
    return time_series
def _create_time_series(self, volume):
    # Now create TimeSeries and fill it with data from NIFTI image
    time_series = TimeSeriesVolume(storage_path=self.storage_path)
    time_series.volume = volume
    time_series.title = "NIFTI Import - " + os.path.split(self.data_file)[1]
    time_series.labels_ordering = ["Time", "X", "Y", "Z"]
    time_series.start_time = 0.0

    if len(self.parser.zooms) > 3:
        time_series.sample_period = float(self.parser.zooms[3])
    else:
        # If no time dim, set sampling to 1 sec
        time_series.sample_period = 1

    if self.parser.units is not None and len(self.parser.units) > 1:
        time_series.sample_period_unit = self.parser.units[1]

    self.parser.parse(time_series, True)
    return time_series
class NIFTIParser():
    """
    This class reads the content of a NIFTI file and builds / returns
    a TimeSeries instance filled with details.
    """

    def __init__(self, storage_path, operation_id):
        self.logger = get_logger(__name__)
        self.storage_path = storage_path
        self.operation_id = operation_id

    def parse(self, data_file):
        """
        Parse a NIFTI file and return a TimeSeries for it.
        """
        if data_file is None:
            raise ParseException("Please select a NIFTI file which contains data to import")
        if not os.path.exists(data_file):
            raise ParseException("Provided file %s does not exist" % data_file)

        try:
            nifti_image = nib.load(data_file)
        except nib.spatialimages.ImageFileError as e:
            self.logger.exception(e)
            msg = "File: %s does not have a valid NIFTI-1 format." % data_file
            raise ParseException(msg)

        nifti_image_hdr = nifti_image.get_header()

        # Create volume for the time series
        volume = Volume(storage_path=self.storage_path)
        volume.set_operation_id(self.operation_id)
        volume.origin = [[0.0, 0.0, 0.0]]

        # Now create TimeSeries and fill it with data from the NIFTI image
        time_series = TimeSeriesVolume(storage_path=self.storage_path)
        time_series.set_operation_id(self.operation_id)
        time_series.volume = volume
        time_series.title = "NIFTI Import - " + os.path.split(data_file)[1]
        time_series.labels_ordering = ["Time", "X", "Y", "Z"]
        time_series.start_time = 0.0

        # Copy data from the NIFTI file to our TVB storage.
        # In the NIFTI format time is the 4th dimension, while our TimeSeries has
        # it as the first dimension, so we have to adapt the imported data.

        # Check if there is a time dimension (4th dimension).
        nifti_data_shape = nifti_image_hdr.get_data_shape()
        has_time_dimension = len(nifti_data_shape) > 3
        time_dim_size = nifti_data_shape[3] if has_time_dimension else 1

        nifti_data = nifti_image.get_data()
        if has_time_dimension:
            for i in range(time_dim_size):
                time_series.write_data_slice([nifti_data[:, :, :, i, ...]])
        else:
            time_series.write_data_slice([nifti_data])
        time_series.close_file()  # Force closing the HDF5 file

        # Extract the sample unit measure
        units = nifti_image_hdr.get_xyzt_units()
        if units is not None and len(units) == 2:
            volume.voxel_unit = units[0]
            time_series.sample_period_unit = units[1]

        # Extract the sample rate.
        # Usually zooms defines values for x, y, z, time and other dimensions.
        zooms = nifti_image_hdr.get_zooms()
        if has_time_dimension:
            time_series.sample_period = float(zooms[3])
        else:
            time_series.sample_period = 1.0  # If no time dim, set sampling to 1 sec

        # Get voxel dimensions for x, y, z
        volume.voxel_size = [zooms[0], zooms[1], zooms[2]]

        return time_series
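# --- Illustration (not part of the parser) ------------------------------------
# The parser above uses nifti_image.get_header() and get_data(), which newer
# nibabel releases deprecate. A minimal sketch of the equivalent header reads
# with the current nibabel API; the file path is illustrative.
import nibabel as nib

img = nib.load("example.nii.gz")
hdr = img.header                      # replaces img.get_header()
data = img.get_fdata()                # replaces img.get_data()

shape = hdr.get_data_shape()          # (X, Y, Z) or (X, Y, Z, T)
zooms = hdr.get_zooms()               # voxel sizes, plus the TR when 4-D
space_unit, time_unit = hdr.get_xyzt_units()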
def __init__(self, input=numpy.array([[], []]), **kwargs):
    if isinstance(input, (Timeseries, TimeSeries)):
        if isinstance(input, Timeseries):
            self._tvb = deepcopy(input._tvb)
            self.ts_type = str(input.ts_type)
        elif isinstance(input, TimeSeries):
            self._tvb = deepcopy(input)
            if isinstance(input, TimeSeriesRegion):
                self.ts_type = "Region"
            elif isinstance(input, TimeSeriesSEEG):
                self.ts_type = "SEEG"
            elif isinstance(input, TimeSeriesEEG):
                self.ts_type = "EEG"
            elif isinstance(input, TimeSeriesMEG):
                self.ts_type = "MEG"
            elif isinstance(input, TimeSeriesVolume):
                self.ts_type = "Volume"
            elif isinstance(input, TimeSeriesSurface):
                self.ts_type = "Surface"
            else:
                self.ts_type = ""
                warning("Input TimeSeries %s is not one of the known TVB TimeSeries classes!" % str(input))
        for attr, value in kwargs.items():
            try:
                setattr(self, attr, value)
            except:
                setattr(self._tvb, attr, value)

    elif isinstance(input, numpy.ndarray):
        input = prepare_4D(input, self.logger)
        time = kwargs.pop("time", None)
        if time is not None:
            start_time = float(kwargs.pop("start_time", time[0]))
            sample_period = float(kwargs.pop("sample_period", numpy.mean(numpy.diff(time))))
            kwargs.update({"start_time": start_time, "sample_period": sample_period})

        # Initialize
        self.ts_type = kwargs.pop("ts_type", "Region")
        labels_ordering = kwargs.get("labels_ordering", None)

        # Get input sensors if any
        input_sensors = None
        if isinstance(kwargs.get("sensors", None), (TVBSensors, Sensors)):
            if isinstance(kwargs["sensors"], Sensors):
                input_sensors = kwargs["sensors"]._tvb
                self.ts_type = "%s sensor" % input_sensors.sensors_type
                kwargs.update({"sensors": input_sensors})
            else:
                input_sensors = kwargs["sensors"]

        # Create Timeseries
        if isinstance(input_sensors, TVBSensors) or \
                self.ts_type in ["SEEG sensor", "Internal sensor", "EEG sensor", "MEG sensor"]:
            # ...for Sensor Timeseries
            if labels_ordering is None:
                labels_ordering = list(LABELS_ORDERING)
                labels_ordering[2] = "%s sensor" % self.ts_type
                kwargs.update({"labels_ordering": labels_ordering})
            if isinstance(input_sensors, TVBSensorsInternal) or isequal_string(self.ts_type, "Internal sensor") \
                    or isequal_string(self.ts_type, "SEEG sensor"):
                self._tvb = TimeSeriesSEEG(data=input, **kwargs)
                self.ts_type = "SEEG sensor"
            elif isinstance(input_sensors, TVBSensorsEEG) or isequal_string(self.ts_type, "EEG sensor"):
                self._tvb = TimeSeriesEEG(data=input, **kwargs)
                self.ts_type = "EEG sensor"
            elif isinstance(input_sensors, TVBSensorsMEG) or isequal_string(self.ts_type, "MEG sensor"):
                self._tvb = TimeSeriesMEG(data=input, **kwargs)
                self.ts_type = "MEG sensor"
            else:
                raise_value_error("Not recognizing sensors of type %s:\n%s"
                                  % (self.ts_type, str(input_sensors)))
        else:
            input_surface = kwargs.pop("surface", None)
            if isinstance(input_surface, (Surface, TVBSurface)) or self.ts_type == "Surface":
                self.ts_type = "Surface"
                if isinstance(input_surface, Surface):
                    kwargs.update({"surface": input_surface._tvb})
                else:
                    kwargs.update({"surface": input_surface})
                if labels_ordering is None:
                    labels_ordering = list(LABELS_ORDERING)
                    labels_ordering[2] = "Vertex"
                    kwargs.update({"labels_ordering": labels_ordering})
                self._tvb = TimeSeriesSurface(data=input, **kwargs)
            elif isequal_string(self.ts_type, "Region"):
                if labels_ordering is None:
                    labels_ordering = list(LABELS_ORDERING)
                    labels_ordering[2] = "Region"
                    kwargs.update({"labels_ordering": labels_ordering})
                self._tvb = TimeSeriesRegion(data=input, **kwargs)
            elif isequal_string(self.ts_type, "Volume"):
                if labels_ordering is None:
                    labels_ordering = ["Time", "X", "Y", "Z"]
                    kwargs.update({"labels_ordering": labels_ordering})
                self._tvb = TimeSeriesVolume(data=input, **kwargs)
            else:
                self._tvb = TimeSeries(data=input, **kwargs)

    if not numpy.all([dim_label in self._tvb.labels_dimensions.keys()
                      for dim_label in self._tvb.labels_ordering]):
        warning("Lack of correspondence between time series labels_ordering %s\n"
                "and labels_dimensions!: %s"
                % (self._tvb.labels_ordering, self._tvb.labels_dimensions.keys()))

    self._tvb.configure()
    self.configure_time()
    self.configure_sample_rate()
    if len(self.title) == 0:
        self._tvb.title = "%s Time Series" % self.ts_type
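# --- Illustration (not part of the wrapper) -----------------------------------
# A small check of the time-vector handling above: when a "time" kwarg is given,
# the start time defaults to its first entry and the sampling period to the mean
# of successive differences. Values are made up for the example.
import numpy

time = numpy.arange(0.0, 10.0, 0.5)
start_time = float(time[0])                            # 0.0
sample_period = float(numpy.mean(numpy.diff(time)))    # 0.5
assert numpy.isclose(sample_period, 0.5)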