Example 1
def prepare_4D(data, logger=initialize_logger(__name__)):
    if data.ndim < 2:
        logger.error("The data array is expected to be at least 2D!")
        raise ValueError("The data array is expected to be at least 2D!")
    if data.ndim < 4:
        # Append trailing singleton dimensions until the array is 4D
        if data.ndim == 2:
            data = numpy.expand_dims(data, 2)
        data = numpy.expand_dims(data, 3)
    return data
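A minimal usage sketch (hypothetical, assuming the project's initialize_logger simply wraps the standard logging module): trailing singleton dimensions are appended until the array is 4D, so a (100, 5) array becomes (100, 5, 1, 1).

import logging
import numpy

logger = logging.getLogger(__name__)   # stand-in for initialize_logger

data = numpy.zeros((100, 5))           # (time, space)
data4d = prepare_4D(data, logger)
assert data4d.shape == (100, 5, 1, 1)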
Example 2
class H5Reader(object):

    logger = initialize_logger(__name__)

    H5_TYPE_ATTRIBUTE = H5Writer().H5_TYPE_ATTRIBUTE
    H5_SUBTYPE_ATTRIBUTE = H5Writer().H5_SUBTYPE_ATTRIBUTE
    H5_TYPES_ATTRIBUTES = [H5_TYPE_ATTRIBUTE, H5_SUBTYPE_ATTRIBUTE]

    def _open_file(self, name, path=None, h5_file=None):
        if h5_file is None:
            if not os.path.isfile(path):
                raise ValueError("%s file %s does not exist" % (name, path))

            self.logger.info("Starting to read %s from: %s" % (name, path))
            h5_file = h5py.File(path, 'r', libver='latest')
        return h5_file

    def _close_file(self, h5_file, close_file=True):
        if close_file:
            h5_file.close()

    def _log_success(self, name, path=None):
        if path is not None:
            self.logger.info("Successfully read %s from: %s" % (name, path))

    def read_dictionary(self, path=None, h5_file=None, type=None, close_file=True):
        """
        :param path: Path towards a dictionary H5 file
        :return: dict
        """
        h5_file = self._open_file("Dictionary", path, h5_file)
        dictionary = H5GroupHandlers(self.H5_SUBTYPE_ATTRIBUTE).read_dictionary_from_group(h5_file, type)
        self._close_file(h5_file, close_file)
        self._log_success("Dictionary", path)
        return dictionary

    def read_list_of_dicts(self, path=None, h5_file=None, type=None, close_file=True):
        h5_file = self._open_file("List of dictionaries", path, h5_file)
        list_of_dicts = []
        idx = 0
        h5_group_handlers = H5GroupHandlers(self.H5_SUBTYPE_ATTRIBUTE)
        # Groups are named "0", "1", ...; stop at the first missing index
        while True:
            try:
                dict_group = h5_file[str(idx)]
            except KeyError:
                break
            list_of_dicts.append(h5_group_handlers.read_dictionary_from_group(dict_group, type))
            idx += 1
        self._close_file(h5_file, close_file)
        self._log_success("List of dictionaries", path)
        return list_of_dicts
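read_list_of_dicts expects one numbered group per dictionary at the file root ("0", "1", ...), stopping at the first missing index. A hypothetical sketch of that on-disk layout, using only h5py (how H5GroupHandlers actually serializes each value is not shown here):

import h5py

with h5py.File("dicts.h5", "w") as f:
    for i, d in enumerate([{"a": 1}, {"b": 2}]):
        group = f.create_group(str(i))      # groups named "0", "1", ...
        for key, value in d.items():
            group.attrs.create(key, value)  # simplest possible encoding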
Example 3
def read_edf(path,
             sensors,
             rois_selection=None,
             label_strip_fun=None,
             time_units="ms",
             exclude_channels=()):
    logger = initialize_logger(__name__)

    logger.info("Reading empirical dataset from edf file...")
    try:
        data, times, channel_names = read_edf_with_mne(path, exclude_channels)
    except Exception:
        logger.warning(
            "Reading edf file with mne failed! Trying with pyEDFlib...")
        try:
            data, times, channel_names = read_edf_with_pyedflib(
                path, exclude_channels)
        except Exception:
            logger.error("Failed to read edf file both with MNE and pyEDFlib!")
            raise

    if not callable(label_strip_fun):
        label_strip_fun = lambda label: label

    channel_names = [label_strip_fun(s) for s in channel_names]

    rois = []
    rois_inds = []
    rois_lbls = []
    if rois_selection is None or len(rois_selection) == 0:
        rois_selection = sensors.labels

    logger.info("Selecting target signals from dataset...")
    for sensor_ind, sensor_label in enumerate(sensors.labels):
        if sensor_label in rois_selection and sensor_label in channel_names:
            rois.append(channel_names.index(sensor_label))
            rois_inds.append(sensor_ind)
            rois_lbls.append(sensor_label)

    data = data[rois].T

    # Assuming that the edf file time unit is "sec"
    if ensure_string(time_units).find("ms") == 0:
        times = 1000 * times
    # sort_inds = np.argsort(rois_lbls)
    rois = np.array(rois)  # [sort_inds]
    rois_inds = np.array(rois_inds)  # [sort_inds]
    rois_lbls = np.array(rois_lbls)  # [sort_inds]
    # data = data[:, sort_inds]

    return data, times, rois, rois_inds, rois_lbls
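For reference, a plausible implementation of read_edf_with_mne (an assumption: the actual helper is not shown in this excerpt), using MNE's public EDF reader:

import mne

def read_edf_with_mne(path, exclude_channels=()):
    # preload=True reads the signal data into memory
    raw = mne.io.read_raw_edf(path, exclude=list(exclude_channels), preload=True)
    return raw.get_data(), raw.times, raw.ch_names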
Example 4
class HeadService(object):

    logger = initialize_logger(__name__)

    def _assert_indices_from_labels(self, labels_or_indices, labels):
        indices = []
        labels_list = list(labels)
        for lbl_or_ind in labels_or_indices:
            if isinstance(lbl_or_ind, string_types):
                indices.append(labels_list.index(lbl_or_ind))
            else:
                indices.append(lbl_or_ind)
        return indices

    def slice_connectivity(self, connectivity, labels_or_indices):
        labels_or_indices = np.array(self._assert_indices_from_labels(labels_or_indices, connectivity.region_labels))
        out_conn = deepcopy(connectivity)
        out_conn.weights = connectivity.weights[labels_or_indices][:, labels_or_indices]
        out_conn.tract_lengths = connectivity.tract_lengths[labels_or_indices][:, labels_or_indices]
        out_conn.centres = connectivity.centres[labels_or_indices]
        out_conn.areas = connectivity.areas[labels_or_indices]
        out_conn.orientations = connectivity.orientations[labels_or_indices]
        out_conn.cortical = connectivity.cortical[labels_or_indices]
        out_conn.hemispheres = connectivity.hemispheres[labels_or_indices]
        out_conn.region_labels = connectivity.region_labels[labels_or_indices]
        out_conn.configure()
        return out_conn

    def slice_sensors(self, sensors, labels_or_indices):
        labels_or_indices = np.array(self._assert_indices_from_labels(labels_or_indices, sensors.labels))
        out_sensors = deepcopy(sensors)
        out_sensors.labels = sensors.labels[labels_or_indices]
        out_sensors.locations = sensors.locations[labels_or_indices]
        if out_sensors.has_orientations:
            out_sensors.orientations = sensors.orientations[labels_or_indices]
        out_sensors.configure()
        return out_sensors

    def sensors_in_electrodes_disconnectivity(self, sensors, sensors_labels=[]):
        if len(sensors_labels) < 2:
            sensors_labels = sensors.labels
        n_sensors = len(sensors_labels)
        # Initialize before the if, so the return value is always defined
        disconnectivity = np.ones((n_sensors, n_sensors))
        elec_labels, elec_inds = sensors.group_sensors_to_electrodes(sensors_labels)
        if len(elec_labels) >= 2:
            for ch in elec_inds:
                # Zero the block of sensor pairs that share an electrode
                disconnectivity[np.ix_(ch, ch)] = 0.0
        return disconnectivity
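The loop zeroes the square block of sensor pairs that share an electrode; a small demonstration of the block indexing:

import numpy as np

disconnectivity = np.ones((4, 4))
ch = [1, 2]                              # indices of sensors on one electrode
disconnectivity[np.ix_(ch, ch)] = 0.0    # rows 1-2 x columns 1-2 set to 0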
Example 5
class HeadService(object):

    logger = initialize_logger(__name__)

    def sensors_in_electrodes_disconnectivity(self,
                                              sensors,
                                              sensors_labels=[]):
        if len(sensors_labels) < 2:
            sensors_labels = sensors.labels
        n_sensors = len(sensors_labels)
        disconnectivity = np.ones((n_sensors, n_sensors))
        elec_labels, elec_inds = sensors.group_sensors_to_electrodes(
            sensors_labels)
        if len(elec_labels) >= 2:
            for ch in elec_inds:
                disconnectivity[np.ix_(ch, ch)] = 0.0
        return disconnectivity
class TVBReader(object):
    logger = initialize_logger(__name__)

    def read_connectivity(self, path):
        if os.path.isfile(path):
            conn = Connectivity.from_file(path)
            conn.file_path = path
            conn.configure()
            return conn
        else:
            raise_value_error(
                ("\n No Connectivity file found at path %s!" % str(path)))

    def read_cortical_surface(self, path, surface_class):
        if os.path.isfile(path):
            surf = surface_class.from_file(path)
            surf.configure()
            return surf
        else:
            self.logger.warning("\nNo %s Surface file found at path %s!" %
                                (surface_class.surface_subtype, str(path)))
            return None

    def read_region_mapping(self, path):
        if os.path.isfile(path):
            return region_mapping.RegionMapping.from_file(path)
        else:
            self.logger.warning("\nNo Region Mapping file found at path %s!" %
                                str(path))
            return None

    def read_volume_mapping(self, path):
        if os.path.isfile(path):
            return region_mapping.RegionVolumeMapping.from_file(path)
        else:
            self.logger.warning("\nNo Volume Mapping file found at path %s!" %
                                str(path))
            return None

    def read_t1(self, path):
        if os.path.isfile(path):
            return structural.StructuralMRI.from_file(path)
        else:
            self.logger.warning("\nNo Structural MRI file found at path %s!" %
                                str(path))
            return None

    def read_multiple_sensors_and_projections(self,
                                              sensors_files,
                                              root_folder,
                                              s_type,
                                              atlas=""):
        sensors_set = OrderedDict()
        if isinstance(sensors_files, (list, tuple)):
            if isinstance(sensors_files, tuple):
                sensors_files = [sensors_files]
            for s_files in sensors_files:
                sensors, projection = \
                    self.read_sensors_and_projection(s_files, root_folder, s_type, atlas)
                sensors_set[sensors] = projection
        return sensors_set

    def read_sensors_and_projection(self,
                                    filename,
                                    root_folder,
                                    s_type,
                                    atlas=""):
        def get_sensors_name(sensors_file, name):
            if len(sensors_file) > 1:
                gain_file = sensors_file[1]
            else:
                gain_file = ""
            return name + gain_file.replace(".txt", "").replace(name, "")

        filename = ensure_list(filename)
        path = os.path.join(root_folder, filename[0])
        if os.path.isfile(path):
            sensors = \
                SensorTypesToClassesDict.get(s_type, Sensors).from_file(path)
            sensors.configure()
            if len(filename) > 1:
                projection = self.read_projection(
                    os.path.join(root_folder, atlas, filename[1]), s_type)
            else:
                projection = None
            sensors.name = get_sensors_name(filename, sensors._ui_name)
            sensors.configure()
            return sensors, projection
        else:
            self.logger.warning("\nNo Sensor file found at path %s!" %
                                str(path))
            # Callers unpack two values, so return a pair
            return None, None

    def read_projection(self, path, projection_type):
        if os.path.isfile(path):
            return SensorTypesToProjectionDict.get(
                projection_type, ProjectionMatrix).from_file(path)
        else:
            self.logger.warning(
                "\nNo Projection Matrix file found at path %s!" % str(path))
            return None

    def read_head(self,
                  root_folder,
                  name='',
                  atlas="default",
                  connectivity_file="connectivity.zip",
                  cortical_surface_file="surface_cort.zip",
                  subcortical_surface_file="surface_subcort.zip",
                  cortical_region_mapping_file="region_mapping_cort.txt",
                  subcortical_region_mapping_file="region_mapping_subcort.txt",
                  eeg_sensors_files=[("eeg_brainstorm_65.txt",
                                      "gain_matrix_eeg_65_surface_16k.npy")],
                  meg_sensors_files=[("meg_brainstorm_276.txt",
                                      "gain_matrix_meg_276_surface_16k.npy")],
                  seeg_sensors_files=[
                      ("seeg_xyz.txt", "seeg_dipole_gain.txt"),
                      ("seeg_xyz.txt", "seeg_distance_gain.txt"),
                      ("seeg_xyz.txt", "seeg_regions_distance_gain.txt"),
                      ("seeg_588.txt", "gain_matrix_seeg_588_surface_16k.npy")
                  ],
                  vm_file="aparc+aseg.nii.gz",
                  t1_file="T1.nii.gz"):

        conn = self.read_connectivity(
            os.path.join(root_folder, atlas, connectivity_file))
        cort_srf = \
            self.read_cortical_surface(os.path.join(root_folder, cortical_surface_file), CorticalSurface)
        cort_rm = self.read_region_mapping(
            os.path.join(root_folder, atlas, cortical_region_mapping_file))
        if cort_rm is not None:
            cort_rm.connectivity = conn._tvb
            if cort_srf is not None:
                cort_rm.surface = cort_srf._tvb
        subcort_srf = \
            self.read_cortical_surface(os.path.join(root_folder, subcortical_surface_file), CorticalSurface)
        subcort_rm = self.read_region_mapping(
            os.path.join(root_folder, atlas, subcortical_region_mapping_file))
        if subcort_rm is not None:
            subcort_rm.connectivity = conn._tvb
            if subcort_srf is not None:
                subcort_rm.surface = subcort_srf._tvb
        vm = self.read_volume_mapping(os.path.join(root_folder, atlas,
                                                   vm_file))
        t1 = self.read_t1(os.path.join(root_folder, t1_file))
        sensors = OrderedDict()
        sensors[SensorTypes.TYPE_EEG.value] = \
            self.read_multiple_sensors_and_projections(eeg_sensors_files, root_folder,
                                                       SensorTypes.TYPE_EEG.value, atlas)
        sensors[SensorTypes.TYPE_MEG.value] = \
            self.read_multiple_sensors_and_projections(meg_sensors_files, root_folder,
                                                       SensorTypes.TYPE_MEG.value, atlas)
        sensors[SensorTypes.TYPE_SEEG.value] = \
            self.read_multiple_sensors_and_projections(seeg_sensors_files, root_folder,
                                                       SensorTypes.TYPE_SEEG.value, atlas)
        if len(name) == 0:
            name = atlas
        return Head(conn, sensors, cort_srf, subcort_srf, cort_rm, subcort_rm,
                    vm, t1, name)
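A hedged usage sketch (the folder layout and path are hypothetical; the defaults above describe the file names expected under root_folder and root_folder/atlas):

# reader = TVBReader()
# head = reader.read_head("/path/to/subject_folder", atlas="default")
# head.configure()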
Example 7
    def __init__(self, logger=initialize_logger(__name__)):

        self.logger = logger
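Note that a default such as logger=initialize_logger(__name__) is evaluated once, when the def statement runs, so all callers share that logger object. A sketch of the more common idiom (assuming the project's initialize_logger helper is importable):

class Service(object):
    def __init__(self, logger=None):
        # the default is evaluated per call, not once at import time
        self.logger = logger if logger is not None else initialize_logger(__name__)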
Example 8
class TimeSeriesService(object):
    logger = initialize_logger(__name__)

    def __init__(self, logger=initialize_logger(__name__)):

        self.logger = logger

    def decimate(self, time_series, decim_ratio, **kwargs):
        if decim_ratio > 1:
            return time_series.duplicate(data=time_series.data[0:time_series.time_length:decim_ratio],
                                         sample_period=float(decim_ratio*time_series.sample_period), **kwargs)
        else:
            return time_series.duplicate()

    def decimate_by_filtering(self, time_series, decim_ratio, **kwargs):
        if decim_ratio > 1:
            decim_data, decim_time, decim_dt, decim_n_times = decimate_signals(time_series.squeezed,
                                                                               time_series.time, decim_ratio)
            return time_series.duplicate(data=decim_data, sample_period=float(decim_dt), **kwargs)
        else:
            return time_series.duplicate(**kwargs)

    def convolve(self, time_series, win_len=None, kernel=None, **kwargs):
        n_kernel_points = int(np.round(win_len))
        if kernel is None:
            kernel = np.ones((n_kernel_points, 1, 1, 1)) / n_kernel_points
        else:
            kernel = kernel * np.ones((n_kernel_points, 1, 1, 1))
        return time_series.duplicate(data=convolve(time_series.data, kernel, mode='same'), **kwargs)

    def hilbert_envelope(self, time_series, **kwargs):
        return time_series.duplicate(data=np.abs(hilbert(time_series.data, axis=0)), **kwargs)

    def spectrogram_envelope(self, time_series, lpf=None, hpf=None, nperseg=None, **kwargs):
        data, time = spectrogram_envelope(time_series.squeezed, time_series.sample_rate, lpf, hpf, nperseg)
        if len(time_series.sample_period_unit) > 0 and time_series.sample_period_unit[0] == "m":
            time *= 1000
        return time_series.duplicate(data=data, start_time=time_series.start_time + time[0],
                                     sample_period=np.diff(time).mean(), **kwargs)

    def abs_envelope(self, time_series, **kwargs):
        return time_series.duplicate(data=abs_envelope(time_series.data), **kwargs)

    def detrend(self, time_series, type='linear', **kwargs):
        return time_series.duplicate(data=detrend(time_series.data, axis=0, type=type), **kwargs)

    def normalize(self, time_series, normalization=None, axis=None, percent=None, **kwargs):
        return time_series.duplicate(data=normalize_signals(time_series.data, normalization, axis, percent), **kwargs)

    def filter(self, time_series, lowcut=None, highcut=None, mode='bandpass', order=3, **kwargs):
        return time_series.duplicate(data=filter_data(time_series.data, time_series.sample_rate,
                                                     lowcut, highcut, mode, order), **kwargs)

    def log(self, time_series, **kwargs):
        return time_series.duplicate(data=np.log(time_series.data), **kwargs)

    def exp(self, time_series, **kwargs):
        return time_series.duplicate(data=np.exp(time_series.data), **kwargs)

    def abs(self, time_series, **kwargs):
        return time_series.duplicate(data=np.abs(time_series.data), **kwargs)

    def power(self, time_series):
        return np.sum(self.square(self.normalize(time_series, "mean", axis=0)).squeezed, axis=0)

    def square(self, time_series, **kwargs):
        return time_series.duplicate(data=time_series.data ** 2, **kwargs)

    def correlation(self, time_series):
        return np.corrcoef(time_series.squeezed.T)

    def _compile_select_fun(self, **kwargs):
        select_fun = []
        for dim, lbl in enumerate(["times", "variables", "labels", "samples"]):
            index = ensure_list(kwargs.pop(lbl, []))
            if len(index) > 0:
                # Bind index and dim as defaults to avoid the late-binding closure pitfall
                select_fun.append(lambda ts, index=index, dim=dim: ts.get_subset(index, dim))
        return select_fun

    def select(self, time_series, select_fun=None, **kwargs):
        if select_fun is None:
            select_fun = self._compile_select_fun(**kwargs)
        for fun in select_fun:
            time_series = fun(time_series)
        return time_series, select_fun

    def concatenate(self, time_series_list, dim, **kwargs):
        time_series_list = ensure_list(time_series_list)
        n_ts = len(time_series_list)
        if n_ts > 0:
            out_time_series, select_fun = self.select(time_series_list[0], **kwargs)
            if n_ts > 1:
                for its, time_series in enumerate(time_series_list[1:], start=1):
                    if np.float32(out_time_series.sample_period) != np.float32(time_series.sample_period):
                        raise_value_error("Timeseries concatenation failed!\n"
                                          "Timeseries %d has a different time step (%s) \n "
                                          "than the concatenated ones (%s)!" %
                                          (its, str(np.float32(time_series.sample_period)),
                                           str(np.float32(out_time_series.sample_period))))
                    else:
                        time_series = self.select(time_series, select_fun)[0]
                        try:
                            out_time_series.set_data(np.concatenate([out_time_series.data, time_series.data], axis=dim))
                            if len(out_time_series.labels_dimensions[out_time_series.labels_ordering[dim]]) > 0:
                                dim_label = out_time_series.labels_ordering[dim]
                                out_time_series.labels_dimensions[dim_label] = \
                                    np.array(ensure_list(out_time_series.labels_dimensions[dim_label]) +
                                             ensure_list(time_series.labels_dimensions[dim_label]))
                        except Exception:
                            raise_value_error("Timeseries concatenation failed!\n"
                                              "Timeseries %d has shape %s, but the concatenated ones have shape %s!" %
                                              (its, str(time_series.shape), str(out_time_series.shape)))
            return out_time_series
        else:
            raise_value_error("Cannot concatenate empty list of TimeSeries!")

    def concatenate_in_time(self, time_series_list, **kwargs):
        return self.concatenate(time_series_list, 0, **kwargs)

    def concatenate_variables(self, time_series_list, **kwargs):
        return self.concatenate(time_series_list, 1, **kwargs)

    def concatenate_in_space(self, time_series_list, **kwargs):
        return self.concatenate(time_series_list, 2, **kwargs)

    def concatenate_samples(self, time_series_list, **kwargs):
        return self.concatenate(time_series_list, 3, **kwargs)

    def select_by_metric(self, time_series, metric, metric_th=None, metric_percentile=None, nvals=None):
        selection = np.unique(select_greater_values_array_inds(metric, metric_th, metric_percentile, nvals))
        return time_series.get_subspace_by_index(selection), selection

    def select_by_power(self, time_series, power=np.array([]), power_th=None):
        if len(power) != time_series.number_of_labels:
            power = self.power(time_series)
        return self.select_by_metric(time_series, power, power_th)

    def select_by_hierarchical_group_metric_clustering(self, time_series, distance, disconnectivity=np.array([]),
                                                       metric=None, n_groups=10, members_per_group=1):
        selection = np.unique(select_by_hierarchical_group_metric_clustering(distance, disconnectivity, metric,
                                                                             n_groups, members_per_group))
        return time_series.get_subspace_by_index(selection), selection

    def select_by_correlation_power(self, time_series, correlation=np.array([]), disconnectivity=np.array([]),
                                    power=np.array([]), n_groups=10, members_per_group=1):
        if correlation.shape[0] != time_series.number_of_labels:
            correlation = self.correlation(time_series)
        if len(power) != time_series.number_of_labels:
            power = self.power(time_series)
        return self.select_by_hierarchical_group_metric_clustering(time_series, 1 - correlation,
                                                                   disconnectivity, power, n_groups, members_per_group)

    def select_by_projection_power(self, time_series, projection=np.array([]),
                                   disconnectivity=np.array([]), power=np.array([]),
                                   n_groups=10, members_per_group=1):
        if len(power) != time_series.number_of_labels:
            power = self.power(time_series)
        return self.select_by_hierarchical_group_metric_clustering(time_series, 1 - np.corrcoef(projection),
                                                                   disconnectivity, power, n_groups, members_per_group)

    def select_by_rois_proximity(self, time_series, proximity, proximity_th=None, percentile=None, n_signals=None):
        initial_selection = range(time_series.number_of_labels)
        selection = []
        for prox in proximity:
            selection += (
                np.array(initial_selection)[select_greater_values_array_inds(prox, proximity_th,
                                                                             percentile, n_signals)]).tolist()
        selection = np.unique(selection)
        return time_series.get_subspace_by_index(selection), selection

    def select_by_rois(self, time_series, rois, all_labels):
        # Convert any integer indices to labels
        for ir, roi in enumerate(rois):
            if not isinstance(roi, string_types):
                rois[ir] = all_labels[roi]
        return time_series.get_subspace_by_labels(rois), rois

    def compute_seeg(self, source_time_series, sensors, projection=None, sum_mode="lin", **kwargs):
        if np.all(sum_mode == "exp"):
            seeg_fun = lambda source, projection_data: self.compute_seeg_exp(source.squeezed, projection_data)
        else:
            seeg_fun = lambda source, projection_data: self.compute_seeg_lin(source.squeezed, projection_data)
        # Copy the module-level default so it is not mutated in place
        labels_ordering = list(LABELS_ORDERING)
        labels_ordering[1] = "SEEG"
        labels_ordering[2] = "SEEG Sensor"
        kwargs.update({"labels_ordering": labels_ordering,
                       "start_time": source_time_series.start_time,
                       "sample_period": source_time_series.sample_period,
                       "sample_period_unit": source_time_series.sample_period_unit})
        if isinstance(sensors, dict):
            seeg = OrderedDict()
            for sensor, projection in sensors.items():
                kwargs.update({"labels_dimensions": {labels_ordering[2]: sensor.labels,
                                                     labels_ordering[1]: [sensor.name]},
                               "sensors": sensor})
                seeg[sensor.name] = \
                    source_time_series.__class__(
                        np.expand_dims(seeg_fun(source_time_series, projection.projection_data), 1), **kwargs)
            return seeg
        else:
            kwargs.update({"labels_dimensions": {labels_ordering[2]: sensors.labels,
                                                 labels_ordering[1]: [sensors.name]},
                           "sensors": sensors})
            return TimeSeriesSEEG(
                np.expand_dims(seeg_fun(source_time_series, projection.projection_data), 1), **kwargs)

    def compute_seeg_lin(self, source_time_series, projection_data):
        return source_time_series.dot(projection_data.T)

    def compute_seeg_exp(self, source_time_series, projection_data):
        return np.log(np.exp(source_time_series).dot(projection_data.T))
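compute_seeg_lin is a plain gain-matrix projection: for sources x (time x regions) and projection matrix G (sensors x regions), seeg = x · Gᵀ; the "exp" variant applies the same projection to the exponentiated sources and returns the log. A small numeric shape check:

import numpy as np

x = np.random.rand(1000, 88)        # time x regions
G = np.random.rand(64, 88)          # sensors x regions
seeg_lin = x.dot(G.T)               # time x sensors
seeg_exp = np.log(np.exp(x).dot(G.T))
assert seeg_lin.shape == seeg_exp.shape == (1000, 64)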
Example 9
def parse_csv(fname, merge=True):
    if '*' in fname:
        import glob
        return parse_csv(glob.glob(fname), merge=merge)
    if isinstance(fname, (list, tuple)):
        csv = []
        for csv_dict in [parse_csv(_) for _ in fname]:
            if len(csv_dict) > 0:
                csv.append(csv_dict)
        if merge:
            csv = merge_csv_data(*csv)
        return csv

    try:
        lines = []
        with open(fname, 'r') as fd:
            for line in fd.readlines():
                if not line.startswith('#'):
                    lines.append(line.strip().split(','))
        names = [field.split('.') for field in lines[0]]
        data = []
        for id_line, line in enumerate(lines[1:]):
            append_data = True
            for iline in range(len(line)):
                try:
                    line[iline] = float(line[iline])
                except ValueError:
                    logger = initialize_logger(__name__)
                    logger.warning("Failed to convert string " + line[iline] +
                                   " to float!" + "\nSkipping line " +
                                   str(id_line) + ":  " + str(line) + "!")
                    append_data = False
                    break
            if append_data:
                data.append(line)
        data = np.array(data)

        namemap = {}
        maxdims = {}
        for i, name in enumerate(names):
            if name[0] not in namemap:
                namemap[name[0]] = []
            namemap[name[0]].append(i)
            if len(name) > 1:
                maxdims[name[0]] = name[1:]

        for name in maxdims.keys():
            dims = []
            for dim in maxdims[name]:
                dims.append(int(dim))
            maxdims[name] = tuple(reversed(dims))

        # data in linear order per Stan, e.g. mat is col maj
        # TODO array is row maj, how to distinguish matrix vs array[,]?
        data_ = {}
        for name, idx in namemap.items():
            new_shape = (-1, ) + maxdims.get(name, ())
            data_[name] = data[:, idx].reshape(new_shape)

        return data_
    except Exception:
        warning("Failed to read %s!" % fname)
        return {}
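parse_csv assumes Stan-style headers, where a column named w.2.3 is element (2, 3) of parameter w and values are laid out column-major; maxdims stores the reversed dimensions, so each parameter block is reshaped to (-1,) plus those reversed dimensions. A minimal reshape demonstration:

import numpy as np

# 2 draws of a 2x3 matrix "w", columns ordered w.1.1, w.2.1, w.1.2, ...
draws = np.arange(12).reshape(2, 6)
w = draws.reshape((-1, 3, 2))   # new_shape = (-1,) + reversed dims (3, 2)
assert w.shape == (2, 3, 2)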
Example 10
class Head(object):
    """
    One patient virtualization. Fully configured for defining hypothesis on it.
    """
    logger = initialize_logger(__name__)

    def __init__(self,
                 connectivity,
                 sensors=None,
                 cortical_surface=None,
                 subcortical_surface=None,
                 cortical_region_mapping=None,
                 subcortical_region_mapping=None,
                 region_volume_mapping=None,
                 t1=None,
                 name=''):
        self.name = name
        self.connectivity = connectivity
        self.cortical_surface = cortical_surface
        self.subcortical_surface = subcortical_surface
        self.cortical_region_mapping = cortical_region_mapping
        self.subcortical_region_mapping = subcortical_region_mapping
        self.region_volume_mapping = region_volume_mapping
        self.t1 = t1
        self.sensors = sensors if sensors is not None else OrderedDict()

    @property
    def number_of_regions(self):
        return self.connectivity.number_of_regions

    @property
    def cortex(self):
        cortex = Cortex()
        cortex.region_mapping_data = self.cortical_region_mapping
        cortex = cortex.populate_cortex(self.cortical_surface._tvb, {})
        for s_type, sensors in self.sensors.items():
            if isinstance(sensors, OrderedDict) and len(sensors) > 0:
                projection = list(sensors.values())[0]
                if projection is not None:
                    setattr(cortex, s_type.lower(), projection.projection_data)
        cortex.configure()
        return cortex

    def configure(self):
        self.connectivity.configure()
        self.cortical_surface.configure()
        self.subcortical_surface.configure()
        for s_type, sensors_set in self.sensors.items():
            for sensor, projection in sensors_set.items():
                sensor.configure()

    def filter_regions(self, filter_arr):
        return self.connectivity.region_labels[filter_arr]

    def get_sensors(self,
                    s_type=SensorTypes.TYPE_EEG.value,
                    name_or_index=None):
        sensors_set = OrderedDict()
        if s_type not in SensorTypesNames:
            raise_value_error("Invalid input sensor type!: %s" % str(s_type))
        else:
            sensors_set = self.sensors.get(s_type, None)
        out_sensor = None
        out_projection = None
        if isinstance(sensors_set, OrderedDict):
            if isinstance(name_or_index, string_types):
                for sensor, projection in sensors_set.items():
                    if isequal_string(sensor.name, name_or_index):
                        out_sensor = sensor
                        out_projection = projection
            elif is_integer(name_or_index):
                out_sensor = list(sensors_set.keys())[name_or_index]
                out_projection = list(sensors_set.values())[name_or_index]
            else:
                return sensors_set
        return out_sensor, out_projection

    def set_sensors(self,
                    input_sensors,
                    s_type=SensorTypes.TYPE_EEG.value,
                    reset=False):
        if not isinstance(input_sensors, (Sensors, dict, list, tuple)):
            return raise_value_error("Invalid input sensors instance!: %s" %
                                     str(input_sensors))
        if s_type not in SensorTypesNames:
            raise_value_error("Invalid input sensor type!: %s" % str(s_type))
        sensors_set = self.get_sensors(s_type)[0]
        if reset is True:
            sensors_set = OrderedDict()
        if isinstance(input_sensors, dict):
            input_projections = input_sensors.values()
            input_sensors = input_sensors.keys()
        else:
            if isinstance(input_sensors, Sensors):
                input_sensors = [input_sensors]
            else:
                input_sensors = list(input_sensors)
            input_projections = [None] * len(input_sensors)

        for sensor, projection in zip(input_sensors, input_projections):
            if not isinstance(sensor, Sensors):
                raise_value_error(
                    "Input sensors:\n%s"
                    "\nis not a valid Sensors object of type %s!" %
                    (str(sensor), s_type))
            if sensor.sensors_type != s_type:
                raise_value_error("Sensors %s \nare not of type %s!" %
                                  (str(sensor), s_type))
            if not isinstance(projection, ProjectionMatrix):
                warning("projection is not set for sensor with name:\n%s!" %
                        sensor.name)
                sensors_set.update({sensor: None})
            else:
                if projection.projection_type != SensorTypesToProjectionDict[
                        s_type]:
                    raise_value_error(
                        "Disagreement between sensors' type %s and projection's type %s!"
                        % (sensor.sensors_type, projection.projection_type))
                good_sensor_shape = (sensor.number_of_sensors,
                                     self.number_of_regions)
                if projection.projection_data.shape != good_sensor_shape:
                    warning(
                        "projections' shape %s of sensor %s "
                        "is not equal to (number_of_sensors, number_of_regions)=%s!"
                        % (str(projection.projection_data.shape), sensor.name,
                           str(good_sensor_shape)))
                sensors_set.update({sensor: projection})

        self.sensors[s_type] = sensors_set
Example 11
class TimeSeries(TimeSeriesTVB):
    logger = initialize_logger(__name__)

    def __init__(self, data, **kwargs):
        super(TimeSeries, self).__init__(**kwargs)
        self.data = prepare_4d(data, self.logger)

    def duplicate(self, **kwargs):
        duplicate = deepcopy(self)
        for attr, value in kwargs.items():
            setattr(duplicate, attr, value)
        duplicate.configure()
        return duplicate

    def _get_index_of_state_variable(self, sv_label):
        try:
            sv_index = numpy.where(self.variables_labels == sv_label)[0][0]
        except IndexError:
            self.logger.error(
                "Cannot access index of state variable label: %s. Existing state variables: %s"
                % (sv_label, self.variables_labels))
            raise
        return sv_index

    def get_state_variable(self, sv_label):
        sv_data = self.data[:,
                            self._get_index_of_state_variable(sv_label), :, :]
        subspace_labels_dimensions = deepcopy(self.labels_dimensions)
        subspace_labels_dimensions[self.labels_ordering[1]] = [sv_label]
        if sv_data.ndim == 3:
            sv_data = numpy.expand_dims(sv_data, 1)
        return self.duplicate(data=sv_data,
                              labels_dimensions=subspace_labels_dimensions)

    def _get_indices_for_labels(self, list_of_labels):
        list_of_indices_for_labels = []
        for label in list_of_labels:
            try:
                space_index = numpy.where(self.space_labels == label)[0][0]
            except IndexError:
                self.logger.error(
                    "Cannot access index of space label: %s. Existing space labels: %s"
                    % (label, self.space_labels))
                raise
            list_of_indices_for_labels.append(space_index)
        return list_of_indices_for_labels

    def get_subspace_by_index(self, list_of_index, **kwargs):
        self._check_space_indices(list_of_index)
        subspace_data = self.data[:, :, list_of_index, :]
        subspace_labels_dimensions = deepcopy(self.labels_dimensions)
        subspace_labels_dimensions[self.labels_ordering[
            2]] = self.space_labels[list_of_index].tolist()
        if subspace_data.ndim == 3:
            subspace_data = numpy.expand_dims(subspace_data, 2)
        return self.duplicate(data=subspace_data,
                              labels_dimensions=subspace_labels_dimensions,
                              **kwargs)

    def get_subspace_by_labels(self, list_of_labels):
        list_of_indices_for_labels = self._get_indices_for_labels(
            list_of_labels)
        return self.get_subspace_by_index(list_of_indices_for_labels)

    def __getattr__(self, attr_name):
        if self.labels_ordering[1] in self.labels_dimensions.keys():
            if attr_name in self.variables_labels:
                return self.get_state_variable(attr_name)
        if self.labels_ordering[2] in self.labels_dimensions.keys():
            if attr_name in self.space_labels:
                return self.get_subspace_by_labels([attr_name])
        raise AttributeError("%r object has no attribute %r" %
                             (self.__class__.__name__, attr_name))

    def _get_index_for_slice_label(self, slice_label, slice_idx):
        if slice_idx == 1:
            return self._get_indices_for_labels([slice_label])[0]
        if slice_idx == 2:
            return self._get_index_of_state_variable(slice_label)

    def _check_for_string_slice_indices(self, current_slice, slice_idx):
        slice_label1 = current_slice.start
        slice_label2 = current_slice.stop

        if isinstance(slice_label1, string_types):
            slice_label1 = self._get_index_for_slice_label(
                slice_label1, slice_idx)
        if isinstance(slice_label2, string_types):
            slice_label2 = self._get_index_for_slice_label(
                slice_label2, slice_idx)

        return slice(slice_label1, slice_label2, current_slice.step)

    def _get_string_slice_index(self, current_slice_string, slice_idx):
        return self._get_index_for_slice_label(current_slice_string, slice_idx)

    def __getitem__(self, slice_tuple):
        slice_list = []
        for idx, current_slice in enumerate(slice_tuple):
            if isinstance(current_slice, slice):
                slice_list.append(
                    self._check_for_string_slice_indices(current_slice, idx))
            else:
                if isinstance(current_slice, string_types):
                    slice_list.append(
                        self._get_string_slice_index(current_slice, idx))
                else:
                    slice_list.append(current_slice)

        return self.data[tuple(slice_list)]

    @property
    def shape(self):
        return self.data.shape

    @property
    def time_length(self):
        return self.data.shape[0]

    @property
    def number_of_labels(self):
        # Space (labels) is dimension 2 in the (time, variables, space, samples) layout
        return self.data.shape[2]

    @property
    def number_of_variables(self):
        return self.data.shape[1]

    @property
    def number_of_samples(self):
        return self.data.shape[3]

    @property
    def end_time(self):
        return self.start_time + (self.time_length - 1) * self.sample_period

    @property
    def duration(self):
        return self.end_time - self.start_time

    @property
    def time_unit(self):
        return self.sample_period_unit

    @property
    def sample_rate(self):
        # Convert to Hz when the period unit is milliseconds
        if len(self.sample_period_unit) > 0 and self.sample_period_unit[0] == "m":
            return 1000.0 / self.sample_period
        return 1.0 / self.sample_period

    @property
    def space_labels(self):
        try:
            return numpy.array(self.get_space_labels())
        except Exception:
            return numpy.array(
                self.labels_dimensions.get(self.labels_ordering[2], []))

    @property
    def variables_labels(self):
        return numpy.array(
            self.labels_dimensions.get(self.labels_ordering[1], []))

    @property
    def number_of_dimensions(self):
        return self.nr_dimensions

    @property
    def squeezed(self):
        return numpy.squeeze(self.data)

    def _check_space_indices(self, list_of_index):
        for index in list_of_index:
            if index < 0 or index >= self.data.shape[2]:
                self.logger.error(
                    "Some of the given indices are out of space range: [0, %s]",
                    self.data.shape[2] - 1)
                raise IndexError

    def _get_time_unit_for_index(self, time_index):
        return self.start_time + time_index * self.sample_period

    def _get_index_for_time_unit(self, time_unit):
        return int((time_unit - self.start_time) / self.sample_period)

    def get_time_window(self, index_start, index_end, **kwargs):
        if index_start < 0 or index_end > self.data.shape[0]:
            self.logger.error(
                "The time indices are outside time series interval: [%s, %s]" %
                (0, self.data.shape[0]))
            raise IndexError
        subtime_data = self.data[index_start:index_end, :, :, :]
        if subtime_data.ndim == 3:
            subtime_data = numpy.expand_dims(subtime_data, 0)
        return self.duplicate(
            data=subtime_data,
            start_time=self._get_time_unit_for_index(index_start),
            **kwargs)

    def get_time_window_by_units(self, unit_start, unit_end, **kwargs):
        end_time = self.end_time
        if unit_start < self.start_time or unit_end > end_time:
            self.logger.error(
                "The time units are outside time series interval: [%s, %s]" %
                (self.start_time, end_time))
            raise ValueError
        index_start = self._get_index_for_time_unit(unit_start)
        index_end = self._get_index_for_time_unit(unit_end)
        return self.get_time_window(index_start, index_end)

    def decimate_time(self, new_sample_period, **kwargs):
        if new_sample_period % self.sample_period != 0:
            self.logger.error(
                "Cannot decimate time if new time step is not a multiple of the old time step"
            )
            raise ValueError

        index_step = int(new_sample_period / self.sample_period)
        time_data = self.data[::index_step, :, :, :]
        return self.duplicate(data=time_data,
                              sample_period=new_sample_period,
                              **kwargs)

    def get_sample_window(self, index_start, index_end, **kwargs):
        subsample_data = self.data[:, :, :, index_start:index_end]
        if subsample_data.ndim == 3:
            subsample_data = numpy.expand_dims(subsample_data, 3)
        return self.duplicate(data=subsample_data, **kwargs)

    def configure(self):
        super(TimeSeries, self).configure()
        self.time = numpy.arange(self.start_time,
                                 self.end_time + self.sample_period,
                                 self.sample_period)
        self.start_time = self.start_time or self.time[0]
        self.sample_period = self.sample_period or numpy.mean(
            numpy.diff(self.time))
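decimate_time keeps every index_step-th sample, so the new sample period must be an integer multiple of the old one; sample_rate converts to Hz when the period unit starts with "m" (milliseconds). A hypothetical usage sketch:

# ts = TimeSeries(data, sample_period=1.0, sample_period_unit="ms")
# ts.sample_rate                # 1000.0 Hz, since the unit is milliseconds
# ts2 = ts.decimate_time(5.0)   # index_step == 5, keeps every 5th time point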
Example 12
class H5WriterBase(object):
    logger = initialize_logger(__name__)

    H5_TYPE_ATTRIBUTE = "EPI_Type"
    H5_SUBTYPE_ATTRIBUTE = "EPI_Subtype"

    def _determine_datasets_and_attributes(self, object, datasets_size=None):
        datasets_dict = {}
        metadata_dict = {}
        groups_keys = []

        try:
            if isinstance(object, dict):
                dict_object = object
            else:
                dict_object = vars(object)
            for key, value in dict_object.items():
                if isinstance(value, numpy.ndarray):
                    # if value.size == 1:
                    #     metadata_dict.update({key: value})
                    # else:
                    datasets_dict.update({key: value})
                    # if datasets_size is not None and value.size == datasets_size:
                    #     datasets_dict.update({key: value})
                    # else:
                    #     if datasets_size is None and value.size > 0:
                    #         datasets_dict.update({key: value})
                    #     else:
                    #         metadata_dict.update({key: value})
                # TODO: check how this works! Be careful not to include lists and tuples, if possible, in TVB classes!
                elif isinstance(value, (list, tuple)):
                    warning("Writing %s %s to h5 file as a numpy array dataset !" % (value.__class__, key), self.logger)
                    datasets_dict.update({key: numpy.array(value)})
                else:
                    if is_numeric(value) or isinstance(value, str):
                        metadata_dict.update({key: value})
                    elif not(callable(value)):
                        groups_keys.append(key)
        except Exception:
            msg = "Failed to decompose group object: " + str(object) + "!"
            try:
                self.logger.info(str(object.__dict__))
            except AttributeError:
                msg += "\n It has no __dict__ attribute!"
            warning(msg, self.logger)

        return datasets_dict, metadata_dict, groups_keys

    def _write_dicts_at_location(self, datasets_dict, metadata_dict, location):
        for key, value in datasets_dict.items():
            try:
                location.create_dataset(key, data=value)
            except Exception:
                warning("Failed to write to %s dataset %s %s:\n%s !" %
                        (str(location), value.__class__, key, str(value)), self.logger)

        for key, value in metadata_dict.items():
            try:
                location.attrs.create(key, value)
            except Exception:
                warning("Failed to write to %s attribute %s %s:\n%s !" %
                        (str(location), value.__class__, key, str(value)), self.logger)
        return location

    def _prepare_object_for_group(self, group, object, h5_type_attribute="", nr_regions=None,
                                  regress_subgroups=True):
        group.attrs.create(self.H5_TYPE_ATTRIBUTE, h5_type_attribute)
        group.attrs.create(self.H5_SUBTYPE_ATTRIBUTE, object.__class__.__name__)
        datasets_dict, metadata_dict, subgroups = self._determine_datasets_and_attributes(object, nr_regions)
        # If empty return None
        if len(datasets_dict) == len(metadata_dict) == len(subgroups) == 0:
            if isinstance(group, h5py.File):
                if regress_subgroups:
                    return group
                else:
                    return group, subgroups
            else:
                return None
        else:
            if len(datasets_dict) > 0 or len(metadata_dict) > 0:
                if isinstance(group, h5py.File):
                    group = self._write_dicts_at_location(datasets_dict, metadata_dict, group)
                else:
                    self._write_dicts_at_location(datasets_dict, metadata_dict, group)
            # Continue recursively going deeper in the object
            if regress_subgroups:
                for subgroup in subgroups:
                    if isinstance(object, dict):
                        child_object = object.get(subgroup, None)
                    else:
                        child_object = getattr(object, subgroup, None)
                    if child_object is not None:
                        group.create_group(subgroup)
                        temp = self._prepare_object_for_group(group[subgroup], child_object,
                                                              h5_type_attribute, nr_regions)
                        # If empty delete it
                        if temp is None or len(temp.keys()) == 0:
                            del group[subgroup]

                return group
            else:
                return group, subgroups

    def write_object_to_file(self, path, object, h5_type_attribute="", nr_regions=None):
        h5_file = h5py.File(change_filename_or_overwrite(path), 'a', libver='latest')
        h5_file = self._prepare_object_for_group(h5_file, object, h5_type_attribute, nr_regions)
        h5_file.close()
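The writer decomposes an object recursively: numpy arrays become datasets, numerics and strings become attributes, and remaining non-callable attributes recurse into subgroups. A hypothetical sketch of the resulting layout:

import numpy

class Inner(object):
    def __init__(self):
        self.values = numpy.arange(3)   # -> dataset "inner/values"

class Outer(object):
    def __init__(self):
        self.name = "example"           # -> attribute "name" on the root
        self.inner = Inner()            # -> subgroup "inner"

# H5WriterBase().write_object_to_file("outer.h5", Outer())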
Example 13
class Timeseries(object):

    logger = initialize_logger(__name__)

    ts_type = ""

    _tvb = None

    # labels_dimensions = {"space": numpy.array([]), "variables": numpy.array([])}

    def __init__(self, input=numpy.array([[], []]), **kwargs):
        if isinstance(input, (Timeseries, TimeSeries)):

            if isinstance(input, Timeseries):
                self._tvb = deepcopy(input._tvb)
                self.ts_type = str(input.ts_type)

            elif isinstance(input, TimeSeries):
                self._tvb = deepcopy(input)
                # One chain, testing subclasses before their parents
                if isinstance(input, TimeSeriesRegion):
                    self.ts_type = "Region"
                elif isinstance(input, TimeSeriesSEEG):
                    self.ts_type = "SEEG"
                elif isinstance(input, TimeSeriesEEG):
                    self.ts_type = "EEG"
                elif isinstance(input, TimeSeriesMEG):
                    self.ts_type = "MEG"
                elif isinstance(input, TimeSeriesVolume):
                    self.ts_type = "Volume"
                elif isinstance(input, TimeSeriesSurface):
                    self.ts_type = "Surface"
                else:
                    self.ts_type = ""
                    warning(
                        "Input TimeSeries %s is not one of the known TVB TimeSeries classes!"
                        % str(input))
            for attr, value in kwargs.items():
                try:
                    setattr(self, attr, value)
                except AttributeError:
                    setattr(self._tvb, attr, value)

        elif isinstance(input, numpy.ndarray):
            input = prepare_4D(input, self.logger)
            time = kwargs.pop("time", None)
            if time is not None:
                start_time = float(
                    kwargs.pop("start_time", kwargs.pop("start_time",
                                                        time[0])))
                sample_period = float(
                    kwargs.pop(
                        "sample_period",
                        kwargs.pop("sample_period",
                                   numpy.mean(numpy.diff(time)))))
                kwargs.update({
                    "start_time": start_time,
                    "sample_period": sample_period
                })

            # Initialize
            self.ts_type = kwargs.pop("ts_type", "Region")
            labels_ordering = kwargs.get("labels_ordering", None)

            # Get input sensors if any
            input_sensors = None
            if isinstance(kwargs.get("sensors", None), (TVBSensors, Sensors)):
                if isinstance(kwargs["sensors"], Sensors):
                    input_sensors = kwargs["sensors"]._tvb
                    self.ts_type = "%s sensor" % input_sensors.sensors_type
                    kwargs.update({"sensors": input_sensors})
                else:
                    input_sensors = kwargs["sensors"]

            # Create Timeseries
            if isinstance(input_sensors, TVBSensors) or \
                    self.ts_type in ["SEEG sensor", "Internal sensor", "EEG sensor", "MEG sensor"]:
                # ...for Sensor Timeseries
                if labels_ordering is None:
                    # Copy the module-level default before mutating
                    labels_ordering = list(LABELS_ORDERING)
                    labels_ordering[2] = "%s sensor" % self.ts_type
                    kwargs.update({"labels_ordering": labels_ordering})
                if isinstance(input_sensors, TVBSensorsInternal) or isequal_string(self.ts_type, "Internal sensor")\
                        or isequal_string(self.ts_type, "SEEG sensor"):
                    self._tvb = TimeSeriesSEEG(data=input, **kwargs)
                    self.ts_type = "SEEG sensor"
                elif isinstance(input_sensors,
                                TVBSensorsEEG) or isequal_string(
                                    self.ts_type, "EEG sensor"):
                    self._tvb = TimeSeriesEEG(data=input, **kwargs)
                    self.ts_type = "EEG sensor"
                elif isinstance(input_sensors,
                                TVBSensorsMEG) or isequal_string(
                                    self.ts_type, "MEG sensor"):
                    self._tvb = TimeSeriesMEG(data=input, **kwargs)
                    self.ts_type = "MEG sensor"
                else:
                    raise_value_error(
                        "Not recognizing sensors of type %s:\n%s" %
                        (self.ts_type, str(input_sensors)))
            else:
                input_surface = kwargs.pop("surface", None)
                if isinstance(
                        input_surface,
                    (Surface, TVBSurface)) or self.ts_type == "Surface":
                    self.ts_type = "Surface"
                    if isinstance(input_surface, Surface):
                        kwargs.update({"surface": input_surface._tvb})
                    else:
                        kwargs.update({"surface": input_surface})
                    if labels_ordering is None:
                        labels_ordering = list(LABELS_ORDERING)
                        labels_ordering[2] = "Vertex"
                        kwargs.update({"labels_ordering": labels_ordering})
                    self._tvb = TimeSeriesSurface(data=input, **kwargs)
                elif isequal_string(self.ts_type, "Region"):
                    if labels_ordering is None:
                        labels_ordering = list(LABELS_ORDERING)
                        labels_ordering[2] = "Region"
                        kwargs.update({"labels_ordering": labels_ordering})
                    self._tvb = TimeSeriesRegion(data=input, **kwargs)
                elif isequal_string(self.ts_type, "Volume"):
                    if labels_ordering is None:
                        labels_ordering = ["Time", "X", "Y", "Z"]
                        kwargs.update({"labels_ordering": labels_ordering})
                    self._tvb = TimeSeriesVolume(data=input, **kwargs)
                else:
                    self._tvb = TimeSeries(data=input, **kwargs)

            if not numpy.all([
                    dim_label in self._tvb.labels_dimensions.keys()
                    for dim_label in self._tvb.labels_ordering
            ]):
                warning(
                    "Lack of correspondence between timeseries labels_ordering %s\n"
                    "and labels_dimensions!: %s" %
                    (self._tvb.labels_ordering,
                     self._tvb.labels_dimensions.keys()))

        self._tvb.configure()
        self.configure_time()
        self.configure_sample_rate()
        if len(self.title) == 0:
            self._tvb.title = "%s Time Series" % self.ts_type

    def duplicate(self, **kwargs):
        return self.__class__(self, **kwargs)

    def _get_indices_from_labels(self, labels, dim):
        dim_label = self.labels_ordering[dim]
        return labels_to_indices(self.labels_dimensions[dim_label], labels,
                                 dim_label, self.logger)

    def _get_indices_of_variables(self, sv_labels):
        # State variables live on dimension 1 of (time, variables, space, samples)
        return self._get_indices_from_labels(sv_labels, 1)

    def _get_indices_of_labels(self, list_of_labels):
        return self._get_indices_from_labels(list_of_labels, 2)

    def _get_indices_of_samples(self, list_of_labels):
        return self._get_indices_from_labels(list_of_labels, 3)

    def _get_time_for_index(self, time_index):
        return self._tvb.start_time + time_index * self._tvb.sample_period

    def _get_index_for_time(self, time_unit):
        return int(
            (time_unit - self._tvb.start_time) / self._tvb.sample_period)

    def _check_indices(self, list_of_index, dim):
        for index in list_of_index:
            if index < 0 or index >= self._tvb.data.shape[dim]:
                self.logger.error(
                    "Some of the given indices are out of range: [0, %s]",
                    self._tvb.data.shape[dim] - 1)
                raise IndexError

    def get_subset_by_index(self, list_of_indices, dim, **kwargs):
        assert dim in [0, 1, 2, 3]
        list_of_indices = ensure_list(list_of_indices)
        self._check_indices(list_of_indices, dim)
        slice_tuple = [slice(None), slice(None), slice(None), slice(None)]
        slice_tuple[dim] = list_of_indices
        data = self._tvb.data[tuple(slice_tuple)]
        dim_label = self.labels_ordering[dim]
        if len(self.labels_dimensions.get(dim_label, [])):
            labels_dimensions = deepcopy(self.labels_dimensions)
            labels_dimensions[dim_label] = numpy.array(
                labels_dimensions[dim_label])[list_of_indices]
        else:
            labels_dimensions = self.labels_dimensions
        if data.ndim == 3:
            # restore the collapsed dimension where it was
            data = numpy.expand_dims(data, dim)
        return self.duplicate(data=data,
                              labels_dimensions=labels_dimensions,
                              **kwargs)

    def get_subset_by_label(self, list_of_labels, dim, **kwargs):
        assert dim in [0, 1, 2, 3]
        list_of_labels = ensure_list(list_of_labels)
        dim_label = self.labels_ordering[dim]
        list_of_indices = labels_to_indices(self.labels_dimensions[dim_label],
                                            list_of_labels, dim_label,
                                            self.logger)
        return self.get_subset_by_index(list_of_indices, dim, **kwargs)

    def get_subset(self, list_of_indices_or_labels, dim, **kwargs):
        assert dim in [0, 1, 2, 3]
        list_of_indices_or_labels = ensure_list(list_of_indices_or_labels)
        if numpy.all([
                is_integer(ind_or_lbl)
                for ind_or_lbl in list_of_indices_or_labels
        ]):
            return self.get_subset_by_index(list_of_indices_or_labels, dim,
                                            **kwargs)
        else:
            if dim == 0:
                if not numpy.all([
                        is_numeric(ind_or_lbl)
                        for ind_or_lbl in list_of_indices_or_labels
                ]):
                    raise_value_error(
                        "Input consists neither of integer indices nor of points in time (floats)!: %s"
                        % list_of_indices_or_labels)
                time_indices = [
                    self._get_index_for_time(time)
                    for time in list_of_indices_or_labels
                ]
                return self.get_subset_by_index(time_indices, 0, **kwargs)
            else:
                if not numpy.all([
                        isinstance(ind_or_lbl, string_types)
                        for ind_or_lbl in list_of_indices_or_labels
                ]):
                    raise_value_error(
                        "Input consists neither of integer indices nor of label strings!: %s"
                        % list_of_indices_or_labels)
                return self.get_subset_by_label(list_of_indices_or_labels, dim,
                                                **kwargs)

    def get_times_by_index(self, list_of_times_indices, **kwargs):
        return self.get_subset_by_index(list_of_times_indices, 0, **kwargs)

    def get_times(self, list_of_times, **kwargs):
        return self.get_subset(list_of_times, 0, **kwargs)

    def get_variables_by_index(self, list_of_indices, **kwargs):
        return self.get_subset_by_index(list_of_indices, 1, **kwargs)

    def get_variables_by_label(self, list_of_labels, **kwargs):
        return self.get_subset_by_label(list_of_labels, 1, **kwargs)

    def get_variables(self, list_of_labels_or_inds, **kwargs):
        return self.get_subset(list_of_labels_or_inds, 1, **kwargs)

    def get_subspace_by_index(self, list_of_indices, **kwargs):
        return self.get_subset_by_index(list_of_indices, 2, **kwargs)

    def get_subspace_by_labels(self, list_of_labels, **kwargs):
        return self.get_subset_by_label(list_of_labels, 2, **kwargs)

    def get_subspace(self, list_of_labels_or_inds, **kwargs):
        return self.get_subset(list_of_labels_or_inds, 2, **kwargs)

    def get_samples_by_index(self, list_of_indices, **kwargs):
        return self.get_subset_by_index(list_of_indices, 3, **kwargs)

    def get_samples_by_labels(self, list_of_labels, **kwargs):
        return self.get_subset_by_label(list_of_labels, 3, **kwargs)

    def get_samples(self, list_of_labels_or_inds, **kwargs):
        return self.get_subset(list_of_labels_or_inds, 3, **kwargs)

    def __getattr__(self, attr_name):
        if self.labels_ordering[1] in self._tvb.labels_dimensions.keys():
            if attr_name in self.variables_labels:
                return self.get_variables_by_label(attr_name)
        if (self.labels_ordering[2] in self._tvb.labels_dimensions.keys()):
            if attr_name in self.space_labels:
                return self.get_subspace_by_labels(attr_name)
        if (self.labels_ordering[3] in self._tvb.labels_dimensions.keys()):
            if attr_name in self.samples_labels:
                return self.get_samples_by_labels(attr_name)
        try:
            return getattr(self._tvb, attr_name)
        except:
            # Hack to avoid stupid error messages when searching for __ attributes in numpy.array() call...
            # TODO: something better? Maybe not needed if we never do something like numpy.array(timeseries)
            if attr_name.find("__") < 0:
                self.logger.error(
                    "Attribute %s is not defined for this instance! You can use the following labels: "
                    "%s = %s and %s = %s" %
                    (attr_name, TimeseriesDimensions.VARIABLES.value,
                     self.variables_labels, TimeseriesDimensions.SPACE.value,
                     self.space_labels))
            raise AttributeError

    def _get_index_for_slice_label(self, slice_label, slice_idx):
        # slice position 1 indexes variables, position 2 indexes space labels
        if slice_idx == 1:
            return self._get_indices_of_variables([slice_label])[0]
        if slice_idx == 2:
            return self._get_indices_of_labels([slice_label])[0]

    def _check_for_string_slice_indices(self, current_slice, slice_idx):
        slice_label1 = current_slice.start
        slice_label2 = current_slice.stop

        if isinstance(slice_label1, string_types):
            slice_label1 = self._get_index_for_slice_label(
                slice_label1, slice_idx)
        if isinstance(slice_label2, string_types):
            slice_label2 = self._get_index_for_slice_label(
                slice_label2, slice_idx)

        return slice(slice_label1, slice_label2, current_slice.step)

    def _get_string_slice_index(self, current_slice_string, slice_idx):
        return self._get_index_for_slice_label(current_slice_string, slice_idx)

    def __getitem__(self, slice_tuple):
        slice_list = []
        for idx, current_slice in enumerate(slice_tuple):
            if isinstance(current_slice, slice):
                slice_list.append(
                    self._check_for_string_slice_indices(current_slice, idx))
            else:
                if isinstance(current_slice, string_types):
                    slice_list.append(
                        self._get_string_slice_index(current_slice, idx))
                else:
                    slice_list.append(current_slice)

        return self._tvb.data[tuple(slice_list)]

    @property
    def title(self):
        return self._tvb.title

    @property
    def data(self):
        return self._tvb.data

    @property
    def shape(self):
        return self._tvb.data.shape

    @property
    def time(self):
        return self._tvb.time

    @property
    def time_length(self):
        return self._tvb.length_1d

    @property
    def number_of_variables(self):
        return self._tvb.length_2d

    @property
    def number_of_labels(self):
        return self._tvb.length_3d

    @property
    def number_of_samples(self):
        return self._tvb.length_4d

    @property
    def start_time(self):
        return self._tvb.start_time

    @property
    def sample_period(self):
        return self._tvb.sample_period

    @property
    def end_time(self):
        return self.start_time + (self.time_length - 1) * self.sample_period

    @property
    def duration(self):
        return self.end_time - self.start_time

    def configure_time(self):
        # an integer-based arange avoids floating-point end-point overshoot
        self._tvb.time = self.start_time + \
            numpy.arange(self.time_length) * self.sample_period
        return self

    @property
    def time_unit(self):
        return self._tvb.sample_period_unit

    @property
    def sample_period_unit(self):
        return self._tvb.sample_period_unit

    @property
    def sample_rate(self):
        return self._tvb.sample_rate

    def configure_sampling_frequency(self):
        # a sample_period_unit starting with "m" (e.g., "ms") means milliseconds
        if len(self._tvb.sample_period_unit) > 0 and \
                self._tvb.sample_period_unit[0] == "m":
            self._tvb.sample_rate = 1000.0 / self._tvb.sample_period
        else:
            self._tvb.sample_rate = 1.0 / self._tvb.sample_period
        return self

    def configure_sample_rate(self):
        return self.configure_sampling_frequency()

    @property
    def labels_dimensions(self):
        return self._tvb.labels_dimensions

    @property
    def labels_ordering(self):
        return self._tvb.labels_ordering

    @property
    def space_labels(self):
        try:
            return numpy.array(self._tvb.get_space_labels())
        except:
            return numpy.array(
                self._tvb.labels_dimensions.get(self.labels_ordering[2], []))

    @property
    def variables_labels(self):
        return numpy.array(
            self._tvb.labels_dimensions.get(self.labels_ordering[1], []))

    @property
    def samples_labels(self):
        return numpy.array(
            self._tvb.labels_dimensions.get(self.labels_ordering[3], []))

    @property
    def nr_dimensions(self):
        return self._tvb.nr_dimensions

    @property
    def number_of_dimensions(self):
        return self._tvb.nr_dimensions

    @property
    def sensors(self):
        return self._tvb.sensors

    @property
    def connectivity(self):
        return self._tvb.connectivity

    @property
    def region_mapping_volume(self):
        return self._tvb.region_mapping_volume

    @property
    def region_mapping(self):
        return self._tvb.region_mapping

    @property
    def surface(self):
        return self._tvb.surface

    @property
    def volume(self):
        return self._tvb.volume

    @property
    def squeezed(self):
        return numpy.squeeze(self._tvb.data)

    def get_time_window(self, index_start, index_end, **kwargs):
        if index_start < 0 or index_end > self._tvb.data.shape[0]:
            self.logger.error(
                "The time indices are outside the time series interval: [%s, %s]" %
                (0, self._tvb.data.shape[0]))
            raise IndexError
        subtime_data = self._tvb.data[index_start:index_end, :, :, :]
        if subtime_data.ndim == 3:
            subtime_data = numpy.expand_dims(subtime_data, 0)
        return self.duplicate(data=subtime_data,
                              start_time=self._get_time_for_index(index_start),
                              **kwargs)

    def get_time_window_by_units(self, unit_start, unit_end, **kwargs):
        end_time = self.end_time
        if unit_start < self._tvb.start_time or unit_end > end_time:
            self.logger.error(
                "The time units are outside the time series interval: [%s, %s]" %
                (self._tvb.start_time, end_time))
            raise ValueError
        index_start = self._get_index_for_time(unit_start)
        index_end = self._get_index_for_time(unit_end)
        return self.get_time_window(index_start, index_end, **kwargs)

    def decimate_time(self, new_sample_period, **kwargs):
        if new_sample_period % self.sample_period != 0:
            self.logger.error(
                "Cannot decimate time if new time step is not a multiple of the old time step"
            )
            raise ValueError

        index_step = int(new_sample_period / self._tvb.sample_period)
        time_data = self._tvb.data[::index_step, :, :, :]
        return self.duplicate(data=time_data,
                              sample_period=new_sample_period,
                              **kwargs)

    def get_sample_window(self, index_start, index_end, **kwargs):
        subsample_data = self._tvb.data[:, :, :, index_start:index_end]
        if subsample_data.ndim == 3:
            subsample_data = numpy.expand_dims(subsample_data, 3)
        return self.duplicate(data=subsample_data, **kwargs)

    def get_source(self):
        if self.labels_ordering[1] not in self._tvb.labels_dimensions.keys():
            self.logger.error(
                "No state variables are defined for this instance!")
            raise ValueError
        if PossibleVariables.SOURCE.value in self.variables_labels:
            return self.get_variables_by_label(PossibleVariables.SOURCE.value)

    def get_bipolar(self, **kwargs):
        bipolar_labels, bipolar_inds = monopolar_to_bipolar(self.space_labels)
        data = self._tvb.data[:, :,
                              bipolar_inds[0]] - self._tvb.data[:, :,
                                                                bipolar_inds[1]]
        bipolar_labels_dimensions = deepcopy(self._tvb.labels_dimensions)
        bipolar_labels_dimensions[self.labels_ordering[2]] = list(
            bipolar_labels)
        return self.duplicate(data=data,
                              labels_dimensions=bipolar_labels_dimensions,
                              **kwargs)

    def set_data(self, data):
        self._tvb.data = data
        return self

    def configure(self):
        self._tvb.configure()
        self.configure_time()
        self.configure_sampling_frequency()
        self.configure_sample_rate()
        return self
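
A minimal usage sketch of the Timeseries wrapper above (hypothetical: the import path is assumed, and the constructor is assumed to accept the same keyword arguments that read_timeseries in Example n. 14 passes, i.e. the data array plus labels_ordering, labels_dimensions, sample_period and sample_period_unit):

import numpy
# assumed import path; adjust to wherever the Timeseries class above lives
from tvb_scripts.datatypes.timeseries import Timeseries

data = numpy.random.randn(100, 1, 3, 1)  # (Time, Variables, Space, Samples)
ts = Timeseries(data,
                labels_ordering=["Time", "State Variable", "Region", "Sample"],
                labels_dimensions={"Region": ["r0", "r1", "r2"]},
                sample_period=1.0, sample_period_unit="ms")
window = ts.get_time_window(10, 20)         # time indices [10, 20)
region = ts.get_subset_by_label(["r1"], 2)  # one region along space dim 2
print(window.shape, region.shape)  # expected: (10, 1, 3, 1) (100, 1, 1, 1)
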
Example n. 14
class H5Reader(object):
    logger = initialize_logger(__name__)

    connectivity_filename = "Connectivity.h5"
    cortical_surface_filename = "CorticalSurface.h5"
    subcortical_surface_filename = "SubcorticalSurface.h5"
    cortical_region_mapping_filename = "RegionMapping.h5"
    subcortical_region_mapping_filename = "RegionMappingSubcortical.h5"
    volume_mapping_filename = "VolumeMapping.h5"
    structural_mri_filename = "StructuralMRI.h5"
    sensors_filename_prefix = "Sensors"
    sensors_filename_separator = "_"

    def read_connectivity(self, path):
        """
        :param path: Path towards a custom Connectivity H5 file
        :return: Connectivity object
        """
        self.logger.info("Starting to read a Connectivity from: %s" % path)
        if os.path.isfile(path):
            h5_file = h5py.File(path, 'r', libver='latest')

            weights = h5_file['/' + ConnectivityH5Field.WEIGHTS][()]
            try:
                tract_lengths = h5_file['/' + ConnectivityH5Field.TRACTS][()]
            except:
                tract_lengths = numpy.array([])
            try:
                region_centres = h5_file['/' + ConnectivityH5Field.CENTERS][()]
            except:
                region_centres = numpy.array([])
            try:
                region_labels = h5_file['/' +
                                        ConnectivityH5Field.REGION_LABELS][()]
            except:
                region_labels = numpy.array([])
            try:
                orientations = h5_file['/' +
                                       ConnectivityH5Field.ORIENTATIONS][()]
            except:
                orientations = numpy.array([])
            try:
                hemispheres = h5_file['/' +
                                      ConnectivityH5Field.HEMISPHERES][()]
            except:
                hemispheres = numpy.array([])
            try:
                areas = h5_file['/' + ConnectivityH5Field.AREAS][()]
            except:
                areas = numpy.array([])

            h5_file.close()

            conn = Connectivity(file_path=path,
                                weights=weights,
                                tract_lengths=tract_lengths,
                                region_labels=region_labels,
                                region_centres=region_centres,
                                hemispheres=hemispheres,
                                orientations=orientations,
                                areas=areas)
            conn.configure()
            self.logger.info("Successfully read connectvity from: %s" % path)

            return conn
        else:
            raise_value_error(
                ("\n No Connectivity file found at path %s!" % str(path)))

    def read_surface(self, path, surface_class):
        """
        :param path: Path towards a custom Surface H5 file
        :return: Surface object
        """
        if not os.path.isfile(path):
            self.logger.warning("Surface file %s does not exist" % path)
            return None

        self.logger.info("Starting to read Surface from: %s" % path)
        h5_file = h5py.File(path, 'r', libver='latest')

        vertices = h5_file['/' + SurfaceH5Field.VERTICES][()]
        triangles = h5_file['/' + SurfaceH5Field.TRIANGLES][()]
        try:
            vertex_normals = h5_file['/' + SurfaceH5Field.VERTEX_NORMALS][()]
        except:
            vertex_normals = numpy.array([])
        try:
            triangle_normals = h5_file['/' +
                                       SurfaceH5Field.TRIANGLE_NORMALS][()]
        except:
            triangle_normals = numpy.array([])
        h5_file.close()

        surface = surface_class(file_path=path,
                                vertices=vertices,
                                triangles=triangles,
                                vertex_normals=vertex_normals,
                                triangle_normals=triangle_normals)
        surface.configure()

        self.logger.info("Successfully read surface from: %s" % path)

        return surface

    def read_sensors(self, path):
        """
        :param path: Path towards a custom virtual_head folder
        :return: 3 lists with all sensors from Path by type
        """
        sensors = OrderedDict()

        self.logger.info("Starting to read all Sensors from: %s" % path)

        all_head_files = os.listdir(path)
        for head_file in all_head_files:
            str_head_file = str(head_file)
            if not str_head_file.startswith(self.sensors_filename_prefix):
                continue
            name = str_head_file.split(".")[0]
            sensor, projection = \
                self.read_sensors_of_type(os.path.join(path, head_file), name)
            if sensor is None:
                continue
            sensors_set = sensors.get(sensor.sensors_type, OrderedDict())
            sensors_set.update({sensor: projection})
            sensors[sensor.sensors_type] = sensors_set

        self.logger.info("Successfuly read all sensors from: %s" % path)

        return sensors

    def read_sensors_of_type(self, sensors_file, name):
        """
        :param
            sensors_file: Path towards a custom Sensors H5 file
            s_type: Senors s_type
        :return: Sensors object
        """
        if not os.path.exists(sensors_file):
            self.logger.warning("Senors file %s does not exist!" %
                                sensors_file)
            return []

        self.logger.info("Starting to read sensors of from: %s" % sensors_file)
        h5_file = h5py.File(sensors_file, 'r', libver='latest')

        locations = h5_file['/' + SensorsH5Field.LOCATIONS][()]
        try:
            labels = h5_file['/' + SensorsH5Field.LABELS][()]
        except:
            labels = numpy.array([])
        try:
            orientations = h5_file['/' + SensorsH5Field.ORIENTATIONS][()]
        except:
            orientations = numpy.array([])
        name = h5_file.attrs.get("name", name)
        s_type = h5_file.attrs.get("Sensors_subtype", "")

        if '/' + SensorsH5Field.PROJECTION_MATRIX in h5_file:
            proj_matrix = h5_file['/' + SensorsH5Field.PROJECTION_MATRIX][()]
            # the dict maps to projection classes, so the default must also be
            # the class (not an instance), since the result is called below
            projection = SensorTypesToProjectionDict.get(
                s_type, ProjectionMatrix)()
            projection.projection_data = proj_matrix
        else:
            projection = None

        h5_file.close()

        sensors = \
            SensorTypesToClassesDict.get(s_type, Sensors)(file_path=sensors_file, name=name,
                                                          labels=labels, locations=locations,
                                                          orientations=orientations)
        sensors.configure()
        self.logger.info("Successfully read sensors from: %s" % sensors_file)

        return sensors, projection

    def read_volume_mapping(self, path):
        """
        :param path: Path towards a custom VolumeMapping H5 file
        :return: volume mapping in a numpy array
        """
        if not os.path.isfile(path):
            self.logger.warning("VolumeMapping file %s does not exist" % path)
            return None

        self.logger.info("Starting to read VolumeMapping from: %s" % path)
        h5_file = h5py.File(path, 'r', libver='latest')

        vm = region_mapping.RegionVolumeMapping()
        vm.array_data = h5_file['/data'][()]

        h5_file.close()
        self.logger.info("Successfully read volume mapping!")  #: %s" % data)

        return vm

    def read_region_mapping(self, path):
        """
        :param path: Path towards a custom RegionMapping H5 file
        :return: region mapping in a numpy array
        """
        if not os.path.isfile(path):
            self.logger.warning("RegionMapping file %s does not exist" % path)
            return None

        self.logger.info("Starting to read RegionMapping from: %s" % path)
        h5_file = h5py.File(path, 'r', libver='latest')

        rm = region_mapping.RegionMapping()
        rm.array_data = h5_file['/data'][()]

        h5_file.close()
        self.logger.info("Successfully read region mapping!")  #: %s" % data)

        return rm

    def read_t1(self, path):
        """
        :param path: Path towards a custom StructuralMRI H5 file
        :return: structural MRI in a numpy array
        """
        if not os.path.isfile(path):
            self.logger.warning("StructuralMRI file %s does not exist" % path)
            return None

        self.logger.info("Starting to read StructuralMRI from: %s" % path)
        h5_file = h5py.File(path, 'r', libver='latest')

        t1 = structural.StructuralMRI()
        t1.array_data = h5_file['/data'][()]

        h5_file.close()
        self.logger.info("Successfully read structural MRI from: %s" % path)

        return t1

    def read_head(self, path, atlas="default"):
        """
        :param path: Path towards a custom virtual_head folder
        :return: Head object
        """
        self.logger.info("Starting to read Head from: %s" % path)
        conn = \
            self.read_connectivity(os.path.join(path, self.connectivity_filename))
        cort_srf =\
            self.read_surface(os.path.join(path, self.cortical_surface_filename), CorticalSurface)
        subcort_srf = \
            self.read_surface(os.path.join(path, self.subcortical_surface_filename), SubcorticalSurface)
        cort_rm = \
            self.read_region_mapping(os.path.join(path, self.cortical_region_mapping_filename))
        if cort_rm is not None:
            cort_rm.connectivity = conn._tvb
            if cort_srf is not None:
                cort_rm.surface = cort_srf._tvb
        subcort_rm = \
            self.read_region_mapping(os.path.join(path, self.subcortical_region_mapping_filename))
        if subcort_rm is not None:
            subcort_rm.connectivity = conn._tvb
            if subcort_srf is not None:
                subcort_rm.surface = subcort_srf._tvb
        vm = \
            self.read_volume_mapping(os.path.join(path, self.volume_mapping_filename))
        t1 = \
            self.read_t1(os.path.join(path, self.structural_mri_filename))
        sensors = self.read_sensors(path)

        if len(atlas) > 0:
            name = atlas
        else:
            name = path

        head = Head(conn, sensors, cort_srf, subcort_srf, cort_rm, subcort_rm,
                    vm, t1, name)

        self.logger.info("Successfully read Head from: %s" % path)

        return head

    def read_ts(self, path):
        """
        :param path: Path towards a valid TimeSeries H5 file
        :return: Timeseries data and time in 2 numpy arrays
        """
        self.logger.info("Starting to read TimeSeries from: %s" % path)
        h5_file = h5py.File(path, 'r', libver='latest')

        data = h5_file['/data'][()]
        total_time = int(h5_file["/"].attrs["Simulated_period"][0])
        nr_of_steps = int(h5_file["/data"].attrs["Number_of_steps"][0])
        start_time = float(h5_file["/data"].attrs["Start_time"][0])
        time = numpy.linspace(start_time, total_time, nr_of_steps)

        self.logger.info("First Channel sv sum: " + str(numpy.sum(data[:, 0])))
        self.logger.info("Successfully read timeseries!")  #: %s" % data)
        h5_file.close()

        return time, data

    def read_timeseries(self, path, timeseries=Timeseries):
        """
        :param path: Path towards a valid TimeSeries H5 file
        :return: Timeseries data and time in 2 numpy arrays
        """
        self.logger.info("Starting to read TimeSeries from: %s" % path)
        h5_file = h5py.File(path, 'r', libver='latest')

        data = h5_file['/data'][()]

        ts_kwargs = {}
        labels_dimensions = {}
        try:
            time = h5_file['/time'][()]
            ts_kwargs["time"] = time
            ts_kwargs["sample_period"] = float(np.mean(np.diff(time)))
        except:
            pass
        try:
            labels_ordering = (h5_file['/dimensions_labels'][()]).tolist()
        except:
            labels_ordering = LABELS_ORDERING
        try:
            labels_dimensions.update(
                {labels_ordering[2]: h5_file['/labels'][()]})
        except:
            pass
        try:
            labels_dimensions.update(
                {labels_ordering[1]: h5_file['/variables'][()]})
        except:
            pass
        if len(labels_dimensions) > 0:
            ts_kwargs["labels_dimensions"] = labels_dimensions
        time_unit = str(h5_file.attrs.get("sample_period_unit", ""))
        if len(time_unit) > 0:
            ts_kwargs["sample_period_unit"] = time_unit
        ts_type = str(h5_file.attrs.get("time_series_type", ""))
        if len(ts_type) > 0:
            ts_kwargs["ts_type"] = ts_type
        title = str(h5_file.attrs.get("title", ""))
        if len(title) > 0:
            ts_kwargs["title"] = title
        self.logger.info("First Channel sv sum: " + str(numpy.sum(data[:, 0])))
        self.logger.info("Successfully read Timeseries!")  #: %s" % data)
        h5_file.close()

        return timeseries(data, labels_ordering=labels_ordering, **ts_kwargs)

    def read_dictionary(self, path, type=None):
        """
        :param path: Path towards a dictionary H5 file
        :return: dict
        """
        self.logger.info("Starting to read a dictionary from: %s" % path)
        h5_file = h5py.File(path, 'r', libver='latest')
        dictionary = H5GroupHandlers().read_dictionary_from_group(
            h5_file, type)
        h5_file.close()
        return dictionary

    def read_list_of_dicts(self, path, type=None):
        self.logger.info("Starting to read a list of dictionaries from: %s" %
                         path)
        h5_file = h5py.File(path, 'r', libver='latest')
        list_of_dicts = []
        id = 0
        h5_group_handlers = H5GroupHandlers()
        while 1:
            try:
                dict_group = h5_file[str(id)]
            except:
                break
            list_of_dicts.append(
                h5_group_handlers.read_dictionary_from_group(dict_group, type))
            id += 1
        h5_file.close()
        return list_of_dicts
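
A hedged usage sketch of this H5Reader (the folder path and the TimeSeries file name are placeholders; the other file names follow the class constants above):

import os

reader = H5Reader()
head_folder = "/path/to/virtual_head"  # placeholder path
# read a whole Head (connectivity, surfaces, mappings, sensors) in one call...
head = reader.read_head(head_folder, atlas="default")
# ...or read individual pieces with the same file-name conventions:
conn = reader.read_connectivity(
    os.path.join(head_folder, reader.connectivity_filename))
ts = reader.read_timeseries(os.path.join(head_folder, "TimeSeries.h5"))
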
Example n. 15
    def __init__(self, config=None):
        self.config = config or Config()
        self.logger = initialize_logger(self.__class__.__name__,
                                        self.config.out.FOLDER_LOGS)
        self.print_regions_indices = True
Example n. 16
# -*- coding: utf-8 -*-

import os
import inspect
import tvb_data
from tvb.simulator.plot.config import FiguresConfig as ConfigBase
from tvb.datatypes import cortex, connectivity
from tvb.basic.profile import TvbProfile
from tvb_scripts.utils.log_error_utils import initialize_logger

TvbProfile.set_profile(TvbProfile.LIBRARY_PROFILE)
initialize_logger('matplotlib')


class Config(ConfigBase):
    # NEST properties:
    NEST_MIN_DT = 0.001

    DEFAULT_MODEL = "iaf_cond_beta"  # "iaf_cond_deco2014"

    # Delays should be at least equal to NEST time resolution
    DEFAULT_CONNECTION = {
        "model": "static_synapse",
        "weight": 1.0,
        "delay": 0.0,
        'receptor_type': 0,
        "conn_spec": {
            "autapses": False,
            'multapses': True,
            'rule': "all_to_all",
            "indegree": None,
Example n. 17
# -*- coding: utf-8 -*-

from pandas import Series
import numpy as np
from tvb_nest.simulator_nest.nest_factory import build_and_connect_output_devices
from tvb_nest.interfaces.tvb_to_nest_device_interface import INPUT_INTERFACES_DICT
from tvb_scripts.utils.log_error_utils import initialize_logger, raise_value_error
from tvb_scripts.utils.data_structures_utils import property_to_fun

LOG = initialize_logger(__name__)


class TVBtoNESTDeviceInterfaceBuilder(object):
    interfaces = []
    nest_instance = None
    nest_nodes = Series()
    tvb_nodes_ids = []
    nest_nodes_ids = []
    tvb_model = None
    tvb_weights = None
    tvb_delays = None
    tvb_dt = 0.1
    exclusive_nodes = False
    node_labels = None

    def __init__(self,
                 interfaces,
                 nest_instance,
                 nest_nodes,
                 nest_nodes_ids,
                 tvb_nodes_ids,
Example n. 18
class H5Writer(object):

    config = CONFIGURED
    logger = initialize_logger(__name__)
    H5_TYPE_ATTRIBUTE = "Type"
    H5_SUBTYPE_ATTRIBUTE = "Subtype"
    H5_VERSION_ATTRIBUTE = "Version"
    H5_DATE_ATTRIBUTE = "Last_update"
    force_overwrite = True
    write_mode = "a"

    def _open_file(self, name, path=None, h5_file=None):
        if h5_file is None:
            if self.write_mode == "w":
                path = change_filename_or_overwrite(path, self.force_overwrite)
            self.logger.info("Starting to write %s to: %s" % (name, path))
            h5_file = h5py.File(path, self.write_mode, libver='latest')
        return h5_file, path

    def _close_file(self, h5_file, close_file=True):
        if close_file:
            h5_file.close()

    def _log_success(self, name, path=None):
        if path is not None:
            self.logger.info("%s has been written to file: %s" % (name, path))

    def _determine_datasets_and_attributes(self, object, datasets_size=None):
        datasets_dict = {}
        metadata_dict = {}
        groups_keys = []

        try:
            if isinstance(object, dict):
                dict_object = object
            elif hasattr(object, "to_dict"):
                dict_object = object.to_dict()
            else:
                dict_object = vars(object)
            for key, value in dict_object.items():
                if isinstance(value, numpy.ndarray):
                    datasets_dict.update({key: value})
                # TODO: check how this works! Be careful not to include lists and tuples, if possible, in tvb classes!
                elif isinstance(value, (list, tuple)):
                    warning(
                        "Writing %s %s to h5 file as a numpy array dataset !" %
                        (value.__class__, key), self.logger)
                    datasets_dict.update({key: numpy.array(value)})
                else:
                    if is_numeric(value) or isinstance(value, str):
                        metadata_dict.update({key: value})
                    elif callable(value):
                        metadata_dict.update({key: inspect.getsource(value)})
                    elif value is None:
                        continue
                    else:
                        groups_keys.append(key)
        except:
            msg = "Failed to decompose group object: " + str(object) + "!"
            try:
                self.logger.info(str(object.__dict__))
            except:
                msg += "\n It has no __dict__ attribute!"
            warning(msg, self.logger)

        return datasets_dict, metadata_dict, groups_keys

    def _write_dicts_at_location(self, datasets_dict, metadata_dict, location):
        for key, value in datasets_dict.items():
            try:
                try:
                    location.create_dataset(key, data=value)
                except:
                    location.create_dataset(key, data=str(value))
            except:
                warning(
                    "Failed to write to %s dataset %s %s:\n%s !" %
                    (str(location), value.__class__, key, str(value)),
                    self.logger)

        for key, value in metadata_dict.items():
            try:
                location.attrs.create(key, value)
            except:
                warning(
                    "Failed to write to %s attribute %s %s:\n%s !" %
                    (str(location), value.__class__, key, str(value)),
                    self.logger)
        return location

    def _prepare_object_for_group(self,
                                  group,
                                  object,
                                  h5_type_attribute="",
                                  nr_regions=None,
                                  regress_subgroups=True):
        group.attrs.create(self.H5_TYPE_ATTRIBUTE, h5_type_attribute)
        group.attrs.create(self.H5_SUBTYPE_ATTRIBUTE,
                           object.__class__.__name__)
        datasets_dict, metadata_dict, subgroups = self._determine_datasets_and_attributes(
            object, nr_regions)
        # If empty return None
        if len(datasets_dict) == len(metadata_dict) == len(subgroups) == 0:
            if isinstance(group, h5py.File):
                if regress_subgroups:
                    return group
                else:
                    return group, subgroups
            else:
                return None
        else:
            if len(datasets_dict) > 0 or len(metadata_dict) > 0:
                if isinstance(group, h5py.File):
                    group = self._write_dicts_at_location(
                        datasets_dict, metadata_dict, group)
                else:
                    self._write_dicts_at_location(datasets_dict, metadata_dict,
                                                  group)
            # Continue recursively going deeper in the object
            if regress_subgroups:
                for subgroup in subgroups:
                    if isinstance(object, dict):
                        child_object = object.get(subgroup, None)
                    else:
                        child_object = getattr(object, subgroup, None)
                    if child_object is not None:
                        group.create_group(subgroup)
                        temp = self._prepare_object_for_group(
                            group[subgroup], child_object, h5_type_attribute,
                            nr_regions)
                        # If empty delete it
                        if temp is None or (len(temp.keys()) == 0
                                            and len(temp.attrs.keys()) == 0):
                            del group[subgroup]

                return group
            else:
                return group, subgroups

    def write_object(self,
                     object,
                     h5_type_attribute="",
                     nr_regions=None,
                     path=None,
                     h5_file=None,
                     close_file=True):
        """
                :param object: object to write recursively in H5
                :param path: H5 path to be written
        """
        h5_file, path = self._open_file(object.__class__.__name__, path,
                                        h5_file)
        h5_file = self._prepare_object_for_group(h5_file, object,
                                                 h5_type_attribute, nr_regions)
        self._close_file(h5_file, close_file)
        self._log_success(object.__class__.__name__, path)
        return h5_file, path

    def write_list_of_objects(self,
                              list_of_objects,
                              path=None,
                              h5_file=None,
                              close_file=True):
        h5_file, path = self._open_file("List of objects", path, h5_file)
        for idict, object in enumerate(list_of_objects):
            idict_str = str(idict)
            h5_file.create_group(idict_str)
            self.write_object(object,
                              h5_file=h5_file[idict_str],
                              close_file=False)
        h5_file.attrs.create(self.H5_TYPE_ATTRIBUTE,
                             numpy.string_("List of objects"))
        h5_file.attrs.create(self.H5_SUBTYPE_ATTRIBUTE, numpy.string_("list"))
        self._close_file(h5_file, close_file)
        self._log_success("List of objects", path)
        return h5_file, path

    def _write_dictionary_to_group(self, dictionary, group):
        group.attrs.create(self.H5_TYPE_ATTRIBUTE, "Dictionary")
        group.attrs.create(self.H5_SUBTYPE_ATTRIBUTE,
                           dictionary.__class__.__name__)
        for key, value in dictionary.items():
            try:
                if isinstance(value, numpy.ndarray) and value.size > 0:
                    group.create_dataset(key, data=value)
                else:
                    if isinstance(value, list) and len(value) > 0:
                        group.create_dataset(key, data=numpy.array(value))
                    elif callable(value):
                        group.attrs.create(key, inspect.getsource(value))
                    elif isinstance(value, dict):
                        group.create_group(key)
                        self._write_dictionary_to_group(value, group[key])
                    elif value is None:
                        continue
                    else:
                        group.attrs.create(key, str(value))
            except:
                self.logger.warning("Did not manage to write " + key +
                                    " to h5 file " + str(group) + " !")

    def write_dictionary(self,
                         dictionary,
                         path=None,
                         h5_file=None,
                         close_file=True):
        """
        :param dictionary: dictionary/ies to write recursively in H5
        :param path: H5 path to be written
        Use this function only if you have to write dictionaries of data (scalars and arrays or lists of scalars,
        or of more such dictionaries recursively
        """
        h5_file, path = self._open_file("Dictionary", path, h5_file)
        self._write_dictionary_to_group(dictionary, h5_file)
        h5_file.attrs.create(self.H5_TYPE_ATTRIBUTE,
                             numpy.string_("Dictionary"))
        h5_file.attrs.create(self.H5_SUBTYPE_ATTRIBUTE,
                             numpy.string_(dictionary.__class__.__name__))
        self._close_file(h5_file, close_file)
        self._log_success("Dictionary", path)
        return h5_file, path

    def write_list_of_dictionaries(self,
                                   list_of_dicts,
                                   path=None,
                                   h5_file=None,
                                   close_file=True):
        h5_file, path = self._open_file("List of dictionaries", path, h5_file)
        for idict, dictionary in enumerate(list_of_dicts):
            idict_str = str(idict)
            h5_file.create_group(idict_str)
            self._write_dictionary_to_group(dictionary, h5_file[idict_str])
        h5_file.attrs.create(self.H5_TYPE_ATTRIBUTE,
                             numpy.string_("List of dictionaries"))
        h5_file.attrs.create(self.H5_SUBTYPE_ATTRIBUTE, numpy.string_("list"))
        self._close_file(h5_file, close_file)
        self._log_success("List of dictionaries", path)
        return h5_file, path

    def write_tvb_to_h5(self,
                        datatype,
                        path=None,
                        recursive=True,
                        force_overwrite=True):
        if path is None:
            path = self.config.out.FOLDER_RES
        if path.endswith("h5"):
            # It is a file path:
            dirpath = os.path.dirname(path)
            if os.path.isdir(dirpath):
                path = change_filename_or_overwrite(path, force_overwrite)
            else:
                os.mkdir(dirpath)
            h5.store(datatype, path, recursive)
        else:
            if not os.path.isdir(path):
                os.mkdir(path)
            from tvb_scripts.datatypes.head import Head
            if isinstance(datatype, Head):
                path = os.path.join(path, datatype.title)
                if not os.path.isdir(path):
                    os.mkdir(path)
                path = os.path.join(path, "Head.h5")
            else:
                path = os.path.join(path, datatype.title + ".h5")
            path = change_filename_or_overwrite(path, self.force_overwrite)
            h5.store(datatype, path, recursive)
        return path
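
A minimal round-trip sketch using the H5Writer above together with the H5Reader from Example n. 14 (the output path is a placeholder; both classes are assumed importable from the same package):

import numpy

writer = H5Writer()
reader = H5Reader()
data = {"weights": numpy.eye(3), "name": "toy", "nested": {"a": 1.0}}
# write_dictionary stores arrays as datasets, other scalars as (string)
# attributes, and recurses into nested dictionaries as HDF5 groups
_, path = writer.write_dictionary(data, path="/tmp/toy_dict.h5")  # placeholder
restored = reader.read_dictionary(path)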