Example #1
def set_empirical_data(empirical_file,
                       ts_file,
                       head,
                       sensors_lbls,
                       sensor_id=0,
                       seizure_length=SEIZURE_LENGTH,
                       times_on_off=[],
                       label_strip_fun=None,
                       plotter=None,
                       title_prefix="",
                       **kwargs):
    try:
        # Try to read previously preprocessed time series from file...
        return H5Reader().read_timeseries(ts_file)
    except Exception:
        # ...or preprocess the empirical data for the first time:
        if len(sensors_lbls) == 0:
            sensors_lbls = head.get_sensors_id(sensor_ids=sensor_id).labels
        signals = prepare_seeg_observable_from_mne_file(
            empirical_file,
            head.get_sensors_id(sensor_ids=sensor_id),
            sensors_lbls,
            seizure_length,
            times_on_off,
            label_strip_fun=label_strip_fun,
            plotter=plotter,
            title_prefix=title_prefix,
            **kwargs)
        H5Writer().write_timeseries(signals, ts_file)
        return signals
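
A minimal usage sketch, with hypothetical file paths and a head assumed to be
loaded beforehand (e.g. with H5Reader().read_head(...)):

# Hypothetical paths; times_on_off marks the seizure window in ms:
signals = set_empirical_data("/data/patient1/seizure1.EEG.mat",
                             "/data/patient1/ts_empirical.h5",
                             head, sensors_lbls=[], sensor_id=0,
                             times_on_off=[15000.0, 35000.0])
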
Example #2
def compute_seeg_and_write_ts_h5_file(folder,
                                      filename,
                                      model,
                                      vois_ts_dict,
                                      dt,
                                      time_length,
                                      hpf_flag=False,
                                      hpf_low=10.0,
                                      hpf_high=256.0,
                                      sensors_list=[],
                                      save_flag=True):
    fsAVG = 1000.0 / dt
    sensors_list = ensure_list(sensors_list)
    # Optionally high pass filter, and compute SEEG:
    if isinstance(model, EpileptorDP2D):
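        # The 2D model has no x2: x1 stands in for both the LFP and the third channel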
        raw_data = np.dstack(
            [vois_ts_dict["x1"], vois_ts_dict["z"], vois_ts_dict["x1"]])
        vois_ts_dict["lfp"] = vois_ts_dict["x1"]
        idx_proj = -1
        for sensor in sensors_list:
            if isinstance(sensor, Sensors):
                idx_proj += 1
                sensor_name = sensor.s_type + '%d' % idx_proj
                vois_ts_dict[sensor_name] = vois_ts_dict['x1'].dot(
                    sensor.gain_matrix.T)
                vois_ts_dict[sensor_name] -= np.min(vois_ts_dict[sensor_name])
                vois_ts_dict[sensor_name] /= np.max(vois_ts_dict[sensor_name])
    else:
        vois_ts_dict["lfp"] = vois_ts_dict["x2"] - vois_ts_dict["x1"]
        raw_data = np.dstack(
            [vois_ts_dict["x1"], vois_ts_dict["z"], vois_ts_dict["x2"]])
        if hpf_flag:
            hpf_low = max(hpf_low, 1000.0 / time_length)
            hpf_high = min(fsAVG / 2.0 - 10.0, hpf_high)
        idx_proj = -1
        for sensor in sensors_list:
            if isinstance(sensor, Sensors):
                idx_proj += 1
                sensor_name = sensor.s_type + '%d' % idx_proj
                vois_ts_dict[sensor_name] = vois_ts_dict['lfp'].dot(
                    sensor.gain_matrix.T)
                if hpf_flag:
                    for i in range(vois_ts_dict[sensor_name].shape[1]):
                        vois_ts_dict[sensor_name][:, i] = \
                            filter_data(vois_ts_dict[sensor_name][:, i], fsAVG, hpf_low, hpf_high)
                vois_ts_dict[sensor_name] -= np.min(vois_ts_dict[sensor_name])
                vois_ts_dict[sensor_name] /= np.max(vois_ts_dict[sensor_name])

    if save_flag:
        h5_writer = H5Writer()
        h5_writer.write_ts_epi(raw_data, dt, os.path.join(folder, filename),
                               vois_ts_dict["lfp"])
        # Write files:
        if idx_proj > -1:
            for i_sensor, sensor in enumerate(sensors_list):
                h5_writer.write_ts_seeg_epi(
                    vois_ts_dict[sensor.s_type + '%d' % i_sensor], dt,
                    os.path.join(folder, filename))
    return vois_ts_dict
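
Example #10 below invokes this helper on freshly simulated time series; a
condensed version of that call reads:

vois_ts_dict = compute_seeg_and_write_ts_h5_file(
    config.out.FOLDER_RES, dynamical_model._ui_name + "_ts.h5",
    sim.model, vois_ts_dict, output_sampling_time,
    sim_settings.simulated_period, hpf_flag=True, hpf_low=10.0,
    hpf_high=512.0, sensors_list=head.sensorsSEEG, save_flag=True)
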
Example #3
class TestFileUtils(BaseTest):
    writer = H5Writer()

    def test_change_filename_or_overwrite_always_overwrite(self):
        filename = "Test.h5"
        test_file = os.path.join(self.config.out.FOLDER_TEMP, filename)
        self.writer.write_dictionary({"a": [1, 2, 3]}, test_file)

        assert os.path.exists(test_file)

        change_filename_or_overwrite(test_file, True)

        assert not os.path.exists(test_file)
Example #4
def import_seeg(
    empirical_file="/Users/lia.domide/Downloads/TRECempirical/110223B-EEX_0004.EEG.mat",
    ignored_indices=[],
    ts_path=os.path.join(PATIENT_VIRTUAL_HEAD, "ep", "ts_empirical.h5")):
    data = loadmat(empirical_file)
    sampling_period = 1000.0 / data["sampling_rate_hz"][0]
    seeg_data = data['data']
    print("Ignoring %s" % ignored_indices)
    seeg_data = numpy.delete(seeg_data, ignored_indices, axis=0)
    seeg_data = seeg_data.transpose()
    print("SEEG data min/max: %s %s" % (seeg_data.min(), seeg_data.max()))
    raw_data = numpy.zeros((1000, 88, 3))
    h5_writer = H5Writer()
    h5_writer.write_ts(raw_data, sampling_period, ts_path)
    h5_writer.write_ts_seeg_epi(seeg_data, sampling_period, path=ts_path)
Example #5
    def write_model_data_to_file(self, model_data, reset_path=False, **kwargs):
        model_data_path = kwargs.get("model_data_path", self.model_data_path)
        if reset_path:
            self.model_data_path = model_data_path
        extension = model_data_path.split(".")[-1]
        if isequal_string(extension, "npy"):
            np.save(model_data_path, model_data)
        elif isequal_string(extension, "mat"):
            savemat(model_data_path, model_data)
        elif isequal_string(extension, "pkl"):
            with open(model_data_path, 'wb') as f:
                pickle.dump(model_data, f)
        elif isequal_string(extension, "R"):
            rdump(model_data_path, model_data)
        else:
            # Any other extension falls through to HDF5:
            H5Writer().write_dictionary(model_data, model_data_path)
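
A hypothetical usage sketch: the extension of model_data_path alone selects the
serializer (here, service stands for whatever object exposes this method):

service.write_model_data_to_file(model_data, model_data_path="model_data.npy")  # np.save
service.write_model_data_to_file(model_data, model_data_path="model_data.R")    # rdump
service.write_model_data_to_file(model_data, model_data_path="model_data.h5")   # falls through to H5Writer
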
Example #6
def correlate_sensors(
    empirical_file="/Users/lia.domide/Downloads/TRECempirical/110223B-EEX_0004.EEG.mat",
    existing_ep_file="/WORK/episense/episense-root/trunk/demo-data/SensorsSEEG_125.h5"
):
    data = loadmat(empirical_file)
    desired_labels = [str(i).strip().lower() for i in data["channel_names"]]
    reader = H5Reader()
    labels, locations = reader.read_sensors_of_type(existing_ep_file, "SEEG")
    new_labels = []
    new_locations = []
    ignored_indices = []
    for i, label in enumerate(desired_labels):
        if label not in labels:
            print "Ignored channel", label
            ignored_indices.append(i)
            continue
        idx = numpy.where(labels == label)[0][0]
        new_labels.append(label)
        new_locations.append(locations[idx])
    H5Writer().write_sensors(
        Sensors(new_labels, new_locations),
        os.path.join(os.path.dirname(PATIENT_VIRTUAL_HEAD),
                     "SensorsSEEG_" + str(len(new_labels)) + ".h5"))
    return ignored_indices
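
A hypothetical end-to-end sketch combining this with Example #4: first find the
channels missing from the existing SEEG sensors, then import the recording
without them (paths are placeholders):

empirical = "/data/TRECempirical/110223B-EEX_0004.EEG.mat"
ignored = correlate_sensors(empirical_file=empirical,
                            existing_ep_file="/data/SensorsSEEG_125.h5")
import_seeg(empirical_file=empirical, ignored_indices=ignored)
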
Example #7
class TestCustomH5Reader(BaseTest):
    reader = H5Reader()
    writer = H5Writer()
    in_head = InputConfig().HEAD
    not_existent_file = "NotExistent.h5"

    def test_read_connectivity(self):
        connectivity = self.reader.read_connectivity(
            os.path.join(self.in_head, "Connectivity.h5"))

        assert connectivity is not None
        assert connectivity.number_of_regions == 76

    def test_read_surface(self):
        surface = self.reader.read_surface(
            os.path.join(self.in_head, "CorticalSurface.h5"))

        assert surface is not None
        assert surface.vertices.shape[0] == 16

    def test_read_surface_not_existent_file(self):
        surface = self.reader.read_surface(
            os.path.join(self.in_head, self.not_existent_file))

        assert surface is None

    def test_read_region_mapping(self):
        region_mapping = self.reader.read_region_mapping(
            os.path.join(self.in_head, "RegionMapping.h5"))

        assert isinstance(region_mapping, numpy.ndarray)
        assert region_mapping.size == 16

    def test_read_region_mapping_not_existent_file(self):
        region_mapping = self.reader.read_region_mapping(
            os.path.join(self.in_head, self.not_existent_file))

        assert isinstance(region_mapping, numpy.ndarray)
        assert region_mapping.size == 0

    def test_read_volume_mapping(self):
        volume_mapping = self.reader.read_volume_mapping(
            os.path.join(self.in_head, "VolumeMapping.h5"))

        assert isinstance(volume_mapping, numpy.ndarray)
        assert volume_mapping.shape == (6, 5, 4)

    def test_read_volume_mapping_not_existent_file(self):
        volume_mapping = self.reader.read_volume_mapping(
            os.path.join(self.in_head, self.not_existent_file))

        assert isinstance(volume_mapping, numpy.ndarray)
        assert volume_mapping.shape == (0, )

    def test_sensors_of_type(self):
        sensors_file = os.path.join(self.in_head, "SensorsSEEG_20.h5")
        sensors = self.reader.read_sensors_of_type(sensors_file,
                                                   Sensors.TYPE_SEEG)

        assert sensors is not None
        assert sensors.number_of_sensors == 20

    def test_sensors_of_type_not_existent_file(self):
        sensors_file = os.path.join(self.in_head, self.not_existent_file)
        sensors = self.reader.read_sensors_of_type(sensors_file,
                                                   Sensors.TYPE_SEEG)

        assert sensors is None

    def test_read_sensors(self):
        sensors_seeg, sensors_eeg, sensors_meg = self.reader.read_sensors(
            self.in_head)

        assert len(sensors_seeg) > 0
        assert len(sensors_eeg) == 0
        assert len(sensors_meg) == 0
        assert sensors_seeg[0] is not None
        assert sensors_seeg[0].number_of_sensors == 20

    def test_read_hypothesis(self):
        test_file = os.path.join(self.config.out.FOLDER_TEMP,
                                 "TestHypothesis.h5")
        hypothesis_builder = HypothesisBuilder(3, self.config)
        dummy_hypothesis = hypothesis_builder.set_e_hypothesis(
            [0], [0.6]).build_hypothesis()

        self.writer.write_hypothesis(dummy_hypothesis, test_file)
        hypothesis = self.reader.read_hypothesis(test_file)

        assert dummy_hypothesis.number_of_regions == hypothesis.number_of_regions
        assert numpy.array_equal(dummy_hypothesis.x0_values,
                                 hypothesis.x0_values)
        assert dummy_hypothesis.x0_indices == hypothesis.x0_indices
        assert numpy.array_equal(dummy_hypothesis.e_values,
                                 hypothesis.e_values)
        assert dummy_hypothesis.e_indices == hypothesis.e_indices
        assert numpy.array_equal(dummy_hypothesis.w_values,
                                 hypothesis.w_values)
        assert dummy_hypothesis.w_indices == hypothesis.w_indices
        assert numpy.array_equal(dummy_hypothesis.lsa_propagation_indices,
                                 hypothesis.lsa_propagation_indices)
        if len(dummy_hypothesis.lsa_propagation_indices) == 0:
            assert numpy.array_equal([0, 0, 0],
                                     hypothesis.lsa_propagation_strengths)
        else:
            assert numpy.array_equal(
                dummy_hypothesis.lsa_propagation_strengths,
                hypothesis.lsa_propagation_strengths)

    def test_read_model_configuration(self):
        test_file = os.path.join(self.config.out.FOLDER_TEMP,
                                 "TestModelConfiguration.h5")
        dummy_mc = ModelConfiguration(x1eq=numpy.array([2.0, 3.0, 1.0]),
                                      zmode=None,
                                      zeq=numpy.array([3.0, 2.0, 1.0]),
                                      model_connectivity=numpy.array(
                                          [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0],
                                           [2.0, 2.0, 2.0]]),
                                      Ceq=numpy.array([1.0, 2.0, 3.0]))
        self.writer.write_model_configuration(dummy_mc, test_file)
        mc = self.reader.read_model_configuration(test_file)

        assert numpy.array_equal(dummy_mc.x1eq, mc.x1eq)
        assert numpy.array_equal(dummy_mc.zeq, mc.zeq)
        assert numpy.array_equal(dummy_mc.Ceq, mc.Ceq)
        assert numpy.array_equal(dummy_mc.model_connectivity,
                                 mc.model_connectivity)

    def test_read_lsa_service(self):
        test_file = os.path.join(self.config.out.FOLDER_TEMP,
                                 "TestLSAService.h5")
        dummy_lsa_service = LSAService()
        self.writer.write_lsa_service(dummy_lsa_service, test_file)

        lsa_service = self.reader.read_lsa_service(test_file)

        assert dummy_lsa_service.eigen_vectors_number_selection == lsa_service.eigen_vectors_number_selection
        assert dummy_lsa_service.eigen_vectors_number == lsa_service.eigen_vectors_number
        assert dummy_lsa_service.eigen_values == lsa_service.eigen_values
        assert dummy_lsa_service.eigen_vectors == lsa_service.eigen_vectors
        assert dummy_lsa_service.weighted_eigenvector_sum == lsa_service.weighted_eigenvector_sum
        assert dummy_lsa_service.normalize_propagation_strength == lsa_service.normalize_propagation_strength

    def test_read_model_configuration_builder(self):
        test_file = os.path.join(self.config.out.FOLDER_TEMP,
                                 "TestModelConfigService.h5")
        dummy_mc_service = ModelConfigurationBuilder(3)
        self.writer.write_model_configuration_builder(dummy_mc_service,
                                                      test_file)

        mc_service = self.reader.read_model_configuration_builder(test_file)

        assert dummy_mc_service.number_of_regions == mc_service.number_of_regions
        assert numpy.array_equal(dummy_mc_service.x0_values,
                                 mc_service.x0_values)
        assert numpy.array_equal(dummy_mc_service.K_unscaled,
                                 mc_service.K_unscaled)
        assert numpy.array_equal(dummy_mc_service.e_values,
                                 mc_service.e_values)
        assert dummy_mc_service.yc == mc_service.yc
        assert dummy_mc_service.Iext1 == mc_service.Iext1
        assert dummy_mc_service.Iext2 == mc_service.Iext2
        assert dummy_mc_service.a == mc_service.a
        assert dummy_mc_service.b == mc_service.b
        assert dummy_mc_service.d == mc_service.d
        assert dummy_mc_service.slope == mc_service.slope
        assert dummy_mc_service.s == mc_service.s
        assert dummy_mc_service.gamma == mc_service.gamma
        assert dummy_mc_service.tau1 == mc_service.tau1
        assert dummy_mc_service.tau0 == mc_service.tau0
        assert dummy_mc_service.zmode == mc_service.zmode
        assert dummy_mc_service.x1eq_mode == mc_service.x1eq_mode
        assert numpy.array_equal(dummy_mc_service.K, mc_service.K)
        assert dummy_mc_service.x0cr == mc_service.x0cr
        assert dummy_mc_service.rx0 == mc_service.rx0

    def test_read_simulation_settings(self):
        test_file = os.path.join(self.config.out.FOLDER_TEMP,
                                 "TestSimSettings.h5")
        dummy_sim_settings = SimulationSettings()
        self.writer.write_simulation_settings(dummy_sim_settings, test_file)

        sim_settings = self.reader.read_simulation_settings(test_file)

        assert dummy_sim_settings.integration_step == sim_settings.integration_step
        assert dummy_sim_settings.simulated_period == sim_settings.simulated_period
        assert dummy_sim_settings.integrator_type == sim_settings.integrator_type
        assert dummy_sim_settings.noise_type == sim_settings.noise_type
        assert dummy_sim_settings.noise_ntau == sim_settings.noise_ntau
        assert dummy_sim_settings.noise_intensity == sim_settings.noise_intensity
        assert dummy_sim_settings.noise_seed == sim_settings.noise_seed
        assert dummy_sim_settings.monitor_type == sim_settings.monitor_type
        assert dummy_sim_settings.monitor_sampling_period == sim_settings.monitor_sampling_period
        assert dummy_sim_settings.monitor_expressions == sim_settings.monitor_expressions
        assert numpy.array_equal(dummy_sim_settings.initial_conditions,
                                 sim_settings.initial_conditions)
Example #8
import os
from tvb_epilepsy.base.constants.config import Config
from tvb_epilepsy.base.utils.log_error_utils import initialize_logger
from tvb_epilepsy.io.tvb_data_reader import TVBReader
from tvb_epilepsy.io.h5_reader import H5Reader
from tvb_epilepsy.io.h5_writer import H5Writer
from tvb_epilepsy.plot.plotter import Plotter
# input_folder = os.path.join(os.path.expanduser("~"), 'Dropbox', 'Work', 'VBtech', 'VEP', "results", "CC", "TVB3", "tvb")
# head_folder = os.path.join(os.path.expanduser("~"), 'Dropbox', 'Work', 'VBtech', 'VEP', "results", "CC", "TVB3", "Head")
input_folder = os.path.join(os.path.expanduser("~"), 'Dropbox', 'Work',
                            'VBtech', 'VEP', "results", "INS", "JUNCH", "tvb")
head_folder = os.path.join(os.path.expanduser("~"), 'Dropbox', 'Work',
                           'VBtech', 'VEP', "results", "INS", "JUNCH", "Head")
output_folder = os.path.join(os.path.expanduser("~"), 'Dropbox', 'Work',
                             'VBtech', 'VEP', "results", "tests")
config = Config(head_folder=input_folder,
                output_base=output_folder,
                data_mode="tvb")  #, data_mode="java"
config.hypothesis.head_folder = head_folder
config.figures.MATPLOTLIB_BACKEND = "inline"
config.figures.SHOW_FLAG = True
logger = initialize_logger(__name__, config.out.FOLDER_LOGS)
reader = TVBReader() if config.input.IS_TVB_MODE else H5Reader()
writer = H5Writer()
plotter = Plotter(config)

logger.info("Reading from: " + config.input.HEAD)
head = reader.read_head(config.input.HEAD,
                        seeg_sensors_files=[("seeg_xyz.txt", )])
print("OK!")
Example #9
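# Excerpt: inside a loop over TVB patients (index ii), collecting per-head connectivity statistics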
            continue

        connections_sum = head.connectivity.normalized_weights.sum(axis=1)
        if np.all(connections_sum > 0):
            for attribute in stats.keys():
                stats[attribute].append(getattr(head.connectivity, attribute))
        else:
            zero_conn_regions = np.where(connections_sum == 0)[0]
            logger.warning(
                "Excluding patient TVB " + str(ii) +
                " because of the following regions with zero connectivity:\n"
                + str(zero_conn_regions))
            continue

        patients_included.append(ii)

    patients_included = np.array(patients_included)
    n_patients_included = patients_included.size

    results = OrderedDict()
    results["number_of_patients"] = n_patients_included
    results["patients_included"] = np.array(
        ["TVB" + str(p) for p in patients_included])
    results["patients_ids_included"] = np.array(patients_included)
    for attribute in stats.keys():
        stats[attribute] = np.stack(stats[attribute], axis=-1)
        results[attribute + "_mean"] = stats[attribute].mean(axis=-1).squeeze()

    H5Writer().write_dictionary(results,
                                os.path.join(output_base, "heads_stats.h5"))
Example #10
def from_model_configuration_to_simulation(model_configuration,
                                           head,
                                           lsa_hypothesis,
                                           sim_type="realistic",
                                           dynamical_model="EpileptorDP2D",
                                           ts_file=None,
                                           plot_flag=True,
                                           config=Config()):
    # Choose model
    # Available models beyond the TVB Epileptor (they all encompass optional variations from the different papers):
    # EpileptorDP: similar to the TVB Epileptor + optional variations
    # EpileptorDP2D: reduced 2D model, following Proix et al. 2014 + optional variations
    # EpileptorDPrealistic: starting from the TVB Epileptor + optional variations, but:
    #      - x0, Iext1, Iext2, slope and K become noisy state variables,
    #      - Iext2 and slope are coupled to z, g, or z*g in order for spikes to appear before seizure,
    #      - multiplicative correlated noise is also used
    # Optional variations:
    if dynamical_model == "EpileptorDP2D":
        spectral_raster_plot = False
        trajectories_plot = True
    else:
        spectral_raster_plot = False  # "lfp"
        trajectories_plot = False

    # ------------------------------Simulation--------------------------------------
    logger.info("\n\nConfiguring simulation...")
    if isequal_string(sim_type, "realistic"):
        sim, sim_settings, dynamical_model = build_simulator_TVB_realistic(
            model_configuration, head.connectivity)
    elif isequal_string(sim_type, "fitting"):
        sim, sim_settings, dynamical_model = build_simulator_TVB_fitting(
            model_configuration, head.connectivity)
    elif isequal_string(sim_type, "paper"):
        sim, sim_settings, dynamical_model = build_simulator_TVB_paper(
            model_configuration, head.connectivity)
    else:
        sim, sim_settings, dynamical_model = build_simulator_TVB_default(
            model_configuration, head.connectivity)

    writer = H5Writer()
    writer.write_generic(sim.model, config.out.FOLDER_RES,
                         dynamical_model._ui_name + "_model.h5")

    vois_ts_dict = {}
    if ts_file is not None and os.path.isfile(ts_file):
        logger.info("\n\nLoading previously simulated time series...")
        vois_ts_dict = H5Reader().read_dictionary(ts_file)
    else:
        logger.info("\n\nSimulating...")
        ttavg, tavg_data, status = sim.launch_simulation(
            report_every_n_monitor_steps=100)
        if not status:
            logger.warning("\nSimulation failed!")
        else:
            time = np.array(ttavg, dtype='float32').flatten()
            output_sampling_time = np.mean(np.diff(time))
            tavg_data = tavg_data[:, :, :, 0]
            logger.info("\n\nSimulated signal return shape: %s",
                        tavg_data.shape)
            logger.info("Time: %s - %s", time[0], time[-1])
            logger.info("Values: %s - %s", tavg_data.min(), tavg_data.max())
            # Variables of interest in a dictionary:
            vois_ts_dict = prepare_vois_ts_dict(
                dynamical_model.variables_of_interest, tavg_data)
            vois_ts_dict['time'] = time
            vois_ts_dict['time_units'] = 'msec'
            vois_ts_dict = compute_seeg_and_write_ts_h5_file(
                config.out.FOLDER_RES,
                dynamical_model._ui_name + "_ts.h5",
                sim.model,
                vois_ts_dict,
                output_sampling_time,
                sim_settings.simulated_period,
                hpf_flag=True,
                hpf_low=10.0,
                hpf_high=512.0,
                sensors_list=head.sensorsSEEG,
                save_flag=True)
            if isinstance(ts_file, basestring):
                writer.write_dictionary(
                    vois_ts_dict,
                    os.path.join(os.path.dirname(ts_file),
                                 os.path.basename(ts_file)))
    if plot_flag and len(vois_ts_dict) > 0:
        # Plot results
        Plotter(config).plot_sim_results(
            sim.model,
            lsa_hypothesis.lsa_propagation_indices,
            vois_ts_dict,
            sensorsSEEG=head.sensorsSEEG,
            hpf_flag=False,
            trajectories_plot=trajectories_plot,
            spectral_raster_plot=spectral_raster_plot,
            log_scale=True,
            region_labels=head.connectivity.region_labels)
    return vois_ts_dict
Example #11
def pse_from_lsa_hypothesis(lsa_hypothesis,
                            model_connectivity,
                            region_labels,
                            n_samples,
                            param_range=0.1,
                            global_coupling=[],
                            healthy_regions_parameters=[],
                            model_configuration_builder=None,
                            lsa_service=None,
                            save_flag=False,
                            folder_res=OutputConfig().FOLDER_RES,
                            filename=None,
                            logger=None,
                            **kwargs):
    if logger is None:
        logger = initialize_logger(__name__)
    all_regions_indices = range(lsa_hypothesis.number_of_regions)
    disease_indices = lsa_hypothesis.get_regions_disease_indices()
    healthy_indices = np.delete(all_regions_indices, disease_indices).tolist()
    pse_params = {"path": [], "indices": [], "name": [], "samples": []}
    sampler = StochasticSamplingService(n_samples=n_samples,
                                        random_seed=kwargs.get(
                                            "random_seed", None))
    # First build from the hypothesis the input parameters of the parameter search exploration.
    # These can be either originating from excitability, epileptogenicity or connectivity hypotheses,
    # or they can relate to the global coupling scaling (parameter K of the model configuration)
    for ii in range(len(lsa_hypothesis.x0_values)):
        pse_params["indices"].append([ii])
        pse_params["path"].append("hypothesis.x0_values")
        pse_params["name"].append(
            str(region_labels[lsa_hypothesis.x0_indices[ii]]) +
            " Excitability")

        # Now generate samples using a truncated normal distribution
        pse_params["samples"].append(
            sampler.generate_samples(
                parameter=(
                    lsa_hypothesis.x0_values[ii],  # loc
                    param_range / 3.0),  # scale
                probability_distribution="norm",
                high=MAX_DISEASE_VALUE,
                shape=(1, )))
        # pse_params["samples"].append(
        #     sampler.generate_samples(parameter=(lsa_hypothesis.x0_values[ii] - param_range,  # loc
        #                                         2 * param_range),                            # scale
        #                              probability_distribution="uniform",
        #                              high=MAX_DISEASE_VALUE, shape=(1,)))
    for ii in range(len(lsa_hypothesis.e_values)):
        pse_params["indices"].append([ii])
        pse_params["path"].append("hypothesis.e_values")
        pse_params["name"].append(
            str(region_labels[lsa_hypothesis.e_indices[ii]]) +
            " Epileptogenicity")

        # Now generate samples using a truncated normal distribution
        pse_params["samples"].append(
            sampler.generate_samples(
                parameter=(
                    lsa_hypothesis.e_values[ii],  # loc
                    param_range / 3.0),  # scale
                probability_distribution="norm",
                high=MAX_DISEASE_VALUE,
                shape=(1, )))
        # pse_params["samples"].append(
        #     sampler.generate_samples(parameter=(lsa_hypothesis.e_values[ii] - param_range,  # loc
        #                                         2 * param_range),  # scale
        #                              probability_distribution="uniform",
        #                              high=MAX_DISEASE_VALUE, shape=(1,)))
    for ii in range(len(lsa_hypothesis.w_values)):
        pse_params["indices"].append([ii])
        pse_params["path"].append("hypothesis.w_values")
        inds = linear_index_to_coordinate_tuples(lsa_hypothesis.w_indices[ii],
                                                 model_connectivity.shape)
        if len(inds) == 1:
            pse_params["name"].append(
                str(region_labels[inds[0][0]]) + "-" +
                str(region_labels[inds[0][1]]) + " Connectivity")
        else:
            pse_params["name"].append("Connectivity[" + str(inds) + "]")
        # Now generate samples using a truncated normal distribution
        pse_params["samples"].append(
            sampler.generate_samples(
                parameter=(
                    lsa_hypothesis.w_values[ii],  # loc
                    param_range * lsa_hypothesis.w_values[ii]),  # scale
                probability_distribution="norm",
                low=0.0,
                shape=(1, )))
    kloc = model_configuration_builder.K_unscaled[0]  # used only by the commented-out alternative below
    for val in global_coupling:
        pse_params["path"].append("model_configuration_builder.K_unscaled")
        inds = val.get("indices", all_regions_indices)
        if np.all(inds == all_regions_indices):
            pse_params["name"].append("Global coupling")
        else:
            pse_params["name"].append("Afferent coupling[" + str(inds) + "]")
        pse_params["indices"].append(inds)

        # Now generate samples using a truncated uniform distribution
        pse_params["samples"].append(
            sampler.generate_samples(
                parameter=(
                    1.0,  # loc
                    100),  # scale
                probability_distribution="uniform",
                low=1.0,
                shape=(1, )))
        # pse_params["samples"].append(
        #     sampler.generate_samples(parameter=(kloc,  # loc
        #                                         30 * param_range),  # scale
        #                              probability_distribution="norm", low=0.0, shape=(1,)))
    pse_params_list = dicts_of_lists_to_lists_of_dicts(pse_params)
    # Add a random jitter to the healthy regions if required...:
    for val in healthy_regions_parameters:
        inds = val.get("indices", healthy_indices)
        name = val.get("name", "x0_values")
        n_params = len(inds)
        samples = sampler.generate_samples(
            parameter=(
                0.0,  # loc
                param_range / 10),  # scale
            probability_distribution="norm",
            shape=(n_params, ))
        for ii in range(n_params):
            pse_params_list.append({
                "path": "model_configuration_builder." + name,
                "samples": samples[ii],
                "indices": [inds[ii]],
                "name": name
            })
    # Now run pse service to generate output samples:
    # pse_old = PSEService("LSA", hypothesis=lsa_hypothesis, params_pse=pse_params_list)
    # pse_results, execution_status = pse_old.run_pse(model_connectivity, grid_mode=False, lsa_service_input=lsa_service,
    #                                             model_configuration_builder_input=model_configuration_builder)
    pse = LSAPSEService(hypothesis=lsa_hypothesis, params_pse=pse_params_list)
    pse_results, execution_status = pse.run_pse(model_connectivity, False,
                                                model_configuration_builder,
                                                lsa_service)
    # Call to new PSEService:
    # pse = LSAPSEService(lsa_hypothesis, pse_params_list)
    # pse_results, execution_status = pse.run_pse(model_connectivity, False, lsa_service, model_configuration_builder)
    pse_results = list_of_dicts_to_dicts_of_ndarrays(pse_results)
    for key in pse_results.keys():
        pse_results[key + "_mean"] = np.mean(pse_results[key], axis=0)
        pse_results[key + "_std"] = np.std(pse_results[key], axis=0)
    if save_flag:
        logger.info(pse.__repr__())
        if not (isinstance(filename, basestring)):
            filename = "LSA_PSA"
        writer = H5Writer()
        writer.write_pse_service(
            pse, os.path.join(folder_res, filename + "_pse_service.h5"))
        writer.write_dictionary(pse_results,
                                os.path.join(folder_res, filename + ".h5"))

    return pse_results, pse_params_list
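
A hypothetical invocation sketch, assuming the LSA hypothesis, the model
configuration builder and the LSA service have been built beforehand (cf. the
other examples):

pse_results, pse_params_list = pse_from_lsa_hypothesis(
    lsa_hypothesis,
    head.connectivity.normalized_weights,
    head.connectivity.region_labels,
    n_samples=100,
    model_configuration_builder=builder,  # hypothetical ModelConfigurationBuilder
    lsa_service=lsa_service,
    save_flag=True)
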
Example #12
from tvb_epilepsy.base.datatypes.dot_dicts import DictDot, OrderedDictDot
from tvb_epilepsy.base.utils.log_error_utils import initialize_logger
from tvb_epilepsy.base.utils.data_structures_utils import isequal_string
from tvb_epilepsy.base.model.disease_hypothesis import DiseaseHypothesis
from tvb_epilepsy.base.model.vep.connectivity import Connectivity, ConnectivityH5Field
from tvb_epilepsy.base.model.vep.head import Head
from tvb_epilepsy.base.model.vep.sensors import Sensors, SensorsH5Field
from tvb_epilepsy.base.model.vep.surface import Surface, SurfaceH5Field
from tvb_epilepsy.base.model.timeseries import Timeseries, TimeseriesDimensions
from tvb_epilepsy.base.model.parameter import Parameter
from tvb_epilepsy.base.simulation_settings import SimulationSettings
from tvb_epilepsy.service.model_inversion.probabilistic_models_builders import *
from tvb_epilepsy.io.h5_model import read_h5_model
from tvb_epilepsy.io.h5_writer import H5Writer
from tvb_epilepsy.service.probabilistic_parameter_builder import generate_probabilistic_parameter

H5_TYPE_ATTRIBUTE = H5Writer().H5_TYPE_ATTRIBUTE
H5_SUBTYPE_ATTRIBUTE = H5Writer().H5_SUBTYPE_ATTRIBUTE
H5_TYPES_ATTRIBUTES = [H5_TYPE_ATTRIBUTE, H5_SUBTYPE_ATTRIBUTE]


class H5Reader(object):
    logger = initialize_logger(__name__)

    connectivity_filename = "Connectivity.h5"
    cortical_surface_filename = "CorticalSurface.h5"
    region_mapping_filename = "RegionMapping.h5"
    volume_mapping_filename = "VolumeMapping.h5"
    structural_mri_filename = "StructuralMRI.h5"
    sensors_filename_prefix = "Sensors"
    sensors_filename_separator = "_"
Example #13
def sensitivity_analysis_pse_from_lsa_hypothesis(n_samples,
                                                 lsa_hypothesis,
                                                 connectivity_matrix,
                                                 model_configuration_builder,
                                                 lsa_service,
                                                 region_labels,
                                                 method="sobol",
                                                 half_range=0.1,
                                                 global_coupling=[],
                                                 healthy_regions_parameters=[],
                                                 save_services=False,
                                                 config=Config(),
                                                 **kwargs):
    logger = initialize_logger(__name__, config.out.FOLDER_LOGS)
    method = method.lower()
    if np.in1d(method, METHODS):
        if np.in1d(method, ["delta", "dgsm"]):
            sampler = "latin"
        elif method == "sobol":
            sampler = "saltelli"
        elif method == "fast":
            sampler = "fast_sampler"
        else:
            sampler = method
    else:
        raise_value_error("Method " + str(method) +
                          " is not one of the available methods " +
                          str(METHODS) + " !")
    all_regions_indices = range(lsa_hypothesis.number_of_regions)
    disease_indices = lsa_hypothesis.regions_disease_indices
    healthy_indices = np.delete(all_regions_indices, disease_indices).tolist()
    pse_params = {"path": [], "indices": [], "name": [], "low": [], "high": []}
    n_inputs = 0
    # First build from the hypothesis the input parameters of the sensitivity analysis.
    # These can be either originating from excitability, epileptogenicity or connectivity hypotheses,
    # or they can relate to the global coupling scaling (parameter K of the model configuration)
    for ii in range(len(lsa_hypothesis.x0_values)):
        n_inputs += 1
        pse_params["indices"].append([ii])
        pse_params["path"].append("hypothesis.x0_values")
        pse_params["name"].append(
            str(region_labels[lsa_hypothesis.x0_indices[ii]]) +
            " Excitability")
        pse_params["low"].append(lsa_hypothesis.x0_values[ii] - half_range)
        pse_params["high"].append(
            np.min(
                [MAX_DISEASE_VALUE,
                 lsa_hypothesis.x0_values[ii] + half_range]))
    for ii in range(len(lsa_hypothesis.e_values)):
        n_inputs += 1
        pse_params["indices"].append([ii])
        pse_params["path"].append("hypothesis.e_values")
        pse_params["name"].append(
            str(region_labels[lsa_hypothesis.e_indices[ii]]) +
            " Epileptogenicity")
        pse_params["low"].append(lsa_hypothesis.e_values[ii] - half_range)
        pse_params["high"].append(
            np.min(
                [MAX_DISEASE_VALUE, lsa_hypothesis.e_values[ii] + half_range]))
    for ii in range(len(lsa_hypothesis.w_values)):
        n_inputs += 1
        pse_params["indices"].append([ii])
        pse_params["path"].append("hypothesis.w_values")
        inds = linear_index_to_coordinate_tuples(lsa_hypothesis.w_indices[ii],
                                                 connectivity_matrix.shape)
        if len(inds) == 1:
            pse_params["name"].append(
                str(region_labels[inds[0][0]]) + "-" +
                str(region_labels[inds[0][1]]) + " Connectivity")
        else:
            pse_params["name"].append("Connectivity[" + str(inds) + "]")
        pse_params["low"].append(
            np.max([lsa_hypothesis.w_values[ii] - half_range, 0.0]))
        pse_params["high"].append(lsa_hypothesis.w_values[ii] + half_range)
    for val in global_coupling:
        n_inputs += 1
        pse_params["path"].append("model.configuration.service.K_unscaled")
        inds = val.get("indices", all_regions_indices)
        if np.all(inds == all_regions_indices):
            pse_params["name"].append("Global coupling")
        else:
            pse_params["name"].append("Afferent coupling[" + str(inds) + "]")
        pse_params["indices"].append(inds)
        pse_params["low"].append(val.get("low", 0.0))
        pse_params["high"].append(val.get("high", 2.0))
    # Now generate samples suitable for sensitivity analysis
    sampler = SalibSamplingService(n_samples=n_samples,
                                   sampler=sampler,
                                   random_seed=kwargs.get("random_seed", None))
    input_samples = sampler.generate_samples(low=pse_params["low"],
                                             high=pse_params["high"],
                                             **kwargs)
    n_samples = input_samples.shape[1]
    pse_params.update(
        {"samples": [np.array(value) for value in input_samples.tolist()]})
    pse_params_list = dicts_of_lists_to_lists_of_dicts(pse_params)
    # Add a random jitter to the healthy regions if required...:
    sampler = ProbabilisticSamplingService(n_samples=n_samples,
                                           random_seed=kwargs.get(
                                               "random_seed", None))
    for val in healthy_regions_parameters:
        inds = val.get("indices", healthy_indices)
        name = val.get("name", "x0_values")
        n_params = len(inds)
        samples = sampler.generate_samples(
            parameter=(
                kwargs.get("loc", 0.0),  # loc
                kwargs.get("scale", 2 * half_range)),  # scale
            probability_distribution="uniform",
            low=0.0,
            shape=(n_params, ))
        for ii in range(n_params):
            pse_params_list.append({
                "path": "model_configuration_builder." + name,
                "samples": samples[ii],
                "indices": [inds[ii]],
                "name": name
            })
    # Now run pse service to generate output samples:
    pse = LSAPSEService(hypothesis=lsa_hypothesis, params_pse=pse_params_list)
    pse_results, execution_status = pse.run_pse(connectivity_matrix, False,
                                                model_configuration_builder,
                                                lsa_service)
    pse_results = list_of_dicts_to_dicts_of_ndarrays(pse_results)
    # Now prepare inputs and outputs and run the sensitivity analysis:
    # NOTE: without the jittered healthy regions, which we don't want to include in the sensitivity analysis!
    inputs = dicts_of_lists_to_lists_of_dicts(pse_params)
    outputs = [{
        "names": ["LSA Propagation Strength"],
        "values": pse_results["lsa_propagation_strengths"]
    }]
    sensitivity_analysis_service = SensitivityAnalysisService(
        inputs,
        outputs,
        method=method,
        calc_second_order=kwargs.get("calc_second_order", True),
        conf_level=kwargs.get("conf_level", 0.95))
    results = sensitivity_analysis_service.run(**kwargs)
    if save_services:
        logger.info(pse.__repr__())
        writer = H5Writer()
        writer.write_pse_service(
            pse,
            os.path.join(config.out.FOLDER_RES,
                         method + "_test_pse_service.h5"))
        logger.info(sensitivity_analysis_service.__repr__())
        writer.write_sensitivity_analysis_service(
            sensitivity_analysis_service,
            os.path.join(config.out.FOLDER_RES,
                         method + "_test_sa_service.h5"))
    return results, pse_results