Example 1
def main_pse(config=Config()):
    # -------------------------------Reading data-----------------------------------
    reader = Reader()
    writer = H5Writer()
    head = reader.read_head(config.input.HEAD)
    logger = initialize_logger(__name__, config.out.FOLDER_LOGS)

    # --------------------------Manual Hypothesis definition-----------------------------------
    n_samples = 100
    x0_indices = [20]
    x0_values = [0.9]
    e_indices = [70]
    e_values = [0.9]
    disease_indices = x0_indices + e_indices
    n_disease = len(disease_indices)

    n_x0 = len(x0_indices)
    n_e = len(e_indices)
    all_regions_indices = np.array(range(head.number_of_regions))
    healthy_indices = np.delete(all_regions_indices, disease_indices).tolist()
    n_healthy = len(healthy_indices)
    # This is an example of a mixed Excitability and Epileptogenicity hypothesis:
    hyp_x0_E = HypothesisBuilder(
        head.connectivity.number_of_regions).set_x0_hypothesis(
            x0_indices,
            x0_values).set_e_hypothesis(e_indices,
                                        e_values).build_hypothesis()

    # Now running the parameter search analysis:
    logger.info("running PSE LSA...")
    model_config, lsa_service, lsa_hypothesis, pse_res = pse_from_hypothesis(
        n_samples,
        hyp_x0_E,
        head.connectivity.normalized_weights,
        head.connectivity.region_labels,
        param_range=0.1,
        global_coupling=[{
            "indices": all_regions_indices
        }],
        healthy_regions_parameters=[{
            "name": "x0_values",
            "indices": healthy_indices
        }],
        save_flag=True)[:4]

    logger.info("Plotting LSA...")
    Plotter(config).plot_lsa(lsa_hypothesis,
                             model_config,
                             lsa_service.weighted_eigenvector_sum,
                             lsa_service.eigen_vectors_number,
                             region_labels=head.connectivity.region_labels,
                             pse_results=pse_res,
                             lsa_service=lsa_service)

    logger.info("Saving LSA results ...")
    writer.write_dictionary(
        pse_res,
        os.path.join(config.out.FOLDER_RES,
                     lsa_hypothesis.name + "_PSE_LSA_results.h5"))
Example 2
def parse_csv(fname, merge=True):
    if '*' in fname:
        import glob
        return parse_csv(glob.glob(fname), merge=merge)
    if isinstance(fname, (list, tuple)):
        csv = [parse_csv(_) for _ in fname]
        if merge:
            csv = merge_csv_data(*csv)
        return csv

    lines = []
    with open(fname, 'r') as fd:
        for line in fd.readlines():
            if not line.startswith('#'):
                lines.append(line.strip().split(','))
    names = [field.split('.') for field in lines[0]]
    data = []
    for id_line, line in enumerate(lines[1:]):
        append_data = True
        for iline in range(len(line)):
            try:
                line[iline] = float(line[iline])
            except ValueError:
                logger = initialize_logger(__name__)
                logger.warning("Failed to convert string " + line[iline] +
                               " to float!" + "\nSkipping line " +
                               str(id_line) + ":  " + str(line) + "!")
                append_data = False
                break
        if append_data:
            data.append(line)
    data = np.array(data)

    namemap = {}
    maxdims = {}
    for i, name in enumerate(names):
        if name[0] not in namemap:
            namemap[name[0]] = []
        namemap[name[0]].append(i)
        if len(name) > 1:
            maxdims[name[0]] = name[1:]

    for name in maxdims.keys():
        dims = []
        for dim in maxdims[name]:
            dims.append(int(dim))
        maxdims[name] = tuple(reversed(dims))

    # Data are linearized per Stan's CSV convention, e.g. a matrix is column-major.
    # TODO: arrays are row-major; how to distinguish a matrix from an array[,]?
    data_ = {}
    for name, idx in namemap.items():
        new_shape = (-1, ) + maxdims.get(name, ())
        data_[name] = data[:, idx].reshape(new_shape)

    return data_
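A minimal usage sketch of parse_csv; the file pattern and parameter names are placeholders, and it assumes merge_csv_data (defined elsewhere in this module) returns a dict keyed by parameter name:

# Hypothetical usage: "output_*.csv" and the parameter keys are illustrative only.
csv = parse_csv("output_*.csv", merge=True)  # glob-expands, parses and merges all files
for name, arr in csv.items():
    # Each value is an ndarray with the draws along the first axis,
    # reshaped to (-1,) + declared dims (note the column-major caveat above).
    print(name, arr.shape)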
class Timeseries(TimeseriesBase):

    logger = initialize_logger(__name__)

    def get_source(self):
        if TimeseriesDimensions.VARIABLES.value not in self.dimension_labels:
            self.logger.error(
                "No state variables are defined for this instance!")
            raise ValueError(
                "No state variables are defined for this instance!")

        if PossibleVariables.SOURCE.value in self.dimension_labels[
                TimeseriesDimensions.VARIABLES.value]:
            return self.get_state_variable(PossibleVariables.SOURCE.value)
        if PossibleVariables.X1.value in self.dimension_labels[
                TimeseriesDimensions.VARIABLES.value]:
            y0_ts = self.get_state_variable(PossibleVariables.X1.value)
            if PossibleVariables.X2.value in self.dimension_labels[
                    TimeseriesDimensions.VARIABLES.value]:
                self.logger.info(
                    "%s is computed using %s and %s state variables!" %
                    (PossibleVariables.SOURCE.value,
                     PossibleVariables.X1.value, PossibleVariables.X2.value))
                y2_ts = self.get_state_variable(PossibleVariables.X2.value)
                source_data = y2_ts.data - y0_ts.data
            else:
                self.logger.warning("%s is computed using %s state variable!" %
                                    (PossibleVariables.SOURCE.value,
                                     PossibleVariables.X1.value))
                source_data = -y0_ts.data
            source_dim_labels = OrderedDict({
                TimeseriesDimensions.SPACE.value:
                self.dimension_labels[TimeseriesDimensions.SPACE.value],
                TimeseriesDimensions.VARIABLES.value:
                [PossibleVariables.SOURCE.value]
            })
            return Timeseries(source_data, source_dim_labels, self.time_start,
                              self.time_step, self.time_unit)
        msg = ("%s is not computed and cannot be computed now because state "
               "variables %s and %s are not defined!" %
               (PossibleVariables.SOURCE.value, PossibleVariables.X1.value,
                PossibleVariables.X2.value))
        self.logger.error(msg)
        raise ValueError(msg)
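A minimal numpy sketch of the fallback arithmetic in get_source (illustrative values only, not tied to a real head model): the source signal is x2 - x1 when both state variables exist, and -x1 otherwise.

import numpy as np

x1 = np.array([0.1, 0.2, 0.3])  # stand-in for the x1 state-variable data
x2 = np.array([1.0, 1.1, 1.2])  # stand-in for the x2 state-variable data
source_both = x2 - x1           # branch taken when both x1 and x2 are defined
source_x1_only = -x1            # fallback branch when only x1 is defined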
Example 4
def read_edf(path,
             sensors,
             rois_selection=None,
             label_strip_fun=None,
             time_units="ms"):
    logger = initialize_logger(__name__)

    logger.info("Reading empirical dataset from mne file...")
    raw_data = read_raw_edf(path, preload=True)

    if not callable(label_strip_fun):
        label_strip_fun = lambda label: label

    rois = []
    rois_inds = []
    rois_lbls = []
    if rois_selection is None or len(rois_selection) == 0:
        rois_selection = sensors.labels

    logger.info("Selecting target signals from dataset...")
    for iR, s in enumerate(raw_data.ch_names):
        this_label = label_strip_fun(s)
        this_index = sensors.get_sensors_inds_by_sensors_labels(this_label)
        if this_label in rois_selection or (len(this_index) == 1 and
                                            this_index[0] in rois_selection):
            rois.append(iR)
            rois_inds.append(this_index[0])
            rois_lbls.append(this_label)

    data, times = raw_data[:, :]
    data = data[rois].T
    # Assuming that the EDF file's time units are seconds:
    if ensure_string(time_units).find("ms") == 0:
        times = 1000 * times
    sort_inds = np.argsort(rois_lbls)
    rois = np.array(rois)[sort_inds]
    rois_inds = np.array(rois_inds)[sort_inds]
    rois_lbls = np.array(rois_lbls)[sort_inds]
    data = data[:, sort_inds]

    return data, times, rois, rois_inds, rois_lbls
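A hedged usage sketch of read_edf; the path, the SEEG sensors object and the label-stripping rule are placeholders (read_raw_edf comes from mne.io):

# Hypothetical usage: "seeg.edf" and head.sensorsSEEG[0] are placeholders.
data, times, rois, rois_inds, rois_lbls = read_edf(
    "seeg.edf",
    head.sensorsSEEG[0],
    rois_selection=[],  # empty or None selection falls back to all sensor labels
    label_strip_fun=lambda s: s.replace("EEG ", "").strip(),
    time_units="ms")    # seconds in the file are converted to milliseconds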
class HeadService(object):
    logger = initialize_logger(__name__)

    def sensors_in_electrodes_disconnectivity(self, sensors, sensors_labels=[]):
        if len(sensors_labels) < 2:
            sensors_labels = sensors.labels
        n_sensors = len(sensors_labels)
        elec_labels, elec_inds = sensors.group_sensors_to_electrodes(sensors_labels)
        # Mask is 1 between sensors on different electrodes, 0 within an electrode:
        disconnectivity = np.ones((n_sensors, n_sensors))
        if len(elec_labels) >= 2:
            for ch in elec_inds:
                disconnectivity[np.ix_(ch, ch)] = 0.0
        return disconnectivity

    def vp2tvb_connectivity(self, vp_conn, model_connectivity=None, time_delay_flag=1):
        if model_connectivity is None:
            model_connectivity = vp_conn.normalized_weights
        return TVB_Connectivity(use_storage=False, weights=model_connectivity,
                                tract_lengths=time_delay_flag * vp_conn.tract_lengths,
                                region_labels=vp_conn.region_labels, centres=vp_conn.centres,
                                hemispheres=vp_conn.hemispheres, orientations=vp_conn.orientations, areas=vp_conn.areas)
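A standalone numpy sketch of the disconnectivity mask built by sensors_in_electrodes_disconnectivity, using a toy electrode grouping:

import numpy as np

elec_inds = [[0, 1], [2, 3, 4]]  # toy grouping: two electrodes with 2 and 3 contacts
n_sensors = 5
disconnectivity = np.ones((n_sensors, n_sensors))
for ch in elec_inds:
    disconnectivity[np.ix_(ch, ch)] = 0.0  # zero out the within-electrode blocks
# disconnectivity is now 0 within an electrode and 1 across electrodes.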
def pse_from_hypothesis(n_samples,
                        hypothesis,
                        model_connectivity,
                        region_labels,
                        param_range=0.1,
                        global_coupling=[],
                        healthy_regions_parameters=[],
                        save_flag=False,
                        folder_res="",
                        filename=None,
                        config=Config(),
                        model_config_kwargs={},
                        **kwargs):
    if not os.path.isdir(folder_res):
        folder_res = config.out.FOLDER_RES
    logger = initialize_logger(__name__)
    logger.info("Running hypothesis: " + hypothesis.name)

    # Compute lsa for this hypothesis before the parameter search:
    model_configuration_builder, model_configuration, lsa_service, lsa_hypothesis = \
        start_lsa_run(hypothesis, model_connectivity, config, **model_config_kwargs)
    pse_results, pse_params_list = pse_from_lsa_hypothesis(
        lsa_hypothesis,
        model_connectivity,
        model_configuration_builder,
        lsa_service,
        region_labels,
        n_samples,
        param_range,
        global_coupling,
        healthy_regions_parameters,
        save_flag,
        folder_res=folder_res,
        filename=filename,
        logger=logger,
        config=config,
        **kwargs)
    return model_configuration, lsa_service, lsa_hypothesis, pse_results, pse_params_list
def sensitivity_analysis_pse_from_hypothesis(n_samples,
                                             hypothesis,
                                             connectivity_matrix,
                                             region_labels,
                                             method="sobol",
                                             half_range=0.1,
                                             global_coupling=[],
                                             healthy_regions_parameters=[],
                                             save_services=False,
                                             config=Config(),
                                             model_config_kwargs={},
                                             **kwargs):
    logger = initialize_logger(__name__, config.out.FOLDER_LOGS)
    # Compute lsa for this hypothesis before sensitivity analysis:
    logger.info("Running hypothesis: " + hypothesis.name)
    model_configuration_builder, model_configuration, lsa_service, lsa_hypothesis = \
        start_lsa_run(hypothesis, connectivity_matrix, config, **model_config_kwargs)
    results, pse_results = sensitivity_analysis_pse_from_lsa_hypothesis(
        n_samples, lsa_hypothesis, connectivity_matrix,
        model_configuration_builder, lsa_service, region_labels, method,
        half_range, global_coupling, healthy_regions_parameters, save_services,
        config, **kwargs)
    return model_configuration_builder, model_configuration, lsa_service, lsa_hypothesis, results, pse_results
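A hedged call sketch for sensitivity_analysis_pse_from_hypothesis, reusing the placeholder inputs defined in Example 1; keyword names follow the signature above:

# Hypothetical usage; all inputs are the ones defined in Example 1.
(model_config_builder, model_config, lsa_service, lsa_hypothesis,
 sa_results, pse_sa_results) = sensitivity_analysis_pse_from_hypothesis(
     n_samples,
     hyp_x0_E,
     head.connectivity.normalized_weights,
     head.connectivity.region_labels,
     method="sobol",
     half_range=0.1,
     global_coupling=[{"indices": all_regions_indices}],
     healthy_regions_parameters=[{"name": "x0_values",
                                  "indices": healthy_indices}])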
class PSEService(ABCPSEService):

    logger = initialize_logger(__name__)

    def update_model_config(self,
                            params,
                            conn_matrix=None,
                            model_config_builder_input=None,
                            hypothesis=None,
                            x1eq_mode="optimize"):
        # Create a ModelConfigService and update it
        if isinstance(model_config_builder_input, ModelConfigurationBuilder):
            model_configuration_builder = deepcopy(model_config_builder_input)
            if isinstance(conn_matrix, np.ndarray):
                model_configuration_builder.connectivity = conn_matrix
        else:
            model_configuration_builder = ModelConfigurationBuilder(
                connectivity=conn_matrix, x1eq_mode=x1eq_mode)
        model_configuration_builder.set_attributes_from_pse(
            params, self.params_paths, self.params_indices)
        # Copy and update hypothesis
        if isinstance(hypothesis, DiseaseHypothesis):
            hypo_copy = deepcopy(hypothesis)
            hypo_copy.update_for_pse(params, self.params_paths,
                                     self.params_indices)
        else:
            hypo_copy = DiseaseHypothesis(
                model_configuration_builder.number_of_regions)
        # Obtain ModelConfiguration
        if hypo_copy.type == "Epileptogenicity":
            model_configuration = model_configuration_builder.build_model_from_E_hypothesis(
                hypo_copy)
        else:
            model_configuration = model_configuration_builder.build_model_from_hypothesis(
                hypo_copy)
        return model_configuration, hypo_copy
Example 9
    def __init__(self, config=None):
        self.config = config or Config()
        self.logger = initialize_logger(self.__class__.__name__,
                                        self.config.out.FOLDER_LOGS)
        self.print_regions_indices = True
Example 10
def main_vep(config=Config(),
             ep_name=EP_NAME,
             K_unscaled=K_UNSCALED_DEF,
             ep_indices=[],
             hyp_norm=0.99,
             manual_hypos=[],
             sim_type="paper",
             pse_flag=PSE_FLAG,
             sa_pse_flag=SA_PSE_FLAG,
             sim_flag=SIM_FLAG,
             n_samples=100,
             test_write_read=False):
    logger = initialize_logger(__name__, config.out.FOLDER_LOGS)
    # -------------------------------Reading data-----------------------------------
    reader = TVBReader() if config.input.IS_TVB_MODE else H5Reader()
    writer = H5Writer()
    plotter = Plotter(config)
    logger.info("Reading from: " + config.input.HEAD)
    head = reader.read_head(config.input.HEAD)
    plotter.plot_head(head)
    if test_write_read:
        writer.write_head(head, os.path.join(config.out.FOLDER_RES, "Head"))
    # --------------------------Hypothesis definition-----------------------------------

    hypotheses = []
    # Reading a h5 file:

    if len(ep_name) > 0:
        # For an Excitability Hypothesis you leave e_indices empty
        # For a Mixed Hypothesis: you give as e_indices some indices for values > 0
        # For an Epileptogenicity Hypothesis: you give as e_indices all indices for values > 0
        hyp_file = HypothesisBuilder(head.connectivity.number_of_regions, config=config).set_normalize(hyp_norm). \
            build_hypothesis_from_file(ep_name, e_indices=ep_indices)
        hyp_file.name += ep_name
        # print(hyp_file.string_regions_disease(head.connectivity.region_labels))
        hypotheses.append(hyp_file)

    hypotheses += manual_hypos

    # --------------------------Hypothesis and LSA-----------------------------------
    for hyp in hypotheses:
        logger.info("\n\nRunning hypothesis: " + hyp.name)

        all_regions_indices = np.array(range(head.number_of_regions))
        healthy_indices = np.delete(all_regions_indices,
                                    hyp.regions_disease_indices).tolist()

        logger.info("\n\nCreating model configuration...")
        model_config_builder = ModelConfigurationBuilder("EpileptorDP2D", head.connectivity, K_unscaled=K_unscaled). \
                                    set_parameter("tau1", TAU1_DEF).set_parameter("tau0", TAU0_DEF)
        mcs_file = os.path.join(config.out.FOLDER_RES,
                                hyp.name + "_model_config_builder.h5")
        writer.write_model_configuration_builder(model_config_builder,
                                                 mcs_file)
        if test_write_read:
            logger.info(
                "Written and read model configuration builders are identical?: "
                + str(
                    assert_equal_objects(
                        model_config_builder,
                        reader.read_model_configuration_builder(mcs_file),
                        logger=logger)))
        # Fix healthy regions to default equilibria:
        # model_configuration = \
        #        model_config_builder.build_model_from_E_hypothesis(hyp)
        # Fix healthy regions to default x0s:
        model_configuration = model_config_builder.build_model_from_hypothesis(
            hyp)
        mc_path = os.path.join(config.out.FOLDER_RES,
                               hyp.name + "_ModelConfig.h5")
        writer.write_model_configuration(model_configuration, mc_path)
        if test_write_read:
            logger.info(
                "Written and read model configuration are identical?: " + str(
                    assert_equal_objects(model_configuration,
                                         reader.read_model_configuration(
                                             mc_path),
                                         logger=logger)))
        # Plot nullclines and equilibria of model configuration
        plotter.plot_state_space(model_configuration,
                                 head.connectivity.region_labels,
                                 special_idx=hyp.regions_disease_indices,
                                 figure_name=hyp.name + "_StateSpace")

        logger.info("\n\nRunning LSA...")
        lsa_service = LSAService(eigen_vectors_number=1)
        lsa_hypothesis = lsa_service.run_lsa(hyp, model_configuration)

        lsa_path = os.path.join(config.out.FOLDER_RES,
                                lsa_hypothesis.name + "_LSA.h5")
        lsa_config_path = os.path.join(config.out.FOLDER_RES,
                                       lsa_hypothesis.name + "_LSAConfig.h5")
        writer.write_hypothesis(lsa_hypothesis, lsa_path)
        writer.write_lsa_service(lsa_service, lsa_config_path)
        if test_write_read:
            logger.info("Written and read LSA services are identical?: " + str(
                assert_equal_objects(lsa_service,
                                     reader.read_lsa_service(lsa_config_path),
                                     logger=logger)))
            logger.info(
                "Written and read LSA hypotheses are identical (no input check)?: "
                + str(
                    assert_equal_objects(lsa_hypothesis,
                                         reader.read_hypothesis(lsa_path),
                                         logger=logger)))
        plotter.plot_lsa(lsa_hypothesis,
                         model_configuration,
                         lsa_service.weighted_eigenvector_sum,
                         lsa_service.eigen_vectors_number,
                         head.connectivity.region_labels,
                         None,
                         lsa_service=lsa_service)

        if pse_flag:
            # --------------Parameter Search Exploration (PSE)-------------------------------
            logger.info("\n\nRunning PSE LSA...")
            pse_results = pse_from_lsa_hypothesis(
                n_samples,
                lsa_hypothesis,
                model_configuration.connectivity,
                model_config_builder,
                lsa_service,
                head.connectivity.region_labels,
                param_range=0.1,
                global_coupling=[{
                    "indices": all_regions_indices
                }],
                healthy_regions_parameters=[{
                    "name": "x0_values",
                    "indices": healthy_indices
                }],
                logger=logger,
                save_flag=True)[0]
            plotter.plot_lsa(lsa_hypothesis, model_configuration,
                             lsa_service.weighted_eigenvector_sum,
                             lsa_service.eigen_vectors_number,
                             head.connectivity.region_labels, pse_results)

            pse_lsa_path = os.path.join(
                config.out.FOLDER_RES,
                lsa_hypothesis.name + "_PSE_LSA_results.h5")
            writer.write_dictionary(pse_results, pse_lsa_path)
            if test_write_read:
                logger.info(
                    "Written and read parameter search results are identical?: "
                    + str(
                        assert_equal_objects(pse_results,
                                             reader.read_dictionary(
                                                 pse_lsa_path),
                                             logger=logger)))

        if sa_pse_flag:
            # --------------Sensitivity Analysis Parameter Search Exploration (PSE)-------------------------------
            logger.info("\n\nrunning sensitivity analysis PSE LSA...")
            sa_results, pse_sa_results = \
                sensitivity_analysis_pse_from_lsa_hypothesis(n_samples, lsa_hypothesis,
                                                             model_configuration.connectivity,
                                                             model_config_builder, lsa_service,
                                                             head.connectivity.region_labels,
                                                             method="sobol", param_range=0.1,
                                                             global_coupling=[{"indices": all_regions_indices,
                                                                               "bounds": [0.0, 2 *
                                                                                          model_config_builder.K_unscaled[
                                                                                              0]]}],
                                                             healthy_regions_parameters=[
                                                                 {"name": "x0_values", "indices": healthy_indices}],
                                                             config=config)
            plotter.plot_lsa(lsa_hypothesis,
                             model_configuration,
                             lsa_service.weighted_eigenvector_sum,
                             lsa_service.eigen_vectors_number,
                             head.connectivity.region_labels,
                             pse_sa_results,
                             title="SA PSE Hypothesis Overview")

            sa_pse_path = os.path.join(
                config.out.FOLDER_RES,
                lsa_hypothesis.name + "_SA_PSE_LSA_results.h5")
            sa_lsa_path = os.path.join(
                config.out.FOLDER_RES,
                lsa_hypothesis.name + "_SA_LSA_results.h5")
            writer.write_dictionary(pse_sa_results, sa_pse_path)
            writer.write_dictionary(sa_results, sa_lsa_path)
            if test_write_read:
                logger.info(
                    "Written and read sensitivity analysis results are identical?: "
                    + str(
                        assert_equal_objects(sa_results,
                                             reader.read_dictionary(
                                                 sa_lsa_path),
                                             logger=logger)))
                logger.info(
                    "Written and read sensitivity analysis parameter search results are identical?: "
                    + str(
                        assert_equal_objects(pse_sa_results,
                                             reader.read_dictionary(
                                                 sa_pse_path),
                                             logger=logger)))

        if sim_flag:
            # --------------------------Simulation preparations-----------------------------------
            # Model choice. Available models beyond the TVB Epileptor (all encompass
            # optional variations from the different papers):
            # EpileptorDP: similar to the TVB Epileptor + optional variations,
            # EpileptorDP2D: reduced 2D model, following Proix et al. 2014 + optional variations,
            # EpileptorDPrealistic: starting from the TVB Epileptor + optional variations, but:
            #      - x0, Iext1, Iext2, slope and K become noisy state variables,
            #      - Iext2 and slope are coupled to z, g, or z*g so that spikes appear before seizure onset,
            #      - correlated noise is also used
            # We don't want any time delays for the moment
            head.connectivity.tract_lengths *= config.simulator.USE_TIME_DELAYS_FLAG
            sim_types = ensure_list(sim_type)
            for sim_type in sim_types:
                # ------------------------------Simulation--------------------------------------
                logger.info(
                    "\n\nConfiguring %s simulation from model_configuration..."
                    % sim_type)
                if isequal_string(sim_type, "realistic"):
                    sim_builder = SimulatorBuilder(model_configuration).set_model("EpileptorDPrealistic"). \
                        set_fs(2048.0).set_simulation_length(60000.0)
                    sim_builder.model_config.tau0 = 60000.0
                    sim_builder.model_config.tau1 = 0.2
                    sim_builder.model_config.slope = 0.25
                    sim_builder.model_config.pmode = np.array([PMODE_DEF])
                    sim_settings = sim_builder.build_sim_settings()
                    sim_settings.noise_type = COLORED_NOISE
                    sim_settings.noise_ntau = 20
                    # A more stable integrator is necessary:
                    sim_settings.integrator_type = "Dop853Stochastic"
                elif isequal_string(sim_type, "fitting"):
                    sim_builder = SimulatorBuilder(model_configuration).set_model("EpileptorDP2D"). \
                        set_fs(2048.0).set_fs_monitor(2048.0).set_simulation_length(300.0)
                    sim_builder.model_config.tau0 = 30.0
                    sim_builder.model_config.tau1 = 0.5
                    sim_settings = sim_builder.build_sim_settings()
                    sim_settings.noise_intensity = np.array([0.0, 1e-5])
                elif isequal_string(sim_type, "reduced"):
                    sim_builder = \
                        SimulatorBuilder(model_configuration).set_model("EpileptorDP2D").set_fs(
                            4096.0).set_simulation_length(1000.0)
                    sim_settings = sim_builder.build_sim_settings()
                elif isequal_string(sim_type, "paper"):
                    sim_builder = SimulatorBuilder(
                        model_configuration).set_model("Epileptor")
                    sim_settings = sim_builder.build_sim_settings()
                else:
                    sim_builder = SimulatorBuilder(
                        model_configuration).set_model("EpileptorDP")
                    sim_settings = sim_builder.build_sim_settings()

                # Integrator and initial conditions initialization.
                # By default initial condition is set right on the equilibrium point.
                sim, sim_settings = \
                    sim_builder.build_simulator_TVB_from_model_sim_settings(head.connectivity, sim_settings)
                sim_path = os.path.join(
                    config.out.FOLDER_RES,
                    lsa_hypothesis.name + "_" + sim_type + "_sim_settings.h5")
                model_path = os.path.join(
                    config.out.FOLDER_RES,
                    lsa_hypothesis.name + "_" + sim_type + "_model.h5")
                writer.write_simulation_settings(sim.settings, sim_path)
                writer.write_simulator_model(
                    sim.model, model_path, sim.connectivity.number_of_regions)
                if test_write_read:
                    # TODO: find out why it cannot set monitor expressions
                    logger.info(
                        "Written and read simulation settings are identical?: "
                        + str(
                            assert_equal_objects(
                                sim.settings,
                                reader.read_simulation_settings(sim_path),
                                logger=logger)))
                    # logger.info("Written and read simulation model are identical?: " +
                    #             str(assert_equal_objects(sim.model,
                    #                                      reader.read_epileptor_model(model_path), logger=logger)))

                logger.info("\n\nSimulating %s..." % sim_type)
                sim_output, status = sim.launch_simulation(
                    report_every_n_monitor_steps=100, timeseries=Timeseries)
                if not status:
                    logger.warning("\nSimulation failed!")
                else:
                    time = np.array(sim_output.time).astype("f")
                    logger.info("\n\nSimulated signal return shape: %s",
                                sim_output.shape)
                    logger.info("Time: %s - %s", time[0], time[-1])
                    logger.info("Values: %s - %s", sim_output.data.min(),
                                sim_output.data.max())
                    sim_output, seeg = compute_seeg_and_write_ts_to_h5(
                        sim_output,
                        sim.model,
                        head.sensorsSEEG,
                        os.path.join(config.out.FOLDER_RES,
                                     sim_type + "_ts.h5"),
                        seeg_gain_mode="lin",
                        hpf_flag=True,
                        hpf_low=10.0,
                        hpf_high=512.0)

                    # Plot results
                    plotter.plot_simulated_timeseries(
                        sim_output,
                        sim.model,
                        lsa_hypothesis.lsa_propagation_indices,
                        seeg_dict=seeg,
                        spectral_raster_plot=False,
                        title_prefix=hyp.name,
                        spectral_options={"log_scale": True})
Example 11
class SimulatorTVB(ABCSimulator):
    """
    This class is a wrapper over the TVB Simulator.
    It keeps the attributes needed to create and configure a TVB Simulator object.
    """
    logger = initialize_logger(__name__)

    simTVB = None

    def __init__(self, model_configuration, connectivity, settings):
        super(SimulatorTVB, self).__init__(model_configuration, connectivity,
                                           settings)
        self.simTVB = None

    def _vp2tvb_connectivity(self, time_delays_flag=True):
        return HeadService().vp2tvb_connectivity(
            self.connectivity, self.model_configuration.connectivity,
            time_delays_flag)

    def get_vois(self, model_vois=None):
        if model_vois is None:
            model_vois = self.simTVB.model.variables_of_interest
        return self.settings.monitor_expressions(model_vois)

    @property
    def model(self):
        return self.simTVB.model

    # General choices are made here to be used as an example.
    def config_simulation(self, model):
        # TODO: generate model from self.model_configuration for every specific implementation
        tvb_connectivity = self._vp2tvb_connectivity(TIME_DELAYS_FLAG)

        tvb_coupling = coupling.Difference(a=1.0)

        noise_instance = noise.Additive(nsig=self.settings.noise_intensity,
                                        random_stream=numpy.random.RandomState(
                                            seed=self.settings.noise_seed))

        integrator = getattr(integrators, self.settings.integrator_type) \
                                (dt=self.settings.integration_step, noise=noise_instance)

        monitor = monitors.TemporalAverage()
        monitor.period = self.settings.monitor_sampling_period

        self.simTVB = simulator.Simulator(
            model=model,
            connectivity=tvb_connectivity,
            coupling=tvb_coupling,
            integrator=integrator,
            monitors=[monitor],
            simulation_length=self.settings.simulation_length)
        self.simTVB.configure()

        self.configure_initial_conditions()

    def config_simulation_from_tvb_simulator(self, tvb_simulator):
        # Ignore simulation settings and use the input tvb_simulator
        self.simTVB = deepcopy(tvb_simulator)
        self.simTVB.model = tvb_simulator.model  # TODO: compare this with self.model_configuration
        self.simTVB.connectivity = self._vp2tvb_connectivity(TIME_DELAYS_FLAG)
        self.simTVB.configure()
        self.configure_initial_conditions()

    def launch_simulation(self,
                          report_every_n_monitor_steps=None,
                          timeseries=Timeseries):
        if report_every_n_monitor_steps is not None and report_every_n_monitor_steps >= 1:
            time_length_avg = numpy.round(self.simTVB.simulation_length /
                                          self.simTVB.monitors[0].period)
            n_report_blocks = max(
                report_every_n_monitor_steps *
                numpy.round(time_length_avg / 100), 1.0)
        else:
            n_report_blocks = 1

        self.simTVB._configure_history(
            initial_conditions=self.simTVB.initial_conditions)

        status = True
        if n_report_blocks < 2:
            try:
                tavg_time, tavg_data = self.simTVB.run()[0]

            except Exception as error_message:
                status = False
                self.logger.warning(
                    "Something went wrong with this simulation...:" + "\n" +
                    str(error_message))
                return None, status

        else:

            sim_length = self.simTVB.simulation_length / self.simTVB.monitors[
                0].period
            block_length = sim_length / n_report_blocks
            curr_time_step = 0.0
            curr_block = 1.0

            # Perform the simulation
            tavg_data, tavg_time = [], []

            start = time.time()

            try:
                for tavg in self.simTVB():

                    curr_time_step += 1.0

                    if tavg is not None:
                        tavg_time.append(tavg[0][0])
                        tavg_data.append(tavg[0][1])

                    if curr_time_step >= curr_block * block_length:
                        end_block = time.time()
                        # TODO: correct this part to print percentage of simulation at the same line by erasing previous
                        print_this = "\r" + "..." + str(100 * curr_time_step / sim_length) + "% done in " + \
                                     str(end_block - start) + " secs"
                        sys.stdout.write(print_this)
                        sys.stdout.flush()
                        curr_block += 1.0
            except Exception as error_message:
                status = False
                self.logger.warning(
                    "Something went wrong with this simulation...:" + "\n" +
                    str(error_message))
                return None, status

        tavg_time = numpy.array(tavg_time).flatten().astype('f')
        tavg_data = numpy.swapaxes(tavg_data, 1, 2).astype('f')
        # Variables of interest in a dictionary:
        sim_output = timeseries(
            tavg_data, {
                TimeseriesDimensions.SPACE.value:
                self.connectivity.region_labels,
                TimeseriesDimensions.VARIABLES.value: self.get_vois()
            }, tavg_time[0],
            numpy.diff(tavg_time).mean(), "ms")
        return sim_output, status
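A hedged end-to-end sketch of the wrapper; model_configuration, sim_settings and the model object are assumed to come from the builders shown in Example 10:

# Hypothetical usage: all inputs are assumed to be built as in Example 10.
sim = SimulatorTVB(model_configuration, head.connectivity, sim_settings)
sim.config_simulation(model)  # builds and configures the wrapped tvb Simulator
sim_output, status = sim.launch_simulation(report_every_n_monitor_steps=100,
                                           timeseries=Timeseries)
if status:
    print(sim_output.data.shape)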
def main_sampling_service(config=Config()):
    logger = initialize_logger(__name__, config.out.FOLDER_LOGS)

    n_samples = 100
    logger.info("\nDeterministic numpy.linspace sampling:")
    sampler = DeterministicSampler(n_samples=n_samples, grid_mode=True)
    samples, stats = sampler.generate_samples(low=1.0,
                                              high=2.0,
                                              shape=(2, ),
                                              stats=True)
    for key, value in stats.items():
        logger.info("\n" + key + ": " + str(value))
    logger.info(repr(sampler))

    logger.info("\nStochastic uniform sampling with numpy:")
    sampler = ProbabilisticSampler(n_samples=n_samples,
                                   sampling_module="numpy")
    #                                      a (low), b (high)
    samples, stats = sampler.generate_samples(
        parameter=(1.0, 2.0),
        probability_distribution=ProbabilityDistributionTypes.UNIFORM,
        shape=(2, ),
        stats=True)
    for key, value in stats.items():
        logger.info("\n" + key + ": " + str(value))

    logger.info(repr(sampler))

    logger.info("\nStochastic truncated normal sampling with scipy:")
    sampler = ProbabilisticSampler(n_samples=n_samples)
    #                                   loc (mean), scale (sigma)
    samples, stats = sampler.generate_samples(parameter=(1.5, 1.0),
                                              probability_distribution="norm",
                                              low=1,
                                              high=2,
                                              shape=(2, ),
                                              stats=True)
    for key, value in stats.items():
        logger.info("\n" + key + ": " + str(value))
    logger.info(repr(sampler))

    logger.info("\nSensitivity analysis sampling:")
    sampler = SalibSamplerInterface(n_samples=n_samples, sampler="latin")
    samples, stats = sampler.generate_samples(low=1,
                                              high=2,
                                              shape=(2, ),
                                              stats=True)
    for key, value in stats.items():
        logger.info("\n" + key + ": " + str(value))
    logger.info(repr(sampler))

    logger.info("\nTesting distribution class and conversions...")
    sampler = ProbabilisticSampler(n_samples=n_samples)
    for distrib_name in ProbabilityDistributionTypes.available_distributions:
        logger.info("\n" + distrib_name)
        logger.info("\nmode/mean, std to distribution " + distrib_name + ":")
        if np.in1d(distrib_name, [
                ProbabilityDistributionTypes.EXPONENTIAL,
                ProbabilityDistributionTypes.CHISQUARE
        ]):
            target_stats = {"mean": 1.0}
            stats_m = "mean"
        elif np.in1d(distrib_name, [
                ProbabilityDistributionTypes.BERNOULLI,
                ProbabilityDistributionTypes.POISSON
        ]):
            target_stats = {"mean": np.ones((2, ))}
            stats_m = "mean"
        else:
            # Binomial, uniform and all remaining distributions use the same targets.
            target_stats = {"mean": 1.0, "std": 2.0}
            stats_m = "mean"
        parameter1 = generate_probabilistic_parameter(
            name="test1_" + distrib_name,
            low=0.0,
            high=2.0,
            p_shape=(2, 2),
            probability_distribution=distrib_name,
            optimize_pdf=True,
            use="manual",
            **target_stats)
        name2 = "test2_" + distrib_name
        defaults = set_parameter_defaults(name2,
                                          _pdf=distrib_name,
                                          _shape=(2, 2),
                                          _lo=0.0,
                                          _hi=2.0,
                                          **(deepcopy(target_stats)))
        parameter2 = set_parameter(name=name2, use="manual", **defaults)
        for parameter in (parameter1, parameter2):
            logger.info(str(parameter))
            samples, stats = sampler.generate_samples(parameter=parameter,
                                                      stats=True)
            for key, value in stats.items():
                logger.info("\n" + key + ": " + str(value))
            diff = np.array(target_stats[stats_m]) - np.array(stats[stats_m])
            if np.any(np.abs(diff.flatten()) > 0.001):
                logger.warning(
                    "Large difference between target and resulting samples' " +
                    stats_m + "!: " + str(diff))
            del parameter
    def test_computations(self):
        logger = initialize_logger(__name__, self.config.out.FOLDER_LOGS)

        # ------------------------------------------------------------------------------------------------------------------
        x1 = numpy.array([-4.1 / 3, -4.9 / 3, -5.0 / 3], dtype="float32")
        w = numpy.array([[0, 0.1, 0.9], [0.1, 0, 0.0], [0.9, 0.0, 0]])
        n = x1.size
        i1 = numpy.ones(x1.shape, dtype=x1.dtype)
        K = 0.0 * K_UNSCALED_DEF * i1
        yc = YC_DEF * i1
        Iext1 = I_EXT1_DEF * i1
        slope = SLOPE_DEF * i1
        Iext2 = I_EXT2_DEF * i1
        a = A_DEF * i1
        b = B_DEF * i1
        d = D_DEF * i1
        s = S_DEF * i1
        gamma = GAMMA_DEF * i1
        tau1 = TAU1_DEF * i1
        tau2 = TAU2_DEF * i1
        tau0 = TAU0_DEF * i1
        x1, K = assert_arrays([x1, K])
        w = assert_arrays([w])  # , (x1.size, x1.size)
        zmode = numpy.array([ZMODE_DEF]) * i1
        pmode = numpy.array([0]) * i1
        model = "EpileptorDPrealistic"
        x1eq = x1

        z = calc_eq_z(x1,
                      yc,
                      Iext1,
                      "2d",
                      x2=0.0,
                      slope=slope,
                      a=a,
                      b=b,
                      d=d,
                      x1_neg=True)
        zeq = z

        x0cr, r = calc_x0cr_r(yc,
                              Iext1,
                              zmode=zmode,
                              x1_rest=X1_DEF,
                              x1_cr=X1EQ_CR_DEF,
                              x0def=X0_DEF,
                              x0cr_def=X0_CR_DEF)

        x0 = calc_x0(x1, z, K, w, zmode=zmode, z_pos=True)

        calc_model_x0_to_x0_val(x0,
                                yc,
                                Iext1,
                                a,
                                b,
                                d,
                                zmode=numpy.array([ZMODE_DEF]))

        if model == "EpileptorDP2D":
            eq = numpy.c_[x1eq, zeq].T.astype('float32')
            model_vars = 2
            dfun = calc_dfun(eq[0].T,
                             eq[1].T,
                             yc,
                             Iext1,
                             x0,
                             K,
                             w,
                             model_vars,
                             zmode=zmode,
                             pmode=pmode,
                             x0_var=x0,
                             slope_var=slope,
                             Iext1_var=Iext1,
                             Iext2_var=Iext2,
                             K_var=K,
                             slope=slope,
                             a=a,
                             b=b,
                             d=d,
                             s=s,
                             Iext2=Iext2,
                             gamma=gamma,
                             tau1=tau1,
                             tau0=tau0,
                             tau2=tau2,
                             output_mode="array")

            jac = calc_jac(eq[0].T,
                           eq[1].T,
                           yc,
                           Iext1,
                           x0,
                           K,
                           w,
                           model_vars,
                           zmode=zmode,
                           pmode=pmode,
                           x1_neg=True,
                           z_pos=True,
                           x2_neg=False,
                           x0_var=x0,
                           slope_var=slope,
                           Iext1_var=Iext1,
                           Iext2_var=Iext2,
                           K_var=K,
                           slope=slope,
                           a=a,
                           b=b,
                           d=d,
                           s=s,
                           Iext2=Iext2,
                           gamma=gamma,
                           tau1=tau1,
                           tau0=tau0,
                           tau2=tau2)
        else:
            if model == "EpileptorDPrealistic":
                # the 11D "realistic" simulations model
                eq, slope_eq, Iext2_eq = calc_eq_11d(
                    x0,
                    K,
                    w,
                    yc,
                    Iext1,
                    Iext2,
                    slope,
                    EpileptorDPrealistic.fun_slope_Iext2,
                    x1,
                    a=a,
                    b=b,
                    d=d,
                    zmode=zmode,
                    pmode=pmode)
                model_vars = 11
                dfun = calc_dfun(eq[0].T,
                                 eq[2].T,
                                 yc,
                                 Iext1,
                                 x0,
                                 K,
                                 w,
                                 model_vars,
                                 zmode,
                                 pmode,
                                 y1=eq[1].T,
                                 x2=eq[3].T,
                                 y2=eq[4].T,
                                 g=eq[5].T,
                                 x0_var=eq[6].T,
                                 slope_var=eq[7].T,
                                 Iext1_var=eq[8].T,
                                 Iext2_var=eq[9].T,
                                 K_var=eq[10].T,
                                 slope=slope,
                                 a=a,
                                 b=b,
                                 d=d,
                                 s=s,
                                 Iext2=Iext2,
                                 gamma=gamma,
                                 tau1=tau1,
                                 tau0=tau0,
                                 tau2=tau2,
                                 output_mode="array")
                # jac = calc_jac(eq[0].T, eq[2].T, yc, Iext1, x0, K, w, model_vars, zmode, pmode,
                #                x1_neg=True, z_pos=True, x2_neg=False, y1=eq[1].T, x2=eq[3].T, y2=eq[4].T, g=eq[5].T,
                #                x0_var=eq[6].T, slope_var=eq[7].T, Iext1_var=eq[8].T, Iext2_var=eq[9].T, K_var=eq[10].T,
                #                slope=slope, a=a, b=b, d=d, s=s, Iext2=Iext2, gamma=gamma, tau1=tau1, tau0=tau0,
                #                tau2=tau2)
            else:
                # all >=6D models
                eq = calc_eq_6d(x0,
                                K,
                                w,
                                yc,
                                Iext1,
                                Iext2,
                                x1,
                                a=a,
                                b=b,
                                d=d,
                                zmode=zmode)
                model_vars = 6
                dfun = calc_dfun(eq[0].T,
                                 eq[2].T,
                                 yc,
                                 Iext1,
                                 x0,
                                 K,
                                 w,
                                 model_vars,
                                 zmode,
                                 y1=eq[1].T,
                                 x2=eq[3].T,
                                 y2=eq[4].T,
                                 g=eq[5].T,
                                 slope=slope,
                                 a=a,
                                 b=b,
                                 d=d,
                                 s=s,
                                 Iext2=Iext2,
                                 gamma=gamma,
                                 tau1=tau1,
                                 tau0=tau0,
                                 tau2=tau2,
                                 output_mode="array")
                jac = calc_jac(eq[0].T,
                               eq[2].T,
                               yc,
                               Iext1,
                               r,
                               K,
                               w,
                               model_vars,
                               zmode,
                               x1_neg=True,
                               z_pos=True,
                               x2_neg=False,
                               y1=eq[1].T,
                               x2=eq[3].T,
                               y2=eq[4].T,
                               g=eq[5].T,
                               slope=slope,
                               a=a,
                               b=b,
                               d=d,
                               s=s,
                               Iext2=Iext2,
                               gamma=gamma,
                               tau1=tau1,
                               tau0=tau0,
                               tau2=tau2)

        model = str(model_vars) + "d"
        sx1, sy1, sz, sx2, sy2, sg, sx0, sx0_val, sK, syc, sIext1, sIext2, sslope, sa, sb, sd, stau1, stau0, stau2, v = \
            symbol_vars(n, ["x1", "y1", "z", "x2", "y2", "g", "x0", "x0_val", "K", "yc", "Iext1", "Iext2",
                            "slope", "a", "b", "d", "tau1", "tau0", "tau2"], shape=(3,))
        sw, vw = symbol_vars(n, ["w"], dims=2, output_flag="numpy_array")

        v.update(vw)
        del vw
        numpy.fill_diagonal(sw, 0.0)
        sw = numpy.array(sw)
        a = numpy.ones((n, ))
        b = 3.0 * a
        d = 5.0 * a
        s = 6.0 * a
        tau1 = a
        tau0 = a
        tau2 = a
        x1sq = -4.0 / 3 * a
        if model == "2d":
            y1 = yc
        else:
            y1 = eq[1].T
            x2 = eq[3].T
            y2 = eq[4].T
            g = eq[5].T
            if model == "11d":
                x0_var = eq[6].T
                slope_var = eq[7].T
                Iext1_var = eq[8].T
                Iext2_var = eq[9].T
                K_var = eq[10].T

        # -------------------------------------------- Test symbolic x0cr, r calculation ----------------------------------

        logger.info("\n\nTest symbolic x0cr, r calculation...")
        x0cr2, r2 = calc_x0cr_r(syc,
                                sIext1,
                                zmode=zmode,
                                x1_rest=X1_DEF,
                                x1_cr=X1EQ_CR_DEF,
                                x0def=X0_DEF,
                                x0cr_def=X0_CR_DEF)  # test=True

        lx0cr_r, sx0cr_r, v = symbol_eqtn_x0cr_r(
            n, zmode=zmode,
            shape=(n, ))  # symbol_calc_x0cr_r(n, zmode=zmode, shape=(3, ))
        sx0cr_r = list(sx0cr_r)

        for ii in range(2):
            sx0cr_r[ii] = Matrix(sx0cr_r[ii])
            for iv in range(n):
                sx0cr_r[ii][iv] = sx0cr_r[ii][iv].subs([
                    (v["a"][iv], a[iv]), (v["b"][iv], b[iv]),
                    (v["d"][iv], d[iv]), (v["x1_rest"][iv], X1_DEF),
                    (v["x0_rest"][iv], X0_DEF), (v["x1_cr"][iv], X1EQ_CR_DEF),
                    (v["x0_cr"][iv], X0_CR_DEF)
                ])

        assert list(x0cr2) == list(sx0cr_r[0])
        assert list(r2) == list(sx0cr_r[1])

        # -------------------------------------------- Test coupling ------------------------------------------------------
        coupling = calc_coupling(sx1, sK, sw)
        scoupling = symbol_eqtn_coupling(n, shape=(n, ))[:2]

        assert list(coupling) == list(scoupling[1])
        assert list(calc_coupling(x1, K, w)) == list(scoupling[0](x1, K, w))
        assert coupling.shape == scoupling[1].shape

        # ---------------------------------------- Test coupling derivative to x1 ------------------------------------------
        coupling_diff = calc_coupling_diff(sK, sw)
        scoupling_diff = symbol_calc_coupling_diff(n, ix=None, jx=None,
                                                   K="K")[:2]
        assert coupling_diff.shape == scoupling_diff[1].shape

        # ------------------------------------- Test the fz with substitution of z via fx1 ----------------------------------
        fx1z = calc_fx1z(sx1,
                         sx0,
                         sK,
                         sw,
                         syc,
                         sIext1,
                         sa,
                         sb,
                         sd,
                         stau1,
                         stau0,
                         zmode=zmode)
        sfx1z = symbol_eqtn_fx1z(n, model, zmode, shape=(n, ))[:2]
        # if model == "2d":
        #     fx1z = calc_fx1z(x1, x0, K, w, yc, Iext1, a=a, b=b, d=d, tau1=tau1, tau0=tau0, model=model, zmode=zmode)
        #     s_fx1z = sfx1z[0](x1, x0, K, w, yc, Iext1, a, b, d, tau1, tau0)
        #     assert list(fx1z) == list(s_fx1z)
        # else:
        #     fx1z = calc_fx1z(x1, x0, K, w, yc, Iext1, a=a, b=b, d=d, tau1=tau1, tau0=tau0, model=model, zmode=zmode)
        #     s_fx1z = sfx1z[0](x1, x0, K, w, yc, Iext1, a, b, d, tau1, tau0)
        #     assert list(fx1z) == list(s_fx1z)

        # ------------------------------- Test the derivative to x1 of fz with substitution of z via fx1 ---------------------
        fx1z_diff = calc_fx1z_diff(sx1,
                                   sK,
                                   sw,
                                   sa,
                                   sb,
                                   sd,
                                   stau1,
                                   stau0,
                                   model=model,
                                   zmode=zmode)
        sfx1z_diff = symbol_eqtn_fx1z_diff(n, model, zmode)[:2]
        # for ii in range(n):
        #     assert list(fx1z_diff[ii]) == list(sfx1z_diff[1][ii, :])

        # -------------------------------- Test symbolic fx2 with substitution of y2 via fy2 ----------------------------------
        if model != "2d":
            sfx2y2 = symbol_eqtn_fx2y2(n, x2_neg=False, shape=(n, ))[:2]

        # ----------------------------------------------- Test calc_fx1_2d_taylor ---------------------------------------------
        x_taylor = symbol_vars(n, ["x1lin"],
                               shape=(n, ))[0]  # x_taylor = -4.5/3 (=x1lin)
        fx1lin = calc_fx1_2d_taylor(sx1,
                                    x_taylor,
                                    sz,
                                    syc,
                                    sIext1,
                                    sslope,
                                    sa,
                                    sb,
                                    stau1,
                                    x1_neg=True,
                                    order=2,
                                    shape=(n, ))
        sfx1lin = symbol_calc_2d_taylor(n,
                                        "x1lin",
                                        order=2,
                                        x1_neg=True,
                                        slope="slope",
                                        Iext1="Iext1",
                                        shape=(n, ))[:2]

        # for ii in range(3):
        #     assert numpy.array(fx1lin[ii].expand(sx1[ii]).collect(sx1[ii])) == numpy.array(
        #         sfx1lin[1][ii].expand(sx1[ii]).collect(sx1[ii]))
        calc_fx1_2d_taylor(x1,
                           -1.5,
                           z,
                           yc,
                           Iext1,
                           slope,
                           a=a,
                           b=b,
                           d=d,
                           tau1=tau1,
                           x1_neg=True,
                           order=2,
                           shape=(n, ))

        # ----------------------------------------- Test calc_fx1y1_6d_diff_x1 -------------------------------------------------
        fx1y1_6d_diff_x1 = calc_fx1y1_6d_diff_x1(sx1, syc, sIext1, sa, sb, sd,
                                                 stau1, stau0)
        sfx1y1_6d_diff_x1 = symbol_calc_fx1y1_6d_diff_x1(n, shape=(n, ))[:2]

        # for ii in range(n):
        #     assert fx1y1_6d_diff_x1[ii].expand(sx1[ii]).collect(sx1[ii]) == sfx1y1_6d_diff_x1[1][ii].expand(sx1[ii]).collect(sx1[ii])

        # ------------------------------- Test eq_x1_hypo_x0_optimize_fun & eq_x1_hypo_x0_optimize_jac --------------------------
        ix0 = numpy.array([1, 2])
        iE = numpy.array([0])
        x = numpy.empty_like(sx1).flatten()
        x[ix0] = sx1[ix0]
        x[iE] = sx0[iE]
        eq_x1_hypo_x0_optimize(ix0,
                               iE,
                               x1eq,
                               zeq,
                               x0[ix0],
                               K,
                               w,
                               yc,
                               Iext1,
                               a=A_DEF,
                               b=B_DEF,
                               d=D_DEF,
                               slope=SLOPE_DEF)
        eq_x1_hypo_x0_optimize_fun(x, ix0, iE, sx1, numpy.array(sz), sx0[ix0],
                                   sK, sw, syc, sIext1)
        eq_x1_hypo_x0_optimize_jac(x, ix0, iE, sx1, numpy.array(sz), sx0[ix0],
                                   sK, sw, sy1, sIext1)
        eq_x1_hypo_x0_optimize(ix0, iE, x1eq, zeq, x0[ix0], K, w, yc, Iext1)
        eq_x1_hypo_x0_linTaylor(ix0, iE, x1eq, zeq, x0[ix0], K, w, yc, Iext1)

        # ------------------------------------------ Test calc_fz_jac_square_taylor ----------------------------------------------
        calc_fz_jac_square_taylor(numpy.array(sz),
                                  syc,
                                  sIext1,
                                  sK,
                                  sw,
                                  tau1=tau1,
                                  tau0=tau0)
        lfz_jac_square_taylor, sfz_jac_square_taylor, v = symbol_calc_fz_jac_square_taylor(
            n)
        sfz_jac_square_taylor = Matrix(sfz_jac_square_taylor).reshape(n, n)
        for iv in range(n):
            for jv in range(n):
                sfz_jac_square_taylor[iv,
                                      jv] = sfz_jac_square_taylor[iv, jv].subs(
                                          [(v["x_taylor"][jv], x1sq[jv]),
                                           (v["a"][jv], a[jv]),
                                           (v["b"][jv], b[jv]),
                                           (v["d"][jv], d[jv]),
                                           (v["tau1"][iv], tau1[iv]),
                                           (v["tau0"][iv], tau2[iv])])

        assert list(
            calc_fz_jac_square_taylor(
                z, yc, Iext1, K, w, tau1=tau1, tau0=tau0)[0]) == list(
                    lfz_jac_square_taylor(zeq, yc, Iext1, K, w, a, b, d, tau1,
                                          tau0, x1sq)[0])
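All of the assertions above follow the same pattern: evaluate a plain numeric implementation and a sympy-lambdified symbolic expression on the same inputs and compare the results element-wise. A minimal, self-contained sketch of that pattern, with illustrative names that are not part of tvb_fit (only numpy and sympy are assumed):

import numpy
from sympy import symbols, lambdify

def calc_square_plus_k(x, k):
    # numeric implementation under test
    return x ** 2 + k

sx, sk = symbols("x k")
s_fun = lambdify((sx, sk), sx ** 2 + sk, "numpy")  # symbolic reference

x = numpy.array([0.5, -1.5])
k = numpy.array([0.1, 0.1])
assert list(calc_square_plus_k(x, k)) == list(s_fun(x, k))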
Example 14
class TVBReader(object):
    logger = initialize_logger(__name__)

    def read_connectivity(self, path):
        tvb_conn = connectivity.Connectivity.from_file(path)
        return Connectivity(path, tvb_conn.weights, tvb_conn.tract_lengths,
                            tvb_conn.region_labels, tvb_conn.centres,
                            tvb_conn.hemispheres, tvb_conn.orientations,
                            tvb_conn.areas)

    def read_cortical_surface(self, path):
        if os.path.isfile(path):
            tvb_srf = surfaces.CorticalSurface.from_file(path)
            return Surface(tvb_srf.vertices, tvb_srf.triangles,
                           tvb_srf.vertex_normals, tvb_srf.triangle_normals)
        else:
            self.logger.warning("\nNo Cortical Surface file found at path " +
                                path + "!")
            return []

    def read_region_mapping(self, path):
        if os.path.isfile(path):
            tvb_rm = region_mapping.RegionMapping.from_file(path)
            return tvb_rm.array_data
        else:
            self.logger.warning("\nNo Region Mapping file found at path " +
                                path + "!")
            return []

    def read_volume_mapping(self, path):
        if os.path.isfile(path):
            tvb_vm = region_mapping.RegionVolumeMapping.from_file(path)
            return tvb_vm.array_data
        else:
            self.logger.warning("\nNo Volume Mapping file found at path " +
                                path + "!")
            return []

    def read_t1(self, path):
        if os.path.isfile(path):
            tvb_t1 = structural.StructuralMRI.from_file(path)
            return tvb_t1.array_data
        else:
            self.logger.warning("\nNo Structural MRI file found at path " +
                                path + "!")
            return []

    def read_sensors(self, filename, root_folder, s_type, atlas=""):
        def get_sensors_name(sensors_file, s_type):
            locations_file = sensors_file[0]
            if len(sensors_file) > 1:
                gain_file = sensors_file[1]
            else:
                gain_file = ""
            return s_type.value + (locations_file + gain_file).replace(
                ".txt", "").replace(s_type.value, "")

        filename = ensure_list(filename)
        name = get_sensors_name(filename, s_type)
        path = os.path.join(root_folder, filename[0])
        if os.path.isfile(path):
            if s_type == Sensors.TYPE_EEG:
                tvb_sensors = sensors.SensorsEEG.from_file(path)
            elif s_type == Sensors.TYPE_MEG:
                tvb_sensors = sensors.SensorsMEG.from_file(path)
            else:
                tvb_sensors = sensors.SensorsInternal.from_file(path)
            if len(filename) > 1:
                gain_matrix = self.read_gain_matrix(
                    os.path.join(root_folder, atlas, filename[1]), s_type)
            else:
                gain_matrix = np.array([])
            return Sensors(tvb_sensors.labels,
                           tvb_sensors.locations,
                           orientations=tvb_sensors.orientations,
                           gain_matrix=gain_matrix,
                           s_type=s_type,
                           name=name)
        else:
            self.logger.warning("\nNo Sensor file found at path " + path + "!")
            return None

    def read_gain_matrix(self, path, s_type):
        if os.path.isfile(path):
            if s_type == Sensors.TYPE_EEG:
                tvb_prj = projections.ProjectionSurfaceEEG.from_file(path)
            elif s_type == Sensors.TYPE_MEG:
                tvb_prj = projections.ProjectionSurfaceMEG.from_file(path)
            else:
                tvb_prj = projections.ProjectionSurfaceSEEG.from_file(path)
            return tvb_prj.gain_matrix_data
        else:
            self.logger.warning("\nNo Projection Matrix file found at path " +
                                path + "!")
            return None

    def read_head(
        self,
        root_folder,
        name='',
        atlas="default",
        connectivity_file="connectivity.zip",
        cortical_surface_file="surface_cort.zip",
        subcortical_surface_file="surface_subcort.zip",
        cortical_region_mapping_file="region_mapping_cort.txt",
        subcortical_region_mapping_file="region_mapping_subcort.txt",
        eeg_sensors_files=[("eeg_brainstorm_65.txt",
                            "gain_matrix_eeg_65_surface_16k.npy")],
        meg_sensors_files=[("meg_brainstorm_276.txt",
                            "gain_matrix_meg_276_surface_16k.npy")],
        seeg_sensors_files=[("seeg_xyz.txt", "seeg_dipole_gain.txt"),
                            ("seeg_xyz.txt", "seeg_distance_gain.txt"),
                            ("seeg_xyz.txt", "seeg_regions_distance_gain.txt"),
                            ("seeg_588.txt",
                             "gain_matrix_seeg_588_surface_16k.npy")],
        vm_file="aparc+aseg.nii.gz",
        t1_file="T1.nii.gz",
    ):

        conn = self.read_connectivity(
            os.path.join(root_folder, atlas, connectivity_file))
        cort_srf = self.read_cortical_surface(
            os.path.join(root_folder, cortical_surface_file))
        subcort_srf = self.read_cortical_surface(
            os.path.join(root_folder, subcortical_surface_file))
        cort_rm = self.read_region_mapping(
            os.path.join(root_folder, atlas, cortical_region_mapping_file))
        subcort_rm = self.read_region_mapping(
            os.path.join(root_folder, atlas, subcortical_region_mapping_file))
        vm = self.read_volume_mapping(os.path.join(root_folder, atlas,
                                                   vm_file))
        t1 = self.read_t1(os.path.join(root_folder, t1_file))
        sensorsSEEG = OrderedDict()
        for s_files in ensure_list(seeg_sensors_files):
            sensors = self.read_sensors(s_files, root_folder,
                                        Sensors.TYPE_SEEG, atlas)
            sensorsSEEG[sensors.name] = sensors
        sensorsEEG = OrderedDict()
        for s_files in ensure_list(eeg_sensors_files):
            sensors = self.read_sensors(s_files, root_folder, Sensors.TYPE_EEG,
                                        atlas)
            sensorsEEG[sensors.name] = sensors
        sensorsMEG = OrderedDict()
        for s_files in ensure_list(meg_sensors_files):
            sensors = self.read_sensors(s_files, root_folder, Sensors.TYPE_MEG,
                                        atlas)
            sensorsMEG[sensors.name] = sensors
        if len(name) == 0:
            name = atlas
        return Head(conn,
                    cort_srf,
                    subcort_srf,
                    cort_rm,
                    subcort_rm,
                    vm,
                    t1,
                    name,
                    sensorsSEEG=sensorsSEEG,
                    sensorsEEG=sensorsEEG,
                    sensorsMEG=sensorsMEG)
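A hypothetical usage sketch for the TVBReader above; the head folder path is an assumption and the default file names from read_head are relied upon:

reader = TVBReader()
head = reader.read_head("/path/to/head_folder", atlas="default")
print(head.connectivity.number_of_regions)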
Example 15
class ProbabilisticModelBuilderBase(object):

    __metaclass__ = ABCMeta

    logger = initialize_logger(__name__)

    model_name = "vep"
    model_config = EpileptorModelConfiguration("EpileptorDP2D")
    xmode = XModes.X0MODE.value
    linear_flag = False
    priors_mode = PriorsModes.NONINFORMATIVE.value
    model = None
    normal_flag = True
    x1eq_cr = X1EQ_CR
    x1eq_def = X1EQ_DEF

    def __init__(self,
                 model=None,
                 model_name="vep",
                 model_config=EpileptorModelConfiguration("EpileptorDP2D"),
                 xmode=XModes.X0MODE.value,
                 priors_mode=PriorsModes.NONINFORMATIVE.value,
                 normal_flag=True,
                 linear_flag=False,
                 x1eq_cr=X1EQ_CR,
                 x1eq_def=X1EQ_DEF):
        self.model = deepcopy(model)
        self.model_name = model_name
        self.model_config = model_config
        self.xmode = xmode
        self.x1eq_cr = x1eq_cr
        self.x1eq_def = x1eq_def
        self.priors_mode = priors_mode
        self.normal_flag = normal_flag
        self.linear_flag = linear_flag
        if self.normal_flag:
            self.model_name += "_normal"
        if self.linear_flag:
            self.model_name += "_lin"
        if isinstance(self.model, EpiProbabilisticModel):
            self.model_name = self.model.name
            for attr in [
                    "model_config", "normal_flag", "linear_flag", "xmode",
                    "x1eq_cr", "x1eq_def", "priors_mode"
            ]:
                setattr(self, attr, getattr(self.model, attr))

    def __repr__(self, d=OrderedDict()):
        return formal_repr(self, self._repr(d))

    def __str__(self):
        return self.__repr__()

    @property
    def number_of_regions(self):
        if isinstance(self.model, EpiProbabilisticModel):
            return self.model.number_of_regions
        else:
            return self.model_config.number_of_regions

    def _repr(self, d=OrderedDict()):
        for ikey, (key, val) in enumerate(self.__dict__.items()):
            d.update({str(ikey) + ". " + key: val})
        return d

    def set_attributes(self, attributes_names, attribute_values):
        for attribute_name, attribute_value in zip(
                ensure_list(attributes_names), ensure_list(attribute_values)):
            setattr(self, attribute_name, attribute_value)
        return self

    def _set_attributes_from_dict(self, attributes_dict):
        if not isinstance(attributes_dict, dict):
            attributes_dict = attributes_dict.__dict__
        for attr, value in attributes_dict.items():
            if attr not in [
                    "model_config", "parameters", "number_of_regions",
                    "number_of_parameters"
            ]:
                value = attributes_dict.get(attr, None)
                if value is None:
                    warning(attr + " not found in input dictionary!" +
                            "\nLeaving as it is: " + attr + " = " +
                            str(getattr(self, attr)))
                if value is not None:
                    setattr(self, attr, value)
        return attributes_dict

    def generate_normal_or_lognormal_parameter(self,
                                               name,
                                               mean,
                                               low,
                                               high,
                                               sigma=None,
                                               sigma_scale=2,
                                               p_shape=(),
                                               use="scipy",
                                               negative_log=False):
        if self.normal_flag:
            return generate_normal_parameter(name, mean, low, high, sigma,
                                             sigma_scale, p_shape, use)
        else:
            if negative_log:
                return generate_negative_lognormal_parameter(
                    name, mean, low, high, sigma, sigma_scale, p_shape, use)
            else:
                return generate_lognormal_parameter(name, mean, low, high,
                                                    sigma, sigma_scale,
                                                    p_shape, use)

    @abstractmethod
    def generate_parameters(self):
        pass

    @abstractmethod
    def generate_model(self):
        pass
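Since ProbabilisticModelBuilderBase is abstract, a subclass has to implement generate_parameters() and generate_model() before it can be used. A minimal illustrative sketch (the parameter name and bounds below are made up, not taken from tvb_fit):

class DummyProbabilisticModelBuilder(ProbabilisticModelBuilderBase):

    def generate_parameters(self):
        # one equilibrium parameter per region, built with the helper above;
        # the bounds are illustrative assumptions
        return [self.generate_normal_or_lognormal_parameter(
            "x1eq", mean=self.x1eq_def, low=-2.0, high=self.x1eq_cr,
            p_shape=(self.number_of_regions,))]

    def generate_model(self):
        # a real builder would assemble an EpiProbabilisticModel here;
        # this sketch simply returns the generated parameters
        return self.generate_parameters()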
Example 16
import os
from tvb_fit.tvb_epilepsy.base.constants.config import Config
from tvb_fit.base.utils.log_error_utils import initialize_logger
from tvb_fit.io.tvb_data_reader import TVBReader
from tvb_fit.io.h5_reader import H5Reader
from tvb_fit.io.h5_writer import H5Writer
from tvb_fit.plot.plotter import Plotter
# input_folder = os.path.join(os.path.expanduser("~"), 'Dropbox', 'Work', 'VBtech', 'VEP', "results", "CC", "TVB3", "tvb")
# head_folder = os.path.join(os.path.expanduser("~"), 'Dropbox', 'Work', 'VBtech', 'VEP', "results", "CC", "TVB3", "Head")
input_folder = os.path.join(os.path.expanduser("~"), 'Dropbox', 'Work',
                            'VBtech', 'VEP', "results", "INS", "JUNCH", "tvb")
head_folder = os.path.join(os.path.expanduser("~"), 'Dropbox', 'Work',
                           'VBtech', 'VEP', "results", "INS", "JUNCH", "Head")
output_folder = os.path.join(os.path.expanduser("~"), 'Dropbox', 'Work',
                             'VBtech', 'VEP', "results", "tests")
config = Config(head_folder=input_folder,
                output_base=output_folder,
                data_mode="tvb")  #, data_mode="java"
config.hypothesis.head_folder = head_folder
config.figures.MATPLOTLIB_BACKEND = "inline"
config.figures.SHOW_FLAG = True
logger = initialize_logger(__name__, config.out.FOLDER_LOGS)
reader = TVBReader() if config.input.IS_TVB_MODE else H5Reader()
writer = H5Writer()
plotter = Plotter(config)

logger.info("Reading from: " + config.input.HEAD)
head = reader.read_head(config.input.HEAD,
                        seeg_sensors_files=[("seeg_xyz.txt", )])
print("OK!")
Example 17
class ModelConfigurationBuilder(object):

    logger = initialize_logger(__name__)

    model_name = None
    connectivity = None
    coupling = None
    initial_conditions = None  # initial conditions in a reduced form
    noise = None
    monitor = None

    def __repr__(self):
        d = {"01. model": self.model,
             "02. Number of regions": self.number_of_regions,
             "03. connectivity": self.connectivity,
             "04. coupling": self.coupling,
             "05. monitor": self.monitor,
             "06. initial_conditions": self.initial_conditions,
             "07. noise": self.noise
             }
        return formal_repr(self, d)

    def __str__(self):
        return self.__repr__()

    @property
    def number_of_regions(self):
        if isinstance(self.connectivity, numpy.ndarray):
            return self.connectivity.shape[0]
        else:
            return 1

    @property
    def model(self):
        model_module = importlib.import_module('tvb.simulator.models.%s' % self.model_name.lower())
        model = getattr(model_module, self.model_name)
        # vars() on a class returns a read-only mapping; copy it into a plain dict
        model = dict(vars(model))
        model["model_name"] = self.model_name
        # iterate over a snapshot of the keys, since entries are deleted below
        for key in list(model.keys()):
            if key in ["_ui_name", "ui_configurable_parameters", "variables_of_interest", "state_variable_range",
                       "state_variables", "_nvar", "cvar", ] \
               or callable(model[key]):
                del model[key]
        return model

    @property
    def nvar(self):
        return self.model["nvar"]

    def set_parameter(self, pname, pval):
        if pval is not None:
            setattr(self, pname, pval * numpy.ones((self.number_of_regions,)))
        else:
            setattr(self, pname, pval)

    def set_params_from_tvb_model(self, model, params):
        for pname in params:
            self.set_parameter(pname, getattr(model, pname))

    def build_model_config_from_tvb(self):
        model = self.model
        del model["model_name"]
        model_config = ModelConfiguration(self.model_name, self.connectivity, self.coupling,
                                          self.monitor, self.initial_conditions, self.noise, **model)
        return model_config

    def build_model_config_from_model_config(self, model_config):
        if not isinstance(model_config, dict):
            model_config_dict = model_config.__dict__
        else:
            model_config_dict = model_config
        model_configuration = ModelConfiguration()
        for attr, value in model_configuration.__dict__.items():
            value = model_config_dict.get(attr, None)
            if value is None:
                warning(attr + " not found in the input model configuration dictionary!" +
                        "\nLeaving default " + attr + ": " + str(getattr(model_configuration, attr)))
            if value is not None:
                setattr(model_configuration, attr, value)
        return model_configuration

    def set_attribute(self, attr_name, data):
        setattr(self, attr_name, data)
        return self
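A hypothetical usage sketch for the generic builder above; "Epileptor" is assumed to resolve to tvb.simulator.models.epileptor.Epileptor via the import in the model property:

builder = ModelConfigurationBuilder()
builder.set_attribute("model_name", "Epileptor")
builder.set_attribute("connectivity", numpy.random.rand(5, 5))
model_config = builder.build_model_config_from_tvb()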
Example 18
class H5Reader(object):
    logger = initialize_logger(__name__)

    connectivity_filename = "Connectivity.h5"
    cortical_surface_filename = "CorticalSurface.h5"
    subcortical_surface_filename = "SubcorticalSurface.h5"
    cortical_region_mapping_filename = "RegionMapping.h5"
    subcortical_region_mapping_filename = "RegionMappingSubcortical.h5"
    volume_mapping_filename = "VolumeMapping.h5"
    structural_mri_filename = "StructuralMRI.h5"
    sensors_filename_prefix = "Sensors"
    sensors_filename_separator = "_"

    def read_simulator_model(self, path, model_builder_fun):
        """
        :param path: Path towards a TVB model H5 file
        :return: TVB model object
        """
        self.logger.info("Starting to read epileptor model from: %s" % path)
        h5_file = h5py.File(path, 'r', libver='latest')
        try:
            model_name = h5_file["/"].attrs[H5_SUBTYPE_ATTRIBUTE]
            model = model_builder_fun(model_name)
        except:
            raise_value_error(
                "No model read from model configuration file!: %s" % str(path))

        return H5GroupHandlers().read_simulator_model_group(
            h5_file, model, "/")

    def read_connectivity(self, path):
        """
        :param path: Path towards a custom Connectivity H5 file
        :return: Connectivity object
        """
        self.logger.info("Starting to read a Connectivity from: %s" % path)
        h5_file = h5py.File(path, 'r', libver='latest')

        weights = h5_file['/' + ConnectivityH5Field.WEIGHTS][()]
        tract_lengths = h5_file['/' + ConnectivityH5Field.TRACTS][()]
        region_centres = h5_file['/' + ConnectivityH5Field.CENTERS][()]
        region_labels = h5_file['/' + ConnectivityH5Field.REGION_LABELS][()]
        orientations = h5_file['/' + ConnectivityH5Field.ORIENTATIONS][()]
        hemispheres = h5_file['/' + ConnectivityH5Field.HEMISPHERES][()]

        h5_file.close()

        conn = Connectivity(path, weights, tract_lengths, region_labels,
                            region_centres, hemispheres, orientations)
        self.logger.info("Successfully read connectvity from: %s" % path)

        return conn

    def read_surface(self, path):
        """
        :param path: Path towards a custom Surface H5 file
        :return: Surface object
        """
        if not os.path.isfile(path):
            self.logger.warning("Surface file %s does not exist" % path)
            return None

        self.logger.info("Starting to read Surface from: %s" % path)
        h5_file = h5py.File(path, 'r', libver='latest')

        vertices = h5_file['/' + SurfaceH5Field.VERTICES][()]
        triangles = h5_file['/' + SurfaceH5Field.TRIANGLES][()]
        vertex_normals = h5_file['/' + SurfaceH5Field.VERTEX_NORMALS][()]

        h5_file.close()

        surface = Surface(vertices, triangles, vertex_normals)
        self.logger.info("Successfully read surface from: %s" % path)

        return surface

    def read_sensors(self, path):
        """
        :param path: Path towards a custom head folder
        :return: 3 OrderedDicts with all sensors from Path by type
        """
        sensors_seeg = OrderedDict()
        sensors_eeg = OrderedDict()
        sensors_meg = OrderedDict()

        self.logger.info("Starting to read all Sensors from: %s" % path)

        all_head_files = os.listdir(path)
        for head_file in all_head_files:
            str_head_file = str(head_file)
            if not str_head_file.startswith(self.sensors_filename_prefix):
                continue
            name = str_head_file.split(".")[0]
            type = str_head_file[len(self.sensors_filename_prefix
                                     ):str_head_file.
                                 index(self.sensors_filename_separator)]
            if type.upper() == SensorTypes.TYPE_SEEG.value:
                sensors_seeg[name] = \
                    self.read_sensors_of_type(os.path.join(path, head_file), SensorTypes.TYPE_SEEG, name)
            if type.upper() == SensorTypes.TYPE_EEG.value:
                sensors_eeg[name] = \
                    self.read_sensors_of_type(os.path.join(path, head_file), SensorTypes.TYPE_EEG, name)
            if type.upper() == SensorTypes.TYPE_MEG.value:
                sensors_meg[name] = \
                    self.read_sensors_of_type(os.path.join(path, head_file), SensorTypes.TYPE_MEG, name)

        self.logger.info("Successfuly read all sensors from: %s" % path)

        return sensors_seeg, sensors_eeg, sensors_meg

    def read_sensors_of_type(self, sensors_file, type, name):
        """
        :param
            sensors_file: Path towards a custom Sensors H5 file
            type: Sensors type
        :return: Sensors object
        """
        if not os.path.exists(sensors_file):
            self.logger.warning("Senors file %s does not exist!" %
                                sensors_file)
            return []

        self.logger.info("Starting to read sensors of type %s from: %s" %
                         (type.value, sensors_file))
        h5_file = h5py.File(sensors_file, 'r', libver='latest')

        labels = h5_file['/' + SensorsH5Field.LABELS][()]
        locations = h5_file['/' + SensorsH5Field.LOCATIONS][()]

        if '/' + SensorsH5Field.GAIN_MATRIX in h5_file:
            gain_matrix = h5_file['/' + SensorsH5Field.GAIN_MATRIX][()]
        else:
            gain_matrix = None

        h5_file.close()

        sensors = Sensors(labels,
                          locations,
                          gain_matrix=gain_matrix,
                          s_type=type,
                          name=name)
        self.logger.info("Successfully read sensors from: %s" % sensors_file)

        return sensors

    def read_volume_mapping(self, path):
        """
        :param path: Path towards a custom VolumeMapping H5 file
        :return: volume mapping in a numpy array
        """
        if not os.path.isfile(path):
            self.logger.warning("VolumeMapping file %s does not exist" % path)
            return numpy.array([])

        self.logger.info("Starting to read VolumeMapping from: %s" % path)
        h5_file = h5py.File(path, 'r', libver='latest')

        data = h5_file['/data'][()]

        h5_file.close()
        self.logger.info("Successfully read volume mapping!")  #: %s" % data)

        return data

    def read_region_mapping(self, path):
        """
        :param path: Path towards a custom RegionMapping H5 file
        :return: region mapping in a numpy array
        """
        if not os.path.isfile(path):
            self.logger.warning("RegionMapping file %s does not exist" % path)
            return numpy.array([])

        self.logger.info("Starting to read RegionMapping from: %s" % path)
        h5_file = h5py.File(path, 'r', libver='latest')

        data = h5_file['/data'][()]

        h5_file.close()
        self.logger.info("Successfully read region mapping!")  #: %s" % data)

        return data

    def read_t1(self, path):
        """
        :param path: Path towards a custom StructuralMRI H5 file
        :return: structural MRI in a numpy array
        """
        if not os.path.isfile(path):
            self.logger.warning("StructuralMRI file %s does not exist" % path)
            return numpy.array([])

        self.logger.info("Starting to read StructuralMRI from: %s" % path)
        h5_file = h5py.File(path, 'r', libver='latest')

        data = h5_file['/data'][()]

        h5_file.close()
        self.logger.info("Successfully read structural MRI from: %s" % path)

        return data

    def read_head(self, path, atlas="default"):
        """
        :param path: Path towards a custom head folder
        :return: Head object
        """
        self.logger.info("Starting to read Head from: %s" % path)
        conn = self.read_connectivity(
            os.path.join(path, self.connectivity_filename))
        cort_srf = self.read_surface(
            os.path.join(path, self.cortical_surface_filename))
        subcort_srf = self.read_surface(
            os.path.join(path, self.subcortical_surface_filename))
        cort_rm = self.read_region_mapping(
            os.path.join(path, self.cortical_region_mapping_filename))
        subcort_rm = self.read_region_mapping(
            os.path.join(path, self.subcortical_region_mapping_filename))
        vm = self.read_volume_mapping(
            os.path.join(path, self.volume_mapping_filename))
        t1 = self.read_t1(os.path.join(path, self.structural_mri_filename))
        sensorsSEEG, sensorsEEG, sensorsMEG = self.read_sensors(path)

        if len(atlas) > 0:
            name = atlas
        else:
            name = path
        head = Head(conn,
                    cort_srf,
                    subcort_srf,
                    cort_rm,
                    subcort_rm,
                    vm,
                    t1,
                    name,
                    sensorsSEEG=sensorsSEEG,
                    sensorsEEG=sensorsEEG,
                    sensorsMEG=sensorsMEG)
        self.logger.info("Successfully read Head from: %s" % path)

        return head

    def read_model_configuration_builder(
            self,
            path,
            default_model="Epileptor",
            model_configuration_builder=ModelConfigurationBuilder):
        """
        :param path: Path towards a ModelConfigurationService H5 file
        :return: ModelConfigurationService object
        """
        self.logger.info(
            "Starting to read ModelConfigurationService from: %s" % path)
        h5_file = h5py.File(path, 'r', libver='latest')

        try:
            model_name = h5_file.attrs["model_name"]
        except:
            self.logger.warning(
                "No model_name read from model configuration builder file!: %s"
                % str(path))
            self.logger.warning("Setting default model!: %s" % default_model)
            model_name = default_model

        mc_service = model_configuration_builder(model_name)

        for dataset in h5_file.keys():
            if dataset != "model":
                mc_service.set_attribute(dataset, h5_file["/" + dataset][()])

        for attr in h5_file.attrs.keys():
            mc_service.set_attribute(attr, h5_file.attrs[attr])

        h5_file.close()
        return mc_service

    def read_model_configuration(self,
                                 path,
                                 default_model="Epileptor",
                                 model_configuration=ModelConfiguration):
        """
        :param path: Path towards a EpileptorModelConfiguration H5 file
        :return: EpileptorModelConfiguration object
        """
        self.logger.info("Starting to read ModelConfiguration from: %s" % path)
        h5_file = h5py.File(path, 'r', libver='latest')

        try:
            model_name = h5_file.attrs["model_name"]
        except:
            self.logger.warning(
                "No model_name read from model configuration file!: %s" %
                str(path))
            self.logger.warning("Setting default model!: %s" % default_model)
            model_name = default_model

        model_configuration = model_configuration(model_name)
        for dataset in h5_file.keys():
            if dataset != "model":
                model_configuration.set_attribute(dataset,
                                                  h5_file["/" + dataset][()])

        for attr in h5_file.attrs.keys():
            model_configuration.set_attribute(attr, h5_file.attrs[attr])

        h5_file.close()
        return model_configuration

    def read_simulation_settings(self, path):
        """
        :param path: Path towards a SimulationSettings H5 file
        :return: SimulationSettings
        """
        self.logger.info("Starting to read SimulationSettings from: %s" % path)
        h5_file = h5py.File(path, 'r', libver='latest')

        sim_settings = SimulationSettings()
        for dataset in h5_file.keys():
            sim_settings.set_attribute(dataset, h5_file["/" + dataset][()])

        for attr in h5_file.attrs.keys():
            sim_settings.set_attribute(attr, h5_file.attrs[attr])

        h5_file.close()
        return sim_settings

    def read_ts(self, path):
        """
        :param path: Path towards a valid TimeSeries H5 file
        :return: Timeseries data and time in 2 numpy arrays
        """
        self.logger.info("Starting to read TimeSeries from: %s" % path)
        h5_file = h5py.File(path, 'r', libver='latest')

        data = h5_file['/data'][()]
        total_time = int(h5_file["/"].attrs["Simulated_period"][0])
        nr_of_steps = int(h5_file["/data"].attrs["Number_of_steps"][0])
        start_time = float(h5_file["/data"].attrs["Start_time"][0])
        time = numpy.linspace(start_time, total_time, nr_of_steps)

        self.logger.info("First Channel sv sum: " + str(numpy.sum(data[:, 0])))
        self.logger.info("Successfully read timeseries!")  #: %s" % data)
        h5_file.close()

        return time, data

    def read_timeseries(self, path, timeseries=Timeseries):
        """
        :param path: Path towards a valid TimeSeries H5 file
        :return: a Timeseries object
        """
        self.logger.info("Starting to read TimeSeries from: %s" % path)
        h5_file = h5py.File(path, 'r', libver='latest')

        data = h5_file['/data'][()]
        time = h5_file['/time'][()]
        labels = h5_file['/labels'][()]
        variables = h5_file['/variables'][()]
        time_unit = h5_file.attrs["time_unit"]
        self.logger.info("First Channel sv sum: " + str(numpy.sum(data[:, 0])))
        self.logger.info("Successfully read Timeseries!")  #: %s" % data)
        h5_file.close()

        return timeseries(
            data, {
                TimeseriesDimensions.SPACE.value: labels,
                TimeseriesDimensions.VARIABLES.value: variables
            }, time[0], numpy.mean(numpy.diff(time)), time_unit)

    def read_dictionary(self, path, type=None):
        """
        :param path: Path towards a dictionary H5 file
        :return: dict
        """
        self.logger.info("Starting to read a dictionary from: %s" % path)
        h5_file = h5py.File(path, 'r', libver='latest')
        dictionary = H5GroupHandlers().read_dictionary_from_group(
            h5_file, type)
        h5_file.close()
        return dictionary

    def read_list_of_dicts(self, path, type=None):
        self.logger.info("Starting to read a list of dictionaries from: %s" %
                         path)
        h5_file = h5py.File(path, 'r', libver='latest')
        list_of_dicts = []
        id = 0
        h5_group_handlers = H5GroupHandlers()
        while True:
            try:
                dict_group = h5_file[str(id)]
            except:
                break
            list_of_dicts.append(
                h5_group_handlers.read_dictionary_from_group(dict_group, type))
            id += 1
        h5_file.close()
        return list_of_dicts

    def read_probabilistic_model(self, path):
        h5_file = h5py.File(path, 'r', libver='latest')
        epi_subtype = h5_file.attrs[H5_SUBTYPE_ATTRIBUTE]

        probabilistic_model = None
        if ProbabilisticModelBase.__name__.find(epi_subtype) >= 0:
            probabilistic_model = ProbabilisticModelBase()
        else:
            raise_value_error(
                epi_subtype +
                " does not correspond to the available probabilistic model!:\n"
                + ProbabilisticModelBase.__name__)

        for attr in h5_file.attrs.keys():
            if attr not in H5_TYPES_ATTRUBUTES:
                probabilistic_model.__setattr__(attr, h5_file.attrs[attr])

        for key, value in h5_file.items():
            if isinstance(value, h5py.Dataset):
                probabilistic_model.__setattr__(key, value[()])
            if isinstance(value, h5py.Group):
                h5_group_handlers = H5GroupHandlers()
                if key == "parameters":  # and value.attrs[epi_subtype_key] == OrderedDict.__name__:
                    parameters = h5_group_handlers.handle_group_parameters(
                        value)

                    probabilistic_model.__setattr__(key, parameters)

                if key == "ground_truth":
                    h5_group_handlers.handle_group_ground_truth(
                        value, probabilistic_model)

        h5_file.close()
        return probabilistic_model
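A hypothetical usage sketch for the H5Reader above; a head folder containing H5 files named as in the class attributes is assumed:

reader = H5Reader()
head = reader.read_head("/path/to/Head")
conn = reader.read_connectivity("/path/to/Head/Connectivity.h5")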
Example 19
class ModelConfigurationBuilder(ModelConfigurationBuilderBase):
    logger = initialize_logger(__name__)

    # For the moment, coupling, monitor, and noise are left as None.
    # If in the future they are targeted for probabilistic modeling, they will be given content.

    x0 = np.array([-2.0])
    a = np.array([A_DEF])
    b = np.array([B_DEF])
    yc = np.array([YC_DEF])
    d = np.array([D_DEF])
    Iext1 = np.array([I_EXT1_DEF])
    Iext2 = np.array([I_EXT2_DEF])
    slope = np.array([SLOPE_DEF])
    s = np.array([S_DEF])
    gamma = np.array([GAMMA_DEF])
    tau1 = np.array([TAU1_DEF])
    tau0 = np.array([TAU0_DEF])
    tau2 = np.array([TAU2_DEF])
    zmode = np.array([ZMODE_DEF])
    pmode = np.array([PMODE_DEF])
    K = np.array([K_DEF])
    Kvf = np.array([0.0])
    Kf = np.array([0.0])

    def __init__(self, input="EpileptorDP", connectivity=None, K_unscaled=np.array([K_UNSCALED_DEF]),
                 x0_values=X0_DEF, e_values=E_DEF, x1eq_mode="optimize", **kwargs):
        if isinstance(input, Simulator):
            # TODO: make this more specific once we clarify the model configuration representation compared to simTVB
            self.model_name = input.model._ui_name
            self.set_params_from_tvb_model(input.model)
            self.connectivity = normalize_weights(input.connectivity.weights)
            # self.coupling = input.coupling
            self.initial_conditions = np.squeeze(input.initial_conditions)  # initial conditions in a reduced form
            # self.noise = input.integrator.noise
            # self.monitor = ensure_list(input.monitors)[0]
        else:
            if isinstance(input, Model):
                self.model_name = input._ui_name
                self.set_params_from_tvb_model(input)
            elif isinstance(input, basestring):
                self.model_name = input
            else:
                raise_value_error("Input (%s) is not a TVB simulator, an epileptor model, "
                                  "\nor a string of an epileptor model!")
        if isinstance(connectivity, Connectivity):
            self.connectivity = connectivity.normalized_weights
        elif isinstance(connectivity, TVBConnectivity):
            self.connectivity = normalize_weights(connectivity.weights)
        elif isinstance(connectivity, np.ndarray):
            self.connectivity = normalize_weights(connectivity)
        else:
            if not(isinstance(input, Simulator)):
                warning("Input connectivity (%s) is not a virtual patient connectivity, a TVB connectivity, "
                        "\nor a numpy.array!" % str(connectivity))
        self.x0_values = x0_values * np.ones((self.number_of_regions,), dtype=np.float32)
        self.x1eq_mode = x1eq_mode
        if len(ensure_list(K_unscaled)) == 1:
            K_unscaled = np.array(K_unscaled) * np.ones((self.number_of_regions,), dtype=np.float32)
        elif len(ensure_list(K_unscaled)) == self.number_of_regions:
            K_unscaled = np.array(K_unscaled)
        else:
            self.logger.warning(
                "The length of input global coupling K_unscaled is neither 1 nor equal to the number of regions!" +
                "\nSetting model_configuration_builder.K_unscaled = K_UNSCALED_DEF for all regions")
            K_unscaled = K_UNSCALED_DEF * np.ones((self.number_of_regions,), dtype=np.float32)
        self.set_K_unscaled(K_unscaled)
        for pname in EPILEPTOR_PARAMS:
            self.set_parameter(pname, kwargs.get(pname, getattr(self, pname)))
        # Update K_unscaled
        self.e_values = e_values * np.ones((self.number_of_regions,), dtype=np.float32)
        self.x0cr = 0.0
        self.rx0 = 0.0
        self._compute_critical_x0_scaling()

    def __repr__(self):
        d = {"01. model": self.model,
             "02. Number of regions": self.number_of_regions,
             "03. x0_values": self.x0_values,
             "04. e_values": self.e_values,
             "05. K": self.K,
             "06. x1eq_mode": self.x1eq_mode,
             "07. connectivity": self.connectivity,
             "08. coupling": self.coupling,
             "09. monitor": self.monitor,
             "10. initial_conditions": self.initial_conditions,
             "11. noise": self.noise

             }
        return formal_repr(self, d)

    def set_params_from_tvb_model(self, model):
        for pname in ["x0", "a", "b", "d", "Iext2", "slope", "gamma", "tt", "r", "tau2", "Kvf", "Kf"]:
            self.set_parameter(pname, getattr(model, pname))

        if model._ui_name == "Epileptor":
            for pname in ["c","Iext", "aa", "tt", "Ks"]:
                self.set_parameter(pname, getattr(model, pname))
        else:
            for pname in ["yc","Iext1", "s", "tau1", "K"]:
                self.set_parameter(pname, getattr(model, pname))
            if model._ui_name == "EpileptorDPrealistic":
                for pname in ["zmode", "pmode"]:
                    self.set_parameter(pname, getattr(model, pname))
        return self

    def set_parameter(self, pname, pval):
        if pname == "tt":
            self.tau1 = pval * np.ones((self.number_of_regions,), dtype=np.float32)
        elif pname == "r":
            self.tau0 = 1.0 / pval
        elif pname == "c":
            self.yc = pval * np.ones((self.number_of_regions,), dtype=np.float32)
        elif pname == "Iext":
            self.Iext1 = pval * np.ones((self.number_of_regions,), dtype=np.float32)
        elif pname == "s":
            self.s = pval * np.ones((self.number_of_regions,), dtype=np.float32)
        elif pname == "Ks":
            self.K = -pval * np.ones((self.number_of_regions,), dtype=np.float32)
        elif pval is not None:
            setattr(self, pname, pval * np.ones((self.number_of_regions,), dtype=np.float32))
        else:
            setattr(self, pname, pval)
        return self

    def build_model_config_from_tvb(self):
        model = self.model
        del model["model_name"]
        model_config = ModelConfiguration(self.model_name, self.connectivity, self.coupling,
                                          self.monitor, self.initial_conditions, self.noise,
                                          self.x0_values, self.e_values, x1eq=None, zeq=None, Ceq=None, **model)
        if model_config.initial_conditions is None:
            model_config.initial_conditions = compute_initial_conditions_from_eq_point(model_config)
        return model_config

    def build_model_config_from_model_config(self, model_config):
        if not isinstance(model_config, dict):
            model_config_dict = model_config.__dict__
        else:
            model_config_dict = model_config
        model_configuration = ModelConfiguration()
        for attr, value in model_configuration.__dict__.items():
            value = model_config_dict.get(attr, None)
            if value is None:
                warning(attr + " not found in the input model configuration dictionary!" +
                        "\nLeaving default " + attr + ": " + str(getattr(model_configuration, attr)))
            if value is not None:
                setattr(model_configuration, attr, value)
        return model_configuration

    def set_K_unscaled(self, K_unscaled):
        self._normalize_global_coupling(K_unscaled)

    def update_K(self):
        self.set_K_unscaled(self.K * self.number_of_regions)
        return self

    @property
    def K_unscaled(self):
        # !!Very important to correct here for the sign of K!!
        return self.K * self.number_of_regions

    @property
    def model_K(self):
        return -self.K

    @property
    def Ks(self):
        # !!Very important to correct here for the sign of K!!
        return -self.K

    @property
    def c(self):
        return self.yc

    @property
    def Iext(self):
        return self.Iext1

    @property
    def aa(self):
        return self.s

    @property
    def tt(self):
        return self.tau1

    @property
    def model(self):
        return {pname: getattr(self, pname) for pname in ["model_name"] + EPILEPTOR_PARAMS}

    @property
    def nvar(self):
        return EPILEPTOR_MODEL_NVARS[self.model_name]

    def _compute_model_x0(self, x0_values, x0_indices=None):
        if x0_indices is None:
            x0_indices = np.array(range(self.number_of_regions))
        return calc_x0_val_to_model_x0(x0_values, self.yc[x0_indices], self.Iext1[x0_indices], self.a[x0_indices],
                                       self.b[x0_indices], self.d[x0_indices], self.zmode[x0_indices])

    def _ensure_equilibrum(self, x1eq, zeq):
        temp = x1eq > self.x1eq_cr - 10 ** (-3)
        if temp.any():
            x1eq[temp] = self.x1eq_cr - 10 ** (-3)
            zeq = self._compute_z_equilibrium(x1eq)

        return x1eq, zeq

    def _compute_x1_equilibrium_from_E(self, e_values):
        array_ones = np.ones((self.number_of_regions,), dtype=np.float32)
        return ((e_values - 5.0) / 3.0) * array_ones

    def _compute_z_equilibrium(self, x1eq):
        return calc_eq_z(x1eq, self.yc, self.Iext1, "2d", slope=self.slope, a=self.a, b=self.b, d=self.d)

    def _compute_critical_x0_scaling(self):
        (self.x0cr, self.rx0) = calc_x0cr_r(self.yc, self.Iext1, a=self.a, b=self.b, d=self.d, zmode=self.zmode)

    def _compute_coupling_at_equilibrium(self, x1eq, model_connectivity):
        return calc_coupling(x1eq, self.K, model_connectivity)

    def _compute_x0_values_from_x0_model(self, x0):
        return calc_model_x0_to_x0_val(x0, self.yc, self.Iext1, self.a, self.b, self.d, self.zmode)

    def _compute_x0_values(self, x1eq, zeq, model_connectivity):
        x0 = calc_x0(x1eq, zeq, self.K, model_connectivity)
        return self._compute_x0_values_from_x0_model(x0)

    def _compute_e_values(self, x1eq):
        return 3.0 * x1eq + 5.0

    def _compute_params_after_equilibration(self, x1eq, zeq, model_connectivity):
        self._compute_critical_x0_scaling()
        Ceq = self._compute_coupling_at_equilibrium(x1eq, model_connectivity)
        x0_values = self._compute_x0_values(x1eq, zeq, model_connectivity)
        e_values = self._compute_e_values(x1eq)
        x0 = self._compute_model_x0(x0_values)
        return x0, Ceq, x0_values, e_values

    def _compute_x1_and_z_equilibrium_from_E(self, e_values):
        x1EQ = self._compute_x1_equilibrium_from_E(e_values)
        zEQ = self._compute_z_equilibrium(x1EQ)
        return x1EQ, zEQ

    def _compute_x1_equilibrium(self, e_indices, x1eq, zeq, x0_values, model_connectivity):
        self._compute_critical_x0_scaling()
        x0_indices = np.delete(np.array(range(self.number_of_regions)), e_indices)
        x0 = self._compute_model_x0(x0_values, x0_indices)
        if self.x1eq_mode == "linTaylor":
            x1eq = \
                eq_x1_hypo_x0_linTaylor(x0_indices, e_indices, x1eq, zeq, x0, self.K,
                                        model_connectivity, self.yc, self.Iext1, self.a, self.b, self.d)[0]
        else:
            x1eq = \
                eq_x1_hypo_x0_optimize(x0_indices, e_indices, x1eq, zeq, x0, self.K,
                                       model_connectivity, self.yc, self.Iext1, self.a, self.b, self.d)[0]
        return x1eq

    def _normalize_global_coupling(self, K_unscaled):
        self.K = K_unscaled / self.number_of_regions

    def _configure_model_from_equilibrium(self, x1eq, zeq, model_connectivity):
        # x1eq, zeq = self._ensure_equilibrum(x1eq, zeq)  # We don't do this by default anymore
        x0, Ceq, x0_values, e_values = self._compute_params_after_equilibration(x1eq, zeq, model_connectivity)
        self.x0 = x0
        model = self.model
        del model["model_name"]
        model_config = ModelConfiguration(self.model_name, model_connectivity, self.coupling,
                                          self.monitor, self.initial_conditions, self.noise,
                                          x0_values, e_values, x1eq, zeq, Ceq, **model)
        if model_config.initial_conditions is None:
            model_config.initial_conditions = compute_initial_conditions_from_eq_point(model_config)
        return model_config

    def build_model_from_E_hypothesis(self, disease_hypothesis):

        # This function sets healthy regions to the default epileptogenicity.

        model_connectivity = np.array(self.connectivity)

        # Then apply connectivity disease hypothesis scaling if any:
        if len(disease_hypothesis.w_indices) > 0:
            model_connectivity *= disease_hypothesis.connectivity_disease

        # All nodes except for the diseased ones will get the default epileptogenicity:
        e_values = np.array(self.e_values)
        e_values[disease_hypothesis.e_indices] = disease_hypothesis.e_values

        # Compute equilibrium from epileptogenicity:
        x1eq, zeq = self._compute_x1_and_z_equilibrium_from_E(e_values)

        if len(disease_hypothesis.x0_values) > 0:

            # If there is also some x0 hypothesis, solve the system for the equilibrium:
            # x0_values values must have size of len(x0_indices),
            # e_indices are all regions except for the x0_indices in this case
            x1eq = self._compute_x1_equilibrium(np.delete(range(self.number_of_regions), disease_hypothesis.x0_indices),
                                                x1eq, zeq, disease_hypothesis.x0_values, model_connectivity)
            zeq = self._compute_z_equilibrium(x1eq)

        return self._configure_model_from_equilibrium(x1eq, zeq, model_connectivity)

    def build_model_from_hypothesis(self, disease_hypothesis):
        # This function sets healthy regions to the default excitability.

        model_connectivity = np.array(self.connectivity)

        # Then apply connectivity disease hypothesis scaling if any:
        if len(disease_hypothesis.w_indices) > 0:
            model_connectivity *= disease_hypothesis.connectivity_disease

        # We assume that all nodes have the default (healthy) excitability:
        x0_values = np.array(self.x0_values)
        # ...and some excitability-diseased ones:
        x0_values[disease_hypothesis.x0_indices] = disease_hypothesis.x0_values
        # x0_values values must have size of len(x0_indices):
        x0_values = np.delete(x0_values, disease_hypothesis.e_indices)

        # There might be some epileptogenicity-diseased regions as well:
        # Initialize with the default e_values
        e_values = np.array(self.e_values)
        # and assign any diseased E_values if any
        e_values[disease_hypothesis.e_indices] = disease_hypothesis.e_values

        # Compute equilibrium only from epileptogenicity:
        x1eq, zeq = self._compute_x1_and_z_equilibrium_from_E(e_values)

        # Now, solve the system in order to compute equilibrium:
        x1eq = self._compute_x1_equilibrium(disease_hypothesis.e_indices, x1eq, zeq, x0_values,
                                            model_connectivity)
        zeq = self._compute_z_equilibrium(x1eq)

        return self._configure_model_from_equilibrium(x1eq, zeq, model_connectivity)

    # TODO: This is used from PSE for varying an attribute's value. We should find a better way, not hardcoded strings.
    def set_attributes_from_pse(self, values, paths, indices):
        for i, val in enumerate(paths):
            vals = val.split(".")
            if vals[0] == "model_configuration_builder":
                if vals[1] == "K_unscaled":
                    temp = self.K_unscaled
                    temp[indices[i]] = values[i]
                    self.set_K_unscaled(temp)
                else:
                    getattr(self, vals[1])[indices[i]] = values[i]
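A hypothetical usage sketch for the epileptor-specific builder above; `conn` (a connectivity array) and `hyp` (a disease hypothesis object with x0/e indices and values) are assumptions here:

builder = ModelConfigurationBuilder("EpileptorDP2D", connectivity=conn,
                                    K_unscaled=np.array([10.0]))
model_config = builder.build_model_from_hypothesis(hyp)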
Example 20
class Timeseries(object):

    logger = initialize_logger(__name__)

    dimensions = TimeseriesDimensions

    # dimension_labels = {"space": numpy.array([]), "variables": numpy.array([])}

    def __init__(self,
                 data,
                 dimension_labels,
                 time_start,
                 time_step,
                 time_unit="ms"):
        self.data = self.prepare_4D(data)
        self.dimension_labels = dimension_labels
        self.time_start = time_start
        self.time_step = time_step
        self.time_unit = time_unit

    def prepare_4D(self, data):
        if data.ndim < 2:
            self.logger.error("The data array is expected to be at least 2D!")
            raise ValueError
        if data.ndim < 4:
            if data.ndim == 2:
                data = numpy.expand_dims(data, 2)
            data = numpy.expand_dims(data, 3)
        return data

    @property
    def shape(self):
        return self.data.shape

    @property
    def time_length(self):
        return self.data.shape[0]

    @property
    def sampling_frequency(self):
        if len(self.time_unit) > 0 and self.time_unit[0] == "m":
            return 1000.0 / self.time_step
        else:
            return 1.0 / self.time_step

    @property
    def number_of_labels(self):
        return self.data.shape[1]

    @property
    def number_of_variables(self):
        return self.data.shape[2]

    @property
    def number_of_samples(self):
        return self.data.shape[3]

    @property
    def space_labels(self):
        return self.dimension_labels.get(TimeseriesDimensions.SPACE.value,
                                         numpy.array([]))

    @property
    def variables_labels(self):
        return self.dimension_labels.get(TimeseriesDimensions.VARIABLES.value,
                                         numpy.array([]))

    @property
    def time_end(self):
        return self.time_start + (self.data.shape[0] - 1) * self.time_step

    @property
    def time(self):
        return numpy.arange(self.time_start, self.time_end + self.time_step,
                            self.time_step)

    @property
    def squeezed(self):
        return numpy.squeeze(self.data)

    def _get_index_for_slice_label(self, slice_label, slice_idx):
        if slice_idx == 1:
            return self._get_indices_for_labels([slice_label])[0]
        if slice_idx == 2:
            return self._get_index_of_state_variable(slice_label)

    def _check_for_string_slice_indices(self, current_slice, slice_idx):
        slice_label1 = current_slice.start
        slice_label2 = current_slice.stop

        if isinstance(slice_label1, basestring):
            slice_label1 = self._get_index_for_slice_label(
                slice_label1, slice_idx)
        if isinstance(slice_label2, basestring):
            slice_label2 = self._get_index_for_slice_label(
                slice_label2, slice_idx)

        return slice(slice_label1, slice_label2, current_slice.step)

    def _get_string_slice_index(self, current_slice_string, slice_idx):
        return self._get_index_for_slice_label(current_slice_string, slice_idx)

    def _get_index_of_state_variable(self, sv_label):
        try:
            sv_index = numpy.where(self.dimension_labels[
                TimeseriesDimensions.VARIABLES.value] == sv_label)[0][0]
        except KeyError:
            self.logger.error(
                "There are no state variables defined for this instance. Its shape is: %s",
                self.data.shape)
            raise
        except IndexError:
            self.logger.error(
                "Cannot access index of state variable label: %s. Existing state variables: %s"
                %
                (sv_label,
                 self.dimension_labels[TimeseriesDimensions.VARIABLES.value]))
            raise
        return sv_index

    def _check_space_indices(self, list_of_index):
        for index in list_of_index:
            if index < 0 or index >= self.data.shape[1]:
                self.logger.error(
                    "Some of the given indices are out of region range: [0, %s]",
                    self.data.shape[1])
                raise IndexError

    def _get_indices_for_labels(self, list_of_labels):
        list_of_indices_for_labels = []
        for label in list_of_labels:
            try:
                space_index = numpy.where(self.dimension_labels[
                    TimeseriesDimensions.SPACE.value] == label)[0][0]
            except IndexError:
                self.logger.error(
                    "Cannot access index of space label: %s. Existing space labels: %s"
                    %
                    (label,
                     self.dimension_labels[TimeseriesDimensions.SPACE.value]))
                raise
            list_of_indices_for_labels.append(space_index)
        return list_of_indices_for_labels

    def _get_time_unit_for_index(self, time_index):
        return self.time_start + time_index * self.time_step

    def _get_index_for_time_unit(self, time_unit):
        return int((time_unit - self.time_start) / self.time_step)

    def __getattr__(self, attr_name):
        state_variables_keys = []
        if TimeseriesDimensions.VARIABLES.value in self.dimension_labels.keys(
        ):
            state_variables_keys = self.dimension_labels[
                TimeseriesDimensions.VARIABLES.value]
            if attr_name in self.dimension_labels[
                    TimeseriesDimensions.VARIABLES.value]:
                return self.get_state_variable(attr_name)
        space_keys = []
        if (TimeseriesDimensions.SPACE.value in self.dimension_labels.keys()):
            space_keys = self.dimension_labels[
                TimeseriesDimensions.SPACE.value]
            if attr_name in self.dimension_labels[
                    TimeseriesDimensions.SPACE.value]:
                return self.get_subspace_by_labels([attr_name])
        # Hack to avoid spurious error messages when numpy.array() probes for __ attributes...
        # TODO: something better? Maybe not needed if we never do something like numpy.array(timeseries)
        if attr_name.find("__") < 0:
            self.logger.error(
                "Attribute %s is not defined for this instance! You can use the following labels: "
                "state_variables = %s and space = %s" %
                (attr_name, state_variables_keys, space_keys))
        raise AttributeError(attr_name)

    def __getitem__(self, slice_tuple):
        slice_list = []
        for idx, current_slice in enumerate(slice_tuple):
            if isinstance(current_slice, slice):
                slice_list.append(
                    self._check_for_string_slice_indices(current_slice, idx))
            else:
                if isinstance(current_slice, basestring):
                    slice_list.append(
                        self._get_string_slice_index(current_slice, idx))
                else:
                    slice_list.append(current_slice)

        return self.data[tuple(slice_list)]

    def get_state_variable(self, sv_label):
        sv_data = self.data[:, :,
                            self._get_index_of_state_variable(sv_label), :]
        subspace_dimension_labels = deepcopy(self.dimension_labels)
        subspace_dimension_labels[
            TimeseriesDimensions.VARIABLES.value] = numpy.array([sv_label])
        if sv_data.ndim == 3:
            sv_data = numpy.expand_dims(sv_data, 2)
        return self.__class__(sv_data, subspace_dimension_labels,
                              self.time_start, self.time_step, self.time_unit)

    def get_subspace_by_labels(self, list_of_labels):
        list_of_indices_for_labels = self._get_indices_for_labels(
            list_of_labels)
        subspace_data = self.data[:, list_of_indices_for_labels, :, :]
        subspace_dimension_labels = deepcopy(self.dimension_labels)
        subspace_dimension_labels[
            TimeseriesDimensions.SPACE.value] = numpy.array(list_of_labels)
        if subspace_data.ndim == 3:
            subspace_data = numpy.expand_dims(subspace_data, 1)
        return self.__class__(subspace_data, subspace_dimension_labels,
                              self.time_start, self.time_step, self.time_unit)

    def get_subspace_by_index(self, list_of_index):
        self._check_space_indices(list_of_index)
        subspace_data = self.data[:, list_of_index, :, :]
        subspace_dimension_labels = deepcopy(self.dimension_labels)
        subspace_dimension_labels[TimeseriesDimensions.SPACE.value] = \
            numpy.array(self.dimension_labels[TimeseriesDimensions.SPACE.value])[list_of_index]
        if subspace_data.ndim == 3:
            subspace_data = numpy.expand_dims(subspace_data, 1)
        return self.__class__(subspace_data, subspace_dimension_labels,
                              self.time_start, self.time_step, self.time_unit)

    def get_time_window(self, index_start, index_end):
        if index_start < 0 or index_end > self.data.shape[0]:
            self.logger.error(
                "The time indices are outside time series interval: [%s, %s]" %
                (0, self.data.shape[0]))
            raise IndexError
        subtime_data = self.data[index_start:index_end, :, :, :]
        if subtime_data.ndim == 3:
            subtime_data = numpy.expand_dims(subtime_data, 0)
        return self.__class__(subtime_data, self.dimension_labels,
                              self._get_time_unit_for_index(index_start),
                              self.time_step, self.time_unit)

    def get_time_window_by_units(self, unit_start, unit_end):
        end_time = self.time_end
        if unit_start < self.time_start or unit_end > end_time:
            self.logger.error(
                "The time units are outside time series interval: [%s, %s]" %
                (self.time_start, end_time))
            raise ValueError
        index_start = self._get_index_for_time_unit(unit_start)
        index_end = self._get_index_for_time_unit(unit_end)
        return self.get_time_window(index_start, index_end)

    def decimate_time(self, time_step):
        if time_step % self.time_step != 0:
            self.logger.error(
                "Cannot decimate time if new time step is not a multiple of the old time step"
            )
            raise ValueError

        index_step = int(time_step / self.time_step)
        time_data = self.data[::index_step, :, :, :]

        return self.__class__(time_data, self.dimension_labels,
                              self.time_start, time_step, self.time_unit)

    def get_sample_window(self, index_start, index_end):
        subsample_data = self.data[:, :, :, index_start:index_end]
        if subsample_data.ndim == 3:
            subsample_data = numpy.expand_dims(subsample_data, 3)
        return self.__class__(subsample_data, self.dimension_labels,
                              self.time_start, self.time_step, self.time_unit)

    def get_sample_window_by_percentile(self, percentile_start,
                                        percentile_end):
        pass

    def get_source(self):
        if TimeseriesDimensions.VARIABLES.value not in self.dimension_labels.keys(
        ):
            self.logger.error(
                "No state variables are defined for this instance!")
            raise ValueError
        if PossibleVariables.SOURCE.value in self.dimension_labels[
                TimeseriesDimensions.VARIABLES.value]:
            return self.get_state_variable(PossibleVariables.SOURCE.value)

    def get_bipolar(self):
        bipolar_labels, bipolar_inds = monopolar_to_bipolar(self.space_labels)
        data = self.data[:, bipolar_inds[0]] - self.data[:, bipolar_inds[1]]
        bipolar_dimension_labels = deepcopy(self.dimension_labels)
        bipolar_dimension_labels["space"] = numpy.array(bipolar_labels)
        return self.__class__(data, bipolar_dimension_labels, self.time_start,
                              self.time_step, self.time_unit)
Example n. 21
class H5WriterBase(object):
    logger = initialize_logger(__name__)

    H5_TYPE_ATTRIBUTE = "EPI_Type"
    H5_SUBTYPE_ATTRIBUTE = "EPI_Subtype"

    def _determine_datasets_and_attributes(self, object, datasets_size=None):
        datasets_dict = {}
        metadata_dict = {}
        groups_keys = []

        try:
            if isinstance(object, dict):
                dict_object = object
            else:
                dict_object = vars(object)
            for key, value in dict_object.items():
                if isinstance(value, numpy.ndarray):
                    # if value.size == 1:
                    #     metadata_dict.update({key: value})
                    # else:
                    datasets_dict.update({key: value})
                    # if datasets_size is not None and value.size == datasets_size:
                    #     datasets_dict.update({key: value})
                    # else:
                    #     if datasets_size is None and value.size > 0:
                    #         datasets_dict.update({key: value})
                    #     else:
                    #         metadata_dict.update({key: value})
                # TODO: check how this works! Be careful not to include lists and tuples, if possible, in tvb_fit classes!
                elif isinstance(value, (list, tuple)):
                    warning(
                        "Writing %s %s to h5 file as a numpy array dataset !" %
                        (value.__class__, key), self.logger)
                    datasets_dict.update({key: numpy.array(value)})
                else:
                    if is_numeric(value) or isinstance(value, str):
                        metadata_dict.update({key: value})
                    elif not (callable(value)):
                        groups_keys.append(key)
        except:
            msg = "Failed to decompose group object: " + str(object) + "!"
            try:
                self.logger.info(str(object.__dict__))
            except:
                msg += "\n It has no __dict__ attribute!"
            warning(msg, self.logger)

        return datasets_dict, metadata_dict, groups_keys

    def _write_dicts_at_location(self, datasets_dict, metadata_dict, location):
        for key, value in datasets_dict.items():
            try:
                location.create_dataset(key, data=value)
            except:
                warning(
                    "Failed to write to %s dataset %s %s:\n%s !" %
                    (str(location), value.__class__, key, str(value)),
                    self.logger)

        for key, value in metadata_dict.items():
            try:
                location.attrs.create(key, value)
            except:
                warning(
                    "Failed to write to %s attribute %s %s:\n%s !" %
                    (str(location), value.__class__, key, str(value)),
                    self.logger)
        return location

    def _prepare_object_for_group(self,
                                  group,
                                  object,
                                  h5_type_attribute="HypothesisModel",
                                  nr_regions=None,
                                  regress_subgroups=True):
        group.attrs.create(self.H5_TYPE_ATTRIBUTE, h5_type_attribute)
        group.attrs.create(self.H5_SUBTYPE_ATTRIBUTE,
                           object.__class__.__name__)
        datasets_dict, metadata_dict, subgroups = self._determine_datasets_and_attributes(
            object, nr_regions)
        # If empty return None
        if len(datasets_dict) == len(metadata_dict) == len(subgroups) == 0:
            if isinstance(group, h5py.File):
                if regress_subgroups:
                    return group
                else:
                    return group, subgroups
            else:
                return None
        else:
            if len(datasets_dict) > 0 or len(metadata_dict) > 0:
                if isinstance(group, h5py.File):
                    group = self._write_dicts_at_location(
                        datasets_dict, metadata_dict, group)
                else:
                    self._write_dicts_at_location(datasets_dict, metadata_dict,
                                                  group)
            # Continue recursively going deeper in the object
            if regress_subgroups:
                for subgroup in subgroups:
                    if isinstance(object, dict):
                        child_object = object.get(subgroup, None)
                    else:
                        child_object = getattr(object, subgroup, None)
                    if child_object is not None:
                        group.create_group(subgroup)
                        temp = self._prepare_object_for_group(
                            group[subgroup], child_object, h5_type_attribute,
                            nr_regions)
                        # If empty delete it
                        if temp is None or len(temp.keys()) == 0:
                            del group[subgroup]

                return group
            else:
                return group, subgroups

    def write_object_to_file(self,
                             path,
                             object,
                             h5_type_attribute="HypothesisModel",
                             nr_regions=None):
        h5_file = h5py.File(change_filename_or_overwrite(path),
                            'a',
                            libver='latest')
        h5_file = self._prepare_object_for_group(h5_file, object,
                                                 h5_type_attribute, nr_regions)
        h5_file.close()
def sensitivity_analysis_pse_from_lsa_hypothesis(n_samples,
                                                 lsa_hypothesis,
                                                 connectivity_matrix,
                                                 model_configuration_builder,
                                                 lsa_service,
                                                 region_labels,
                                                 method="sobol",
                                                 half_range=0.1,
                                                 global_coupling=[],
                                                 healthy_regions_parameters=[],
                                                 save_services=False,
                                                 config=Config(),
                                                 **kwargs):
    logger = initialize_logger(__name__, config.out.FOLDER_LOGS)
    method = method.lower()
    if method in METHODS:
        if method in ["delta", "dgsm"]:
            sampler = "latin"
        elif method == "sobol":
            sampler = "saltelli"
        elif method == "fast":
            sampler = "fast_sampler"
        else:
            sampler = method
    else:
        raise_value_error("Method " + str(method) +
                          " is not one of the available methods " +
                          str(METHODS) + " !")
    all_regions_indices = range(lsa_hypothesis.number_of_regions)
    disease_indices = lsa_hypothesis.regions_disease_indices
    healthy_indices = np.delete(all_regions_indices, disease_indices).tolist()
    pse_params = {"path": [], "indices": [], "name": [], "low": [], "high": []}
    n_inputs = 0
    # First build from the hypothesis the input parameters of the sensitivity analysis.
    # These can be either originating from excitability, epileptogenicity or connectivity hypotheses,
    # or they can relate to the global coupling scaling (parameter K of the model configuration)
    for ii in range(len(lsa_hypothesis.x0_values)):
        n_inputs += 1
        pse_params["indices"].append([ii])
        pse_params["path"].append("hypothesis.x0_values")
        pse_params["name"].append(
            str(region_labels[lsa_hypothesis.x0_indices[ii]]) +
            " Excitability")
        pse_params["low"].append(lsa_hypothesis.x0_values[ii] - half_range)
        pse_params["high"].append(
            np.min(
                [MAX_DISEASE_VALUE,
                 lsa_hypothesis.x0_values[ii] + half_range]))
    for ii in range(len(lsa_hypothesis.e_values)):
        n_inputs += 1
        pse_params["indices"].append([ii])
        pse_params["path"].append("hypothesis.e_values")
        pse_params["name"].append(
            str(region_labels[lsa_hypothesis.e_indices[ii]]) +
            " Epileptogenicity")
        pse_params["low"].append(lsa_hypothesis.e_values[ii] - half_range)
        pse_params["high"].append(
            np.min(
                [MAX_DISEASE_VALUE, lsa_hypothesis.e_values[ii] + half_range]))
    for ii in range(len(lsa_hypothesis.w_values)):
        n_inputs += 1
        pse_params["indices"].append([ii])
        pse_params["path"].append("hypothesis.w_values")
        inds = linear_index_to_coordinate_tuples(lsa_hypothesis.w_indices[ii],
                                                 connectivity_matrix.shape)
        if len(inds) == 1:
            pse_params["name"].append(
                str(region_labels[inds[0][0]]) + "-" +
                str(region_labels[inds[0][1]]) + " Connectivity")
        else:
            pse_params["name"].append("Connectivity[" + str(inds) + "]")
        pse_params["low"].append(
            np.max([lsa_hypothesis.w_values[ii] - half_range, 0.0]))
        pse_params["high"].append(lsa_hypothesis.w_values[ii] + half_range)
    for val in global_coupling:
        n_inputs += 1
        pse_params["path"].append("model.configuration.service.K_unscaled")
        inds = val.get("indices", all_regions_indices)
        if np.all(inds == all_regions_indices):
            pse_params["name"].append("Global coupling")
        else:
            pse_params["name"].append("Afferent coupling[" + str(inds) + "]")
        pse_params["indices"].append(inds)
        pse_params["low"].append(val.get("low", 0.0))
        pse_params["high"].append(val.get("high", 2.0))
    # Now generate samples suitable for sensitivity analysis
    sampler = SalibSamplerInterface(n_samples=n_samples,
                                    sampler=sampler,
                                    random_seed=kwargs.get(
                                        "random_seed", None))
    input_samples = sampler.generate_samples(low=pse_params["low"],
                                             high=pse_params["high"],
                                             **kwargs)
    n_samples = input_samples.shape[1]
    pse_params.update(
        {"samples": [np.array(value) for value in input_samples.tolist()]})
    pse_params_list = dicts_of_lists_to_lists_of_dicts(pse_params)
    # Add a random jitter to the healthy regions if required...:
    sampler = ProbabilisticSampler(n_samples=n_samples,
                                   random_seed=kwargs.get("random_seed", None))
    for val in healthy_regions_parameters:
        inds = val.get("indices", healthy_indices)
        name = val.get("name", "x0_values")
        n_params = len(inds)
        samples = sampler.generate_samples(
            parameter=(
                kwargs.get("loc", 0.0),  # loc
                kwargs.get("scale", 2 * half_range)),  # scale
            probability_distribution="uniform",
            low=0.0,
            shape=(n_params, ))
        for ii in range(n_params):
            pse_params_list.append({
                "path": "model_configuration_builder." + name,
                "samples": samples[ii],
                "indices": [inds[ii]],
                "name": name
            })
    # Now run pse service to generate output samples:
    pse = LSAPSEService(hypothesis=lsa_hypothesis, params_pse=pse_params_list)
    pse_results, execution_status = pse.run_pse(connectivity_matrix, False,
                                                model_configuration_builder,
                                                lsa_service)
    pse_results = list_of_dicts_to_dicts_of_ndarrays(pse_results)
    # Now prepare inputs and outputs and run the sensitivity analysis:
    # NOTE!: Without the jittered healthy regions, which we don't want to include in the sensitivity analysis!
    inputs = dicts_of_lists_to_lists_of_dicts(pse_params)
    outputs = [{
        "names": ["LSA Propagation Strength"],
        "values": pse_results["lsa_propagation_strengths"]
    }]
    sensitivity_analysis_service = SensitivityAnalysisService(
        inputs,
        outputs,
        method=method,
        calc_second_order=kwargs.get("calc_second_order", True),
        conf_level=kwargs.get("conf_level", 0.95))
    results = sensitivity_analysis_service.run(**kwargs)
    if save_services:
        logger.info(pse.__repr__())
        writer = H5Writer()
        writer.write_pse_service(
            pse,
            os.path.join(config.out.FOLDER_RES,
                         method + "_test_pse_service.h5"))
        logger.info(sensitivity_analysis_service.__repr__())
        writer.write_sensitivity_analysis_service(
            sensitivity_analysis_service,
            os.path.join(config.out.FOLDER_RES,
                         method + "_test_sa_service.h5"))
    return results, pse_results
    HIGH_HPF, LOW_HPF, LOW_LPF, HIGH_LPF, WIN_LEN_RATIO, BIPOLAR, TARGET_DATA_PREPROCESSING, XModes, compute_upsample, \
    compute_seizure_length
from tvb_fit.tvb_epilepsy.base.model.timeseries import TimeseriesDimensions, Timeseries
from tvb_fit.tvb_epilepsy.service.hypothesis_builder import HypothesisBuilder
from tvb_fit.tvb_epilepsy.service.model_configuration_builder import ModelConfigurationBuilder
from tvb_fit.tvb_epilepsy.service.lsa_service import LSAService
from tvb_fit.tvb_epilepsy.service.probabilistic_models_builders import SDEProbabilisticModelBuilder
from tvb_fit.tvb_epilepsy.top.scripts.hypothesis_scripts import from_hypothesis_to_model_config_lsa
from tvb_fit.tvb_epilepsy.top.scripts.pse_scripts import pse_from_lsa_hypothesis
from tvb_fit.tvb_epilepsy.top.scripts.simulation_scripts import from_model_configuration_to_simulation
from tvb_fit.tvb_epilepsy.top.scripts.fitting_data_scripts import prepare_seeg_observable_from_mne_file, \
    prepare_simulated_seeg_observable, prepare_signal_observable
from tvb_fit.tvb_epilepsy.io.h5_writer import H5Writer
from tvb_fit.tvb_epilepsy.io.h5_reader import H5Reader

logger = initialize_logger(__name__)


def path(name, base_path):
    return os.path.join(base_path, name + ".h5")


def set_model_config_LSA(head,
                         hyp,
                         reader,
                         config,
                         K_unscaled=K_UNSCALED_DEF,
                         tau1=TAU1_DEF,
                         tau0=TAU0_DEF,
                         pse_flag=True,
                         plotter=None,
Example n. 24
class SimulatorBuilder(object):
    logger = initialize_logger(__name__)

    def __init__(self, model_configuration, simulator="tvb"):
        self.model_config = deepcopy(model_configuration)
        self.simulator = simulator
        self.simulation_length = 2500
        self.fs = 16384.0
        self.fs_monitor = 1024.0

    @property
    def model_name(self):
        return self.model_config.model_name

    def set_model(self, model=None):
        if isinstance(model, Model):
            self.model_config.model_name = model._ui_name
            self.model_config = self.model_config.set_params_from_tvb_model(
                model)
        else:
            self.model_config.model_name = model
        self.model_config = self.model_config.update_initial_conditions()
        return self

    def set_simulation_length(self, simulation_length):
        self.simulation_length = simulation_length
        return self

    def set_fs(self, fs):
        self.fs = fs
        return self

    def set_fs_monitor(self, fs_monitor):
        self.fs_monitor = fs_monitor
        return self

    def set_time_scales(self):
        scale_fsavg = int(numpy.round(self.fs / self.fs_monitor))
        dt = 1000.0 / self.fs
        monitor_period = scale_fsavg * dt
        return dt, monitor_period

    def _check_noise_intensity_size(self, noise_intensity):
        nn = len(ensure_list(noise_intensity))
        if nn != 1 and nn != EPILEPTOR_MODEL_NVARS[self.model_name]:
            raise_value_error(
                "Noise intensity is neither of size 1 nor of size equal to the number of model variables, "
                "\n but of size: " + str(nn) + "!")

    def generate_white_noise(self, noise_intensity):
        self._check_noise_intensity_size(noise_intensity)
        noise_instance = noise.Additive(
            nsig=noise_intensity,
            random_stream=numpy.random.RandomState(seed=NOISE_SEED))
        noise_instance.configure_white(dt=1.0 / self.fs)
        return noise_instance

    def generate_colored_noise(self, noise_intensity, ntau, **kwargs):
        self._check_noise_intensity_size(noise_intensity)
        noise_instance = noise.Additive(
            ntau=ntau,
            nsig=noise_intensity,
            random_stream=numpy.random.RandomState(seed=NOISE_SEED))
        noise_shape = noise_instance.nsig.shape
        noise_instance.configure_coloured(dt=1.0 / self.fs, shape=noise_shape)
        return noise_instance

    def build_sim_settings(self):
        dt, monitor_period = self.set_time_scales()
        return SimulationSettings(
            simulation_length=self.simulation_length,
            integration_step=dt,
            noise_type=WHITE_NOISE,
            noise_ntau=0.0,
            noise_seed=NOISE_SEED,
            noise_intensity=model_noise_intensity_dict[self.model_name],
            monitor_sampling_period=monitor_period)

    def set_noise(self, sim_settings, **kwargs):
        # Check if the user provides a preconfigured noise instance to override
        noise = kwargs.get("noise", None)
        if isinstance(noise, Noise):
            self._check_noise_intensity_size(noise.nsig)
            sim_settings.noise_intensity = noise.nsig
            if noise.ntau == 0:
                sim_settings.noise_type = WHITE_NOISE
            else:
                sim_settings.noise_type = COLORED_NOISE
            sim_settings.noise_ntau = noise.ntau
        else:
            if isequal_string(sim_settings.noise_type, COLORED_NOISE):
                noise = self.generate_colored_noise(
                    sim_settings.noise_intensity, sim_settings.noise_ntau,
                    **kwargs)
            else:
                noise = self.generate_white_noise(sim_settings.noise_intensity)
            sim_settings.noise_ntau = noise.ntau
        return noise, sim_settings

    def generate_temporal_average_monitor(self, sim_settings):
        monitor = TemporalAverage()
        monitor.period = sim_settings.monitor_sampling_period
        monitor_vois = numpy.array(sim_settings.monitor_vois)
        n_model_vois = len(VOIS[self.model_name])
        monitor_vois = monitor_vois[monitor_vois < n_model_vois]
        if len(monitor_vois) == 0:
            monitor_vois = numpy.array(range(n_model_vois))
        monitor.variables_of_interest = numpy.array(monitor_vois)
        sim_settings.monitor_vois = numpy.array(monitor.variables_of_interest)
        return (monitor, ), sim_settings

    def set_tvb_monitor(self, sim_settings, monitor):
        sim_settings.monitor_sampling_period = monitor.period
        monitor_vois = numpy.union1d(monitor.variables_of_interest,
                                     sim_settings.monitor_vois)
        n_model_vois = len(VOIS[self.model_name])
        monitor_vois = monitor_vois[monitor_vois < n_model_vois]
        if len(monitor_vois) == 0:
            monitor.variables_of_interest = numpy.array(range(n_model_vois))
        else:
            monitor.variables_of_interest = monitor_vois
        sim_settings.monitor_vois = numpy.array(monitor.variables_of_interest)
        return (monitor, ), sim_settings

    def set_monitor(self, sim_settings, monitor=None):
        # Check if the user provides a preconfigured monitor instance (or a tuple/list of them) to override
        if isinstance(monitor, Monitor):
            return self.set_tvb_monitor(sim_settings, monitor)
        elif isinstance(monitor, (tuple, list)):
            return self.set_tvb_monitor(sim_settings, monitor[0])
        else:
            return self.generate_temporal_average_monitor(sim_settings)

    def build_simulator_TVB_from_model_sim_settings(self, connectivity,
                                                    sim_settings, **kwargs):
        monitors, sim_settings = self.set_monitor(sim_settings,
                                                  kwargs.get("monitors", None))

        noise, sim_settings = self.set_noise(sim_settings, **kwargs)

        simulator_instance = SimulatorTVB(self.model_config, connectivity,
                                          sim_settings)
        simulator_instance.config_simulation(noise, monitors)

        return simulator_instance, sim_settings

    def build_simulator_TVB(self, connectivity, **kwargs):

        sim_settings = self.build_sim_settings()

        return self.build_simulator_TVB_from_model_sim_settings(
            connectivity, sim_settings, **kwargs)

    def build_simulator_java_from_model_configuration(self, connectivity,
                                                      **kwargs):

        sim_settings = self.build_sim_settings()
        # sim_settings.noise_intensity = kwargs.get("noise_intensity", 1e-6)
        sim_settings.noise_intensity = kwargs.get(
            "noise_intensity", numpy.array([0., 0., 5e-6, 0.0, 5e-6, 0.]))

        simulator_instance = SimulatorJava(connectivity, self.model_config,
                                           sim_settings)

        return simulator_instance, sim_settings

    def build_simulator(self, connectivity, **kwargs):
        if isequal_string(self.simulator, "java"):
            return self.build_simulator_java_from_model_configuration(
                connectivity, **kwargs)
        else:
            return self.build_simulator_TVB(connectivity, **kwargs)
def pse_from_lsa_hypothesis(n_samples,
                            lsa_hypothesis,
                            model_connectivity,
                            model_configuration_builder,
                            lsa_service,
                            region_labels,
                            param_range=0.1,
                            global_coupling=[],
                            healthy_regions_parameters=[],
                            save_flag=False,
                            folder_res="",
                            filename=None,
                            logger=None,
                            config=Config(),
                            **kwargs):
    if not os.path.isdir(folder_res):
        folder_res = config.out.FOLDER_RES
    if logger is None:
        logger = initialize_logger(__name__)
    all_regions_indices = range(lsa_hypothesis.number_of_regions)
    disease_indices = lsa_hypothesis.regions_disease_indices
    healthy_indices = np.delete(all_regions_indices, disease_indices).tolist()
    pse_params = {"path": [], "indices": [], "name": [], "samples": []}
    sampler = ProbabilisticSampler(n_samples=n_samples,
                                   random_seed=kwargs.get("random_seed", None))
    # First build from the hypothesis the input parameters of the parameter search exploration.
    # These can be either originating from excitability, epileptogenicity or connectivity hypotheses,
    # or they can relate to the global coupling scaling (parameter K of the model configuration)
    for ii in range(len(lsa_hypothesis.x0_values)):
        pse_params["indices"].append([ii])
        pse_params["path"].append("hypothesis.x0_values")
        pse_params["name"].append(
            str(region_labels[lsa_hypothesis.x0_indices[ii]]) +
            " Excitability")

        # Now generate samples using a truncated normal distribution
        pse_params["samples"].append(
            sampler.generate_samples(
                parameter=(
                    lsa_hypothesis.x0_values[ii],  # loc
                    param_range / 3.0),  # scale
                probability_distribution="norm",
                high=MAX_DISEASE_VALUE,
                shape=(1, )))
        # pse_params["samples"].append(
        #     sampler.generate_samples(parameter=(lsa_hypothesis.x0_values[ii] - param_range,  # loc
        #                                         2 * param_range),                            # scale
        #                              probability_distribution="uniform",
        #                              high=MAX_DISEASE_VALUE, shape=(1,)))
    for ii in range(len(lsa_hypothesis.e_values)):
        pse_params["indices"].append([ii])
        pse_params["path"].append("hypothesis.e_values")
        pse_params["name"].append(
            str(region_labels[lsa_hypothesis.e_indices[ii]]) +
            " Epileptogenicity")

        # Now generate samples using a truncated normal distribution
        pse_params["samples"].append(
            sampler.generate_samples(
                parameter=(
                    lsa_hypothesis.e_values[ii],  # loc
                    param_range / 3.0),  # scale
                probability_distribution="norm",
                high=MAX_DISEASE_VALUE,
                shape=(1, )))
        # pse_params["samples"].append(
        #     sampler.generate_samples(parameter=(lsa_hypothesis.e_values[ii] - param_range,  # loc
        #                                         2 * param_range),  # scale
        #                              probability_distribution="uniform",
        #                              high=MAX_DISEASE_VALUE, shape=(1,)))
    for ii in range(len(lsa_hypothesis.w_values)):
        pse_params["indices"].append([ii])
        pse_params["path"].append("hypothesis.w_values")
        inds = linear_index_to_coordinate_tuples(lsa_hypothesis.w_indices[ii],
                                                 model_connectivity.shape)
        if len(inds) == 1:
            pse_params["name"].append(
                str(region_labels[inds[0][0]]) + "-" +
                str(region_labels[inds[0][1]]) + " Connectivity")
        else:
            pse_params["name"].append("Connectivity[" + str(inds) + "]")
        # Now generate samples using a truncated normal distribution
        pse_params["samples"].append(
            sampler.generate_samples(
                parameter=(
                    lsa_hypothesis.w_values[ii],  # loc
                    param_range * lsa_hypothesis.w_values[ii]),  # scale
                probability_distribution="norm",
                low=0.0,
                shape=(1, )))
    kloc = model_configuration_builder.K_unscaled[0]
    for val in global_coupling:
        pse_params["path"].append("model_configuration_builder.K_unscaled")
        inds = val.get("indices", all_regions_indices)
        if np.all(inds == all_regions_indices):
            pse_params["name"].append("Global coupling")
        else:
            pse_params["name"].append("Afferent coupling[" + str(inds) + "]")
        pse_params["indices"].append(inds)

        # Now generate samples using a uniform distribution
        pse_params["samples"].append(
            sampler.generate_samples(
                parameter=(
                    0.1 * kloc,  # loc
                    2 * kloc),  # scale
                probability_distribution="uniform",
                low=1.0,
                shape=(1, )))
        # pse_params["samples"].append(
        #     sampler.generate_samples(parameter=(kloc,  # loc
        #                                         30 * param_range),  # scale
        #                              probability_distribution="norm", low=0.0, shape=(1,)))
    pse_params_list = dicts_of_lists_to_lists_of_dicts(pse_params)
    # Add a random jitter to the healthy regions if required...:
    for val in healthy_regions_parameters:
        inds = val.get("indices", healthy_indices)
        name = val.get("name", "x0_values")
        n_params = len(inds)
        samples = sampler.generate_samples(
            parameter=(
                0.0,  # loc
                param_range / 10),  # scale
            probability_distribution="norm",
            shape=(n_params, ))
        for ii in range(n_params):
            pse_params_list.append({
                "path": "model_configuration_builder." + name,
                "samples": samples[ii],
                "indices": [inds[ii]],
                "name": name
            })

    # Now run pse service to generate output samples:
    pse = LSAPSEService(hypothesis=lsa_hypothesis, params_pse=pse_params_list)
    pse_results, execution_status = pse.run_pse(model_connectivity, False,
                                                model_configuration_builder,
                                                lsa_service)
    logger.info(pse.__repr__())
    pse_results = list_of_dicts_to_dicts_of_ndarrays(pse_results)
    for key in list(pse_results.keys()):
        pse_results[key + "_mean"] = np.mean(pse_results[key], axis=0)
        pse_results[key + "_std"] = np.std(pse_results[key], axis=0)
    if save_flag:
        if not (isinstance(filename, basestring)):
            filename = "LSA_PSA"
        writer = H5Writer()
        writer.write_pse_service(
            pse, os.path.join(folder_res, filename + "_pse_service.h5"))
        writer.write_dictionary(pse_results,
                                os.path.join(folder_res, filename + ".h5"))

    return pse_results, pse_params_list
class SamplerBase(object):
    logger = initialize_logger(__name__)

    def __init__(self, n_samples=10):
        self.sampler = None
        self.sampling_module = ""
        self.n_samples = n_samples
        self.shape = (1, n_samples)
        self.stats = {}

    def __repr__(self):
        d = {
            "01. Sampling module": self.sampling_module,
            "02. Sampler": self.sampler,
            "03. Number of samples": self.n_samples,
            "04. Samples' shape": self.shape,
        }
        return formal_repr(
            self, d) + "\n05. Resulting statistics: " + dict_str(self.stats)

    def adjust_shape(self, parameter_shape):
        shape = []
        for p in parameter_shape:
            shape.append(p)
        shape.append(self.n_samples)
        self.shape = shape

    def check_for_infinite_bounds(self, low, high):
        low = np.array(low)
        high = np.array(high)
        id = (low == -np.inf)
        if np.any(id):
            self.logger.warning(
                "Sampling is not possible with infinite bounds! Setting lowest system value for low!"
            )
            low[id] = -CalculusConfig.MAX_SINGLE_VALUE
        id = (high == np.inf)
        if np.any(id):
            self.logger.warning(
                "Sampling is not possible with infinite bounds! Setting highest system value for high!"
            )
            high[id] = CalculusConfig.MAX_SINGLE_VALUE
        return low, high

    def check_size(self, low, high, parameter_shape):
        n_params = shape_to_size((low + high).shape)
        shape_size = shape_to_size(parameter_shape)
        if shape_size > n_params:
            self.logger.warning(
                "Input parameters' size (" + str(n_params) +
                ") smaller than the one implied by the input parameters' shape ("
                + str(shape_size) + ")!" +
                "\nModifying input parameters' size accordingly!")
            n_params = shape_size
        elif n_params > shape_size:
            self.logger.warning(
                "Input parameters' size (" + str(n_params) +
                ") larger than the one implied by the input parameters' shape ("
                + str(shape_size) + ")!" +
                "\nModifying input parameters' shape accordingly!: " +
                str((n_params, )))
            parameter_shape = (n_params, )
        i1 = np.ones(parameter_shape)
        low = low * i1
        high = high * i1
        return low, high, n_params, parameter_shape

    def compute_stats(self, samples):
        return OrderedDict([("mean", samples.mean(axis=-1)),
                            ("median", scp.median(samples, axis=-1)),
                            ("mode", scp.stats.mode(samples, axis=-1)[0]),
                            ("std", samples.std(axis=-1)),
                            ("var", samples.var(axis=-1)),
                            ("kurt", ss.kurtosis(samples, axis=-1)),
                            ("skew", ss.skew(samples, axis=-1)),
                            ("min", samples.min(axis=-1)),
                            ("max", samples.max(axis=-1)),
                            ("1%", np.percentile(samples, 1, axis=-1)),
                            ("5%", np.percentile(samples, 5, axis=-1)),
                            ("10%", np.percentile(samples, 10, axis=-1)),
                            ("p25", np.percentile(samples, 25, axis=-1)),
                            ("p50", np.percentile(samples, 50, axis=-1)),
                            ("p75", np.percentile(samples, 75, axis=-1)),
                            ("p90", np.percentile(samples, 90, axis=-1)),
                            ("p95", np.percentile(samples, 95, axis=-1)),
                            ("p99", np.percentile(samples, 99, axis=-1))])

    def generate_samples(self, stats=False, parameter=(), **kwargs):
        samples = self.sample(parameter, **kwargs)
        self.stats = self.compute_stats(samples)
        if stats:
            return samples, self.stats
        else:
            return samples
Example n. 27
class Head(object):
    """
    One patient virtualization. Fully configured for defining hypothesis on it.
    """
    logger = initialize_logger(__name__)

    def __init__(self,
                 connectivity,
                 cortical_surface=None,
                 subcortical_surface=None,
                 cortical_region_mapping=np.array([]),
                 subcortical_region_mapping=np.array([]),
                 vm=np.array([]),
                 t1=np.array([]),
                 name='',
                 **kwargs):
        self.connectivity = connectivity
        self.cortical_surface = cortical_surface
        self.subcortical_surface = subcortical_surface
        self.cortical_region_mapping = cortical_region_mapping
        self.subcortical_region_mapping = subcortical_region_mapping
        self.volume_mapping = vm
        self.t1_background = t1
        self.sensorsSEEG = OrderedDict()
        self.sensorsEEG = OrderedDict()
        self.sensorsMEG = OrderedDict()
        for s_type in SensorTypes:
            self.set_sensors(kwargs.get("sensors" + s_type.value),
                             s_type=s_type)
        if len(name) == 0:
            self.name = 'Head' + str(self.number_of_regions)
        else:
            self.name = name

    @property
    def number_of_regions(self):
        return self.connectivity.number_of_regions

    def filter_regions(self, filter_arr):
        return self.connectivity.region_labels[filter_arr]

    def __repr__(self):
        d = {
            "1. name":
            self.name,
            "2. connectivity":
            self.connectivity,
            "3. cortical region mapping":
            reg_dict(self.cortical_region_mapping,
                     self.connectivity.region_labels),
            "4. subcortical region mapping":
            reg_dict(self.subcortical_region_mapping,
                     self.connectivity.region_labels),
            "5. VM":
            reg_dict(self.volume_mapping, self.connectivity.region_labels),
            "6. cortical surface":
            self.cortical_surface,
            "7. subcortical surface":
            self.cortical_surface,
            "8. T1":
            self.t1_background,
            "9. SEEG":
            self.sensorsSEEG,
            "10. EEG":
            self.sensorsEEG,
            "11. MEG":
            self.sensorsMEG
        }
        return formal_repr(self, sort_dict(d))

    def __str__(self):
        return self.__repr__()

    def get_sensors(self, s_type=SensorTypes.TYPE_SEEG):
        if s_type in list(SensorTypes):
            return getattr(self, "sensors" + s_type.value)
        else:
            raise_value_error("Invalid input sensor type " + str(s_type))

    def set_sensors(self,
                    input_sensors,
                    s_type=SensorTypes.TYPE_SEEG,
                    reset=False):
        if input_sensors is None:
            return
        sensors = self.get_sensors(s_type)
        if reset or len(sensors) == 0:
            sensors = OrderedDict()
        for s_name, s in input_sensors.items():
            if isinstance(s, Sensors) and (s.s_type == s_type):
                if s.gain_matrix is None or s.gain_matrix.shape != (
                        s.number_of_sensors, self.number_of_regions):
                    self.logger.warning(
                        "No correctly sized gain matrix found in sensors!")
                sensors[s_name] = s
            else:
                if s is not None:
                    raise_value_error(
                        "Input sensors:\n" + str(s) +
                        "\nis not a valid Sensors object of type " +
                        str(s_type) + "!")
        setattr(self, "sensors" + s_type.value, sensors)

    def get_sensors_by_name(self, name, s_type=SensorTypes.TYPE_SEEG):
        sensors = self.get_sensors(s_type)
        if sensors is None:
            return sensors
        else:
            out_sensors = OrderedDict()
            for s_name, s in sensors.items():
                if s_name.lower().find(name.lower()) >= 0:
                    out_sensors[s.name] = s
            if len(out_sensors) == 0:
                return None
            elif len(out_sensors) == 1:
                return out_sensors.values()[0]
            else:
                return out_sensors

    def sensors_name_to_id(self, name, s_type=SensorTypes.TYPE_SEEG):
        sensors = self.get_sensors(s_type)
        if sensors is None:
            return None
        else:
            out_sensor_id = None
            for sensor_id, (s_name, s) in enumerate(sensors.items()):
                if s_name.lower() == name.lower():
                    return sensor_id
            return out_sensor_id

    def get_sensors_by_index(self, s_type=SensorTypes.TYPE_SEEG, sensor_ids=0):
        sensors = self.get_sensors(s_type)
        if sensors is None:
            return sensors
        else:
            sensors = sensors.values()
            out_sensors = []
            sensors = ensure_list(sensors)
            for iS, s in enumerate(sensors):
                if np.in1d(iS, sensor_ids):
                    out_sensors.append(sensors[iS])
            if len(out_sensors) == 0:
                return None
            elif len(out_sensors) == 1:
                return out_sensors[0]
            else:
                return out_sensors
Example n. 28
class StanInterface(object):
    __metaclass__ = ABCMeta

    logger = initialize_logger(__name__)

    def __init__(self,
                 model_name="",
                 model=None,
                 model_code=None,
                 model_dir="",
                 model_code_path="",
                 model_data_path="",
                 fitmethod="sampling",
                 config=None):
        self.fitmethod = fitmethod
        self.model_name = model_name
        self.model = model
        self.config = config or Config()
        if not os.path.isdir(model_dir):
            model_dir = self.config.out.FOLDER_RES
        if not os.path.isdir(model_dir):
            os.mkdir(model_dir)
        self.model_path = os.path.join(model_dir, self.model_name)
        self.model_code = model_code
        if os.path.isfile(model_code_path):
            self.model_code_path = model_code_path
        else:
            self.model_code_path = self.model_path + ".stan"
        if model_data_path == "":
            model_data_path = os.path.join(model_dir, "ModelData.h5")
        self.model_data_path = model_data_path
        self.compilation_time = 0.0

    @abstractmethod
    def compile_stan_model(self, save_model=True, **kwargs):
        pass

    @abstractmethod
    def set_model_from_file(self, **kwargs):
        pass

    @abstractmethod
    def fit(self, model_data, **kwargs):
        pass

    def write_model_data_to_file(self, model_data, reset_path=False, **kwargs):
        model_data_path = kwargs.get("model_data_path", self.model_data_path)
        if reset_path:
            self.model_data_path = model_data_path
        extension = model_data_path.split(".", -1)[-1]
        if isequal_string(extension, "npy"):
            np.save(model_data_path, model_data)
        elif isequal_string(extension, "mat"):
            savemat(model_data_path, model_data)
        elif isequal_string(extension, "pkl"):
            with open(model_data_path, 'wb') as f:
                pickle.dump(model_data, f)
        elif isequal_string(extension, "R"):
            rdump(model_data_path, model_data)
        else:
            H5Writer().write_dictionary(
                model_data,
                os.path.join(os.path.dirname(model_data_path),
                             os.path.basename(model_data_path)))

    def load_model_data_from_file(self, reset_path=False, **kwargs):
        model_data_path = kwargs.get("model_data_path", self.model_data_path)
        if reset_path:
            self.model_data_path = model_data_path
        extension = model_data_path.split(".", -1)[-1]
        if isequal_string(extension, "R"):
            model_data = rload(model_data_path)
        elif isequal_string(extension, "npy"):
            model_data = np.load(model_data_path).item()
        elif isequal_string(extension, "mat"):
            model_data = loadmat(model_data_path)
        elif isequal_string(extension, "pkl"):
            with open(model_data_path, 'rb') as f:
                model_data = pickle.load(f)
        elif isequal_string(extension, "h5"):
            model_data = H5Reader().read_dictionary(model_data_path)
        else:
            raise_not_implemented_error(
                "model_data files (" + model_data_path +
                ") that are not one of (.R, .npy, .mat, .pkl, .h5) cannot be read!")
        return model_data

    def set_model_data(self, debug=0, simulate=0, **kwargs):
        self.model_data_path = kwargs.get("model_data_path",
                                          self.model_data_path)
        model_data = kwargs.pop("model_data", None)
        if not isinstance(model_data, dict):
            model_data = self.load_model_data_from_file(
                model_data_path=self.model_data_path)
        # Remove string entries, iterating over a copy so the dict can be safely mutated:
        for key, val in list(model_data.items()):
            if isinstance(val, basestring):
                del model_data[key]
        # -1 for no debugging at all
        # 0 for printing only scalar parameters
        # 1 for printing scalar and vector parameters
        # 2 for printing all (scalar, vector and matrix) parameters
        model_data["DEBUG"] = debug
        # > 0 for simulating without using the input observation data:
        model_data["SIMULATE"] = simulate
        model_data = sort_dict(model_data)
        return model_data

    def set_or_compile_model(self, **kwargs):
        try:
            self.set_model_from_file(**kwargs)
        except:
            self.logger.info("Trying to compile model from file: " +
                             str(self.model_code_path) + str("!"))
            self.compile_stan_model(save_model=kwargs.get("save_model", True),
                                    **kwargs)
        copyfile(
            self.model_code_path,
            os.path.join(os.path.dirname(self.model_path),
                         os.path.basename(self.model_code_path)))

    def read_output_samples(self, output_filepath, **kwargs):
        samples = ensure_list(
            parse_csv(output_filepath.replace(".csv", "*"),
                      merge=kwargs.pop("merge_outputs", False)))
        if len(samples) == 1:
            return samples[0]
        return samples

    def compute_estimates_from_samples(self, samples):
        ests = []
        for chain_or_run_samples in ensure_list(samples):
            est = {}
            for pkey, pval in chain_or_run_samples.items():
                try:
                    est[pkey +
                        "_low"], est[pkey], est[pkey + "_std"] = describe(
                            chain_or_run_samples[pkey])[1:4]
                    est[pkey + "_high"] = est[pkey + "_low"][1]
                    est[pkey + "_low"] = est[pkey + "_low"][0]
                    est[pkey + "_std"] = np.sqrt(est[pkey + "_std"])
                    for skey in [
                            pkey, pkey + "_low", pkey + "_high", pkey + "_std"
                    ]:
                        est[skey] = np.squeeze(est[skey])
                except:
                    est[pkey] = chain_or_run_samples[pkey]
            ests.append(sort_dict(est))
        if len(ests) == 1:
            return ests[0]
        else:
            return ests

    def compute_information_criteria(self,
                                     samples,
                                     nparams=None,
                                     nsamples=None,
                                     ndata=None,
                                     parameters=[],
                                     skip_samples=0,
                                     merge_chains_or_runs_flag=False,
                                     log_like_str='log_likelihood'):
        """

        :param samples: a dictionary of stan outputs or a list of dictionaries for multiple runs/chains
        :param nparams: number of model parameters, it can be inferred from parameters if None
        :param nsamples: number of samples, it can be inferred from loglikelihood if None
        :param ndata: number of data points, it can be inferred from loglikelihood if None
        :param parameters: a list of parameter names, necessary for dic metric computations and in case nparams is None,
                           as well as for aicc, aic and bic computation
        :param merge_chains_or_runs_flag: logical flag for merging seperate chains/runs, default is True
        :param log_like_str: the name of the log likelihood output of stan, default ''log_likelihood
        :return:
        """

        import sys
        sys.path.insert(0, self.config.generic.MODEL_COMPARISON_PATH)
        from information_criteria.ComputeIC import maxlike, aicc, aic, bic, dic, waic
        from information_criteria.ComputePSIS import psisloo

        # if self.fitmethod.find("opt") >= 0:
        #     warning("No model comparison can be computed for optimization method!")
        #     return None

        samples = ensure_list(samples)
        if merge_chains_or_runs_flag and len(samples) > 1:
            samples = ensure_list(
                merge_samples(samples, skip_samples, flatten=True))
            skip_samples = 0

        results = []
        for sample in samples:
            log_likelihood = -1 * sample[log_like_str][skip_samples:]
            log_lik_shape = log_likelihood.shape
            if len(log_lik_shape) > 1:
                target_shape = log_lik_shape[1:]
            else:
                target_shape = (1, )
            if nsamples is None:
                nsamples = log_lik_shape[0]
            elif nsamples != log_likelihood.shape[0]:
                warning("nsamples (" + str(nsamples) +
                        ") is not equal to likelihood.shape[0] (" +
                        str(log_lik_shape[0]) + ")!")

            log_likelihood = np.reshape(log_likelihood, (log_lik_shape[0], -1))
            if len(log_likelihood.shape) > 1:
                ndata_real = np.maximum(log_likelihood.shape[1], 1)
            else:
                ndata_real = 1
            if ndata is None:
                ndata = ndata_real
            elif ndata != ndata_real:
                warning("ndata (" + str(ndata) +
                        ") is not equal to likelihood.shape[1] (" +
                        str(ndata_real) + ")!")

            result = maxlike(log_likelihood)

            if len(parameters) == 0:
                parameters = [
                    param for param in sample.keys()
                    if param.find("_star") >= 0
                ]
            if len(parameters) > 0:
                nparams_real = 0
                zscore_params = []
                for p in parameters:
                    pval = sample[p][skip_samples:]
                    pzscore = np.array(
                        (pval - np.mean(pval, axis=0)) / np.std(pval, axis=0))
                    if len(pzscore.shape) > 2:
                        pzscore = np.reshape(pzscore, (pzscore.shape[0], -1))
                    zscore_params.append(pzscore)
                    if len(pzscore.shape) > 1:
                        nparams_real += np.maximum(pzscore.shape[1], 1)
                    else:
                        nparams_real += 1
                if nparams is None:
                    nparams = nparams_real
                elif nparams != nparams_real:
                    warning(
                        "nparams (" + str(nparams) +
                        ") is not equal to number of parameters included in the dic computation ("
                        + str(nparams_real) + ")!")
                # TODO: find out how to reduce dic to 1 value, from 1 value per parameter. mean(.) for the moment:
                result['dic'] = np.mean(dic(log_likelihood, zscore_params))
            else:
                warning(
                    "The list of parameter names is empty and no _star parameters were found! Skipping dic computation!"
                )

            if nparams is not None:
                result['aicc'] = aicc(log_likelihood, nparams, ndata)
                result['aic'] = aic(log_likelihood, nparams)
                result['bic'] = bic(log_likelihood, nparams, ndata)
            else:
                warning(
                    "Unknown number of parameters! No computation of aicc, aic, bic!"
                )

            result.update(waic(log_likelihood))

            if nsamples > 1:
                result.update(psisloo(log_likelihood))
                result["loos"] = np.reshape(result["loos"], target_shape)
                result["ks"] = np.reshape(result["ks"], target_shape)
            else:
                result.pop('p_waic', None)

            for metric, value in result.items():
                result[metric] = value * np.ones(1, )

            results.append(result)

        if len(results) == 1:
            return results[0]
        else:
            return list_of_dicts_to_dicts_of_ndarrays(results)

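    # A usage sketch in comments (hypothetical names, kept as comments so the
    # class body stays valid): assuming `service` is an instance of this class
    # and `stan_samples` is a stan output dictionary holding a "log_likelihood"
    # entry and "_star"-suffixed parameters,
    #     ics = service.compute_information_criteria(stan_samples, skip_samples=500)
    # would return a dictionary with entries such as ics["aic"], ics["bic"],
    # ics["dic"], ics["waic"], ics["loos"] and ics["ks"].
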
    def compare_models(self,
                       samples,
                       nparams=None,
                       nsamples=None,
                       ndata=None,
                       parameters=[],
                       skip_samples=0,
                       merge_chains_or_runs_flag=False,
                       log_like_str='log_likelihood'):
        """

        :param samples: a dictionary of model's names and samples
        :param nparams: a number or list of numbers of parameters,
                       it can be inferred from parameters list or from _star parameters
        :param nsamples: a number or lists of numbers of samples, it can be inferred from loglikelihood if None
        :param ndata: a number or lists of numbers of data point, it can be inferred from loglikelihood if None
        :param parameters: a list (or list of lists) of parameter names,
                          it can be inferred from parameters list or from _star parameters
        :param merge_chains_or_runs_flag: logical flag for merging seperate chains/runs, default is True
        :param log_like_str: the name of the log likelihood output of stan, default ''log_likelihood
        :return:
        """
        def check_number_of_inputs(nmodels, input, input_str):
            input = ensure_list(input)
            ninput = len(input)
            if ninput != nmodels:
                if ninput == 1:
                    input *= nmodels
                else:
                    raise_value_error(
                        "The size of input " + input_str + " (" + str(ninput) +
                        ") is neither equal to the number of models (" +
                        str(nmodels) + ") nor equal to 1!")
            return input

        nmodels = len(samples)

        n_parameters = len(parameters)
        if n_parameters > 0 and isinstance(parameters[0], (list, tuple)):
            parameters = check_number_of_inputs(nmodels, parameters,
                                                "number of parameters")
        else:
            # A flat (or empty) list of parameter names applies to all models alike:
            parameters = nmodels * [parameters]
        nparams = check_number_of_inputs(nmodels, nparams,
                                         "number of parameters")
        nsamples = check_number_of_inputs(nmodels, nsamples,
                                          "number of samples")
        ndata = check_number_of_inputs(nmodels, ndata, "number of data points")
        skip_samples = check_number_of_inputs(nmodels, skip_samples,
                                              "skip_samples")
        log_like_str = check_number_of_inputs(nmodels, log_like_str,
                                              "log_like_str")

        results = {}

        for i_model, (model_name, model_samples) in enumerate(samples.items()):
            results[model_name] = \
               self.compute_information_criteria(model_samples, nparams[i_model], nsamples[i_model], ndata[i_model],
                                                 parameters[i_model], skip_samples[i_model], merge_chains_or_runs_flag,
                                                 log_like_str[i_model])

        # Return result into a dictionary with metrics at the upper level and models at the lower one
        return switch_levels_of_dicts_of_dicts(results)
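
# A minimal usage sketch (hypothetical names, not part of the original code):
# assuming `service` is the fitting-service instance exposing the two methods
# above, and `samples_m1`, `samples_m2` are stan output dictionaries (or lists
# of them for multiple chains/runs), each holding a "log_likelihood" entry.
def example_compare_two_models(service, samples_m1, samples_m2):
    results = service.compare_models({"model1": samples_m1,
                                      "model2": samples_m2},
                                     merge_chains_or_runs_flag=True)
    # Metrics sit at the upper level and model names at the lower one, e.g.:
    # results["aic"]["model1"], results["waic"]["model2"], ...
    return results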
Esempio n. 29
0
class ProbabilityDistribution(object):
    __metaclass__ = ABCMeta

    logger = initialize_logger(__name__)

    type = ""
    n_params = 0
    __p_shape = ()
    __p_size = 0
    constraint_string = ""
    __mean = None
    __median = None
    __mode = None
    __var = None
    __std = None
    __skew = None
    __kurt = None
    scipy_name = ""
    numpy_name = ""

    @property
    def mean(self):
        return self.__mean

    @property
    def median(self):
        return self.__median

    @property
    def mode(self):
        return self.__mode

    @property
    def var(self):
        return self.__var

    @property
    def std(self):
        return self.__std

    @property
    def skew(self):
        return self.__skew

    @property
    def kurt(self):
        return self.__kurt

    @property
    def p_shape(self):
        return self.__p_shape

    @property
    def p_size(self):
        return self.__p_size

    @abstractmethod
    def __init__(self):
        pass

    def _repr(self, d=None):
        # Avoid a shared mutable default argument across calls:
        if d is None:
            d = OrderedDict()
        for key in [
                "type", "n_params", "p_shape", "mean", "median", "mode", "var",
                "std", "skew", "kurt", "scipy_name", "numpy_name"
        ]:
            d.update({key: getattr(self, key)})
        d.update({"pdf_params": str(self.pdf_params())})
        d.update({"constraint": str(self.constraint())})
        return d

    def __repr__(self):
        return formal_repr(self, self._repr())

    def __str__(self):
        return self.__repr__()

    def __update_params__(self,
                          loc=0.0,
                          scale=1.0,
                          use="scipy",
                          check_constraint=True,
                          **params):
        if len(params) == 0:
            params = self.pdf_params()
        self.__set_params__(**params)
        # params = self.__squeeze_parameters__(update=False, loc=loc, scale=scale, use=use)
        # self.__set_params__(**params)
        self.__p_shape = self.__update_shape__(loc, scale)
        self.__p_size = shape_to_size(self.p_shape)
        self.n_params = len(self.pdf_params())
        if check_constraint and not (self.__check_constraint__()):
            raise_value_error("Constraint for " + self.type +
                              " distribution " + self.constraint_string +
                              "\nwith parameters " + str(self.pdf_params()) +
                              " is not satisfied!")
        self.__mean = self._calc_mean(loc, scale, use)
        self.__median = self._calc_median(loc, scale, use)
        self.__mode = self._calc_mode(loc, scale)
        self.__var = self._calc_var(loc, scale, use)
        self.__std = self._calc_std(loc, scale, use)
        self.__skew = self._calc_skew()
        self.__kurt = self._calc_kurt()

    def __set_params__(self, **params):
        for p_key, p_val in params.items():
            setattr(self, p_key, p_val)

    def __check_constraint__(self):
        return np.all(self.constraint() > 0)

    def __update_shape__(self, loc=0.0, scale=1.0):
        # Check whether loc, scale and all pdf parameters broadcast with the
        # current p_shape; if any in-place multiplication fails, recompute it.
        try:
            shape = loc * scale * np.ones(self.p_shape)
            for p in self.pdf_params().values():
                shape *= p
            return self.p_shape
        except Exception:
            return self.__calc_shape__(loc, scale)

    def __calc_shape__(self, loc=0.0, scale=1.0, params=None):
        if not (isinstance(params, dict)):
            params = self.pdf_params()
            p_shape = self.p_shape
        else:
            p_shape = ()
        psum = np.zeros(p_shape) * loc * scale
        for pval in params.values():
            psum = psum + np.array(pval, dtype='f')
        return psum.shape

    def __shape_parameters__(self,
                             shape=None,
                             loc=0.0,
                             scale=1.0,
                             use="scipy"):
        if isinstance(shape, tuple):
            self.__p_shape = shape
        i1 = np.ones((np.ones(self.p_shape) * loc * scale).shape)
        for p_key in self.pdf_params().keys():
            try:
                setattr(self, p_key, getattr(self, p_key) * i1)
            except Exception:
                try:
                    setattr(self, p_key,
                            np.reshape(getattr(self, p_key), self.p_shape))
                except Exception:
                    raise_value_error(
                        "Neither broadcasting nor reshaping worked for distribution parameter "
                        + p_key + " towards shape " + str(self.p_shape) +
                        "\nfrom value " + str(getattr(self, p_key)) + "!")
        self.__update_params__(loc, scale, use)

    def __squeeze_parameters__(self,
                               update=False,
                               loc=0.0,
                               scale=1.0,
                               use="scipy"):
        params = self.pdf_params()
        for p_key, p_val in params.items():
            params.update({p_key: squeeze_array_to_scalar(p_val)})
        if update:
            self.__set_params__(**params)
            self.__update_params__(loc, scale, use)
        return params

    @abstractmethod
    def pdf_params(self):
        pass

    @abstractmethod
    def update_params(self, loc=0.0, scale=1.0, use="scipy", **params):
        pass

    @abstractmethod
    def _scipy(self, loc=0.0, scale=1.0):
        pass

    def _scipy_method(self, method, loc=0.0, scale=1.0, *args, **kwargs):
        return getattr(self._scipy(loc, scale), method)(*args, **kwargs)

    @abstractmethod
    def _numpy(self, loc=0.0, scale=1.0, size=()):
        pass

    @abstractmethod
    def constraint(self):
        pass

    @abstractmethod
    def calc_mean_manual(self, loc=0.0, scale=1.0):
        pass

    @abstractmethod
    def calc_median_manual(self, loc=0.0, scale=1.0):
        pass

    @abstractmethod
    def calc_mode_manual(self, loc=0.0, scale=1.0):
        pass

    @abstractmethod
    def calc_var_manual(self, loc=0.0, scale=1.0):
        pass

    @abstractmethod
    def calc_std_manual(self, loc=0.0, scale=1.0):
        pass

    @abstractmethod
    def calc_skew_manual(self, loc=0.0, scale=1.0):
        pass

    @abstractmethod
    def calc_kurt_manual(self, loc=0.0, scale=1.0):
        pass

    def _calc_mean(self, loc=0.0, scale=1.0, use="scipy"):
        if isequal_string(use, "scipy"):
            return self._scipy(loc, scale).stats(moments="m")
        else:
            return self.calc_mean_manual(loc, scale)

    def _calc_median(self, loc=0.0, scale=1.0, use="scipy"):
        if isequal_string(use, "scipy"):
            return self._scipy(loc, scale).median()
        else:
            return self.calc_median_manual(loc, scale)

    def _calc_mode(self, loc=0.0, scale=1.0, use="manual"):
        # TODO: find a more explicit solution but without printing so many warnings!
        # if isequal_string(use, "scipy"):
        #     self.logger.warning("No scipy calculation for mode! Switching to manual -following wikipedia- calculation!")
        return self.calc_mode_manual(loc, scale)

    def _calc_var(self, loc=0.0, scale=1.0, use="scipy"):
        if isequal_string(use, "scipy"):
            return self._scipy(loc, scale).var()
        else:
            return self.calc_var_manual(loc, scale)

    def _calc_std(self, loc=0.0, scale=1.0, use="scipy"):
        if isequal_string(use, "scipy"):
            return self._scipy(loc, scale).std()
        else:
            return self.calc_std_manual(loc, scale)

    def _calc_skew(self, loc=0.0, scale=1.0, use="scipy"):
        if isequal_string(use, "scipy"):
            return self._scipy(loc, scale).stats(moments="s")
        else:
            return self.calc_skew_manual(loc, scale)

    def _calc_kurt(self, loc=0.0, scale=1.0, use="scipy"):
        if isequal_string(use, "scipy"):
            return self._scipy(loc, scale).stats(moments="k")
        else:
            return self.calc_kurt_manual(loc, scale)
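
# A minimal concrete-subclass sketch (hypothetical, for illustration only),
# assuming scipy.stats and numpy.random are importable: it shows which
# abstract members a distribution must provide for the template methods of
# ProbabilityDistribution above to work.
from collections import OrderedDict

import numpy as np
import numpy.random as nr
import scipy.stats as ss


class ExampleNormalDistribution(ProbabilityDistribution):

    def __init__(self, mu=0.0, sigma=1.0):
        self.type = "normal"
        self.scipy_name = "norm"
        self.numpy_name = "normal"
        self.constraint_string = "sigma > 0"
        self.mu = mu
        self.sigma = sigma
        self.__update_params__(mu=mu, sigma=sigma)

    def pdf_params(self):
        return OrderedDict([("mu", self.mu), ("sigma", self.sigma)])

    def update_params(self, loc=0.0, scale=1.0, use="scipy", **params):
        self.__update_params__(loc, scale, use,
                               mu=params.get("mu", self.mu),
                               sigma=params.get("sigma", self.sigma))

    def constraint(self):
        # Positive wherever the constraint sigma > 0 is satisfied:
        return np.array(self.sigma) - np.finfo(np.float64).eps

    def _scipy(self, loc=0.0, scale=1.0):
        return ss.norm(loc=self.mu + loc, scale=self.sigma * scale)

    def _numpy(self, loc=0.0, scale=1.0, size=()):
        return nr.normal(self.mu + loc, self.sigma * scale, size=size)

    def calc_mean_manual(self, loc=0.0, scale=1.0):
        return self.mu + loc

    def calc_median_manual(self, loc=0.0, scale=1.0):
        return self.mu + loc

    def calc_mode_manual(self, loc=0.0, scale=1.0):
        return self.mu + loc

    def calc_var_manual(self, loc=0.0, scale=1.0):
        return (self.sigma * scale) ** 2

    def calc_std_manual(self, loc=0.0, scale=1.0):
        return self.sigma * scale

    def calc_skew_manual(self, loc=0.0, scale=1.0):
        return 0.0

    def calc_kurt_manual(self, loc=0.0, scale=1.0):
        # Excess kurtosis of the normal distribution:
        return 0.0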
Esempio n. 30
0
class ProbabilisticSampler(SamplerBase):
    logger = initialize_logger(__name__)

    def __init__(self,
                 n_samples=10,
                 sampling_module="scipy",
                 random_seed=None):
        super(ProbabilisticSampler, self).__init__(n_samples)
        self.random_seed = random_seed
        self.sampling_module = sampling_module.lower()

    def __repr__(self):
        d = {
            "01. Sampling module": self.sampling_module,
            "02. Sampler": self.sampler,
            "03. Number of samples": self.n_samples,
            "04. Samples' p_shape": self.shape,
            "05. Random seed": self.random_seed,
        }
        return formal_repr(
            self, d) + "\n06. Resulting statistics: " + dict_str(self.stats)

    def __str__(self):
        return self.__repr__()

    def _truncated_distribution_sampling(self, trunc_limits, size):
        # Following: https://stackoverflow.com/questions/25141250/
        # how-to-truncate-a-numpy-scipy-exponential-distribution-in-an-efficient-way
        # TODO: to have distributions parameters valid for the truncated distributions instead for the original one
        # pystan might be needed for that...
        rnd_cdf = nr.uniform(
            self.sampler.cdf(x=trunc_limits.get("low", -np.inf)),
            self.sampler.cdf(x=trunc_limits.get("high", np.inf)),
            size=size)
        return self.sampler.ppf(q=rnd_cdf)

    def sample(self, parameter=(), loc=0.0, scale=1.0, **kwargs):
        nr.seed(self.random_seed)
        if isinstance(parameter, (ProbabilisticParameterBase,
                                  TransformedProbabilisticParameterBase)):
            parameter_shape = parameter.p_shape
            low = parameter.low
            high = parameter.high
            prob_distr = parameter
            loc = parameter.loc
            scale = parameter.scale
        else:
            parameter_shape = kwargs.pop("shape", (1, ))
            low = kwargs.pop("low", -CalculusConfig.MAX_SINGLE_VALUE)
            high = kwargs.pop("high", CalculusConfig.MAX_SINGLE_VALUE)
            prob_distr = kwargs.pop("probability_distribution", "uniform")
        low, high = self.check_for_infinite_bounds(low, high)
        low, high, n_outputs, parameter_shape = self.check_size(
            low, high, parameter_shape)
        self.adjust_shape(parameter_shape)
        out_shape = tuple([self.n_samples] + list(self.shape)[:-1])
        if np.any(low > -CalculusConfig.MAX_SINGLE_VALUE) or np.any(
                high < CalculusConfig.MAX_SINGLE_VALUE):
            if not (isequal_string(self.sampling_module, "scipy")):
                self.logger.warning(
                    "Switching to scipy for truncated distributions' sampling!"
                )
            self.sampling_module = "scipy"
            if isinstance(prob_distr, str):
                self.sampler = getattr(ss, prob_distr)(*parameter, **kwargs)
                samples = self._truncated_distribution_sampling(
                    {
                        "low": low,
                        "high": high
                    }, out_shape) * scale + loc
            elif isinstance(prob_distr,
                            (ProbabilisticParameterBase,
                             TransformedProbabilisticParameterBase)):
                self.sampler = prob_distr.scipy()
                samples = self._truncated_distribution_sampling(
                    {
                        "low": low,
                        "high": high
                    }, out_shape)
        elif self.sampling_module.find("scipy") >= 0:
            if isinstance(prob_distr, str):
                self.sampler = getattr(ss, prob_distr)(*parameter, **kwargs)
                samples = self.sampler.rvs(size=out_shape) * scale + loc
            elif isinstance(prob_distr,
                            (ProbabilisticParameterBase,
                             TransformedProbabilisticParameterBase)):
                self.sampler = prob_distr.scipy(**kwargs)
                samples = self.sampler.rvs(size=out_shape)
        elif self.sampling_module.find("numpy") >= 0:
            if isinstance(prob_distr, str):
                self.sampler = lambda size: getattr(nr, prob_distr)(
                    *parameter, size=size, **kwargs)
                samples = self.sampler(out_shape) * scale + loc
            elif isinstance(prob_distr,
                            (ProbabilisticParameterBase,
                             TransformedProbabilisticParameterBase)):
                self.sampler = lambda size: prob_distr.numpy(size=size)
                samples = self.sampler(out_shape)
        else:
            raise ValueError("Unknown sampling module " + str(self.sampling_module) +
                             " or unsupported probability distribution input!")
        return samples.T
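
# A minimal usage sketch (keyword names assumed from the branches of sample()
# above): draw 10 samples from a standard normal truncated to [0, +inf) via
# the inverse-CDF trick in _truncated_distribution_sampling.
def example_truncated_normal_samples():
    sampler = ProbabilisticSampler(n_samples=10, sampling_module="scipy",
                                   random_seed=42)
    # "norm" is looked up on scipy.stats; low sets the truncation limit, while
    # loc and scale shift and rescale the returned samples.
    return sampler.sample((), loc=0.0, scale=1.0, shape=(1,),
                          probability_distribution="norm", low=0.0)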