Example #1
def main_sensitivity_analysis(config=Config()):
    # -------------------------------Reading data-----------------------------------
    reader = Reader()
    writer = H5Writer()
    head = reader.read_head(config.input.HEAD)
    # --------------------------Hypothesis definition-----------------------------------
    n_samples = 100
    # Manual definition of hypothesis...:
    x0_indices = [20]
    x0_values = [0.9]
    e_indices = [70]
    e_values = [0.9]
    disease_indices = x0_indices + e_indices
    n_disease = len(disease_indices)
    n_x0 = len(x0_indices)
    n_e = len(e_indices)
    all_regions_indices = np.array(range(head.connectivity.number_of_regions))
    healthy_indices = np.delete(all_regions_indices, disease_indices).tolist()
    n_healthy = len(healthy_indices)
    # This is an example of x0_values mixed Excitability and Epileptogenicity Hypothesis:
    hyp_x0_E = HypothesisBuilder(
        head.connectivity.number_of_regions).set_x0_hypothesis(
            x0_indices,
            x0_values).set_e_hypothesis(e_indices,
                                        e_values).build_hypothesis()
    # Now running the sensitivity analysis:
    logger.info("running sensitivity analysis PSE LSA...")
    for m in METHODS:
        try:
            model_configuration_builder, model_configuration, lsa_service, lsa_hypothesis, sa_results, pse_results = \
                sensitivity_analysis_pse_from_hypothesis(hyp_x0_E,
                                                         head.connectivity.normalized_weights,
                                                         head.connectivity.region_labels,
                                                         n_samples, method=m, param_range=0.1,
                                                         global_coupling=[{"indices": all_regions_indices,
                                                                           "low": 0.0, "high": 2 * K_DEF}],
                                                         healthy_regions_parameters=[
                                                             {"name": "x0_values", "indices": healthy_indices}],
                                                         config=config, save_services=True)
            Plotter(config).plot_lsa(
                lsa_hypothesis,
                model_configuration,
                lsa_service.weighted_eigenvector_sum,
                lsa_service.eigen_vectors_number,
                region_labels=head.connectivity.region_labels,
                pse_results=pse_results,
                title=m + "_PSE_LSA_overview_" + lsa_hypothesis.name,
                lsa_service=lsa_service)
            # , show_flag=True, save_flag=False
            result_file = os.path.join(
                config.out.FOLDER_RES,
                m + "_PSE_LSA_results_" + lsa_hypothesis.name + ".h5")
            writer.write_dictionary(pse_results, result_file)
            result_file = os.path.join(
                config.out.FOLDER_RES,
                m + "_SA_LSA_results_" + lsa_hypothesis.name + ".h5")
            writer.write_dictionary(sa_results, result_file)
        except Exception:
            logger.warning("Method " + m + " failed!")
Example #2
def main_pse(config=Config()):
    # -------------------------------Reading data-----------------------------------
    reader = Reader()
    writer = H5Writer()
    head = reader.read_head(config.input.HEAD)
    logger = initialize_logger(__name__, config.out.FOLDER_LOGS)

    # --------------------------Manual Hypothesis definition-----------------------------------
    n_samples = 100
    x0_indices = [20]
    x0_values = [0.9]
    e_indices = [70]
    e_values = [0.9]
    disease_indices = x0_indices + e_indices
    n_disease = len(disease_indices)

    n_x0 = len(x0_indices)
    n_e = len(e_indices)
    all_regions_indices = np.array(range(head.number_of_regions))
    healthy_indices = np.delete(all_regions_indices, disease_indices).tolist()
    n_healthy = len(healthy_indices)
    # This is an example of x0_values mixed Excitability and Epileptogenicity Hypothesis:
    hyp_x0_E = HypothesisBuilder(
        head.connectivity.number_of_regions).set_x0_hypothesis(
            x0_indices,
            x0_values).set_e_hypothesis(e_indices,
                                        e_values).build_hypothesis()

    # Now running the parameter search analysis:
    logger.info("running PSE LSA...")
    model_config, lsa_service, lsa_hypothesis, pse_res = pse_from_hypothesis(
        hyp_x0_E,
        head.connectivity.normalized_weights,
        head.connectivity.region_labels,
        n_samples,
        param_range=0.1,
        global_coupling=[{
            "indices": all_regions_indices
        }],
        healthy_regions_parameters=[{
            "name": "x0_values",
            "indices": healthy_indices
        }],
        save_services=True)[:4]

    logger.info("Plotting LSA...")
    Plotter(config).plot_lsa(lsa_hypothesis,
                             model_config,
                             lsa_service.weighted_eigenvector_sum,
                             lsa_service.eigen_vectors_number,
                             region_labels=head.connectivity.region_labels,
                             pse_results=pse_res,
                             lsa_service=lsa_service)

    logger.info("Saving LSA results ...")
    writer.write_dictionary(
        pse_res,
        os.path.join(config.out.FOLDER_RES,
                     lsa_hypothesis.name + "_PSE_LSA_results.h5"))
Example #3
def from_head_to_hypotheses(ep_name, config, plot_head=False):
    # -------------------------------Reading model_data-----------------------------------
    reader = TVBReader() if config.input.IS_TVB_MODE else H5Reader()
    logger.info("Reading from: " + config.input.HEAD)
    head = reader.read_head(config.input.HEAD)
    if plot_head:
        plotter = Plotter(config)
        plotter.plot_head(head)
    # --------------------------Hypothesis definition-----------------------------------
    # # Manual definition of hypothesis...:
    # x0_indices = [20]
    # x0_values = [0.9]
    # e_indices = [70]
    # e_values = [0.9]
    # disease_values = x0_values + e_values
    # disease_indices = x0_indices + e_indices
    # ...or reading a custom file:
    # FOLDER_RES = os.path.join(data_folder, ep_name)

    hypo_builder = HypothesisBuilder(head.connectivity.number_of_regions,
                                     config=config).set_normalize(0.95)

    # This is an example of Excitability Hypothesis:
    hyp_x0 = hypo_builder.build_hypothesis_from_file(ep_name)

    # This is an example of Epileptogenicity Hypothesis:
    hyp_E = hypo_builder.build_hypothesis_from_file(
        ep_name, e_indices=hyp_x0.x0_indices)

    # This is an example of Mixed Hypothesis:
    x0_indices = [hyp_x0.x0_indices[-1]]
    x0_values = [hyp_x0.x0_values[-1]]
    e_indices = hyp_x0.x0_indices[0:-1].tolist()
    e_values = hyp_x0.x0_values[0:-1].tolist()
    hyp_x0_E = hypo_builder.set_x0_hypothesis(x0_indices, x0_values). \
                                set_e_hypothesis(e_indices, e_values).build_hypothesis()

    hypos = (hyp_x0, hyp_E, hyp_x0_E)

    return head, hypos
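
# Hedged usage sketch (not part of the original source): one way from_head_to_hypotheses
# might be called; "clinical_hypothesis" is a hypothetical hypothesis-file name, and
# Config() is assumed to point to a valid head folder.
if __name__ == "__main__":
    config = Config()
    head, (hyp_x0, hyp_E, hyp_x0_E) = \
        from_head_to_hypotheses("clinical_hypothesis", config, plot_head=True)
    for hyp in (hyp_x0, hyp_E, hyp_x0_E):
        print(hyp.name)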
Example #4
    def fit(self,
            debug=0,
            simulate=0,
            return_output=True,
            plot_HMC=True,
            overwrite_output_files=False,
            plot_warmup=1,
            **kwargs):
        num_warmup = kwargs.get("num_warmup", 0)
        # Confirm output files and check if overwriting is necessary
        self.output_filepath, self.diagnostic_filepath, self.summary_filepath, self.command_filepath = \
            self.set_output_files(kwargs.pop("output_filepath", self.output_filepath),
                                  kwargs.pop("diagnostic_filepath", self.diagnostic_filepath),
                                  kwargs.pop("summary_filepath", self.summary_filepath),
                                  kwargs.pop("command_path", self.command_filepath),
                                  True, overwrite_output_files)
        self.model_path = kwargs.pop("model_path", self.model_path)
        self.fitmethod = kwargs.pop("fitmethod", self.fitmethod)
        self.fitmethod = kwargs.pop("method", self.fitmethod)
        self.set_options(**kwargs)
        self.command, self.output_filepath, self.diagnostic_filepath = \
            generate_cmdstan_fit_command(self.fitmethod, self.options, self.model_path,
                                         self.set_model_data(debug, simulate, **kwargs),
                                         self.output_filepath, self.diagnostic_filepath)
        self.logger.info("Model fitting with " + self.fitmethod +
                         " method of model: " + self.model_path + "...")
        with open(self.command_filepath, "w") as text_file:
            text_file.write(self.command)
        self.fitting_time = execute_command(self.command.replace("\t", ""),
                                            shell=True)[1]
        self.logger.info(
            str(self.fitting_time) + ' sec required to ' + self.fitmethod +
            "!")
        self.logger.info("Computing stan summary...")
        self.stan_summary()
        if return_output:
            est, samples, summary = self.read_output()
            if plot_HMC and self.fitmethod.find("sampl") >= 0 and \
                    isequal_string(self.options.get("algorithm", "None"), "HMC"):
                # Skip the warmup samples in the HMC plots unless plot_warmup is set
                Plotter(self.config).plot_HMC(
                    samples,
                    skip_samples=kwargs.pop("skip_samples",
                                            num_warmup * (1 - plot_warmup)))
            return est, samples, summary
        else:
            return None, None, None
Example #5
    def fit(self,
            output_filepath=None,
            diagnostic_filepath="",
            summary_filepath=None,
            debug=0,
            simulate=0,
            return_output=True,
            plot_HMC=True,
            **kwargs):
        if output_filepath is None:
            output_filepath = os.path.join(self.config.out.FOLDER_RES,
                                           STAN_OUTPUT_OPTIONS["file"])
        if summary_filepath is None:
            summary_filepath = os.path.join(self.config.out.FOLDER_RES,
                                            "stan_summary.csv")

        self.model_path = kwargs.pop("model_path", self.model_path)
        self.fitmethod = kwargs.pop("fitmethod", self.fitmethod)
        self.fitmethod = kwargs.pop("method", self.fitmethod)
        self.set_options(**kwargs)
        self.command, output_filepath, diagnostic_filepath = \
            generate_cmdstan_fit_command(self.fitmethod, self.options, self.model_path,
                                         self.set_model_data(debug, simulate, **kwargs),
                                         output_filepath, diagnostic_filepath)
        self.logger.info("Model fitting with " + self.fitmethod +
                         " method of model: " + self.model_path + "...")
        self.fitting_time = execute_command(self.command.replace("\t", ""),
                                            shell=True)[1]
        self.logger.info(
            str(self.fitting_time) + ' sec required to ' + self.fitmethod +
            "!")
        self.logger.info("Computing stan summary...")
        summary_filepath = self.stan_summary(output_filepath, summary_filepath)
        if return_output:
            est, samples, summary = self.read_output(
                output_filepath, summary_filepath=summary_filepath, **kwargs)
            if plot_HMC and self.fitmethod.find("sampl") >= 0 and \
                isequal_string(self.options.get("algorithm", "None"), "HMC"):
                Plotter(self.config).plot_HMC(samples,
                                              kwargs.pop("skip_samples", 0))
            return est, samples, summary
        else:
            return None, None, None
Example #6
def from_hypothesis_to_model_config_lsa(hyp,
                                        head,
                                        eigen_vectors_number=None,
                                        weighted_eigenvector_sum=True,
                                        config=Config(),
                                        save_flag=None,
                                        plot_flag=None,
                                        **kwargs):
    logger.info("\n\nRunning hypothesis: " + hyp.name)
    logger.info("\n\nCreating model configuration...")
    if save_flag is None:
        save_flag = config.figures.SAVE_FLAG
    if plot_flag is None:
        plot_flag = config.figures.SHOW_FLAG
    builder = ModelConfigurationBuilder(hyp.number_of_regions, **kwargs)
    if hyp.type == "Epileptogenicity":
        model_configuration = builder.build_model_from_E_hypothesis(
            hyp, head.connectivity.normalized_weights)
    else:
        model_configuration = builder.build_model_from_hypothesis(
            hyp, head.connectivity.normalized_weights)
    logger.info("\n\nRunning LSA...")
    lsa_service = LSAService(eigen_vectors_number=eigen_vectors_number,
                             weighted_eigenvector_sum=weighted_eigenvector_sum)
    lsa_hypothesis = lsa_service.run_lsa(hyp, model_configuration)
    if save_flag:
        writer = H5Writer()
        path_mc = os.path.join(config.out.FOLDER_RES,
                               hyp.name + "_ModelConfig.h5")
        writer.write_model_configuration(model_configuration, path_mc)
        writer.write_hypothesis(
            lsa_hypothesis,
            os.path.join(config.out.FOLDER_RES, lsa_hypothesis.name + ".h5"))
    if plot_flag:
        plotter = Plotter(config)
        # Plot nullclines and equilibria of model configuration
        plotter.plot_state_space(model_configuration,
                                 "6d",
                                 head.connectivity.region_labels,
                                 special_idx=hyp.regions_disease_indices,
                                 zmode="lin",
                                 figure_name=hyp.name + "_StateSpace")
        plotter.plot_lsa(lsa_hypothesis,
                         model_configuration,
                         lsa_service.weighted_eigenvector_sum,
                         lsa_service.eigen_vectors_number,
                         head.connectivity.region_labels,
                         None,
                         lsa_service=lsa_service)
    return model_configuration, lsa_hypothesis, builder, lsa_service
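
# Hedged usage sketch (not part of the original source): a hypothesis built as in the
# other examples is passed through from_hypothesis_to_model_config_lsa; region index 70
# and value 0.9 are illustrative only.
if __name__ == "__main__":
    config = Config()
    head = H5Reader().read_head(config.input.HEAD)
    hyp = HypothesisBuilder(head.connectivity.number_of_regions,
                            config=config).set_e_hypothesis([70], [0.9]).build_hypothesis()
    model_configuration, lsa_hypothesis, builder, lsa_service = \
        from_hypothesis_to_model_config_lsa(hyp, head, plot_flag=True, config=config)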
Example #7
# coding=utf-8

import numpy as np

from tvb_epilepsy.base.utils.log_error_utils import initialize_logger
from tvb_epilepsy.plot.plotter import Plotter
from tvb_epilepsy.service.stochastic_parameter_builder import set_parameter

logger = initialize_logger(__name__)

if __name__ == "__main__":
    plotter = Plotter()
    x0 = set_parameter("x0", optimize_pdf=True, use="manual", x0_lo=0.0, x0_hi=2.0, x0_pdf="lognormal",
                       x0_pdf_params={"skew": 0.0, "mean": 0.5 / 0.05}, x0_mean=0.5, x0_std=0.05)

    axes, fig = plotter.plot_stochastic_parameter(x0, np.arange(-0.01, 2.0, 0.01))

    # Testing for converting from symmetric matrix to two flattened columns and backwards:
    # a = np.array([[11, 12, 13, 14],
    #               [21, 22, 23, 24],
    #               [31, 32, 33, 34],
    #               [41, 42, 43, 44]])
    # b = np.stack([a[np.triu_indices(4, 1)], a.T[np.triu_indices(4, 1)]]).T
    # c = np.ones((4,4))
    # icon = -1
    # for ii in range(4):
    #     for jj in range(ii, 4):
    #         if (ii == jj):
    #             c[ii, jj] = 0
    #         else:
    #             icon += 1
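
    # A self-contained sketch (not from the original source) completing the commented
    # round-trip above: pack the two triangles of a square matrix into two flattened
    # columns, then rebuild its off-diagonal part.
    a = np.array([[11, 12, 13, 14],
                  [21, 22, 23, 24],
                  [31, 32, 33, 34],
                  [41, 42, 43, 44]])
    iu = np.triu_indices(4, 1)
    # column 0: upper-triangle entries a[i, j]; column 1: lower-triangle entries a[j, i]
    b = np.stack([a[iu], a.T[iu]]).T
    # ...and backwards:
    c = np.zeros((4, 4), dtype=a.dtype)
    c[iu] = b[:, 0]    # upper triangle
    c.T[iu] = b[:, 1]  # lower triangle, written through the transposed view
    assert np.all(c + np.diag(np.diag(a)) == a)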
Example #8
def main_vep(config=Config(), sim_type="default", test_write_read=False,
             pse_flag=PSE_FLAG, sa_pse_flag=SA_PSE_FLAG, sim_flag=SIM_FLAG):
    logger = initialize_logger(__name__, config.out.FOLDER_LOGS)
    # -------------------------------Reading data-----------------------------------
    reader = TVBReader() if config.input.IS_TVB_MODE else H5Reader()
    writer = H5Writer()
    logger.info("Reading from: " + config.input.HEAD)
    head = reader.read_head(config.input.HEAD)
    plotter = Plotter(config)
    plotter.plot_head(head)
    if test_write_read:
        writer.write_head(head, os.path.join(config.out.FOLDER_RES, "Head"))
    # --------------------------Hypothesis definition-----------------------------------
    n_samples = 100
    # # Manual definition of hypothesis...:
    # x0_indices = [20]
    # x0_values = [0.9]
    # e_indices = [70]
    # e_values = [0.9]
    # disease_values = x0_values + e_values
    # disease_indices = x0_indices + e_indices
    # ...or reading a custom file:

    hypo_builder = HypothesisBuilder(head.connectivity.number_of_regions, config=config).set_normalize(0.95)

    # This is an example of Epileptogenicity Hypothesis: you give as e_indices all indices for values > 0
    hyp_E = hypo_builder.build_hypothesis_from_file(EP_NAME, e_indices=[1, 3, 16, 25])
    # print(hyp_E.string_regions_disease(head.connectivity.region_labels))

    # This is an example of Excitability Hypothesis:
    hyp_x0 = hypo_builder.build_hypothesis_from_file(EP_NAME)

    # # This is an example of Mixed Hypothesis set manually by the user:
    # x0_indices = [hyp_x0.x0_indices[-1]]
    # x0_values = [hyp_x0.x0_values[-1]]
    # e_indices = hyp_x0.x0_indices[0:-1].tolist()
    # e_values = hyp_x0.x0_values[0:-1].tolist()
    # hyp_x0_E = hypo_builder.set_x0_hypothesis(x0_indices, x0_values). \
    #                             set_e_hypothesis(e_indices, e_values).build_hypothesis()

    # This is an example of x0_values mixed Excitability and Epileptogenicity Hypothesis set from file:
    all_regions_indices = np.array(range(head.number_of_regions))
    healthy_indices = np.delete(all_regions_indices, hyp_E.x0_indices + hyp_E.e_indices).tolist()
    hyp_x0_E = hypo_builder.build_hypothesis_from_file(EP_NAME, e_indices=[16, 25])

    hypotheses = (hyp_x0_E, hyp_x0, hyp_E)

    # --------------------------Simulation preparations-----------------------------------
    # If you choose model...
    # Available models beyond the TVB Epileptor (they all encompass optional variations from the different papers):
    # EpileptorDP: similar to the TVB Epileptor + optional variations,
    # EpileptorDP2D: reduced 2D model, following Proix et al. 2014 + optional variations,
    # EpileptorDPrealistic: starting from the TVB Epileptor + optional variations, but:
    #      -x0, Iext1, Iext2, slope and K become noisy state variables,
    #      -Iext2 and slope are coupled to z, g, or z*g in order for spikes to appear before seizure,
    #      -multiplicative correlated noise is also used
    # We don't want any time delays for the moment
    head.connectivity.tract_lengths *= config.simulator.USE_TIME_DELAYS_FLAG
    sim_builder = SimulatorBuilder(config.simulator.MODE)
    if isequal_string(sim_type, "realistic"):
        sim_settings = sim_builder.set_model_name("EpileptorDPrealistic").set_simulated_period(50000).build_sim_settings()
        sim_settings.noise_type = COLORED_NOISE
        sim_settings.noise_ntau = 10
    elif isequal_string(sim_type, "fitting"):
        sim_settings = sim_builder.set_model_name("EpileptorDP2D").build_sim_settings()
        sim_settings.noise_intensity = 1e-3
    elif isequal_string(sim_type, "paper"):
        sim_builder.set_model_name("Epileptor")
        sim_settings = sim_builder.build_sim_settings()
    else:
        sim_settings = sim_builder.build_sim_settings()

    # --------------------------Hypothesis and LSA-----------------------------------
    for hyp in hypotheses:
        logger.info("\n\nRunning hypothesis: " + hyp.name)
        logger.info("\n\nCreating model configuration...")
        builder = ModelConfigurationBuilder(hyp.number_of_regions)

        mcs_file = os.path.join(config.out.FOLDER_RES, hyp.name + "_model_config_service.h5")
        writer.write_model_configuration_builder(builder, mcs_file)
        if test_write_read:
            logger.info("Written and read model configuration services are identical?: " +
                        str(assert_equal_objects(builder, reader.read_model_configuration_builder(mcs_file),
                                                 logger=logger)))

        if hyp.type == "Epileptogenicity":
            model_configuration = builder.build_model_from_E_hypothesis(hyp, head.connectivity.normalized_weights)
        else:
            model_configuration = builder.build_model_from_hypothesis(hyp, head.connectivity.normalized_weights)
        mc_path = os.path.join(config.out.FOLDER_RES, hyp.name + "_ModelConfig.h5")
        writer.write_model_configuration(model_configuration, mc_path)
        if test_write_read:
            logger.info("Written and read model configuration are identical?: " +
                        str(assert_equal_objects(model_configuration, reader.read_model_configuration(mc_path),
                                                 logger=logger)))
        # Plot nullclines and equilibria of model configuration
        plotter.plot_state_space(model_configuration, "6d", head.connectivity.region_labels,
                                 special_idx=hyp_x0.x0_indices + hyp_E.e_indices, zmode="lin",
                                 figure_name=hyp.name + "_StateSpace")

        logger.info("\n\nRunning LSA...")
        lsa_service = LSAService(eigen_vectors_number=None, weighted_eigenvector_sum=True)
        lsa_hypothesis = lsa_service.run_lsa(hyp, model_configuration)

        lsa_path = os.path.join(config.out.FOLDER_RES, lsa_hypothesis.name + "_LSA.h5")
        lsa_config_path = os.path.join(config.out.FOLDER_RES, lsa_hypothesis.name + "_LSAConfig.h5")
        writer.write_hypothesis(lsa_hypothesis, lsa_path)
        writer.write_lsa_service(lsa_service, lsa_config_path)
        if test_write_read:
            logger.info("Written and read LSA services are identical?: " +
                        str(assert_equal_objects(lsa_service, reader.read_lsa_service(lsa_config_path), logger=logger)))
            logger.info("Written and read LSA hypotheses are identical (no input check)?: " +
                        str(assert_equal_objects(lsa_hypothesis, reader.read_hypothesis(lsa_path), logger=logger)))
        plotter.plot_lsa(lsa_hypothesis, model_configuration, lsa_service.weighted_eigenvector_sum,
                         lsa_service.eigen_vectors_number, head.connectivity.region_labels, None)

        if pse_flag:
            # --------------Parameter Search Exploration (PSE)-------------------------------
            logger.info("\n\nRunning PSE LSA...")
            pse_results = pse_from_lsa_hypothesis(lsa_hypothesis,
                                                  head.connectivity.normalized_weights,
                                                  head.connectivity.region_labels,
                                                  n_samples, param_range=0.1,
                                                  global_coupling=[{"indices": all_regions_indices}],
                                                  healthy_regions_parameters=[
                                                      {"name": "x0_values", "indices": healthy_indices}],
                                                  model_configuration_builder=builder,
                                                  lsa_service=lsa_service, logger=logger, save_flag=True)[0]
            plotter.plot_lsa(lsa_hypothesis, model_configuration, lsa_service.weighted_eigenvector_sum,
                             lsa_service.eigen_vectors_number, head.connectivity.region_labels, pse_results)

            pse_lsa_path = os.path.join(config.out.FOLDER_RES, lsa_hypothesis.name + "_PSE_LSA_results.h5")
            writer.write_dictionary(pse_results, pse_lsa_path)
            if test_write_read:
                logger.info("Written and read sensitivity analysis parameter search results are identical?: " +
                            str(assert_equal_objects(pse_results, reader.read_dictionary(pse_lsa_path), logger=logger)))

        if sa_pse_flag:
            # --------------Sensitivity Analysis Parameter Search Exploration (PSE)-------------------------------
            logger.info("\n\nrunning sensitivity analysis PSE LSA...")
            sa_results, pse_sa_results = \
                sensitivity_analysis_pse_from_lsa_hypothesis(lsa_hypothesis,
                                                             head.connectivity.normalized_weights,
                                                             head.connectivity.region_labels,
                                                             n_samples, method="sobol", param_range=0.1,
                                                             global_coupling=[{"indices": all_regions_indices,
                                                                               "bounds": [0.0, 2 *
                                                                                          builder.K_unscaled[
                                                                                              0]]}],
                                                             healthy_regions_parameters=[
                                                                 {"name": "x0_values", "indices": healthy_indices}],
                                                             model_configuration_builder=builder,
                                                             lsa_service=lsa_service, config=config)
            plotter.plot_lsa(lsa_hypothesis, model_configuration, lsa_service.weighted_eigenvector_sum,
                             lsa_service.eigen_vectors_number, head.connectivity.region_labels, pse_sa_results,
                             title="SA PSE Hypothesis Overview")

            sa_pse_path = os.path.join(config.out.FOLDER_RES, lsa_hypothesis.name + "_SA_PSE_LSA_results.h5")
            sa_lsa_path = os.path.join(config.out.FOLDER_RES, lsa_hypothesis.name + "_SA_LSA_results.h5")
            writer.write_dictionary(pse_sa_results, sa_pse_path)
            writer.write_dictionary(sa_results, sa_lsa_path)
            if test_write_read:
                logger.info("Written and read sensitivity analysis results are identical?: " +
                            str(assert_equal_objects(sa_results, reader.read_dictionary(sa_lsa_path), logger=logger)))
                logger.info("Written and read sensitivity analysis parameter search results are identical?: " +
                            str(assert_equal_objects(pse_sa_results, reader.read_dictionary(sa_pse_path),
                                                     logger=logger)))

        if sim_flag:
            # ------------------------------Simulation--------------------------------------
            logger.info("\n\nConfiguring simulation from model_configuration...")
            model = sim_builder.generate_model(model_configuration)
            if isequal_string(sim_type, "realistic"):
                model.tau0 = 30000.0
                model.tau1 = 0.2
                model.slope = 0.25
            elif isequal_string(sim_type, "fitting"):
                model.tau0 = 10.0
                model.tau1 = 0.5
            sim, sim_settings, model = sim_builder.build_simulator_TVB_from_model_sim_settings(model_configuration,
                                                                                 head.connectivity, model, sim_settings)

            # Integrator and initial conditions initialization.
            # By default initial condition is set right on the equilibrium point.
            writer.write_generic(sim.model, config.out.FOLDER_RES, lsa_hypothesis.name + "_sim_model.h5")
            logger.info("\n\nSimulating...")
            ttavg, tavg_data, status = sim.launch_simulation(report_every_n_monitor_steps=100)

            sim_path = os.path.join(config.out.FOLDER_RES, lsa_hypothesis.name + "_sim_settings.h5")
            writer.write_simulation_settings(sim.simulation_settings, sim_path)
            if test_write_read:
                # TODO: find out why it cannot set monitor expressions
                logger.info("Written and read simulation settings are identical?: " +
                            str(assert_equal_objects(sim.simulation_settings,
                                                     reader.read_simulation_settings(sim_path), logger=logger)))
            if not status:
                logger.warning("\nSimulation failed!")
            else:
                time = np.array(ttavg, dtype='float32')
                output_sampling_time = np.mean(np.diff(time))
                tavg_data = tavg_data[:, :, :, 0]
                logger.info("\n\nSimulated signal return shape: %s", tavg_data.shape)
                logger.info("Time: %s - %s", time[0], time[-1])
                logger.info("Values: %s - %s", tavg_data.min(), tavg_data.max())
                # Variables of interest in a dictionary:
                res_ts = prepare_vois_ts_dict(sim_settings.monitor_expressions, tavg_data)
                res_ts['time'] = time
                res_ts['time_units'] = 'msec'
                res_ts = compute_seeg_and_write_ts_h5_file(config.out.FOLDER_RES, lsa_hypothesis.name + "_ts.h5",
                                                                 sim.model, res_ts, output_sampling_time,
                                                                 sim_settings.simulated_period,
                                                                 hpf_flag=True, hpf_low=10.0, hpf_high=512.0,
                                                                 sensors_list=head.sensorsSEEG)
                # Plot results
                if model._ui_name == "EpileptorDP2D":
                    spectral_raster_plot = False
                    trajectories_plot = True
                else:
                    spectral_raster_plot = "lfp"
                    trajectories_plot = False
                #TODO: plotting fails when spectral_raster_plot="lfp". Denis will fix this
                plotter.plot_sim_results(sim.model, lsa_hypothesis.lsa_propagation_indices, res_ts,
                                         head.sensorsSEEG, hpf_flag=True, trajectories_plot=trajectories_plot,
                                         spectral_raster_plot=False, log_scale=True)
Example #9
class ODEModelInversionService(ModelInversionService):

    active_seeg_th = None
    bipolar = BIPOLAR
    manual_selection = []
    auto_selection = "power"  # auto_selection=False,
    power_th = None
    gain_matrix_th = None
    normalization = "baseline-amplitude"
    decim_ratio = 1
    cut_target_data_tails = [0, 0]
    n_electrodes = 10
    sensors_per_electrode = 1
    group_electrodes = True
    plotter = Plotter()

    def __init__(self):
        super(ODEModelInversionService, self).__init__()
        self.ts_service = TimeseriesService()

    def update_active_regions_seeg(self,
                                   target_data,
                                   probabilistic_model,
                                   sensors,
                                   reset=False):
        if reset:
            probabilistic_model.update_active_regions([])
        if target_data:
            active_regions = probabilistic_model.active_regions
            gain_matrix = np.array(sensors.gain_matrix)
            seeg_inds = sensors.get_sensors_inds_by_sensors_labels(
                target_data.space_labels)
            if len(seeg_inds) != 0:
                gain_matrix = gain_matrix[seeg_inds]
                for proj in gain_matrix:
                    active_regions += select_greater_values_array_inds(
                        proj).tolist()
                    probabilistic_model.update_active_regions(active_regions)
            else:
                warning(
                    "Skipping active regions setting by seeg power because no data were assigned to sensors!"
                )
        else:
            warning(
                "Skipping active regions setting by seeg power because no target data were provided!"
            )
        return probabilistic_model

    def update_active_regions(self,
                              probabilistic_model,
                              sensors=None,
                              target_data=None,
                              e_values=[],
                              x0_values=[],
                              lsa_propagation_strengths=[],
                              reset=False):
        if reset:
            probabilistic_model.update_active_regions([])
        probabilistic_model = \
            super(ODEModelInversionService, self).update_active_regions(probabilistic_model, e_values, x0_values,
                                                                        lsa_propagation_strengths, reset=False)
        probabilistic_model = self.update_active_regions_seeg(
            target_data, probabilistic_model, sensors, reset=False)
        return probabilistic_model

    def select_target_data_seeg(self,
                                target_data,
                                sensors,
                                rois,
                                power=np.array([])):
        if self.auto_selection.find("rois") >= 0:
            if sensors.gain_matrix is not None:
                target_data = self.ts_service.select_by_rois_proximity(
                    target_data, sensors.gain_matrix.T[rois],
                    self.gain_matrix_th)
        if self.auto_selection.find(
                "correlation-power") >= 0 and target_data.number_of_labels > 1:
            if self.group_electrodes:
                disconnectivity = HeadService(
                ).sensors_in_electrodes_disconnectivity(
                    sensors, target_data.space_labels)
            target_data = self.ts_service.select_by_correlation_power(
                target_data,
                disconnectivity=disconnectivity,
                n_groups=self.n_electrodes,
                members_per_group=self.sensors_per_electrode)
        elif self.auto_selection.find("power") >= 0:
            target_data = self.ts_service.select_by_power(
                target_data, power, self.power_th)
        return target_data

    def set_gain_matrix(self, target_data, probabilistic_model, sensors=None):
        if probabilistic_model.observation_model in OBSERVATION_MODELS.SEEG.value:
            signals_inds = sensors.get_sensors_inds_by_sensors_labels(
                target_data.space_labels)
            gain_matrix = np.array(sensors.gain_matrix[signals_inds]
                                   [:, probabilistic_model.active_regions])
        else:
            gain_matrix = np.eye(target_data.number_of_labels)
        return gain_matrix

    def set_target_data_and_time(self,
                                 target_data,
                                 probabilistic_model,
                                 head=None,
                                 sensors=None,
                                 sensor_id=0,
                                 power=np.array([])):
        if sensors is None and head is not None:
            try:
                sensors = head.get_sensors_id(sensor_ids=sensor_id)
            except Exception:
                if probabilistic_model.observation_model in OBSERVATION_MODELS.SEEG.value:
                    raise_error(
                        "No sensors instance! Needed for gain_matrix computation!"
                    )
                else:
                    pass
        if len(self.manual_selection) > 0:
            target_data = target_data.get_subspace_by_index(
                self.manual_selection)
        if self.auto_selection:
            if probabilistic_model.observation_model in OBSERVATION_MODELS.SEEG.value:
                target_data = self.select_target_data_seeg(
                    target_data, sensors, probabilistic_model.active_regions,
                    power)
            else:
                target_data = target_data.get_subspace_by_index(
                    probabilistic_model.active_regions)
        if self.decim_ratio > 1:
            target_data = self.ts_service.decimate(target_data,
                                                   self.decim_ratio)
        if np.any(np.array(self.cut_target_data_tails)):
            target_data = target_data.get_time_window(
                np.maximum(self.cut_target_data_tails[0],
                           0), target_data.time_length -
                np.maximum(self.cut_target_data_tails[1], 0))
        if self.bipolar:
            target_data = target_data.get_bipolar()
        # TODO: decide about target_data normalization for the different cases (sensors, sources)
        if self.normalization:
            target_data = self.ts_service.normalize(target_data,
                                                    self.normalization)
        probabilistic_model.time = target_data.time_line
        probabilistic_model.time_length = len(probabilistic_model.time)
        probabilistic_model.number_of_target_data = target_data.number_of_labels
        return target_data, probabilistic_model, self.set_gain_matrix(
            target_data, probabilistic_model, sensors)
Example #10
def from_model_configuration_to_simulation(model_configuration,
                                           head,
                                           lsa_hypothesis,
                                           sim_type="realistic",
                                           dynamical_model="EpileptorDP2D",
                                           ts_file=None,
                                           plot_flag=True,
                                           config=Config()):
    # Choose model
    # Available models beyond the TVB Epileptor (they all encompass optional variations from the different papers):
    # EpileptorDP: similar to the TVB Epileptor + optional variations,
    # EpileptorDP2D: reduced 2D model, following Proix et al. 2014 + optional variations,
    # EpileptorDPrealistic: starting from the TVB Epileptor + optional variations, but:
    #      -x0, Iext1, Iext2, slope and K become noisy state variables,
    #      -Iext2 and slope are coupled to z, g, or z*g in order for spikes to appear before seizure,
    #      -multiplicative correlated noise is also used
    # Optional variations:
    if dynamical_model == "EpileptorDP2D":
        spectral_raster_plot = False
        trajectories_plot = True
    else:
        spectral_raster_plot = False  # "lfp"
        trajectories_plot = False

    # ------------------------------Simulation--------------------------------------
    logger.info("\n\nConfiguring simulation...")
    if isequal_string(sim_type, "realistic"):
        sim, sim_settings, dynamical_model = build_simulator_TVB_realistic(
            model_configuration, head.connectivity)
    elif isequal_string(sim_type, "fitting"):
        sim, sim_settings, dynamical_model = build_simulator_TVB_fitting(
            model_configuration, head.connectivity)
    elif isequal_string(sim_type, "paper"):
        sim, sim_settings, dynamical_model = build_simulator_TVB_paper(
            model_configuration, head.connectivity)
    else:
        sim, sim_settings, dynamical_model = build_simulator_TVB_default(
            model_configuration, head.connectivity)

    writer = H5Writer()
    writer.write_generic(sim.model, config.out.FOLDER_RES,
                         dynamical_model._ui_name + "_model.h5")

    vois_ts_dict = {}
    if ts_file is not None and os.path.isfile(ts_file):
        logger.info("\n\nLoading previously simulated time series...")
        vois_ts_dict = H5Reader().read_dictionary(ts_file)
    else:
        logger.info("\n\nSimulating...")
        ttavg, tavg_data, status = sim.launch_simulation(
            report_every_n_monitor_steps=100)
        if not status:
            logger.warning("\nSimulation failed!")
        else:
            time = np.array(ttavg, dtype='float32').flatten()
            output_sampling_time = np.mean(np.diff(time))
            tavg_data = tavg_data[:, :, :, 0]
            logger.info("\n\nSimulated signal return shape: %s",
                        tavg_data.shape)
            logger.info("Time: %s - %s", time[0], time[-1])
            logger.info("Values: %s - %s", tavg_data.min(), tavg_data.max())
            # Variables of interest in a dictionary:
            vois_ts_dict = prepare_vois_ts_dict(
                dynamical_model.variables_of_interest, tavg_data)
            vois_ts_dict['time'] = time
            vois_ts_dict['time_units'] = 'msec'
            vois_ts_dict = compute_seeg_and_write_ts_h5_file(
                config.out.FOLDER_RES,
                dynamical_model._ui_name + "_ts.h5",
                sim.model,
                vois_ts_dict,
                output_sampling_time,
                sim_settings.simulated_period,
                hpf_flag=True,
                hpf_low=10.0,
                hpf_high=512.0,
                sensors_list=head.sensorsSEEG,
                save_flag=True)
            if isinstance(ts_file, basestring):
                writer.write_dictionary(
                    vois_ts_dict,
                    os.path.join(os.path.dirname(ts_file),
                                 os.path.basename(ts_file)))
    if plot_flag and len(vois_ts_dict) > 0:
        # Plot results
        Plotter(config).plot_sim_results(
            sim.model,
            lsa_hypothesis.lsa_propagation_indices,
            vois_ts_dict,
            sensorsSEEG=head.sensorsSEEG,
            hpf_flag=False,
            trajectories_plot=trajectories_plot,
            spectral_raster_plot=spectral_raster_plot,
            log_scale=True,
            region_labels=head.connectivity.region_labels)
    return vois_ts_dict
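
# Hedged usage sketch (not part of the original source): model_configuration and
# lsa_hypothesis could come from from_hypothesis_to_model_config_lsa (Example #6 above);
# "sim_ts.h5" is a hypothetical cache-file name for reloading previous simulations.
if __name__ == "__main__":
    config = Config()
    head = H5Reader().read_head(config.input.HEAD)
    hyp = HypothesisBuilder(head.connectivity.number_of_regions,
                            config=config).set_e_hypothesis([70], [0.9]).build_hypothesis()
    model_configuration, lsa_hypothesis = \
        from_hypothesis_to_model_config_lsa(hyp, head, config=config)[:2]
    vois_ts_dict = from_model_configuration_to_simulation(
        model_configuration, head, lsa_hypothesis, sim_type="fitting",
        ts_file=os.path.join(config.out.FOLDER_RES, "sim_ts.h5"), config=config)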
Example #11
class TestPlotter(BaseTest):
    plotter = Plotter(BaseTest.config)

    def test_plot_head(self):
        head = self._prepare_dummy_head()
        # TODO: these filenames may change because they are composed inside the plotting functions
        filename1 = "Connectivity_.png"
        filename2 = "HeadStats.png"
        filename3 = "1_-_SEEG_-_Projection.png"

        assert not os.path.exists(
            os.path.join(self.config.out.FOLDER_FIGURES, filename1))
        assert not os.path.exists(
            os.path.join(self.config.out.FOLDER_FIGURES, filename2))
        assert not os.path.exists(
            os.path.join(self.config.out.FOLDER_FIGURES, filename3))

        self.plotter.plot_head(head)

        assert os.path.exists(
            os.path.join(self.config.out.FOLDER_FIGURES, filename1))
        assert os.path.exists(
            os.path.join(self.config.out.FOLDER_FIGURES, filename2))
        assert os.path.exists(
            os.path.join(self.config.out.FOLDER_FIGURES, filename3))

    #TODO: check TypeError: unique() got an unexpected keyword argument 'axis' in prepare_target_stats()
    # def test_plot_stochastic_parameter(self):
    #     K_mean = 10 * 2.5 / 87
    #     K_std = numpy.min([K_mean - 0.0, 3.0 - K_mean]) / 6.0
    #     K = set_parameter("K", optimize_pdf=True, use="manual", K_lo=0.0, K_hi=3.0, K_pdf="lognormal",
    #                       K_pdf_params={"skew": 0.0, "mean": K_mean / K_std}, K_mean=K_mean,
    #                       K_std=K_std)
    #     figure_name = "K_parameter"
    #     figure_file = os.path.join(self.config.out.FOLDER_FIGURES, figure_name + ".png")
    #     assert not os.path.exists(figure_file)
    #
    #     self.plotter.plot_probabilistic_parameter(K, figure_name=figure_name)
    #
    #     assert os.path.exists(figure_file)

    def test_plot_lsa(self):
        figure_name = "LSAPlot"
        hypo_builder = HypothesisBuilder(
            config=self.config).set_name(figure_name)
        lsa_hypothesis = hypo_builder.build_lsa_hypothesis()
        mc = ModelConfigurationBuilder().build_model_from_E_hypothesis(
            lsa_hypothesis, numpy.array([1]))

        figure_file = os.path.join(self.config.out.FOLDER_FIGURES,
                                   figure_name + ".png")
        assert not os.path.exists(figure_file)

        self.plotter.plot_lsa(lsa_hypothesis,
                              mc,
                              True,
                              None,
                              region_labels=numpy.array(["a"]),
                              title="")

        assert not os.path.exists(figure_file)

    def test_plot_state_space(self):
        lsa_hypothesis = HypothesisBuilder(
            config=self.config).build_lsa_hypothesis()
        mc = ModelConfigurationBuilder().build_model_from_E_hypothesis(
            lsa_hypothesis, numpy.array([1]))

        model = "6d"
        zmode = "lin"
        # TODO: this figure_name is constructed inside plot method, so it can change
        figure_name = "_" + "Epileptor_" + model + "_z-" + str(zmode)
        file_name = os.path.join(self.config.out.FOLDER_FIGURES,
                                 figure_name + ".png")
        assert not os.path.exists(file_name)

        self.plotter.plot_state_space(mc,
                                      region_labels=numpy.array(["a"]),
                                      special_idx=[0],
                                      model=model,
                                      zmode=zmode,
                                      figure_name="")

        assert os.path.exists(file_name)

    def test_plot_sim_results(self):
        lsa_hypothesis = HypothesisBuilder(
            config=self.config).build_lsa_hypothesis()
        mc = ModelConfigurationBuilder().build_model_from_E_hypothesis(
            lsa_hypothesis, numpy.array([1]))
        model = build_EpileptorDP2D(mc)

        # TODO: this figure_name is constructed inside plot method, so it can change
        figure_name = "Simulated_TAVG"
        file_name = os.path.join(self.config.out.FOLDER_FIGURES,
                                 figure_name + ".png")
        assert not os.path.exists(file_name)

        data_3D = numpy.array([[[1, 2, 3], [4, 5, 6], [7, 8, 9], [0, 1, 2]],
                               [[3, 4, 5], [6, 7, 8], [9, 0, 1], [2, 3, 4]],
                               [[5, 6, 7], [8, 9, 0], [1, 2, 3], [4, 5, 6]]])

        self.plotter.plot_simulated_timeseries(
            Timeseries(
                data_3D, {
                    TimeseriesDimensions.SPACE.value: ["r1", "r2", "r3", "r4"],
                    TimeseriesDimensions.VARIABLES.value: ["x1", "x2", "z"]
                }, 0, 1), model, [0])

        assert os.path.exists(file_name)
Example #12
# coding=utf-8

import numpy as np

from tvb_epilepsy.base.utils.log_error_utils import initialize_logger
from tvb_epilepsy.plot.plotter import Plotter
from tvb_epilepsy.service.probabilistic_parameter_builder import set_parameter

logger = initialize_logger(__name__)

if __name__ == "__main__":
    plotter = Plotter()
    x0 = set_parameter("x0",
                       optimize_pdf=True,
                       use="manual",
                       x0_lo=0.0,
                       x0_hi=2.0,
                       x0_pdf="lognormal",
                       x0_pdf_params={
                           "skew": 0.0,
                           "mean": 0.5 / 0.05
                       },
                       x0_mean=0.5,
                       x0_std=0.05)

    axes, fig = plotter.plot_probabilistic_parameter(
        x0, np.arange(-0.01, 2.0, 0.01))

    # Testing for converting from symmetric matrix to two flattened columns and backwards:
    # a = np.array([[11, 12, 13, 14],
    #               [21, 22, 23, 24],
Example #13
class TestPlotter(BaseTest):
    plotter = Plotter(BaseTest.config)

    def test_plot_head(self):
        head = self._prepare_dummy_head()
        # TODO: these filenames may change because they are composed inside the plotting functions
        filename1 = "Connectivity_.png"
        filename2 = "HeadStats.png"
        filename3 = "1_-_SEEG_-_Projection.png"

        assert not os.path.exists(os.path.join(self.config.out.FOLDER_FIGURES, filename1))
        assert not os.path.exists(os.path.join(self.config.out.FOLDER_FIGURES, filename2))
        assert not os.path.exists(os.path.join(self.config.out.FOLDER_FIGURES, filename3))

        self.plotter.plot_head(head)

        assert os.path.exists(os.path.join(self.config.out.FOLDER_FIGURES, filename1))
        assert os.path.exists(os.path.join(self.config.out.FOLDER_FIGURES, filename2))
        assert os.path.exists(os.path.join(self.config.out.FOLDER_FIGURES, filename3))

    def test_plot_stochastic_parameter(self):
        K_mean = 10 * 2.5 / 87
        K_std = numpy.min([K_mean - 0.0, 3.0 - K_mean]) / 6.0
        K = set_parameter("K", optimize_pdf=True, use="manual", K_lo=0.0, K_hi=3.0, K_pdf="lognormal",
                          K_pdf_params={"skew": 0.0, "mean": K_mean / K_std}, K_mean=K_mean,
                          K_std=K_std)
        figure_name = "K_parameter"
        figure_file = os.path.join(self.config.out.FOLDER_FIGURES, figure_name + ".png")
        assert not os.path.exists(figure_file)

        self.plotter.plot_stochastic_parameter(K, figure_name=figure_name)

        assert os.path.exists(figure_file)

    def test_plot_lsa(self):
        figure_name = "LSAPlot"
        hypo_builder = HypothesisBuilder(config=self.config).set_name(figure_name)
        lsa_hypothesis = hypo_builder.build_lsa_hypothesis()
        mc = ModelConfigurationBuilder().build_model_from_E_hypothesis(lsa_hypothesis, numpy.array([1]))

        figure_file = os.path.join(self.config.out.FOLDER_FIGURES, figure_name + ".png")
        assert not os.path.exists(figure_file)

        self.plotter.plot_lsa(lsa_hypothesis, mc, True, None, region_labels=numpy.array(["a"]), title="")

        assert not os.path.exists(figure_file)

    def test_plot_state_space(self):
        lsa_hypothesis = HypothesisBuilder(config=self.config).build_lsa_hypothesis()
        mc = ModelConfigurationBuilder().build_model_from_E_hypothesis(lsa_hypothesis, numpy.array([1]))

        model = "6d"
        zmode = "lin"
        # TODO: this figure_name is constructed inside plot method, so it can change
        figure_name = "_" + "Epileptor_" + model + "_z-" + str(zmode)
        file_name = os.path.join(self.config.out.FOLDER_FIGURES, figure_name + ".png")
        assert not os.path.exists(file_name)

        self.plotter.plot_state_space(mc, region_labels=numpy.array(["a"]), special_idx=[0], model=model, zmode=zmode,
                                      figure_name="")

        assert os.path.exists(file_name)

    def test_plot_sim_results(self):
        lsa_hypothesis = HypothesisBuilder(config=self.config).build_lsa_hypothesis()
        mc = ModelConfigurationBuilder().build_model_from_E_hypothesis(lsa_hypothesis, numpy.array([1]))
        model = build_EpileptorDP2D(mc)
        res = prepare_vois_ts_dict(VOIS["EpileptorDP2D"], numpy.array([[[1, 2, 3], [1, 2, 3]], [[1, 2, 3], [1, 2, 3]]]))
        res['time'] = numpy.array([1, 2, 3])
        res['time_units'] = 'msec'

        # TODO: this figure_name is constructed inside plot method, so it can change
        figure_name = "EpileptorDP2D_Simulated_TAVG"
        file_name = os.path.join(self.config.out.FOLDER_FIGURES, figure_name + ".png")
        assert not os.path.exists(file_name)

        self.plotter.plot_sim_results(model, [0], res)

        assert os.path.exists(file_name)
Example #14
import numpy as np
from tvb_epilepsy.service.model_inversion.epileptor_params_factory import generate_negative_lognormal_parameter
from tvb_epilepsy.plot.plotter import Plotter
from tvb_epilepsy.tests.base import BaseTest

if __name__ == "__main__":

    x0 = generate_negative_lognormal_parameter("x0",
                                               -2.5 * np.ones(2, ),
                                               -4.0,
                                               1.0,
                                               sigma=None,
                                               sigma_scale=2,
                                               p_shape=(2, ),
                                               use="scipy")

    Plotter(BaseTest.config).plot_probabilistic_parameter(
        x0, figure_name="test_transformed_probabilistic_parameter")
    Plotter(BaseTest.config).plot_probabilistic_parameter(
        x0.star, figure_name="test_transformed_probabilistic_parameter_star")
    print(x0)
    print("Done")
Example #15
import os
from tvb_epilepsy.base.constants.config import Config
from tvb_epilepsy.base.utils.log_error_utils import initialize_logger
from tvb_epilepsy.io.tvb_data_reader import TVBReader
from tvb_epilepsy.io.h5_reader import H5Reader
from tvb_epilepsy.io.h5_writer import H5Writer
from tvb_epilepsy.plot.plotter import Plotter
# input_folder = os.path.join(os.path.expanduser("~"), 'Dropbox', 'Work', 'VBtech', 'VEP', "results", "CC", "TVB3", "tvb")
# head_folder = os.path.join(os.path.expanduser("~"), 'Dropbox', 'Work', 'VBtech', 'VEP', "results", "CC", "TVB3", "Head")
input_folder = os.path.join(os.path.expanduser("~"), 'Dropbox', 'Work',
                            'VBtech', 'VEP', "results", "INS", "JUNCH", "tvb")
head_folder = os.path.join(os.path.expanduser("~"), 'Dropbox', 'Work',
                           'VBtech', 'VEP', "results", "INS", "JUNCH", "Head")
output_folder = os.path.join(os.path.expanduser("~"), 'Dropbox', 'Work',
                             'VBtech', 'VEP', "results", "tests")
config = Config(head_folder=input_folder,
                output_base=output_folder,
                data_mode="tvb")  #, data_mode="java"
config.hypothesis.head_folder = head_folder
config.figures.MATPLOTLIB_BACKEND = "inline"
config.figures.SHOW_FLAG = True
logger = initialize_logger(__name__, config.out.FOLDER_LOGS)
reader = TVBReader() if config.input.IS_TVB_MODE else H5Reader()
writer = H5Writer()
plotter = Plotter(config)

logger.info("Reading from: " + config.input.HEAD)
head = reader.read_head(config.input.HEAD,
                        seeg_sensors_files=[("seeg_xyz.txt", )])
print("OK!")
Example #16
def main_vep(config=Config(),
             ep_name=EP_NAME,
             K_unscaled=K_DEF,
             ep_indices=[],
             hyp_norm=0.99,
             manual_hypos=[],
             sim_type="paper",
             pse_flag=PSE_FLAG,
             sa_pse_flag=SA_PSE_FLAG,
             sim_flag=SIM_FLAG,
             n_samples=1000,
             test_write_read=False):
    logger = initialize_logger(__name__, config.out.FOLDER_LOGS)
    # -------------------------------Reading data-----------------------------------
    reader = TVBReader() if config.input.IS_TVB_MODE else H5Reader()
    writer = H5Writer()
    logger.info("Reading from: " + config.input.HEAD)
    head = reader.read_head(config.input.HEAD)
    plotter = Plotter(config)
    plotter.plot_head(head)
    if test_write_read:
        writer.write_head(head, os.path.join(config.out.FOLDER_RES, "Head"))
    # --------------------------Hypothesis definition-----------------------------------

    hypotheses = []
    # Reading a h5 file:

    if len(ep_name) > 0:
        # For an Excitability Hypothesis you leave e_indices empty
        # For a Mixed Hypothesis: you give as e_indices some indices for values > 0
        # For an Epileptogenicity Hypothesis: you give as e_indices all indices for values > 0
        hyp_file = HypothesisBuilder(head.connectivity.number_of_regions, config=config).set_normalize(hyp_norm). \
            build_hypothesis_from_file(ep_name, e_indices=ep_indices)
        hyp_file.name += ep_name
        # print(hyp_file.string_regions_disease(head.connectivity.region_labels))
        hypotheses.append(hyp_file)

    hypotheses += manual_hypos

    # --------------------------Hypothesis and LSA-----------------------------------
    for hyp in hypotheses:
        logger.info("\n\nRunning hypothesis: " + hyp.name)

        all_regions_indices = np.array(range(head.number_of_regions))
        healthy_indices = np.delete(all_regions_indices,
                                    hyp.regions_disease_indices).tolist()

        logger.info("\n\nCreating model configuration...")
        model_config_builder = ModelConfigurationBuilder(hyp.number_of_regions,
                                                         K=K_unscaled,
                                                         tau1=TAU1_DEF,
                                                         tau0=TAU0_DEF)
        mcs_file = os.path.join(config.out.FOLDER_RES,
                                hyp.name + "_model_config_builder.h5")
        writer.write_model_configuration_builder(model_config_builder,
                                                 mcs_file)
        if test_write_read:
            logger.info(
                "Written and read model configuration services are identical?: "
                + str(
                    assert_equal_objects(
                        model_config_builder,
                        reader.read_model_configuration_builder(mcs_file),
                        logger=logger)))
        # Fix healthy regions to default equilibria:
        # model_configuration = \
        #        model_config_builder.build_model_from_E_hypothesis(hyp, head.connectivity.normalized_weights)
        # Fix healthy regions to default x0s:
        model_configuration = \
                model_config_builder.build_model_from_hypothesis(hyp, head.connectivity.normalized_weights)
        mc_path = os.path.join(config.out.FOLDER_RES,
                               hyp.name + "_ModelConfig.h5")
        writer.write_model_configuration(model_configuration, mc_path)
        if test_write_read:
            logger.info(
                "Written and read model configuration are identical?: " + str(
                    assert_equal_objects(model_configuration,
                                         reader.read_model_configuration(
                                             mc_path),
                                         logger=logger)))
        # Plot nullclines and equilibria of model configuration
        plotter.plot_state_space(model_configuration,
                                 "6d",
                                 head.connectivity.region_labels,
                                 special_idx=hyp.regions_disease_indices,
                                 zmode="lin",
                                 figure_name=hyp.name + "_StateSpace")

        logger.info("\n\nRunning LSA...")
        lsa_service = LSAService(eigen_vectors_number=1)
        lsa_hypothesis = lsa_service.run_lsa(hyp, model_configuration)

        lsa_path = os.path.join(config.out.FOLDER_RES,
                                lsa_hypothesis.name + "_LSA.h5")
        lsa_config_path = os.path.join(config.out.FOLDER_RES,
                                       lsa_hypothesis.name + "_LSAConfig.h5")
        writer.write_hypothesis(lsa_hypothesis, lsa_path)
        writer.write_lsa_service(lsa_service, lsa_config_path)
        if test_write_read:
            logger.info("Written and read LSA services are identical?: " + str(
                assert_equal_objects(lsa_service,
                                     reader.read_lsa_service(lsa_config_path),
                                     logger=logger)))
            logger.info(
                "Written and read LSA hypotheses are identical (no input check)?: "
                + str(
                    assert_equal_objects(lsa_hypothesis,
                                         reader.read_hypothesis(lsa_path),
                                         logger=logger)))
        plotter.plot_lsa(lsa_hypothesis,
                         model_configuration,
                         lsa_service.weighted_eigenvector_sum,
                         lsa_service.eigen_vectors_number,
                         head.connectivity.region_labels,
                         None,
                         lsa_service=lsa_service)

        if pse_flag:
            # --------------Parameter Search Exploration (PSE)-------------------------------
            logger.info("\n\nRunning PSE LSA...")
            pse_results = pse_from_lsa_hypothesis(
                n_samples,
                lsa_hypothesis,
                head.connectivity.normalized_weights,
                model_config_builder,
                lsa_service,
                head.connectivity.region_labels,
                param_range=0.1,
                global_coupling=[{
                    "indices": all_regions_indices
                }],
                healthy_regions_parameters=[{
                    "name": "x0_values",
                    "indices": healthy_indices
                }],
                logger=logger,
                save_flag=True)[0]
            plotter.plot_lsa(lsa_hypothesis, model_configuration,
                             lsa_service.weighted_eigenvector_sum,
                             lsa_service.eigen_vectors_number,
                             head.connectivity.region_labels, pse_results)

            pse_lsa_path = os.path.join(
                config.out.FOLDER_RES,
                lsa_hypothesis.name + "_PSE_LSA_results.h5")
            writer.write_dictionary(pse_results, pse_lsa_path)
            if test_write_read:
                logger.info(
                    "Written and read sensitivity analysis parameter search results are identical?: "
                    + str(
                        assert_equal_objects(pse_results,
                                             reader.read_dictionary(
                                                 pse_lsa_path),
                                             logger=logger)))

        if sa_pse_flag:
            # --------------Sensitivity Analysis Parameter Search Exploration (PSE)-------------------------------
            logger.info("\n\nrunning sensitivity analysis PSE LSA...")
            sa_results, pse_sa_results = \
                sensitivity_analysis_pse_from_lsa_hypothesis(n_samples, lsa_hypothesis,
                                                             head.connectivity.normalized_weights,
                                                             model_config_builder, lsa_service,
                                                             head.connectivity.region_labels,
                                                             method="sobol", param_range=0.1,
                                                             global_coupling=[{"indices": all_regions_indices,
                                                                               "bounds": [0.0, 2 *
                                                                                          model_config_builder.K_unscaled[
                                                                                              0]]}],
                                                             healthy_regions_parameters=[
                                                                 {"name": "x0_values", "indices": healthy_indices}],
                                                             config=config)
            plotter.plot_lsa(lsa_hypothesis,
                             model_configuration,
                             lsa_service.weighted_eigenvector_sum,
                             lsa_service.eigen_vectors_number,
                             head.connectivity.region_labels,
                             pse_sa_results,
                             title="SA PSE Hypothesis Overview")

            sa_pse_path = os.path.join(
                config.out.FOLDER_RES,
                lsa_hypothesis.name + "_SA_PSE_LSA_results.h5")
            sa_lsa_path = os.path.join(
                config.out.FOLDER_RES,
                lsa_hypothesis.name + "_SA_LSA_results.h5")
            writer.write_dictionary(pse_sa_results, sa_pse_path)
            writer.write_dictionary(sa_results, sa_lsa_path)
            if test_write_read:
                logger.info(
                    "Written and read sensitivity analysis results are identical?: "
                    + str(
                        assert_equal_objects(sa_results,
                                             reader.read_dictionary(
                                                 sa_lsa_path),
                                             logger=logger)))
                logger.info(
                    "Written and read sensitivity analysis parameter search results are identical?: "
                    + str(
                        assert_equal_objects(pse_sa_results,
                                             reader.read_dictionary(
                                                 sa_pse_path),
                                             logger=logger)))

        if sim_flag:
            # --------------------------Simulation preparations-----------------------------------
            # Choose the model.
            # Available models beyond the TVB Epileptor (they all encompass optional variations from the different papers):
            # EpileptorDP: similar to the TVB Epileptor + optional variations,
            # EpileptorDP2D: reduced 2D model, following Proix et al. 2014 + optional variations,
            # EpileptorDPrealistic: starting from the TVB Epileptor + optional variations, but:
            #      -x0, Iext1, Iext2, slope and K become noisy state variables,
            #      -Iext2 and slope are coupled to z, g, or z*g in order for spikes to appear before seizure,
            #      -correlated noise is also used
            # We don't want any time delays for the moment
            head.connectivity.tract_lengths *= config.simulator.USE_TIME_DELAYS_FLAG
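            # (Multiplying by USE_TIME_DELAYS_FLAG removes time delays when the flag is 0,
            #  since all tract lengths become 0; a value of 1 keeps the original delays.)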

            sim_types = ensure_list(sim_type)
            integrator = "HeunStochastic"
            for sim_type in sim_types:
                # ------------------------------Simulation--------------------------------------
                logger.info(
                    "\n\nConfiguring simulation from model_configuration...")
                sim_builder = SimulatorBuilder(config.simulator.MODE)
                if isequal_string(sim_type, "realistic"):
                    sim_settings = \
                        sim_builder.set_model_name("EpileptorDPrealistic").set_fs(2048.0).set_fs_monitor(1024.0). \
                        set_simulated_period(60000).build_sim_settings()
                    sim_settings.noise_type = COLORED_NOISE
                    sim_settings.noise_ntau = 20
                    # Generate the model first, so that its parameters can be customized below:
                    model = sim_builder.generate_model_tvb(model_configuration)
                    model.tau0 = 60000.0
                    model.tau1 = 0.2
                    model.slope = 0.25
                    model.Iext2 = 0.45
                    model.pmode = np.array("z")  # np.array("None") to opt out of feedback
                    integrator = "Dop853Stochastic"
                elif isequal_string(sim_type, "fitting"):
                    sim_settings = sim_builder.set_model_name("EpileptorDP2D").set_fs(2048.0).set_fs_monitor(2048.0).\
                                                                    set_simulated_period(2000).build_sim_settings()
                    sim_settings.noise_intensity = 1e-5
                    model = sim_builder.generate_model_tvb(model_configuration)
                    model.tau0 = 300.0
                    model.tau1 = 0.5
                elif isequal_string(sim_type, "reduced"):
                    sim_settings = sim_builder.set_model_name("EpileptorDP2D").set_fs(4096.0). \
                                                                    set_simulated_period(1000).build_sim_settings()
                    model = sim_builder.generate_model_tvb(model_configuration)
                elif isequal_string(sim_type, "paper"):
                    sim_builder.set_model_name("Epileptor")
                    sim_settings = sim_builder.build_sim_settings()
                    model = sim_builder.generate_model_tvb(model_configuration)
                else:
                    sim_settings = sim_builder.build_sim_settings()
                    model = sim_builder.generate_model_tvb(model_configuration)

                sim, sim_settings, model = \
                    sim_builder.build_simulator_TVB_from_model_sim_settings(model_configuration,head.connectivity,
                                                                            model, sim_settings, integrator=integrator)

                # Integrator and initial conditions initialization.
                # By default initial condition is set right on the equilibrium point.
                writer.write_simulator_model(
                    sim.model, sim.connectivity.number_of_regions,
                    os.path.join(config.out.FOLDER_RES,
                                 lsa_hypothesis.name + "_sim_model.h5"))
                logger.info("\n\nSimulating...")
                sim_output, status = sim.launch_simulation(
                    report_every_n_monitor_steps=100)

                sim_path = os.path.join(
                    config.out.FOLDER_RES,
                    lsa_hypothesis.name + "_sim_settings.h5")
                writer.write_simulation_settings(sim.simulation_settings,
                                                 sim_path)
                if test_write_read:
                    # TODO: find out why it cannot set monitor expressions
                    logger.info(
                        "Written and read simulation settings are identical?: "
                        + str(
                            assert_equal_objects(
                                sim.simulation_settings,
                                reader.read_simulation_settings(sim_path),
                                logger=logger)))
                if not status:
                    logger.warning("\nSimulation failed!")
                else:
                    time = np.array(sim_output.time_line).astype("f")
                    logger.info("\n\nSimulated signal return shape: %s",
                                sim_output.shape)
                    logger.info("Time: %s - %s", time[0], time[-1])
                    logger.info("Values: %s - %s", sim_output.data.min(),
                                sim_output.data.max())
                    sim_output, seeg = compute_seeg_and_write_ts_to_h5(
                        sim_output,
                        sim.model,
                        head.sensorsSEEG,
                        os.path.join(config.out.FOLDER_RES,
                                     model._ui_name + "_ts.h5"),
                        seeg_gain_mode="lin",
                        hpf_flag=True,
                        hpf_low=10.0,
                        hpf_high=512.0)

                    # Plot results
                    plotter.plot_simulated_timeseries(
                        sim_output,
                        sim.model,
                        lsa_hypothesis.lsa_propagation_indices,
                        seeg_list=seeg,
                        spectral_raster_plot=False,
                        title_prefix=hyp.name,
                        spectral_options={"log_scale": True})
Example #17
0
def prepare_seeg_observable(seeg_path,
                            on_off_set,
                            channels,
                            win_len=5.0,
                            low_freq=10.0,
                            high_freq=None,
                            log_flag=True,
                            plot_flag=False):
    import re
    from pylab import detrend_linear
    from mne.io import read_raw_edf
    raw_data = read_raw_edf(seeg_path, preload=True)
    rois = np.where(
        [np.in1d(s.split("POL ")[-1], channels) for s in raw_data.ch_names])[0]
    raw_data.resample(128.0)
    fs = raw_data.info['sfreq']
    data, times = raw_data[:, :]
    data = data[rois].T
    plotter = Plotter()
    if plot_flag:
        plotter.plot_spectral_analysis_raster(times,
                                              data,
                                              time_units="sec",
                                              freq=np.array(range(1, 51, 1)),
                                              title='Spectral Analysis',
                                              figure_name='Spectral Analysis',
                                              labels=channels,
                                              log_scale=True)
    data_bipolar = []
    bipolar_channels = []
    data_filtered = []
    bipolar_ch_inds = []
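    # Pair consecutive contacts of the same electrode into bipolar channels, e.g.
    # (hypothetical labels) "A1" and "A2" become "A1-A2", whereas "A8" and "B1" are
    # skipped because the electrode letter and/or contact numbering does not match.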
    for iS in range(data.shape[1] - 1):
        if (channels[iS][0] == channels[iS+1][0]) and \
                (int(re.findall(r'\d+', channels[iS])[0]) == int(re.findall(r'\d+', channels[iS+1])[0])-1):
            data_bipolar.append(data[:, iS] - data[:, iS + 1])
            bipolar_channels.append(channels[iS] + "-" + channels[iS + 1])
            data_filtered.append(
                filter_data(data_bipolar[-1],
                            fs,
                            low_freq,
                            60.0,
                            "bandpass",
                            order=3))
            bipolar_ch_inds.append(iS)
    data_bipolar = np.array(data_bipolar).T
    data_filtered = np.array(data_filtered).T
    # filter_data, times = raw_data.filter(low_freq, 100.0, picks=rois)[:, :]
    if plot_flag:
        plotter.plot_spectral_analysis_raster(
            times,
            data_bipolar,
            time_units="sec",
            freq=np.array(range(1, 51, 1)),
            title='Spectral Analysis',
            figure_name='Spectral Analysis Bipolar',
            labels=bipolar_channels,
            log_scale=True)
        plotter.plot_spectral_analysis_raster(
            times,
            data_filtered,
            time_units="sec",
            freq=np.array(range(1, 51, 1)),
            title='Spectral Analysis',
            figure_name='Spectral Analysis Filtered',
            labels=bipolar_channels,
            log_scale=True)
    del data
    t_onset = np.where(times > (on_off_set[0] - 2 * win_len))[0][0]
    t_offset = np.where(times > (on_off_set[1] + 2 * win_len))[0][0]
    times = times[t_onset:t_offset]
    data_filtered = data_filtered[t_onset:t_offset]
    observation = np.abs(data_filtered)
    del data_filtered
    if log_flag:
        observation = np.log(observation)
    for iS in range(observation.shape[1]):
        observation[:, iS] = detrend_linear(observation[:, iS])
    observation -= observation.min()
    for iS in range(observation.shape[1]):
        observation[:, iS] = np.convolve(observation[:, iS],
                                         np.ones(int(np.round(win_len * fs))),
                                         mode='same')
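    # Crop symmetrically around the centre of the selected window so that exactly
    # 4096 time points remain: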
    n_times = times.shape[0]
    dtimes = n_times - 4096
    t_onset = int(np.ceil(dtimes / 2.0))
    t_offset = n_times - int(np.floor(dtimes / 2.0))
    # t_onset = np.where(times > (on_off_set[0] - win_len))[0][0]
    # t_offset = np.where(times > (on_off_set[1] + win_len))[0][0]
    times = times[t_onset:t_offset]
    observation = observation[t_onset:t_offset]
    observation = zscore(observation, axis=None) / 3.0
    # observation -= observation.min()
    # observation /= observation.max()
    if plot_flag:
        plotter.plot_raster({"observation": observation},
                            times,
                            time_units="sec",
                            special_idx=None,
                            title='Time Series',
                            offset=1.0,
                            figure_name='TimeSeries',
                            labels=bipolar_channels)
    # n_times = times.shape[0]
    # observation = resample_poly(observation, 2048, n_times)
    observation = decimate(observation, 2, axis=0, zero_phase=True)
    times = decimate(times, 2, zero_phase=True)
    if plot_flag:
        plotter.plot_timeseries({"observation": observation},
                                times,
                                time_units="sec",
                                special_idx=None,
                                title='Time Series',
                                figure_name='TimeSeriesDecimated',
                                labels=bipolar_channels)
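    # The observation was decimated by a factor of 2 above, so the effective
    # sampling frequency returned is fs / 2.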
    return observation, times, fs / 2
Example #18
0
def main_cc_vep(config,
                head_folder,
                ep_name="clinical_hypothesis",
                x0_indices=[],
                pse_flag=False,
                sim_flag=True):
    if not (os.path.isdir(config.out.FOLDER_RES)):
        os.mkdir(config.out.FOLDER_RES)
    logger = initialize_logger(__name__, config.out.FOLDER_LOGS)

    # -------------------------------Reading data-----------------------------------
    reader = TVBReader() if config.input.IS_TVB_MODE else H5Reader()
    writer = H5Writer()
    logger.info("Reading from: %s", head_folder)
    head = reader.read_head(head_folder)
    plotter = Plotter(config)
    plotter.plot_head(head)

    # --------------------------Hypothesis definition-----------------------------------
    hypo_builder = HypothesisBuilder(head.connectivity.number_of_regions)
    all_regions_indices = np.array(range(head.number_of_regions))

    # This is an example of Epileptogenicity Hypothesis:
    hyp_E = hypo_builder.build_hypothesis_from_file(ep_name, x0_indices)
    # This is an example of Excitability Hypothesis:
    hyp_x0 = hypo_builder.build_hypothesis_from_file(ep_name)

    disease_indices = hyp_E.e_indices + hyp_x0.x0_indices
    healthy_indices = np.delete(all_regions_indices, disease_indices).tolist()

    if len(x0_indices) > 0:
        # This is an example of x0_values mixed Excitability and Epileptogenicity Hypothesis:
        disease_values = reader.read_epileptogenicity(head_folder,
                                                      name=ep_name)
        disease_values = disease_values.tolist()
        x0_values = []
        for ix0 in x0_indices:
            ind = disease_indices.index(ix0)
            del disease_indices[ind]
            x0_values.append(disease_values.pop(ind))
        e_indices = disease_indices
        e_values = np.array(disease_values)
        x0_values = np.array(x0_values)
        hyp_x0_E = hypo_builder.set_x0_hypothesis(
            x0_indices,
            x0_values).set_e_hypothesis(e_indices,
                                        e_values).build_hypothesis()
        hypotheses = (hyp_E, hyp_x0, hyp_x0_E)

    else:
        hypotheses = (
            hyp_E,
            hyp_x0,
        )

    # --------------------------Hypothesis and LSA-----------------------------------
    for hyp in hypotheses:
        logger.info("Running hypothesis: %s", hyp.name)
        logger.info("Creating model configuration...")
        builder = ModelConfigurationBuilder(hyp.number_of_regions)
        writer.write_model_configuration_builder(
            builder,
            os.path.join(config.out.FOLDER_RES, "model_config_service.h5"))
        if hyp.type == "Epileptogenicity":
            model_configuration = builder.build_model_from_E_hypothesis(
                hyp, head.connectivity.normalized_weights)
        else:
            model_configuration = builder.build_model_from_hypothesis(
                hyp, head.connectivity.normalized_weights)
        writer.write_model_configuration(
            model_configuration,
            os.path.join(config.out.FOLDER_RES, "ModelConfiguration.h5"))
        # Plot nullclines and equilibria of model configuration
        plotter.plot_state_space(model_configuration,
                                 region_labels=head.connectivity.region_labels,
                                 special_idx=disease_indices,
                                 model="2d",
                                 zmode="lin",
                                 figure_name=hyp.name + "_StateSpace")
        logger.info("Running LSA...")
        lsa_service = LSAService(eigen_vectors_number=None,
                                 weighted_eigenvector_sum=True)
        lsa_hypothesis = lsa_service.run_lsa(hyp, model_configuration)
        writer.write_hypothesis(
            lsa_hypothesis,
            os.path.join(config.out.FOLDER_RES, lsa_hypothesis.name + ".h5"))
        writer.write_lsa_service(
            lsa_service,
            os.path.join(config.out.FOLDER_RES, "lsa_config_service.h5"))
        plotter.plot_lsa(lsa_hypothesis, model_configuration,
                         lsa_service.weighted_eigenvector_sum,
                         lsa_service.eigen_vectors_number,
                         head.connectivity.region_labels, None)
        if pse_flag:
            n_samples = 100
            # --------------Parameter Search Exploration (PSE)-------------------------------
            logger.info("Running PSE LSA...")
            pse_results = pse_from_lsa_hypothesis(
                lsa_hypothesis,
                head.connectivity.normalized_weights,
                head.connectivity.region_labels,
                n_samples,
                param_range=0.1,
                global_coupling=[{
                    "indices": all_regions_indices
                }],
                healthy_regions_parameters=[{
                    "name": "x0_values",
                    "indices": healthy_indices
                }],
                model_configuration_builder=builder,
                lsa_service=lsa_service,
                save_flag=True,
                folder_res=config.out.FOLDER_RES,
                filename="PSE_LSA",
                logger=logger)[0]
            plotter.plot_lsa(lsa_hypothesis,
                             model_configuration,
                             lsa_service.weighted_eigenvector_sum,
                             lsa_service.eigen_vectors_number,
                             head.connectivity.region_labels,
                             pse_results,
                             title="Hypothesis PSE LSA Overview")
        if sim_flag:
            config.out.subfolder = "simulations"
            for folder in (config.out.FOLDER_RES, config.out.FOLDER_FIGURES):
                if not (os.path.isdir(folder)):
                    os.mkdir(folder)
            dynamical_models = ["EpileptorDP2D", "EpileptorDPrealistic"]

            for dynamical_model, sim_type in zip(dynamical_models,
                                                 ["fitting", "realistic"]):
                ts_file = None  # os.path.join(sim_folder_res, dynamical_model + "_ts.h5")
                vois_ts_dict = \
                    from_model_configuration_to_simulation(model_configuration, head, lsa_hypothesis,
                                                           sim_type=sim_type, dynamical_model=dynamical_model,
                                                           ts_file=ts_file, plot_flag=True, config=config)
Example #19
0
def from_model_configuration_to_simulation(model_configuration,
                                           head,
                                           lsa_hypothesis,
                                           sim_type="realistic",
                                           ts_file=None,
                                           seeg_gain_mode="lin",
                                           hpf_flag=False,
                                           hpf_low=10.0,
                                           hpf_high=512.0,
                                           config=Config(),
                                           plotter=False):
    # Choose model
    # Available models beyond the TVB Epileptor (they all encompass optional variations from the different papers):
    # EpileptorDP: similar to the TVB Epileptor + optional variations,
    # EpileptorDP2D: reduced 2D model, following Proix et al. 2014 + optional variations,
    # EpileptorDPrealistic: starting from the TVB Epileptor + optional variations, but:
    #      -x0, Iext1, Iext2, slope and K become noisy state variables,
    #      -Iext2 and slope are coupled to z, g, or z*g in order for spikes to appear before seizure,
    #      -multiplicative correlated noise is also used
    # Optional variations:

    # ------------------------------Simulation--------------------------------------
    hypname = lsa_hypothesis.name.replace("_LSA", "")
    logger.info("\n\nConfiguring simulation...")
    if isequal_string(sim_type, "realistic"):
        sim, sim_settings, dynamical_model = build_simulator_TVB_realistic(
            model_configuration, head.connectivity)
    elif isequal_string(sim_type, "fitting"):
        sim, sim_settings, dynamical_model = build_simulator_TVB_fitting(
            model_configuration, head.connectivity)
    elif isequal_string(sim_type, "paper"):
        sim, sim_settings, dynamical_model = build_simulator_TVB_paper(
            model_configuration, head.connectivity)
    else:
        sim, sim_settings, dynamical_model = build_simulator_TVB_default(
            model_configuration, head.connectivity)

    writer = H5Writer()
    writer.write_simulator_model(
        sim.model, sim.connectivity.number_of_regions,
        os.path.join(config.out.FOLDER_RES,
                     hypname + dynamical_model._ui_name + "_model.h5"))
    sim_output = []
    seeg = []
    if ts_file is not None and os.path.isfile(ts_file):
        logger.info(
            "\n\nLoading previously simulated time series from file: " +
            ts_file)
        sim_output = H5Reader().read_timeseries(ts_file)
        seeg = TimeseriesService().compute_seeg(sim_output.get_source(),
                                                head.sensorsSEEG,
                                                sum_mode=seeg_gain_mode)
    else:
        logger.info("\n\nSimulating...")
        sim_output, status = sim.launch_simulation(
            report_every_n_monitor_steps=100)
        if not status:
            logger.warning("\nSimulation failed!")
        else:
            time = np.array(sim_output.time_line).astype("f")
            logger.info("\n\nSimulated signal return shape: %s",
                        sim_output.shape)
            logger.info("Time: %s - %s", time[0], time[-1])
            sim_output, seeg = compute_seeg_and_write_ts_to_h5(
                sim_output,
                sim.model,
                head.sensorsSEEG,
                ts_file,
                seeg_gain_mode=seeg_gain_mode,
                hpf_flag=hpf_flag,
                hpf_low=hpf_low,
                hpf_high=hpf_high)

    if plotter:
        if not isinstance(plotter, Plotter):
            plotter = Plotter(config)
        # Plot results
        plotter.plot_simulated_timeseries(
            sim_output,
            sim.model,
            lsa_hypothesis.lsa_propagation_indices,
            seeg_list=seeg,
            spectral_raster_plot=False,
            title_prefix=hypname,
            spectral_options={"log_scale": True})

    return {"source": sim_output, "seeg": seeg}, sim
Example #20
0
def main_fit_sim_hyplsa(
        stan_model_name="vep_sde_ins.stan",
        empirical_file="",
        observation_model=OBSERVATION_MODELS.SEEG_LOGPOWER.value,
        sensors_lbls=[],
        sensor_id=0,
        times_on_off=[],
        fitmethod="optimizing",
        pse_flag=True,
        fit_flag=True,
        config=Config(),
        **kwargs):
    def path(name):
        if len(name) > 0:
            return base_path + "_" + name + ".h5"
        else:
            return base_path + ".h5"
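    # path() builds result file names from the per-hypothesis base_path set in the loop
    # below, e.g. path("ModelData") -> "<FOLDER_RES>/<hyp.name>_ModelData.h5".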

    # Prepare necessary services:
    logger = initialize_logger(__name__, config.out.FOLDER_LOGS)
    reader = H5Reader()
    writer = H5Writer()
    plotter = Plotter(config)

    # Read head
    logger.info("Reading from: " + config.input.HEAD)
    head = reader.read_head(config.input.HEAD)
    sensors = head.get_sensors_id(sensor_ids=sensor_id)
    plotter.plot_head(head)

    # Set hypotheses:
    hypotheses = set_hypotheses(head, config)

    # ------------------------------Stan model and service--------------------------------------
    model_code_path = os.path.join(
        config.generic.PROBLSTC_MODELS_PATH,
        stan_model_name if stan_model_name.endswith(".stan") else stan_model_name + ".stan")
    stan_service = CmdStanService(model_name=stan_model_name,
                                  model_code_path=model_code_path,
                                  fitmethod=fitmethod,
                                  config=config)
    stan_service.set_or_compile_model()

    for hyp in hypotheses[:1]:
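        # Note: only the first hypothesis is fitted here (the [:1] slice).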
        base_path = os.path.join(config.out.FOLDER_RES, hyp.name)
        # Set model configuration and compute LSA
        model_configuration, lsa_hypothesis, pse_results = \
            set_model_config_LSA(head, hyp, reader, config, K_unscaled=3*K_DEF, tau1=TAU1_DEF, tau0=TAU0_DEF,
                                 pse_flag=pse_flag, plotter=plotter, writer=writer)

        # -------------------------- Get model_data and observation signals: -------------------------------------------
        # Create model inversion service (stateless)
        problstc_model_file = path("ProblstcModel")
        model_data_file = path("ModelData")
        target_data_file = path("TargetData")
        if os.path.isfile(problstc_model_file) and os.path.isfile(
                model_data_file) and os.path.isfile(target_data_file):
            # Read existing probabilistic model and model data...
            probabilistic_model = reader.read_probabilistic_model(
                problstc_model_file)
            model_data = stan_service.load_model_data_from_file(
                model_data_path=model_data_file)
            target_data = reader.read_timeseries(target_data_file)
        else:
            model_inversion = SDEModelInversionService()

            # ...or generate a new probabilistic model and model data
            probabilistic_model = \
                SDEProbabilisticModelBuilder(model_name="vep_sde_ins.stan", model_config=model_configuration,
                                             parameters=[XModes.X0MODE.value, "sigma_"+XModes.X0MODE.value,
                                                        "x1_init", "z_init", "tau1",  # "tau0", "K",
                                                        "sigma", "dZt", "epsilon", "scale", "offset"],  # "dX1t",
                                             xmode=XModes.X0MODE.value, priors_mode=PriorsModes.NONINFORMATIVE.value,
                                             sde_mode=SDE_MODES.NONCENTERED.value, observation_model=observation_model).\
                                                                                                       generate_model()

            # Update active model's active region nodes
            e_values = pse_results.get("e_values_mean",
                                       model_configuration.e_values)
            lsa_propagation_strength = pse_results.get(
                "lsa_propagation_strengths_mean",
                lsa_hypothesis.lsa_propagation_strengths)
            model_inversion.active_e_th = 0.2
            probabilistic_model = \
                model_inversion.update_active_regions(probabilistic_model, e_values=e_values,
                                                      lsa_propagation_strengths=lsa_propagation_strength, reset=True)

            # Now set and preprocess the target signals:
            if os.path.isfile(empirical_file):
                probabilistic_model.target_data_type = TARGET_DATA_TYPE.EMPIRICAL.value
                # -------------------------- Get empirical data (preprocess edf if necessary) --------------------------
                signals = set_empirical_data(
                    empirical_file,
                    path("ts_empirical"),
                    head,
                    sensors_lbls,
                    sensor_id,
                    probabilistic_model.time_length,
                    times_on_off,
                    label_strip_fun=lambda s: s.split("POL ")[-1],
                    plotter=plotter,
                    title_prefix=hyp.name,
                    bipolar=False)
            else:
                # -------------------------- Get simulated data (simulate if necessary) -------------------------------
                probabilistic_model.target_data_type = TARGET_DATA_TYPE.SYNTHETIC.value
                signals, simulator = \
                    set_simulated_target_data(path("ts"), model_configuration, head, lsa_hypothesis, probabilistic_model,
                                              sensor_id, sim_type="fitting", times_on_off=times_on_off, config=config,
                                              plotter=plotter, title_prefix=hyp.name, bipolar=False, filter_flag=False,
                                              envelope_flag=False, smooth_flag=False, **kwargs)

            # -------------------------- Select and set target data from signals ---------------------------------------
            if probabilistic_model.observation_model in OBSERVATION_MODELS.SEEG.value:
                model_inversion.auto_selection = "correlation-power"
                model_inversion.sensors_per_electrode = 2
            target_data, probabilistic_model, gain_matrix = \
                model_inversion.set_target_data_and_time(signals, probabilistic_model, head=head, sensors=sensors)

            plotter.plot_probabilistic_model(probabilistic_model,
                                             hyp.name + " Probabilistic Model")
            plotter.plot_raster({'Target Signals': target_data.squeezed},
                                target_data.time_line,
                                time_units=target_data.time_unit,
                                title=hyp.name + ' Target Signals raster',
                                offset=0.1,
                                labels=target_data.space_labels)
            plotter.plot_timeseries({'Target Signals': target_data.squeezed},
                                    target_data.time_line,
                                    time_units=target_data.time_unit,
                                    title=hyp.name + ' Target Signals',
                                    labels=target_data.space_labels)

            writer.write_probabilistic_model(probabilistic_model,
                                             model_configuration.number_of_regions,
                                             problstc_model_file)
            writer.write_timeseries(target_data, target_data_file)

            # Construct the stan model data dict:
            model_data = build_stan_model_data_dict(
                probabilistic_model,
                target_data.squeezed,
                model_configuration.model_connectivity,
                gain_matrix,
                time=target_data.time_line)
            # # ...or interface with INS stan models
            # model_data = build_stan_model_data_dict_to_interface_ins(probabilistic_model, target_data.squeezed,
            #                                                          model_configuration.model_connectivity, gain_matrix,
            #                                                          time=target_data.time_line)
            writer.write_dictionary(model_data, model_data_file)

        # -------------------------- Fit and get estimates: ------------------------------------------------------------
        n_chains_or_runs = 4
        output_samples = max(int(np.round(1000.0 / n_chains_or_runs)), 500)
        # Sampling (HMC)
        num_samples = output_samples
        num_warmup = 1000
        max_depth = 12
        delta = 0.9
        # ADVI or optimization:
        iter = 1000000
        tol_rel_obj = 1e-6
        if fitmethod.find("sampl") >= 0:
            skip_samples = num_warmup
        else:
            skip_samples = 0
        prob_model_name = probabilistic_model.name.split(".")[0]
        if fit_flag:
            estimates, samples, summary = stan_service.fit(
                debug=0,
                simulate=0,
                model_data=model_data,
                refresh=1,
                n_chains_or_runs=n_chains_or_runs,
                iter=iter,
                tol_rel_obj=tol_rel_obj,
                num_warmup=num_warmup,
                num_samples=num_samples,
                max_depth=max_depth,
                delta=delta,
                save_warmup=1,
                plot_warmup=1,
                **kwargs)
            writer.write_generic(estimates, path(prob_model_name + "_FitEst"))
            writer.write_generic(samples,
                                 path(prob_model_name + "_FitSamples"))
            if summary is not None:
                writer.write_generic(summary,
                                     path(prob_model_name + "_FitSummary"))
        else:
            estimates, samples, summary = stan_service.read_output()
            if fitmethod.find("sampl") >= 0:
                plotter.plot_HMC(samples,
                                 figure_name=hyp.name + "-" + prob_model_name +
                                 " HMC NUTS trace")

        # Model comparison:
        # scale_signal, offset_signal, time_scale, epsilon, sigma -> 5 (+ K = 6)
        # x0[active] -> probabilistic_model.model.number_of_active_regions
        # x1init[active], zinit[active] -> 2 * probabilistic_model.number_of_active_regions
        # dZt[active, t] -> probabilistic_model.number_of_active_regions * (probabilistic_model.time_length-1)
        number_of_total_params =\
            5 + probabilistic_model.number_of_active_regions * (3 + (probabilistic_model.time_length-1))
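        # Worked example (hypothetical numbers): with 10 active regions and a
        # time_length of 100, this gives 5 + 10 * (3 + 99) = 1025 free parameters.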
        info_crit = \
            stan_service.compute_information_criteria(samples, number_of_total_params, skip_samples=skip_samples,
                                                      # parameters=["amplitude_star", "offset_star", "epsilon_star",
                                                      #                  "sigma_star", "time_scale_star", "x0_star",
                                                      #                  "x_init_star", "z_init_star", "z_eta_star"],
                                                      merge_chains_or_runs_flag=False)

        writer.write_generic(info_crit, path(prob_model_name + "_InfoCrit"))

        Rhat = stan_service.get_Rhat(summary)
        # Interface backwards with INS stan models
        # estimates, samples, Rhat, model_data = \
        #     convert_params_names_from_ins([estimates, samples, Rhat, model_data])
        if fitmethod.find("opt") < 0:
            stats = {"Rhat": Rhat}
        else:
            stats = None

        # -------------------------- Plot fitting results: ------------------------------------------------------------
        # if stan_service.fitmethod.find("opt") < 0:
        plotter.plot_fit_results(
            estimates,
            samples,
            model_data,
            target_data,
            probabilistic_model,
            info_crit,
            stats=stats,
            pair_plot_params=["tau1", "sigma", "epsilon", "scale",
                              "offset"],  #  "K",
            region_violin_params=["x0", "x1_init", "z_init"],
            regions_labels=head.connectivity.region_labels,
            skip_samples=skip_samples,
            title_prefix=hyp.name + "-" + prob_model_name)

        # -------------------------- Reconfigure model after fitting:---------------------------------------------------
        for id_est, est in enumerate(ensure_list(estimates)):
            K = est.get("K", model_configuration.K)
            tau1 = est.get("tau1", model_configuration.tau1)
            tau0 = est.get("tau0", model_configuration.tau0)
            fit_model_configuration_builder = \
                ModelConfigurationBuilder(hyp.number_of_regions, K=K * hyp.number_of_regions, tau1=tau1, tau0=tau0)
            x0_values_fit = model_configuration.x0_values
            x0_values_fit[probabilistic_model.active_regions] = \
                fit_model_configuration_builder._compute_x0_values_from_x0_model(est['x0'])
            hyp_fit = HypothesisBuilder().set_nr_of_regions(head.connectivity.number_of_regions).\
                                          set_name('fit' + str(id_est+1) + "_" + hyp.name).\
                                          set_x0_hypothesis(list(probabilistic_model.active_regions),
                                                            x0_values_fit[probabilistic_model.active_regions]).\
                                          build_hypothesis()
            base_path = os.path.join(config.out.FOLDER_RES, hyp_fit.name)
            writer.write_hypothesis(hyp_fit, path(""))

            model_configuration_fit = \
                fit_model_configuration_builder.build_model_from_hypothesis(hyp_fit,  # est["MC"]
                                                                            model_configuration.model_connectivity)

            writer.write_model_configuration(model_configuration_fit,
                                             path("ModelConfig"))

            # Plot nullclines and equilibria of model configuration
            plotter.plot_state_space(
                model_configuration_fit,
                region_labels=head.connectivity.region_labels,
                special_idx=probabilistic_model.active_regions,
                model="6d",
                zmode="lin",
                figure_name=hyp_fit.name + "_Nullclines and equilibria")
        logger.info("Done!")