Example #1
def main_fit_sim_hyplsa(stats_model_name="vep_original",
                        EMPIRICAL='',
                        times_on_off=[],
                        channel_lbls=[],
                        channel_inds=[]):

    # ------------------------------Model code--------------------------------------
    # Compile or load model:
    model_file = os.path.join(FOLDER_VEP_HOME,
                              stats_model_name + "_stan_model.pkl")
    if os.path.isfile(model_file):
        with open(model_file, 'rb') as f:
            stats_model = pickle.load(f)
    else:
        # vep_original_DP
        if stats_model_name == "vep_dWt":
            model_path = os.path.join(STATISTICAL_MODELS_PATH, "vep_dWt.stan")
        elif stats_model_name == "vep_original_x0":
            model_path = os.path.join(STATISTICAL_MODELS_PATH,
                                      "vep_original_x0.stan")
        else:
            model_path = os.path.join(STATISTICAL_MODELS_PATH,
                                      "vep_original_DP.stan")
        stats_model = compile_model(model_stan_code_path=model_path)
        with open(model_file, 'wb') as f:
            pickle.dump(stats_model, f)

    # -------------------------------Reading data-----------------------------------

    data_folder = os.path.join(DATA_CUSTOM, 'Head')

    reader = Reader()

    logger.info("Reading from: " + data_folder)
    head = reader.read_head(data_folder,
                            seeg_sensors_files=[("SensorsInternal.h5", "")])

    # head.plot()

    if len(channel_inds) > 1:
        channel_inds, _ = get_bipolar_channels(channel_inds, channel_lbls)
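    # Illustrative sketch only (hedged; this is NOT the library's get_bipolar_channels,
    # and the helper name and pairing rule below are assumptions): a bipolar montage
    # is commonly built by pairing consecutive contacts of the same electrode shaft,
    # relabeling e.g. "A1" and "A2" as "A1-A2".
    def _bipolar_sketch(inds, lbls):
        pair_inds, pair_lbls = [], []
        for (i1, l1), (i2, l2) in zip(zip(inds, lbls), zip(inds[1:], lbls[1:])):
            if l1.rstrip("0123456789") == l2.rstrip("0123456789"):  # same shaft?
                pair_inds.append((i1, i2))
                pair_lbls.append(l1 + "-" + l2)
        return pair_inds, pair_lbls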

    # --------------------------Hypothesis definition-----------------------------------

    n_samples = 100

    # # Manual definition of hypothesis...:
    # x0_indices = [20]
    # x0_values = [0.9]
    # e_indices = [70]
    # e_values = [0.9]
    # disease_values = x0_values + e_values
    # disease_indices = x0_indices + e_indices

    # ...or reading a custom file:
    ep_name = "ep_l_frontal_complex"
    # FOLDER_RES = os.path.join(data_folder, ep_name)
    from tvb_epilepsy.custom.readers_custom import CustomReader

    if not isinstance(reader, CustomReader):
        reader = CustomReader()
    disease_values = reader.read_epileptogenicity(data_folder, name=ep_name)
    disease_indices, = np.where(disease_values > np.min([X0_DEF, E_DEF]))
    disease_values = disease_values[disease_indices]
    inds = np.argsort(disease_values)
    disease_values = disease_values[inds]
    disease_indices = disease_indices[inds]
    x0_indices = [disease_indices[-1]]
    x0_values = [disease_values[-1]]
    e_indices = disease_indices[0:-1].tolist()
    e_values = disease_values[0:-1].tolist()
    disease_indices = list(disease_indices)

    n_x0 = len(x0_indices)
    n_e = len(e_indices)
    n_disease = len(disease_indices)
    all_regions_indices = np.array(range(head.number_of_regions))
    healthy_indices = np.delete(all_regions_indices, disease_indices).tolist()
    n_healthy = len(healthy_indices)

    # This is an example of Excitability Hypothesis:
    hyp_x0 = DiseaseHypothesis(
        head.connectivity.number_of_regions,
        excitability_hypothesis={tuple(disease_indices): disease_values},
        epileptogenicity_hypothesis={},
        connectivity_hypothesis={})

    # This is an example of Mixed Hypothesis:
    hyp_x0_E = DiseaseHypothesis(
        head.connectivity.number_of_regions,
        excitability_hypothesis={tuple(x0_indices): x0_values},
        epileptogenicity_hypothesis={tuple(e_indices): e_values},
        connectivity_hypothesis={})

    hyp_E = DiseaseHypothesis(
        head.connectivity.number_of_regions,
        excitability_hypothesis={},
        epileptogenicity_hypothesis={tuple(disease_indices): disease_values},
        connectivity_hypothesis={})

    hypos = (hyp_x0_E, hyp_x0, hyp_E)
    # --------------------------Simulation preparations-----------------------------------
    tau1 = 0.5
    # TODO: maybe use a custom Monitor class
    # Simulation sampling rate that is necessary for the simulation to be stable:
    fs = 10 * 2048.0 * (2 * tau1)
    time_length = 50.0 / tau1  # msecs, the final output nominal time length of the simulation
    report_every_n_monitor_steps = 100.0
    (dt, fsAVG, sim_length, monitor_period, n_report_blocks) = \
        set_time_scales(fs=fs, time_length=time_length, scale_fsavg=1,
                        report_every_n_monitor_steps=report_every_n_monitor_steps)
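    # Rough orientation on the quantities returned above (hedged sketch, not the
    # actual implementation of set_time_scales; all times assumed to be in msec):
    #   dt              ~ 1000.0 / fs                 # integration time step
    #   fsAVG           ~ fs / scale_fsavg            # output (monitor) sampling rate
    #   monitor_period  ~ 1000.0 / fsAVG              # output sampling period
    #   sim_length      ~ time_length                 # total simulated time
    #   n_report_blocks ~ sim_length / (monitor_period * report_every_n_monitor_steps)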

    # Choose model
    # Available models beyond the TVB Epileptor (they all encompass optional variations from the different papers):
    # EpileptorDP: similar to the TVB Epileptor + optional variations,
    # EpileptorDP2D: reduced 2D model, following Proix et al. 2014 + optional variations,
    # EpileptorDPrealistic: starting from the TVB Epileptor + optional variations, but:
    #      -x0, Iext1, Iext2, slope and K become noisy state variables,
    #      -Iext2 and slope are coupled to z, g, or z*g in order for spikes to appear before seizure,
    #      -multiplicative correlated noise is also used
    # Optional variations:
    zmode = "lin"  # by default, or "sig" for the sigmoidal expression for the slow z variable in Proix et al. 2014
    pmode = "z"  # by default, "g" or "z*g" for the feedback coupling to Iext2 and slope for EpileptorDPrealistic

    model_name = "EpileptorDP2D"
    if model_name == "EpileptorDP2D":
        spectral_raster_plot = False
        trajectories_plot = True
    else:
        spectral_raster_plot = False  # "lfp"
        trajectories_plot = False
    # We don't want any time delays for the moment
    # head.connectivity.tract_lengths *= TIME_DELAYS_FLAG

    # --------------------------Hypothesis and LSA-----------------------------------

    for hyp in hypos:

        logger.info("\n\nRunning hypothesis: " + hyp.name)

        # hyp.write_to_h5(FOLDER_RES, hyp.name + ".h5")

        logger.info("\n\nCreating model configuration...")
        model_configuration_service = ModelConfigurationService(
            hyp.number_of_regions, K=10.0)
        # model_configuration_service.write_to_h5(FOLDER_RES, hyp.name + "_model_config_service.h5")

        if hyp.type == "Epileptogenicity":
            model_configuration = model_configuration_service. \
                configure_model_from_E_hypothesis(hyp, head.connectivity.normalized_weights)
        else:
            model_configuration = model_configuration_service. \
                configure_model_from_hypothesis(hyp, head.connectivity.normalized_weights)
        model_configuration.write_to_h5(FOLDER_RES,
                                        hyp.name + "_ModelConfig.h5")

        # Plot nullclines and equilibria of model configuration
        model_configuration_service.plot_nullclines_eq(
            model_configuration,
            head.connectivity.region_labels,
            special_idx=disease_indices,
            model="6d",
            zmode="lin",
            figure_name=hyp.name + "_Nullclines and equilibria")

        logger.info("\n\nRunning LSA...")
        lsa_service = LSAService(eigen_vectors_number=None,
                                 weighted_eigenvector_sum=True)
        lsa_hypothesis = lsa_service.run_lsa(hyp, model_configuration)

        lsa_hypothesis.write_to_h5(FOLDER_RES, lsa_hypothesis.name + "_LSA.h5")
        lsa_service.write_to_h5(FOLDER_RES,
                                lsa_hypothesis.name + "_LSAConfig.h5")

        lsa_service.plot_lsa(lsa_hypothesis, model_configuration,
                             head.connectivity.region_labels, None)

        # ------------------------------Simulation--------------------------------------
        logger.info("\n\nConfiguring simulation...")
        noise_intensity = 10**-2.8
        sim = setup_simulation_from_model_configuration(
            model_configuration,
            head.connectivity,
            dt,
            sim_length,
            monitor_period,
            model_name,
            zmode=np.array(zmode),
            pmode=np.array(pmode),
            noise_instance=None,
            noise_intensity=noise_intensity,
            monitor_expressions=None)
        sim.model.tau1 = tau1
        sim.model.tau0 = 30.0

        # Integrator and initial conditions initialization.
        # By default initial condition is set right on the equilibrium point.
        sim.config_simulation(initial_conditions=None)

        # convert_to_h5_model(sim.model).write_to_h5(FOLDER_RES, lsa_hypothesis.name + "_sim_model.h5")

        if os.path.isfile(EMPIRICAL):

            observation, time, fs = prepare_seeg_observable(EMPIRICAL,
                                                            times_on_off,
                                                            channel_lbls,
                                                            log_flag=True)
            vois_ts_dict = {"time": time, "signals": observation}
            #
            # prepare_seeg_observable(os.path.join(SEEG_data, 'SZ2_0001.edf'), [15.0, 40.0], channel_lbls)

            # prepare_seeg_observable(os.path.join(SEEG_data, 'SZ5_0001.edf'), [20.0, 45.0], channel_lbls)
        else:

            ts_file = os.path.join(FOLDER_VEP_HOME,
                                   lsa_hypothesis.name + "_ts.mat")
            if os.path.isfile(ts_file):
                logger.info("\n\nLoading previously simulated time series...")
                vois_ts_dict = loadmat(ts_file)
            else:
                logger.info("\n\nSimulating...")
                ttavg, tavg_data, status = sim.launch_simulation(
                    n_report_blocks)

                # convert_to_h5_model(sim.simulation_settings).write_to_h5(FOLDER_RES,
                #                                                          lsa_hypothesis.name + "_sim_settings.h5")

                if not status:
                    warning("\nSimulation failed!")

                else:

                    time = np.array(ttavg, dtype='float32').flatten()

                    output_sampling_time = np.mean(np.diff(time))
                    tavg_data = tavg_data[:, :, :, 0]

                    logger.info("\n\nSimulated signal return shape: %s",
                                tavg_data.shape)
                    logger.info("Time: %s - %s", time[0], time[-1])
                    logger.info("Values: %s - %s", tavg_data.min(),
                                tavg_data.max())

                    # Variables of interest in a dictionary:
                    vois_ts_dict = prepare_vois_ts_dict(
                        VOIS[model_name], tavg_data)
                    vois_ts_dict['time'] = time
                    vois_ts_dict['time_units'] = 'msec'

                    vois_ts_dict = compute_seeg_and_write_ts_h5_file(
                        FOLDER_RES,
                        lsa_hypothesis.name + "_ts.h5",
                        sim.model,
                        vois_ts_dict,
                        output_sampling_time,
                        time_length,
                        hpf_flag=True,
                        hpf_low=10.0,
                        hpf_high=512.0,
                        sensor_dicts_list=[head.sensorsSEEG])
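                    # What the call above amounts to, schematically (hedged sketch,
                    # not the actual compute_seeg_and_write_ts_h5_file internals):
                    # project the simulated source signals through each SEEG gain
                    # (mixing) matrix, e.g. seeg = np.dot(gain, source_signals.T),
                    # then optionally high-pass filter between hpf_low and hpf_high Hz
                    # before writing the result to the H5 file.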

                    # Plot results
                    plot_sim_results(sim.model,
                                     lsa_hypothesis.propagation_indices,
                                     lsa_hypothesis.name,
                                     head,
                                     vois_ts_dict,
                                     head.sensorsSEEG.keys(),
                                     hpf_flag=False,
                                     trajectories_plot=trajectories_plot,
                                     spectral_raster_plot=spectral_raster_plot,
                                     log_scale=True)

                    # Optionally save results in mat files
                    savemat(
                        os.path.join(FOLDER_RES,
                                     lsa_hypothesis.name + "_ts.mat"),
                        vois_ts_dict)

        # Get data and observation signals:
        data, active_regions = prepare_data_for_fitting_vep(
            stats_model_name,
            model_configuration,
            lsa_hypothesis,
            fsAVG,
            vois_ts_dict,
            sim.model,
            noise_intensity,
            active_regions=None,
            active_regions_th=0.1,
            euler_method=1,
            observation_model=1,
            channel_inds=channel_inds,
            mixing=list(head.sensorsSEEG.values())[0])
        savemat(
            os.path.join(FOLDER_RES, lsa_hypothesis.name + "_fit_data.mat"),
            data)

        # Fit and get estimates:
        est, fit = stanfit_model(stats_model,
                                 data,
                                 mode="optimizing",
                                 iter=30000)
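        # Note (hedged, depends on how stanfit_model wraps pystan): mode="optimizing"
        # presumably calls Stan's optimizer and returns a point estimate (MAP /
        # penalized maximum likelihood) rather than posterior samples; full Bayesian
        # uncertainty would require a sampling mode instead.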
        savemat(os.path.join(FOLDER_RES, lsa_hypothesis.name + "_fit_est.mat"),
                est)

        plot_fit_results(lsa_hypothesis.name,
                         head,
                         est,
                         data,
                         active_regions,
                         time=vois_ts_dict['time'],
                         seizure_indices=[0, 1],
                         trajectories_plot=True)

        # Reconfigure model after fitting:
        fit_model_configuration_service = ModelConfigurationService(
            hyp.number_of_regions, K=est['K'] * hyp.number_of_regions)

        x0_values_fit = fit_model_configuration_service._compute_x0_values_from_x0_model(
            est['x0'])
        disease_indices = active_regions.tolist()
        hyp_fit = DiseaseHypothesis(
            head.connectivity.number_of_regions,
            excitability_hypothesis={tuple(disease_indices): x0_values_fit},
            epileptogenicity_hypothesis={},
            connectivity_hypothesis={},
            name='fit_' + hyp_x0.name)

        connectivity_matrix_fit = np.array(
            model_configuration.connectivity_matrix)
        # Use np.ix_ so that the assignment writes into connectivity_matrix_fit itself,
        # rather than into a temporary copy created by chained fancy indexing:
        connectivity_matrix_fit[np.ix_(active_regions, active_regions)] = est["FC"]
        model_configuration_fit = fit_model_configuration_service.configure_model_from_hypothesis(
            hyp_fit, connectivity_matrix_fit)
        model_configuration_fit.write_to_h5(FOLDER_RES,
                                            hyp_fit.name + "_ModelConfig.h5")

        # Plot nullclines and equilibria of model configuration
        model_configuration_service.plot_nullclines_eq(
            model_configuration_fit,
            head.connectivity.region_labels,
            special_idx=disease_indices,
            model="6d",
            zmode="lin",
            figure_name=hyp_fit.name + "_Nullclines and equilibria")

        print("Done!")
Example #2
def main_vep(test_write_read=False,
             pse_flag=PSE_FLAG,
             sa_pse_flag=SA_PSE_FLAG,
             sim_flag=SIM_FLAG):

    logger = initialize_logger(__name__)

    # -------------------------------Reading data-----------------------------------

    data_folder = os.path.join(DATA_CUSTOM, 'Head')

    reader = Reader()

    logger.info("Reading from: " + data_folder)
    head = reader.read_head(data_folder)

    head.plot()

    # --------------------------Hypothesis definition-----------------------------------

    n_samples = 100

    # # Manual definition of hypothesis...:
    # x0_indices = [20]
    # x0_values = [0.9]
    # e_indices = [70]
    # e_values = [0.9]
    # disease_values = x0_values + e_values
    # disease_indices = x0_indices + e_indices

    # ...or reading a custom file:
    ep_name = "ep_test1"
    #FOLDER_RES = os.path.join(data_folder, ep_name)
    from tvb_epilepsy.custom.readers_custom import CustomReader

    if not isinstance(reader, CustomReader):
        reader = CustomReader()
    disease_values = reader.read_epileptogenicity(data_folder, name=ep_name)
    disease_indices, = np.where(disease_values > np.min([X0_DEF, E_DEF]))
    disease_values = disease_values[disease_indices]
    if disease_values.size > 1:
        inds_split = np.ceil(disease_values.size * 1.0 / 2).astype("int")
        x0_indices = disease_indices[:inds_split].tolist()
        e_indices = disease_indices[inds_split:].tolist()
        x0_values = disease_values[:inds_split].tolist()
        e_values = disease_values[inds_split:].tolist()
    else:
        x0_indices = disease_indices.tolist()
        x0_values = disease_values.tolist()
        e_indices = []
        e_values = []
    disease_indices = list(disease_indices)

    n_x0 = len(x0_indices)
    n_e = len(e_indices)
    n_disease = len(disease_indices)
    all_regions_indices = np.array(range(head.number_of_regions))
    healthy_indices = np.delete(all_regions_indices, disease_indices).tolist()
    n_healthy = len(healthy_indices)

    # This is an example of Excitability Hypothesis:
    hyp_x0 = DiseaseHypothesis(
        head.connectivity.number_of_regions,
        excitability_hypothesis={tuple(disease_indices): disease_values},
        epileptogenicity_hypothesis={},
        connectivity_hypothesis={})

    # This is an example of Epileptogenicity Hypothesis:
    hyp_E = DiseaseHypothesis(
        head.connectivity.number_of_regions,
        excitability_hypothesis={},
        epileptogenicity_hypothesis={tuple(disease_indices): disease_values},
        connectivity_hypothesis={})

    if len(e_indices) > 0:
        # This is an example of x0_values mixed Excitability and Epileptogenicity Hypothesis:
        hyp_x0_E = DiseaseHypothesis(
            head.connectivity.number_of_regions,
            excitability_hypothesis={tuple(x0_indices): x0_values},
            epileptogenicity_hypothesis={tuple(e_indices): e_values},
            connectivity_hypothesis={})
        hypotheses = (hyp_x0, hyp_E, hyp_x0_E)
    else:
        hypotheses = (hyp_x0, hyp_E)

    # --------------------------Simulation preparations-----------------------------------

    # TODO: maybe use a custom Monitor class
    fs = 2048.0  # this is the simulation sampling rate that is necessary for the simulation to be stable
    time_length = 10000.0  # msec (10 sec), the final output nominal time length of the simulation
    report_every_n_monitor_steps = 100.0
    (dt, fsAVG, sim_length, monitor_period, n_report_blocks) = \
        set_time_scales(fs=fs, time_length=time_length, scale_fsavg=None,
                        report_every_n_monitor_steps=report_every_n_monitor_steps)

    # Choose model
    # Available models beyond the TVB Epileptor (they all encompass optional variations from the different papers):
    # EpileptorDP: similar to the TVB Epileptor + optional variations,
    # EpileptorDP2D: reduced 2D model, following Proix et al. 2014 + optional variations,
    # EpileptorDPrealistic: starting from the TVB Epileptor + optional variations, but:
    #      -x0, Iext1, Iext2, slope and K become noisy state variables,
    #      -Iext2 and slope are coupled to z, g, or z*g in order for spikes to appear before seizure,
    #      -multiplicative correlated noise is also used
    # Optional variations:
    zmode = "lin"  # by default, or "sig" for the sigmoidal expression for the slow z variable in Proix et al. 2014
    pmode = "z"  # by default, "g" or "z*g" for the feedback coupling to Iext2 and slope for EpileptorDPrealistic

    model_name = "EpileptorDPrealistic"
    if model_name == "EpileptorDP2D":
        spectral_raster_plot = False
        trajectories_plot = True
    else:
        spectral_raster_plot = "lfp"
        trajectories_plot = False
    # We don't want any time delays for the moment
    # head.connectivity.tract_lengths *= TIME_DELAYS_FLAG

    # --------------------------Hypothesis and LSA-----------------------------------

    for hyp in hypotheses:

        logger.info("\n\nRunning hypothesis: " + hyp.name)

        # hyp.write_to_h5(FOLDER_RES, hyp.name + ".h5")

        logger.info("\n\nCreating model configuration...")
        model_configuration_service = ModelConfigurationService(
            hyp.number_of_regions)
        model_configuration_service.write_to_h5(
            FOLDER_RES, hyp.name + "_model_config_service.h5")
        if test_write_read:
            logger.info(
                "Written and read model configuration services are identical?: "
                + str(
                    assert_equal_objects(
                        model_configuration_service,
                        read_h5_model(
                            os.path.join(FOLDER_RES, hyp.name +
                                         "_model_config_service.h5")
                        ).convert_from_h5_model(
                            obj=deepcopy(model_configuration_service)),
                        logger=logger)))

        if hyp.type == "Epileptogenicity":
            model_configuration = model_configuration_service. \
                configure_model_from_E_hypothesis(hyp, head.connectivity.normalized_weights)
        else:
            model_configuration = model_configuration_service. \
                configure_model_from_hypothesis(hyp, head.connectivity.normalized_weights)
        model_configuration.write_to_h5(FOLDER_RES,
                                        hyp.name + "_ModelConfig.h5")
        if test_write_read:
            logger.info(
                "Written and read model configuration are identical?: " + str(
                    assert_equal_objects(
                        model_configuration,
                        read_h5_model(
                            os.path.join(
                                FOLDER_RES, hyp.name +
                                "_ModelConfig.h5")).convert_from_h5_model(
                                    obj=deepcopy(model_configuration)),
                        logger=logger)))

        # Plot nullclines and equilibria of model configuration
        model_configuration_service.plot_nullclines_eq(
            model_configuration,
            head.connectivity.region_labels,
            special_idx=disease_indices,
            model="6d",
            zmode="lin",
            figure_name=hyp.name + "_Nullclines and equilibria")

        logger.info("\n\nRunning LSA...")
        lsa_service = LSAService(eigen_vectors_number=None,
                                 weighted_eigenvector_sum=True)
        lsa_hypothesis = lsa_service.run_lsa(hyp, model_configuration)

        lsa_hypothesis.write_to_h5(FOLDER_RES, lsa_hypothesis.name + "_LSA.h5")
        lsa_service.write_to_h5(FOLDER_RES,
                                lsa_hypothesis.name + "_LSAConfig.h5")
        if test_write_read:

            hypothesis_template = DiseaseHypothesis(hyp.number_of_regions)

            logger.info("Written and read LSA services are identical?: " + str(
                assert_equal_objects(
                    lsa_service,
                    read_h5_model(
                        os.path.join(FOLDER_RES, lsa_hypothesis.name +
                                     "_LSAConfig.h5")).convert_from_h5_model(
                                         obj=deepcopy(lsa_service)),
                    logger=logger)))
            logger.info(
                "Written and read LSA hypotheses are identical (input object check)?: "
                + str(
                    assert_equal_objects(
                        lsa_hypothesis,
                        read_h5_model(
                            os.path.join(FOLDER_RES, lsa_hypothesis.name +
                                         "_LSA.h5")).convert_from_h5_model(
                                             obj=deepcopy(lsa_hypothesis),
                                             hypothesis=True),
                        logger=logger)))
            logger.info(
                "Written and read LSA hypotheses are identical (input template check)?: "
                + str(
                    assert_equal_objects(
                        lsa_hypothesis,
                        read_h5_model(
                            os.path.join(FOLDER_RES, lsa_hypothesis.name +
                                         "_LSA.h5")).convert_from_h5_model(
                                             obj=hypothesis_template,
                                             hypothesis=True),
                        logger=logger)))

        lsa_service.plot_lsa(lsa_hypothesis, model_configuration,
                             head.connectivity.region_labels, None)

        if pse_flag:
            #--------------Parameter Search Exploration (PSE)-------------------------------

            logger.info("\n\nRunning PSE LSA...")
            pse_results = pse_from_lsa_hypothesis(
                lsa_hypothesis,
                head.connectivity.normalized_weights,
                head.connectivity.region_labels,
                n_samples,
                half_range=0.1,
                global_coupling=[{
                    "indices": all_regions_indices
                }],
                healthy_regions_parameters=[{
                    "name": "x0_values",
                    "indices": healthy_indices
                }],
                model_configuration_service=model_configuration_service,
                lsa_service=lsa_service,
                logger=logger)[0]
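            # Intuition for the PSE step above (hedged): x0_values of the healthy
            # regions and the global coupling are sampled within +/- half_range of
            # their configured values, LSA is rerun for each sample, and summary
            # statistics of the resulting propagation strengths are collected,
            # e.g. one hypothetical draw:
            #   x0_sample = x0_nominal + np.random.uniform(-0.1, 0.1, len(healthy_indices))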

            lsa_service.plot_lsa(lsa_hypothesis, model_configuration,
                                 head.connectivity.region_labels, pse_results)
            # , show_flag=True, save_flag=False

            convert_to_h5_model(pse_results).write_to_h5(
                FOLDER_RES, lsa_hypothesis.name + "_PSE_LSA_results.h5")
            if test_write_read:
                logger.info(
                    "Written and read sensitivity analysis parameter search results are identical?: "
                    + str(
                        assert_equal_objects(
                            pse_results,
                            read_h5_model(
                                os.path.join(
                                    FOLDER_RES, lsa_hypothesis.name +
                                    "_PSE_LSA_results.h5")
                            ).convert_from_h5_model(),
                            logger=logger)))

        if sa_pse_flag:
            # --------------Sensitivity Analysis Parameter Search Exploration (PSE)-------------------------------

            logger.info("\n\nrunning sensitivity analysis PSE LSA...")
            sa_results, pse_sa_results = \
                sensitivity_analysis_pse_from_lsa_hypothesis(
                    lsa_hypothesis,
                    head.connectivity.normalized_weights,
                    head.connectivity.region_labels,
                    n_samples,
                    method="sobol",
                    half_range=0.1,
                    global_coupling=[{
                        "indices": all_regions_indices,
                        "bounds": [0.0, 2 * model_configuration_service.K_unscaled[0]]
                    }],
                    healthy_regions_parameters=[{
                        "name": "x0_values",
                        "indices": healthy_indices
                    }],
                    model_configuration_service=model_configuration_service,
                    lsa_service=lsa_service,
                    logger=logger)
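            # Note on method="sobol" (general background, not library-specific):
            # Sobol sensitivity analysis decomposes the variance of the LSA output
            # across the sampled inputs; the first-order index of input X_i is
            #   S_i = Var(E[Y | X_i]) / Var(Y),
            # so sa_results ranks how strongly each perturbed parameter drives the result.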

            lsa_service.plot_lsa(lsa_hypothesis,
                                 model_configuration,
                                 head.connectivity.region_labels,
                                 pse_sa_results,
                                 title="SA PSE Hypothesis Overview")
            # , show_flag=True, save_flag=False

            convert_to_h5_model(pse_sa_results).write_to_h5(
                FOLDER_RES, lsa_hypothesis.name + "_SA_PSE_LSA_results.h5")
            convert_to_h5_model(sa_results).write_to_h5(
                FOLDER_RES, lsa_hypothesis.name + "_SA_LSA_results.h5")
            if test_write_read:
                logger.info(
                    "Written and read sensitivity analysis results are identical?: "
                    + str(
                        assert_equal_objects(
                            sa_results,
                            read_h5_model(
                                os.path.join(
                                    FOLDER_RES, lsa_hypothesis.name +
                                    "_SA_LSA_results.h5")
                            ).convert_from_h5_model(),
                            logger=logger)))
                logger.info(
                    "Written and read sensitivity analysis parameter search results are identical?: "
                    + str(
                        assert_equal_objects(
                            pse_sa_results,
                            read_h5_model(
                                os.path.join(
                                    FOLDER_RES, lsa_hypothesis.name +
                                    "_SA_PSE_LSA_results.h5")
                            ).convert_from_h5_model(),
                            logger=logger)))

        if sim_flag:
            # ------------------------------Simulation--------------------------------------
            logger.info("\n\nConfiguring simulation...")
            sim = setup_simulation_from_model_configuration(
                model_configuration,
                head.connectivity,
                dt,
                sim_length,
                monitor_period,
                model_name,
                zmode=np.array(zmode),
                pmode=np.array(pmode),
                noise_instance=None,
                noise_intensity=None,
                monitor_expressions=None)

            # Integrator and initial conditions initialization.
            # By default initial condition is set right on the equilibrium point.
            sim.config_simulation(initial_conditions=None)

            convert_to_h5_model(sim.model).write_to_h5(
                FOLDER_RES, lsa_hypothesis.name + "_sim_model.h5")

            logger.info("\n\nSimulating...")
            ttavg, tavg_data, status = sim.launch_simulation(n_report_blocks)

            convert_to_h5_model(sim.simulation_settings).write_to_h5(
                FOLDER_RES, lsa_hypothesis.name + "_sim_settings.h5")

            if test_write_read:
                logger.info(
                    "Written and read simulation settings are identical?: " +
                    str(
                        assert_equal_objects(
                            sim.simulation_settings,
                            read_h5_model(
                                os.path.join(
                                    FOLDER_RES, lsa_hypothesis.name +
                                    "_sim_settings.h5")).convert_from_h5_model(
                                        obj=deepcopy(sim.simulation_settings)),
                            logger=logger)))

            if not status:
                warning("\nSimulation failed!")

            else:

                time = np.array(ttavg, dtype='float32')

                output_sampling_time = np.mean(np.diff(time))
                tavg_data = tavg_data[:, :, :, 0]

                logger.info("\n\nSimulated signal return shape: %s",
                            tavg_data.shape)
                logger.info("Time: %s - %s", time[0], time[-1])
                logger.info("Values: %s - %s", tavg_data.min(),
                            tavg_data.max())

                # Variables of interest in a dictionary:
                vois_ts_dict = prepare_vois_ts_dict(VOIS[model_name],
                                                    tavg_data)
                vois_ts_dict['time'] = time
                vois_ts_dict['time_units'] = 'msec'

                vois_ts_dict = compute_seeg_and_write_ts_h5_file(
                    FOLDER_RES,
                    lsa_hypothesis.name + "_ts.h5",
                    sim.model,
                    vois_ts_dict,
                    output_sampling_time,
                    time_length,
                    hpf_flag=True,
                    hpf_low=10.0,
                    hpf_high=512.0,
                    sensor_dicts_list=[head.sensorsSEEG])

                # Plot results
                plot_sim_results(sim.model,
                                 lsa_hypothesis.propagation_indices,
                                 lsa_hypothesis.name,
                                 head,
                                 vois_ts_dict,
                                 head.sensorsSEEG.keys(),
                                 hpf_flag=True,
                                 trajectories_plot=trajectories_plot,
                                 spectral_raster_plot=spectral_raster_plot,
                                 log_scale=True)