def prepare_for_plot(self, connectivity_matrix=None):
    plot_dict_list = []
    width_ratios = []
    if len(self.lsa_propagation_indices) > 0:
        if connectivity_matrix is None:
            width_ratios += [1]
            name = "LSA Propagation Strength"
            names = [name]
            data = [self.lsa_propagation_strengths]
            indices = [self.lsa_propagation_indices]
            plot_types = ["vector"]
        else:
            width_ratios += [1, 2]
            name = "LSA Propagation Strength"
            names = [name, "Afferent connectivity \n from seizuring regions"]
            data = [self.lsa_propagation_strengths, connectivity_matrix]
            indices = [self.lsa_propagation_indices,
                       self.lsa_propagation_indices]
            plot_types = ["vector", "regions2regions"]
        plot_dict_list = dicts_of_lists_to_lists_of_dicts({
            "name": names,
            "data": data,
            "focus_indices": indices,
            "plot_type": plot_types
        })
    return plot_dict_list
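All of these examples rely on a dicts_of_lists_to_lists_of_dicts helper whose implementation is not shown on this page. The minimal sketch below captures the behavior implied by its usage here (a dict of equal-length lists transposed into a list of per-item dicts); it is an assumption inferred from the call sites, not the library's code:

def dicts_of_lists_to_lists_of_dicts(dict_of_lists):
    # {"name": [a, b], "data": [x, y]} ->
    # [{"name": a, "data": x}, {"name": b, "data": y}]
    return [dict(zip(dict_of_lists.keys(), values))
            for values in zip(*dict_of_lists.values())]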
def prepare_for_plot(self,
                     x0_indices=[],
                     e_indices=[],
                     disease_indices=[]):
    names = [
        "Pathological Excitabilities x0_values",
        "Model Epileptogenicities e_values", "x1 Equilibria",
        "z Equilibria", "Total afferent coupling \n at equilibrium"
    ]
    data = [self.x0_values, self.e_values, self.x1eq, self.zeq, self.Ceq]
    disease_indices = np.unique(
        np.concatenate((x0_indices, e_indices, disease_indices),
                       axis=0)).tolist()
    indices = [
        x0_indices, e_indices, disease_indices, disease_indices,
        disease_indices
    ]
    plot_types = [
        "vector", "vector", "vector", "vector", "regions2regions"
    ]
    return dicts_of_lists_to_lists_of_dicts({
        "name": names,
        "data": data,
        "focus_indices": indices,
        "plot_type": plot_types
    })
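The index merge above is worth seeing in isolation. A self-contained sketch of the same step (the index values are made up for illustration):

import numpy as np

# Merge the x0, e and disease index lists and de-duplicate them, exactly as
# prepare_for_plot does above; np.unique also sorts the result.
x0_indices, e_indices, disease_indices = [10], [4, 5], [5]
merged = np.unique(
    np.concatenate((x0_indices, e_indices, disease_indices),
                   axis=0)).tolist()
print(merged)  # [4, 5, 10]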
def prepare_target_stats(distribution, target_stats, loc=0.0, scale=1.0):
    # Make sure that the shapes of the target stats all broadcast against one
    # another (and against loc/scale): multiplying ones arrays of each shape
    # raises a ValueError on any incompatible pair.
    target_shape = np.ones(()) * loc * scale
    target_shape = np.ones(target_shape.shape)
    try:
        for ts in target_stats.values():
            target_shape = target_shape * np.ones(np.array(ts).shape)
    except ValueError:
        raise_value_error(
            "Target statistics (" +
            str([np.array(ts).shape
                 for ts in target_stats.values()]) + ") and distribution (" +
            str(distribution.p_shape) + ") shapes do not propagate!")
    for ts_key in target_stats.keys():
        target_stats[ts_key] *= target_shape
        if np.sum(target_stats[ts_key].shape) > 0:
            target_stats[ts_key] = target_stats[ts_key].flatten()
    target_size = target_shape.size
    target_shape = target_shape.shape
    target_stats_array = np.around(np.vstack(list(target_stats.values())).T,
                                   decimals=2)
    target_stats_unique = np.unique(target_stats_array, axis=0)
    # target_stats_unique = np.vstack({tuple(row) for row in target_stats_array})
    target_stats_unique = dict(
        zip(target_stats.keys(), [
            np.around(target_stats_unique[:, ii], decimals=3)
            for ii in range(distribution.n_params)
        ]))
    target_stats_unique = dicts_of_lists_to_lists_of_dicts(target_stats_unique)
    return target_stats_unique, target_stats_array, target_shape, target_size
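The ones-multiplication at the top of prepare_target_stats is a compact broadcasting check. The same trick in a self-contained sketch (the target_stats values are illustrative):

import numpy as np

# Multiplying ones arrays broadcasts every target-stat shape against the
# others; any incompatible pair raises a ValueError.
target_stats = {"mean": np.array([0.0, 1.0]), "std": np.array([1.0, 2.0])}
target_shape = np.ones(())
for ts in target_stats.values():
    target_shape = target_shape * np.ones(np.array(ts).shape)
print(target_shape.shape)  # (2,) -- the common broadcast shape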
Example #4
    def plot_lsa_eigen_vals_vectors(self,
                                    lsa_service,
                                    lsa_hypothesis,
                                    region_labels=[]):
        fig_name = lsa_hypothesis.name + " " + "Eigen-values-vectors"
        n_subplots = lsa_service.eigen_vectors_number + 1
        plot_types = ["vector"] * n_subplots
        names = ["LSA eigenvalues"]
        data = [lsa_service.eigen_values]
        n_regions = lsa_hypothesis.number_of_regions
        if len(lsa_service.eigen_values) == 2 * n_regions:
            region_labels = numpy.array([
                "-".join(lbl)
                for lbl in (list(zip(n_regions * ["x1"], region_labels)) +
                            list(zip(n_regions * ["z"], region_labels)))
            ])
            index_doubling = lambda index: \
                numpy.concatenate([numpy.array(index), numpy.array(index) + lsa_hypothesis.number_of_regions]).tolist()
            eig_vec = lambda v: numpy.log10(numpy.abs(v))
            name_fun = lambda ii: [
                "log10(abs(LSA eigenvector " + str(ii + 1) + "))"
            ]
        else:
            index_doubling = lambda index: numpy.array(index).tolist()
            eig_vec = lambda v: v
            name_fun = lambda ii: ["LSA eigenvectror " + str(ii + 1)]
        indices = [[]]
        for ii in range(lsa_service.eigen_vectors_number):
            names += name_fun(ii)
            data += [eig_vec(lsa_service.eigen_vectors[:, ii])]
            indices += [index_doubling(lsa_hypothesis.lsa_propagation_indices)]

        plot_dict_list = dicts_of_lists_to_lists_of_dicts({
            "name": names,
            "data": data,
            "focus_indices": indices,
            "plot_type": plot_types
        })
        description = "LSA eigenvalues and first " + str(
            lsa_service.eigen_vectors_number) + " eigenvectors"

        return self.plot_in_columns(
            plot_dict_list,
            region_labels,
            width_ratios=[],
            left_ax_focus_indices=index_doubling(
                lsa_hypothesis.lsa_propagation_indices),
            right_ax_focus_indices=index_doubling(
                lsa_hypothesis.lsa_propagation_indices),
            description=description,
            title=fig_name,
            figure_name=fig_name)
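The index_doubling lambda above maps region indices into a stacked [x1; z] state vector. In isolation (n_regions is illustrative):

import numpy

# When eigenvectors are stacked as [x1-block; z-block], region index i
# selects rows i and i + n_regions.
n_regions = 4
index_doubling = lambda index: numpy.concatenate(
    [numpy.array(index), numpy.array(index) + n_regions]).tolist()
print(index_doubling([1, 3]))  # [1, 3, 5, 7]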
Example #5
def pse_from_lsa_hypothesis(lsa_hypothesis,
                            model_connectivity,
                            region_labels,
                            n_samples,
                            param_range=0.1,
                            global_coupling=[],
                            healthy_regions_parameters=[],
                            model_configuration_builder=None,
                            lsa_service=None,
                            save_flag=False,
                            folder_res=OutputConfig().FOLDER_RES,
                            filename=None,
                            logger=None,
                            **kwargs):
    if logger is None:
        logger = initialize_logger(__name__)
    all_regions_indices = range(lsa_hypothesis.number_of_regions)
    disease_indices = lsa_hypothesis.get_regions_disease_indices()
    healthy_indices = np.delete(all_regions_indices, disease_indices).tolist()
    pse_params = {"path": [], "indices": [], "name": [], "samples": []}
    sampler = StochasticSamplingService(n_samples=n_samples,
                                        random_seed=kwargs.get(
                                            "random_seed", None))
    # First build from the hypothesis the input parameters of the parameter search exploration.
    # These can be either originating from excitability, epileptogenicity or connectivity hypotheses,
    # or they can relate to the global coupling scaling (parameter K of the model configuration)
    for ii in range(len(lsa_hypothesis.x0_values)):
        pse_params["indices"].append([ii])
        pse_params["path"].append("hypothesis.x0_values")
        pse_params["name"].append(
            str(region_labels[lsa_hypothesis.x0_indices[ii]]) +
            " Excitability")

        # Now generate samples using a truncated normal distribution
        pse_params["samples"].append(
            sampler.generate_samples(
                parameter=(
                    lsa_hypothesis.x0_values[ii],  # loc
                    param_range / 3.0),  # scale
                probability_distribution="norm",
                high=MAX_DISEASE_VALUE,
                shape=(1, )))
        # pse_params["samples"].append(
        #     sampler.generate_samples(parameter=(lsa_hypothesis.x0_values[ii] - param_range,  # loc
        #                                         2 * param_range),                            # scale
        #                              probability_distribution="uniform",
        #                              high=MAX_DISEASE_VALUE, shape=(1,)))
    for ii in range(len(lsa_hypothesis.e_values)):
        pse_params["indices"].append([ii])
        pse_params["path"].append("hypothesis.e_values")
        pse_params["name"].append(
            str(region_labels[lsa_hypothesis.e_indices[ii]]) +
            " Epileptogenicity")

        # Now generate samples using a truncated normal distribution
        pse_params["samples"].append(
            sampler.generate_samples(
                parameter=(
                    lsa_hypothesis.e_values[ii],  # loc
                    param_range / 3.0),  # scale
                probability_distribution="norm",
                high=MAX_DISEASE_VALUE,
                shape=(1, )))
        # pse_params["samples"].append(
        #     sampler.generate_samples(parameter=(lsa_hypothesis.e_values[ii] - param_range,  # loc
        #                                         2 * param_range),  # scale
        #                              probability_distribution="uniform",
        #                              high=MAX_DISEASE_VALUE, shape=(1,)))
    for ii in range(len(lsa_hypothesis.w_values)):
        pse_params["indices"].append([ii])
        pse_params["path"].append("hypothesis.w_values")
        inds = linear_index_to_coordinate_tuples(lsa_hypothesis.w_indices[ii],
                                                 model_connectivity.shape)
        if len(inds) == 1:
            pse_params["name"].append(
                str(region_labels[inds[0][0]]) + "-" +
                str(region_labels[inds[0][1]]) + " Connectivity")
        else:
            pse_params["name"].append("Connectivity[" + str(inds) + "]")
        # Now generate samples using a truncated normal distribution
        pse_params["samples"].append(
            sampler.generate_samples(
                parameter=(
                    lsa_hypothesis.w_values[ii],  # loc
                    param_range * lsa_hypothesis.w_values[ii]),  # scale
                probability_distribution="norm",
                low=0.0,
                shape=(1, )))
    kloc = model_configuration_builder.K_unscaled[0]
    for val in global_coupling:
        pse_params["path"].append("model_configuration_builder.K_unscaled")
        inds = val.get("indices", all_regions_indices)
        if np.all(inds == all_regions_indices):
            pse_params["name"].append("Global coupling")
        else:
            pse_params["name"].append("Afferent coupling[" + str(inds) + "]")
        pse_params["indices"].append(inds)

        # Now generate samples using a uniform distribution
        pse_params["samples"].append(
            sampler.generate_samples(
                parameter=(
                    1.0,  # loc
                    100),  # scale
                probability_distribution="uniform",
                low=1.0,
                shape=(1, )))
        # pse_params["samples"].append(
        #     sampler.generate_samples(parameter=(kloc,  # loc
        #                                         30 * param_range),  # scale
        #                              probability_distribution="norm", low=0.0, shape=(1,)))
    pse_params_list = dicts_of_lists_to_lists_of_dicts(pse_params)
    # Add a random jitter to the healthy regions if required...:
    for val in healthy_regions_parameters:
        inds = val.get("indices", healthy_indices)
        name = val.get("name", "x0_values")
        n_params = len(inds)
        samples = sampler.generate_samples(
            parameter=(
                0.0,  # loc
                param_range / 10),  # scale
            probability_distribution="norm",
            shape=(n_params, ))
        for ii in range(n_params):
            pse_params_list.append({
                "path": "model_configuration_builder." + name,
                "samples": samples[ii],
                "indices": [inds[ii]],
                "name": name
            })
    # Now run pse service to generate output samples:
    # pse_old = PSEService("LSA", hypothesis=lsa_hypothesis, params_pse=pse_params_list)
    # pse_results, execution_status = pse_old.run_pse(model_connectivity, grid_mode=False, lsa_service_input=lsa_service,
    #                                             model_configuration_builder_input=model_configuration_builder)
    pse = LSAPSEService(hypothesis=lsa_hypothesis, params_pse=pse_params_list)
    pse_results, execution_status = pse.run_pse(model_connectivity, False,
                                                model_configuration_builder,
                                                lsa_service)
    # Call to new PSEService:
    # pse = LSAPSEService(lsa_hypothesis, pse_params_list)
    # pse_results, execution_status = pse.run_pse(model_connectivity, False, lsa_service, model_configuration_builder)
    pse_results = list_of_dicts_to_dicts_of_ndarrays(pse_results)
    for key in pse_results.keys():
        pse_results[key + "_mean"] = np.mean(pse_results[key], axis=0)
        pse_results[key + "_std"] = np.std(pse_results[key], axis=0)
    if save_flag:
        logger.info(pse.__repr__())
        if not isinstance(filename, str):
            filename = "LSA_PSA"
        writer = H5Writer()
        writer.write_pse_service(
            pse, os.path.join(folder_res, filename + "_pse_service.h5"))
        writer.write_dictionary(pse_results,
                                os.path.join(folder_res, filename + ".h5"))

    return pse_results, pse_params_list
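StochasticSamplingService is not shown on this page. A hedged stand-in for its truncated-normal draws, using scipy.stats.truncnorm (the loc, scale and MAX_DISEASE_VALUE values below are illustrative assumptions, not taken from this page):

import numpy as np
from scipy.stats import truncnorm

# Stand-in for sampler.generate_samples(..., probability_distribution="norm",
# high=MAX_DISEASE_VALUE): a normal distribution clipped on the high side.
MAX_DISEASE_VALUE = 1.5        # illustrative value, not from this page
loc, scale = 0.9, 0.1 / 3.0    # x0_values[ii] and param_range / 3.0 above
b = (MAX_DISEASE_VALUE - loc) / scale  # upper bound in sigma units
samples = truncnorm.rvs(-np.inf, b, loc=loc, scale=scale, size=(1,))
print(samples)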
Example #6
def sensitivity_analysis_pse_from_lsa_hypothesis(n_samples,
                                                 lsa_hypothesis,
                                                 connectivity_matrix,
                                                 model_configuration_builder,
                                                 lsa_service,
                                                 region_labels,
                                                 method="sobol",
                                                 half_range=0.1,
                                                 global_coupling=[],
                                                 healthy_regions_parameters=[],
                                                 save_services=False,
                                                 config=Config(),
                                                 **kwargs):
    logger = initialize_logger(__name__, config.out.FOLDER_LOGS)
    method = method.lower()
    if np.in1d(method, METHODS):
        if np.in1d(method, ["delta", "dgsm"]):
            sampler = "latin"
        elif method == "sobol":
            sampler = "saltelli"
        elif method == "fast":
            sampler = "fast_sampler"
        else:
            sampler = method
    else:
        raise_value_error("Method " + str(method) +
                          " is not one of the available methods " +
                          str(METHODS) + " !")
    all_regions_indices = range(lsa_hypothesis.number_of_regions)
    disease_indices = lsa_hypothesis.regions_disease_indices
    healthy_indices = np.delete(all_regions_indices, disease_indices).tolist()
    pse_params = {"path": [], "indices": [], "name": [], "low": [], "high": []}
    n_inputs = 0
    # First build from the hypothesis the input parameters of the sensitivity analysis.
    # These can be either originating from excitability, epileptogenicity or connectivity hypotheses,
    # or they can relate to the global coupling scaling (parameter K of the model configuration)
    for ii in range(len(lsa_hypothesis.x0_values)):
        n_inputs += 1
        pse_params["indices"].append([ii])
        pse_params["path"].append("hypothesis.x0_values")
        pse_params["name"].append(
            str(region_labels[lsa_hypothesis.x0_indices[ii]]) +
            " Excitability")
        pse_params["low"].append(lsa_hypothesis.x0_values[ii] - half_range)
        pse_params["high"].append(
            np.min(
                [MAX_DISEASE_VALUE,
                 lsa_hypothesis.x0_values[ii] + half_range]))
    for ii in range(len(lsa_hypothesis.e_values)):
        n_inputs += 1
        pse_params["indices"].append([ii])
        pse_params["path"].append("hypothesis.e_values")
        pse_params["name"].append(
            str(region_labels[lsa_hypothesis.e_indices[ii]]) +
            " Epileptogenicity")
        pse_params["low"].append(lsa_hypothesis.e_values[ii] - half_range)
        pse_params["high"].append(
            np.min(
                [MAX_DISEASE_VALUE, lsa_hypothesis.e_values[ii] + half_range]))
    for ii in range(len(lsa_hypothesis.w_values)):
        n_inputs += 1
        pse_params["indices"].append([ii])
        pse_params["path"].append("hypothesis.w_values")
        inds = linear_index_to_coordinate_tuples(lsa_hypothesis.w_indices[ii],
                                                 connectivity_matrix.shape)
        if len(inds) == 1:
            pse_params["name"].append(
                str(region_labels[inds[0][0]]) + "-" +
                str(region_labels[inds[0][1]]) + " Connectivity")
        else:
            pse_params["name"].append("Connectivity[" + str(inds) + "]")
        pse_params["low"].append(
            np.max([lsa_hypothesis.w_values[ii] - half_range, 0.0]))
        pse_params["high"].append(lsa_hypothesis.w_values[ii] + half_range)
    for val in global_coupling:
        n_inputs += 1
        pse_params["path"].append("model.configuration.service.K_unscaled")
        inds = val.get("indices", all_regions_indices)
        if np.all(inds == all_regions_indices):
            pse_params["name"].append("Global coupling")
        else:
            pse_params["name"].append("Afferent coupling[" + str(inds) + "]")
        pse_params["indices"].append(inds)
        pse_params["low"].append(val.get("low", 0.0))
        pse_params["high"].append(val.get("high", 2.0))
    # Now generate samples suitable for sensitivity analysis
    sampler = SalibSamplingService(n_samples=n_samples,
                                   sampler=sampler,
                                   random_seed=kwargs.get("random_seed", None))
    input_samples = sampler.generate_samples(low=pse_params["low"],
                                             high=pse_params["high"],
                                             **kwargs)
    n_samples = input_samples.shape[1]
    pse_params.update(
        {"samples": [np.array(value) for value in input_samples.tolist()]})
    pse_params_list = dicts_of_lists_to_lists_of_dicts(pse_params)
    # Add a random jitter to the healthy regions if required...:
    sampler = ProbabilisticSamplingService(n_samples=n_samples,
                                           random_seed=kwargs.get(
                                               "random_seed", None))
    for val in healthy_regions_parameters:
        inds = val.get("indices", healthy_indices)
        name = val.get("name", "x0_values")
        n_params = len(inds)
        samples = sampler.generate_samples(
            parameter=(
                kwargs.get("loc", 0.0),  # loc
                kwargs.get("scale", 2 * half_range)),  # scale
            probability_distribution="uniform",
            low=0.0,
            shape=(n_params, ))
        for ii in range(n_params):
            pse_params_list.append({
                "path": "model_configuration_builder." + name,
                "samples": samples[ii],
                "indices": [inds[ii]],
                "name": name
            })
    # Now run pse service to generate output samples:
    pse = LSAPSEService(hypothesis=lsa_hypothesis, params_pse=pse_params_list)
    pse_results, execution_status = pse.run_pse(connectivity_matrix, False,
                                                model_configuration_builder,
                                                lsa_service)
    pse_results = list_of_dicts_to_dicts_of_ndarrays(pse_results)
    # Now prepare inputs and outputs and run the sensitivity analysis:
    # NOTE!: Without the jittered healthy regions, which we don't want to include in the sensitivity analysis!
    inputs = dicts_of_lists_to_lists_of_dicts(pse_params)
    outputs = [{
        "names": ["LSA Propagation Strength"],
        "values": pse_results["lsa_propagation_strengths"]
    }]
    sensitivity_analysis_service = SensitivityAnalysisService(
        inputs,
        outputs,
        method=method,
        calc_second_order=kwargs.get("calc_second_order", True),
        conf_level=kwargs.get("conf_level", 0.95))
    results = sensitivity_analysis_service.run(**kwargs)
    if save_services:
        logger.info(pse.__repr__())
        writer = H5Writer()
        writer.write_pse_service(
            pse,
            os.path.join(config.out.FOLDER_RES,
                         method + "_test_pse_service.h5"))
        logger.info(sensitivity_analysis_service.__repr__())
        writer.write_sensitivity_analysis_service(
            sensitivity_analysis_service,
            os.path.join(config.out.FOLDER_RES,
                         method + "_test_sa_service.h5"))
    return results, pse_results
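SensitivityAnalysisService wraps a Sobol analysis; given the "saltelli" sampler chosen above, SALib is the likely backend. A self-contained sketch of that workflow, with a toy model standing in for the LSA propagation strengths (all names and bounds below are illustrative):

import numpy as np
from SALib.sample import saltelli
from SALib.analyze import sobol

# Toy problem: two inputs, as in an x0-excitability + global-coupling search.
problem = {"num_vars": 2,
           "names": ["x0 Excitability", "Global coupling"],
           "bounds": [[-2.5, -1.5], [0.0, 2.0]]}
X = saltelli.sample(problem, 128, calc_second_order=True)
Y = X[:, 0] ** 2 + 0.5 * X[:, 1]  # toy output in place of LSA strengths
Si = sobol.analyze(problem, Y, calc_second_order=True, conf_level=0.95)
print(Si["S1"])  # first-order Sobol indices per input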