Example #1
def pse_from_hypothesis(hypothesis,
                        model_connectivity,
                        region_labels,
                        n_samples,
                        param_range=0.1,
                        global_coupling=[],
                        healthy_regions_parameters=[],
                        save_flag=False,
                        folder_res=OutputConfig().FOLDER_RES,
                        filename=None,
                        **kwargs):
    logger = initialize_logger(__name__)
    logger.info("Running hypothesis: " + hypothesis.name)

    # Compute lsa for this hypothesis before the parameter search:
    model_configuration_builder, model_configuration, lsa_service, lsa_hypothesis = \
        start_lsa_run(hypothesis, model_connectivity)
    pse_results, pse_params_list = pse_from_lsa_hypothesis(
        lsa_hypothesis,
        model_connectivity,
        region_labels,
        n_samples,
        param_range,
        global_coupling,
        healthy_regions_parameters,
        model_configuration_builder,
        lsa_service,
        save_flag,
        folder_res=folder_res,
        filename=filename,
        logger=logger,
        **kwargs)
    return model_configuration, lsa_service, lsa_hypothesis, pse_results, pse_params_list
Example #2
def main_pse(config=Config()):
    # -------------------------------Reading data-----------------------------------
    reader = Reader()
    writer = H5Writer()
    head = reader.read_head(config.input.HEAD)
    logger = initialize_logger(__name__, config.out.FOLDER_LOGS)

    # --------------------------Manual Hypothesis definition-----------------------------------
    n_samples = 100
    x0_indices = [20]
    x0_values = [0.9]
    e_indices = [70]
    e_values = [0.9]
    disease_indices = x0_indices + e_indices
    n_disease = len(disease_indices)

    n_x0 = len(x0_indices)
    n_e = len(e_indices)
    all_regions_indices = np.array(range(head.number_of_regions))
    healthy_indices = np.delete(all_regions_indices, disease_indices).tolist()
    n_healthy = len(healthy_indices)
    # This is an example of a mixed Excitability and Epileptogenicity hypothesis:
    hyp_x0_E = HypothesisBuilder(
        head.connectivity.number_of_regions).set_x0_hypothesis(
            x0_indices,
            x0_values).set_e_hypothesis(e_indices,
                                        e_values).build_hypothesis()

    # Now running the parameter search analysis:
    logger.info("running PSE LSA...")
    model_config, lsa_service, lsa_hypothesis, pse_res = pse_from_hypothesis(
        hyp_x0_E,
        head.connectivity.normalized_weights,
        head.connectivity.region_labels,
        n_samples,
        param_range=0.1,
        global_coupling=[{
            "indices": all_regions_indices
        }],
        healthy_regions_parameters=[{
            "name": "x0_values",
            "indices": healthy_indices
        }],
        save_services=True)[:4]

    logger.info("Plotting LSA...")
    Plotter(config).plot_lsa(lsa_hypothesis,
                             model_config,
                             lsa_service.weighted_eigenvector_sum,
                             lsa_service.eigen_vectors_number,
                             region_labels=head.connectivity.region_labels,
                             pse_results=pse_res,
                             lsa_service=lsa_service)

    logger.info("Saving LSA results ...")
    writer.write_dictionary(
        pse_res,
        os.path.join(config.out.FOLDER_RES,
                     lsa_hypothesis.name + "_PSE_LSA_results.h5"))
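
A typical entry point for this script might be (assuming the default Config paths):

if __name__ == "__main__":
    main_pse(Config())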
Example #3
def parse_csv(fname, merge=True):
    if '*' in fname:
        import glob
        return parse_csv(glob.glob(fname), merge=merge)
    if isinstance(fname, (list, tuple)):
        csv = [parse_csv(_) for _ in fname]
        if merge:
            csv = merge_csv_data(*csv)
        return csv

    lines = []
    with open(fname, 'r') as fd:
        for line in fd.readlines():
            if not line.startswith('#'):
                lines.append(line.strip().split(','))
    names = [field.split('.') for field in lines[0]]
    data = []
    for id_line, line in enumerate(lines[1:]):
        append_data = True
        for iline in range(len(line)):
            try:
                line[iline] = float(line[iline])
            except ValueError:
                logger = initialize_logger(__name__)
                logger.warning("Failed to convert string " + line[iline] +
                               " to float!" + "\nSkipping line " + str(id_line) +
                               ":  " + str(line) + "!")
                append_data = False
                break
        if append_data:
            data.append(line)
    data = np.array(data)

    namemap = {}
    maxdims = {}
    for i, name in enumerate(names):
        if name[0] not in namemap:
            namemap[name[0]] = []
        namemap[name[0]].append(i)
        if len(name) > 1:
            maxdims[name[0]] = name[1:]

    for name in maxdims.keys():
        dims = []
        for dim in maxdims[name]:
            dims.append(int(dim))
        maxdims[name] = tuple(reversed(dims))

    # data in linear order per Stan, e.g. mat is col maj
    # TODO array is row maj, how to distinguish matrix vs array[,]?
    data_ = {}
    for name, idx in namemap.items():
        new_shape = (-1, ) + maxdims.get(name, ())
        data_[name] = data[:, idx].reshape(new_shape)

    return data_
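
A minimal usage sketch for parse_csv (the file names are hypothetical; a Stan CSV header field such as "x0.3.2" becomes a key "x0" whose trailing shape is read in reversed order, here (2, 3)):

samples = parse_csv("output_*.csv", merge=True)
for name, arr in samples.items():
    print("%s %s" % (name, str(arr.shape)))  # the first axis indexes the samples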
Example #4
def read_edf(path,
             sensors,
             rois_selection=None,
             label_strip_fun=None,
             time_units="ms"):
    logger = initialize_logger(__name__)

    logger.info("Reading empirical dataset from mne file...")
    raw_data = read_raw_edf(path, preload=True)

    if not callable(label_strip_fun):
        label_strip_fun = lambda label: label

    rois = []
    rois_inds = []
    rois_lbls = []
    if rois_selection is None or len(rois_selection) == 0:
        rois_selection = sensors.labels

    logger.info("Selecting target signals from dataset...")
    for iR, s in enumerate(raw_data.ch_names):
        this_label = label_strip_fun(s)
        this_index = sensors.get_sensors_inds_by_sensors_labels(this_label)
        if this_label in rois_selection or (len(this_index) == 1 and
                                            this_index[0] in rois_selection):
            rois.append(iR)
            rois_inds.append(this_index[0])
            rois_lbls.append(this_label)

    data, times = raw_data[:, :]
    data = data[rois].T
    # Assuming that the edf file's time units are seconds
    if ensure_string(time_units).startswith("ms"):
        times = 1000 * times
    sort_inds = np.argsort(rois_lbls)
    rois = np.array(rois)[sort_inds]
    rois_inds = np.array(rois_inds)[sort_inds]
    rois_lbls = np.array(rois_lbls)[sort_inds]
    data = data[:, sort_inds]

    return data, times, rois, rois_inds, rois_lbls
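
A hedged usage sketch of read_edf (the path, contact labels and label-stripping function are illustrative; the sensors are assumed to come from a loaded Head):

data, times, rois, rois_inds, rois_lbls = read_edf(
    "/path/to/recording.edf", head.get_sensors_id(),
    rois_selection=["A1", "A2"],
    label_strip_fun=lambda label: label.replace("EEG ", "").strip(),
    time_units="ms")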
Example #5
def sensitivity_analysis_pse_from_hypothesis(n_samples,
                                             hypothesis,
                                             connectivity_matrix,
                                             region_labels,
                                             method="sobol",
                                             half_range=0.1,
                                             global_coupling=[],
                                             healthy_regions_parameters=[],
                                             save_services=False,
                                             config=Config(),
                                             model_config_kwargs={},
                                             **kwargs):
    logger = initialize_logger(__name__, config.out.FOLDER_LOGS)
    # Compute lsa for this hypothesis before sensitivity analysis:
    logger.info("Running hypothesis: " + hypothesis.name)
    model_configuration_builder, model_configuration, lsa_service, lsa_hypothesis = \
        start_lsa_run(hypothesis, connectivity_matrix, config, **model_config_kwargs)
    results, pse_results = sensitivity_analysis_pse_from_lsa_hypothesis(
        n_samples, lsa_hypothesis, connectivity_matrix,
        model_configuration_builder, lsa_service, region_labels, method,
        half_range, global_coupling, healthy_regions_parameters, save_services,
        config, **kwargs)
    return model_configuration_builder, model_configuration, lsa_service, lsa_hypothesis, results, pse_results
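
A hedged usage sketch (hyp_x0_E and head are assumed to come from a pipeline such as Example #2):

(model_config_builder, model_config, lsa_service,
 lsa_hyp, results, pse_results) = sensitivity_analysis_pse_from_hypothesis(
    100, hyp_x0_E, head.connectivity.normalized_weights,
    head.connectivity.region_labels, method="sobol", half_range=0.1)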
Example #6
    def __init__(self, number_of_regions=0, config=Config()):
        self.config = config
        self.logger = initialize_logger(__name__, config.out.FOLDER_LOGS)
        self.number_of_regions = number_of_regions
        self.diseased_regions_values = numpy.zeros((self.number_of_regions,))
class ModelInversionService(object):

    logger = initialize_logger(__name__)

    active_regions_selection_methods = ["E", "LSA"]
    active_e_th = 0.1
    active_x0_th = 0.1
    active_lsa_th = None

    def __init__(self):
        self.logger.info("Model Inversion Service instance created!")

    def _repr(self, d=None):
        if d is None:
            d = OrderedDict()
        for key, val in self.__dict__.iteritems():
            d.update({key: val})
        return d

    def __repr__(self, d=None):
        return formal_repr(self, self._repr(d))

    def __str__(self):
        return self.__repr__()

    def update_active_regions_e_values(self,
                                       probabilistic_model,
                                       e_values,
                                       reset=False):
        if reset:
            probabilistic_model.update_active_regions([])
        if len(e_values) > 0:
            probabilistic_model.update_active_regions(
                probabilistic_model.active_regions +
                select_greater_values_array_inds(e_values,
                                                 self.active_e_th).tolist())
        else:
            warning(
                "Skipping active regions setting by E values because no such values were provided!"
            )
        return probabilistic_model

    def update_active_regions_x0_values(self,
                                        probabilistic_model,
                                        x0_values,
                                        reset=False):
        if reset:
            probabilistic_model.update_active_regions([])
        if len(x0_values) > 0:
            probabilistic_model.update_active_regions(
                probabilistic_model.active_regions +
                select_greater_values_array_inds(x0_values,
                                                 self.active_x0_th).tolist())
        else:
            warning(
                "Skipping active regions setting by x0 values because no such values were provided!"
            )
        return probabilistic_model

    def update_active_regions_lsa(self,
                                  probabilistic_model,
                                  lsa_propagation_strengths,
                                  reset=False):
        if reset:
            probabilistic_model.update_active_regions([])
        if len(lsa_propagation_strengths) > 0:
            ps_strengths = lsa_propagation_strengths / np.max(
                lsa_propagation_strengths)
            probabilistic_model.update_active_regions(
                probabilistic_model.active_regions +
                select_greater_values_array_inds(ps_strengths,
                                                 self.active_lsa_th).tolist())
        else:
            self.logger.warning(
                "No LSA results found (empty propagation_strengths vector)!" +
                "\nSkipping setting of active regions according to LSA!")
        return probabilistic_model

    def update_active_regions(self,
                              probabilistic_model,
                              e_values=[],
                              x0_values=[],
                              lsa_propagation_strengths=[],
                              reset=False):
        if reset:
            probabilistic_model.update_active_regions([])
        for m in ensure_list(self.active_regions_selection_methods):
            if isequal_string(m, "E"):
                probabilistic_model = self.update_active_regions_e_values(
                    probabilistic_model, e_values, reset=False)
            elif isequal_string(m, "x0"):
                probabilistic_model = self.update_active_regions_x0_values(
                    probabilistic_model, x0_values, reset=False)
            elif isequal_string(m, "LSA"):
                probabilistic_model = self.update_active_regions_lsa(
                    probabilistic_model,
                    lsa_propagation_strengths,
                    reset=False)
        return probabilistic_model
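
A hedged usage sketch (probabilistic_model, e_values and the LSA hypothesis are assumed to come from earlier pipeline steps):

service = ModelInversionService()
service.active_regions_selection_methods = ["E", "LSA"]
probabilistic_model = service.update_active_regions(
    probabilistic_model, e_values=e_values,
    lsa_propagation_strengths=lsa_hypothesis.lsa_propagation_strengths,
    reset=True)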
Example #8
class SimulatorBuilder(object):
    logger = initialize_logger(__name__)

    simulator = "tvb"
    model_name = "EpileptorDP"

    simulated_period = 2000
    fs = 16384.0
    fs_monitor = 1024.0

    def __init__(self, simulator="tvb"):
        self.simulator = simulator

    def set_model_name(self, model_name):
        if model_name not in AVAILABLE_DYNAMICAL_MODELS_NAMES:
            raise_value_error(model_name +
                              " is not one of the available models: \n" +
                              str(AVAILABLE_DYNAMICAL_MODELS_NAMES) + " !")
        self.model_name = model_name
        return self

    def set_simulated_period(self, simulated_period):
        self.simulated_period = simulated_period
        return self

    def set_fs(self, fs):
        self.fs = fs
        return self

    def set_fs_monitor(self, fs_monitor):
        self.fs_monitor = fs_monitor
        return self

    def set_time_scales(self):
        scale_fsavg = int(numpy.round(self.fs / self.fs_monitor))
        dt = 1000.0 / self.fs
        monitor_period = scale_fsavg * dt
        return dt, monitor_period

    def generate_model(self, model_configuration):
        if isequal_string(self.model_name,
                          EpileptorModel._ui_name) and not isequal_string(
                              self.simulator, "java"):
            raise_value_error(
                "Custom EpileptorModel can be used only with java simulator!")
        elif not isequal_string(self.model_name,
                                EpileptorModel._ui_name) and isequal_string(
                                    self.simulator, "java"):
            raise_value_error(
                "Only java EpileptorModel can be used with java simulator!")
        return model_build_dict[self.model_name](model_configuration)

    def _check_noise_intensity_size(self, noise_intensity):
        nn = len(ensure_list(noise_intensity))
        if nn != 1 and nn != EPILEPTOR_MODEL_NVARS[self.model_name]:
            raise_value_error(
                "Noise intensity is neither of size 1 nor of size equal to the number of model variables, "
                "\n but of size: " + str(nn) + "!")

    def generate_white_noise(self, noise_intensity):
        self._check_noise_intensity_size(noise_intensity)
        noise_instance = noise.Additive(
            nsig=noise_intensity,
            random_stream=numpy.random.RandomState(seed=NOISE_SEED))
        noise_instance.configure_white(dt=1.0 / self.fs)
        return noise_instance

    def generate_colored_noise(self, noise_intensity, ntau, **kwargs):
        self._check_noise_intensity_size(noise_intensity)
        eq = equations.Linear(parameters=kwargs.get("parameters", {
            "a": 1.0,
            "b": 0.0
        }))
        noise_instance = noise.Multiplicative(
            ntau=ntau,
            nsig=noise_intensity,
            b=eq,
            random_stream=numpy.random.RandomState(seed=NOISE_SEED))
        noise_shape = noise_instance.nsig.shape
        noise_instance.configure_coloured(dt=1.0 / self.fs, shape=noise_shape)
        return noise_instance

    def build_sim_settings(self):
        dt, monitor_period = self.set_time_scales()
        return SimulationSettings(
            simulated_period=self.simulated_period,
            integration_step=dt,
            noise_type=WHITE_NOISE,
            noise_ntau=0.0,
            noise_seed=NOISE_SEED,
            noise_intensity=model_noise_intensity_dict[self.model_name],
            monitor_sampling_period=monitor_period,
            monitor_expressions=VOIS[self.model_name])

    def set_noise(self, sim_settings, **kwargs):
        # Check if the user provides a preconfigured noise instance to override
        noise = kwargs.get("noise", None)
        if isinstance(noise, Noise):
            self._check_noise_intensity_size(noise.nsig)
            sim_settings.noise_intensity = noise.nsig
            if isinstance(noise, Additive):
                sim_settings.noise_type = WHITE_NOISE
            elif isinstance(noise, Multiplicative):
                sim_settings.noise_type = COLORED_NOISE
            sim_settings.noise_ntau = noise.ntau
        else:
            if isequal_string(sim_settings.noise_type, COLORED_NOISE):
                noise = self.generate_colored_noise(
                    sim_settings.noise_intensity, sim_settings.noise_ntau,
                    **kwargs)
            else:
                noise = self.generate_white_noise(sim_settings.noise_intensity)
            sim_settings.noise_ntau = noise.ntau
        return noise, sim_settings

    def generate_temporal_average_monitor(self, monitor_sampling_period):
        monitor = TemporalAverage()
        monitor.period = monitor_sampling_period
        return monitor

    def set_monitor(self, model, sim_settings, monitors=None):
        model.variables_of_interest = [
            me.replace('lfp', 'x2 - x1')
            for me in sim_settings.monitor_expressions
        ]
        # Check if the user provides a preconfigured set of monitor instances to override
        if isinstance(monitors, Monitor):
            sim_settings.monitor_sampling_period = monitors.period
            monitors = (monitors, )
        elif isinstance(monitors, (tuple, list)):
            what_to_watch = []
            sim_settings.monitor_sampling_period = []
            for monitor in monitors:
                if isinstance(monitor, Monitor):
                    what_to_watch.append(monitor)
                    sim_settings.monitor_sampling_period.append(monitor.period)
            monitors = tuple(what_to_watch)
        else:
            monitors = (self.generate_temporal_average_monitor(
                sim_settings.monitor_sampling_period), )
        return model, monitors, sim_settings

    def build_simulator_TVB_from_model_sim_settings(self, model_configuration,
                                                    connectivity, model,
                                                    sim_settings, **kwargs):
        model, monitors, sim_settings = self.set_monitor(
            model, sim_settings, kwargs.get("monitors", None))

        noise, sim_settings = self.set_noise(sim_settings, **kwargs)

        simulator_instance = SimulatorTVB(connectivity, model_configuration,
                                          model, sim_settings)
        simulator_instance.config_simulation(noise,
                                             monitors,
                                             initial_conditions=None)

        return simulator_instance, sim_settings, model

    def build_simulator_TVB(self, model_configuration, connectivity, **kwargs):

        model = self.generate_model(model_configuration)

        sim_settings = self.build_sim_settings()

        return self.build_simulator_TVB_from_model_sim_settings(
            model_configuration, connectivity, model, sim_settings, **kwargs)

    def build_simulator_java_from_model_configuration(self,
                                                      model_configuration,
                                                      connectivity, **kwargs):

        self.set_model_name("EpileptorModel")
        model = java_model_builder(model_configuration)

        sim_settings = self.build_sim_settings()
        sim_settings.noise_intensity = kwargs.get("noise_intensity", 1e-6)

        simulator_instance = SimulatorJava(connectivity, model_configuration,
                                           model, sim_settings)

        return simulator_instance, sim_settings, model

    def build_simulator(self, model_configuration, connectivity, **kwargs):
        if isequal_string(self.simulator, "java"):
            return self.build_simulator_java_from_model_configuration(
                model_configuration, connectivity, **kwargs)
        else:
            return self.build_simulator_TVB(model_configuration, connectivity,
                                            **kwargs)
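
A hedged usage sketch of the builder chain (model_configuration and head are assumed to come from earlier steps; the sampling rates are illustrative):

sim, sim_settings, model = (SimulatorBuilder("tvb")
                            .set_model_name("EpileptorDP")
                            .set_fs(4096.0)
                            .set_fs_monitor(512.0)
                            .set_simulated_period(1000)
                            .build_simulator(model_configuration, head.connectivity))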
Example #9
class ABCPSEService(object):
    __metaclass__ = ABCMeta

    logger = initialize_logger(__name__)

    def __init__(self):
        self.params_vals = []
        self.params_paths = []
        self.params_indices = []
        self.params_names = []
        self.n_params_vals = []
        self.n_params = 0

    def run_pse(self, conn_matrix, grid_mode=False, *args):
        results = []
        execution_status = []
        loop_tenth = 1
        for iloop in range(self.n_loops):
            params = self.params_vals[iloop]
            if iloop == 0 or iloop + 1 >= loop_tenth * self.n_loops / 10.0:
                print "\nExecuting loop " + str(iloop + 1) + " of " + str(
                    self.n_loops)
                if iloop > 0:
                    loop_tenth += 1

            status = False
            output = None
            # try:
            status, output = self.run(params, conn_matrix, *args)
            # except:
            #     pass
            # if not status:
            #     self.logger.warning("\nExecution of loop " + str(iloop) + " failed!")
            results.append(output)
            execution_status.append(status)
        if grid_mode:
            results = np.reshape(np.array(results, dtype="O"),
                                 tuple(self.n_params_vals))
            execution_status = np.reshape(np.array(execution_status),
                                          tuple(self.n_params_vals))
        return results, execution_status

    @abstractmethod
    def run_pse_parallel(self):
        # TODO: start each loop on a separate process, gather results and return them
        pass

    @abstractmethod
    def run(self, *args):
        pass

    @abstractmethod
    def prepare_run_results(self, *args):
        pass

    def prepare_params(self, params_pse):
        if isinstance(params_pse, list):
            temp = []
            for param in params_pse:
                self.params_paths.append(param["path"])
                temp2 = param["samples"].flatten()
                temp.append(temp2)
                self.n_params_vals.append(temp2.size)
                indices = param.get("indices", [])
                self.params_indices.append(indices)
                self.params_names.append(
                    param.get("name",
                              param["path"].rsplit('.', 1)[-1] + str(indices)))
            self.n_params_vals = np.array(self.n_params_vals)
            self.n_params = len(self.params_paths)
            if not (np.all(self.n_params_vals == self.n_params_vals[0])):
                raise_value_error(
                    "\nNot all parameters have the same number of samples!: " +
                    "\n" + str(self.params_paths) + " = " +
                    str(self.n_params_vals))
            else:
                self.n_params_vals = self.n_params_vals[0]
            self.params_vals = np.vstack(temp).T
            self.params_paths = np.array(self.params_paths)
            self.params_indices = np.array(self.params_indices)
            self.n_loops = self.params_vals.shape[0]
            print "\nGenerated a parameter search exploration for " + str(
                "lsa/sim task") + ","
            print "with " + str(self.n_params) + " parameters of " + str(
                self.n_params_vals) + " values each,"
            print "leading to " + str(self.n_loops) + " total execution loops"
        else:
            self.logger.warning("\nparams_pse is not a list of tuples!")

    def update_hypo_model_config(
        self,
        hypothesis,
        params,
        conn_matrix,
        model_config_service_input=None,
        yc=YC_DEF,
        Iext1=I_EXT1_DEF,
        K=K_DEF,
        a=A_DEF,
        b=B_DEF,
        tau1=TAU1_DEF,
        tau0=TAU0_DEF,
        x1eq_mode="optimize",
    ):
        # Copy and update hypothesis
        hypo_copy = deepcopy(hypothesis)
        hypo_copy.update_for_pse(params, self.params_paths,
                                 self.params_indices)
        # Create a ModelConfigService and update it
        if isinstance(model_config_service_input, ModelConfigurationBuilder):
            model_configuration_builder = deepcopy(model_config_service_input)
        else:
            model_configuration_builder = ModelConfigurationBuilder(
                hypo_copy.number_of_regions,
                yc=yc,
                Iext1=Iext1,
                K=K,
                a=a,
                b=b,
                tau1=tau1,
                tau0=tau0,
                x1eq_mode=x1eq_mode)
        model_configuration_builder.set_attributes_from_pse(
            params, self.params_paths, self.params_indices)
        # Obtain Modelconfiguration
        if hypo_copy.type == "Epileptogenicity":
            model_configuration = model_configuration_builder.build_model_from_E_hypothesis(
                hypo_copy, conn_matrix)
        else:
            model_configuration = model_configuration_builder.build_model_from_hypothesis(
                hypo_copy, conn_matrix)
        return hypo_copy, model_configuration

    def set_object_attribute_recursively(self, obj, values, path, indices):
        # If there is more than one level...
        if len(path) > 1:
            # ...call the function recursively
            self.set_object_attribute_recursively(getattr(obj, path[0]),
                                                  values, path[1:], indices)
        else:
            # ...else, set the parameter values for the specified indices
            temp = getattr(obj, path[0])
            if len(indices) > 0:
                temp[indices] = values  # indices have to be linear, i.e., 1D
            else:
                temp = values
            setattr(obj, path[0], temp)

    def update_object(self, obj, params, object_type=None):
        if not isinstance(object_type, basestring):
            object_type = obj.__class__.__name__
        for i, path in enumerate(self.params_paths):
            path = path.split(".")
            if path[0] == object_type:
                self.set_object_attribute_recursively(obj, params[i],
                                                      path[1:],
                                                      self.params_indices[i])
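
The params_pse argument of prepare_params is expected to be a list of dicts; a hedged sketch of the format (the paths and samples are illustrative):

params_pse = [
    {"path": "hypothesis.x0_values", "indices": [20],
     "samples": np.linspace(0.8, 1.0, 10)},
    {"path": "model_configuration_builder.K", "indices": [],
     "samples": np.linspace(1.0, 15.0, 10)},
]
pse_service.prepare_params(params_pse)  # pse_service: a concrete ABCPSEService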
Example #10
class SimulatorTVB(ABCSimulator):
    """
    This class is used as a Wrapper over the TVB Simulator.
    It keeps attributes needed in order to create and configure a TVB Simulator object.
    """
    logger = initialize_logger(__name__)

    def __init__(self, connectivity, model_configuration, model,
                 simulation_settings):
        self.model = model
        self.simulation_settings = simulation_settings
        self.model_configuration = model_configuration
        self.connectivity = connectivity

    @staticmethod
    def _vep2tvb_connectivity(vep_conn, model_connectivity=None):
        if model_connectivity is None:
            model_connectivity = vep_conn.normalized_weights
        return connectivity.Connectivity(use_storage=False,
                                         weights=model_connectivity,
                                         tract_lengths=TIME_DELAYS_FLAG *
                                         vep_conn.tract_lengths,
                                         region_labels=vep_conn.region_labels,
                                         centres=vep_conn.centres,
                                         hemispheres=vep_conn.hemispheres,
                                         orientations=vep_conn.orientations,
                                         areas=vep_conn.areas)

    def get_vois(self):
        # TODO: change 'lfp' for 'source'
        return [
            me.replace('x2 - x1', 'source')
            for me in self.simulation_settings.monitor_expressions
        ]

    def config_simulation(self,
                          noise,
                          monitors,
                          initial_conditions=None,
                          **kwargs):

        if isinstance(self.model_configuration.model_connectivity,
                      numpy.ndarray):
            tvb_connectivity = self._vep2tvb_connectivity(
                self.connectivity, self.model_configuration.model_connectivity)
        else:
            tvb_connectivity = self._vep2tvb_connectivity(self.connectivity)

        tvb_coupling = coupling.Difference(a=1.)

        integrator = getattr(integrators,
                             kwargs.get("integrator", "HeunStochastic"))(
                                 dt=self.simulation_settings.integration_step,
                                 noise=noise)

        self.simTVB = simulator.Simulator(
            model=self.model,
            connectivity=tvb_connectivity,
            coupling=tvb_coupling,
            integrator=integrator,
            monitors=monitors,
            simulation_length=self.simulation_settings.simulated_period)
        self.simTVB.configure()

        self.configure_initial_conditions(
            initial_conditions=initial_conditions)

    def launch_simulation(self, report_every_n_monitor_steps=None):
        if report_every_n_monitor_steps is not None and report_every_n_monitor_steps >= 1:
            time_length_avg = numpy.round(
                self.simulation_settings.simulated_period /
                self.simTVB.monitors[0].period)
            n_report_blocks = max(
                report_every_n_monitor_steps *
                numpy.round(time_length_avg / 100), 1.0)
        else:
            n_report_blocks = 1

        self.simTVB._configure_history(
            initial_conditions=self.simTVB.initial_conditions)

        status = True

        if n_report_blocks < 2:
            try:
                tavg_time, tavg_data = self.simTVB.run()[0]

            except Exception, error_message:
                status = False
                self.logger.warning(
                    "Something went wrong with this simulation...:" + "\n" +
                    str(error_message))
                return None, None, status

            return tavg_time, tavg_data, status

        else:
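            # The original example is truncated at this point. What follows is a
            # minimal sketch of the block-wise branch, assuming the configured TVB
            # simulator can be called as a generator that yields one output (or
            # None) per monitor for each block:
            tavg_time, tavg_data = [], []
            block_length = (self.simulation_settings.simulated_period /
                            n_report_blocks)
            for iblock in range(int(n_report_blocks)):
                try:
                    for outputs in self.simTVB(simulation_length=block_length):
                        if outputs is not None and outputs[0] is not None:
                            tavg_time.append(outputs[0][0])
                            tavg_data.append(outputs[0][1])
                except Exception, error_message:
                    status = False
                    self.logger.warning(
                        "Something went wrong with this simulation...:" + "\n" +
                        str(error_message))
                    return None, None, status
                self.logger.info("Simulated block " + str(iblock + 1) + " of " +
                                 str(int(n_report_blocks)))
            return numpy.array(tavg_time), numpy.array(tavg_data), status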
Example #11
    def __init__(self, config=None):
        self.config = config or Config()
        self.logger = initialize_logger(self.__class__.__name__, self.config.out.FOLDER_LOGS)
Example #12
class H5Writer(object):
    logger = initialize_logger(__name__)

    H5_TYPE_ATTRIBUTE = "EPI_Type"
    H5_SUBTYPE_ATTRIBUTE = "EPI_Subtype"

    # TODO: write variants.
    def write_connectivity(self, connectivity, path):
        """
        :param connectivity: Connectivity object to be written in H5
        :param path: H5 path to be written
        """
        h5_file = h5py.File(change_filename_or_overwrite(path),
                            'a',
                            libver='latest')

        h5_file.create_dataset(ConnectivityH5Field.WEIGHTS,
                               data=connectivity.weights)
        h5_file.create_dataset(ConnectivityH5Field.TRACTS,
                               data=connectivity.tract_lengths)
        h5_file.create_dataset(ConnectivityH5Field.CENTERS,
                               data=connectivity.centres)
        h5_file.create_dataset(ConnectivityH5Field.REGION_LABELS,
                               data=connectivity.region_labels)
        h5_file.create_dataset(ConnectivityH5Field.ORIENTATIONS,
                               data=connectivity.orientations)
        h5_file.create_dataset(ConnectivityH5Field.HEMISPHERES,
                               data=connectivity.hemispheres)

        h5_file.attrs.create(self.H5_TYPE_ATTRIBUTE, "Connectivity")
        h5_file.attrs.create("Number_of_regions",
                             str(connectivity.number_of_regions))

        if connectivity.normalized_weights.size > 0:
            dataset = h5_file.create_dataset(
                "normalized_weights/" + ConnectivityH5Field.WEIGHTS,
                data=connectivity.normalized_weights)
            dataset.attrs.create(
                "Operations",
                "Removing diagonal, normalizing with 95th percentile, and ceiling to it"
            )

        self.logger.info("Connectivity has been written to file: %s" % path)
        h5_file.close()

    def write_sensors(self, sensors, path):
        """
        :param sensors: Sensors object to write in H5
        :param path: H5 path to be written
        """
        h5_file = h5py.File(change_filename_or_overwrite(path),
                            'a',
                            libver='latest')

        h5_file.create_dataset(SensorsH5Field.LABELS, data=sensors.labels)
        h5_file.create_dataset(SensorsH5Field.LOCATIONS,
                               data=sensors.locations)
        h5_file.create_dataset(SensorsH5Field.NEEDLES, data=sensors.needles)

        gain_dataset = h5_file.create_dataset(SensorsH5Field.GAIN_MATRIX,
                                              data=sensors.gain_matrix)
        gain_dataset.attrs.create("Max", str(sensors.gain_matrix.max()))
        gain_dataset.attrs.create("Min", str(sensors.gain_matrix.min()))

        h5_file.attrs.create(self.H5_TYPE_ATTRIBUTE, "Sensors")
        h5_file.attrs.create("Number_of_sensors",
                             str(sensors.number_of_sensors))
        h5_file.attrs.create("Sensors_subtype", sensors.s_type)

        self.logger.info("Sensors have been written to file: %s" % path)
        h5_file.close()

    def write_surface(self, surface, path):
        """
        :param surface: Surface object to write in H5
        :param path: H5 path to be written
        """
        h5_file = h5py.File(change_filename_or_overwrite(path),
                            'a',
                            libver='latest')

        h5_file.create_dataset(SurfaceH5Field.VERTICES, data=surface.vertices)
        h5_file.create_dataset(SurfaceH5Field.TRIANGLES,
                               data=surface.triangles)
        h5_file.create_dataset(SurfaceH5Field.VERTEX_NORMALS,
                               data=surface.vertex_normals)

        h5_file.attrs.create(self.H5_TYPE_ATTRIBUTE, "Surface")
        h5_file.attrs.create("Surface_subtype", surface.surface_subtype)
        h5_file.attrs.create("Number_of_triangles", surface.triangles.shape[0])
        h5_file.attrs.create("Number_of_vertices", surface.vertices.shape[0])
        h5_file.attrs.create(
            "Voxel_to_ras_matrix",
            str(surface.vox2ras.flatten().tolist())[1:-1].replace(",", ""))

        self.logger.info("Surface has been written to file: %s" % path)
        h5_file.close()

    def write_head(self, head, path):
        """
        :param head: Head object to be written
        :param path: path to head folder
        """
        self.logger.info("Starting to write Head folder: %s" % head)

        if not (os.path.isdir(path)):
            os.mkdir(path)
        self.write_connectivity(head.connectivity,
                                os.path.join(path, "Connectivity.h5"))
        self.write_surface(head.cortical_surface,
                           os.path.join(path, "CorticalSurface.h5"))
        for sensor_list in (head.sensorsSEEG, head.sensorsEEG,
                            head.sensorsMEG):
            for sensors in sensor_list:
                self.write_sensors(
                    sensors,
                    os.path.join(
                        path, "Sensors%s_%s.h5" %
                        (sensors.s_type, sensors.number_of_sensors)))

        self.logger.info("Successfully wrote Head folder at: %s" % path)

    def write_hypothesis(self, hypothesis, path):
        """
        :param hypothesis: DiseaseHypothesis object to write in H5
        :param path: H5 path to be written
        """
        h5_file = h5py.File(change_filename_or_overwrite(path),
                            'a',
                            libver='latest')

        h5_file.create_dataset("x0_values", data=hypothesis.x0_values)
        h5_file.create_dataset("e_values", data=hypothesis.e_values)
        h5_file.create_dataset("w_values", data=hypothesis.w_values)
        h5_file.create_dataset("lsa_propagation_strengths",
                               data=hypothesis.lsa_propagation_strengths)

        # TODO: change HypothesisModel to GenericModel here and inside Epi
        h5_file.attrs.create(self.H5_TYPE_ATTRIBUTE, "HypothesisModel")
        h5_file.attrs.create(self.H5_SUBTYPE_ATTRIBUTE,
                             hypothesis.__class__.__name__)
        h5_file.attrs.create("number_of_regions", hypothesis.number_of_regions)
        h5_file.attrs.create("type", hypothesis.type)
        h5_file.attrs.create("x0_indices", hypothesis.x0_indices)
        h5_file.attrs.create("e_indices", hypothesis.e_indices)
        h5_file.attrs.create("w_indices", hypothesis.w_indices)
        h5_file.attrs.create("lsa_propagation_indices",
                             hypothesis.lsa_propagation_indices)

        h5_file.close()

    def write_model_configuration(self, model_configuration, path):
        """
        :param model_configuration: ModelConfiguration object to write in H5
        :param path: H5 path to be written
        """
        h5_file = h5py.File(change_filename_or_overwrite(path),
                            'a',
                            libver='latest')

        datasets_dict, metadata_dict = self._determine_datasets_and_attributes(
            model_configuration)

        for key, value in datasets_dict.iteritems():
            h5_file.create_dataset(key, data=value)

        h5_file.attrs.create(self.H5_TYPE_ATTRIBUTE, "HypothesisModel")
        h5_file.attrs.create(self.H5_SUBTYPE_ATTRIBUTE,
                             model_configuration.__class__.__name__)

        for key, value in metadata_dict.iteritems():
            h5_file.attrs.create(key, value)

        h5_file.close()

    def write_model_configuration_builder(self, model_configuration_builder,
                                          path):
        """
        :param model_configuration_builder: ModelConfigurationService object to write in H5
        :param path: H5 path to be written
        """
        h5_file = h5py.File(change_filename_or_overwrite(path),
                            'a',
                            libver='latest')

        datasets_dict, metadata_dict = self._determine_datasets_and_attributes(
            model_configuration_builder)

        for key, value in datasets_dict.iteritems():
            h5_file.create_dataset(key, data=value)

        h5_file.attrs.create(self.H5_TYPE_ATTRIBUTE, "HypothesisModel")
        h5_file.attrs.create(self.H5_SUBTYPE_ATTRIBUTE,
                             model_configuration_builder.__class__.__name__)

        for key, value in metadata_dict.iteritems():
            h5_file.attrs.create(key, value)

        h5_file.close()

    def write_lsa_service(self, lsa_service, path):
        """
        :param lsa_service: LSAService object to write in H5
        :param path: H5 path to be written
        """
        h5_file = h5py.File(change_filename_or_overwrite(path),
                            'a',
                            libver='latest')

        datasets_dict, metadata_dict = self._determine_datasets_and_attributes(
            lsa_service)

        for key, value in datasets_dict.iteritems():
            h5_file.create_dataset(key, data=value)

        h5_file.attrs.create(self.H5_TYPE_ATTRIBUTE, "HypothesisModel")
        h5_file.attrs.create(self.H5_SUBTYPE_ATTRIBUTE,
                             lsa_service.__class__.__name__)

        for key, value in metadata_dict.iteritems():
            h5_file.attrs.create(key, value)

        h5_file.close()

    def write_model_inversion_service(self, model_inversion_service, path):
        """
        :param model_inversion_service: ModelInversionService object to write in H5
        :param path: H5 path to be written
        """
        if getattr(model_inversion_service, "signals_inds", None) is not None:
            model_inversion_service.signals_inds = numpy.array(
                model_inversion_service.signals_inds)

        h5_file = h5py.File(change_filename_or_overwrite(path),
                            'a',
                            libver='latest')

        datasets_dict, metadata_dict = self._determine_datasets_and_attributes(
            model_inversion_service)

        for key, value in datasets_dict.iteritems():
            h5_file.create_dataset(key, data=value)

        h5_file.attrs.create(self.H5_TYPE_ATTRIBUTE, "HypothesisModel")
        h5_file.attrs.create(self.H5_SUBTYPE_ATTRIBUTE,
                             model_inversion_service.__class__.__name__)

        for key, value in metadata_dict.iteritems():
            h5_file.attrs.create(key, value)

        h5_file.close()

    def write_pse_service(self, pse_service, path):
        """
        :param pse_service: PSEService object to write in H5
        :param path: H5 path to be written
        """
        if "params_vals" not in dir(pse_service):
            params_samples = pse_service.pse_params.T
        else:
            params_samples = pse_service.params_vals

        pse_dict = {
            "task": pse_service.task,
            "params_names": pse_service.params_names,
            "params_paths": pse_service.params_paths,
            "params_indices": numpy.array([str(inds) for inds in pse_service.params_indices],
                                          dtype="S"),
            "params_samples": params_samples
        }

        self.write_dictionary(pse_dict, path)

    def write_sensitivity_analysis_service(self, sensitivity_service, path):
        """
        :param sensitivity_service: SensitivityAnalysisService object to write in H5
        :param path: H5 path to be written
        """
        sensitivity_service_dict = {
            "method": sensitivity_service.method,
            "calc_second_order": sensitivity_service.calc_second_order,
            "conf_level": sensitivity_service.conf_level,
            "n_inputs": sensitivity_service.n_inputs,
            "n_outputs": sensitivity_service.n_outputs,
            "input_names": sensitivity_service.input_names,
            "output_names": sensitivity_service.output_names,
            "input_bounds": sensitivity_service.input_bounds,
        }

        self.write_dictionary(sensitivity_service_dict, path)

    def write_dictionary(self, dictionary, path):
        """
        :param dictionary: dictionary to write in H5
        :param path: H5 path to be written
        """
        h5_file = h5py.File(change_filename_or_overwrite(path),
                            'a',
                            libver='latest')

        for key, value in dictionary.iteritems():
            try:
                if isinstance(value, numpy.ndarray) and value.size > 0:
                    h5_file.create_dataset(key, data=value)
                elif isinstance(value, list) and len(value) > 0:
                    h5_file.create_dataset(key, data=value)
                else:
                    h5_file.attrs.create(key, value)
            except Exception:
                self.logger.warning("Did not manage to write " + key +
                                    " to h5 file " + path + " !")

        h5_file.attrs.create(self.H5_TYPE_ATTRIBUTE, "HypothesisModel")
        h5_file.attrs.create(self.H5_SUBTYPE_ATTRIBUTE,
                             dictionary.__class__.__name__)

        h5_file.close()

    # TODO: can this be visualized? should we keep groups?
    def write_simulation_settings(self, simulation_settings, path):
        """
        :param simulation_settings: SimulationSettings object to write in H5
        :param path: H5 path to be written
        """
        h5_file = h5py.File(change_filename_or_overwrite(path),
                            'a',
                            libver='latest')

        datasets_dict, metadata_dict = self._determine_datasets_and_attributes(
            simulation_settings)

        for key, value in datasets_dict.iteritems():
            h5_file.create_dataset(key, data=value)

        h5_file.attrs.create(self.H5_TYPE_ATTRIBUTE, "HypothesisModel")
        h5_file.attrs.create(self.H5_SUBTYPE_ATTRIBUTE,
                             simulation_settings.__class__.__name__)

        for key, value in metadata_dict.iteritems():
            h5_file.attrs.create(key, value)

        h5_file.close()

    def write_ts_seeg_epi(self, seeg_data, sampling_period, path):
        if not os.path.exists(path):
            raise_error(
                "TS file %s does not exist. First define the raw data!" % path,
                self.logger)
        sensors_name = "SeegSensors-" + str(seeg_data.shape[1])

        self.logger.info("Writing a TS at:\n" + path + ", " + sensors_name)
        try:
            h5_file = h5py.File(path, 'a', libver='latest')
            h5_file.create_dataset("/" + sensors_name, data=seeg_data)
            write_metadata(
                {
                    KEY_MAX: seeg_data.max(),
                    KEY_MIN: seeg_data.min(),
                    KEY_STEPS: seeg_data.shape[0],
                    KEY_CHANNELS: seeg_data.shape[1],
                    KEY_SV: 1,
                    KEY_SAMPLING: sampling_period,
                    KEY_START: 0.0
                }, h5_file, KEY_DATE, KEY_VERSION, "/" + sensors_name)
            h5_file.close()
        except Exception, e:
            raise_error(
                str(e) + "\nSeeg dataset already written as " + sensors_name,
                self.logger)
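
A minimal usage sketch (head and config are assumed, e.g., from Example #2):

writer = H5Writer()
writer.write_head(head, os.path.join(config.out.FOLDER_RES, "Head"))
writer.write_dictionary({"a": numpy.ones(3), "b": 2.0},
                        os.path.join(config.out.FOLDER_RES, "example.h5"))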
Example #13
class HeadService(object):
    logger = initialize_logger(__name__)

    def compute_nearest_regions_to_sensors(self, head, sensors=None, target_contacts=None, s_type=Sensors.TYPE_SEEG,
                                           sensors_id=0, n_regions=None, gain_matrix_th=None):
        if not (isinstance(sensors, Sensors)):
            sensors = head.get_sensors_id(s_type=s_type, sensor_ids=sensors_id)
        n_contacts = sensors.labels.shape[0]
        if isinstance(target_contacts, (list, tuple, np.ndarray)):
            target_contacts = ensure_list(target_contacts)
            for itc, tc in enumerate(target_contacts):
                if isinstance(tc, int):
                    continue
                elif isinstance(tc, basestring):
                    target_contacts[itc] = sensors.contact_label_to_index([tc])
                else:
                    raise_value_error("target_contacts[" + str(itc) + "] = " + str(tc) +
                                      "is neither an integer nor a string!")
        else:
            target_contacts = range(n_contacts)
        auto_flag = False
        if n_regions is "all":
            n_regions = head.connectivity.number_of_regions
        elif not (isinstance(n_regions, int)):
            auto_flag = True
        nearest_regions = []
        for tc in target_contacts:
            projs = sensors.gain_matrix[tc]
            inds = np.argsort(projs)[::-1]
            if auto_flag:
                n_regions = select_greater_values_array_inds(projs[inds], threshold=gain_matrix_th)
            inds = inds[:n_regions]
            nearest_regions.append((inds, head.connectivity.region_labels[inds], projs[inds]))
        return nearest_regions

    def select_sensors_power(self, sensors, power, selection=[], power_th=0.5):
        if len(selection) == 0:
            selection = range(sensors.number_of_sensors)
        return (np.array(selection)[select_greater_values_array_inds(power, power_th)]).tolist()

    def select_sensors_rois(self, sensors, rois=None, initial_selection=[], gain_matrix_th=0.5):
        if len(initial_selection) == 0:
            initial_selection = range(sensors.number_of_sensors)
        selection = []
        if sensors.gain_matrix is None:
            raise_value_error("Projection matrix is not set!")
        else:
            for proj in sensors.gain_matrix[initial_selection].T[rois]:
                selection += (
                    np.array(initial_selection)[select_greater_values_array_inds(proj, gain_matrix_th)]).tolist()
        return np.unique(selection).tolist()

    def sensors_in_electrodes_disconnectivity(self, sensors, sensors_labels=[]):
        if len(sensors_labels) < 2:
            sensors_labels = sensors.labels
        n_sensors = len(sensors_labels)
        # Initialize before the conditional so that a defined array is always returned
        disconnectivity = np.ones((n_sensors, n_sensors))
        elec_labels, elec_inds = sensors.group_sensors_to_electrodes(sensors_labels)
        if len(elec_labels) >= 2:
            for ch in elec_inds:
                disconnectivity[np.meshgrid(ch, ch)] = 0.0
        return disconnectivity

    def select_sensors_corr(self, sensors, distance, initial_selection=[], n_electrodes=10, sensors_per_electrode=1,
                            power=None, group_electrodes=False):
        if len(initial_selection) == 0:
            initial_selection = range(sensors.number_of_sensors)
        n_sensors = len(initial_selection)
        if n_sensors > 2:
            initial_selection = np.array(initial_selection)
            distance = 1.0 - distance
            if group_electrodes:
                disconnectivity = self.sensors_in_electrodes_disconnectivity(
                    sensors, sensors.labels[initial_selection])
            else:
                # Assumption: with no electrode grouping, no sensor pair is marked as disconnected
                disconnectivity = np.zeros((n_sensors, n_sensors))
            selection = \
                select_by_hierarchical_group_metric_clustering(distance, disconnectivity, metric=power,
                                                               n_groups=n_electrodes,
                                                               members_per_group=sensors_per_electrode)
            return np.unique(np.hstack(initial_selection[selection])).tolist()
        else:
            self.logger.warning("Number of sensors' left < 2!\n" + "Skipping clustering and returning all of them!")
            return initial_selection
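
A hedged usage sketch (head is assumed loaded via a Reader; the contact labels are illustrative):

hs = HeadService()
nearest = hs.compute_nearest_regions_to_sensors(head, target_contacts=["A1", "A2"],
                                                n_regions=3)
for inds, labels, gains in nearest:
    print("%s %s" % (str(labels), str(gains)))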
Example #14
class StanService(object):
    __metaclass__ = ABCMeta

    logger = initialize_logger(__name__)

    def __init__(self, model_name="", model=None, model_code=None, model_code_path="",
                 model_data_path="", fitmethod="sampling", config=None):
        self.fitmethod = fitmethod
        self.model_name = model_name
        self.model = model
        self.config = config or Config()
        model_dir = self.config.out.FOLDER_RES
        if not (os.path.isdir(model_dir)):
            os.mkdir(model_dir)
        self.model_path = os.path.join(model_dir, self.model_name)
        self.model_code = model_code
        if os.path.isfile(model_code_path):
            self.model_code_path = model_code_path
        else:
            self.model_code_path = self.model_path + ".stan"
        if model_data_path == "":
            self.model_data_path = os.path.join(model_dir, "ModelData.h5")
        self.compilation_time = 0.0

    @abstractmethod
    def compile_stan_model(self, save_model=True, **kwargs):
        pass

    @abstractmethod
    def set_model_from_file(self, **kwargs):
        pass

    @abstractmethod
    def fit(self, model_data, **kwargs):
        pass

    def write_model_data_to_file(self, model_data, reset_path=False, **kwargs):
        model_data_path = kwargs.get("model_data_path", self.model_data_path)
        if reset_path:
            self.model_data_path = model_data_path
        extension = model_data_path.split(".", -1)[-1]
        if isequal_string(extension, "npy"):
            np.save(model_data_path, model_data)
        elif isequal_string(extension, "mat"):
            savemat(model_data_path, model_data)
        elif isequal_string(extension, "pkl"):
            with open(model_data_path, 'wb') as f:
                pickle.dump(model_data, f)
        elif isequal_string(extension, "R"):
            rdump(model_data_path, model_data)
        else:
            H5Writer().write_dictionary(model_data, model_data_path)

    def load_model_data_from_file(self, reset_path=False, **kwargs):
        model_data_path = kwargs.get("model_data_path", self.model_data_path)
        if reset_path:
            self.model_data_path = model_data_path
        extension = model_data_path.split(".", -1)[-1]
        if isequal_string(extension, "R"):
            model_data = rload(model_data_path)
        elif isequal_string(extension, "npy"):
            model_data = np.load(model_data_path).item()
        elif isequal_string(extension, "mat"):
            model_data = loadmat(model_data_path)
        elif isequal_string(extension, "pkl"):
            with open(model_data_path, 'wb') as f:
                model_data = pickle.load(f)
        elif isequal_string(extension, "h5"):
            model_data = H5Reader().read_dictionary(model_data_path)
        else:
            raise_not_implemented_error("model_data file (" + model_data_path +
                                        ") that are not one of (.R, .npy, .mat, .pkl) cannot be read!")
        for key in model_data.keys():
            if key[:3] == "EPI":
                del model_data[key]
        return model_data

    def set_model_data(self, debug=0, simulate=0, **kwargs):
        self.model_data_path = kwargs.get("model_data_path", self.model_data_path)
        model_data = kwargs.pop("model_data", None)
        if not isinstance(model_data, dict):
            model_data = self.load_model_data_from_file(model_data_path=self.model_data_path)
        # -1 for no debugging at all
        # 0 for printing only scalar parameters
        # 1 for printing scalar and vector parameters
        # 2 for printing all (scalar, vector and matrix) parameters
        model_data["DEBUG"] = debug
        # > 0 for simulating without using the input observation data:
        model_data["SIMULATE"] = simulate
        model_data = sort_dict(model_data)
        return model_data

    def set_or_compile_model(self, **kwargs):
        try:
            self.set_model_from_file(**kwargs)
        except Exception:
            self.logger.info("Trying to compile model from file: " +
                             str(self.model_code_path) + "!")
            self.compile_stan_model(save_model=kwargs.get("save_model", True), **kwargs)
        copyfile(self.model_code_path, os.path.join(os.path.dirname(self.model_path),
                                                    os.path.basename(self.model_code_path)))

    def read_output_samples(self, output_filepath, **kwargs):
        samples = ensure_list(parse_csv(output_filepath.replace(".csv", "*"), merge=kwargs.pop("merge_outputs", False)))
        if len(samples) == 1:
            return samples[0]
        return samples

    def compute_estimates_from_samples(self, samples):
        ests = []
        for chain_or_run_samples in ensure_list(samples):
            est = {}
            for pkey, pval in chain_or_run_samples.items():
                try:
                    est[pkey + "_low"], est[pkey], est[pkey + "_std"] = describe(chain_or_run_samples[pkey])[1:4]
                    est[pkey + "_high"] = est[pkey + "_low"][1]
                    est[pkey + "_low"] = est[pkey + "_low"][0]
                    est[pkey + "_std"] = np.sqrt(est[pkey + "_std"])
                    for skey in [pkey, pkey + "_low", pkey + "_high", pkey + "_std"]:
                        est[skey] = np.squeeze(est[skey])
                except Exception:
                    est[pkey] = chain_or_run_samples[pkey]
            ests.append(sort_dict(est))
        if len(ests) == 1:
            return ests[0]
        else:
            return ests


    def compute_information_criteria(self, samples, nparams=None, nsamples=None, ndata=None, parameters=[],
                                     skip_samples=0, merge_chains_or_runs_flag=False, log_like_str='log_likelihood'):

        """

        :param samples: a dictionary of stan outputs or a list of dictionaries for multiple runs/chains
        :param nparams: number of model parameters, it can be inferred from parameters if None
        :param nsamples: number of samples, it can be inferred from loglikelihood if None
        :param ndata: number of data points, it can be inferred from loglikelihood if None
        :param parameters: a list of parameter names, necessary for dic metric computations and in case nparams is None,
                           as well as for aicc, aic and bic computation
        :param merge_chains_or_runs_flag: logical flag for merging seperate chains/runs, default is True
        :param log_like_str: the name of the log likelihood output of stan, default ''log_likelihood
        :return:
        """

        import sys
        sys.path.insert(0, self.config.generic.MODEL_COMPARISON_PATH)
        from information_criteria.ComputeIC import maxlike, aicc, aic, bic, dic, waic
        from information_criteria.ComputePSIS import psisloo


        # if self.fitmethod.find("opt") >= 0:
        #     warning("No model comparison can be computed for optimization method!")
        #     return None

        samples = ensure_list(samples)
        if merge_chains_or_runs_flag and len(samples) > 1:
            samples = ensure_list(merge_samples(samples, skip_samples, flatten=True))
            skip_samples = 0

        results = []
        for sample in samples:
            log_likelihood = -1 * sample[log_like_str][skip_samples:]
            log_lik_shape = log_likelihood.shape
            if len(log_lik_shape) > 1:
                target_shape = log_lik_shape[1:]
            else:
                target_shape = (1,)
            if nsamples is None:
                nsamples = log_lik_shape[0]
            elif nsamples != log_likelihood.shape[0]:
                warning("nsamples (" + str(nsamples) +
                        ") is not equal to likelihood.shape[0] (" + str(log_lik_shape[0]) + ")!")

            log_likelihood = np.reshape(log_likelihood, (log_lik_shape[0], -1))
            if log_likelihood.ndim > 1:
                ndata_real = np.maximum(log_likelihood.shape[1], 1)
            else:
                ndata_real = 1
            if ndata is None:
                ndata = ndata_real
            elif ndata != ndata_real:
                warning("ndata (" + str(ndata) + ") is not equal to likelihood.shape[1] (" + str(ndata_real) + ")!")

            result = maxlike(log_likelihood)

            if len(parameters) == 0:
                parameters = [param for param in sample.keys() if param.find("_star") >= 0]
            if len(parameters) > 0:
                nparams_real = 0
                zscore_params = []
                for p in parameters:
                    pval = sample[p][skip_samples:]
                    pzscore = np.array((pval - np.mean(pval, axis=0)) / np.std(pval, axis=0))
                    if len(pzscore.shape) > 2:
                        pzscore = np.reshape(pzscore, (pzscore.shape[0], -1))
                    zscore_params.append(pzscore)
                    if len(pzscore.shape) > 1:
                        nparams_real += np.maximum(pzscore.shape[1], 1)
                    else:
                        nparams_real += 1
                if nparams is None:
                    nparams = nparams_real
                elif nparams != nparams_real:
                    warning("nparams (" + str(nparams) +
                            ") is not equal to number of parameters included in the dic computation (" +
                            str(nparams_real) + ")!")
                # TODO: find out how to reduce dic to 1 value, from 1 value per parameter. mean(.) for the moment:
                result['dic'] = np.mean(dic(log_likelihood, zscore_params))
            else:
                warning("Parameters' names' list is empty and we found no _star parameters! No computation of dic!")

            if nparams is not None:
                result['aicc'] = aicc(log_likelihood, nparams, ndata)
                result['aic'] = aic(log_likelihood, nparams)
                result['bic'] = bic(log_likelihood, nparams, ndata)
            else:
                warning("Unknown number of parameters! No computation of aic, aaic, bic!")

            result.update(waic(log_likelihood))

            if nsamples > 1:
                result.update(psisloo(log_likelihood))
                result["loos"] = np.reshape(result["loos"], target_shape)
                result["ks"] = np.reshape(result["ks"], target_shape)
            else:
                result.pop('p_waic', None)

            for metric, value in result.items():
                result[metric] = value * np.ones(1,)

            results.append(result)

        if len(results) == 1:
            return results[0]
        else:
            return list_of_dicts_to_dicts_of_ndarrays(results)

    def compare_models(self, samples, nparams=None, nsamples=None, ndata=None, parameters=[],
                       skip_samples=0, merge_chains_or_runs_flag=False, log_like_str='log_likelihood'):

        """

        :param samples: a dictionary of model's names and samples
        :param nparams: a number or list of numbers of parameters,
                       it can be inferred from parameters list or from _star parameters
        :param nsamples: a number or lists of numbers of samples, it can be inferred from loglikelihood if None
        :param ndata: a number or lists of numbers of data point, it can be inferred from loglikelihood if None
        :param parameters: a list (or list of lists) of parameter names,
                          it can be inferred from parameters list or from _star parameters
        :param merge_chains_or_runs_flag: logical flag for merging seperate chains/runs, default is True
        :param log_like_str: the name of the log likelihood output of stan, default ''log_likelihood
        :return:
        """

        def check_number_of_inputs(nmodels, inputs, input_str):
            # Broadcast a single input to all models, or require one input per model:
            inputs = ensure_list(inputs)
            ninput = len(inputs)
            if ninput != nmodels:
                if ninput == 1:
                    inputs *= nmodels
                else:
                    raise_value_error("The size of input " + input_str + " (" + str(ninput) +
                                      ") is neither equal to the number of models (" + str(nmodels) +
                                      ") nor equal to 1!")
            return inputs

        nmodels = len(samples)

        if len(parameters) > 0:
            if isinstance(parameters[0], (list, tuple)):
                parameters = check_number_of_inputs(nmodels, parameters, "number of parameters")
            else:
                parameters = nmodels*[parameters]
        nparams = check_number_of_inputs(nmodels, nparams, "number of parameters")
        nsamples = check_number_of_inputs(nmodels, nsamples, "number of samples")
        ndata = check_number_of_inputs(nmodels, ndata, "number of data points")
        skip_samples = check_number_of_inputs(nmodels, skip_samples, "skip_samples")
        log_like_str = check_number_of_inputs(nmodels, log_like_str, "log_like_str")

        results = {}

        for i_model, (model_name, model_samples) in enumerate(samples.items()):
            results[model_name] = \
               self.compute_information_criteria(model_samples, nparams[i_model], nsamples[i_model], ndata[i_model],
                                                 parameters[i_model], skip_samples[i_model], merge_chains_or_runs_flag,
                                                 log_like_str[i_model])

        # Return result into a dictionary with metrics at the upper level and models at the lower one
        return switch_levels_of_dicts_of_dicts(results)
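
# A minimal sketch of the compare_models input/output contract, using synthetic
# samples (the names, shapes and the `service` instance below are assumptions for
# illustration, not the project's actual fitted outputs):
#
#     import numpy as np
#     samples = {name: {"log_likelihood": np.random.randn(500, 30),
#                       "x0_star": np.random.randn(500, 10)}
#                for name in ("model_A", "model_B")}
#     metrics = service.compare_models(samples, skip_samples=100)
#     metrics["waic"]["model_A"]  # metrics at the upper level, models at the lower one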
Exemplo n.º 15
0
class Head(object):
    """
    One patient virtualization. Fully configured for defining hypothesis on it.
    """
    logger = initialize_logger(__name__)

    def __init__(self,
                 connectivity,
                 cortical_surface,
                 rm={},
                 vm={},
                 t1={},
                 name='',
                 **kwargs):
        self.connectivity = connectivity
        self.cortical_surface = cortical_surface
        self.region_mapping = rm
        self.volume_mapping = vm
        self.t1_background = t1
        self.sensorsSEEG = []
        self.sensorsEEG = []
        self.sensorsMEG = []
        for s_type in Sensors.SENSORS_TYPES:
            self.set_sensors(kwargs.get("sensors" + s_type), s_type=s_type)
        if len(name) == 0:
            self.name = 'Head' + str(self.number_of_regions)
        else:
            self.name = name

    @property
    def number_of_regions(self):
        return self.connectivity.number_of_regions

    def filter_regions(self, filter_arr):
        return self.connectivity.region_labels[filter_arr]

    def __repr__(self):
        d = {
            "1. name": self.name,
            "2. connectivity": self.connectivity,
            "3. RM": reg_dict(self.region_mapping,
                              self.connectivity.region_labels),
            "4. VM": reg_dict(self.volume_mapping,
                              self.connectivity.region_labels),
            "5. surface": self.cortical_surface,
            "6. T1": self.t1_background,
            "7. SEEG": self.sensorsSEEG,
            "8. EEG": self.sensorsEEG,
            "9. MEG": self.sensorsMEG
        }
        return formal_repr(self, sort_dict(d))

    def __str__(self):
        return self.__repr__()

    def get_sensors(self, s_type=Sensors.TYPE_SEEG):
        if np.in1d(s_type.upper(), Sensors.SENSORS_TYPES):
            return getattr(self, "sensors" + s_type)
        else:
            raise_value_error("Invalid input sensor type " + str(s_type))

    def set_sensors(self,
                    input_sensors,
                    s_type=Sensors.TYPE_SEEG,
                    reset=False):
        if input_sensors is None:
            return
        sensors = ensure_list(self.get_sensors(s_type))
        # Start from an empty list when resetting (or when there are no sensors yet):
        if reset or len(sensors) == 0:
            sensors = []
        for s in ensure_list(input_sensors):
            if isinstance(s, Sensors) and (s.s_type == s_type):
                if s.gain_matrix is None or s.gain_matrix.shape != (
                        s.number_of_sensors, self.number_of_regions):
                    self.logger.warning(
                        "No correctly sized gain matrix found in sensors! Computing and adding gain matrix!"
                    )
                    s.gain_matrix = s.compute_gain_matrix(self.connectivity)
                # if s.orientations == None or s.orientations.shape != (s.number_of_sensors, 3):
                #     self.logger.warning("No orientations found in sensors!")
                sensors.append(s)
            else:
                if s is not None:
                    raise_value_error(
                        "Input sensors:\n" + str(s) +
                        "\nis not a valid Sensors object of type " +
                        str(s_type) + "!")
        setattr(self, "sensors" + s_type, sensors)

    def get_sensors_id(self, s_type=Sensors.TYPE_SEEG, sensor_ids=0):
        sensors = self.get_sensors(s_type)
        if sensors is None:
            return sensors
        else:
            out_sensors = []
            sensors = ensure_list(sensors)
            for iS, s in enumerate(sensors):
                if np.in1d(iS, sensor_ids):
                    out_sensors.append(sensors[iS])
            if len(out_sensors) == 0:
                return None
            elif len(out_sensors) == 1:
                return out_sensors[0]
            else:
                return out_sensors
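
# A minimal usage sketch for Head (the reader call, paths and sensor variables are
# assumptions for illustration):
#
#     head = H5Reader().read_head("path/to/Head")
#     print(head)                                  # formal repr of all attributes
#     seeg = head.get_sensors(s_type="SEEG")       # Sensors object(s), or []
#     head.set_sensors(new_seeg_sensors, s_type="SEEG", reset=True)  # replace existing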
Exemplo n.º 16
0
class ProbabilisticSamplingService(SamplingService):
    logger = initialize_logger(__name__)

    def __init__(self, n_samples=10, sampling_module="scipy", random_seed=None):
        super(ProbabilisticSamplingService, self).__init__(n_samples)
        self.random_seed = random_seed
        self.sampling_module = sampling_module.lower()

    def __repr__(self):

        d = {"01. Sampling module": self.sampling_module,
             "02. Sampler": self.sampler,
             "03. Number of samples": self.n_samples,
             "04. Samples' p_shape": self.shape,
             "05. Random seed": self.random_seed,
             }
        return formal_repr(self, d) + "\n06. Resulting statistics: " + dict_str(self.stats)

    def __str__(self):
        return self.__repr__()

    def _truncated_distribution_sampling(self, trunc_limits, size):
        # Following: https://stackoverflow.com/questions/25141250/
        # how-to-truncate-a-numpy-scipy-exponential-distribution-in-an-efficient-way
        # TODO: make the distribution parameters valid for the truncated distribution,
        # instead of for the original one; pystan might be needed for that...
        rnd_cdf = nr.uniform(self.sampler.cdf(x=trunc_limits.get("low", -np.inf)),
                             self.sampler.cdf(x=trunc_limits.get("high", np.inf)),
                             size=size)
        return self.sampler.ppf(q=rnd_cdf)

    def sample(self, parameter=(), loc=0.0, scale=1.0, **kwargs):
        nr.seed(self.random_seed)
        if isinstance(parameter, (ProbabilisticParameterBase, TransformedProbabilisticParameterBase)):
            parameter_shape = parameter.p_shape
            low = parameter.low
            high = parameter.high
            prob_distr = parameter
            loc = parameter.loc
            scale = parameter.scale
        else:
            parameter_shape = kwargs.pop("shape", (1,))
            low = kwargs.pop("low", -CalculusConfig.MAX_SINGLE_VALUE)
            high = kwargs.pop("high", CalculusConfig.MAX_SINGLE_VALUE)
            prob_distr = kwargs.pop("probability_distribution", "uniform")
        low, high = self.check_for_infinite_bounds(low, high)
        low, high, n_outputs, parameter_shape = self.check_size(low, high, parameter_shape)
        self.adjust_shape(parameter_shape)
        out_shape = tuple([self.n_samples] + list(self.shape)[:-1])
        if np.any(low > -CalculusConfig.MAX_SINGLE_VALUE) or np.any(high < CalculusConfig.MAX_SINGLE_VALUE):
            if not(isequal_string(self.sampling_module, "scipy")):
                self.logger.warning("Switching to scipy for truncated distributions' sampling!")
            self.sampling_module = "scipy"
            if isinstance(prob_distr, basestring):
                self.sampler = getattr(ss, prob_distr)(*parameter, **kwargs)
                samples = self._truncated_distribution_sampling({"low": low, "high": high}, out_shape) * scale + loc
            elif isinstance(prob_distr, (ProbabilisticParameterBase, TransformedProbabilisticParameterBase)):
                self.sampler = prob_distr.scipy()
                samples = self._truncated_distribution_sampling({"low": low, "high": high}, out_shape)
        elif self.sampling_module.find("scipy") >= 0:
            if isinstance(prob_distr, basestring):
                self.sampler = getattr(ss, prob_distr)(*parameter, **kwargs)
                samples = self.sampler.rvs(size=out_shape) * scale + loc
            elif isinstance(prob_distr, ProbabilisticParameterBase):
                self.sampler = prob_distr._scipy(**kwargs)
                samples = self.sampler.rvs(size=out_shape)
        elif self.sampling_module.find("numpy") >= 0:
            if isinstance(prob_distr, basestring):
                self.sampler = lambda size: getattr(nr, prob_distr)(*parameter, size=size, **kwargs)
                samples = self.sampler(out_shape) * scale + loc
            elif isinstance(prob_distr, (ProbabilisticParameterBase, TransformedProbabilisticParameterBase)):
                self.sampler = lambda size: prob_distr.numpy(size=size)
                samples = self.sampler(out_shape)
        return samples.T
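
# A standalone, runnable sketch of the inverse-CDF truncation implemented by
# _truncated_distribution_sampling above (only numpy and scipy are assumed):
import numpy as np
import scipy.stats as ss

sampler = ss.norm(loc=0.0, scale=1.0)
# Draw uniform values between the CDF evaluated at the truncation limits...
u = np.random.uniform(sampler.cdf(-1.0), sampler.cdf(2.0), size=1000)
# ...and map them back through the inverse CDF (ppf): all samples lie in [-1, 2].
samples = sampler.ppf(u)
assert samples.min() >= -1.0 and samples.max() <= 2.0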
Exemplo n.º 17
0
import os
from tvb_epilepsy.base.constants.config import Config
from tvb_epilepsy.base.utils.log_error_utils import initialize_logger
from tvb_epilepsy.io.tvb_data_reader import TVBReader
from tvb_epilepsy.io.h5_reader import H5Reader
from tvb_epilepsy.io.h5_writer import H5Writer
from tvb_epilepsy.plot.plotter import Plotter
# input_folder = os.path.join(os.path.expanduser("~"), 'Dropbox', 'Work', 'VBtech', 'VEP', "results", "CC", "TVB3", "tvb")
# head_folder = os.path.join(os.path.expanduser("~"), 'Dropbox', 'Work', 'VBtech', 'VEP', "results", "CC", "TVB3", "Head")
input_folder = os.path.join(os.path.expanduser("~"), 'Dropbox', 'Work',
                            'VBtech', 'VEP', "results", "INS", "JUNCH", "tvb")
head_folder = os.path.join(os.path.expanduser("~"), 'Dropbox', 'Work',
                           'VBtech', 'VEP', "results", "INS", "JUNCH", "Head")
output_folder = os.path.join(os.path.expanduser("~"), 'Dropbox', 'Work',
                             'VBtech', 'VEP', "results", "tests")
config = Config(head_folder=input_folder,
                output_base=output_folder,
                data_mode="tvb")  #, data_mode="java"
config.hypothesis.head_folder = head_folder
config.figures.MATPLOTLIB_BACKEND = "inline"
config.figures.SHOW_FLAG = True
logger = initialize_logger(__name__, config.out.FOLDER_LOGS)
reader = TVBReader() if config.input.IS_TVB_MODE else H5Reader()
writer = H5Writer()
plotter = Plotter(config)

logger.info("Reading from: " + config.input.HEAD)
head = reader.read_head(config.input.HEAD,
                        seeg_sensors_files=[("seeg_xyz.txt", )])
print("OK!")
Exemplo n.º 18
0
def main_vep(config=Config(), sim_type="default", test_write_read=False,
             pse_flag=PSE_FLAG, sa_pse_flag=SA_PSE_FLAG, sim_flag=SIM_FLAG):
    logger = initialize_logger(__name__, config.out.FOLDER_LOGS)
    # -------------------------------Reading data-----------------------------------
    reader = TVBReader() if config.input.IS_TVB_MODE else H5Reader()
    writer = H5Writer()
    logger.info("Reading from: " + config.input.HEAD)
    head = reader.read_head(config.input.HEAD)
    plotter = Plotter(config)
    plotter.plot_head(head)
    if test_write_read:
        writer.write_head(head, os.path.join(config.out.FOLDER_RES, "Head"))
    # --------------------------Hypothesis definition-----------------------------------
    n_samples = 100
    # # Manual definition of hypothesis...:
    # x0_indices = [20]
    # x0_values = [0.9]
    # e_indices = [70]
    # e_values = [0.9]
    # disease_values = x0_values + e_values
    # disease_indices = x0_indices + e_indices
    # ...or reading a custom file:

    hypo_builder = HypothesisBuilder(head.connectivity.number_of_regions, config=config).set_normalize(0.95)

    # This is an example of Epileptogenicity Hypothesis: you give as ep all indices for values > 0
    hyp_E = hypo_builder.build_hypothesis_from_file(EP_NAME, e_indices=[1, 3, 16, 25])
    # print(hyp_E.string_regions_disease(head.connectivity.region_labels))

    # This is an example of Excitability Hypothesis:
    hyp_x0 = hypo_builder.build_hypothesis_from_file(EP_NAME)

    # # This is an example of Mixed Hypothesis set manually by the user:
    # x0_indices = [hyp_x0.x0_indices[-1]]
    # x0_values = [hyp_x0.x0_values[-1]]
    # e_indices = hyp_x0.x0_indices[0:-1].tolist()
    # e_values = hyp_x0.x0_values[0:-1].tolist()
    # hyp_x0_E = hypo_builder.set_x0_hypothesis(x0_indices, x0_values). \
    #                             set_e_hypothesis(e_indices, e_values).build_hypothesis()

    # This is an example of x0_values mixed Excitability and Epileptogenicity Hypothesis set from file:
    all_regions_indices = np.array(range(head.number_of_regions))
    healthy_indices = np.delete(all_regions_indices, hyp_E.x0_indices + hyp_E.e_indices).tolist()
    hyp_x0_E = hypo_builder.build_hypothesis_from_file(EP_NAME, e_indices=[16, 25])

    hypotheses = (hyp_x0_E, hyp_x0, hyp_E)

    # --------------------------Simulation preparations-----------------------------------
    # Choosing a model...
    # Available models beyond the TVB Epileptor (they all encompass optional variations from the different papers):
    # EpileptorDP: similar to the TVB Epileptor + optional variations,
    # EpileptorDP2D: reduced 2D model, following Proix et al. 2014 + optional variations,
    # EpileptorDPrealistic: starting from the TVB Epileptor + optional variations, but:
    #      -x0, Iext1, Iext2, slope and K become noisy state variables,
    #      -Iext2 and slope are coupled to z, g, or z*g in order for spikes to appear before seizure,
    #      -multiplicative correlated noise is also used
    # We don't want any time delays for the moment
    head.connectivity.tract_lengths *= config.simulator.USE_TIME_DELAYS_FLAG
    sim_builder = SimulatorBuilder(config.simulator.MODE)
    if isequal_string(sim_type, "realistic"):
        sim_settings = sim_builder.set_model_name("EpileptorDPrealistic").set_simulated_period(50000).build_sim_settings()
        sim_settings.noise_type = COLORED_NOISE
        sim_settings.noise_ntau = 10
    elif isequal_string(sim_type, "fitting"):
        sim_settings = sim_builder.set_model_name("EpileptorDP2D").build_sim_settings()
        sim_settings.noise_intensity = 1e-3
    elif isequal_string(sim_type, "paper"):
        sim_builder.set_model_name("Epileptor")
        sim_settings = sim_builder.build_sim_settings()
    else:
        sim_settings = sim_builder.build_sim_settings()

    # --------------------------Hypothesis and LSA-----------------------------------
    for hyp in hypotheses:
        logger.info("\n\nRunning hypothesis: " + hyp.name)
        logger.info("\n\nCreating model configuration...")
        builder = ModelConfigurationBuilder(hyp.number_of_regions)

        mcs_file = os.path.join(config.out.FOLDER_RES, hyp.name + "_model_config_service.h5")
        writer.write_model_configuration_builder(builder, mcs_file)
        if test_write_read:
            logger.info("Written and read model configuration services are identical?: " +
                        str(assert_equal_objects(builder, reader.read_model_configuration_builder(mcs_file),
                                                 logger=logger)))

        if hyp.type == "Epileptogenicity":
            model_configuration = builder.build_model_from_E_hypothesis(hyp, head.connectivity.normalized_weights)
        else:
            model_configuration = builder.build_model_from_hypothesis(hyp, head.connectivity.normalized_weights)
        mc_path = os.path.join(config.out.FOLDER_RES, hyp.name + "_ModelConfig.h5")
        writer.write_model_configuration(model_configuration, mc_path)
        if test_write_read:
            logger.info("Written and read model configuration are identical?: " +
                        str(assert_equal_objects(model_configuration, reader.read_model_configuration(mc_path),
                                                 logger=logger)))
        # Plot nullclines and equilibria of model configuration
        plotter.plot_state_space(model_configuration, "6d", head.connectivity.region_labels,
                                 special_idx=hyp_x0.x0_indices + hyp_E.e_indices, zmode="lin",
                                 figure_name=hyp.name + "_StateSpace")

        logger.info("\n\nRunning LSA...")
        lsa_service = LSAService(eigen_vectors_number=None, weighted_eigenvector_sum=True)
        lsa_hypothesis = lsa_service.run_lsa(hyp, model_configuration)

        lsa_path = os.path.join(config.out.FOLDER_RES, lsa_hypothesis.name + "_LSA.h5")
        lsa_config_path = os.path.join(config.out.FOLDER_RES, lsa_hypothesis.name + "_LSAConfig.h5")
        writer.write_hypothesis(lsa_hypothesis, lsa_path)
        writer.write_lsa_service(lsa_service, lsa_config_path)
        if test_write_read:
            logger.info("Written and read LSA services are identical?: " +
                        str(assert_equal_objects(lsa_service, reader.read_lsa_service(lsa_config_path), logger=logger)))
            logger.info("Written and read LSA hypotheses are identical (no input check)?: " +
                        str(assert_equal_objects(lsa_hypothesis, reader.read_hypothesis(lsa_path), logger=logger)))
        plotter.plot_lsa(lsa_hypothesis, model_configuration, lsa_service.weighted_eigenvector_sum,
                         lsa_service.eigen_vectors_number, head.connectivity.region_labels, None)

        if pse_flag:
            # --------------Parameter Search Exploration (PSE)-------------------------------
            logger.info("\n\nRunning PSE LSA...")
            pse_results = pse_from_lsa_hypothesis(lsa_hypothesis,
                                                  head.connectivity.normalized_weights,
                                                  head.connectivity.region_labels,
                                                  n_samples, param_range=0.1,
                                                  global_coupling=[{"indices": all_regions_indices}],
                                                  healthy_regions_parameters=[
                                                      {"name": "x0_values", "indices": healthy_indices}],
                                                  model_configuration_builder=builder,
                                                  lsa_service=lsa_service, logger=logger, save_flag=True)[0]
            plotter.plot_lsa(lsa_hypothesis, model_configuration, lsa_service.weighted_eigenvector_sum,
                             lsa_service.eigen_vectors_number, head.connectivity.region_labels, pse_results)

            pse_lsa_path = os.path.join(config.out.FOLDER_RES, lsa_hypothesis.name + "_PSE_LSA_results.h5")
            writer.write_dictionary(pse_results, pse_lsa_path)
            if test_write_read:
                logger.info("Written and read sensitivity analysis parameter search results are identical?: " +
                            str(assert_equal_objects(pse_results, reader.read_dictionary(pse_lsa_path), logger=logger)))

        if sa_pse_flag:
            # --------------Sensitivity Analysis Parameter Search Exploration (PSE)-------------------------------
            logger.info("\n\nrunning sensitivity analysis PSE LSA...")
            sa_results, pse_sa_results = \
                sensitivity_analysis_pse_from_lsa_hypothesis(lsa_hypothesis,
                                                             head.connectivity.normalized_weights,
                                                             head.connectivity.region_labels,
                                                             n_samples, method="sobol", param_range=0.1,
                                                             global_coupling=[{"indices": all_regions_indices,
                                                                               "bounds": [0.0, 2 *
                                                                                          builder.K_unscaled[
                                                                                              0]]}],
                                                             healthy_regions_parameters=[
                                                                 {"name": "x0_values", "indices": healthy_indices}],
                                                             model_configuration_builder=builder,
                                                             lsa_service=lsa_service, config=config)
            plotter.plot_lsa(lsa_hypothesis, model_configuration, lsa_service.weighted_eigenvector_sum,
                             lsa_service.eigen_vectors_number, head.connectivity.region_labels, pse_sa_results,
                             title="SA PSE Hypothesis Overview")

            sa_pse_path = os.path.join(config.out.FOLDER_RES, lsa_hypothesis.name + "_SA_PSE_LSA_results.h5")
            sa_lsa_path = os.path.join(config.out.FOLDER_RES, lsa_hypothesis.name + "_SA_LSA_results.h5")
            writer.write_dictionary(pse_sa_results, sa_pse_path)
            writer.write_dictionary(sa_results, sa_lsa_path)
            if test_write_read:
                logger.info("Written and read sensitivity analysis results are identical?: " +
                            str(assert_equal_objects(sa_results, reader.read_dictionary(sa_lsa_path), logger=logger)))
                logger.info("Written and read sensitivity analysis parameter search results are identical?: " +
                            str(assert_equal_objects(pse_sa_results, reader.read_dictionary(sa_pse_path),
                                                     logger=logger)))

        if sim_flag:
            # ------------------------------Simulation--------------------------------------
            logger.info("\n\nConfiguring simulation from model_configuration...")
            model = sim_builder.generate_model(model_configuration)
            if isequal_string(sim_type, "realistic"):
                model.tau0 = 30000.0
                model.tau1 = 0.2
                model.slope = 0.25
            elif isequal_string(sim_type, "fitting"):
                model.tau0 = 10.0
                model.tau1 = 0.5
            sim, sim_settings, model = sim_builder.build_simulator_TVB_from_model_sim_settings(model_configuration,
                                                                                 head.connectivity, model, sim_settings)

            # Integrator and initial conditions initialization.
            # By default initial condition is set right on the equilibrium point.
            writer.write_generic(sim.model, config.out.FOLDER_RES, lsa_hypothesis.name + "_sim_model.h5")
            logger.info("\n\nSimulating...")
            ttavg, tavg_data, status = sim.launch_simulation(report_every_n_monitor_steps=100)

            sim_path = os.path.join(config.out.FOLDER_RES, lsa_hypothesis.name + "_sim_settings.h5")
            writer.write_simulation_settings(sim.simulation_settings, sim_path)
            if test_write_read:
                # TODO: find out why it cannot set monitor expressions
                logger.info("Written and read simulation settings are identical?: " +
                            str(assert_equal_objects(sim.simulation_settings,
                                                     reader.read_simulation_settings(sim_path), logger=logger)))
            if not status:
                logger.warning("\nSimulation failed!")
            else:
                time = np.array(ttavg, dtype='float32')
                output_sampling_time = np.mean(np.diff(time))
                tavg_data = tavg_data[:, :, :, 0]
                logger.info("\n\nSimulated signal return shape: %s", tavg_data.shape)
                logger.info("Time: %s - %s", time[0], time[-1])
                logger.info("Values: %s - %s", tavg_data.min(), tavg_data.max())
                # Variables of interest in a dictionary:
                res_ts = prepare_vois_ts_dict(sim_settings.monitor_expressions, tavg_data)
                res_ts['time'] = time
                res_ts['time_units'] = 'msec'
                res_ts = compute_seeg_and_write_ts_h5_file(config.out.FOLDER_RES, lsa_hypothesis.name + "_ts.h5",
                                                                 sim.model, res_ts, output_sampling_time,
                                                                 sim_settings.simulated_period,
                                                                 hpf_flag=True, hpf_low=10.0, hpf_high=512.0,
                                                                 sensors_list=head.sensorsSEEG)
                # Plot results
                if model._ui_name == "EpileptorDP2D":
                    spectral_raster_plot = False
                    trajectories_plot = True
                else:
                    spectral_raster_plot = "lfp"
                    trajectories_plot = False
                #TODO: plotting fails when spectral_raster_plot="lfp". Denis will fix this
                plotter.plot_sim_results(sim.model, lsa_hypothesis.lsa_propagation_indices, res_ts,
                                         head.sensorsSEEG, hpf_flag=True, trajectories_plot=trajectories_plot,
                                         spectral_raster_plot=False, log_scale=True)
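
# A minimal sketch of the band-pass filtering implied by hpf_flag/hpf_low/hpf_high
# above, assuming scipy.signal and a sampling period in msec as computed from the
# simulated time vector (the function name and defaults are illustrative):
import numpy as np
from scipy.signal import butter, filtfilt

def bandpass(data, low_hz, high_hz, sampling_period_msec, order=3):
    fs = 1000.0 / sampling_period_msec  # sampling frequency in Hz
    nyq = fs / 2.0                      # both cutoffs must lie below the Nyquist frequency
    b, a = butter(order, [low_hz / nyq, high_hz / nyq], btype="band")
    return filtfilt(b, a, data, axis=-1)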
Exemplo n.º 19
0
class LSAService(object):
    logger = initialize_logger(__name__)

    def __init__(
            self,
            eigen_vectors_number_selection=CalculusConfig.
        EIGENVECTORS_NUMBER_SELECTION,
            eigen_vectors_number=None,
            weighted_eigenvector_sum=CalculusConfig.WEIGHTED_EIGENVECTOR_SUM,
            normalize_propagation_strength=False):
        self.eigen_vectors_number_selection = eigen_vectors_number_selection
        self.eigen_values = []
        self.eigen_vectors = []
        self.eigen_vectors_number = eigen_vectors_number
        self.weighted_eigenvector_sum = weighted_eigenvector_sum
        self.normalize_propagation_strength = normalize_propagation_strength

    def __repr__(self):
        d = {
            "01. Eigenvectors' number selection mode":
            self.eigen_vectors_number_selection,
            "02. Eigen values":
            self.eigen_values,
            "03. Eigenvectors":
            self.eigen_vectors,
            "04. Eigenvectors' number":
            self.eigen_vectors_number,
            "05. Weighted eigenvector's sum flag":
            str(self.weighted_eigenvector_sum)
        }
        return formal_repr(self, d)

    def __str__(self):
        return self.__repr__()

    def set_attribute(self, attr_name, data):
        setattr(self, attr_name, data)

    def get_curve_elbow_point(self, values_array):
        return curve_elbow_point(values_array)

    def _ensure_eigen_vectors_number(self, eigen_values, e_values, x0_values,
                                     disease_indices):
        if self.eigen_vectors_number is None:
            if self.eigen_vectors_number_selection == "auto_eigenvals":
                self.eigen_vectors_number = self.get_curve_elbow_point(
                    numpy.abs(eigen_values)) + 1

            elif self.eigen_vectors_number_selection == "auto_disease":
                self.eigen_vectors_number = len(disease_indices)

            elif self.eigen_vectors_number_selection == "auto_epileptogenicity":
                self.eigen_vectors_number = self.get_curve_elbow_point(
                    e_values) + 1

            elif self.eigen_vectors_number_selection == "auto_excitability":
                self.eigen_vectors_number = self.get_curve_elbow_point(
                    x0_values) + 1

            else:
                raise_value_error(
                    "\n" + self.eigen_vectors_number_selection +
                    " is not a valid option for the automatic computation of eigen_vectors_number!"
                )
        else:
            self.eigen_vectors_number_selection = "user_defined"

    def _compute_jacobian(self, model_configuration):

        # Check if any of the equilibria lie in the supercritical regime (beyond the separatrix)
        # and, if so, reset them just before the bifurcation.
        zEQ = model_configuration.zEQ
        temp = model_configuration.x1EQ > X1_EQ_CR_DEF - 10**(-3)
        if temp.any():
            correction_value = X1_EQ_CR_DEF - 10**(-3)
            self.logger.warning(
                "Equilibria x1EQ[" + str(numpy.where(temp)[0]) + "]  = " +
                str(model_configuration.x1EQ[temp]) +
                "\nwere corrected for LSA to value: X1_EQ_CR_DEF - 10 ** (-3) = "
                + str(correction_value) + " to be sub-critical!")
            model_configuration.x1EQ[temp] = correction_value
            i_temp = numpy.ones(model_configuration.x1EQ.shape)
            zEQ[temp] = calc_eq_z(model_configuration.x1EQ[temp],
                                  model_configuration.yc * i_temp[temp],
                                  model_configuration.Iext1 * i_temp[temp],
                                  "2d", 0.0,
                                  model_configuration.slope * i_temp[temp],
                                  model_configuration.a * i_temp[temp],
                                  model_configuration.b * i_temp[temp],
                                  model_configuration.d * i_temp[temp])

        fz_jacobian = calc_fz_jac_square_taylor(
            model_configuration.zEQ, model_configuration.yc,
            model_configuration.Iext1, model_configuration.K,
            model_configuration.model_connectivity, model_configuration.a,
            model_configuration.b, model_configuration.d)

        if numpy.any([
                numpy.any(numpy.isnan(fz_jacobian.flatten())),
                numpy.any(numpy.isinf(fz_jacobian.flatten()))
        ]):
            raise_value_error("nan or inf values in dfz")

        return fz_jacobian

    def run_lsa(self, disease_hypothesis, model_configuration):

        jacobian = self._compute_jacobian(model_configuration)

        # Perform eigenvalue decomposition
        eigen_values, eigen_vectors = numpy.linalg.eig(jacobian)

        sorted_indices = numpy.argsort(eigen_values, kind='mergesort')
        self.eigen_values = eigen_values[sorted_indices]
        self.eigen_vectors = eigen_vectors[:, sorted_indices]

        self._ensure_eigen_vectors_number(
            self.eigen_values, model_configuration.e_values,
            model_configuration.x0_values,
            disease_hypothesis.get_all_disease_indices())

        if self.eigen_vectors_number == disease_hypothesis.number_of_regions:
            # Calculate the propagation strength index by summing all eigenvectors
            lsa_propagation_strength = numpy.abs(
                numpy.sum(self.eigen_vectors, axis=1))

        else:
            n_eigenvectors = max(self.eigen_vectors_number, 1)
            # Calculate the propagation strength index by summing the first n eigenvectors (minimum 1)
            if self.weighted_eigenvector_sum:
                lsa_propagation_strength = numpy.abs(
                    weighted_vector_sum(self.eigen_values[:n_eigenvectors],
                                        self.eigen_vectors[:, :n_eigenvectors],
                                        normalize=True))
            else:
                lsa_propagation_strength = numpy.abs(
                    numpy.sum(self.eigen_vectors[:, :n_eigenvectors], axis=1))

        if self.normalize_propagation_strength:
            # Normalize by the maximum
            lsa_propagation_strength /= numpy.max(lsa_propagation_strength)

        # # TODO: this has to be corrected
        # if self.eigen_vectors_number < 0.2 * disease_hypothesis.number_of_regions:
        #     propagation_strength_elbow = numpy.max([self.get_curve_elbow_point(lsa_propagation_strength),
        #                                     self.eigen_vectors_number])
        # else:
        propagation_strength_elbow = self.get_curve_elbow_point(
            lsa_propagation_strength)
        propagation_indices = lsa_propagation_strength.argsort(
        )[-propagation_strength_elbow:]

        hypothesis_builder = HypothesisBuilder(disease_hypothesis.number_of_regions).\
                                set_attributes_based_on_hypothesis(disease_hypothesis). \
                                    set_lsa_propagation(propagation_indices, lsa_propagation_strength)

        return hypothesis_builder.build_lsa_hypothesis()

    def update_for_pse(self, values, paths, indices):
        for i, val in enumerate(paths):
            vals = val.split(".")
            if vals[0] == "lsa_service":
                getattr(self, vals[1])[indices[i]] = values[i]
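
# A minimal, self-contained sketch of the LSA core in run_lsa above, with a random
# symmetric matrix standing in for the z-equation Jacobian (sizes are illustrative):
import numpy as np

A = np.random.randn(5, 5)
J = (A + A.T) / 2.0                   # symmetric, so the eigenvalues are real
vals, vecs = np.linalg.eig(J)
order = np.argsort(vals)              # ascending sort, as in run_lsa
vals, vecs = vals[order], vecs[:, order]
n_ev = 2                              # stand-in for eigen_vectors_number
strength = np.abs(np.sum(vecs[:, :n_ev], axis=1))
propagation_indices = strength.argsort()[-2:]  # top regions, as with the elbow point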
Exemplo n.º 20
0
    def test_computations(self):
        logger = initialize_logger(__name__, self.config.out.FOLDER_LOGS)

        # ------------------------------------------------------------------------------------------------------------------
        x1 = numpy.array([-4.1 / 3, -4.9 / 3, -5.0 / 3], dtype="float32")
        w = numpy.array([[0, 0.1, 0.9], [0.1, 0, 0.0], [0.9, 0.0, 0]])
        n = x1.size
        K = 0.0 * K_DEF * numpy.ones(x1.shape, dtype=x1.dtype)
        yc = YC_DEF * numpy.ones(x1.shape, dtype=x1.dtype)
        Iext1 = I_EXT1_DEF * numpy.ones(x1.shape, dtype=x1.dtype)
        slope = SLOPE_DEF * numpy.ones(x1.shape, dtype=x1.dtype)
        Iext2 = I_EXT2_DEF * numpy.ones(x1.shape, dtype=x1.dtype)
        a = A_DEF
        b = B_DEF
        d = D_DEF
        s = S_DEF
        gamma = GAMMA_DEF
        tau1 = TAU1_DEF
        tau2 = TAU2_DEF
        tau0 = TAU0_DEF
        x1, K = assert_arrays([x1, K])
        w = assert_arrays([w])  # , (x1.size, x1.size)
        zmode = numpy.array("lin")
        pmode = numpy.array("const")
        model = "EpileptorDPrealistic"
        x1eq = x1

        z = calc_eq_z(x1,
                      yc,
                      Iext1,
                      "2d",
                      x2=0.0,
                      slope=slope,
                      a=a,
                      b=b,
                      d=d,
                      x1_neg=True)
        zeq = z

        x0cr, r = calc_x0cr_r(yc,
                              Iext1,
                              zmode=zmode,
                              x1_rest=X1_DEF,
                              x1_cr=X1EQ_CR_DEF,
                              x0def=X0_DEF,
                              x0cr_def=X0_CR_DEF)

        x0 = calc_x0(x1, z, K, w, zmode=zmode, z_pos=True)

        calc_model_x0_to_x0_val(x0,
                                yc,
                                Iext1,
                                a,
                                b,
                                d,
                                zmode=numpy.array("lin"))

        if model == "EpileptorDP2D":
            eq = numpy.c_[x1eq, zeq].T.astype('float32')
            model_vars = 2
            dfun = calc_dfun(eq[0].T,
                             eq[1].T,
                             yc,
                             Iext1,
                             x0,
                             K,
                             w,
                             model_vars,
                             zmode=zmode,
                             pmode=pmode,
                             x0_var=x0,
                             slope_var=slope,
                             Iext1_var=Iext1,
                             Iext2_var=Iext2,
                             K_var=K,
                             slope=slope,
                             a=a,
                             b=b,
                             d=d,
                             s=s,
                             Iext2=Iext2,
                             gamma=gamma,
                             tau1=tau1,
                             tau0=tau0,
                             tau2=tau2,
                             output_mode="array")

            jac = calc_jac(eq[0].T,
                           eq[1].T,
                           yc,
                           Iext1,
                           x0,
                           K,
                           w,
                           model_vars,
                           zmode=zmode,
                           pmode=pmode,
                           x1_neg=True,
                           z_pos=True,
                           x2_neg=False,
                           x0_var=x0,
                           slope_var=slope,
                           Iext1_var=Iext1,
                           Iext2_var=Iext2,
                           K_var=K,
                           slope=slope,
                           a=a,
                           b=b,
                           d=d,
                           s=s,
                           Iext2=Iext2,
                           gamma=gamma,
                           tau1=tau1,
                           tau0=tau0,
                           tau2=tau2)
        else:
            if model == "EpileptorDPrealistic":
                # the 11D "realistic" simulations model
                eq, slope_eq, Iext2_eq = calc_eq_11d(
                    x0,
                    K,
                    w,
                    yc,
                    Iext1,
                    Iext2,
                    slope,
                    EpileptorDPrealistic.fun_slope_Iext2,
                    x1,
                    a=a,
                    b=b,
                    d=d,
                    zmode=zmode,
                    pmode=pmode)
                model_vars = 11
                dfun = calc_dfun(eq[0].T,
                                 eq[2].T,
                                 yc,
                                 Iext1,
                                 x0,
                                 K,
                                 w,
                                 model_vars,
                                 zmode,
                                 pmode,
                                 y1=eq[1].T,
                                 x2=eq[3].T,
                                 y2=eq[4].T,
                                 g=eq[5].T,
                                 x0_var=eq[6].T,
                                 slope_var=eq[7].T,
                                 Iext1_var=eq[8].T,
                                 Iext2_var=eq[9].T,
                                 K_var=eq[10].T,
                                 slope=slope,
                                 a=a,
                                 b=b,
                                 d=d,
                                 s=s,
                                 Iext2=Iext2,
                                 gamma=gamma,
                                 tau1=tau1,
                                 tau0=tau0,
                                 tau2=tau2,
                                 output_mode="array")
                jac = calc_jac(eq[0].T,
                               eq[2].T,
                               yc,
                               Iext1,
                               x0,
                               K,
                               w,
                               model_vars,
                               zmode,
                               pmode,
                               x1_neg=True,
                               z_pos=True,
                               x2_neg=False,
                               y1=eq[1].T,
                               x2=eq[3].T,
                               y2=eq[4].T,
                               g=eq[5].T,
                               x0_var=eq[6].T,
                               slope_var=eq[7].T,
                               Iext1_var=eq[8].T,
                               Iext2_var=eq[9].T,
                               K_var=eq[10].T,
                               slope=slope,
                               a=a,
                               b=b,
                               d=d,
                               s=s,
                               Iext2=Iext2,
                               gamma=gamma,
                               tau1=tau1,
                               tau0=tau0,
                               tau2=tau2)
            else:
                # all >=6D models
                eq = calc_eq_6d(x0,
                                K,
                                w,
                                yc,
                                Iext1,
                                Iext2,
                                x1,
                                a=a,
                                b=b,
                                d=d,
                                zmode=zmode)
                model_vars = 6
                dfun = calc_dfun(eq[0].T,
                                 eq[2].T,
                                 yc,
                                 Iext1,
                                 x0,
                                 K,
                                 w,
                                 model_vars,
                                 zmode,
                                 y1=eq[1].T,
                                 x2=eq[3].T,
                                 y2=eq[4].T,
                                 g=eq[5].T,
                                 slope=slope,
                                 a=a,
                                 b=b,
                                 d=d,
                                 s=s,
                                 Iext2=Iext2,
                                 gamma=gamma,
                                 tau1=tau1,
                                 tau0=tau0,
                                 tau2=tau2,
                                 output_mode="array")
                jac = calc_jac(eq[0].T,
                               eq[2].T,
                               yc,
                               Iext1,
                               x0,
                               K,
                               w,
                               model_vars,
                               zmode,
                               x1_neg=True,
                               z_pos=True,
                               x2_neg=False,
                               y1=eq[1].T,
                               x2=eq[3].T,
                               y2=eq[4].T,
                               g=eq[5].T,
                               slope=slope,
                               a=a,
                               b=b,
                               d=d,
                               s=s,
                               Iext2=Iext2,
                               gamma=gamma,
                               tau1=tau1,
                               tau0=tau0,
                               tau2=tau2)

        model = str(model_vars) + "d"
        sx1, sy1, sz, sx2, sy2, sg, sx0, sx0_val, sK, syc, sIext1, sIext2, sslope, sa, sb, sd, stau1, stau0, stau2, v = \
            symbol_vars(n, ["x1", "y1", "z", "x2", "y2", "g", "x0", "x0_val", "K", "yc", "Iext1", "Iext2",
                            "slope", "a", "b", "d", "tau1", "tau0", "tau2"], shape=(3,))
        sw, vw = symbol_vars(n, ["w"], dims=2, output_flag="numpy_array")

        v.update(vw)
        del vw
        numpy.fill_diagonal(sw, 0.0)
        sw = numpy.array(sw)
        a = numpy.ones((n, ))
        b = 3.0 * a
        d = 5.0 * a
        s = 6.0 * a
        tau1 = a
        tau0 = a
        tau2 = a
        x1sq = -4.0 / 3 * a
        if model == "2d":
            y1 = yc
        else:
            y1 = eq[1].T
            x2 = eq[3].T
            y2 = eq[4].T
            g = eq[5].T
            if model == "11d":
                x0_var = eq[6].T
                slope_var = eq[7].T
                Iext1_var = eq[8].T
                Iext2_var = eq[9].T
                K_var = eq[10].T

        # -------------------------------------------- Test symbolic x0cr, r calculation ----------------------------------

        logger.info("\n\nTest symbolic x0cr, r calculation...")
        x0cr2, r2 = calc_x0cr_r(syc,
                                sIext1,
                                zmode=zmode,
                                x1_rest=X1_DEF,
                                x1_cr=X1EQ_CR_DEF,
                                x0def=X0_DEF,
                                x0cr_def=X0_CR_DEF)  # test=True

        lx0cr_r, sx0cr_r, v = symbol_eqtn_x0cr_r(
            n, zmode=zmode,
            shape=(n, ))  # symbol_calc_x0cr_r(n, zmode=zmode, shape=(3, ))
        sx0cr_r = list(sx0cr_r)

        for ii in range(2):
            sx0cr_r[ii] = Matrix(sx0cr_r[ii])
            for iv in range(n):
                sx0cr_r[ii][iv] = sx0cr_r[ii][iv].subs([
                    (v["a"][iv], a[iv]), (v["b"][iv], b[iv]),
                    (v["d"][iv], d[iv]), (v["x1_rest"][iv], X1_DEF),
                    (v["x0_rest"][iv], X0_DEF), (v["x1_cr"][iv], X1EQ_CR_DEF),
                    (v["x0_cr"][iv], X0_CR_DEF)
                ])

        assert list(x0cr2) == list(sx0cr_r[0])
        assert list(r2) == list(sx0cr_r[1])

        # -------------------------------------------- Test coupling ------------------------------------------------------
        coupling = calc_coupling(sx1, sK, sw)
        scoupling = symbol_eqtn_coupling(n, shape=(n, ))[:2]

        assert list(coupling) == list(scoupling[1])
        assert list(calc_coupling(x1, K, w)) == list(scoupling[0](x1, K, w))
        assert coupling.shape == scoupling[1].shape

        # ---------------------------------------- Test coupling derivative to x1 ------------------------------------------
        coupling_diff = calc_coupling_diff(sK, sw)
        scoupling_diff = symbol_calc_coupling_diff(n, ix=None, jx=None,
                                                   K="K")[:2]
        assert coupling_diff.shape == scoupling_diff[1].shape

        # ------------------------------------- Test the fz with substitution of z via fx1 ----------------------------------
        fx1z = calc_fx1z(sx1,
                         sx0,
                         sK,
                         sw,
                         syc,
                         sIext1,
                         sa,
                         sb,
                         sd,
                         stau1,
                         stau0,
                         zmode=zmode)
        sfx1z = symbol_eqtn_fx1z(n, model, zmode, shape=(n, ))[:2]
        # if model == "2d":
        #     fx1z = calc_fx1z(x1, x0, K, w, yc, Iext1, a=a, b=b, d=d, tau1=tau1, tau0=tau0, model=model, zmode=zmode)
        #     s_fx1z = sfx1z[0](x1, x0, K, w, yc, Iext1, a, b, d, tau1, tau0)
        #     assert list(fx1z) == list(s_fx1z)
        # else:
        #     fx1z = calc_fx1z(x1, x0, K, w, yc, Iext1, a=a, b=b, d=d, tau1=tau1, tau0=tau0, model=model, zmode=zmode)
        #     s_fx1z = sfx1z[0](x1, x0, K, w, yc, Iext1, a, b, d, tau1, tau0)
        #     assert list(fx1z) == list(s_fx1z)

        # ------------------------------- Test the derivative to x1 of fz with substitution of z via fx1 ---------------------
        fx1z_diff = calc_fx1z_diff(sx1,
                                   sK,
                                   sw,
                                   sa,
                                   sb,
                                   sd,
                                   stau1,
                                   stau0,
                                   model=model,
                                   zmode=zmode)
        sfx1z_diff = symbol_eqtn_fx1z_diff(n, model, zmode)[:2]
        # for ii in range(n):
        #     assert list(fx1z_diff[ii]) == list(sfx1z_diff[1][ii, :])

        # -------------------------------- Test symbolic fx2 with substitution of y2 via fy2 ----------------------------------
        if model != "2d":
            sfx2y2 = symbol_eqtn_fx2y2(n, x2_neg=False, shape=(n, ))[:2]

        # ----------------------------------------------- Test calc_fx1_2d_taylor ---------------------------------------------
        x_taylor = symbol_vars(n, ["x1lin"],
                               shape=(n, ))[0]  # x_taylor = -4.5/3 (=x1lin)
        fx1lin = calc_fx1_2d_taylor(sx1,
                                    x_taylor,
                                    sz,
                                    syc,
                                    sIext1,
                                    sslope,
                                    sa,
                                    sb,
                                    stau1,
                                    x1_neg=True,
                                    order=2,
                                    shape=(n, ))
        sfx1lin = symbol_calc_2d_taylor(n,
                                        "x1lin",
                                        order=2,
                                        x1_neg=True,
                                        slope="slope",
                                        Iext1="Iext1",
                                        shape=(n, ))[:2]

        # for ii in range(3):
        #     assert numpy.array(fx1lin[ii].expand(sx1[ii]).collect(sx1[ii])) == numpy.array(
        #         sfx1lin[1][ii].expand(sx1[ii]).collect(sx1[ii]))
        calc_fx1_2d_taylor(x1,
                           -1.5,
                           z,
                           yc,
                           Iext1,
                           slope,
                           a=a,
                           b=b,
                           d=d,
                           tau1=tau1,
                           x1_neg=True,
                           order=2,
                           shape=(n, ))

        # ----------------------------------------- Test calc_fx1y1_6d_diff_x1 -------------------------------------------------
        fx1y1_6d_diff_x1 = calc_fx1y1_6d_diff_x1(sx1, syc, sIext1, sa, sb, sd,
                                                 stau1, stau0)
        sfx1y1_6d_diff_x1 = symbol_calc_fx1y1_6d_diff_x1(n, shape=(n, ))[:2]

        # for ii in range(n):
        #     assert fx1y1_6d_diff_x1[ii].expand(sx1[ii]).collect(sx1[ii]) == sfx1y1_6d_diff_x1[1][ii].expand(sx1[ii]).collect(sx1[ii])

        # ------------------------------- Test eq_x1_hypo_x0_optimize_fun & eq_x1_hypo_x0_optimize_jac --------------------------
        ix0 = numpy.array([1, 2])
        iE = numpy.array([0])
        x = numpy.empty_like(sx1).flatten()
        x[ix0] = sx1[ix0]
        x[iE] = sx0[iE]
        eq_x1_hypo_x0_optimize(ix0,
                               iE,
                               x1eq,
                               zeq,
                               x0[ix0],
                               K,
                               w,
                               yc,
                               Iext1,
                               a=A_DEF,
                               b=B_DEF,
                               d=D_DEF,
                               slope=SLOPE_DEF)
        eq_x1_hypo_x0_optimize_fun(x, ix0, iE, sx1, numpy.array(sz), sx0[ix0],
                                   sK, sw, syc, sIext1)
        eq_x1_hypo_x0_optimize_jac(x, ix0, iE, sx1, numpy.array(sz), sx0[ix0],
                                   sK, sw, sy1, sIext1)
        eq_x1_hypo_x0_optimize(ix0, iE, x1eq, zeq, x0[ix0], K, w, yc, Iext1)
        eq_x1_hypo_x0_linTaylor(ix0, iE, x1eq, zeq, x0[ix0], K, w, yc, Iext1)

        # ------------------------------------------ Test calc_fz_jac_square_taylor ----------------------------------------------
        calc_fz_jac_square_taylor(numpy.array(sz),
                                  syc,
                                  sIext1,
                                  sK,
                                  sw,
                                  tau1=tau1,
                                  tau0=tau0)
        lfz_jac_square_taylor, sfz_jac_square_taylor, v = symbol_calc_fz_jac_square_taylor(n)
        sfz_jac_square_taylor = Matrix(sfz_jac_square_taylor).reshape(n, n)
        for iv in range(n):
            for jv in range(n):
                sfz_jac_square_taylor[iv, jv] = sfz_jac_square_taylor[iv, jv].subs(
                    [(v["x_taylor"][jv], x1sq[jv]),
                     (v["a"][jv], a[jv]),
                     (v["b"][jv], b[jv]),
                     (v["d"][jv], d[jv]),
                     (v["tau1"][iv], tau1[iv]),
                     (v["tau0"][iv], tau2[iv])])

        assert list(
            calc_fz_jac_square_taylor(
                z, yc, Iext1, K, w, tau1=tau1, tau0=tau0)[0]) == list(
                    lfz_jac_square_taylor(zeq, yc, Iext1, K, w, a, b, d, tau1,
                                          tau0, x1sq)[0])
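
All of the checks above follow one pattern: evaluate a numeric calc_* implementation, rebuild the same expression with the corresponding symbol_* helper, then compare the two via substitution or lambdify. A minimal, self-contained sketch of that pattern, assuming only numpy and sympy (the cubic is a stand-in, not an Epileptor equation):

import numpy
from sympy import lambdify, symbols

x = symbols("x")
sym_expr = x ** 3 - 3 * x            # symbolic definition
num_fun = lambda v: v ** 3 - 3 * v   # independent numeric implementation

v = numpy.linspace(-2.0, 2.0, 5)
# lambdify compiles the symbolic expression into a vectorized numpy function:
assert numpy.allclose(lambdify(x, sym_expr, "numpy")(v), num_fun(v))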
Example No. 21
class SimulatorJava(ABCSimulator):
    """
    From a VEP Hypothesis, write a custom JSON simulation configuration.
    To run a simulation, we can also open a GUI and import the resulted JSON file.
    """
    logger = initialize_logger(__name__)
    reader = H5Reader()
    json_custom_config_file = "SimulationConfiguration.json"

    def __init__(self, connectivity, model_configuration, simulation_settings):
        self.model = None
        self.simulation_settings = simulation_settings
        self.model_configuration = model_configuration
        self.connectivity = connectivity
        self.head_path = os.path.dirname(self.connectivity.file_path)
        self.json_config_path = os.path.join(self.head_path,
                                             self.json_custom_config_file)
        self.configure_model()

    def get_vois(self):
        return self.model.vois

    @staticmethod
    def _save_serialized(ep_full_config, result_path):
        json_text = json.dumps(obj_to_dict(ep_full_config), indent=2)
        with open(result_path, 'w') as result_file:
            result_file.write(json_text)

    def config_simulation(self):

        ep_settings = Settings(
            integration_step=self.simulation_settings.integration_step,
            noise_seed=self.simulation_settings.noise_seed,
            simulated_period=self.simulation_settings.simulated_period,
            downsampling_period=self.simulation_settings.monitor_sampling_period)
        if isinstance(self.simulation_settings.noise_intensity, (float, int)):
            self.logger.info("Using uniform noise %s" %
                             self.simulation_settings.noise_intensity)
            ep_settings.noise_intensity = self.simulation_settings.noise_intensity
        elif len(self.simulation_settings.noise_intensity) == JavaEpileptor._nvar:
            self.logger.info("Using noise/voi %s" %
                             self.simulation_settings.noise_intensity)
            ep_settings.set_voi_noise_dispersions(
                self.simulation_settings.noise_intensity,
                self.connectivity.number_of_regions)
        elif len(self.simulation_settings.noise_intensity) == \
                JavaEpileptor._nvar * self.connectivity.number_of_regions:
            self.logger.info("Using node noise %s" %
                             self.simulation_settings.noise_intensity)
            ep_settings.set_node_noise_dispersions(
                self.simulation_settings.noise_intensity)
        else:
            self.logger.warning("Could not set noise %s" %
                                self.simulation_settings.noise_intensity)

        json_model = self.prepare_epileptor_model_for_json(
            self.connectivity.number_of_regions)
        # TODO: history length has to be computed given the time delays (i.e., the tract lengths...)
        # TODO: when dfun is implemented for JavaEpileptor, we can use commented lines with initial_conditions
        # initial_conditions = self.prepare_initial_conditions(history_length=1)
        # custom_config = FullConfiguration(connectivity_path=os.path.abspath(self.connectivity.file_path),
        #                                        epileptor_params=json_model, settings=ep_settings,
        #                                        initial_states=initial_conditions.flatten(),
        #                                        initial_states_shape=numpy.array(initial_conditions.shape))
        custom_config = FullConfiguration(connectivity_path=os.path.abspath(
            self.connectivity.file_path),
                                          epileptor_params=json_model,
                                          settings=ep_settings,
                                          initial_states=None,
                                          initial_states_shape=None)
        self._save_serialized(custom_config, self.json_config_path)

    def launch_simulation(self):
        opts = "java -Dncsa.hdf.hdf5lib.H5.hdf5lib=" + os.path.join(GenericConfig.LIB_PATH, GenericConfig.HDF5_LIB) + \
               " " + "-Djava.library.path=" + GenericConfig.LIB_PATH + " " + "-cp" + " " + GenericConfig.JAR_PATH + \
               " " + GenericConfig.JAVA_MAIN_SIM + " " + os.path.abspath(self.json_config_path) + " " + \
               os.path.abspath(self.head_path)
        try:
            status = subprocess.call(opts, shell=True)
            print(status)
        except Exception:
            status = False
            self.logger.warning("Something went wrong with this simulation...")
        time, data = self.reader.read_ts(
            os.path.join(self.head_path, "full-configuration", "ts.h5"))
        return time, data, status

    def prepare_epileptor_model_for_json(self, no_regions=88):
        epileptor_params_list = []
        self.logger.warning("No of regions is " + str(no_regions))
        for idx in xrange(no_regions):
            epileptor_params_list.append(
                EpileptorParams(self.model.a[idx], self.model.b[idx],
                                self.model.c[idx], self.model.d[idx],
                                self.model.aa[idx], self.model.r[idx],
                                self.model.Kvf[idx], self.model.Kf[idx],
                                self.model.Ks[idx], self.model.tau[idx],
                                self.model.Iext[idx], self.model.Iext2[idx],
                                self.model.slope[idx], self.model.x0[idx],
                                self.model.tt[idx]))

        return epileptor_params_list

    def configure_model(self):
        x0 = calc_x0_val_to_model_x0(
            self.model_configuration.x0_values, self.model_configuration.yc,
            self.model_configuration.Iext1, self.model_configuration.a,
            self.model_configuration.b - self.model_configuration.d)
        self.model = JavaEpileptor(a=self.model_configuration.a,
                                   b=self.model_configuration.b,
                                   d=self.model_configuration.d,
                                   x0=x0,
                                   iext=self.model_configuration.Iext1,
                                   ks=self.model_configuration.K,
                                   c=self.model_configuration.yc,
                                   tt=self.model_configuration.tau1,
                                   r=1.0 / self.model_configuration.tau0)

    def configure_initial_conditions(self, initial_conditions=None):
        if isinstance(initial_conditions, numpy.ndarray):
            self.initial_conditions = initial_conditions
        else:
            # TODO: have a function to calculate the correct history length when we have time delays
            self.initial_conditions = self.prepare_initial_conditions(
                history_length=1)
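
A hypothetical driver for SimulatorJava; the connectivity, model_configuration and simulation_settings objects are assumed to come from the readers and builders shown in the other examples:

# Hypothetical usage sketch; construction of the three input objects is elided.
simulator = SimulatorJava(connectivity, model_configuration, simulation_settings)
simulator.config_simulation()  # writes SimulationConfiguration.json next to the head files
time, data, status = simulator.launch_simulation()  # shells out to the Java backend
print("exit status: %s, data shape: %s" % (status, getattr(data, "shape", None)))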
Example No. 22
class TimeseriesService(object):

    logger = initialize_logger(__name__)

    def __init__(self, logger=None):
        # Avoid building a logger at import time through a default argument:
        self.logger = logger or initialize_logger(__name__)

    def decimate(self, timeseries, decim_ratio):
        if decim_ratio > 1:
            return Timeseries(timeseries.data[0:timeseries.time_length:decim_ratio], timeseries.dimension_labels,
                              timeseries.time_start, decim_ratio*timeseries.time_step, timeseries.time_unit)
        else:
            return timeseries

    def decimate_by_filtering(self, timeseries, decim_ratio):
        if decim_ratio > 1:
            decim_data, decim_time, decim_dt, decim_n_times = decimate_signals(timeseries.squeezed,
                                                                               timeseries.time_line, decim_ratio)
            return Timeseries(decim_data, timeseries.dimension_labels,
                              decim_time[0], decim_dt, timeseries.time_unit)
        else:
            return timeseries

    def convolve(self, timeseries, win_len=None, kernel=None):
        if kernel is None:
            kernel = np.ones((np.int(np.round(win_len)), 1, 1, 1))
        else:
            kernel = kernel * np.ones((np.int(np.round(win_len)), 1, 1, 1))
        return Timeseries(convolve(timeseries.data, kernel, mode='same'), timeseries.dimension_labels,
                          timeseries.time_start, timeseries.time_step, timeseries.time_unit)

    def hilbert_envelope(self, timeseries):
        return Timeseries(np.abs(hilbert(timeseries.data, axis=0)), timeseries.dimension_labels,
                          timeseries.time_start, timeseries.time_step, timeseries.time_unit)

    def detrend(self, timeseries, type='linear'):
        return Timeseries(detrend(timeseries.data, axis=0, type=type), timeseries.dimension_labels,
                          timeseries.time_start, timeseries.time_step, timeseries.time_unit)

    def normalize(self, timeseries, normalization=None):
        return Timeseries(normalize_signals(timeseries.data, normalization), timeseries.dimension_labels,
                          timeseries.time_start, timeseries.time_step, timeseries.time_unit)

    def filter(self, timeseries, lowcut=None, highcut=None, mode='bandpass', order=3):
        return Timeseries(filter_data(timeseries.data, timeseries.sampling_frequency, lowcut, highcut, mode, order),
                         timeseries.dimension_labels, timeseries.time_start, timeseries.time_step, timeseries.time_unit)

    def log(self, timeseries):
        return Timeseries(np.log(timeseries.data), timeseries.dimension_labels,
                          timeseries.time_start, timeseries.time_step, timeseries.time_unit)

    def power(self, timeseries):
        return np.sum(self.square(timeseries).squeezed, axis=0)

    def square(self, timeseries):
        return Timeseries(timeseries.data ** 2, timeseries.dimension_labels,
                          timeseries.time_start, timeseries.time_step, timeseries.time_unit)

    def correlation(self, timeseries):
        return np.corrcoef(timeseries.squeezed.T)

    def select_by_metric(self, timeseries, metric, metric_th=None):
        return timeseries.get_subspace_by_index(select_greater_values_array_inds(metric, metric_th))

    def select_by_power(self, timeseries, power=np.array([]), power_th=None):
        if len(power) != timeseries.number_of_labels:
            power = self.power(timeseries)
        return self.select_by_metric(timeseries, power, power_th)

    def select_by_hierarchical_group_metric_clustering(self, timeseries, distance, disconnectivity=np.array([]),
                                                       metric=None, n_groups=10, members_per_group=1):
        selection = select_by_hierarchical_group_metric_clustering(distance, disconnectivity, metric,
                                                                   n_groups, members_per_group)
        return timeseries.get_subspace_by_index(selection)

    def select_by_correlation_power(self, timeseries, correlation=np.array([]), disconnectivity=np.array([]),
                                    power=np.array([]), n_groups=10, members_per_group=1):
        if correlation.shape[0] != timeseries.number_of_labels:
            correlation = self.correlation(timeseries)
        if len(power) != timeseries.number_of_labels:
            power = self.power(timeseries)
        return self.select_by_hierarchical_group_metric_clustering(timeseries, 1-correlation,
                                                                   disconnectivity, power, n_groups, members_per_group)

    def select_by_rois_proximity(self, timeseries, proximity, proximity_th=None):
        initial_selection = range(timeseries.number_of_labels)
        selection = []
        for prox in proximity:
            selection += (
                np.array(initial_selection)[select_greater_values_array_inds(prox, proximity_th)]).tolist()
        return timeseries.get_subspace_by_index(np.unique(selection).tolist())

    def select_by_rois(self, timeseries, rois, all_labels):
        for ir, roi in enumerate(rois):
            if not isinstance(roi, basestring):
                rois[ir] = all_labels[roi]
        return timeseries.get_subspace_by_labels(rois)

    def compute_seeg(self, source_timeseries, sensors, sum_mode="lin"):
        if sum_mode == "exp":
            seeg_fun = lambda source, gain_matrix: np.log(np.exp(source.squeezed).dot(gain_matrix.T))
        else:
            seeg_fun = lambda source, gain_matrix: source.squeezed.dot(gain_matrix.T)
        seeg = []
        for sensor in ensure_list(sensors):
            seeg.append(Timeseries(seeg_fun(source_timeseries, sensor.gain_matrix),
                                   {TimeseriesDimensions.SPACE.value: sensor.labels,
                                    TimeseriesDimensions.VARIABLES.value:
                                        PossibleVariables.SEEG.value + str(sensor.labels)},
                                   source_timeseries.time_start, source_timeseries.time_step,
                                   source_timeseries.time_unit))
        return seeg
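
Most TimeseriesService methods return a new Timeseries carrying the same time metadata, so the transformations compose. A sketch, assuming ts is a Timeseries obtained elsewhere and the power threshold is purely illustrative:

service = TimeseriesService()
# band-pass, detrend, then take the analytic-signal envelope:
envelope = service.hilbert_envelope(
    service.detrend(service.filter(ts, lowcut=1.0, highcut=40.0)))
# keep only the channels whose power exceeds a (hypothetical) threshold:
selected = service.select_by_power(envelope, power_th=0.5)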
Example No. 23
class ModelConfigurationBuilder(object):
    logger = initialize_logger(__name__)

    x1EQcr = X1_EQ_CR_DEF

    def __init__(self, number_of_regions=1, x0_values=X0_DEF, e_values=E_DEF, yc=YC_DEF, Iext1=I_EXT1_DEF,
                 Iext2=I_EXT2_DEF, K=K_DEF, a=A_DEF, b=B_DEF, d=D_DEF, slope=SLOPE_DEF, s=S_DEF, gamma=GAMMA_DEF,
                 zmode=np.array("lin"), x1eq_mode="optimize"):
        self.number_of_regions = number_of_regions
        self.x0_values = x0_values * np.ones((self.number_of_regions,), dtype=np.float32)
        self.yc = yc
        self.Iext1 = Iext1
        self.Iext2 = Iext2
        self.a = a
        self.b = b
        self.d = d
        self.slope = slope
        self.s = s
        self.gamma = gamma
        self.zmode = zmode
        self.x1eq_mode = x1eq_mode
        if len(ensure_list(K)) == 1:
            self.K_unscaled = np.array(K) * np.ones((self.number_of_regions,), dtype=np.float32)
        elif len(ensure_list(K)) == self.number_of_regions:
            self.K_unscaled = np.array(K)
        else:
            self.logger.warning(
                "The length of input global coupling K is neither 1 nor equal to the number of regions!" +
                "\nSetting model_configuration_builder.K_unscaled = K_DEF for all regions")
            self.K_unscaled = K_DEF * np.ones((self.number_of_regions,), dtype=np.float32)
        self.K = None
        self._normalize_global_coupling()
        self.e_values = e_values * np.ones((self.number_of_regions,), dtype=np.float32)
        self.x0cr = 0.0
        self.rx0 = 0.0
        self._compute_critical_x0_scaling()

    def __repr__(self):
        d = {"01. Number of regions": self.number_of_regions,
             "02. x0_values": self.x0_values,
             "03. e_values": self.e_values,
             "04. K_unscaled": self.K_unscaled,
             "05. K": self.K,
             "06. yc": self.yc,
             "07. Iext1": self.Iext1,
             "08. Iext2": self.Iext2,
             "09. K": self.K,
             "10. a": self.a,
             "11. b": self.b,
             "12. d": self.d,
             "13. s": self.s,
             "14. slope": self.slope,
             "15. gamma": self.gamma,
             "16. zmode": self.zmode,
             "07. x1eq_mode": self.x1eq_mode
             }
        return formal_repr(self, d)

    def __str__(self):
        return self.__repr__()

    def set_attribute(self, attr_name, data):
        setattr(self, attr_name, data)

    def _compute_model_x0(self, x0_values):
        return calc_x0_val_to_model_x0(x0_values, self.yc, self.Iext1, self.a, self.b, self.d, self.zmode)

    def _ensure_equilibrum(self, x1EQ, zEQ):
        temp = x1EQ > self.x1EQcr - 10 ** (-3)
        if temp.any():
            x1EQ[temp] = self.x1EQcr - 10 ** (-3)
            zEQ = self._compute_z_equilibrium(x1EQ)

        return x1EQ, zEQ

    def _compute_x1_equilibrium_from_E(self, e_values):
        array_ones = np.ones((self.number_of_regions,), dtype=np.float32)
        return ((e_values - 5.0) / 3.0) * array_ones

    def _compute_z_equilibrium(self, x1EQ):
        return calc_eq_z(x1EQ, self.yc, self.Iext1, "2d", slope=self.slope, a=self.a, b=self.b, d=self.d)

    def _compute_critical_x0_scaling(self):
        (self.x0cr, self.rx0) = calc_x0cr_r(self.yc, self.Iext1, a=self.a, b=self.b, d=self.d, zmode=self.zmode)

    def _compute_coupling_at_equilibrium(self, x1EQ, model_connectivity):
        return calc_coupling(x1EQ, self.K, model_connectivity)

    def _compute_x0_values_from_x0_model(self, x0):
        return calc_model_x0_to_x0_val(x0, self.yc, self.Iext1, self.a, self.b, self.d, self.zmode)

    def _compute_x0_values(self, x1EQ, zEQ, model_connectivity):
        x0 = calc_x0(x1EQ, zEQ, self.K, model_connectivity)
        return self._compute_x0_values_from_x0_model(x0)

    def _compute_e_values(self, x1EQ):
        return 3.0 * x1EQ + 5.0

    def _compute_params_after_equilibration(self, x1EQ, zEQ, model_connectivity):
        self._compute_critical_x0_scaling()
        Ceq = self._compute_coupling_at_equilibrium(x1EQ, model_connectivity)
        x0_values = self._compute_x0_values(x1EQ, zEQ, model_connectivity)
        e_values = self._compute_e_values(x1EQ)
        x0 = self._compute_model_x0(x0_values)
        return x0, Ceq, x0_values, e_values

    def _compute_x1_and_z_equilibrium_from_E(self, e_values):
        x1EQ = self._compute_x1_equilibrium_from_E(e_values)
        zEQ = self._compute_z_equilibrium(x1EQ)
        return x1EQ, zEQ

    def _compute_x1_equilibrium(self, e_indices, x1EQ, zEQ, x0_values, model_connectivity):
        self._compute_critical_x0_scaling()
        x0 = self._compute_model_x0(x0_values)
        x0_indices = np.delete(np.array(range(model_connectivity.shape[0])), e_indices)
        if self.x1eq_mode == "linTaylor":
            x1EQ = \
                eq_x1_hypo_x0_linTaylor(x0_indices, e_indices, x1EQ, zEQ, x0, self.K,
                                        model_connectivity, self.yc, self.Iext1, self.a, self.b, self.d)[0]
        else:
            x1EQ = \
                eq_x1_hypo_x0_optimize(x0_indices, e_indices, x1EQ, zEQ, x0, self.K,
                                       model_connectivity, self.yc, self.Iext1, self.a, self.b, self.d)[0]
        return x1EQ

    def _normalize_global_coupling(self):
        self.K = self.K_unscaled / self.number_of_regions

    def _configure_model_from_equilibrium(self, x1EQ, zEQ, model_connectivity):
        # x1EQ, zEQ = self._ensure_equilibrum(x1EQ, zEQ)  # We don't do this by default anymore
        x0, Ceq, x0_values, e_values = self._compute_params_after_equilibration(x1EQ, zEQ, model_connectivity)
        return ModelConfiguration(self.yc, self.Iext1, self.Iext2, self.K, self.a, self.b, self.d,
                                  self.slope, self.s, self.gamma, x1EQ, zEQ, Ceq, x0, x0_values,
                                  e_values, self.zmode, model_connectivity)

    def build_model_from_E_hypothesis(self, disease_hypothesis, model_connectivity):
        # Always normalize K first
        self._normalize_global_coupling()

        # Then apply connectivity disease hypothesis scaling if any:
        if len(disease_hypothesis.w_indices) > 0:
            model_connectivity *= disease_hypothesis.get_connectivity_disease()

        # All nodes except for the diseased ones will get the default epileptogenicity:
        e_values = np.array(self.e_values)
        e_values[disease_hypothesis.e_indices] = disease_hypothesis.e_values

        # Compute equilibrium from epileptogenicity:
        x1EQ, zEQ = self._compute_x1_and_z_equilibrium_from_E(e_values)

        return self._configure_model_from_equilibrium(x1EQ, zEQ, model_connectivity)

    def build_model_from_hypothesis(self, disease_hypothesis, model_connectivity):
        # Always normalize K first
        self._normalize_global_coupling()

        # Then apply connectivity disease hypothesis scaling if any:
        if len(disease_hypothesis.w_indices) > 0:
            model_connectivity *= disease_hypothesis.get_connectivity_disease()

        # We assume that all nodes have the default (healthy) excitability:
        x0_values = np.array(self.x0_values)
        # ...and set the excitability-diseased ones:
        x0_values[disease_hypothesis.x0_indices] = disease_hypothesis.x0_values
        # x0_values must have size equal to len(x0_indices):
        x0_values = np.delete(x0_values, disease_hypothesis.e_indices)

        # There might be some epileptogenicity-diseased regions as well:
        # Initialize with the default e_values
        e_values = np.array(self.e_values)
        # ...and assign the diseased e_values, if any:
        e_values[disease_hypothesis.e_indices] = disease_hypothesis.e_values

        # Compute equilibrium from epileptogenicity:
        x1EQ_temp, zEQ_temp = self._compute_x1_and_z_equilibrium_from_E(e_values)

        # Now, solve the system in order to compute equilibrium:
        x1EQ = self._compute_x1_equilibrium(disease_hypothesis.e_indices, x1EQ_temp, zEQ_temp, x0_values,
                                            model_connectivity)
        zEQ = self._compute_z_equilibrium(x1EQ)

        return self._configure_model_from_equilibrium(x1EQ, zEQ, model_connectivity)

    # TODO: This is used from PSE for varying an attribute's value. We should find a better way, not hardcoded strings.
    def set_attributes_from_pse(self, values, paths, indices):
        for i, val in enumerate(paths):
            vals = val.split(".")
            if vals[0] == "model_configuration_builder":
                getattr(self, vals[1])[indices[i]] = values[i]
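
A sketch of how the builder is meant to be driven, assuming hyp is a DiseaseHypothesis and w a normalized connectivity matrix from the other examples (K_DEF as imported for the defaults above):

builder = ModelConfigurationBuilder(number_of_regions=w.shape[0], K=K_DEF)
# Pure epileptogenicity hypothesis: equilibria follow directly from the e_values:
model_config = builder.build_model_from_E_hypothesis(hyp, w)
# Mixed or excitability hypothesis: x1 equilibria are solved for ("optimize" or "linTaylor"):
model_config = builder.build_model_from_hypothesis(hyp, w)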
Example No. 24
    def __init__(self, logger=None):
        self.logger = logger or initialize_logger(__name__)
Example No. 25
def main_vep(config=Config(),
             ep_name=EP_NAME,
             K_unscaled=K_DEF,
             ep_indices=[],
             hyp_norm=0.99,
             manual_hypos=[],
             sim_type="paper",
             pse_flag=PSE_FLAG,
             sa_pse_flag=SA_PSE_FLAG,
             sim_flag=SIM_FLAG,
             n_samples=1000,
             test_write_read=False):
    logger = initialize_logger(__name__, config.out.FOLDER_LOGS)
    # -------------------------------Reading data-----------------------------------
    reader = TVBReader() if config.input.IS_TVB_MODE else H5Reader()
    writer = H5Writer()
    logger.info("Reading from: " + config.input.HEAD)
    head = reader.read_head(config.input.HEAD)
    plotter = Plotter(config)
    plotter.plot_head(head)
    if test_write_read:
        writer.write_head(head, os.path.join(config.out.FOLDER_RES, "Head"))
    # --------------------------Hypothesis definition-----------------------------------

    hypotheses = []
    # Reading a h5 file:

    if len(ep_name) > 0:
        # For an Excitability Hypothesis you leave e_indices empty
        # For a Mixed Hypothesis: you give as e_indices some indices for values > 0
        # For an Epileptogenicity Hypothesis: you give as e_indices all indices for values > 0
        hyp_file = HypothesisBuilder(head.connectivity.number_of_regions, config=config).set_normalize(hyp_norm). \
            build_hypothesis_from_file(ep_name, e_indices=ep_indices)
        hyp_file.name += ep_name
        # print(hyp_file.string_regions_disease(head.connectivity.region_labels))
        hypotheses.append(hyp_file)

    hypotheses += manual_hypos

    # --------------------------Hypothesis and LSA-----------------------------------
    for hyp in hypotheses:
        logger.info("\n\nRunning hypothesis: " + hyp.name)

        all_regions_indices = np.array(range(head.number_of_regions))
        healthy_indices = np.delete(all_regions_indices,
                                    hyp.regions_disease_indices).tolist()

        logger.info("\n\nCreating model configuration...")
        model_config_builder = ModelConfigurationBuilder(hyp.number_of_regions,
                                                         K=K_unscaled,
                                                         tau1=TAU1_DEF,
                                                         tau0=TAU0_DEF)
        mcs_file = os.path.join(config.out.FOLDER_RES,
                                hyp.name + "_model_config_builder.h5")
        writer.write_model_configuration_builder(model_config_builder,
                                                 mcs_file)
        if test_write_read:
            logger.info(
                "Written and read model configuration services are identical?: "
                + str(
                    assert_equal_objects(
                        model_config_builder,
                        reader.read_model_configuration_builder(mcs_file),
                        logger=logger)))
        # Fix healthy regions to default equilibria:
        # model_configuration = \
        #        model_config_builder.build_model_from_E_hypothesis(hyp, head.connectivity.normalized_weights)
        # Fix healthy regions to default x0s:
        model_configuration = \
                model_config_builder.build_model_from_hypothesis(hyp, head.connectivity.normalized_weights)
        mc_path = os.path.join(config.out.FOLDER_RES,
                               hyp.name + "_ModelConfig.h5")
        writer.write_model_configuration(model_configuration, mc_path)
        if test_write_read:
            logger.info(
                "Written and read model configuration are identical?: " + str(
                    assert_equal_objects(model_configuration,
                                         reader.read_model_configuration(
                                             mc_path),
                                         logger=logger)))
        # Plot nullclines and equilibria of model configuration
        plotter.plot_state_space(model_configuration,
                                 "6d",
                                 head.connectivity.region_labels,
                                 special_idx=hyp.regions_disease_indices,
                                 zmode="lin",
                                 figure_name=hyp.name + "_StateSpace")

        logger.info("\n\nRunning LSA...")
        lsa_service = LSAService(eigen_vectors_number=1)
        lsa_hypothesis = lsa_service.run_lsa(hyp, model_configuration)

        lsa_path = os.path.join(config.out.FOLDER_RES,
                                lsa_hypothesis.name + "_LSA.h5")
        lsa_config_path = os.path.join(config.out.FOLDER_RES,
                                       lsa_hypothesis.name + "_LSAConfig.h5")
        writer.write_hypothesis(lsa_hypothesis, lsa_path)
        writer.write_lsa_service(lsa_service, lsa_config_path)
        if test_write_read:
            logger.info("Written and read LSA services are identical?: " + str(
                assert_equal_objects(lsa_service,
                                     reader.read_lsa_service(lsa_config_path),
                                     logger=logger)))
            logger.info(
                "Written and read LSA hypotheses are identical (no input check)?: "
                + str(
                    assert_equal_objects(lsa_hypothesis,
                                         reader.read_hypothesis(lsa_path),
                                         logger=logger)))
        plotter.plot_lsa(lsa_hypothesis,
                         model_configuration,
                         lsa_service.weighted_eigenvector_sum,
                         lsa_service.eigen_vectors_number,
                         head.connectivity.region_labels,
                         None,
                         lsa_service=lsa_service)

        if pse_flag:
            # --------------Parameter Search Exploration (PSE)-------------------------------
            logger.info("\n\nRunning PSE LSA...")
            pse_results = pse_from_lsa_hypothesis(
                n_samples,
                lsa_hypothesis,
                head.connectivity.normalized_weights,
                model_config_builder,
                lsa_service,
                head.connectivity.region_labels,
                param_range=0.1,
                global_coupling=[{
                    "indices": all_regions_indices
                }],
                healthy_regions_parameters=[{
                    "name": "x0_values",
                    "indices": healthy_indices
                }],
                logger=logger,
                save_flag=True)[0]
            plotter.plot_lsa(lsa_hypothesis, model_configuration,
                             lsa_service.weighted_eigenvector_sum,
                             lsa_service.eigen_vectors_number,
                             head.connectivity.region_labels, pse_results)

            pse_lsa_path = os.path.join(
                config.out.FOLDER_RES,
                lsa_hypothesis.name + "_PSE_LSA_results.h5")
            writer.write_dictionary(pse_results, pse_lsa_path)
            if test_write_read:
                logger.info(
                    "Written and read sensitivity analysis parameter search results are identical?: "
                    + str(
                        assert_equal_objects(pse_results,
                                             reader.read_dictionary(
                                                 pse_lsa_path),
                                             logger=logger)))

        if sa_pse_flag:
            # --------------Sensitivity Analysis Parameter Search Exploration (PSE)-------------------------------
            logger.info("\n\nrunning sensitivity analysis PSE LSA...")
            sa_results, pse_sa_results = \
                sensitivity_analysis_pse_from_lsa_hypothesis(n_samples, lsa_hypothesis,
                                                             head.connectivity.normalized_weights,
                                                             model_config_builder, lsa_service,
                                                             head.connectivity.region_labels,
                                                             method="sobol", param_range=0.1,
                                                             global_coupling=[{"indices": all_regions_indices,
                                                                               "bounds": [0.0, 2 *
                                                                                          model_config_builder.K_unscaled[
                                                                                              0]]}],
                                                             healthy_regions_parameters=[
                                                                 {"name": "x0_values", "indices": healthy_indices}],
                                                             config=config)
            plotter.plot_lsa(lsa_hypothesis,
                             model_configuration,
                             lsa_service.weighted_eigenvector_sum,
                             lsa_service.eigen_vectors_number,
                             head.connectivity.region_labels,
                             pse_sa_results,
                             title="SA PSE Hypothesis Overview")

            sa_pse_path = os.path.join(
                config.out.FOLDER_RES,
                lsa_hypothesis.name + "_SA_PSE_LSA_results.h5")
            sa_lsa_path = os.path.join(
                config.out.FOLDER_RES,
                lsa_hypothesis.name + "_SA_LSA_results.h5")
            writer.write_dictionary(pse_sa_results, sa_pse_path)
            writer.write_dictionary(sa_results, sa_lsa_path)
            if test_write_read:
                logger.info(
                    "Written and read sensitivity analysis results are identical?: "
                    + str(
                        assert_equal_objects(sa_results,
                                             reader.read_dictionary(
                                                 sa_lsa_path),
                                             logger=logger)))
                logger.info(
                    "Written and read sensitivity analysis parameter search results are identical?: "
                    + str(
                        assert_equal_objects(pse_sa_results,
                                             reader.read_dictionary(
                                                 sa_pse_path),
                                             logger=logger)))

        if sim_flag:
            # --------------------------Simulation preparations-----------------------------------
            # Choosing the model:
            # Available models beyond the TVB Epileptor (they all encompass optional variations from the different papers):
            # EpileptorDP: similar to the TVB Epileptor + optional variations,
            # EpileptorDP2D: reduced 2D model, following Proix et al. 2014 + optional variations,
            # EpileptorDPrealistic: starting from the TVB Epileptor + optional variations, but:
            #      - x0, Iext1, Iext2, slope and K become noisy state variables,
            #      - Iext2 and slope are coupled to z, g, or z*g in order for spikes to appear before the seizure,
            #      - correlated noise is also used
            # We don't want any time delays for the moment
            head.connectivity.tract_lengths *= config.simulator.USE_TIME_DELAYS_FLAG

            sim_types = ensure_list(sim_type)
            integrator = "HeunStochastic"
            for sim_type in sim_types:
                # ------------------------------Simulation--------------------------------------
                logger.info(
                    "\n\nConfiguring simulation from model_configuration...")
                sim_builder = SimulatorBuilder(config.simulator.MODE)
                if isequal_string(sim_type, "realistic"):
                    model.tau0 = 60000.0
                    model.tau1 = 0.2
                    model.slope = 0.25
                    model.Iext2 = 0.45
                    model.pmode = np.array(
                        "z")  # np.array("None") to opt out for feedback
                    sim_settings = \
                        sim_builder.set_fs(2048.0).set_fs_monitor(1024.0).set_simulated_period(60000).build_sim_settings()
                    sim_settings.noise_type = COLORED_NOISE
                    sim_settings.noise_ntau = 20
                    integrator = "Dop853Stochastic"
                elif isequal_string(sim_type, "fitting"):
                    sim_settings = sim_builder.set_model_name("EpileptorDP2D").set_fs(2048.0).set_fs_monitor(2048.0).\
                                                                    set_simulated_period(2000).build_sim_settings()
                    sim_settings.noise_intensity = 1e-5
                    model = sim_builder.generate_model_tvb(model_configuration)
                    model.tau0 = 300.0
                    model.tau1 = 0.5
                elif isequal_string(sim_type, "reduced"):
                    sim_settings = sim_builder.set_model_name("EpileptorDP2D").set_fs(4096.0). \
                                                                    set_simulated_period(1000).build_sim_settings()
                    model = sim_builder.generate_model_tvb(model_configuration)
                elif isequal_string(sim_type, "paper"):
                    sim_builder.set_model_name("Epileptor")
                    sim_settings = sim_builder.build_sim_settings()
                    model = sim_builder.generate_model_tvb(model_configuration)
                else:
                    sim_settings = sim_builder.build_sim_settings()
                    model = sim_builder.generate_model_tvb(model_configuration)

                sim, sim_settings, model = \
                    sim_builder.build_simulator_TVB_from_model_sim_settings(model_configuration,head.connectivity,
                                                                            model, sim_settings, integrator=integrator)

                # Integrator and initial conditions initialization.
                # By default initial condition is set right on the equilibrium point.
                writer.write_simulator_model(
                    sim.model, sim.connectivity.number_of_regions,
                    os.path.join(config.out.FOLDER_RES,
                                 lsa_hypothesis.name + "_sim_model.h5"))
                logger.info("\n\nSimulating...")
                sim_output, status = sim.launch_simulation(
                    report_every_n_monitor_steps=100)

                sim_path = os.path.join(
                    config.out.FOLDER_RES,
                    lsa_hypothesis.name + "_sim_settings.h5")
                writer.write_simulation_settings(sim.simulation_settings,
                                                 sim_path)
                if test_write_read:
                    # TODO: find out why it cannot set monitor expressions
                    logger.info(
                        "Written and read simulation settings are identical?: "
                        + str(
                            assert_equal_objects(
                                sim.simulation_settings,
                                reader.read_simulation_settings(sim_path),
                                logger=logger)))
                if not status:
                    logger.warning("\nSimulation failed!")
                else:
                    time = np.array(sim_output.time_line).astype("f")
                    logger.info("\n\nSimulated signal return shape: %s",
                                sim_output.shape)
                    logger.info("Time: %s - %s", time[0], time[-1])
                    logger.info("Values: %s - %s", sim_output.data.min(),
                                sim_output.data.max())
                    sim_output, seeg = compute_seeg_and_write_ts_to_h5(
                        sim_output,
                        sim.model,
                        head.sensorsSEEG,
                        os.path.join(config.out.FOLDER_RES,
                                     model._ui_name + "_ts.h5"),
                        seeg_gain_mode="lin",
                        hpf_flag=True,
                        hpf_low=10.0,
                        hpf_high=512.0)

                    # Plot results
                    plotter.plot_simulated_timeseries(
                        sim_output,
                        sim.model,
                        lsa_hypothesis.lsa_propagation_indices,
                        seeg_list=seeg,
                        spectral_raster_plot=False,
                        title_prefix=hyp.name,
                        spectral_options={"log_scale": True})
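
A hypothetical entry point for the script above; the ep_name is illustrative and must match an epileptogenicity file under the configured head folder:

if __name__ == "__main__":
    main_vep(Config(), ep_name="clinical_hypothesis", sim_type="fitting",
             pse_flag=True, sa_pse_flag=False, sim_flag=True, n_samples=100)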
Example No. 26
def main_sampling_service(config=Config()):
    logger = initialize_logger(__name__, config.out.FOLDER_LOGS)

    n_samples = 100
    logger.info("\nDeterministic numpy.linspace sampling:")
    sampler = DeterministicSamplingService(n_samples=n_samples, grid_mode=True)
    samples, stats = sampler.generate_samples(low=1.0,
                                              high=2.0,
                                              shape=(2, ),
                                              stats=True)
    for key, value in stats.iteritems():
        logger.info("\n" + key + ": " + str(value))
    logger.info(sampler.__repr__())
    writer = H5Writer()
    writer.write_generic(sampler, config.out.FOLDER_RES,
                         "test_Stochastic_Sampler.h5")

    logger.info("\nStochastic uniform sampling with numpy:")
    sampler = StochasticSamplingService(n_samples=n_samples,
                                        sampling_module="numpy")
    #                                      a (low), b (high)
    samples, stats = sampler.generate_samples(
        parameter=(1.0, 2.0),
        probability_distribution=ProbabilityDistributionTypes.UNIFORM,
        shape=(2, ),
        stats=True)
    for key, value in stats.iteritems():
        logger.info("\n" + key + ": " + str(value))

    logger.info(sampler.__repr__())
    writer.write_generic(sampler, config.out.FOLDER_RES,
                         "test1_Stochastic_Sampler.h5")

    logger.info("\nStochastic truncated normal sampling with scipy:")
    sampler = StochasticSamplingService(n_samples=n_samples)
    #                                   loc (mean), scale (sigma)
    samples, stats = sampler.generate_samples(parameter=(1.5, 1.0),
                                              probability_distribution="norm",
                                              low=1,
                                              high=2,
                                              shape=(2, ),
                                              stats=True)
    for key, value in stats.iteritems():
        logger.info("\n" + key + ": " + str(value))
    logger.info(sampler.__repr__())
    writer.write_generic(sampler, config.out.FOLDER_RES,
                         "test2_Stochastic_Sampler.h5")

    logger.info("\nSensitivity analysis sampling:")
    sampler = SalibSamplingService(n_samples=n_samples, sampler="latin")
    samples, stats = sampler.generate_samples(low=1,
                                              high=2,
                                              shape=(2, ),
                                              stats=True)
    for key, value in stats.iteritems():
        logger.info("\n" + key + ": " + str(value))
    logger.info(sampler.__repr__())
    writer.write_generic(sampler, config.out.FOLDER_RES,
                         "test3_Stochastic_Sampler.h5")

    logger.info("\nTesting distribution class and conversions...")
    sampler = StochasticSamplingService(n_samples=n_samples)
    for distrib_name in ProbabilityDistributionTypes.available_distributions:
        logger.info("\n" + distrib_name)
        logger.info("\nmode/mean, std to distribution " + distrib_name + ":")
        if np.in1d(distrib_name, [
                ProbabilityDistributionTypes.EXPONENTIAL,
                ProbabilityDistributionTypes.CHISQUARE
        ]):
            target_stats = {"mean": 1.0}
            stats_m = "mean"
        elif np.in1d(distrib_name, [
                ProbabilityDistributionTypes.BERNOULLI,
                ProbabilityDistributionTypes.POISSON
        ]):
            target_stats = {"mean": np.ones((2, ))}
            stats_m = "mean"
        else:
            # binomial, uniform and all remaining distributions target both moments:
            target_stats = {"mean": 1.0, "std": 2.0}
            stats_m = "mean"
        parameter1 = generate_stochastic_parameter(
            name="test1_" + distrib_name,
            low=0.0,
            high=2.0,
            p_shape=(2, 2),
            probability_distribution=distrib_name,
            optimize_pdf=True,
            use="manual",
            **target_stats)
        name2 = "test2_" + distrib_name
        defaults = set_parameter_defaults(name2,
                                          _pdf=distrib_name,
                                          _shape=(2, 2),
                                          _lo=0.0,
                                          _hi=2.0,
                                          **(deepcopy(target_stats)))
        parameter2 = set_parameter(name=name2, use="manual", **defaults)
        for parameter in (parameter1, parameter2):
            logger.info(str(parameter))
            samples, stats = sampler.generate_samples(parameter=parameter, stats=True)
            for key, value in stats.iteritems():
                logger.info("\n" + key + ": " + str(value))
            diff = target_stats[stats_m] - stats[stats_m]
            if np.any(np.abs(diff.flatten()) > 0.001):
                logger.warning(
                    "Large difference between target and resulting samples' " +
                    stats_m + "!: " + str(diff))
            del parameter
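
The moment-matching round-trip being tested above, isolated for a single uniform parameter (same calls as in the example; the target moments are illustrative):

param = generate_stochastic_parameter(name="demo_uniform", low=0.0, high=2.0, p_shape=(2, 2),
                                      probability_distribution=ProbabilityDistributionTypes.UNIFORM,
                                      optimize_pdf=True, use="manual", mean=1.0, std=0.5)
samples, stats = StochasticSamplingService(n_samples=1000).generate_samples(parameter=param,
                                                                            stats=True)
print(stats["mean"])  # should land near the requested mean of 1.0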
Example No. 27
class H5Reader(object):
    logger = initialize_logger(__name__)

    connectivity_filename = "Connectivity.h5"
    cortical_surface_filename = "CorticalSurface.h5"
    region_mapping_filename = "RegionMapping.h5"
    volume_mapping_filename = "VolumeMapping.h5"
    structural_mri_filename = "StructuralMRI.h5"
    sensors_filename_prefix = "Sensors"
    sensors_filename_separator = "_"

    def read_connectivity(self, path):
        """
        :param path: Path towards a custom Connectivity H5 file
        :return: Connectivity object
        """
        self.logger.info("Starting to read a Connectivity from: %s" % path)
        h5_file = h5py.File(path, 'r', libver='latest')

        weights = h5_file['/' + ConnectivityH5Field.WEIGHTS][()]
        tract_lengths = h5_file['/' + ConnectivityH5Field.TRACTS][()]
        region_centres = h5_file['/' + ConnectivityH5Field.CENTERS][()]
        region_labels = h5_file['/' + ConnectivityH5Field.REGION_LABELS][()]
        orientations = h5_file['/' + ConnectivityH5Field.ORIENTATIONS][()]
        hemispheres = h5_file['/' + ConnectivityH5Field.HEMISPHERES][()]

        h5_file.close()

        conn = Connectivity(path, weights, tract_lengths, region_labels, region_centres, hemispheres, orientations)
        self.logger.info("Successfully read connectvity from: %s" % path)

        return conn

    def read_surface(self, path):
        """
        :param path: Path towards a custom Surface H5 file
        :return: Surface object
        """
        if not os.path.isfile(path):
            self.logger.warning("Surface file %s does not exist" % path)
            return None

        self.logger.info("Starting to read Surface from: %s" % path)
        h5_file = h5py.File(path, 'r', libver='latest')

        vertices = h5_file['/' + SurfaceH5Field.VERTICES][()]
        triangles = h5_file['/' + SurfaceH5Field.TRIANGLES][()]
        vertex_normals = h5_file['/' + SurfaceH5Field.VERTEX_NORMALS][()]

        h5_file.close()

        surface = Surface(vertices, triangles, vertex_normals)
        self.logger.info("Successfully read surface from: %s" % path)

        return surface

    def read_sensors(self, path):
        """
        :param path: Path towards a custom head folder
        :return: 3 lists with all sensors from Path by type
        """
        sensors_seeg = []
        sensors_eeg = []
        sensors_meg = []

        self.logger.info("Starting to read all Sensors from: %s" % path)

        all_head_files = os.listdir(path)
        for head_file in all_head_files:
            str_head_file = str(head_file)
            if not str_head_file.startswith(self.sensors_filename_prefix):
                continue

            type = str_head_file[len(self.sensors_filename_prefix):str_head_file.index(self.sensors_filename_separator)]
            if type == Sensors.TYPE_SEEG:
                sensors_seeg.append(self.read_sensors_of_type(os.path.join(path, head_file), Sensors.TYPE_SEEG))
            if type == Sensors.TYPE_EEG:
                sensors_eeg.append(self.read_sensors_of_type(os.path.join(path, head_file), Sensors.TYPE_EEG))
            if type == Sensors.TYPE_MEG:
                sensors_meg.append(self.read_sensors_of_type(os.path.join(path, head_file), Sensors.TYPE_MEG))

        self.logger.info("Successfuly read all sensors from: %s" % path)

        return sensors_seeg, sensors_eeg, sensors_meg

    def read_sensors_of_type(self, sensors_file, type):
        """
        :param
            sensors_file: Path towards a custom Sensors H5 file
            type: Sensors type
        :return: Sensors object
        """
        if not os.path.exists(sensors_file):
            self.logger.warning("Senors file %s does not exist!" % sensors_file)
            return None

        self.logger.info("Starting to read sensors of type %s from: %s" % (type, sensors_file))
        h5_file = h5py.File(sensors_file, 'r', libver='latest')

        labels = h5_file['/' + SensorsH5Field.LABELS][()]
        locations = h5_file['/' + SensorsH5Field.LOCATIONS][()]

        if '/orientations' in h5_file:
            orientations = h5_file['/orientations'][()]
        else:
            orientations = None
        if '/' + SensorsH5Field.GAIN_MATRIX in h5_file:
            gain_matrix = h5_file['/' + SensorsH5Field.GAIN_MATRIX][()]
        else:
            gain_matrix = None

        h5_file.close()

        sensors = Sensors(labels, locations, orientations=orientations, gain_matrix=gain_matrix, s_type=type)
        self.logger.info("Successfully read sensors from: %s" % sensors_file)

        return sensors

    def read_volume_mapping(self, path):
        """
        :param path: Path towards a custom VolumeMapping H5 file
        :return: volume mapping in a numpy array
        """
        if not os.path.isfile(path):
            self.logger.warning("VolumeMapping file %s does not exist" % path)
            return numpy.array([])

        self.logger.info("Starting to read VolumeMapping from: %s" % path)
        h5_file = h5py.File(path, 'r', libver='latest')

        data = h5_file['/data'][()]

        h5_file.close()
        self.logger.info("Successfully read volume mapping!") #: %s" % data)

        return data

    def read_region_mapping(self, path):
        """
        :param path: Path towards a custom RegionMapping H5 file
        :return: region mapping in a numpy array
        """
        if not os.path.isfile(path):
            self.logger.warning("RegionMapping file %s does not exist" % path)
            return numpy.array([])

        self.logger.info("Starting to read RegionMapping from: %s" % path)
        h5_file = h5py.File(path, 'r', libver='latest')

        data = h5_file['/data'][()]

        h5_file.close()
        self.logger.info("Successfully read region mapping!") #: %s" % data)

        return data

    def read_t1(self, path):
        """
        :param path: Path towards a custom StructuralMRI H5 file
        :return: structural MRI in a numpy array
        """
        if not os.path.isfile(path):
            self.logger.warning("StructuralMRI file %s does not exist" % path)
            return numpy.array([])

        self.logger.info("Starting to read StructuralMRI from: %s" % path)
        h5_file = h5py.File(path, 'r', libver='latest')

        data = h5_file['/data'][()]

        h5_file.close()
        self.logger.info("Successfully read structural MRI from: %s" % path)

        return data

    def read_head(self, path):
        """
        :param path: Path towards a custom head folder
        :return: Head object
        """
        self.logger.info("Starting to read Head from: %s" % path)
        conn = self.read_connectivity(os.path.join(path, self.connectivity_filename))
        srf = self.read_surface(os.path.join(path, self.cortical_surface_filename))
        rm = self.read_region_mapping(os.path.join(path, self.region_mapping_filename))
        vm = self.read_volume_mapping(os.path.join(path, self.volume_mapping_filename))
        t1 = self.read_t1(os.path.join(path, self.structural_mri_filename))
        sensorsSEEG, sensorsEEG, sensorsMEG = self.read_sensors(path)

        head = Head(conn, srf, rm, vm, t1, path, sensorsSEEG=sensorsSEEG, sensorsEEG=sensorsEEG, sensorsMEG=sensorsMEG)
        self.logger.info("Successfully read Head from: %s" % path)

        return head

    def read_epileptogenicity(self, root_folder, name="ep"):
        """
        :param
            root_folder: Path towards a valid custom Epileptogenicity H5 file
            name: the name of the hypothesis
        :return: Timeseries in a numpy array
        """
        path = os.path.join(root_folder, name, name + ".h5")
        self.logger.info("Starting to read Epileptogenicity from: %s" % path)
        h5_file = h5py.File(path, 'r', libver='latest')

        values = h5_file['/values'][()]

        h5_file.close()
        self.logger.info("Successfully read epileptogenicity values!") #: %s" % values)

        return values

    def read_timeseries(self, path):
        """
        :param path: Path towards a valid TimeSeries H5 file
        :return: Timeseries data and time in 2 numpy arrays
        """
        self.logger.info("Starting to read TimeSeries from: %s" % path)
        h5_file = h5py.File(path, 'r', libver='latest')

        data = h5_file['/data'][()]
        total_time = int(h5_file["/"].attrs["Simulated_period"][0])
        nr_of_steps = int(h5_file["/data"].attrs["Number_of_steps"][0])
        start_time = float(h5_file["/data"].attrs["Start_time"][0])
        time = numpy.linspace(start_time, total_time, nr_of_steps)

        self.logger.info("First Channel sv sum: " + str(numpy.sum(data[:, 0])))
        self.logger.info("Successfully read Timeseries!") #: %s" % data)
        h5_file.close()

        return time, data

    def read_hypothesis(self, path):
        """
        :param path: Path towards a Hypothesis H5 file
        :return: DiseaseHypothesis object
        """
        self.logger.info("Starting to read Hypothesis from: %s" % path)
        h5_file = h5py.File(path, 'r', libver='latest')

        if h5_file.attrs["EPI_Subtype"] != "DiseaseHypothesis":
            self.logger.warning("This file does not seem to holds a DiseaseHypothesis!")

        hypothesis = DiseaseHypothesis()
        for dataset in h5_file.keys():
            hypothesis.set_attribute(dataset, h5_file["/" + dataset][()])

        for attr in h5_file.attrs.keys():
            if attr in ["x0_indices", "e_indices", "w_indices"]:
                hypothesis.set_attribute(attr, h5_file.attrs[attr].tolist())
            else:
                hypothesis.set_attribute(attr, h5_file.attrs[attr])

        h5_file.close()
        return hypothesis

    def read_model_configuration(self, path):
        """
        :param path: Path towards a ModelConfiguration H5 file
        :return: ModelConfiguration object
        """
        self.logger.info("Starting to read ModelConfiguration from: %s" % path)
        h5_file = h5py.File(path, 'r', libver='latest')

        if h5_file.attrs["EPI_Subtype"] != "ModelConfiguration":
            self.logger.warning("This file does not seem to hold a ModelConfiguration")

        model_configuration = ModelConfiguration()
        for dataset in h5_file.keys():
            model_configuration.set_attribute(dataset, h5_file["/" + dataset][()])

        for attr in h5_file.attrs.keys():
            model_configuration.set_attribute(attr, h5_file.attrs[attr])

        h5_file.close()
        return model_configuration

    def read_lsa_service(self, path):
        """
        :param path: Path towards a LSAService H5 file
        :return: LSAService object
        """
        self.logger.info("Starting to read LSAService from: %s" % path)
        h5_file = h5py.File(path, 'r', libver='latest')
        from tvb_epilepsy.service.lsa_service import LSAService
        lsa_service = LSAService()

        for dataset in h5_file.keys():
            lsa_service.set_attribute(dataset, h5_file["/" + dataset][()])

        for attr in h5_file.attrs.keys():
            lsa_service.set_attribute(attr, h5_file.attrs[attr])

        h5_file.close()
        return lsa_service

    def read_model_configuration_builder(self, path):
        """
        :param path: Path towards a ModelConfigurationService H5 file
        :return: ModelConfigurationBuilder object
        """
        self.logger.info("Starting to read ModelConfigurationService from: %s" % path)
        h5_file = h5py.File(path, 'r', libver='latest')

        from tvb_epilepsy.service.model_configuration_builder import ModelConfigurationBuilder
        mc_service = ModelConfigurationBuilder()

        for dataset in h5_file.keys():
            mc_service.set_attribute(dataset, h5_file["/" + dataset][()])

        for attr in h5_file.attrs.keys():
            mc_service.set_attribute(attr, h5_file.attrs[attr])

        h5_file.close()
        return mc_service

    def read_model_inversions_service(self, path):
        """
        :param path: Path towards a ModelInversionService H5 file
        :return: ModelInversionService object
        """
        # TODO: add a specialized reader function
        model_inversions_service = self.read_dictionary(path, "OrderedDictDot")
        if model_inversions_service.dict.get("signals_inds", None) is not None:
            model_inversions_service.dict["signals_inds"] = model_inversions_service.dict["signals_inds"].tolist()
        return model_inversions_service

    def read_dictionary(self, path, type="dict"):
        """
        :param path: Path towards a dictionary H5 file
        :param type: "dict" (default), "DictDot" or "OrderedDictDot", selecting the return type
        :return: dict, DictDot or OrderedDictDot
        """
        self.logger.info("Starting to read a dictionary from: %s" % path)
        h5_file = h5py.File(path, 'r', libver='latest')

        dictionary = dict()
        for dataset in h5_file.keys():
            dictionary.update({dataset: h5_file["/" + dataset][()]})

        for attr in h5_file.attrs.keys():
            dictionary.update({attr: h5_file.attrs[attr]})

        h5_file.close()
        if isequal_string(type, "DictDot"):
            return DictDot(dictionary)
        elif isequal_string(type, "OrderedDictDot"):
            return OrderedDictDot(dictionary)
        else:
            return dictionary

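    # A minimal usage sketch (path hypothetical), assuming the file was written
    # with H5Writer.write_dictionary:
    #   reader = H5Reader()
    #   d = reader.read_dictionary("/path/to/dict.h5")                     # plain dict
    #   dd = reader.read_dictionary("/path/to/dict.h5", "OrderedDictDot")  # dot access
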
    def read_simulation_settings(self, path):
        """
        :param path: Path towards a SimulationSettings H5 file
        :return: SimulationSettings
        """
        self.logger.info("Starting to read SimulationSettings from: %s" % path)
        h5_file = h5py.File(path, 'r', libver='latest')

        sim_settings = SimulationSettings()
        for dataset in h5_file.keys():
            sim_settings.set_attribute(dataset, h5_file["/" + dataset][()])

        for attr in h5_file.attrs.keys():
            sim_settings.set_attribute(attr, h5_file.attrs[attr])

        h5_file.close()
        return sim_settings

    def read_generic(self, path, obj=None, output_shape=None):
        return read_h5_model(path).convert_from_h5_model(obj, output_shape)
Exemplo n.º 28
0
def main_cc_vep(config,
                head_folder,
                ep_name="clinical_hypothesis",
                x0_indices=[],
                pse_flag=False,
                sim_flag=True):
    if not (os.path.isdir(config.out.FOLDER_RES)):
        os.mkdir(config.out.FOLDER_RES)
    logger = initialize_logger(__name__, config.out.FOLDER_LOGS)

    # -------------------------------Reading data-----------------------------------
    reader = TVBReader() if config.input.IS_TVB_MODE else H5Reader()
    writer = H5Writer()
    logger.info("Reading from: %s", head_folder)
    head = reader.read_head(head_folder)
    plotter = Plotter(config)
    plotter.plot_head(head)

    # --------------------------Hypothesis definition-----------------------------------
    hypo_builder = HypothesisBuilder(head.connectivity.number_of_regions)
    all_regions_indices = np.array(range(head.number_of_regions))

    # This is an example of Epileptogenicity Hypothesis:
    hyp_E = hypo_builder.build_hypothesis_from_file(ep_name, x0_indices)
    # This is an example of Excitability Hypothesis:
    hyp_x0 = hypo_builder.build_hypothesis_from_file(ep_name)

    disease_indices = hyp_E.e_indices + hyp_x0.x0_indices
    healthy_indices = np.delete(all_regions_indices, disease_indices).tolist()

    if len(x0_indices) > 0:
        # This is an example of x0_values mixed Excitability and Epileptogenicity Hypothesis:
        disease_values = reader.read_epileptogenicity(head_folder,
                                                      name=ep_name)
        disease_values = disease_values.tolist()
        x0_values = []
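        # Split the disease values read from file: indices listed in x0_indices
        # become excitability (x0) values; the remaining indices keep their
        # epileptogenicity (E) values.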
        for ix0 in x0_indices:
            ind = disease_indices.index(ix0)
            del disease_indices[ind]
            x0_values.append(disease_values.pop(ind))
        e_indices = disease_indices
        e_values = np.array(disease_values)
        x0_values = np.array(x0_values)
        hyp_x0_E = hypo_builder.set_x0_hypothesis(
            x0_indices,
            x0_values).set_e_hypothesis(e_indices,
                                        e_values).build_hypothesis()
        hypotheses = (hyp_E, hyp_x0, hyp_x0_E)

    else:
        hypotheses = (
            hyp_E,
            hyp_x0,
        )

    # --------------------------Hypothesis and LSA-----------------------------------
    for hyp in hypotheses:
        logger.info("Running hypothesis: %s", hyp.name)
        logger.info("Creating model configuration...")
        builder = ModelConfigurationBuilder(hyp.number_of_regions)
        writer.write_model_configuration_builder(
            builder,
            os.path.join(config.out.FOLDER_RES, "model_config_service.h5"))
        if hyp.type == "Epileptogenicity":
            model_configuration = builder.build_model_from_E_hypothesis(
                hyp, head.connectivity.normalized_weights)
        else:
            model_configuration = builder.build_model_from_hypothesis(
                hyp, head.connectivity.normalized_weights)
        writer.write_model_configuration(
            model_configuration,
            os.path.join(config.out.FOLDER_RES, "ModelConfiguration.h5"))
        # Plot nullclines and equilibria of model configuration
        plotter.plot_state_space(model_configuration,
                                 region_labels=head.connectivity.region_labels,
                                 special_idx=disease_indices,
                                 model="2d",
                                 zmode="lin",
                                 figure_name=hyp.name + "_StateSpace")
        logger.info("Running LSA...")
        lsa_service = LSAService(eigen_vectors_number=None,
                                 weighted_eigenvector_sum=True)
        lsa_hypothesis = lsa_service.run_lsa(hyp, model_configuration)
        writer.write_hypothesis(
            lsa_hypothesis,
            os.path.join(config.out.FOLDER_RES, lsa_hypothesis.name + ".h5"))
        writer.write_lsa_service(
            lsa_service,
            os.path.join(config.out.FOLDER_RES, "lsa_config_service.h5"))
        plotter.plot_lsa(lsa_hypothesis, model_configuration,
                         lsa_service.weighted_eigenvector_sum,
                         lsa_service.eigen_vectors_number,
                         head.connectivity.region_labels, None)
        if pse_flag:
            n_samples = 100
            # --------------Parameter Search Exploration (PSE)-------------------------------
            logger.info("Running PSE LSA...")
            pse_results = pse_from_lsa_hypothesis(
                lsa_hypothesis,
                head.connectivity.normalized_weights,
                head.connectivity.region_labels,
                n_samples,
                param_range=0.1,
                global_coupling=[{
                    "indices": all_regions_indices
                }],
                healthy_regions_parameters=[{
                    "name": "x0_values",
                    "indices": healthy_indices
                }],
                model_configuration_builder=builder,
                lsa_service=lsa_service,
                save_flag=True,
                folder_res=config.out.FOLDER_RES,
                filename="PSE_LSA",
                logger=logger)[0]
            plotter.plot_lsa(lsa_hypothesis,
                             model_configuration,
                             lsa_service.weighted_eigenvector_sum,
                             lsa_service.eigen_vectors_number,
                             head.connectivity.region_labels,
                             pse_results,
                             title="Hypothesis PSE LSA Overview")
        if sim_flag:
            config.out.subfolder = "simulations"
            for folder in (config.out.FOLDER_RES, config.out.FOLDER_FIGURES):
                if not (os.path.isdir(folder)):
                    os.mkdir(folder)
            dynamical_models = ["EpileptorDP2D", "EpileptorDPrealistic"]

            for dynamical_model, sim_type in zip(dynamical_models,
                                                 ["fitting", "realistic"]):
                ts_file = None  # os.path.join(sim_folder_res, dynamical_model + "_ts.h5")
                vois_ts_dict = \
                    from_model_configuration_to_simulation(model_configuration, head, lsa_hypothesis,
                                                           sim_type=sim_type, dynamical_model=dynamical_model,
                                                           ts_file=ts_file, plot_flag=True, config=config)
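
# A minimal invocation sketch (paths hypothetical), assuming a Config pointing
# at valid input/output folders and a head folder in tvb_epilepsy's H5 layout:
#   from tvb_epilepsy.base.constants.config import Config
#   main_cc_vep(Config(), "/path/to/head_folder",
#               ep_name="clinical_hypothesis", pse_flag=True, sim_flag=False)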
Exemplo n.º 29
0
import os
import numpy as np
from tvb_epilepsy.base.constants.config import Config
from tvb_epilepsy.base.utils.log_error_utils import initialize_logger
from tvb_epilepsy.base.utils.data_structures_utils import isequal_string
from tvb_epilepsy.base.model.vep.sensors import Sensors
from tvb_epilepsy.io.h5_reader import H5Reader
from tvb_epilepsy.io.h5_writer import H5Writer
from tvb_epilepsy.plot.plotter import Plotter
from tvb_epilepsy.base.epileptor_models import EpileptorDP2D
from tvb_epilepsy.service.simulator.simulator_builder import build_simulator_TVB_realistic, \
    build_simulator_TVB_fitting, build_simulator_TVB_default, build_simulator_TVB_paper
from tvb_epilepsy.service.timeseries_service import TimeseriesService

logger = initialize_logger(__name__)


def _compute_and_write_seeg(source_timeseries,
                            sensors_list,
                            filename,
                            hpf_flag=False,
                            hpf_low=10.0,
                            hpf_high=256.0,
                            seeg_gain_mode="lin",
                            h5_writer=H5Writer()):
    ts_service = TimeseriesService()
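    # Sampling frequency in Hz, assuming time_step is expressed in milliseconds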
    fsAVG = 1000.0 / source_timeseries.time_step

    if hpf_flag:
        hpf_low = max(
            hpf_low, 1000.0 /
Exemplo n.º 30
0
class SensitivityAnalysisService(object):
    logger = initialize_logger(__name__)

    def __init__(self,
                 inputs,
                 outputs,
                 method="delta",
                 calc_second_order=True,
                 conf_level=0.95):

        self._set_method(method)
        self._set_calc_second_order(calc_second_order)
        self._set_conf_level(conf_level)
        self.n_samples = []
        self.input_names = []
        self.input_bounds = []
        self.input_samples = []
        self.n_inputs = len(inputs)

        for input in inputs:
            self.input_names.append(input["name"])

            samples = np.array(input["samples"]).flatten()
            self.n_samples.append(samples.size)
            self.input_samples.append(samples)

            self.input_bounds.append(
                input.get("bounds",
                          [samples.min(), samples.max()]))

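        # Collapse the per-input sample counts into a single scalar; all inputs
        # are required to have the same number of samples.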
        if len(self.n_samples) > 0:
            if np.all(np.array(self.n_samples) == self.n_samples[0]):
                self.n_samples = self.n_samples[0]
            else:
                raise_value_error(
                    "Not all input parameters have equal number of samples!: "
                    + str(self.n_samples))

        self.input_samples = np.array(self.input_samples).T

        self.n_outputs = 0
        self.output_values = []
        self.output_names = []

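        # Orient every output array so that rows correspond to samples: accept
        # flat (n_samples,) vectors or 2D arrays with n_samples along either axis.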
        for output in outputs:

            if output["values"].size == self.n_samples:
                n_outputs = 1
                self.output_values.append(output["values"].flatten())
            else:
                if output["values"].shape[0] == self.n_samples:
                    self.output_values.append(output["values"])
                    n_outputs = output["values"].shape[1]
                elif output["values"].shape[1] == self.n_samples:
                    self.output_values.append(output["values"].T)
                    n_outputs = output["values"].shape[0]
                else:
                    raise_value_error(
                        "None of the dimensions of output samples: " +
                        str(output["values"].shape) + " matches n_samples = " +
                        str(self.n_samples) + " !")
            self.n_outputs += n_outputs

            if n_outputs > 1 and len(output["names"]) == 1:
                self.output_names += np.array([
                    "%s[%d]" % l
                    for l in zip(np.repeat(output["names"][0], n_outputs),
                                 range(n_outputs))
                ]).tolist()
            else:
                self.output_names += output["names"]

        if len(self.output_values) > 0:
            # Stack outputs as columns so that output_values has shape
            # (n_samples, n_outputs), matching the [:, io] indexing in run()
            self.output_values = np.column_stack(self.output_values)

        self.problem = {}
        self.other_parameters = {}

    def __repr__(self):

        d = {
            "01. Method": self.method,
            "02. Second order calculation flag": self.calc_second_order,
            "03. Confidence level": self.conf_level,
            "05. Number of inputs": self.n_inputs,
            "06. Number of outputs": self.n_outputs,
            "07. Input names": self.input_names,
            "08. Output names": self.output_names,
            "09. Input bounds": self.input_bounds,
            "10. Problem": dict_str(self.problem),
            "11. Other parameters": dict_str(self.other_parameters),
        }
        return formal_repr(self, d)

    def __str__(self):
        return self.__repr__()

    def _set_method(self, method):
        method = method.lower()
        if np.in1d(method, METHODS):
            self.method = method
        else:
            raise_value_error("Method " + str(method) +
                              " is not one of the available methods " +
                              str(METHODS) + " !")

    def _set_calc_second_order(self, calc_second_order):
        if isinstance(calc_second_order, bool):
            self.calc_second_order = calc_second_order
        else:
            raise_value_error("calc_second_order = " + str(calc_second_order) +
                              "is not a boolean as it should!")

    def _set_conf_level(self, conf_level):
        if isinstance(conf_level,
                      float) and conf_level > 0.0 and conf_level < 1.0:
            self.conf_level = conf_level
        else:
            raise_value_error(
                "conf_level = " + str(conf_level) +
                " is not a float in the (0.0, 1.0) interval, as it should be!")

    def _update_parameters(self,
                           method=None,
                           calc_second_order=None,
                           conf_level=None):

        if method is not None:
            self._set_method(method)

        if calc_second_order is not None:
            self._set_calc_second_order(calc_second_order)

        if conf_level is not None:
            self._set_conf_level(conf_level)

    def run(self,
            input_ids=None,
            output_ids=None,
            method=None,
            calc_second_order=None,
            conf_level=None,
            **kwargs):

        self._update_parameters(method, calc_second_order, conf_level)

        self.other_parameters = kwargs

        if input_ids is None:
            input_ids = range(self.n_inputs)

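        # SALib "problem" definition restricted to the selected inputs:
        # number of variables, their names and their bounds.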
        self.problem = {
            "num_vars": len(input_ids),
            "names": np.array(self.input_names)[input_ids].tolist(),
            "bounds": np.array(self.input_bounds)[input_ids].tolist()
        }

        if output_ids is None:
            output_ids = range(self.n_outputs)

        n_outputs = len(output_ids)

        if self.method.lower() == "sobol":
            self.logger.warning(
                "'sobol' method requires 'saltelli' sampling scheme!")
            # Additional keyword parameters and their defaults:
            # calc_second_order (bool): Calculate second-order sensitivities (default True)
            # num_resamples (int): The number of resamples used to compute the confidence intervals (default 1000)
            # conf_level (float): The confidence interval level (default 0.95)
            # print_to_console (bool): Print results directly to console (default False)
            # parallel: False,
            # n_processors: None
            self.analyzer = lambda output: sobol.analyze(
                self.problem,
                output,
                calc_second_order=self.calc_second_order,
                conf_level=self.conf_level,
                num_resamples=self.other_parameters.get("num_resamples", 1000),
                parallel=self.other_parameters.get("parallel", False),
                n_processors=self.other_parameters.get("n_processors", None),
                print_to_console=self.other_parameters.get(
                    "print_to_console", False))

        elif np.in1d(self.method.lower(), ["latin", "delta"]):
            self.logger.warning(
                "'latin' sampling scheme is recommended for 'delta' method!")
            # Additional keyword parameters and their defaults:
            # num_resamples (int): The number of resamples used to compute the confidence intervals (default 1000)
            # conf_level (float): The confidence interval level (default 0.95)
            # print_to_console (bool): Print results directly to console (default False)
            self.analyzer = lambda output: delta.analyze(
                self.problem,
                self.input_samples[:, input_ids],
                output,
                conf_level=self.conf_level,
                num_resamples=self.other_parameters.get("num_resamples", 1000),
                print_to_console=self.other_parameters.get(
                    "print_to_console", False))

        elif np.in1d(self.method.lower(), ["fast", "fast_sampler"]):
            self.logger.warning(
                "'fast' method requires 'fast_sampler' sampling scheme!")
            # Additional keyword parameters and their defaults:
            # M (int): The interference parameter,
            #           i.e., the number of harmonics to sum in the Fourier series decomposition (default 4)
            # print_to_console (bool): Print results directly to console (default False)
            self.analyzer = lambda output: fast.analyze(
                self.problem,
                output,
                M=self.other_parameters.get("M", 4),
                print_to_console=self.other_parameters.get(
                    "print_to_console", False))

        elif np.in1d(self.method.lower(), ["ff", "fractional_factorial"]):
            # Additional keyword parameters and their defaults:
            # second_order (bool, default=False): Include interaction effects
            # print_to_console (bool, default=False): Print results directly to console
            self.logger.warning(
                "'fractional_factorial' method requires 'fractional_factorial' sampling scheme!"
            )
            self.analyzer = lambda output: ff.analyze(
                self.problem,
                self.input_samples[:, input_ids],
                output,
                second_order=self.calc_second_order,
                print_to_console=self.other_parameters.get(
                    "print_to_console", False))

        elif self.method.lower().lower() == "morris":
            self.logger.warning(
                "'morris' method requires 'morris' sampling scheme!")
            # Additional keyword parameters and their defaults:
            # num_resamples (int): The number of resamples used to compute the confidence intervals (default 1000)
            # conf_level (float): The confidence interval level (default 0.95)
            # print_to_console (bool): Print results directly to console (default False)
            # grid_jump (int): The grid jump size, must be identical to the value passed to
            #                   SALib.sample.morris.sample() (default 2)
            # num_levels (int): The number of grid levels, must be identical to the value passed to
            #                   SALib.sample.morris (default 4)
            self.analyzer = lambda output: morris.analyze(
                self.problem,
                self.input_samples[:, input_ids],
                output,
                conf_level=self.conf_level,
                grid_jump=self.other_parameters.get("grid_jump", 2),
                num_levels=self.other_parameters.get("num_levels", 4),
                num_resamples=self.other_parameters.get("num_resamples", 1000),
                print_to_console=self.other_parameters.get(
                    "print_to_console", False))

        elif self.method.lower() == "dgsm":
            # num_resamples (int): The number of resamples used to compute the confidence intervals (default 1000)
            # conf_level (float): The confidence interval level (default 0.95)
            # print_to_console (bool): Print results directly to console (default False)
            self.analyzer = lambda output: dgsm.analyze(
                self.problem,
                self.input_samples[:, input_ids],
                output,
                conf_level=self.conf_level,
                num_resamples=self.other_parameters.get("num_resamples", 1000),
                print_to_console=self.other_parameters.get(
                    "print_to_console", False))

        else:
            raise_value_error("Method " + str(self.method) +
                              " is not one of the available methods " +
                              str(METHODS) + " !")

        output_names = []
        results = []
        for io in output_ids:
            output_names.append(self.output_names[io])
            results.append(self.analyzer(self.output_values[:, io]))

        # TODO: Adjust list_of_dicts_to_dicts_of_ndarrays to handle ndarray concatenation
        results = list_of_dicts_to_dicts_of_ndarrays(results)

        results.update({"output_names": output_names})

        return results
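
# A minimal usage sketch with made-up samples, assuming SALib is installed and
# METHODS contains "delta"; inputs/outputs follow the dict layout parsed above:
#   import numpy as np
#   samples = np.random.uniform(-3.0, -1.0, 100)
#   inputs = [{"name": "x0", "samples": samples}]
#   outputs = [{"names": ["lsa_ps"], "values": np.random.rand(100)}]
#   service = SensitivityAnalysisService(inputs, outputs, method="delta")
#   results = service.run(num_resamples=500)  # dict of delta/S1 indices + "output_names"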