Exemplo n.º 1
0
 def __repr__(self):
     """Human-readable summary of the disease hypothesis attributes."""
     # Pick the propagation strengths to report: restrict to the LSA
     # propagation indices when any exist, otherwise show them all.
     if len(self.lsa_propagation_indices):
         strengths = \
             self.lsa_propagation_strengths[self.lsa_propagation_indices]
     else:
         strengths = self.lsa_propagation_strengths
     d = {"01. Name": self.name,
          "02. Type": self.type,
          "03. Number of regions": self.number_of_regions,
          "04. Excitability (x0) disease indices": self.x0_disease_indices,
          "05. Excitability (x0) disease values": self.x0_disease_values,
          "06. Epileptogenicity (E) disease indices": self.e_disease_indices,
          "07. Epileptogenicity (E) disease values": self.e_disease_values,
          "08. Connectivity (W) disease indices": self.w_indices,
          "09. Connectivity (W) disease values": self.w_values,
          "10. Propagation indices": self.lsa_propagation_indices,
          "11. Propagation strengths of indices": strengths}
     return formal_repr(self, sort_dict(d))
Exemplo n.º 2
0
 def __repr__(self):
     """Readable summary: name, region count and parameter collection."""
     attrs = [("1. type", self.name),
              ("2. number of regions", self.number_of_regions),
              ("3. number of parameters", self.n_parameters),
              ("4. parameters", self.parameters)]
     return formal_repr(self, sort_dict(dict(attrs)))
Exemplo n.º 3
0
def dict_to_h5_model(h5_model, obj, path, container_path):
    """Register a dict *obj* on *h5_model*.

    :param h5_model: target H5Model-like object
    :param obj: the dict to store
    :param path: dataset attribute path used for the empty-dict marker
    :param container_path: base path under which "create_str" metadata goes
    :return: (h5_model, sorted copy of obj) or (h5_model, None) when empty
    """
    if not obj:
        # Empty dict: store a literal "{}" marker and nothing else.
        h5_model.add_or_update_datasets_attribute(path, "{}")
        return h5_model, None
    # Non-empty: record how to recreate the container, hand back sorted items.
    h5_model.add_or_update_metadata_attribute(
        os.path.join(container_path, "create_str"), "OrderedDict()")
    return h5_model, sort_dict(obj)
Exemplo n.º 4
0
 def __repr__(self):
     """Summary of the parameter: name, bounds and shape."""
     return formal_repr(self, sort_dict({"1. name": self.name,
                                         "2. low": self.low,
                                         "3. high": self.high,
                                         "4. shape": self.p_shape}))
Exemplo n.º 5
0
def read_h5_model(path):
    """Read an HDF5 file into an H5Model (datasets dict + metadata dict).

    Collects every dataset value keyed by its absolute "/path", and every
    attribute (file-level and per-node) keyed by "/path/attr_name".

    :param path: path to an existing .h5 file
    :return: H5Model built from the two sorted dicts
    """
    datasets_dict = dict()
    metadata_dict = dict()

    def add_dset_and_attr(name, obj):
        # Visitor for h5py visititems: record dataset payloads and the
        # attributes of every visited node (groups and datasets alike).
        if isinstance(obj, h5py.Dataset):
            datasets_dict.update({"/" + name: obj[()]})
        # .items() instead of py2-only .iteritems() (same semantics here).
        for key, val in obj.attrs.items():
            metadata_dict.update({os.path.join("/" + name, key): val})

    # Context manager guarantees the file handle is closed; the original
    # opened the file and never closed it (resource leak).
    with h5py.File(path, 'r', libver='latest') as h5_file:
        for attr_key, value in h5_file.attrs.items():
            metadata_dict.update({"/" + attr_key: value})
        h5_file.visititems(add_dset_and_attr)
    return H5Model(sort_dict(datasets_dict), sort_dict(metadata_dict))
Exemplo n.º 6
0
 def __repr__(self):
     """Summary of the surface geometry arrays."""
     keys = ["00. surface subtype", "01. vertices", "02. triangles",
             "03. vertex_normals", "04. triangle_normals",
             "05. voxel to ras matrix"]
     vals = [self.surface_subtype, self.vertices, self.triangles,
             self.vertex_normals, self.triangle_normals, self.vox2ras]
     return formal_repr(self, sort_dict(dict(zip(keys, vals))))
Exemplo n.º 7
0
def object_to_h5_model(h5_model, obj, container_path):
    """Store the *_str generator attributes of *obj* as h5 metadata.

    :param h5_model: target H5Model-like object
    :param obj: any object; its vars() are inspected
    :param container_path: absolute path; the leading character is stripped
                           when composing the metadata path
    :return: (h5_model, sorted vars(obj) dict)
    """
    obj_dict = sort_dict(vars(obj))
    generator_keys = ("context_str", "create_str", "transform_str",
                      "update_str")
    for key in generator_keys:
        value = obj_dict.get(key)
        # Only genuine strings are written out; missing entries are skipped.
        if isinstance(value, basestring):  # py2 str/unicode check
            h5_model.add_or_update_metadata_attribute(
                os.path.join(container_path[1:], key), value)
    return h5_model, obj_dict
Exemplo n.º 8
0
 def __repr__(self):
     """Summary of connectivity arrays, each keyed by region labels."""
     labels = self.region_labels
     d = {
         "a. region_labels": reg_dict(labels),
         "b. centres": reg_dict(self.centres, labels),
         "c. hemispheres": reg_dict(self.hemispheres, labels),
         "d. orientations": reg_dict(self.orientations, labels),
         "e. areas": reg_dict(self.areas, labels),
         "f. normalized weights": reg_dict(self.normalized_weights, labels),
         "g. weights": reg_dict(self.weights, labels),
         "h. tract_lengths": reg_dict(self.tract_lengths, labels),
     }
     return formal_repr(self, sort_dict(d))
Exemplo n.º 9
0
 def __repr__(self):
     """Summary of the sensor set and its gain matrix."""
     d = dict()
     d["1. sensors' type"] = self.s_type
     d["2. number of sensors"] = self.number_of_sensors
     d["3. labels"] = reg_dict(self.labels)
     d["4. locations"] = reg_dict(self.locations, self.labels)
     d["5. orientations"] = reg_dict(self.orientations, self.labels)
     d["6. gain_matrix"] = self.gain_matrix
     return formal_repr(self, sort_dict(d))
Exemplo n.º 10
0
 def set_model_data(self, debug=0, simulate=0, **kwargs):
     """Assemble (and sort) the model data dict for a stan run.

     :param debug: -1 no debugging at all; 0 print only scalar parameters;
                   1 scalars and vectors; 2 all (incl. matrices)
     :param simulate: > 0 simulates without using the input observation data
     :param kwargs: may carry "model_data_path" and/or a "model_data" dict
     :return: sorted model data dict with DEBUG/SIMULATE flags set
     """
     self.model_data_path = kwargs.get("model_data_path",
                                       self.model_data_path)
     model_data = kwargs.pop("model_data", None)
     if not isinstance(model_data, dict):
         # No usable dict supplied: fall back to loading it from file.
         model_data = self.load_model_data_from_file(self.model_data_path)
     model_data["DEBUG"] = debug
     model_data["SIMULATE"] = simulate
     return sort_dict(model_data)
Exemplo n.º 11
0
 def __repr__(self):
     """Summary of the head model: connectivity, mappings, surface, sensors."""
     region_labels = self.connectivity.region_labels
     d = {
         "1. name": self.name,
         "2. connectivity": self.connectivity,
         "3. RM": reg_dict(self.region_mapping, region_labels),
         "4. VM": reg_dict(self.volume_mapping, region_labels),
         "5. surface": self.cortical_surface,
         "6. T1": self.t1_background,
         "7. SEEG": self.sensorsSEEG,
         "8. EEG": self.sensorsEEG,
         "9. MEG": self.sensorsMEG,
     }
     return formal_repr(self, sort_dict(d))
Exemplo n.º 12
0
 def __repr__(self):
     """Parent repr followed by the ODE-specific model fields."""
     d = {
         "6. active regions": self.active_regions,
         "7. initial condition std": self.sig_init,
         "8. number of active regions": self.n_active_regions,
         "9. number of nonactive regions": self.n_nonactive_regions,
         "10. number of observation signals": self.n_signals,
         "11. number of time points": self.n_times,
         "12. time step": self.dt,
         "13. observation_expression": self.observation_expression,
         "14. observation_model": self.observation_model,
     }
     parent_repr = super(ODEStatisticalModel, self).__repr__()
     return "\n".join([parent_repr, formal_repr(self, sort_dict(d))])
 def _repr(self):
     """Ordered summary of the probability distribution's descriptors.

     BUG FIX: the original numbered both "shape" and "mean" as "05.",
     which broke the intended sort order of the entries; everything from
     "mean" on is renumbered 06-14 here.
     """
     d = {
         "01. type": self.type,
         "02. pdf_params": self.pdf_params(),
         "03. n_params": self.n_params,
         "04. constraint": self.constraint_string,
         "05. shape": self.__p_shape,
         "06. mean": self.__mean,
         "07. median": self.__median,
         "08. mode": self.__mode,
         "09. var": self.__var,
         "10. std": self.__std,
         "11. skew": self.__skew,
         "12. kurt": self.__kurt,
         "13. scipy_name": self.scipy_name,
         "14. numpy_name": self.numpy_name
     }
     return formal_repr(self, sort_dict(d))
Exemplo n.º 14
0
 def compute_estimates_from_samples(self, samples):
     """Summarize sampled chains into point estimates.

     For every parameter, the (minmax, mean, variance) slice of
     scipy.stats.describe is unpacked into pkey_low / pkey_high / pkey /
     pkey_std entries (std = sqrt of the variance). Parameters that
     cannot be described (e.g. non-numeric) are passed through unchanged.

     :param samples: one dict of samples, or a list of them (one per chain)
     :return: a single estimates dict, or a list of them for several chains
     """
     ests = []
     for chain_or_run_samples in ensure_list(samples):
         est = {}
         for pkey, pval in chain_or_run_samples.items():
             # except Exception (not a bare except) so KeyboardInterrupt /
             # SystemExit still propagate; describable failures fall back
             # to the raw samples, as before.
             try:
                 est[pkey + "_low"], est[pkey], est[pkey + "_std"] = \
                     describe(chain_or_run_samples[pkey])[1:4]
                 est[pkey + "_high"] = est[pkey + "_low"][1]
                 est[pkey + "_low"] = est[pkey + "_low"][0]
                 est[pkey + "_std"] = np.sqrt(est[pkey + "_std"])
                 for skey in [pkey, pkey + "_low", pkey + "_high",
                              pkey + "_std"]:
                     est[skey] = np.squeeze(est[skey])
             except Exception:
                 est[pkey] = chain_or_run_samples[pkey]
         ests.append(sort_dict(est))
     if len(ests) == 1:
         return ests[0]
     return ests
Exemplo n.º 15
0
 def convert_from_h5_model(self, obj=None, output_shape=None):
     """Rebuild a python object from this H5Model's datasets/metadata.

     :param obj: optional template object to fill in; when None (or of an
                 unhandled type) the target is created from the metadata
     :param output_shape: optional tuple to reshape a numpy.ndarray result
     :return: the reconstructed object; plain dicts are wrapped in
              OrderedDictDot unless a dict type was explicitly requested
     """
     output_type = obj.__class__.__name__
     if isinstance(obj, dict):
         obj = sort_dict(obj)
     elif np.in1d(output_type, ["tuple", "list"]):
         obj = iterable_to_dict(obj)
     elif isequal_string(output_type, "numpy.ndarray"):
         if isequal_string(obj.dtype, "numpy.ndarray"):
             obj = iterable_to_dict(obj.tolist())
     else:
         obj, output_type = create_object("/", self.metadata_dict)[:2]
     if obj is None:
         obj = OrderedDict()
     if output_type is None:
         output_type = obj.__class__.__name__
     # list(...) snapshot: the loop pops from the dict while iterating,
     # which would raise RuntimeError on a live keys() view in python 3.
     for abs_path in list(self.datasets_dict.keys()):
         child_obj = self.datasets_dict.pop(abs_path)
         rel_path = abs_path.split("/", 1)[1]
         build_hierarchical_object_recursively(obj, rel_path, child_obj,
                                               "/", abs_path,
                                               self.metadata_dict)
     if np.in1d(output_type, ["tuple", "list"]):
         obj = dict_to_list_or_tuple(obj, output_type)
     elif isequal_string(output_type, "numpy.ndarray"):
         # BUG FIX: the original called dict.values() on the dict *class*
         # (a TypeError); the intent is the values of the built object.
         obj = np.array(list(obj.values()))
         if isinstance(output_shape, tuple):
             try:
                 obj = np.reshape(obj, output_shape)
             except Exception:
                 logger.warning(
                     "Failed to reshape read object to target shape " +
                     str(output_shape) + "!" +
                     "\nReturning array of shape " + str(obj.shape) + "!")
     else:
         obj = update_object(obj, "/", self.metadata_dict,
                             getORpop="pop")[0]
     if isinstance(obj, dict) and output_type.lower().find("dict") < 0:
         return OrderedDictDot(obj)
     else:
         return obj
 def set_simulated_target_data(self, target_data, statistical_model, **kwargs):
     """Extract the observation signals from simulated target data.

     Depending on statistical_model.observation_model, signals are either
     taken from a seeg-like entry of target_data, or projected from
     source activity ("lfp", falling back to "x1") through the gain
     matrix. Side effects: sets self.signals_inds, self.data_type,
     self.observation_shape, self.n_times, self.n_signals, and writes a
     "signals" entry into target_data.

     :param target_data: dict of simulated time series arrays
     :param statistical_model: object exposing .observation_model (string)
     :param kwargs: optional "seeg_dataset" key pattern and
                    "manual_selection" list of signal indices
     :return: (signals array of shape (n_times, n_signals), target_data)
     """
     # Defaults: all regions observed as "lfp" source activity.
     self.signals_inds = range(self.number_of_regions)
     self.data_type = "lfp"
     signals = np.array([])
     if statistical_model.observation_model.find("seeg") >= 0:
         self.data_type = "seeg"
         self.signals_inds = range(self.gain_matrix.shape[0])
         # For non-"seeg_logpower" models, first try to find a ready-made
         # SEEG dataset inside target_data by key search.
         if not(isequal_string(statistical_model.observation_model, "seeg_logpower")):
             signals = extract_dict_stringkeys(sort_dict(target_data), kwargs.get("seeg_dataset", "SEEG0"),
                                               modefun="find", two_way_search=True, break_after=1)
             if len(signals) > 0:
                 # NOTE(review): .values()[0] is python-2-only indexing of
                 # dict values — confirm before porting to python 3.
                 signals = signals.values()[0]
         # Nothing found (or logpower model): project source activity
         # through the gain matrix instead.
         if signals.size == 0:
             signals = np.array(target_data.get("lfp", target_data["x1"]))
             if isequal_string(statistical_model.observation_model, "seeg_logpower"):
                 signals = np.log(np.dot(self.gain_matrix[self.signals_inds], np.exp(signals.T))).T
             else:
                 signals = (np.dot(self.gain_matrix[self.signals_inds], signals.T)).T
     else:
         # if statistical_model.observation_expression == "x1z_offset":
         #     signals = ((target_data["x1"].T - np.expand_dims(self.x1EQ, 1)).T +
         #                (target_data["z"].T - np.expand_dims(self.zEQ, 1)).T) / 2.75
         #     # TODO: a better normalization
         # elif statistical_model.observation_expression == "x1_offset":
         #     # TODO: a better normalization
         #     signals = (target_data["x1"].T - np.expand_dims(self.x1EQ, 1)).T / 2.0
         # else: # statistical_models.observation_expression == "lfp"
         signals = np.array(target_data.get("lfp", target_data["x1"]))
     target_data["signals"] = np.array(signals)
     # Optional manual sub-selection of the observed signals (columns).
     manual_selection = kwargs.get("manual_selection", [])
     if len(manual_selection) > 0:
         self.signals_inds = manual_selection
         if len(self.signals_inds) < signals.shape[1]:
             signals = signals[:, self.signals_inds]
     self.observation_shape = signals.shape
     (self.n_times, self.n_signals) = self.observation_shape
     return signals, target_data
Exemplo n.º 17
0
def build_stan_model_dict(statistical_model,
                          signals,
                          model_inversion,
                          gain_matrix=None):
    """
    Builds a dictionary with data needed for stan models.
    :param statistical_model: StatisticalModel object
    :param signals: observation signals array fed to stan
    :param model_inversion: ModelInversionService object
    :param gain_matrix: array; when None it is taken from model_inversion
                        for "seeg" observation models, or set to identity
    :return: sorted dictionary with stan data
    """
    active_regions_flag = np.zeros((statistical_model.number_of_regions, ),
                                   dtype="i")
    active_regions_flag[statistical_model.active_regions] = 1
    if gain_matrix is None:
        if statistical_model.observation_model.find("seeg") >= 0:
            gain_matrix = model_inversion.gain_matrix
            mixing = deepcopy(gain_matrix)
        else:
            mixing = np.eye(statistical_model.number_of_regions)
    else:
        # BUG FIX: the original left `mixing` unbound (NameError) whenever
        # a gain_matrix was passed in explicitly.
        mixing = deepcopy(gain_matrix)
    # Restrict the mixing matrix to the selected signals when needed.
    if mixing.shape[0] > len(model_inversion.signals_inds):
        mixing = mixing[model_inversion.signals_inds]
    SC = model_inversion.get_SC()
    model_data = {
        "number_of_regions":
        statistical_model.number_of_regions,
        "n_times":
        statistical_model.n_times,
        "n_signals":
        statistical_model.n_signals,
        "n_active_regions":
        statistical_model.n_active_regions,
        "n_nonactive_regions":
        statistical_model.n_nonactive_regions,
        # Integer division keeps an exact int on python 2 and 3 alike
        # (n*(n-1) is always even).
        "n_connections":
        statistical_model.number_of_regions *
        (statistical_model.number_of_regions - 1) // 2,
        "active_regions_flag":
        np.array(active_regions_flag),
        "active_regions":
        np.array(statistical_model.active_regions) +
        1,  # cmdstan cannot take lists!
        "nonactive_regions":
        np.where(1 - active_regions_flag)[0] + 1,  # indexing starts from 1!
        "x1eq_min":
        statistical_model.x1eq_min,
        "x1eq_max":
        statistical_model.x1eq_max,
        "SC":
        SC[np.triu_indices(SC.shape[0], 1)],  # upper triangle only
        "dt":
        statistical_model.dt,
        "observation_model":
        np.where(
            np.in1d(OBSERVATION_MODELS,
                    statistical_model.observation_model))[0][0],
        "signals":
        signals,
        "time":
        model_inversion.time,
        "mixing":
        mixing
    }
    # .items() instead of py2-only .iteritems() (identical semantics here).
    for key, val in model_inversion.epileptor_parameters.items():
        model_data.update({key: val})
    for p in statistical_model.parameters.values():
        model_data.update({p.name + "_lo": p.low, p.name + "_hi": p.high})
        if not isequal_string(p.type, "normal"):
            model_data.update({
                p.name + "_loc":
                p.loc,
                p.name + "_scale":
                p.scale,
                p.name + "_pdf":
                np.where(
                    np.in1d(
                        ProbabilityDistributionTypes.available_distributions,
                        p.type))[0][0],
                # list() so dict values feed np.array on python 3 as well.
                p.name + "_p": (np.array(list(p.pdf_params().values())).T *
                                np.ones((2, ))).squeeze()
            })
    model_data["x1eq_star_loc"] = statistical_model.parameters[
        "x1eq_star"].mean
    model_data["x1eq_star_scale"] = statistical_model.parameters[
        "x1eq_star"].std
    model_data["MC_scale"] = statistical_model.MC_scale
    MCsplit_shape = np.ones(statistical_model.parameters["MCsplit"].p_shape)
    model_data["MCsplit_loc"] = statistical_model.parameters[
        "MCsplit"].mean * MCsplit_shape
    model_data["MCsplit_scale"] = statistical_model.parameters[
        "MCsplit"].std * MCsplit_shape
    model_data["offset_signal_p"] = np.array(
        list(statistical_model.parameters["offset_signal"].pdf_params()
             .values()))
    return sort_dict(model_data)
Exemplo n.º 18
0
def convert_to_h5_model(obj):
    """Recursively convert *obj* into an H5Model with sorted dicts."""
    model = H5Model(OrderedDict(), OrderedDict())
    object_to_h5_model_recursively(model, obj)
    model.datasets_dict = sort_dict(model.datasets_dict)
    model.metadata_dict = sort_dict(model.metadata_dict)
    return model
Exemplo n.º 19
0
import os
from collections import OrderedDict
import numpy as np
from tvb_epilepsy.base.utils.data_structures_utils import sort_dict, isequal_string

# Default option dicts for the stan samplers, grouped per cmdstan argument
# section. Each is sorted on creation via sort_dict.

STAN_STATIC_OPTIONS = sort_dict({"int_time": 2 * np.pi})  # int_time > 0

STAN_NUTS_OPTIONS = sort_dict({"max_depth": 10})  # int > 0

# Hamiltonian Monte Carlo engine options.
STAN_HMC_OPTIONS = sort_dict({
    "metric": "diag_e",  # others: "unit_e", "dense_e"
    "stepsize": 1,  # stepsize > 0
    "stepsize_jitter": 0
})  # 0 <= stepsize_jitter <= 1
# Appended after sorting; the alternative engine is "static".
STAN_HMC_OPTIONS.update({"engine": "nuts"})

# Adaptation options for the sampling phase.
STAN_SAMPLE_ADAPT_OPTIONS = sort_dict({
    "engaged": 1,  # 0, 1
    "gamma": 0.05,  # gamma > 0
    "delta": 0.8,  # 1 > delta > 0
    "kappa": 0.75,  # kappa > 0
    "t0": 10,  # t0 > 0
    "init_buffer": 75,  # int > 0
    "term_buffer": 50,  # int > 0
    "window": 25
})  # int > 0

STAN_SAMPLE_OPTIONS = sort_dict({
    "num_samples": 1000,  # num_samples >= 0
    "num_warmup": 1000,  # warmup >= 0
    "save_warmup": 0,  # 0, 1
Exemplo n.º 20
0
 def plot_fit_timeseries(self,
                         target_data,
                         samples,
                         ests,
                         stats=None,
                         probabilistic_model=None,
                         seizure_indices=[],  # NOTE(review): mutable default; read-only here, but None would be safer
                         skip_samples=0,
                         trajectories_plot=False,
                         title_prefix=""):
     """Plot fitted hidden-state and target time series per sampled chain.

     For every chain, plots a raster of the fitted hidden states x1/z, a
     raster of the dynamic-noise increments (dX1t/dZt) when present, and
     optionally state-space trajectories; finally one raster compares the
     observation target against every chain's fit.

     :param target_data: time series object exposing .squeezed, .time_line,
                         .time_unit and .space_labels
     :param samples: sample dict (or list, one per chain/run)
     :param ests: point-estimate dict (or list, one per chain/run)
     :param stats: optional dict of per-statistic arrays used to annotate
                   titles and region labels
     :param probabilistic_model: optional; only used to print the sigma prior
     :param seizure_indices: indices highlighted in the raster plots
     :param skip_samples: number of leading samples dropped from each trace
     :param trajectories_plot: also plot x1-z state-space trajectories
     :param title_prefix: prepended (with ": ") to every figure title
     :return: tuple of the created figures
     """
     if len(title_prefix) > 0:
         title_prefix = title_prefix + ": "
     samples = ensure_list(samples)
     region_labels = samples[0]["x1"].space_labels
     if probabilistic_model is not None:
         sig_prior_str = " sig_prior = " + str(
             probabilistic_model.get_prior("sigma")[0])
     else:
         sig_prior_str = ""
     stats_region_labels = region_labels
     if stats is not None:
         # Build per-statistic annotation strings for titles and labels.
         stats_string = {
             "fit_target_data": "\n",
             "x1": "\n",
             "z": "\n",
             "MC": ""
         }
         if isinstance(stats, dict):
             for skey, sval in stats.items():
                 for p_str in ["fit_target_data", "x1", "z"]:
                     stats_string[p_str] \
                         = stats_string[p_str] + skey + "_mean=" + str(numpy.mean(sval[p_str])) + ", "
                 # Extend each region label with its per-region x1/z means.
                 stats_region_labels = [
                     stats_region_labels[ip] + ", " + skey + "_" + "x1" +
                     "_mean=" + str(sval["x1"][:, ip].mean()) + ", " +
                     skey + "_z_mean=" + str(sval["z"][:, ip].mean())
                     for ip in range(len(region_labels))
                 ]
             for p_str in ["fit_target_data", "x1", "z"]:
                 # Drop the trailing ", " separator.
                 stats_string[p_str] = stats_string[p_str][:-2]
     else:
         stats_string = dict(zip(["fit_target_data", "x1", "z"], 3 * [""]))
     observation_dict = OrderedDict(
         {'observation time series': target_data.squeezed})
     time = target_data.time_line
     figs = []
     for id_est, (est, sample) in enumerate(zip(ensure_list(ests),
                                                samples)):
         name = title_prefix + "_chain" + str(id_est + 1)
         # Collect this chain's fit for the final observation-vs-fit raster.
         observation_dict.update({
             "fit chain " + str(id_est + 1):
             sample["fit_target_data"].data[:, :, :,
                                            skip_samples:].squeeze()
         })
         figs.append(
             self.plot_raster(
                 sort_dict({
                     "x1":
                     sample["x1"].data[:, :, :, skip_samples:].squeeze(),
                     'z':
                     sample["z"].data[:, :, :, skip_samples:].squeeze()
                 }),
                 time,
                 special_idx=seizure_indices,
                 time_units=target_data.time_unit,
                 title=name + ": Hidden states fit rasterplot",
                 subtitles=[
                     'hidden state ' + "x1" + stats_string["x1"],
                     'hidden state z' + stats_string["z"]
                 ],
                 offset=1.0,
                 labels=region_labels,
                 figsize=FiguresConfig.VERY_LARGE_SIZE))
         # Dynamic-noise increments, plotted only when present in the sample.
         dWt = {}
         subtitles = []
         if sample.get("dX1t", None):
             dWt.update({
                 "dX1t":
                 sample["dX1t"].data[:, :, :, skip_samples:].squeeze()
             })
             subtitles.append("dX1t")
         if sample.get("dZt", None):
             dWt.update({
                 "dZt":
                 sample["dZt"].data[:, :, :, skip_samples:].squeeze()
             })
             subtitles.append("dZt")
         if len(dWt) > 0:
             subtitles[
                 -1] += "\ndynamic noise" + sig_prior_str + ", sig_post = " + str(
                     est["sigma"])
             # time[:-1]: increments have one sample fewer than the states.
             figs.append(
                 self.plot_raster(sort_dict(dWt),
                                  time[:-1],
                                  time_units=target_data.time_unit,
                                  special_idx=seizure_indices,
                                  title=name +
                                  ": Hidden states random walk rasterplot",
                                  subtitles=subtitles,
                                  offset=1.0,
                                  labels=region_labels,
                                  figsize=FiguresConfig.VERY_LARGE_SIZE))
         if trajectories_plot:
             title = name + ' Fit hidden state space trajectories'
             figs.append(
                 self.plot_trajectories(
                     {
                         "x1":
                         sample["x1"].data[:, :, :,
                                           skip_samples:].squeeze(),
                         'z':
                         sample['z'].data[:, :, :, skip_samples:].squeeze()
                     },
                     special_idx=seizure_indices,
                     title=title,
                     labels=stats_region_labels,
                     figsize=FiguresConfig.SUPER_LARGE_SIZE))
     # Final figure: observation target overlaid with every chain's fit.
     figs.append(
         self.plot_raster(observation_dict,
                          time,
                          special_idx=[],
                          time_units=target_data.time_unit,
                          title=title_prefix +
                          "Observation target vs fit time series: " +
                          stats_string["fit_target_data"],
                          figure_name=title_prefix +
                          "ObservationTarget_VS_FitTimeSeries",
                          offset=1.0,
                          labels=target_data.space_labels,
                          figsize=FiguresConfig.VERY_LARGE_SIZE))
     return tuple(figs)