def prepare_params(self, params_pse):
    if isinstance(params_pse, list):
        temp = []
        for param in params_pse:
            self.params_paths.append(param["path"])
            temp2 = param["samples"].flatten()
            temp.append(temp2)
            self.n_params_vals.append(temp2.size)
            indices = param.get("indices", [])
            self.params_indices.append(indices)
            self.params_names.append(param.get("name", param["path"].rsplit('.', 1)[-1] + str(indices)))
        self.n_params_vals = np.array(self.n_params_vals)
        self.n_params = len(self.params_paths)
        if not np.all(self.n_params_vals == self.n_params_vals[0]):
            raise_value_error("\nNot all parameters have the same number of samples!: " +
                              "\n" + str(self.params_paths) + " = " + str(self.n_params_vals))
        else:
            self.n_params_vals = self.n_params_vals[0]
        self.params_vals = np.vstack(temp).T
        self.params_paths = np.array(self.params_paths)
        self.params_indices = np.array(self.params_indices)
        self.n_loops = self.params_vals.shape[0]
        self.logger.info("\nGenerated a parameter search exploration for lsa/sim task,"
                         "\nwith " + str(self.n_params) + " parameters of " + str(self.n_params_vals) +
                         " values each,\nleading to " + str(self.n_loops) + " total execution loops")
    else:
        self.logger.warning("\nparams_pse is not a list of dicts!")
def _prepare_distribution_axes(self, distribution, loc=0.0, scale=1.0, x=numpy.array([]), ax=None,
                               linestyle="-", lgnd=False):
    if len(x) < 1:
        x = linspace_broadcast(
            distribution._scipy_method("ppf", distribution.loc, distribution.scale, 0.01),
            distribution._scipy_method("ppf", distribution.loc, distribution.scale, 0.99), 100)
    if x is not None:
        if x.ndim == 1:
            x = x[:, numpy.newaxis]
        pdf = distribution._scipy_method("pdf", loc, scale, x)
        if ax is None:
            _, ax = pyplot.subplots(1, 1)
        for ip, (xx, pp) in enumerate(zip(x.T, pdf.T)):
            ax.plot(xx.T, pp.T, linestyle=linestyle, linewidth=1, label=str(ip), alpha=0.5)
        if lgnd:
            pyplot.legend()
        return ax
    else:
        # TODO: is this message correct??
        raise_value_error("Distribution parameters do not broadcast!")
def eqtn_fz_square_taylor(zeq, yc, Iext1, K, w, tau1, tau0):
    n_regions = zeq.size
    tau = np.divide(tau1, tau0)
    tau = np.repeat(tau.T, n_regions, 1)
    # The z derivative of the function
    # x1 = F(z) = -4/3 - 1/2*sqrt(2(z-yc-Iext1) + 64/27)
    dfz = -np.divide(0.5, np.power(2.0 * (zeq - yc - Iext1) + 64.0 / 27.0, 0.5))
    # Tim Proix: dfz = -np.divide(1, np.power(8.0 * zeq - 629.6/27, 0.5))
    if np.any(np.isnan(dfz)) or np.any(np.isinf(dfz)):
        raise_value_error("nan or inf values in dfz")
    # Jacobian of the z-equation:
    # Diagonal elements: -1 + dfz_i * (4 + K_i * sum_j_not_i{wij})
    # Off-diagonal elements: -K_i * wij_not_i * dfz_j_not_i
    i = np.ones((1, n_regions), dtype=np.float32)
    fz_jac = np.diag((-1.0 + np.multiply(dfz, (4.0 + np.multiply(K, np.expand_dims(np.sum(w, axis=1), 1).T)))).T[:, 0]) \
             - np.multiply(np.multiply(np.dot(K.T, i), np.dot(i.T, dfz)), w)
    if np.any(np.isnan(fz_jac.flatten())) or np.any(np.isinf(fz_jac.flatten())):
        raise_value_error("nan or inf values in fz_jac")
    return np.multiply(fz_jac, tau)
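# Illustrative sketch (not part of the original module): calling eqtn_fz_square_taylor, assuming the
# region-wise inputs are 1 x n_regions row arrays and w is an n_regions x n_regions connectivity matrix.
def _example_eqtn_fz_square_taylor():
    import numpy as np
    n = 3
    zeq = 3.0 * np.ones((1, n))
    yc = 1.0 * np.ones((1, n))
    Iext1 = 3.1 * np.ones((1, n))
    K = 1.0 * np.ones((1, n))
    w = np.ones((n, n)) - np.eye(n)  # no self-connections
    tau1 = np.ones((1, n))
    tau0 = 10.0 * np.ones((1, n))
    fz_jac = eqtn_fz_square_taylor(zeq, yc, Iext1, K, w, tau1, tau0)
    print(fz_jac.shape)  # (3, 3): Jacobian of the z-equation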
def model_builder_fun(model_name, **kwargs):
    if model_name in AVAILABLE_DYNAMICAL_MODELS_NAMES:
        return model_build_dict[model_name](**kwargs)
    else:
        raise_value_error("Model name (%s) does not correspond to one of the available ones: %s"
                          % (str(model_name), str(AVAILABLE_DYNAMICAL_MODELS_NAMES)))
def __init__(self, name="Parameter", low=-CalculusConfig.MAX_SINGLE_VALUE,
             high=CalculusConfig.MAX_SINGLE_VALUE, loc=0.0, scale=1.0, p_shape=(),
             use="scipy", **target_params):
    # thisProbabilityDistribution, probability_distribution and optimize_pdf are expected to be bound
    # in the enclosing factory scope that generates this parameter class dynamically.
    ProbabilisticParameterBase.__init__(self, name, low, high, loc, scale, p_shape)
    thisProbabilityDistribution.__init__(self, **target_params)
    # Test whether the distribution was set to the desired parameters:
    success = True
    for p_key, p_val in target_params.items():
        if np.any(p_val != getattr(self, p_key)):
            success = False
    if success is False:
        if optimize_pdf:
            pdf_params = compute_pdf_params(probability_distribution.lower(), target_params, loc, scale, use)
            thisProbabilityDistribution.__init__(self, **pdf_params)
            success = True
            for p_key, p_val in target_params.items():
                if np.any(np.abs(p_val - getattr(self, p_key)) > 0.1):
                    success = False
    if success is False:
        raise_value_error("Cannot generate probability distribution of type " + probability_distribution +
                          " with parameters " + str(target_params) + " !")
    self._update_params(use=use)
def read_probabilistic_model(self, path):
    h5_file = h5py.File(path, 'r', libver='latest')
    epi_subtype = h5_file.attrs[H5_SUBTYPE_ATTRIBUTE]
    probabilistic_model = None
    if ProbabilisticModelBase.__name__.find(epi_subtype) >= 0:
        probabilistic_model = ProbabilisticModelBase()
    else:
        raise_value_error(epi_subtype + " does not correspond to the available probabilistic model!:\n" +
                          ProbabilisticModelBase.__name__)
    for attr in h5_file.attrs.keys():
        if attr not in H5_TYPES_ATTRUBUTES:
            probabilistic_model.__setattr__(attr, h5_file.attrs[attr])
    for key, value in h5_file.items():
        if isinstance(value, h5py.Dataset):
            probabilistic_model.__setattr__(key, value[()])
        if isinstance(value, h5py.Group):
            h5_group_handlers = H5GroupHandlers()
            if key == "parameters":  # and value.attrs[epi_subtype_key] == OrderedDict.__name__:
                parameters = h5_group_handlers.handle_group_parameters(value)
                probabilistic_model.__setattr__(key, parameters)
            if key == "ground_truth":
                h5_group_handlers.handle_group_ground_truth(value, probabilistic_model)
    h5_file.close()
    return probabilistic_model
def prepare_target_stats(distribution, target_stats, loc=0.0, scale=1.0):
    # Make sure that the shapes of all target stats broadcast with one another:
    target_shape = np.ones(()) * loc * scale
    target_shape = np.ones(target_shape.shape)
    try:
        for ts in target_stats.values():
            target_shape = target_shape * np.ones(np.array(ts).shape)
    except:
        raise_value_error("Target statistics (" + str([np.array(ts).shape for ts in target_stats.values()]) +
                          ") and distribution (" + str(distribution.p_shape) + ") shapes do not propagate!")
    for ts_key in target_stats.keys():
        target_stats[ts_key] *= target_shape
        if np.sum(target_stats[ts_key].shape) > 0:
            target_stats[ts_key] = target_stats[ts_key].flatten()
    target_size = target_shape.size
    target_shape = target_shape.shape
    target_stats_array = np.around(np.vstack(target_stats.values()).T, decimals=2)
    target_stats_unique = np.unique(target_stats_array, axis=0)
    # target_stats_unique = np.vstack({tuple(row) for row in target_stats_array})
    target_stats_unique = dict(zip(target_stats.keys(),
                                   [np.around(target_stats_unique[:, ii], decimals=3)
                                    for ii in range(distribution.n_params)]))
    target_stats_unique = dicts_of_lists_to_lists_of_dicts(target_stats_unique)
    return target_stats_unique, target_stats_array, target_shape, target_size
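# Illustrative input (hypothetical statistic names; the keys must correspond to the distribution's
# _calc_* methods or attributes, as also assumed by fobj further below), e.g.:
# target_stats = {"mean": np.array([0.0, 1.0]), "std": np.array([1.0, 2.0])}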
def _ensure_eigen_vectors_number(self, eigen_values, e_values, x0_values, disease_indices):
    if self.eigen_vectors_number is None:
        if self.eigen_vectors_number_selection == "auto_eigenvals":
            self.eigen_vectors_number = self.get_curve_elbow_point(numpy.abs(eigen_values))
        elif self.eigen_vectors_number_selection == "auto_disease":
            self.eigen_vectors_number = len(disease_indices)
        elif self.eigen_vectors_number_selection == "auto_epileptogenicity":
            self.eigen_vectors_number = self.get_curve_elbow_point(e_values)
        elif self.eigen_vectors_number_selection == "auto_excitability":
            self.eigen_vectors_number = self.get_curve_elbow_point(x0_values)
        else:
            raise_value_error("\n" + self.eigen_vectors_number_selection +
                              " is not a valid option for automatic computation of self.eigen_vectors_number")
    else:
        self.eigen_vectors_number_selection = "user_defined"
def _sort_disease_indices_values(self, disease_dict):
    indices = []
    values = []
    for key, value in disease_dict.items():
        value = ensure_list(value)
        key = ensure_list(key)
        n = len(key)
        if n > 0:
            indices += key
            if len(value) == n:
                values += value
            elif len(value) == 1 and n > 1:
                values += value * n
            else:
                raise_value_error("Length of disease indices " + str(n) + " and values " +
                                  str(len(value)) + " do not match!")
    if len(indices) > 0:
        if isinstance(indices[0], tuple):
            arg_sort = np.ravel_multi_index(indices, (self.number_of_regions, self.number_of_regions)).argsort()
        else:
            arg_sort = np.argsort(indices)
        return np.array(indices)[arg_sort].tolist(), np.array(values)[arg_sort]
    else:
        return [], []
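# Illustrative behavior (hypothetical values): for a region-wise dictionary such as
# disease_dict = {3: 0.9, 0: 0.5, 1: 0.9}, the method returns the indices sorted together with their
# values, i.e. ([0, 1, 3], array([0.5, 0.9, 0.9])); tuple keys (connectivity entries) are sorted
# via their raveled (row, column) index instead.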
def __update_params__(self, loc=0.0, scale=1.0, use="scipy", check_constraint=True, **params):
    if len(params) == 0:
        params = self.pdf_params()
    self.__set_params__(**params)
    # params = self.__squeeze_parameters__(update=False, loc=loc, scale=scale, use=use)
    # self.__set_params__(**params)
    self.__p_shape = self.__update_shape__(loc, scale)
    self.__p_size = shape_to_size(self.p_shape)
    self.n_params = len(self.pdf_params())
    if check_constraint and not self.__check_constraint__():
        raise_value_error("Constraint for " + self.type + " distribution " + self.constraint_string +
                          "\nwith parameters " + str(self.pdf_params()) + " is not satisfied!")
    self.__mean = self._calc_mean(loc, scale, use)
    self.__median = self._calc_median(loc, scale, use)
    self.__mode = self._calc_mode(loc, scale)
    self.__var = self._calc_var(loc, scale, use)
    self.__std = self._calc_std(loc, scale, use)
    self.__skew = self._calc_skew()
    self.__kurt = self._calc_kurt()
def set_diseased_regions_values(self, disease_values):
    n = len(disease_values)
    if n != self.number_of_regions:
        raise_value_error("Diseased region values size (" + str(n) + ") doesn't match the number of regions (" +
                          str(self.number_of_regions) + ")!")
    self.diseased_regions_values = disease_values
    return self
def _set_conf_level(self, conf_level):
    if isinstance(conf_level, float) and conf_level > 0.0 and conf_level < 1.0:
        self.conf_level = conf_level
    else:
        raise_value_error("conf_level = " + str(conf_level) +
                          " is not a float in the (0.0, 1.0) interval as it should be!")
def _set_method(self, method):
    method = method.lower()
    if np.in1d(method, METHODS):
        self.method = method
    else:
        raise_value_error("Method " + str(method) + " is not one of the available methods " +
                          str(METHODS) + " !")
def model_builder_from_model_config_fun(model_config, model_name=None):
    if not isinstance(model_name, basestring):
        model_name = model_config.model_name
    if model_name in AVAILABLE_DYNAMICAL_MODELS_NAMES:
        return model_build_dict_from_model_config[model_name](model_config)
    else:
        raise_value_error("Model name (%s) does not correspond to one of the available ones: %s"
                          % (str(model_name), str(AVAILABLE_DYNAMICAL_MODELS_NAMES)))
def _check_regions_inds_range(self, indices, type):
    if numpy.any(numpy.array(indices) < 0) or numpy.any(numpy.array(indices) >= self.number_of_regions):
        raise_value_error(type + "_indices out of range! " +
                          "\nThe maximum index is " + str(self.number_of_regions - 1) +
                          " for number of brain regions " + str(self.number_of_regions) + " but" +
                          "\n" + type + "_indices = " + str(indices))
    return indices
def _check_indices_vals_sizes(self, indices, values, type):
    n_inds = len(indices)
    n_vals = len(values)
    if n_inds != n_vals:
        if n_vals == 1:
            # Broadcast a single value to all indices
            values *= numpy.ones(numpy.array(indices).shape)
        else:
            raise_value_error("Sizes of " + type + "_indices (" + str(n_inds) + ") " +
                              "and " + type + "_values (" + str(n_vals) + ") do not match!")
    return values
def concatenate_in_time(self, timeseries_list):
    timeseries_list = ensure_list(timeseries_list)
    out_timeseries = timeseries_list[0]
    for id, timeseries in enumerate(timeseries_list[1:], 1):
        if out_timeseries.time_step == timeseries.time_step:
            out_timeseries.data = np.concatenate([out_timeseries.data, timeseries.data], axis=0)
        else:
            raise_value_error("Timeseries concatenation in time failed!\n"
                              "Timeseries %d has a different time step (%s) than the ones before (%s)!"
                              % (id, str(timeseries.time_step), str(out_timeseries.time_step)))
    return out_timeseries
def check_number_of_inputs(nmodels, input, input_str):
    input = ensure_list(input)
    ninput = len(input)
    if ninput != nmodels:
        if ninput == 1:
            input *= nmodels
        else:
            raise_value_error("The size of input " + input_str + " (" + str(ninput) +
                              ") is neither equal to the number of models (" + str(nmodels) +
                              ") nor equal to 1!")
    return input
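# Hedged usage sketch (assumes ensure_list and raise_value_error from the project's utils are in scope,
# just as for the function above): a single input is replicated once per model, while a list already
# matching the number of models is returned unchanged.
# check_number_of_inputs(3, 0.5, "tau1")           # -> [0.5, 0.5, 0.5]
# check_number_of_inputs(3, [1.0, 2.0, 3.0], "K")  # -> [1.0, 2.0, 3.0]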
def _compute_jacobian(self, model_configuration):
    if self.lsa_method == "2D":
        fz_jacobian = calc_jac(model_configuration.x1eq, model_configuration.zeq, model_configuration.yc,
                               model_configuration.Iext1, model_configuration.x0, model_configuration.K,
                               model_configuration.connectivity, model_vars=2, zmode=model_configuration.zmode,
                               a=model_configuration.a, b=model_configuration.b, d=model_configuration.d,
                               tau1=model_configuration.tau1, tau0=model_configuration.tau0)
    else:
        # Check if any of the equilibria are in the supercritical regime (beyond the separatrix)
        # and set it right before the bifurcation.
        x1eq = numpy.array(model_configuration.x1eq)
        zeq = numpy.array(model_configuration.zeq)
        correction_value = X1EQ_CR_DEF - 10 ** (-3)
        if numpy.any(x1eq > correction_value):
            x1eq_min = numpy.min(x1eq)
            x1eq = interval_scaling(x1eq, min_targ=x1eq_min, max_targ=correction_value,
                                    min_orig=x1eq_min, max_orig=numpy.max(x1eq))
            self.logger.warning("Equilibria x1eq are rescaled for LSA to value: X1EQ_CR_DEF - 10 ** (-3) = " +
                                str(correction_value) + " to be sub-critical!")
            zeq = calc_eq_z(x1eq, model_configuration.yc, model_configuration.Iext1, "2d",
                            numpy.zeros(model_configuration.x1eq.shape), model_configuration.slope,
                            model_configuration.a, model_configuration.b, model_configuration.d)
        fz_jacobian = calc_fz_jac_square_taylor(zeq, model_configuration.yc, model_configuration.Iext1,
                                                model_configuration.K, model_configuration.connectivity,
                                                model_configuration.a, model_configuration.b,
                                                model_configuration.d)
    if numpy.any([numpy.any(numpy.isnan(fz_jacobian.flatten())),
                  numpy.any(numpy.isinf(fz_jacobian.flatten()))]):
        raise_value_error("nan or inf values in fz_jacobian")
    return fz_jacobian
def _confirm_support(self):
    p_star = (self.low - self.loc) / self.scale
    p_star_cdf = self.scipy.cdf(p_star)
    if np.any(p_star_cdf + np.finfo(np.float).eps <= 0.0):
        raise_value_error("Lower limit of " + self.name + " base distribution outside support!: " +
                          "\n(self.low-self.loc)/self.scale) = " + str(p_star) +
                          "\ncdf(self.low-self.loc)/self.scale) = " + str(p_star_cdf))
    p_star = (self.high - self.loc) / self.scale
    p_star_cdf = self.scipy.cdf(p_star)
    if np.any(p_star_cdf - np.finfo(np.float).eps >= 1.0):
        self.logger.warning("Upper limit of base " + self.name + " distribution outside support!: " +
                            "\n(self.high-self.loc)/self.scale) = " + str(p_star) +
                            "\ncdf(self.high-self.loc)/self.scale) = " + str(p_star_cdf))
def eqtn_x0(x1, z, zmode=np.array([ZMODE_DEF]), z_pos=True, K=None, w=None, coupl=None):
    if coupl is None:
        if np.all(K == 0.0) or np.all(w == 0.0) or (K is None) or (w is None):
            coupl = 0.0
        else:
            from tvb_fit.tvb_epilepsy.base.computation_utils.calculations_utils import calc_coupling
            coupl = calc_coupling(x1, K, w)
    # TODO: work on an element by element basis here...
    if np.any(zmode == 0):
        return x1 - (z + np.where(z_pos, 0.0, 0.1 * np.power(z, 7.0)) + coupl) / 4.0
    elif np.any(zmode == 1):
        return np.divide(3.0, 1.0 + np.power(np.exp(1), -10.0 * (x1 + 0.5))) - z - coupl
    else:
        raise_value_error('zmode is neither [0] nor [1]')
def set_normalize(self, values):
    values = ensure_list(values)
    n_vals = len(values)
    if n_vals > 0:
        if n_vals > 2:
            raise_value_error("Invalid disease hypothesis normalization values!: " + str(values) +
                              "\nThey cannot be more than 2!")
        else:
            if n_vals < 2:
                # Assuming normalization only to a maximum value, keeping the existing minimum one
                values = [numpy.min(self.diseased_regions_values)] + values
            self.normalize_values = values
    return self
def assert_fitmethod(self):
    if self.fitmethod.lower().find("sampl") >= 0:  # for sample or sampling
        self.fitmethod = "sampling"
    elif self.fitmethod.lower().find("v") >= 0:  # for variational or vb or advi
        self.fitmethod = "vb"
    elif self.fitmethod.lower().find("opt") >= 0:  # for optimization or optimizing or optimize
        self.fitmethod = "optimizing"
    else:
        raise_value_error(self.fitmethod + " does not correspond to one of the input methods:\n" +
                          "sampling, vb, optimizing")
def eqtn_fz(x1, z, x0, tau1, tau0, zmode=np.array([ZMODE_DEF]), z_pos=True, K=None, w=None, coupl=None):
    if coupl is None:
        if np.all(K == 0.0) or np.all(w == 0.0) or (K is None) or (w is None):
            coupl = 0.0
        else:
            from tvb_fit.tvb_epilepsy.base.computation_utils.calculations_utils import calc_coupling
            coupl = calc_coupling(x1, K, w)
    tau = np.divide(tau1, tau0)
    # TODO: work on an element by element basis here...
    if np.any(zmode == 0):
        return np.multiply((4 * (x1 - x0) - np.where(z_pos, z, z + 0.1 * np.power(z, 7.0)) - coupl), tau)
    elif np.any(zmode == 1):
        return np.multiply(np.divide(3.0, (1 + np.power(np.exp(1), (-10.0 * (x1 + 0.5))))) - x0 - z - coupl, tau)
    else:
        raise_value_error('zmode is neither [0] nor [1]')
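# Illustrative consistency sketch (not part of the original module): for zmode = [0], the x0 returned by
# eqtn_x0 above should make eqtn_fz vanish at the same (x1, z) point; the coupling is passed explicitly
# here so that no connectivity is needed.
def _example_x0_fz_consistency():
    import numpy as np
    x1 = np.array([-1.5, -1.3])
    z = np.array([3.0, 3.2])
    coupl = np.zeros(2)
    x0 = eqtn_x0(x1, z, zmode=np.array([0]), z_pos=True, coupl=coupl)
    fz = eqtn_fz(x1, z, x0, tau1=1.0, tau0=10.0, zmode=np.array([0]), z_pos=True, coupl=coupl)
    print(fz)  # ~ [0., 0.]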
def __init__(self, input="EpileptorDP", connectivity=None, K_unscaled=np.array([K_UNSCALED_DEF]),
             x0_values=X0_DEF, e_values=E_DEF, x1eq_mode="optimize", **kwargs):
    if isinstance(input, Simulator):
        # TODO: make this more specific once we clarify the model configuration representation compared to simTVB
        self.model_name = input.model._ui_name
        self.set_params_from_tvb_model(input.model)
        self.connectivity = normalize_weights(input.connectivity.weights)
        # self.coupling = input.coupling
        self.initial_conditions = np.squeeze(input.initial_conditions)  # initial conditions in a reduced form
        # self.noise = input.integrator.noise
        # self.monitor = ensure_list(input.monitors)[0]
    else:
        if isinstance(input, Model):
            self.model_name = input._ui_name
            self.set_params_from_tvb_model(input)
        elif isinstance(input, basestring):
            self.model_name = input
        else:
            raise_value_error("Input (%s) is not a TVB simulator, an epileptor model, "
                              "\nor a string of an epileptor model!" % str(input))
    if isinstance(connectivity, Connectivity):
        self.connectivity = connectivity.normalized_weights
    elif isinstance(connectivity, TVBConnectivity):
        self.connectivity = normalize_weights(connectivity.weights)
    elif isinstance(connectivity, np.ndarray):
        self.connectivity = normalize_weights(connectivity)
    else:
        if not isinstance(input, Simulator):
            warning("Input connectivity (%s) is not a virtual patient connectivity, a TVB connectivity, "
                    "\nor a numpy.array!" % str(connectivity))
    self.x0_values = x0_values * np.ones((self.number_of_regions,), dtype=np.float32)
    self.x1eq_mode = x1eq_mode
    if len(ensure_list(K_unscaled)) == 1:
        K_unscaled = np.array(K_unscaled) * np.ones((self.number_of_regions,), dtype=np.float32)
    elif len(ensure_list(K_unscaled)) == self.number_of_regions:
        K_unscaled = np.array(K_unscaled)
    else:
        self.logger.warning("The length of input global coupling K_unscaled is neither 1 "
                            "nor equal to the number of regions!"
                            "\nSetting model_configuration_builder.K_unscaled = K_UNSCALED_DEF for all regions")
        K_unscaled = K_UNSCALED_DEF * np.ones((self.number_of_regions,), dtype=np.float32)
    self.set_K_unscaled(K_unscaled)
    for pname in EPILEPTOR_PARAMS:
        self.set_parameter(pname, kwargs.get(pname, getattr(self, pname)))
    # Update K_unscaled
    self.e_values = e_values * np.ones((self.number_of_regions,), dtype=np.float32)
    self.x0cr = 0.0
    self.rx0 = 0.0
    self._compute_critical_x0_scaling()
def read_simulator_model(self, path, model_builder_fun):
    """
    :param path: Path towards a TVB model H5 file
    :return: TVB model object
    """
    self.logger.info("Starting to read epileptor model from: %s" % path)
    h5_file = h5py.File(path, 'r', libver='latest')
    try:
        model_name = h5_file["/"].attrs[H5_SUBTYPE_ATTRIBUTE]
        model = model_builder_fun(model_name)
    except:
        raise_value_error("No model read from model configuration file!: %s" % str(path))
    return H5GroupHandlers().read_simulator_model_group(h5_file, model, "/")
def generate_distribution(distrib_type, loc=0.0, scale=1.0, use="manual", target_shape=None,
                          optimize_pdf=True, **pdf_params):
    if np.in1d(distrib_type.lower(), ProbabilityDistributionTypes.available_distributions):
        # generate an agnostic distribution
        distribution = probability_distribution_factory(distrib_type.lower())
        success = True
        if len(pdf_params) > 0:
            # update with desired parameters
            distribution.update_params(loc, scale, use, **pdf_params)
            # test whether the distribution is correctly set:
            for p_key, p_val in pdf_params.iteritems():
                if np.any(p_val != getattr(distribution, p_key)):
                    success = False
        if success is False:
            # if the distribution is not correct, try to optimize it
            if optimize_pdf:
                distribution = optimize_distribution(distrib_type, loc, scale, use,
                                                     target_shape=None, **pdf_params)
                success = True
                for p_key, p_val in pdf_params.iteritems():
                    if np.any(np.abs(p_val - getattr(distribution, p_key)) > 0.1):
                        success = False
            # if we still don't get the desired distribution, raise an error
            if success is False:
                raise_value_error("Cannot generate probability distribution of type " + distrib_type +
                                  " with parameters " + str(pdf_params) + " !")
        if isinstance(target_shape, tuple):
            distribution.__shape_parameters__(target_shape, loc, scale, use)
        return distribution
    else:
        raise_value_error(distrib_type + " is not one of the available distributions!: " +
                          str(ProbabilityDistributionTypes.available_distributions))
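# Illustrative call (hypothetical distribution name and parameter names; the valid ones depend on
# ProbabilityDistributionTypes.available_distributions and on each distribution's pdf_params()):
# distribution = generate_distribution("normal", loc=0.0, scale=1.0, use="manual", target_shape=(3,), **pdf_params)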
def eqtn_jac_fz_2d(x1, z, tau1, tau0, zmode=np.array([ZMODE_DEF]), z_pos=True, K=None, w=None):
    tau = np.divide(tau1, tau0)
    jac_z = -np.ones(z.shape, dtype=z.dtype)
    # TODO: work on an element by element basis here...
    if np.any(zmode == 0):
        jac_x1 = 4.0 * np.ones(z.shape, dtype=z.dtype)
        if not z_pos:
            jac_z -= 0.7 * np.power(z, 6.0)
    elif np.any(zmode == 1):
        jac_x1 = np.divide(30 * np.power(np.exp(1), (-10.0 * (x1 + 0.5))),
                           1 + np.power(np.exp(1), (-10.0 * (x1 + 0.5))))
    else:
        raise_value_error('zmode is neither [0] nor [1]')
    # Assuming that wii = 0
    jac_x1 += np.multiply(K, np.sum(w, 1))
    jac_x1 = np.diag(jac_x1.flatten()) - np.multiply(np.repeat(np.reshape(K, (x1.size, 1)), x1.size, axis=1), w)
    jac_x1 *= np.repeat(np.reshape(tau, (x1.size, 1)), x1.size, axis=1)
    jac_z *= tau
    jac_z = np.diag(jac_z.flatten())
    return np.concatenate([jac_x1, jac_z], axis=1)
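# Illustrative shape sketch (not part of the original module), assuming flat region-wise arrays of length n:
# the returned Jacobian of the 2D z-equation is an n x 2n block [d fz/d x1 | d fz/d z].
def _example_eqtn_jac_fz_2d():
    import numpy as np
    n = 3
    x1 = -1.5 * np.ones(n)
    z = 3.0 * np.ones(n)
    w = np.ones((n, n)) - np.eye(n)
    jac = eqtn_jac_fz_2d(x1, z, tau1=np.ones(n), tau0=10.0 * np.ones(n),
                         zmode=np.array([0]), z_pos=True, K=np.ones(n), w=w)
    print(jac.shape)  # (3, 6)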
def __shape_parameters__(self, shape=None, loc=0.0, scale=1.0, use="scipy"):
    if isinstance(shape, tuple):
        self.__p_shape = shape
    i1 = np.ones((np.ones(self.p_shape) * loc * scale).shape)
    for p_key in self.pdf_params().keys():
        try:
            setattr(self, p_key, getattr(self, p_key) * i1)
        except:
            try:
                setattr(self, p_key, np.reshape(getattr(self, p_key), self.p_shape))
            except:
                raise_value_error("Neither propagation nor reshaping worked for distribution parameter " +
                                  p_key + " reshaping\nto shape " + str(self.p_shape) +
                                  "\nfrom its original value " + str(getattr(self, p_key)) + "!")
    self.__update_params__(loc, scale, use)
def fobj(p, pdf, target_stats, loc=0.0, scale=1.0, use="manual"):
    params = construct_pdf_params_dict(p, pdf)
    pdf.update_params(loc, scale, use, **params)
    f = 0.0
    norm = 0.0
    for ts_key, ts_val in target_stats.iteritems():
        # norm += ts_val ** 2
        try:
            f += (getattr(pdf, "_calc_" + ts_key)(loc, scale, use) - ts_val) ** 2
        except:
            try:
                f += (getattr(pdf, ts_key) - ts_val) ** 2
            except:
                raise_value_error("Failed to calculate and/or return target statistic or parameter " +
                                  ts_key + " !")
    # if np.isnan(f) or np.isinf(f):
    #     print("WTF?")
    # if norm > 0.0:
    #     f /= norm
    return f
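# fobj is intended to be minimized over the parameter vector p, e.g. (illustrative, assuming a pdf object
# and a target_stats dict as prepared above):
# from scipy.optimize import minimize
# sol = minimize(fobj, p0, args=(pdf, target_stats, loc, scale, "manual"))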