def read_vb_results(self, fit):
    """Regroup flat variational-Bayes (vb) output into per-parameter containers.

    Entries of fit['sampler_param_names'] look like "name", "name.i" or
    "name.i.j" with 1-based indices; the parallel sequences
    fit["sampler_params"] (posterior samples) and fit["mean_pars"]
    (mean estimates) are regrouped per parameter name, supporting up to
    3 dimensions.

    :param fit: dict-like vb result holding 'sampler_param_names',
                'sampler_params' and 'mean_pars' as parallel sequences.
    :return: (samples, est) — two dicts mapping parameter name to numpy
             arrays (lists squeezed via np.squeeze at the end).
    :raises: via raise_not_implemented_error for parameters of more than
             3 dimensions.
    """
    est = {}
    samples = {}
    for ip, p in enumerate(fit['sampler_param_names']):
        # Split "name.i.j" into the base name and its (1-based) indices.
        p_split = p.split('.')
        p_name = p_split.pop(0)
        if est.get(p_name) is None:
            # First occurrence of this parameter: start empty containers.
            samples.update({p_name: []})
            est.update({p_name: []})
        if len(p_split) == 0:
            # scalar parameters
            samples[p_name] = fit["sampler_params"][ip]
            est[p_name] = fit["mean_pars"][ip]
        else:
            if len(p_split) == 1:
                # vector parameters
                samples[p_name].append(fit["sampler_params"][ip])
                est[p_name].append(fit["mean_pars"][ip])
            else:
                # First index: Stan names are 1-based, lists are 0-based.
                ii = int(p_split.pop(0)) - 1
                if len(p_split) == 0:
                    # 2D matrix parameters
                    if len(est[p_name]) < ii + 1:
                        # Row ii not seen yet: open it with the current value.
                        samples[p_name].append([fit["sampler_params"][ip]])
                        est[p_name].append([fit["mean_pars"][ip]])
                    else:
                        samples[p_name][ii].append(
                            fit["sampler_params"][ip])
                        est[p_name][ii].append(fit["mean_pars"][ip])
                else:
                    if len(est[p_name]) < ii + 1:
                        # Slice ii of a >=3D parameter not seen yet: open it empty.
                        samples[p_name].append([])
                        est[p_name].append([])
                    jj = int(p_split.pop(0)) - 1
                    if len(p_split) == 0:
                        # 3D matrix parameters
                        if len(est[p_name][ii]) < jj + 1:
                            # Row jj not seen yet: open it with the current value.
                            samples[p_name][ii].append(
                                [fit["sampler_params"][ip]])
                            est[p_name][ii].append([fit["mean_pars"][ip]])
                        else:
                            # NOTE(review): this inner check repeats the outer
                            # condition (which is False here), so it never fires.
                            if len(est[p_name][ii]) < jj + 1:
                                samples[p_name][ii].append([])
                                est[p_name][ii].append([])
                            samples[p_name][ii][jj].append(
                                fit["sampler_params"][ip])
                            est[p_name][ii][jj].append(
                                fit["mean_pars"][ip])
                    else:
                        raise_not_implemented_error(
                            "Extracting of parameters of more than 3 dimensions is not " +
                            "implemented yet for vb!", self.logger)
    # Convert accumulated lists to arrays, dropping singleton dimensions.
    for key in est.keys():
        if isinstance(est[key], list):
            est[key] = np.squeeze(np.array(est[key]))
        if isinstance(samples[key], list):
            samples[key] = np.squeeze(np.array(samples[key]))
    return samples, est
def scipy_method(self, method, *args, **kwargs):
    """Dispatch a scipy.stats-style method call to this parameter's distribution.

    Sampling/statistics methods are forwarded directly with (loc, scale);
    density/CDF-family methods first resolve the evaluation points x via
    get_x_arg_for_param_distrib and return them alongside the result.

    :raises: via raise_not_implemented_error for unsupported method names.
    """
    forwarded = ["rvs", "ppf", "isf", "stats", "moment", "median", "mean", "interval"]
    pointwise = ["pdf", "logpdf", "cdf", "logcdf", "sf", "logsf"]
    if method in forwarded:
        return self._scipy_method(method, self.loc, self.scale, *args, **kwargs)
    if method in pointwise:
        x, args, kwargs = get_x_arg_for_param_distrib(self, *args, **kwargs)
        result = self._scipy_method(method, self.loc, self.scale, *args, **kwargs)
        return x, result
    raise_not_implemented_error("Scipy method " + method +
                                " is not implemented for parameter " + self.name + "!")
def jacobian(self, state_variables, coupling, local_coupling=0.0,
             array=numpy.array, where=numpy.where, concat=numpy.concatenate):
    """Jacobian computation is not available for this model; always raises.

    The signature (including the numpy-callable defaults) mirrors the base
    class contract so callers and overrides stay compatible.
    """
    message = ("Jacobian calculation of model " + self._ui_name +
               " is not implemented yet!")
    raise_not_implemented_error(message)
def scipy_method(self, method, *args, **kwargs):
    """Evaluate a scipy method for a parameter defined by reflection: max - star.

    Statistics of the underlying "star" parameter are reflected around
    self.max; density-family methods are evaluated at the reflected points.

    :raises: via raise_not_implemented_error for unsupported method names.
    """
    reflected_stats = ["rvs", "ppf", "isf", "stats", "moment", "median", "mean", "interval"]
    density_family = ["pdf", "logpdf", "cdf", "logcdf", "sf", "logsf"]
    if method in reflected_stats:
        return self.max - self.star.scipy_method(method, *args, **kwargs)
    if method in density_family:
        x, args, kwargs = get_x_arg_for_param_distrib(self, *args, **kwargs)
        # Evaluate the star distribution at the reflected points.
        # NOTE(review): assumes the helper returns args as a mutable sequence —
        # a tuple here would make this item assignment fail; confirm its contract.
        args[0] = self.max - x
        pdf = self.star.scipy_method(method, *args, **kwargs)[1]
        return x, pdf
    raise_not_implemented_error(
        "Scipy method " + method +
        " is not implemented for transformed parameter " + self.name + "!")
def sample(self, parameter=(), loc=0.0, scale=1.0, **kwargs):
    """Draw samples using the SALib sampling scheme named by self.sampler.

    Bounds come either from a Parameter object (low/high, optional loc/scale
    attributes) or from "low"/"high"/"shape" kwargs, and are rescaled as
    [low + loc, low + loc + (high - low) * scale] per output dimension.

    :param parameter: Parameter instance, or anything else to fall back to kwargs.
    :param loc: shift applied to the lower bound (default 0.0).
    :param scale: multiplier of the (high - low) range (default 1.0).
    :return: samples array transposed/reshaped to the adjusted self.shape.
    """
    if isinstance(parameter, Parameter):
        parameter_shape = parameter.p_shape
        low = parameter.low
        high = parameter.high
        loc = getattr(parameter, "loc", loc)
        scale = getattr(parameter, "scale", scale)
    else:
        low = np.array(kwargs.pop("low", -CalculusConfig.MAX_SINGLE_VALUE))
        high = np.array(kwargs.pop("high", CalculusConfig.MAX_SINGLE_VALUE))
        parameter_shape = kwargs.pop("shape", (1, ))
    # Rescale bounds: range is scaled first, then both bounds shifted by loc.
    scale = (high - low) * scale
    low = low + loc
    high = low + scale
    low, high = self.check_for_infinite_bounds(low.tolist(), high.tolist())
    low, high, n_outputs, parameter_shape = self.check_size(
        low, high, parameter_shape)
    # SALib expects bounds as a list of [low, high] pairs per variable.
    bounds = [list(b) for b in zip(low.tolist(), high.tolist())]
    self.adjust_shape(parameter_shape)
    # NOTE(review): this overwrites the sampler *name* with the imported
    # sample function, so a second call on the same instance would try to
    # import "SALib.sample.<function ...>" and fail — confirm single-use intent.
    self.sampler = importlib.import_module("SALib.sample." + self.sampler).sample
    size = self.n_samples
    problem = {'num_vars': n_outputs, 'bounds': bounds}
    if self.sampler is ff.sample:
        # Fractional factorial sampling takes no size argument.
        samples = (self.sampler(problem)).T
    else:
        other_params = {}
        if self.sampler is saltelli.sample:
            # Saltelli generates size*(2*n_outputs+2) rows; shrink the request
            # so the total stays close to the configured n_samples.
            size = int(np.round(1.0 * size / (2 * n_outputs + 2)))
        elif self.sampler is fast_sampler.sample:
            # FAST needs the interference factor M (SALib default is 4).
            other_params = {"M": kwargs.get("M", 4)}
        elif self.sampler is morris.sample:
            # I don't understand this method and its inputs. I don't think we will ever use it.
            raise_not_implemented_error()
        samples = self.sampler(problem, size, **other_params)
    # Adjust samples number:
    self.n_samples = samples.shape[0]
    self.shape = list(self.shape)
    self.shape[-1] = self.n_samples
    self.shape = tuple(self.shape)
    transpose_shape = tuple([self.n_samples] + list(self.shape)[0:-1])
    return np.reshape(samples.T, transpose_shape).T
def load_model_data_from_file(self, reset_path=False, **kwargs):
    """Load model data from a file, choosing the reader by file extension.

    Supported extensions: .R (rload), .npy (numpy), .mat (scipy loadmat),
    .pkl (pickle) and .h5 (H5Reader).

    :param reset_path: when True, remember the resolved path in
                       self.model_data_path for subsequent calls.
    :param kwargs: "model_data_path" overrides self.model_data_path.
    :return: the loaded model data (typically a dict).
    :raises: via raise_not_implemented_error for unsupported extensions.
    """
    model_data_path = kwargs.get("model_data_path", self.model_data_path)
    if reset_path:
        self.model_data_path = model_data_path
    extension = model_data_path.split(".", -1)[-1]
    if isequal_string(extension, "R"):
        model_data = rload(model_data_path)
    elif isequal_string(extension, "npy"):
        model_data = np.load(model_data_path).item()
    elif isequal_string(extension, "mat"):
        model_data = loadmat(model_data_path)
    elif isequal_string(extension, "pkl"):
        # BUG FIX: was open(..., 'wb'), which truncated the file and made
        # pickle.load fail; unpickling requires read-binary mode.
        with open(model_data_path, 'rb') as f:
            model_data = pickle.load(f)
    elif isequal_string(extension, "h5"):
        model_data = H5Reader().read_dictionary(model_data_path)
    else:
        # BUG FIX: the message omitted the supported .h5 extension.
        raise_not_implemented_error(
            "model_data file (" + model_data_path +
            ") that are not one of (.R, .npy, .mat, .pkl, .h5) cannot be read!")
    return model_data
def run_pse_parallel(self):
    """Parallel PSE execution is not supported yet; always raises."""
    message = "PSE parallel not implemented!"
    raise_not_implemented_error(message, self.logger)
def _numpy(self, loc=0.0, scale=1.0, size=(1,)):
    """numpy.random offers no Bernoulli sampler, so this always raises.

    The (loc, scale, size) signature is kept to match the sibling
    distribution classes' _numpy hooks.
    """
    error_message = "No implementation of bernoulli distribution in numpy.random module!"
    raise_not_implemented_error(error_message)