def test_Fast_with_NAN():
    """Test that fast.analyze raises a ValueError when NaNs are present in the Y values."""
    problem, model_results, _ = setup_samples()

    # Should raise a ValueError
    with pytest.raises(ValueError):
        fast.analyze(problem, model_results)
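For context, a minimal sketch of a setup_samples helper like the one assumed above (the fixture itself is not shown in the source; the problem definition and the NaN injection below are illustrative assumptions):

import numpy as np
from SALib.sample import fast_sampler
from SALib.test_functions import Ishigami

def setup_samples():
    # Hypothetical fixture: build a 3-parameter problem, sample it,
    # evaluate the Ishigami test function, then inject NaNs so that
    # fast.analyze has invalid output values to reject.
    problem = {
        'num_vars': 3,
        'names': ['x1', 'x2', 'x3'],
        'bounds': [[-np.pi, np.pi]] * 3,
    }
    param_values = fast_sampler.sample(problem, 1000)
    Y = Ishigami.evaluate(param_values)
    Y[:2] = np.nan  # corrupt a couple of outputs
    return problem, Y, param_values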
def test_fast_to_df():
    params = ['x1', 'x2', 'x3']
    problem = {
        'num_vars': 3,
        'names': params,
        'groups': None,
        'bounds': [[-3.14159265359, 3.14159265359],
                   [-3.14159265359, 3.14159265359],
                   [-3.14159265359, 3.14159265359]]
    }

    param_values = fast_sampler.sample(problem, 1000)
    Y = Ishigami.evaluate(param_values)
    Si = fast.analyze(problem, Y, print_to_console=False)
    Si_df = Si.to_df()

    expected_index = set(params)
    assert isinstance(Si_df, pd.DataFrame), \
        "FAST Si: Expected DataFrame, got {}".format(type(Si_df))
    assert set(Si_df.index) == expected_index, "Incorrect index in DataFrame"

    col_names = set(['S1', 'ST'])
    assert set(Si_df.columns) == col_names, \
        "Unexpected column names in DataFrame. Expected {}, got {}".format(
            col_names, Si_df.columns)
def _analyze(*args, **kwargs):
    self = args[0]
    M = self.getKwarg('M', kwargs)
    print_to_console = kwargs.get('print_to_console', False)
    return fast.analyze(self.problem, self.results, M=M,
                        print_to_console=print_to_console)
def test_regression_fast():
    param_file = 'src/SALib/test_functions/params/Ishigami.txt'
    problem = read_param_file(param_file)
    param_values = fast_sampler.sample(problem, 10000)
    Y = Ishigami.evaluate(param_values)
    Si = fast.analyze(problem, Y, print_to_console=False)
    assert_allclose(Si['S1'], [0.31, 0.44, 0.00], atol=5e-2, rtol=1e-1)
    assert_allclose(Si['ST'], [0.55, 0.44, 0.24], atol=5e-2, rtol=1e-1)
def FAST_analysis(network, num_samples, prob_def):
    print("Sampling via FAST sampler...")
    samples = fast_sampler.sample(prob_def, num_samples)
    samples = np.split(samples, samples.shape[1], axis=1)
    samples = [s.squeeze() for s in samples]
    values = {n: torch.tensor(s) for n, s in zip(prob_def["names"], samples)}
    print("Running GrFN...")
    Y = network.run(values).numpy()
    print("Analyzing via FAST...")
    return fast.analyze(prob_def, Y, print_to_console=True)
def _anlzr_salib(método, problema, mstr, simul, ops_método):
    """
    Parameters
    ----------
    método: ['morris', 'fast']
        The sensitivity analysis method to apply. líms_paráms and
        mapa_paráms are used to generate the problem.
    mstr:
        Sampled data.
    simul:
        Simulated model output.
    ops_método: dict
        Extra options for the method, e.g. {'num_levels': 8, 'grid_jump': 4}.

    Returns
    -------
    dict
        Sensitivity indices keyed by parameter name.
    """
    if isinstance(mstr, list):
        mstr = np.asarray(mstr)

    if método == 'morris':
        if isinstance(simul, list):
            simul = np.asarray(simul)
        ops = {'X': mstr, 'Y': simul, 'num_levels': 16}
        ops.update(ops_método)

        mor = {}
        Si = morris.analyze(problema, **ops)
        mor.update({
            'mu_star': {j: Si['mu_star'][i]
                        for i, j in enumerate(problema['names'])},
            'sigma': {j: Si['sigma'][i]
                      for i, j in enumerate(problema['names'])},
            # 'sum_sigma': np.sum(Si['sigma'])
        })
        return mor

    elif método == 'fast':
        ops = {'Y': simul}

        fa = {}
        Si = fast.analyze(problema, **ops)
        fa.update({
            'Si': {j: Si['S1'][i]
                   for i, j in enumerate(problema['names'])},
            'ST': {j: Si['ST'][i]
                   for i, j in enumerate(problema['names'])},
            'St-Si': {j: Si['ST'][i] - Si['S1'][i]
                      for i, j in enumerate(problema['names'])},
            # 'sum_s1': np.sum(Si['S1']),
            # 'sum_st': np.sum(Si['ST']),
            # 'sum_st-s1': np.absolute(np.sum(Si['ST']) - np.sum(Si['S1']))
        })
        return fa

    else:
        raise ValueError(_('Algoritmo "{}" no reconocido.').format(método))
def fast_analyze(
    parameters: MutableMapping[str, Distribution],
    model_output: Dict[int, Dict[str, RecordTransmitter]],
    harmonics: Optional[int],
) -> Dict[int, Dict[str, Any]]:
    # Returns a dictionary with S1, ST, S1_conf, ST_conf and names
    # (described in https://salib.readthedocs.io/en/latest/api.html)
    # for each evaluation performed with a sample, i.e. a polynomial
    # is evaluated with a set of sample coefficients for 10 values of x,
    # i.e. {0: {'S1': [0.3075(x), 0.4424(y), 4.531e-27(z)], ...,
    #      'names': ['x', 'y', 'z']}, 1: ...}
    if len(parameters) == 0:
        raise ValueError("Cannot study the sensitivity of no variables")

    records = []
    for transmitter_map in model_output.values():
        if len(transmitter_map) > 1:
            raise ValueError("Cannot analyze sensitivity with multiple outputs")
        if len(transmitter_map) < 1:
            raise ValueError("Cannot analyze sensitivity with no output")
        for transmitter in transmitter_map.values():
            records.append(
                get_event_loop().run_until_complete(transmitter.load()))

    ensemble_size = len(model_output)
    if harmonics is None:
        harmonics = 4
    param_size = sum(dist.size for dist in parameters.values())
    if ensemble_size % param_size == 0:
        sample_size = int(ensemble_size / param_size)
    else:
        raise ValueError("The size of the model output must be "
                         "a multiple of the number of parameters")

    record_size = len(records[0].data)
    data = np.zeros([sample_size * param_size, record_size])
    for i, record in enumerate(records):
        for j in range(record_size):
            data[i][j] = record.data[j]  # type: ignore

    problem = _build_salib_problem(parameters)

    analysis = {}
    for j in range(record_size):
        analysis[j] = fast.analyze(problem, data[:, j], M=harmonics)
    return analysis
def _parallel_analyze(index, method, problem, samples, data, seed):
    if method == 'sobol':
        result = sobol.analyze(problem, data, calc_second_order=True,
                               print_to_console=False, seed=seed)
    elif method == 'fast':
        result = fast.analyze(problem, data, print_to_console=False, seed=seed)
    elif method == 'rbd-fast':
        result = rbd_fast.analyze(problem, samples, data,
                                  print_to_console=False, seed=seed)
    elif method == 'morris':
        result = morris_analyze(problem, samples, data,
                                print_to_console=False, seed=seed)
    elif method == 'delta':
        result = delta.analyze(problem, samples, data,
                               print_to_console=False, seed=seed)
    elif method == 'dgsm':
        result = dgsm.analyze(problem, samples, data,
                              print_to_console=False, seed=seed)
    elif method == 'frac':
        result = ff_analyze(problem, samples, data, second_order=True,
                            print_to_console=False, seed=seed)
    else:
        return 0

    for key in result.keys():
        result[key] = result[key].tolist()

    with open('{:s}.json'.format(index), 'w') as outfile:
        json.dump(result, outfile)
    return 0
def fast(self):
    """FAST sensitivity analysis of the objective function.

    Estimates the sensitivity of the objective function to changes in the
    parameters using SALib's FAST method:
    https://salib.readthedocs.io/en/latest/api.html#fast-fourier-amplitude-sensitivity-test

    Returns:
        dict: sensitivity values of the parameters; the dict has keys
            'S1' and 'ST'.
    """
    X, y, problem = self._sensitivity_prep()
    n_sample = 2000
    param_values = fast_sampler.sample(problem, n_sample)
    X_s, y_s = self._closest_points(problem, X, y, param_values)
    Si = fast.analyze(problem, y_s)
    return Si
def analyze(self, model, num_samples=10000):
    Y = model.evaluate(self.sample(num_samples=num_samples))
    Sis = [
        fast.analyze(self.problem, Y[:, i], print_to_console=False)
        for i in range(Y.shape[1])
    ]
    S1s = [s['S1'] for s in Sis]
    STs = [s['ST'] for s in Sis]
    result_dict = {
        'S1': dict(zip(self.output_names, S1s)),
        'ST': dict(zip(self.output_names, STs))
    }
    return result_dict
def analyze(self):
    """Initiate the analysis and store the result in the data directory.

    Generates:
        Analysis result at 'acbm/data/output/fast.txt'.
    """
    X = fast_sampler.sample(self.problem, self.n_samples, M=self.M,
                            seed=self.seed_sample)
    Y = ACBM.evaluate(X)
    si = fast.analyze(self.problem, Y, M=self.M, seed=self.seed_analyze)
    with open(self.path_output + 'fast.txt', 'wb') as f:
        pickle.dump(si, f)
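Since the result above is pickled, a minimal sketch of reading it back (assuming the default output path shown in the docstring, and that the stored object is SALib's ResultDict, which supports to_df()):

import pickle

# Load the pickled FAST result and inspect it as a DataFrame.
with open('acbm/data/output/fast.txt', 'rb') as f:
    si = pickle.load(f)
print(si.to_df())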
def _analizar(símismo, vec_res, muestra, ops):
    if símismo.método == 'sobol':
        return sobol.analyze(problem=símismo.problema, Y=vec_res, **ops)
    elif símismo.método == 'fast':
        return fast.analyze(problem=símismo.problema, Y=vec_res, **ops)
    elif símismo.método == 'morris':
        return morris_anlz.analyze(problem=símismo.problema, X=muestra,
                                   Y=vec_res, **ops)
    elif símismo.método == 'dmim':
        return delta.analyze(problem=símismo.problema, X=muestra,
                             Y=vec_res, **ops)
    elif símismo.método == 'dgsm':
        return dgsm.analyze(problem=símismo.problema, X=muestra,
                            Y=vec_res, **ops)
    elif símismo.método == 'ff':
        return ff_anlz.analyze(problem=símismo.problema, X=muestra,
                               Y=vec_res, **ops)
    else:
        raise ValueError(
            'Método de análisis de sensibilidad "{}" no reconocido.'.format(
                símismo.método))
def perform_analysis(problem, Y_list, method):
    S1_dic = OrderedDict()
    S1_dic['mu_TGFB'] = []
    S1_dic['beta_TGFB_M_A'] = []
    S1_dic['beta_TGFB_FIBRO'] = []
    S1_dic['mu_MA'] = []
    S1_dic['phi_MRA'] = []
    S1_dic['theta_ACH'] = []
    S1_dic['mu_MR'] = []
    S1_dic['Pmax_MR'] = []
    S1_dic['Pmin_MR'] = []
    S1_dic['Keq_CH'] = []

    Y_list = np.array(Y_list)
    print(Y_list.shape)
    Y_list_trans = Y_list.transpose()
    total = len(Y_list_trans)
    count = 0
    for Y in Y_list_trans:
        count += 1
        if method == 'FAST':
            Si = fast.analyze(problem, Y, print_to_console=True)
        elif method == 'Saltelli':
            Si = sobol.analyze(problem, Y)
        # Unpack first-order indices; use Si['ST'] instead for total-order.
        (mu_TGFB, beta_TGFB_M_A, beta_TGFB_FIBRO, mu_MA, phi_MRA, theta_ACH,
         mu_MR, Pmax_MR, Pmin_MR, Keq_CH) = Si['S1']
        S1_dic['mu_TGFB'].append(mu_TGFB)
        S1_dic['beta_TGFB_M_A'].append(beta_TGFB_M_A)
        S1_dic['beta_TGFB_FIBRO'].append(beta_TGFB_FIBRO)
        S1_dic['mu_MA'].append(mu_MA)
        S1_dic['phi_MRA'].append(phi_MRA)
        S1_dic['theta_ACH'].append(theta_ACH)
        S1_dic['mu_MR'].append(mu_MR)
        S1_dic['Pmax_MR'].append(Pmax_MR)
        S1_dic['Pmin_MR'].append(Pmin_MR)
        S1_dic['Keq_CH'].append(Keq_CH)
        print(total, count)
    return pd.DataFrame(S1_dic)
def _parallel_analyze(data):
    seed = int(opts['seed'])
    samples = population['problem', 'samples']
    problem = population['problem', 'definition']
    if opts['method'] == 'sobol':
        return sobol.analyze(problem, data, calc_second_order=True,
                             print_to_console=False)
    elif opts['method'] == 'fast':
        return fast.analyze(problem, data, print_to_console=False, seed=seed)
    elif opts['method'] == 'rbd-fast':
        return rbd_fast.analyze(problem, samples, data,
                                print_to_console=False, seed=seed)
    elif opts['method'] == 'morris':
        return morris_analyze(problem, samples, data,
                              print_to_console=False, seed=seed)
    elif opts['method'] == 'delta':
        return delta.analyze(problem, samples, data,
                             print_to_console=False, seed=seed)
    elif opts['method'] == 'dgsm':
        return dgsm.analyze(problem, samples, data,
                            print_to_console=False, seed=seed)
    elif opts['method'] == 'frac':
        return ff_analyze(problem, samples, data, second_order=True,
                          print_to_console=False, seed=seed)
    else:
        return 0
def salib_wrapper(problem, y_val, x, analysis_type='sobol', **kwargs):
    """
    Backend wrapper for sobol, fast, rbd-fast and delta analysis.

    :meta private:
    """
    if analysis_type == 'sobol':
        return sobol.analyze(problem, y_val, **kwargs)
    elif analysis_type == 'fast':
        return fast.analyze(problem, y_val, **kwargs)
    elif analysis_type == 'delta':
        return delta.analyze(problem, x, y_val, **kwargs)
    elif analysis_type == 'rbd-fast':
        return rbd_fast.analyze(problem, x, y_val, **kwargs)
    else:
        raise Exception(
            'Could not find analyzer. analysis_type must be sobol, fast, '
            'rbd-fast or delta.')
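A hypothetical call of the wrapper above with the FAST analyzer; problem, Y, and X are assumed to come from an earlier sampling and evaluation step:

# FAST does not use the sample matrix x, so only problem and y_val matter here;
# extra keyword arguments are forwarded to fast.analyze.
Si = salib_wrapper(problem, Y, X, analysis_type='fast', print_to_console=False)
print(Si['S1'], Si['ST'])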
def sensiAnal(model_results, Problem):
    weather = []
    X = []
    Y = []
    for row in model_results:
        weather.append(row[0])
        X.append(row[1:len(row) - 2])
        Y.append(row[len(row) - 1])

    climate = ['1A', '2A', '2B', '3A', '3B', '3C', '4A', '4B', '4C',
               '5A', '5B', '6A', '6B', '7A', '8A']
    S1 = []    # first-order indices
    ST = []    # total-order indices
    Name = []
    Clim = []
    for x in climate:
        X_clim = []
        Y_clim = []
        for ind, val in enumerate(weather):
            if val == x:
                X_clim.append(X[ind])
                Y_clim.append(Y[ind])
        for row in Problem:
            if row[0] == x:
                problem = row[1]
        if len(X_clim) > 0:
            Si = fast.analyze(problem, np.array(Y_clim),
                              print_to_console=False)
            s1_clim = Si['S1']
            st_clim = Si['ST']
            name_clim = problem['names']
            S1.append(s1_clim)
            ST.append(st_clim)
            Name.append(name_clim)
            Clim.append(x)
    return Clim, Name, S1, ST
def sa(model, response, policy={}, method="sobol", nsamples=1000, **kwargs):
    if len(model.uncertainties) == 0:
        raise ValueError("no uncertainties defined in model")

    problem = {
        'num_vars': len(model.uncertainties),
        'names': model.uncertainties.keys(),
        'bounds': [[0.0, 1.0] for u in model.uncertainties],
        'groups': kwargs.get("groups", None)
    }

    # estimate the argument N passed to the sampler that produces the
    # requested number of samples
    N = _predict_N(method, nsamples, problem["num_vars"], kwargs)

    # generate the samples
    if method == "sobol":
        samples = saltelli.sample(problem, N,
                                  **_cleanup_kwargs(saltelli.sample, kwargs))
    elif method == "morris":
        samples = morris_sampler.sample(
            problem, N, **_cleanup_kwargs(morris_sampler.sample, kwargs))
    elif method == "fast":
        samples = fast_sampler.sample(
            problem, N, **_cleanup_kwargs(fast_sampler.sample, kwargs))
    elif method == "ff":
        samples = ff_sampler.sample(
            problem, **_cleanup_kwargs(ff_sampler.sample, kwargs))
    elif method == "dgsm":
        samples = finite_diff.sample(
            problem, N, **_cleanup_kwargs(finite_diff.sample, kwargs))
    elif method == "delta":
        if "samples" in kwargs:
            samples = kwargs["samples"]
        else:
            samples = latin.sample(problem, N,
                                   **_cleanup_kwargs(latin.sample, kwargs))

    # convert from samples in [0, 1] to the uncertainty domain
    for i, u in enumerate(model.uncertainties):
        samples[:, i] = u.ppf(samples[:, i])

    # run the model and collect the responses
    responses = np.empty(samples.shape[0])
    for i in range(samples.shape[0]):
        sample = {k: v for k, v in zip(model.uncertainties.keys(), samples[i])}
        responses[i] = evaluate(model, overwrite(sample, policy))[response]

    # run the sensitivity analysis method
    if method == "sobol":
        result = sobol.analyze(problem, responses,
                               **_cleanup_kwargs(sobol.analyze, kwargs))
    elif method == "morris":
        result = morris_analyzer.analyze(
            problem, samples, responses,
            **_cleanup_kwargs(morris_analyzer.analyze, kwargs))
    elif method == "fast":
        result = fast.analyze(problem, responses,
                              **_cleanup_kwargs(fast.analyze, kwargs))
    elif method == "ff":
        result = ff_analyzer.analyze(
            problem, samples, responses,
            **_cleanup_kwargs(ff_analyzer.analyze, kwargs))
    elif method == "dgsm":
        result = dgsm.analyze(problem, samples, responses,
                              **_cleanup_kwargs(dgsm.analyze, kwargs))
    elif method == "delta":
        result = delta.analyze(problem, samples, responses,
                               **_cleanup_kwargs(delta.analyze, kwargs))

    # convert the SALib results into a form allowing pretty printing and
    # lookups using the parameter name
    pretty_result = SAResult(
        list(result["names"] if "names" in result else problem["names"]))

    if "S1" in result:
        pretty_result["S1"] = {k: float(v) for k, v in
                               zip(problem["names"], result["S1"])}
    if "S1_conf" in result:
        pretty_result["S1_conf"] = {k: float(v) for k, v in
                                    zip(problem["names"], result["S1_conf"])}
    if "ST" in result:
        pretty_result["ST"] = {k: float(v) for k, v in
                               zip(problem["names"], result["ST"])}
    if "ST_conf" in result:
        pretty_result["ST_conf"] = {k: float(v) for k, v in
                                    zip(problem["names"], result["ST_conf"])}
    if "S2" in result:
        pretty_result["S2"] = _S2_to_dict(result["S2"], problem)
    if "S2_conf" in result:
        pretty_result["S2_conf"] = _S2_to_dict(result["S2_conf"], problem)
    if "delta" in result:
        pretty_result["delta"] = {k: float(v) for k, v in
                                  zip(problem["names"], result["delta"])}
    if "delta_conf" in result:
        pretty_result["delta_conf"] = {k: float(v) for k, v in
                                       zip(problem["names"], result["delta_conf"])}
    if "vi" in result:
        pretty_result["vi"] = {k: float(v) for k, v in
                               zip(problem["names"], result["vi"])}
    if "vi_std" in result:
        pretty_result["vi_std"] = {k: float(v) for k, v in
                                   zip(problem["names"], result["vi_std"])}
    if "dgsm" in result:
        pretty_result["dgsm"] = {k: float(v) for k, v in
                                 zip(problem["names"], result["dgsm"])}
    if "dgsm_conf" in result:
        pretty_result["dgsm_conf"] = {k: float(v) for k, v in
                                      zip(problem["names"], result["dgsm_conf"])}
    if "mu" in result:
        pretty_result["mu"] = {k: float(v) for k, v in
                               zip(result["names"], result["mu"])}
    if "mu_star" in result:
        pretty_result["mu_star"] = {k: float(v) for k, v in
                                    zip(result["names"], result["mu_star"])}
    if "mu_star_conf" in result:
        pretty_result["mu_star_conf"] = {k: float(v) for k, v in
                                         zip(result["names"], result["mu_star_conf"])}
    if "sigma" in result:
        pretty_result["sigma"] = {k: float(v) for k, v in
                                  zip(result["names"], result["sigma"])}

    return pretty_result
counter = counter + 1

# plt.plot([1, 2, 5, 10], probabilities)
# plt.ylabel('Probability')
# plt.yscale('log')
# plt.legend()
# plt.xlabel('Number of Stop Codons')
# plt.savefig('prob_per_stop.png')

# Sensitivity Analysis - FAST
problem = {
    'num_vars': 5,
    'names': ['stRNA_binding', 'rf1_binding', 'stRNA_unbinding',
              'rf1_unbinding', 'initial_codons'],
    'bounds': [[stRNA_bindings.min(), stRNA_bindings.max()],
               [rf1_bindings.min(), rf1_bindings.max()],
               [stRNA_unbindings.min(), stRNA_unbindings.max()],
               [rf1_unbindings.min(), rf1_unbindings.max()],
               [200., 2000.]]
}

print('FAST')
Si = fast.analyze(problem, probabilities, print_to_console=False)
print('First order indices: ' + str(Si['S1']))
print('Total order indices: ' + str(Si['ST']))
def perform_analysis(problem, Y_list, method):
    names = ['beta_CHMA', 'beta_CHNA', 'theta_ACH', 'beta_MANDA',
             'lamb_ITMNDN', 'alpha_ITMNDN', 'Pmax_APE', 'Pmin_APE',
             'rdistress', 'w_gauss_min', 'rinduce_peak', 'rinduce',
             'r_AP', 'r_ITM', 'r_ITMpeak', 'r_NDN', 'lamb_MANDN',
             'lamb_MANDA', 'mu_NDA', 'Keq_CH', 'r_Nhomeo', 'Pmax_NR']
    S1_dic = OrderedDict((name, []) for name in names)

    Y_list = np.array(Y_list)
    print(Y_list.shape)
    Y_list_trans = Y_list.transpose()
    total = len(Y_list_trans)
    count = 0
    for Y in Y_list_trans:
        count += 1
        if method == 'FAST':
            Si = fast.analyze(problem, Y, print_to_console=True)
        elif method == 'Saltelli':
            Si = sobol.analyze(problem, Y)
        # Si['S1'] lists first-order indices in the same order as `names`;
        # use Si['ST'] instead for total-order indices.
        for name, value in zip(names, Si['S1']):
            S1_dic[name].append(value)
        print(total, count)
    return pd.DataFrame(S1_dic)
param_values = saltelli.sample(problem, num_steps)
Points = np.zeros([param_values.shape[0]])
for i in range(len(param_values)):
    x = param_values[i]
    x_dot, y_dot, z_dot = lorenz(x[0], x[1], x[2], x[3])
    a = np.array((x_dot, y_dot, z_dot))
    Points[i] = distance(a, start_point)
# print(Points.shape)

sensitivity = sobol.analyze(problem, Points)
print("Sobol Sensitivity S1:")
print(sensitivity["S1"])
print("Sobol Sensitivity ST:")
print(sensitivity["ST"])

# FAST analysis
fast_param_values = fast_sampler.sample(problem, num_steps)
FastPoints = np.zeros([fast_param_values.shape[0]])
for i in range(len(fast_param_values)):
    x = fast_param_values[i]
    x_dot, y_dot, z_dot = lorenz(x[0], x[1], x[2], x[3])
    a = np.array((x_dot, y_dot, z_dot))
    FastPoints[i] = distance(a, start_point)

fast_sensitivity = fast.analyze(problem, FastPoints, print_to_console=True)
    Si_con = dgsm.analyze(problem, param_values, Y_con, conf_level=0.95,
                          print_to_console=False)
    f1, (ax1, ax2) = plt.subplots(2, 1, sharex=True)
    SS1 = (Si_con['dgsm'][1:]) / Si['dgsm'][1:]
    SS2 = (Si_con['vi'][1:]) / Si['vi'][1:]
    sns.barplot(np.arange(2, 22), np.abs(SS1), ax=ax1)
    sns.barplot(np.arange(2, 22), np.abs(SS2), ax=ax2)
    ax1.set_title('Sdgsm')
    ax2.set_title('Svi')
    ax2.set_xlabel('Sensitivity')

elif method_flag == 4:
    Si = fast.analyze(problem, Y, print_to_console=False)
    figure_keys = {
        'ax1_title': 'S1',
        'ax2_title': 'ST',
        'ax2_lable': 'Parameter index',
    }
    Si_con = fast.analyze(problem, Y_con, print_to_console=False)
    f1, (ax1, ax2) = plt.subplots(2, 1, sharex=True)
    SS1 = (np.array(Si_con['S1'][1:])) / np.array(Si['S1'][1:])
    SS2 = (np.array(Si_con['ST'][1:])) / np.array(Si['ST'][1:])
    sns.barplot(np.arange(2, 22), np.abs(SS1), ax=ax1)
    sns.barplot(np.arange(2, 22), np.abs(SS2), ax=ax2)
    ax1.set_title('SS1')
    ax2.set_title('SST')
    ax2.set_xlabel('Sensitivity')
import sys
sys.path.append('../..')

from SALib.sample import fast_sampler
from SALib.analyze import fast
from SALib.test_functions import Ishigami
from SALib.util import read_param_file

import numpy as np

# Read the parameter range file
param_file = '../../SALib/test_functions/params/Ishigami.txt'
problem = read_param_file(param_file)

# Generate samples
param_values = fast_sampler.sample(problem, 1000)

# Run the "model" and save the output in a text file
# This will happen offline for external models
Y = Ishigami.evaluate(param_values)
np.savetxt("model_output.txt", Y, delimiter=' ')

# Perform the sensitivity analysis using the model output stored on disk
Y = np.loadtxt("model_output.txt")
Si = fast.analyze(problem, Y, print_to_console=False)

# Returns a dictionary with keys 'S1' and 'ST'
# e.g. Si['S1'] contains the first-order index for each parameter, in the
# same order as the parameter file
def analyze_fast(self):
    # FAST - Fourier Amplitude Sensitivity Test
    return fast.analyze(self.sa_problem, self.samples_y,
                        print_to_console=self.options["print_to_console"])
        # Write the output as a dataframe and into a file
        saOutputDF = saOutput.to_df()
        fnOutletSAOutput_Mu = os.path.join(
            fdSA, "oltSA_{}_{}.txt".format(outLetNo, sampleMethod))
        saOutputDF.to_csv(fnOutletSAOutput_Mu)

    elif ctrlSetting["saMethod"] == 3:
        # For the FAST method:
        # SALib.analyze.fast.analyze(problem, Y, M=4, num_resamples=100,
        #     conf_level=0.95, print_to_console=False, seed=None)
        # Returns a dictionary with keys 'S1' and 'ST', where each entry is
        # a list of size D (the number of parameters) containing the indices
        # in the same order as the parameter file.
        saOutput = fast.analyze(parmForSA, outLetAvgAnnList[outLetNo],
                                M=4, num_resamples=100, conf_level=0.95,
                                print_to_console=False)

        # Write the output as a dataframe and into a file
        saOutputDF = saOutput.to_df()
        fnOutletSAOutput_Fast = os.path.join(
            fdSA, "oltSATotal_{}_{}.txt".format(outLetNo, sampleMethod))
        saOutputDF.to_csv(fnOutletSAOutput_Fast)

calStatTime = datetime.datetime.now(timeZone)
print("Time for calculating statistics: {}; Total Time: {}".format(
    calStatTime - runEndTime, calStatTime - startTime))
print("=================================================================")
# End of the for loop over total runs
elif method.lower() in ['sobol']:
    param_values = saltelli.sample(problem, N_samples)  # for the Sobol method
    print("Sobol")

# Inputs are prob, N_Yrs, Tropp, PT_d, PT_f, C_Vant, PTopp, inf, v, R, A_m,
# L_m, Mpv, g, MW_NaCl, csv_name
# Tropp: 1 = transmission, 0 = no transmission
# PTopp: 0 = no pretreatment, 1 = draw pretreatment, 2 = feed pretreatment,
#        3 = feed and draw pretreatment
Test = pse.comboUSA(param_values, N_Yrs, 0, Pt['MF'], Pt['MF'], C_Vant, 2,
                    inf, v, R, M_geometry, g, MW_NaCl, csv_title_output)

# %% Run analysis
# Currently, Y is set to net present value. Change as needed.
Y = Test['PV_net($)'].values

if method.lower() in ['fast']:
    Si = fast.analyze(problem, Y)  # Fourier Amplitude Sensitivity Test
elif method.lower() in ['rbd']:
    Si = rbd_fast.analyze(problem, Y, param_values)  # RBD-FAST method
elif method.lower() in ['sobol']:
    Si = sobol.analyze(problem, Y)  # Sobol method

# Type: input the analysis type, e.g. "sobol"
# Si = pse.SA_indices(problem, Si, Y, method, csv_title_SI)
csv_title_SI_UC = "".join((csv_title_SI, '_PV_net'))  # configured for net
# present value; change as needed
Si_Var = pse.SA_indices(problem, Si, 'PV_net($)', method, csv_title_SI_UC,
                        Notes, N_samples)

# Placed at the end so that files are not unnecessarily generated for
# failed tests
sys_in = pse.SAin(problem, param_values, csv_title_input)
import sys
sys.path.append('../..')

from SALib.analyze import fast
from SALib.sample import fast_sampler
from SALib.test_functions import Ishigami
from SALib.util import read_param_file

# Read the parameter range file and generate samples
problem = read_param_file('../../src/SALib/test_functions/params/Ishigami.txt')

# Generate samples
param_values = fast_sampler.sample(problem, 1000)

# Run the "model"
# This will happen offline for external models
Y = Ishigami.evaluate(param_values)

# Perform the sensitivity analysis using the model output
Si = fast.analyze(problem, Y, print_to_console=False)

# Returns a dictionary with keys 'S1' and 'ST'
# e.g. Si['S1'] contains the first-order index for each parameter, in the
# same order as the parameter file
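As a small follow-on to the snippet above, the indices can be paired with the parameter names (this assumes the snippet has already run; to_df() is also available on the returned result in recent SALib versions, as the test_fast_to_df snippet earlier shows):

# Pair each parameter name with its first- and total-order index.
for name, s1, st in zip(problem['names'], Si['S1'], Si['ST']):
    print(f"{name}: S1={s1:.3f}, ST={st:.3f}")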
def fast_analyze(
    parameters: MutableMapping[str, Distribution],
    model_output: Dict[int, Dict[str, RecordTransmitter]],
    harmonics: Optional[int],
) -> Dict[int, Dict[str, Any]]:
    """
    Perform a sensitivity analysis of parameters for a given model output.

    The result of the sensitivity analysis is presented as a dictionary
    with S1, ST, S1_conf, ST_conf and names (described in
    https://salib.readthedocs.io/en/latest/api.html) for each evaluation
    performed with a sample, i.e. a polynomial is evaluated with a set of
    sample coefficients for 10 values of ``x``, i.e.
    ``{0: {'S1': [0.3075(x), 0.4424(y), 4.531e-27(z)], ...,
    'names': ['x', 'y', 'z']}, 1: ...}``
    """
    # pylint: disable=too-many-branches
    if len(parameters) == 0:
        raise ValueError("Cannot study the sensitivity of no variables")

    records = []
    for transmitter_map in model_output.values():
        if len(transmitter_map) > 1:
            raise ValueError("Cannot analyze sensitivity with multiple outputs")
        if len(transmitter_map) < 1:
            raise ValueError("Cannot analyze sensitivity with no output")
        for transmitter in transmitter_map.values():
            records.append(
                get_event_loop().run_until_complete(transmitter.load()))

    ensemble_size = len(model_output)
    if harmonics is None:
        harmonics = 4
    param_size = sum(dist.size for dist in parameters.values())
    if ensemble_size % param_size == 0:
        sample_size = int(ensemble_size / param_size)
    else:
        raise ValueError("The size of the model output must be "
                         "a multiple of the number of parameters")

    assert (len(set(record.record_type for record in records)) == 1
            ), "Bug: Requires homogeneous model output records"
    assert records[0].record_type in (
        RecordType.LIST_FLOAT,
        RecordType.SCALAR_FLOAT,
    ), "Bug: Model output must be scalar or lists"

    if records[0].record_type == RecordType.LIST_FLOAT:
        record_size = len(records[0].data)  # type: ignore
        data = np.zeros([sample_size * param_size, record_size])
        for i, record in enumerate(records):
            for j in range(record_size):
                data[i][j] = record.data[j]  # type: ignore
    elif records[0].record_type == RecordType.SCALAR_FLOAT:
        record_size = 1
        data = np.zeros([sample_size * param_size, record_size])
        for i, record in enumerate(records):
            data[i][0] = record.data

    problem = _build_salib_problem(parameters)

    analysis = {}
    for j in range(record_size):
        analysis[j] = fast.analyze(problem, data[:, j], M=harmonics)
    return analysis
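The helper _build_salib_problem is not shown in the source. A minimal sketch of what such a helper might do, assuming each Distribution exposes a size attribute and that unit-interval bounds are acceptable placeholders (a real implementation would derive bounds from each distribution's support):

from typing import Any, Dict, MutableMapping

def _build_salib_problem(parameters: MutableMapping[str, Any]) -> Dict[str, Any]:
    # Hypothetical: flatten each (possibly multi-component) distribution
    # into one SALib variable per scalar component.
    names = []
    bounds = []
    for name, dist in parameters.items():
        for i in range(dist.size):
            names.append('{}[{}]'.format(name, i) if dist.size > 1 else name)
            bounds.append([0.0, 1.0])  # assumed placeholder bounds
    return {'num_vars': len(names), 'names': names, 'bounds': bounds}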
for j in range(len(fast_param_values)):
    x = fast_param_values[j]
    xs = np.empty(num_steps + 1)
    ys = np.empty(num_steps + 1)
    zs = np.empty(num_steps + 1)
    xs[0], ys[0], zs[0] = (x[0], x[1], x[2])
    for i in range(num_steps):
        x_dot, y_dot, z_dot = lorenz(xs[i], ys[i], zs[i], x[3])
        xs[i + 1] = xs[i] + (x_dot * dt)
        ys[i + 1] = ys[i] + (y_dot * dt)
        zs[i + 1] = zs[i] + (z_dot * dt)
    a = distance(np.array((x_dot, y_dot, z_dot)), Points[i])
    FastPoints.append(a)

fast_sensitivity = fast.analyze(problem, np.asarray(FastPoints))
print("Fast Sensitivity Analysis: ")
print(fast_sensitivity)

# for j in range(morris_param_values):
#     x = morris_param_values[j]
#     xs = np.empty(num_steps + 1)
#     ys = np.empty(num_steps + 1)
#     zs = np.empty(num_steps + 1)
#     xs[0], ys[0], zs[0] = (x[0], x[1], x[2])
#     for i in range(num_steps):
#         x_dot, y_dot, z_dot = lorenz(xs[i], ys[i], zs[i], x[3])
#         xs[i + 1] = xs[i] + (x_dot * dt)
#         ys[i + 1] = ys[i] + (y_dot * dt)
#         zs[i + 1] = zs[i] + (z_dot * dt)
#     a = distance(np.array((x_dot, y_dot, z_dot)), Points[i])
def cycle_3_sensitivity(parameters, exponential_level, parameter_base_values,
                        cycles=None, verbose=False):
    if cycles is None:
        cycles = [3]
    bounds = [[-exponential_level, exponential_level]] * len(parameters)
    problem = {
        'num_vars': len(parameters),
        'names': parameters,
        'bounds': bounds
    }
    N = 1000
    param_exponents = fast_sampler.sample(problem, N)
    t = np.linspace(0, 180, 1801)
    Y = np.zeros([param_exponents.shape[0], t.shape[0]])

    if 3 in cycles:
        for i, X in enumerate(param_exponents):
            if verbose:
                print('Solving Equation', i + 1, '/', len(param_exponents))
            param_values = np.asarray(parameter_base_values) * np.power(10, X)
            Y[i] = evaluate_model(param_values, t, 3)
        sensitivities1_3cycles = [None] * len(t)
        sensitivitiesT_3cycles = [None] * len(t)
        for i in range(0, len(t)):
            if verbose:
                print('Analysing Sensitivities at time', t[i], '/', t[-1])
            Si = fast.analyze(problem, Y[:, i])
            sensitivities1_3cycles[i] = Si['S1']
            sensitivitiesT_3cycles[i] = Si['ST']
        for i in range(0, len(parameters)):
            plt.plot(t[:], np.asarray(sensitivities1_3cycles)[:, i],
                     label=parameters[i])
        plt.xlabel('t')
        plt.ylabel('Sensitivity')
        plt.savefig('images/Sensitivity1_3enzymes.pdf')
        plt.show()
        print('First Sensitivity peaks for 3 cycles:')
        for i in range(0, len(parameters)):
            print('\t', parameters[i], 'peaks:')
            peak_list, _ = find_peaks(
                np.asarray(sensitivities1_3cycles)[100:, i])
            peak_list = np.sort(peak_list)
            for peak in peak_list:
                print('\t\t at t =', t[peak], 'for a sensitivity of',
                      np.asarray(sensitivities1_3cycles)[peak, i])
        for i in range(0, len(parameters)):
            plt.plot(t[:], np.asarray(sensitivitiesT_3cycles)[:, i],
                     label=parameters[i])
        plt.xlabel('t')
        plt.ylabel('Sensitivity')
        plt.savefig('images/SensitivityT_3enzymes.pdf')
        plt.show()
        print('Total Sensitivity peaks for 3 cycles:')
        for i in range(0, len(parameters)):
            print('\t', parameters[i], 'peaks:')
            peak_list, _ = find_peaks(
                np.asarray(sensitivitiesT_3cycles)[100:, i])
            peak_list = np.sort(peak_list)
            for peak in peak_list:
                print('\t\t at t =', t[peak], 'for a sensitivity of',
                      np.asarray(sensitivitiesT_3cycles)[peak, i])
        for i in range(0, len(parameters)):
            plt.plot(t[:],
                     np.asarray(sensitivities1_3cycles)[:, i] /
                     max(np.asarray(sensitivities1_3cycles)[1:, i]),
                     label=parameters[i])
        plt.xlabel('t')
        plt.ylabel('sensitivity')
        plt.yticks([])
        plt.savefig('images/SensitivityScaled1_3enzymes.pdf')
        plt.show()

    if 4 in cycles:
        for i, X in enumerate(param_exponents):
            if verbose:
                print('Solving Equation', i + 1, '/', len(param_exponents))
            param_values = np.asarray(parameter_base_values) * np.power(10, X)
            Y[i] = evaluate_model(param_values, t, 4)
        sensitivities1_4cycles = [None] * len(t)
        sensitivitiesT_4cycles = [None] * len(t)
        for i in range(0, len(t)):
            # print('Analysing Sensitivities at time', t[i], '/', t[-1])
            Si = fast.analyze(problem, Y[:, i])
            sensitivities1_4cycles[i] = Si['S1']
            sensitivitiesT_4cycles[i] = Si['ST']
        for i in range(0, len(parameters)):
            plt.plot(t[:], np.asarray(sensitivities1_4cycles)[:, i],
                     label=parameters[i])
        plt.xlabel('t')
        plt.ylabel('Sensitivity')
        plt.savefig('images/Sensitivity1_4enzymes.pdf')
        plt.show()
        print('First Sensitivity peaks for 4 cycles:')
        for i in range(0, len(parameters)):
            print('\t', parameters[i], 'peaks:')
            peak_list, _ = find_peaks(
                np.asarray(sensitivities1_4cycles)[100:, i])
            peak_list = np.sort(peak_list)
            for peak in peak_list:
                print('\t\t at t =', t[peak], 'for a sensitivity of',
                      np.asarray(sensitivities1_4cycles)[peak, i])
        for i in range(0, len(parameters)):
            plt.plot(t[:], np.asarray(sensitivitiesT_4cycles)[:, i],
                     label=parameters[i])
        plt.xlabel('t')
        plt.ylabel('Sensitivity 4 cycle')
        plt.savefig('images/SensitivityT_4enzymes.pdf')
        plt.show()
        print('Total Sensitivity peaks for 4 cycles:')
        for i in range(0, len(parameters)):
            print('\t', parameters[i], 'peaks:')
            peak_list, _ = find_peaks(
                np.asarray(sensitivitiesT_4cycles)[100:, i])
            peak_list = np.sort(peak_list)
            for peak in peak_list:
                print('\t\t at t =', t[peak], 'for a sensitivity of',
                      np.asarray(sensitivitiesT_4cycles)[peak, i])
        for i in range(0, len(parameters)):
            plt.plot(t[:],
                     np.asarray(sensitivities1_4cycles)[:, i] /
                     max(np.asarray(sensitivities1_4cycles)[1:, i]),
                     label=parameters[i])
        plt.xlabel('t')
        plt.ylabel('sensitivity')
        plt.yticks([])
        plt.savefig('images/SensitivityScaled1_4enzymes.pdf')
        plt.show()

    if 2 in cycles:
        t = np.linspace(0, 180, 1801)
        Y = np.zeros([param_exponents.shape[0], t.shape[0]])
        for i, X in enumerate(param_exponents):
            if verbose:
                print('Solving Equation', i + 1, '/', len(param_exponents))
            param_values = np.asarray(parameter_base_values) * np.power(10, X)
            Y[i] = evaluate_model(param_values, t, 2)
        sensitivities1_2cycles = [None] * len(t)
        sensitivitiesT_2cycles = [None] * len(t)
        for i in range(0, len(t)):
            # print('Analysing Sensitivities at time', t[i], '/', t[-1])
            Si = fast.analyze(problem, Y[:, i])
            sensitivities1_2cycles[i] = Si['S1']
            sensitivitiesT_2cycles[i] = Si['ST']
        for i in range(0, len(parameters)):
            plt.plot(t[:], np.asarray(sensitivities1_2cycles)[:, i],
                     label=parameters[i])
        plt.xlabel('t')
        plt.ylabel('Sensitivity')
        plt.savefig('images/Sensitivity1_2enzymes.pdf')
        plt.show()
        print('First Sensitivity peaks for 2 cycles:')
        for i in range(0, len(parameters)):
            print('\t', parameters[i], 'peaks:')
            peak_list, _ = find_peaks(
                np.asarray(sensitivities1_2cycles)[100:, i])
            peak_list = np.sort(peak_list)
            for peak in peak_list:
                print('\t\t at t =', t[peak], 'for a sensitivity of',
                      np.asarray(sensitivities1_2cycles)[peak, i])
        for i in range(0, len(parameters)):
            plt.plot(t[:], np.asarray(sensitivitiesT_2cycles)[:, i],
                     label=parameters[i])
        plt.xlabel('t')
        plt.ylabel('Sensitivity')
        plt.savefig('images/SensitivityT_2enzymes.pdf')
        plt.show()
        print('Total Sensitivity peaks for 2 cycles:')
        for i in range(0, len(parameters)):
            print('\t', parameters[i], 'peaks:')
            peak_list, _ = find_peaks(
                np.asarray(sensitivitiesT_2cycles)[100:, i])
            peak_list = np.sort(peak_list)
            for peak in peak_list:
                print('\t\t at t =', t[peak], 'for a sensitivity of',
                      np.asarray(sensitivitiesT_2cycles)[peak, i])
        print(np.asarray(sensitivities1_2cycles)[:, 0])
        for i in range(0, len(parameters)):
            plt.plot(t[:],
                     np.asarray(sensitivities1_2cycles)[:, i] /
                     max(np.asarray(sensitivities1_2cycles)[1:, i]),
                     label=parameters[i])
        plt.legend(loc='center right')
        plt.xlabel('t')
        plt.ylabel('sensitivity')
        plt.yticks([])
        plt.savefig('images/SensitivityScaled1_2enzymes.pdf')
        plt.show()
def run(self, input_ids=None, output_ids=None, method=None,
        calc_second_order=None, conf_level=None, **kwargs):
    self._update_parameters(method, calc_second_order, conf_level)
    self.other_parameters = kwargs
    if input_ids is None:
        input_ids = range(self.n_inputs)
    self.problem = {
        "num_vars": len(input_ids),
        "names": np.array(self.input_names)[input_ids].tolist(),
        "bounds": np.array(self.input_bounds)[input_ids].tolist()
    }
    if output_ids is None:
        output_ids = range(self.n_outputs)
    n_outputs = len(output_ids)

    if self.method.lower() == "sobol":
        self.logger.warning(
            "'sobol' method requires 'saltelli' sampling scheme!")
        # Additional keyword parameters and their defaults:
        # calc_second_order (bool): calculate second-order sensitivities (default True)
        # num_resamples (int): number of resamples used to compute the confidence intervals (default 1000)
        # conf_level (float): confidence interval level (default 0.95)
        # print_to_console (bool): print results directly to console (default False)
        # parallel (bool): default False
        # n_processors (int): default None
        self.analyzer = lambda output: sobol.analyze(
            self.problem, output,
            calc_second_order=self.calc_second_order,
            conf_level=self.conf_level,
            num_resamples=self.other_parameters.get("num_resamples", 1000),
            parallel=self.other_parameters.get("parallel", False),
            n_processors=self.other_parameters.get("n_processors", None),
            print_to_console=self.other_parameters.get("print_to_console", False))

    elif np.in1d(self.method.lower(), ["latin", "delta"]):
        self.logger.warning(
            "'latin' sampling scheme is recommended for 'delta' method!")
        # Additional keyword parameters and their defaults:
        # num_resamples (int): number of resamples used to compute the confidence intervals (default 1000)
        # conf_level (float): confidence interval level (default 0.95)
        # print_to_console (bool): print results directly to console (default False)
        self.analyzer = lambda output: delta.analyze(
            self.problem, self.input_samples[:, input_ids], output,
            conf_level=self.conf_level,
            num_resamples=self.other_parameters.get("num_resamples", 1000),
            print_to_console=self.other_parameters.get("print_to_console", False))

    elif np.in1d(self.method.lower(), ["fast", "fast_sampler"]):
        self.logger.warning(
            "'fast' method requires 'fast_sampler' sampling scheme!")
        # Additional keyword parameters and their defaults:
        # M (int): the interference parameter, i.e., the number of harmonics
        #     to sum in the Fourier series decomposition (default 4)
        # print_to_console (bool): print results directly to console (default False)
        self.analyzer = lambda output: fast.analyze(
            self.problem, output,
            M=self.other_parameters.get("M", 4),
            print_to_console=self.other_parameters.get("print_to_console", False))

    elif np.in1d(self.method.lower(), ["ff", "fractional_factorial"]):
        self.logger.warning(
            "'fractional_factorial' method requires 'fractional_factorial' "
            "sampling scheme!")
        # Additional keyword parameters and their defaults:
        # second_order (bool, default=False): include interaction effects
        # print_to_console (bool, default=False): print results directly to console
        self.analyzer = lambda output: ff.analyze(
            self.problem, self.input_samples[:, input_ids], output,
            calc_second_order=self.calc_second_order,
            conf_level=self.conf_level,
            num_resamples=self.other_parameters.get("num_resamples", 1000),
            print_to_console=self.other_parameters.get("print_to_console", False))

    elif self.method.lower() == "morris":
        self.logger.warning(
            "'morris' method requires 'morris' sampling scheme!")
        # Additional keyword parameters and their defaults:
        # num_resamples (int): number of resamples used to compute the confidence intervals (default 1000)
        # conf_level (float): confidence interval level (default 0.95)
        # print_to_console (bool): print results directly to console (default False)
        # grid_jump (int): grid jump size, must be identical to the value
        #     passed to SALib.sample.morris.sample() (default 2)
        # num_levels (int): number of grid levels, must be identical to the
        #     value passed to SALib.sample.morris (default 4)
        self.analyzer = lambda output: morris.analyze(
            self.problem, self.input_samples[:, input_ids], output,
            conf_level=self.conf_level,
            grid_jump=self.other_parameters.get("grid_jump", 2),
            num_levels=self.other_parameters.get("num_levels", 4),
            num_resamples=self.other_parameters.get("num_resamples", 1000),
            print_to_console=self.other_parameters.get("print_to_console", False))

    elif self.method.lower() == "dgsm":
        # num_resamples (int): number of resamples used to compute the confidence intervals (default 1000)
        # conf_level (float): confidence interval level (default 0.95)
        # print_to_console (bool): print results directly to console (default False)
        self.analyzer = lambda output: dgsm.analyze(
            self.problem, self.input_samples[:, input_ids], output,
            conf_level=self.conf_level,
            num_resamples=self.other_parameters.get("num_resamples", 1000),
            print_to_console=self.other_parameters.get("print_to_console", False))

    else:
        raise_value_error("Method " + str(self.method) +
                          " is not one of the available methods " +
                          str(METHODS) + " !")

    output_names = []
    results = []
    for io in output_ids:
        output_names.append(self.output_names[io])
        results.append(self.analyzer(self.output_values[:, io]))

    # TODO: Adjust list_of_dicts_to_dicts_of_ndarrays to handle ndarray
    # concatenation
    results = list_of_dicts_to_dicts_of_ndarrays(results)
    results.update({"output_names": output_names})
    return results
# load problem definition
with open(f"{txtinout}/sens_def.stb", "rb") as f:
    sens_def = pickle.load(f)

# Perform analysis
if sensitivity_method == "RBD_FAST":
    with open(f"{txtinout}/sample_def.stb", "rb") as f:
        sample = pickle.load(f)
    Si = rbd_fast.analyze(sens_def, sample, par_performance,
                          print_to_console=True)

if sensitivity_method == "DMIM":
    with open(f"{txtinout}/sample_def.stb", "rb") as f:
        sample = pickle.load(f)
    Si = delta.analyze(sens_def, sample, par_performance,
                       print_to_console=True)

if sensitivity_method == "FAST":
    Si = fast.analyze(sens_def, par_performance, print_to_console=True)

if sensitivity_method == "sobol":
    Si = sobol.analyze(sens_def, par_performance, print_to_console=False)

# Save the first-order sensitivity indices
numpy.savetxt(f"{txtinout}/s1_sensitivity.stb", Si['S1'],
              delimiter=",", newline="\n")
# numpy.savetxt(f"{txtinout}/s2_sensitivity.stb", Si['S2'],
#               delimiter=",", newline="\n")
problem = {
    'num_vars': 12,
    'names': ['b3', 'D', 'F', 'f', 'n', 'P', 'p', 'R3', 'V', 'Ad', 'k', 'N'],
    'bounds': [[6000, 10000], [540, 900], [18.75, 31.25], [2.25, 3.75],
               [15, 25], [810, 1350], [270, 450], [93750000, 156250000],
               [1.69925485921, 2.83209143201], [0.15, 0.25],
               [0.000375, 0.000625], [375, 625]]
}

param_values = fast_sampler.sample(problem, 100)
Y = np.zeros(param_values.shape[0])
for i, vals in enumerate(param_values):
    X = Simulation(contactRadius=vals[0]**(1 / 3),
                   numDCells=int(vals[1]),
                   freePathMean=vals[2],
                   freePathStDev=vals[3],
                   tCellActivationThreshold=int(vals[4]),
                   firstDCArrival=vals[5],
                   DCArrivalDuration=vals[6],
                   radius=vals[7]**(1 / 3),
                   tGammaShape=vals[8],
                   cogAgInDermis=vals[9],
                   antigenDecayRate=vals[10],
                   numAntigenInContactArea=int(vals[11]))
    Y[i] = X.simulate()

Si = fast.analyze(problem, Y)
log = open("SensitivityAnalysis.log", "a")
sys.stdout = log
print(Si)