def test_dgsm_to_df():
    """Check that dgsm.analyze output converts to a well-formed DataFrame."""
    names = ['x1', 'x2', 'x3']
    bound = [-3.14159265359, 3.14159265359]
    problem = {
        'num_vars': 3,
        'names': names,
        'groups': None,
        'bounds': [list(bound) for _ in range(3)],
    }

    # Finite-difference samples are required for the derivative-based measures.
    X = finite_diff.sample(problem, 1000, delta=0.001)
    model_output = Ishigami.evaluate(X)

    Si = dgsm.analyze(problem, X, model_output, print_to_console=False)
    Si_df = Si.to_df()

    assert isinstance(Si_df, pd.DataFrame), \
        "DGSM Si: Expected DataFrame, got {}".format(type(Si_df))
    assert set(Si_df.index) == set(names), "Incorrect index in DataFrame"

    expected_cols = ['vi', 'vi_std', 'dgsm', 'dgsm_conf']
    assert set(Si_df.columns) == set(expected_cols), \
        "Unexpected column names in DataFrame"
def test_regression_dgsm():
    """Regression check of the DGSM estimates for the Ishigami function."""
    problem = read_param_file('src/SALib/test_functions/params/Ishigami.txt')

    # Large sample so the estimates are stable enough for a loose tolerance.
    X = finite_diff.sample(problem, 10000, delta=0.001)
    Y = Ishigami.evaluate(X)

    Si = dgsm.analyze(problem, X, Y, conf_level=0.95,
                      print_to_console=False)

    assert_allclose(Si['dgsm'], [2.229, 7.066, 3.180], atol=5e-2, rtol=1e-1)
def test_Dgsm_with_NAN():
    """dgsm.analyze must raise ValueError when NaNs appear in the Y values."""
    problem, model_results, param_values = setup_samples()

    # NaNs in the model output are an error, not something to average over.
    with pytest.raises(ValueError):
        dgsm.analyze(problem, param_values, model_results)


# if __name__ == '__main__':
#     test_Sobol_with_NAN()
#     test_Sobol_with_no_NAN_flag()
#     test_Sobol_with_no_NAN_flag_as_default()
#     test_Delta_with_NAN()
#     test_rbd_fast_with_NAN()
#     test_Morris_with_NAN()
#     test_FF_with_NAN()
#     test_Fast_with_NAN()
#     test_Dgsm_with_NAN()
def _parallel_analyze(index, method, problem, samples, data, seed):
    """Run one sensitivity-analysis method and persist the result as JSON.

    The result dict's array values are converted to plain lists so they can
    be serialised; output is written to '<index>.json'. Returns 0 in all
    cases, including an unrecognised method name (then nothing is written).
    """
    common = {'print_to_console': False, 'seed': seed}
    if method == 'sobol':
        result = sobol.analyze(problem, data, calc_second_order=True, **common)
    elif method == 'fast':
        result = fast.analyze(problem, data, **common)
    elif method == 'rbd-fast':
        result = rbd_fast.analyze(problem, samples, data, **common)
    elif method == 'morris':
        result = morris_analyze(problem, samples, data, **common)
    elif method == 'delta':
        result = delta.analyze(problem, samples, data, **common)
    elif method == 'dgsm':
        result = dgsm.analyze(problem, samples, data, **common)
    elif method == 'frac':
        result = ff_analyze(problem, samples, data, second_order=True, **common)
    else:
        return 0

    # JSON cannot encode numpy arrays; convert every entry to a plain list.
    for key in result:
        result[key] = result[key].tolist()

    with open('{:s}.json'.format(index), 'w') as outfile:
        json.dump(result, outfile)
    return 0
def analyze(self, model, num_samples=10000):
    """Estimate DGSM first-order measures for every output of *model*.

    Parameters
    ----------
    model : object
        Must expose ``evaluate(param_values)`` returning a 2-D array of
        shape (samples, outputs) — assumed from the ``Y[:, i]`` indexing
        below; TODO confirm against the caller.
    num_samples : int, default 10000
        Number of samples drawn via ``self.sample``.

    Returns
    -------
    dict
        ``{'S1': {output_name: dgsm_values, ...}}``.
    """
    param_values = self.sample(num_samples=num_samples)
    Y = model.evaluate(param_values)

    # One DGSM analysis per output column.
    # Fix: dropped the redundant list() wrapper around the comprehension.
    Sis = [
        dgsm.analyze(self.problem, param_values, Y[:, i],
                     print_to_console=False)
        for i in range(Y.shape[1])
    ]

    S1s = [s['dgsm'] for s in Sis]
    return {'S1': dict(zip(self.output_names, S1s))}
def _analizar(símismo, vec_res, muestra, ops):
    """Apply the configured SALib analyzer to one result vector.

    ``símismo.método`` selects the method; sample-based analyzers also
    receive the sample matrix ``muestra``. Extra options in ``ops`` are
    forwarded unchanged. Raises ValueError for an unknown method name.
    """
    método = símismo.método
    if método == 'sobol':
        return sobol.analyze(problem=símismo.problema, Y=vec_res, **ops)
    if método == 'fast':
        return fast.analyze(problem=símismo.problema, Y=vec_res, **ops)
    if método == 'morris':
        return morris_anlz.analyze(problem=símismo.problema, X=muestra,
                                   Y=vec_res, **ops)
    if método == 'dmim':
        return delta.analyze(problem=símismo.problema, X=muestra,
                             Y=vec_res, **ops)
    if método == 'dgsm':
        return dgsm.analyze(problem=símismo.problema, X=muestra,
                            Y=vec_res, **ops)
    if método == 'ff':
        return ff_anlz.analyze(problem=símismo.problema, X=muestra,
                               Y=vec_res, **ops)
    raise ValueError(
        'Método de análisis de sensibilidad "{}" no reconocido.'.format(
            símismo.método))
def _parallel_analyze(data):
    """Analyze one output vector using the method configured in ``opts``.

    The problem definition and samples are read from the module-level
    ``population`` mapping; the seed comes from ``opts``. Returns the SALib
    result dict, or 0 when the configured method is not recognised.
    """
    seed = int(opts['seed'])
    samples = population['problem', 'samples']
    problem = population['problem', 'definition']
    method = opts['method']
    if method == 'sobol':
        # Fix: `seed` was parsed above but never forwarded on this branch,
        # unlike every other method — pass it so sobol resampling is
        # reproducible as well.
        return sobol.analyze(problem, data, calc_second_order=True,
                             print_to_console=False, seed=seed)
    elif method == 'fast':
        return fast.analyze(problem, data, print_to_console=False, seed=seed)
    elif method == 'rbd-fast':
        return rbd_fast.analyze(problem, samples, data,
                                print_to_console=False, seed=seed)
    elif method == 'morris':
        return morris_analyze(problem, samples, data,
                              print_to_console=False, seed=seed)
    elif method == 'delta':
        return delta.analyze(problem, samples, data,
                             print_to_console=False, seed=seed)
    elif method == 'dgsm':
        return dgsm.analyze(problem, samples, data,
                            print_to_console=False, seed=seed)
    elif method == 'frac':
        return ff_analyze(problem, samples, data, second_order=True,
                          print_to_console=False, seed=seed)
    else:
        return 0
def analyze(self):
    """Initiate the analysis, and stores the result at data directory.

    Generates:
        Analysis result at 'acbm/data/output/dgsm.txt'.
    """
    X = finite_diff.sample(self.problem, self.n_samples,
                           delta=self.delta, seed=self.seed_sample)
    Y = ACBM.evaluate(X)
    si = dgsm.analyze(self.problem, X, Y, seed=self.seed_analyze)

    # Scale down the values of vi (16th root compresses their spread).
    si['vi'] = [x**(1 / 16) for x in si['vi']]

    # Fix: the original `pickle.dump(si, open(...))` never closed the file;
    # a context manager guarantees the handle is released.
    with open(self.path_output + 'dgsm.txt', 'wb') as fh:
        pickle.dump(si, fh)
# NOTE(review): this chunk begins mid-script — the first statement below looks
# like the tail of a sample-evaluation loop (`i`, `X`, `outFEE`, `outPMV`,
# `gpPMV`, `t0`, `N`, `problem`, `param_values` are defined earlier, outside
# this view). TODO confirm against the full file.
outPMV[i] = gpPMV.predict(X.reshape(1, -1))

out_calc = time.time() - t0
print('=== outputs generated in %d seconds ===' % out_calc)
# Persist model outputs so re-runs can skip the expensive evaluation step.
np.save('outFEE%d' % N, outFEE)
np.save('outPMV%d' % N, outPMV)

'''Perform analysis'''
try:
    # Load previously computed sensitivity results if they exist.
    # test = np.load('NA')
    SiFEE = np.load('SiFEE%d.npy' % N)
    SiPMV = np.load('SiPMV%d.npy' % N)
    print('=== saved results loaded ===')
except FileNotFoundError:
    # First run for this N: compute the DGSM measures and cache them.
    t0 = time.time()
    SiFEE = dgsm.analyze(problem, param_values, outFEE, print_to_console=False)
    SiPMV = dgsm.analyze(problem, param_values, outPMV, print_to_console=False)
    # NOTE(review): assigning to the name `analyze` here shadows nothing
    # visible in this chunk, but verify it does not clash elsewhere.
    analyze = time.time() - t0
    print('=== sensitivity analyzed in %d seconds ===' % analyze)
    # NOTE(review): the exact end of this except-block is ambiguous in the
    # collapsed source; the two saves most plausibly belong here — confirm.
    np.save('SiFEE%d' % N, SiFEE)
    np.save('SiPMV%d' % N, SiPMV)

# SiPMV is a 0-d object array wrapping a dict, hence .item() before .get().
print(SiPMV.item().get('vi'))

# Plot styling for the figures produced further down (outside this view).
colors = sns.hls_palette(10, l=.55, s=.6)
externality_colors = ["#be0119", "#7a6a4f", "#94ac02", "#0e87cc", "#887191"]
# sns.palplot(externality_colors)
plt.rcParams['font.serif'] = 'DejaVu Serif'
plt.rcParams['figure.figsize'] = 10, 6.5
plt.rcParams['figure.constrained_layout.use'] = True
import sys

# Fix: the path must be extended BEFORE the SALib imports, otherwise the
# appended directory cannot influence which SALib installation is picked up.
# (`import sys` added here; it was not visible in this snippet.)
sys.path.append('../..')

from SALib.analyze import dgsm
from SALib.sample import finite_diff
from SALib.test_functions import Ishigami
from SALib.util import read_param_file

# Read the parameter range file and generate samples
problem = read_param_file('../../src/SALib/test_functions/params/Ishigami.txt')

# Generate finite-difference samples (required for derivative-based measures)
param_values = finite_diff.sample(problem, 1000, delta=0.001)

# Run the "model" -- this will happen offline for external models
Y = Ishigami.evaluate(param_values)

# Perform the sensitivity analysis using the model output
# Specify which column of the output file to analyze (zero-indexed)
Si = dgsm.analyze(problem, param_values, Y, conf_level=0.95,
                  print_to_console=True)

# Returns a dictionary with keys 'vi', 'vi_std', 'dgsm', and 'dgsm_conf'
# e.g. Si['vi'] contains the sensitivity measure for each parameter, in
# the same order as the parameter file

# For comparison, Morris mu* < sqrt(v_i)
# and total order S_tot <= dgsm, following Sobol and Kucherenko (2009)
def sa(model, response, policy=None, method="sobol", nsamples=1000, **kwargs):
    """Run a SALib sensitivity analysis of *model* on one *response*.

    Parameters
    ----------
    model : object
        Exposes an ``uncertainties`` mapping (name -> distribution with a
        ``ppf`` method) and is usable with the module's ``evaluate``.
    response : str
        Name of the response (output) to analyze.
    policy : dict, optional
        Fixed settings overwritten onto each sample. Defaults to an empty
        policy. Fix: the original used a mutable ``{}`` default argument.
    method : {"sobol", "morris", "fast", "ff", "dgsm", "delta"}
        Sensitivity method; also selects the matching sampler.
    nsamples : int
        Requested number of model evaluations; the sampler argument N is
        back-computed via ``_predict_N``.
    **kwargs
        Forwarded to the SALib sampler/analyzer after ``_cleanup_kwargs``.

    Returns
    -------
    SAResult keyed by parameter name.

    Raises
    ------
    ValueError
        If the model defines no uncertainties.
    """
    if len(model.uncertainties) == 0:
        raise ValueError("no uncertainties defined in model")

    # Guard against the shared-mutable-default pitfall.
    if policy is None:
        policy = {}

    problem = {
        'num_vars': len(model.uncertainties),
        # NOTE(review): this is a keys() view, not a list — SALib appears to
        # accept it here, but confirm if a SALib upgrade breaks this.
        'names': model.uncertainties.keys(),
        'bounds': [[0.0, 1.0] for u in model.uncertainties],
        'groups': kwargs.get("groups", None)
    }

    # estimate the argument N passed to the sampler that produces the
    # requested number of samples
    N = _predict_N(method, nsamples, problem["num_vars"], kwargs)

    # generate the samples in the unit hypercube
    if method == "sobol":
        samples = saltelli.sample(problem, N,
                                  **_cleanup_kwargs(saltelli.sample, kwargs))
    elif method == "morris":
        samples = morris_sampler.sample(
            problem, N, **_cleanup_kwargs(morris_sampler.sample, kwargs))
    elif method == "fast":
        samples = fast_sampler.sample(
            problem, N, **_cleanup_kwargs(fast_sampler.sample, kwargs))
    elif method == "ff":
        samples = ff_sampler.sample(
            problem, **_cleanup_kwargs(ff_sampler.sample, kwargs))
    elif method == "dgsm":
        samples = finite_diff.sample(
            problem, N, **_cleanup_kwargs(finite_diff.sample, kwargs))
    elif method == "delta":
        if "samples" in kwargs:
            samples = kwargs["samples"]
        else:
            samples = latin.sample(problem, N,
                                   **_cleanup_kwargs(latin.sample, kwargs))

    # convert from samples in [0, 1] to uncertainty domain
    for i, u in enumerate(model.uncertainties):
        samples[:, i] = u.ppf(samples[:, i])

    # run the model and collect the responses
    responses = np.empty(samples.shape[0])
    for i in range(samples.shape[0]):
        sample = {k: v for k, v in zip(model.uncertainties.keys(), samples[i])}
        responses[i] = evaluate(model, overwrite(sample, policy))[response]

    # run the sensitivity analysis method
    if method == "sobol":
        result = sobol.analyze(problem, responses,
                               **_cleanup_kwargs(sobol.analyze, kwargs))
    elif method == "morris":
        result = morris_analyzer.analyze(
            problem, samples, responses,
            **_cleanup_kwargs(morris_analyzer.analyze, kwargs))
    elif method == "fast":
        result = fast.analyze(problem, responses,
                              **_cleanup_kwargs(fast.analyze, kwargs))
    elif method == "ff":
        result = ff_analyzer.analyze(
            problem, samples, responses,
            **_cleanup_kwargs(ff_analyzer.analyze, kwargs))
    elif method == "dgsm":
        result = dgsm.analyze(problem, samples, responses,
                              **_cleanup_kwargs(dgsm.analyze, kwargs))
    elif method == "delta":
        result = delta.analyze(problem, samples, responses,
                               **_cleanup_kwargs(delta.analyze, kwargs))

    # convert the SALib results into a form allowing pretty printing and
    # lookups using the parameter name
    pretty_result = SAResult(
        list(result["names"] if "names" in result else problem["names"]))

    # scalar measures keyed by the problem's parameter names
    for key in ("S1", "S1_conf", "ST", "ST_conf", "delta", "delta_conf",
                "vi", "vi_std", "dgsm", "dgsm_conf"):
        if key in result:
            pretty_result[key] = {
                k: float(v) for k, v in zip(problem["names"], result[key])
            }

    # second-order matrices get their own dict-of-dicts conversion
    if "S2" in result:
        pretty_result["S2"] = _S2_to_dict(result["S2"], problem)
    if "S2_conf" in result:
        pretty_result["S2_conf"] = _S2_to_dict(result["S2_conf"], problem)

    # Morris measures are keyed on the names reported by the analyzer itself
    for key in ("mu", "mu_star", "mu_star_conf", "sigma"):
        if key in result:
            pretty_result[key] = {
                k: float(v) for k, v in zip(result["names"], result[key])
            }

    return pretty_result
num_resamples=10, conf_level=0.95, print_to_console=False) f1, (ax1, ax2) = plt.subplots(2, 1, sharex=True) SS1 = (Si_con['S1'][1:]) / Si['S1'][1:] SS2 = (Si_con['delta'][1:]) / Si['delta'][1:] sns.barplot(np.arange(2, 22), np.abs(SS1), ax=ax1) sns.barplot(np.arange(2, 22), np.abs(SS2), ax=ax2) ax1.set_title('SS1') ax2.set_title('SDelta') ax2.set_xlabel('Sensitivity') elif method_flag == 3: Si = dgsm.analyze(problem, param_values, Y, conf_level=0.95, print_to_console=False) figure_keys = { 'ax1_title': 'dgsm', 'ax2_title': 'dgsm_conf', 'ax2_lable': 'Parameter index', 'ax3_title': 'vi', 'ax4_title': 'vi_std', 'ax4_lable': 'Parameter index', } Si_con = dgsm.analyze(problem, param_values, Y_con, conf_level=0.95, print_to_console=False)
import sys

# Fix: the path must be extended BEFORE the SALib imports, otherwise the
# appended directory cannot influence which SALib installation is picked up.
sys.path.append('../..')

from SALib.analyze import dgsm
from SALib.sample import finite_diff
from SALib.test_functions import Ishigami
from SALib.util import read_param_file

# Read the parameter range file and generate samples
problem = read_param_file('../../SALib/test_functions/params/Ishigami.txt')

# Generate finite-difference samples (required for derivative-based measures)
param_values = finite_diff.sample(problem, 1000, delta=0.001)

# Run the "model" -- this will happen offline for external models
Y = Ishigami.evaluate(param_values)

# Perform the sensitivity analysis using the model output
# Specify which column of the output file to analyze (zero-indexed)
Si = dgsm.analyze(problem, param_values, Y, conf_level=0.95,
                  print_to_console=False)

# Returns a dictionary with keys 'vi', 'vi_std', 'dgsm', and 'dgsm_conf'
# e.g. Si['vi'] contains the sensitivity measure for each parameter, in
# the same order as the parameter file

# For comparison, Morris mu* < sqrt(v_i)
# and total order S_tot <= dgsm, following Sobol and Kucherenko (2009)
def sa(model, response, policy=None, method="sobol", nsamples=1000, **kwargs):
    """Run a SALib sensitivity analysis of *model* on one *response*.

    Fix: the mutable default ``policy={}`` was replaced by ``None`` plus an
    explicit fallback; behaviour for all existing callers is unchanged.

    Parameters mirror the SALib samplers/analyzers: *method* selects both
    the sampler and the analyzer, *nsamples* is the requested number of
    model evaluations, and **kwargs are filtered per-callee through
    ``_cleanup_kwargs``. Returns an ``SAResult`` keyed by parameter name;
    raises ``ValueError`` when the model has no uncertainties.
    """
    if len(model.uncertainties) == 0:
        raise ValueError("no uncertainties defined in model")

    # Guard against the shared-mutable-default pitfall.
    if policy is None:
        policy = {}

    problem = {
        'num_vars': len(model.uncertainties),
        # NOTE(review): a keys() view, not a list — works with this SALib
        # version, but confirm on upgrade.
        'names': model.uncertainties.keys(),
        'bounds': [[0.0, 1.0] for u in model.uncertainties],
        'groups': kwargs.get("groups", None)
    }

    # estimate the argument N passed to the sampler that produces the
    # requested number of samples
    N = _predict_N(method, nsamples, problem["num_vars"], kwargs)

    # generate the samples in the unit hypercube
    if method == "sobol":
        samples = saltelli.sample(problem, N,
                                  **_cleanup_kwargs(saltelli.sample, kwargs))
    elif method == "morris":
        samples = morris_sampler.sample(
            problem, N, **_cleanup_kwargs(morris_sampler.sample, kwargs))
    elif method == "fast":
        samples = fast_sampler.sample(
            problem, N, **_cleanup_kwargs(fast_sampler.sample, kwargs))
    elif method == "ff":
        samples = ff_sampler.sample(
            problem, **_cleanup_kwargs(ff_sampler.sample, kwargs))
    elif method == "dgsm":
        samples = finite_diff.sample(
            problem, N, **_cleanup_kwargs(finite_diff.sample, kwargs))
    elif method == "delta":
        if "samples" in kwargs:
            samples = kwargs["samples"]
        else:
            samples = latin.sample(problem, N,
                                   **_cleanup_kwargs(latin.sample, kwargs))

    # convert from samples in [0, 1] to uncertainty domain
    for i, u in enumerate(model.uncertainties):
        samples[:, i] = u.ppf(samples[:, i])

    # run the model and collect the responses
    responses = np.empty(samples.shape[0])
    for i in range(samples.shape[0]):
        sample = {k: v for k, v in zip(model.uncertainties.keys(), samples[i])}
        responses[i] = evaluate(model, overwrite(sample, policy))[response]

    # run the sensitivity analysis method
    if method == "sobol":
        result = sobol.analyze(problem, responses,
                               **_cleanup_kwargs(sobol.analyze, kwargs))
    elif method == "morris":
        result = morris_analyzer.analyze(
            problem, samples, responses,
            **_cleanup_kwargs(morris_analyzer.analyze, kwargs))
    elif method == "fast":
        result = fast.analyze(problem, responses,
                              **_cleanup_kwargs(fast.analyze, kwargs))
    elif method == "ff":
        result = ff_analyzer.analyze(
            problem, samples, responses,
            **_cleanup_kwargs(ff_analyzer.analyze, kwargs))
    elif method == "dgsm":
        result = dgsm.analyze(problem, samples, responses,
                              **_cleanup_kwargs(dgsm.analyze, kwargs))
    elif method == "delta":
        result = delta.analyze(problem, samples, responses,
                               **_cleanup_kwargs(delta.analyze, kwargs))

    # convert the SALib results into a form allowing pretty printing and
    # lookups using the parameter name
    pretty_result = SAResult(
        result["names"] if "names" in result else problem["names"])

    # scalar measures keyed by the problem's parameter names
    for key in ("S1", "S1_conf", "ST", "ST_conf", "delta", "delta_conf",
                "vi", "vi_std", "dgsm", "dgsm_conf"):
        if key in result:
            pretty_result[key] = {
                k: float(v) for k, v in zip(problem["names"], result[key])
            }

    # second-order matrices get their own dict-of-dicts conversion
    if "S2" in result:
        pretty_result["S2"] = _S2_to_dict(result["S2"], problem)
    if "S2_conf" in result:
        pretty_result["S2_conf"] = _S2_to_dict(result["S2_conf"], problem)

    # Morris measures are keyed on the names reported by the analyzer itself
    for key in ("mu", "mu_star", "mu_star_conf", "sigma"):
        if key in result:
            pretty_result[key] = {
                k: float(v) for k, v in zip(result["names"], result[key])
            }

    return pretty_result
def run(self, input_ids=None, output_ids=None, method=None,
        calc_second_order=None, conf_level=None, **kwargs):
    """Build the SALib analyzer for the configured method and apply it to
    the selected outputs.

    Parameters
    ----------
    input_ids : sequence of int, optional
        Indices of the inputs to include (default: all inputs).
    output_ids : sequence of int, optional
        Indices of the outputs to analyze (default: all outputs).
    method, calc_second_order, conf_level
        Forwarded to ``self._update_parameters``.
    **kwargs
        Method-specific options (``num_resamples``, ``M``, ``grid_jump``,
        ``num_levels``, ``parallel``, ...); stored in
        ``self.other_parameters`` and read with defaults per method.

    Returns
    -------
    dict
        Per-measure ndarrays (one row per analyzed output) plus the key
        ``"output_names"``.
    """
    self._update_parameters(method, calc_second_order, conf_level)
    self.other_parameters = kwargs
    if input_ids is None:
        input_ids = range(self.n_inputs)
    self.problem = {
        "num_vars": len(input_ids),
        "names": np.array(self.input_names)[input_ids].tolist(),
        "bounds": np.array(self.input_bounds)[input_ids].tolist()
    }
    if output_ids is None:
        output_ids = range(self.n_outputs)
    n_outputs = len(output_ids)

    # Fix: normalise the method name once — the original re-lowered it in
    # every branch and even called `.lower().lower()` on the morris branch.
    # Plain tuple membership replaces the scalar np.in1d checks (equivalent
    # truthiness, clearer intent).
    method_name = self.method.lower()

    if method_name == "sobol":
        self.logger.warning(
            "'sobol' method requires 'saltelli' sampling scheme!")
        # Optional kwargs: calc_second_order (True), num_resamples (1000),
        # conf_level (0.95), print_to_console (False), parallel (False),
        # n_processors (None).
        self.analyzer = lambda output: sobol.analyze(
            self.problem,
            output,
            calc_second_order=self.calc_second_order,
            conf_level=self.conf_level,
            num_resamples=self.other_parameters.get("num_resamples", 1000),
            parallel=self.other_parameters.get("parallel", False),
            n_processors=self.other_parameters.get("n_processors", None),
            print_to_console=self.other_parameters.get(
                "print_to_console", False))
    elif method_name in ("latin", "delta"):
        self.logger.warning(
            "'latin' sampling scheme is recommended for 'delta' method!")
        # Optional kwargs: num_resamples (1000), print_to_console (False).
        self.analyzer = lambda output: delta.analyze(
            self.problem,
            self.input_samples[:, input_ids],
            output,
            conf_level=self.conf_level,
            num_resamples=self.other_parameters.get("num_resamples", 1000),
            print_to_console=self.other_parameters.get(
                "print_to_console", False))
    elif method_name in ("fast", "fast_sampler"):
        self.logger.warning(
            "'fast' method requires 'fast_sampler' sampling scheme!")
        # Optional kwargs: M — number of harmonics in the Fourier series
        # decomposition (4), print_to_console (False).
        self.analyzer = lambda output: fast.analyze(
            self.problem,
            output,
            M=self.other_parameters.get("M", 4),
            print_to_console=self.other_parameters.get(
                "print_to_console", False))
    elif method_name in ("ff", "fractional_factorial"):
        self.logger.warning(
            "'fractional_factorial' method requires 'fractional_factorial' sampling scheme!"
        )
        # NOTE(review): SALib's ff.analyze documents a `second_order`
        # keyword; `calc_second_order` is forwarded here as in the original
        # — confirm against the installed SALib version.
        self.analyzer = lambda output: ff.analyze(
            self.problem,
            self.input_samples[:, input_ids],
            output,
            calc_second_order=self.calc_second_order,
            conf_level=self.conf_level,
            num_resamples=self.other_parameters.get("num_resamples", 1000),
            print_to_console=self.other_parameters.get(
                "print_to_console", False))
    elif method_name == "morris":
        self.logger.warning(
            "'morris' method requires 'morris' sampling scheme!")
        # Optional kwargs: num_resamples (1000), grid_jump (2, must match
        # the sampler), num_levels (4, must match the sampler),
        # print_to_console (False).
        self.analyzer = lambda output: morris.analyze(
            self.problem,
            self.input_samples[:, input_ids],
            output,
            conf_level=self.conf_level,
            grid_jump=self.other_parameters.get("grid_jump", 2),
            num_levels=self.other_parameters.get("num_levels", 4),
            num_resamples=self.other_parameters.get("num_resamples", 1000),
            print_to_console=self.other_parameters.get(
                "print_to_console", False))
    elif method_name == "dgsm":
        # Optional kwargs: num_resamples (1000), print_to_console (False).
        self.analyzer = lambda output: dgsm.analyze(
            self.problem,
            self.input_samples[:, input_ids],
            output,
            conf_level=self.conf_level,
            num_resamples=self.other_parameters.get("num_resamples", 1000),
            print_to_console=self.other_parameters.get(
                "print_to_console", False))
    else:
        raise_value_error("Method " + str(self.method) +
                          " is not one of the available methods " +
                          str(METHODS) + " !")

    # Apply the analyzer to each requested output column.
    output_names = []
    results = []
    for io in output_ids:
        output_names.append(self.output_names[io])
        results.append(self.analyzer(self.output_values[:, io]))
    # TODO: Adjust list_of_dicts_to_dicts_of_ndarrays to handle ndarray
    # concatenation
    results = list_of_dicts_to_dicts_of_ndarrays(results)
    results.update({"output_names": output_names})
    return results
import numpy as np # Read the parameter range file and generate samples param_file = '../../SALib/test_functions/params/Ishigami.txt' # Generate samples param_values = finite_diff.sample(1000, param_file, delta=0.001) # Save the parameter values in a file (they are needed in the analysis) np.savetxt('model_input.txt', param_values, delimiter=' ') # Run the "model" and save the output in a text file # This will happen offline for external models Y = Ishigami.evaluate(param_values) np.savetxt('model_output.txt', Y, delimiter=' ') # Perform the sensitivity analysis using the model output # Specify which column of the output file to analyze (zero-indexed) Si = dgsm.analyze(param_file, 'model_input.txt', 'model_output.txt', column=0, conf_level=0.95, print_to_console=False) # Returns a dictionary with keys 'vi', 'vi_std', 'dgsm', and 'dgsm_conf' # e.g. Si['vi'] contains the sensitivity measure for each parameter, in # the same order as the parameter file # For comparison, Morris mu* < sqrt(v_i) # and total order S_tot <= dgsm, following Sobol and Kucherenko (2009)
from SALib.sample import finite_diff
from SALib.analyze import dgsm
from SALib.test_functions import Ishigami
import numpy as np

# Parameter range file, used both for sampling and for the analysis step.
param_file = '../../SALib/test_functions/params/Ishigami.txt'

# Draw finite-difference samples (old SALib API: N first, then the file).
X = finite_diff.sample(1000, param_file, delta=0.001)

# Persist the inputs; this analyzer variant re-reads them from disk.
np.savetxt('model_input.txt', X, delimiter=' ')

# Evaluate the test function offline and persist the outputs as well.
model_output = Ishigami.evaluate(X)
np.savetxt('model_output.txt', model_output, delimiter=' ')

# Analyze column 0 (zero-indexed) of the saved model output.
Si = dgsm.analyze(param_file, 'model_input.txt', 'model_output.txt',
                  column=0, conf_level=0.95, print_to_console=False)

# Si holds 'vi', 'vi_std', 'dgsm' and 'dgsm_conf', each ordered as in the
# parameter file. For comparison, Morris mu* < sqrt(v_i), and total order
# S_tot <= dgsm, following Sobol and Kucherenko (2009).