def test_FF_with_NAN():
    '''Test that ff.analyze raises a ValueError when NaNs are passed in the Y values.'''
    problem, model_results, param_values = setup_samples()

    # Should raise a ValueError
    with pytest.raises(ValueError):
        ff.analyze(problem, param_values, model_results)
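The `setup_samples()` helper used above is defined elsewhere in the test module. A minimal sketch of what such a fixture could look like (the problem definition, the model expression, and the injected NaN are illustrative assumptions only):

import numpy as np

from SALib.sample import ff as ff_sample


def setup_samples():
    # Hypothetical fixture: build a small problem, sample it, and inject a NaN
    # into the model output so that ff.analyze should reject it.
    problem = {
        'num_vars': 3,
        'names': ['x1', 'x2', 'x3'],
        'groups': None,
        'bounds': [[-3.14159265359, 3.14159265359]] * 3
    }
    param_values = ff_sample.sample(problem)
    model_results = param_values[:, 0] + 0.1 * param_values[:, 1]
    model_results[0] = np.nan
    return problem, model_results, param_values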
def test_ff_to_df():
    params = ['x1', 'x2', 'x3']
    main_index = params + ['dummy_0']
    problem = {
        'num_vars': 3,
        'names': params,
        'groups': None,
        'bounds': [[-3.14159265359, 3.14159265359],
                   [-3.14159265359, 3.14159265359],
                   [-3.14159265359, 3.14159265359]]
    }

    X = ff_sample.sample(problem)
    Y = X[:, 0] + (0.1 * X[:, 1]) + ((1.2 * X[:, 2]) * (0.2 + X[:, 0]))

    Si = ff.analyze(problem, X, Y, second_order=True, print_to_console=False)
    main_effect, inter_effect = Si.to_df()

    assert isinstance(main_effect, pd.DataFrame), \
        "FF ME: Expected DataFrame, got {}".format(type(main_effect))
    assert isinstance(inter_effect, pd.DataFrame), \
        "FF IE: Expected DataFrame, got {}".format(type(inter_effect))

    assert set(main_effect.index) == set(main_index), \
        "Incorrect index in Main Effect DataFrame"

    inter_index = set([('x1', 'x2'), ('x1', 'x3'), ('x2', 'x3'),
                       ('x1', 'dummy_0'), ('x2', 'dummy_0'), ('x3', 'dummy_0')])
    assert set(inter_effect.index) == inter_index, \
        "Incorrect index in Interaction Effect DataFrame"
def test_interactions_from_saltelli():
    '''Check the second-order (interaction) effects computed for a model
    response containing a single two-factor interaction.
    '''
    problem = {
        'bounds': np.repeat([-1, 1], 12).reshape(2, 12).T,
        'num_vars': 12,
        'names': ["x" + str(x + 1) for x in range(12)]
    }

    X = sample(problem)
    Y = np.array([10, -2, 4, -8, 2, 6, -4, 0,
                  2, 6, -4, 0, 10, -2, 4, -8,
                  -2, -6, 4, 0, -10, 2, -4, 8,
                  -10, 2, -4, 8, -2, -6, 4, 0])

    Si = analyze(problem, X, Y, second_order=True)
    actual = Si['IE']
    expected = [
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 4.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 4.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 4.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 4.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 4.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 4.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 4.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 4.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
    ]
    assert_equal(actual, expected)
def test_ff_example():
    '''Check the sampled design and main effects for a linear model with one
    two-factor interaction.
    '''
    problem = {
        'bounds': np.repeat([-1, 1], 12).reshape(2, 12).T,
        'num_vars': 12,
        'names': ["x" + str(x + 1) for x in range(12)]
    }

    X = sample(problem)
    Y = X[:, 0] + 2 * X[:, 1] + 3 * X[:, 2] + 4 * X[:, 6] * X[:, 11]

    expected = np.array([10, -2, 4, -8, 2, 6, -4, 0,
                         2, 6, -4, 0, 10, -2, 4, -8,
                         -2, -6, 4, 0, -10, 2, -4, 8,
                         -10, 2, -4, 8, -2, -6, 4, 0])
    assert_equal(Y, expected)

    Si = analyze(problem, X, Y)

    expected = np.array([1, 2, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                        dtype=float)
    assert_equal(expected, Si['ME'])
def analyze_ff(self):
    # Fractional Factorial
    return ff.analyze(self.sa_problem, self.samples_x, self.samples_y,
                      second_order=True,
                      print_to_console=self.options["print_to_console"])
def _analizar(símismo, vec_res, muestra, ops):
    # Dispatch to the matching SALib analyzer for the chosen method.
    if símismo.método == 'sobol':
        return sobol.analyze(problem=símismo.problema, Y=vec_res, **ops)
    elif símismo.método == 'fast':
        return fast.analyze(problem=símismo.problema, Y=vec_res, **ops)
    elif símismo.método == 'morris':
        return morris_anlz.analyze(problem=símismo.problema, X=muestra, Y=vec_res, **ops)
    elif símismo.método == 'dmim':
        return delta.analyze(problem=símismo.problema, X=muestra, Y=vec_res, **ops)
    elif símismo.método == 'dgsm':
        return dgsm.analyze(problem=símismo.problema, X=muestra, Y=vec_res, **ops)
    elif símismo.método == 'ff':
        return ff_anlz.analyze(problem=símismo.problema, X=muestra, Y=vec_res, **ops)
    else:
        # "Sensitivity analysis method '{}' not recognised."
        raise ValueError('Método de análisis de sensibilidad "{}" no reconocido.'
                         .format(símismo.método))
def test_ff_analyze():
    '''Check the main effects computed for a hand-built two-level design.'''
    problem = {
        'bounds': [[0., 2.5], [0., 1.], [0., 1.], [0., 1.]],
        'num_vars': 4,
        'names': ['x1', 'x2', 'x3', 'x4']
    }
    X = np.array([[1, 1, 1, 1],
                  [1, 0, 1, 0],
                  [1, 1, 0, 0],
                  [1, 0, 0, 1],
                  [0, 0, 0, 0],
                  [0, 1, 0, 1],
                  [0, 0, 1, 1],
                  [0, 1, 1, 0]], dtype=float)
    Y = np.array([1.5, 1, 1.5, 1, 2, 2.5, 2, 2.5], dtype=float)

    actual = analyze(problem, X, Y)
    expected = {
        'ME': np.array([-0.5, 0.25, 0., 0.]),
        'names': ['x1', 'x2', 'x3', 'x4']
    }
    assert_equal(actual, expected)
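The expected main effects above can be verified by hand: each entry appears to be half the difference between the mean response at a factor's high and low levels (for x1, (1.25 - 2.25) / 2 = -0.5; for x2, (2.0 - 1.5) / 2 = 0.25). A small check of that arithmetic, reusing the X and Y from the test:

# Hand check of the expected main effect for x1: half the high/low contrast.
y_high = Y[X[:, 0] == 1].mean()   # 1.25
y_low = Y[X[:, 0] == 0].mean()    # 2.25
assert np.isclose((y_high - y_low) / 2, -0.5)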
def sa(model, response, policy={}, method="sobol", nsamples=1000, **kwargs):
    if len(model.uncertainties) == 0:
        raise ValueError("no uncertainties defined in model")

    problem = {
        'num_vars': len(model.uncertainties),
        'names': model.uncertainties.keys(),
        'bounds': [[0.0, 1.0] for u in model.uncertainties],
        'groups': kwargs.get("groups", None)
    }

    # estimate the argument N passed to the sampler that produces the requested
    # number of samples
    N = _predict_N(method, nsamples, problem["num_vars"], kwargs)

    # generate the samples
    if method == "sobol":
        samples = saltelli.sample(problem, N,
                                  **_cleanup_kwargs(saltelli.sample, kwargs))
    elif method == "morris":
        samples = morris_sampler.sample(
            problem, N, **_cleanup_kwargs(morris_sampler.sample, kwargs))
    elif method == "fast":
        samples = fast_sampler.sample(
            problem, N, **_cleanup_kwargs(fast_sampler.sample, kwargs))
    elif method == "ff":
        samples = ff_sampler.sample(
            problem, **_cleanup_kwargs(ff_sampler.sample, kwargs))
    elif method == "dgsm":
        samples = finite_diff.sample(
            problem, N, **_cleanup_kwargs(finite_diff.sample, kwargs))
    elif method == "delta":
        if "samples" in kwargs:
            samples = kwargs["samples"]
        else:
            samples = latin.sample(problem, N,
                                   **_cleanup_kwargs(latin.sample, kwargs))

    # convert from samples in [0, 1] to uncertainty domain
    for i, u in enumerate(model.uncertainties):
        samples[:, i] = u.ppf(samples[:, i])

    # run the model and collect the responses
    responses = np.empty(samples.shape[0])

    for i in range(samples.shape[0]):
        sample = {k: v for k, v in zip(model.uncertainties.keys(), samples[i])}
        responses[i] = evaluate(model, overwrite(sample, policy))[response]

    # run the sensitivity analysis method
    if method == "sobol":
        result = sobol.analyze(problem, responses,
                               **_cleanup_kwargs(sobol.analyze, kwargs))
    elif method == "morris":
        result = morris_analyzer.analyze(
            problem, samples, responses,
            **_cleanup_kwargs(morris_analyzer.analyze, kwargs))
    elif method == "fast":
        result = fast.analyze(problem, responses,
                              **_cleanup_kwargs(fast.analyze, kwargs))
    elif method == "ff":
        result = ff_analyzer.analyze(
            problem, samples, responses,
            **_cleanup_kwargs(ff_analyzer.analyze, kwargs))
    elif method == "dgsm":
        result = dgsm.analyze(problem, samples, responses,
                              **_cleanup_kwargs(dgsm.analyze, kwargs))
    elif method == "delta":
        result = delta.analyze(problem, samples, responses,
                               **_cleanup_kwargs(delta.analyze, kwargs))

    # convert the SALib results into a form allowing pretty printing and
    # lookups using the parameter name
    pretty_result = SAResult(
        list(result["names"] if "names" in result else problem["names"]))

    if "S1" in result:
        pretty_result["S1"] = {k: float(v) for k, v in
                               zip(problem["names"], result["S1"])}
    if "S1_conf" in result:
        pretty_result["S1_conf"] = {k: float(v) for k, v in
                                    zip(problem["names"], result["S1_conf"])}
    if "ST" in result:
        pretty_result["ST"] = {k: float(v) for k, v in
                               zip(problem["names"], result["ST"])}
    if "ST_conf" in result:
        pretty_result["ST_conf"] = {k: float(v) for k, v in
                                    zip(problem["names"], result["ST_conf"])}
    if "S2" in result:
        pretty_result["S2"] = _S2_to_dict(result["S2"], problem)
    if "S2_conf" in result:
        pretty_result["S2_conf"] = _S2_to_dict(result["S2_conf"], problem)
    if "delta" in result:
        pretty_result["delta"] = {k: float(v) for k, v in
                                  zip(problem["names"], result["delta"])}
    if "delta_conf" in result:
        pretty_result["delta_conf"] = {k: float(v) for k, v in
                                       zip(problem["names"], result["delta_conf"])}
    if "vi" in result:
        pretty_result["vi"] = {k: float(v) for k, v in
                               zip(problem["names"], result["vi"])}
    if "vi_std" in result:
        pretty_result["vi_std"] = {k: float(v) for k, v in
                                   zip(problem["names"], result["vi_std"])}
    if "dgsm" in result:
        pretty_result["dgsm"] = {k: float(v) for k, v in
                                 zip(problem["names"], result["dgsm"])}
    if "dgsm_conf" in result:
        pretty_result["dgsm_conf"] = {k: float(v) for k, v in
                                      zip(problem["names"], result["dgsm_conf"])}
    if "mu" in result:
        pretty_result["mu"] = {k: float(v) for k, v in
                               zip(result["names"], result["mu"])}
    if "mu_star" in result:
        pretty_result["mu_star"] = {k: float(v) for k, v in
                                    zip(result["names"], result["mu_star"])}
    if "mu_star_conf" in result:
        pretty_result["mu_star_conf"] = {k: float(v) for k, v in
                                         zip(result["names"], result["mu_star_conf"])}
    if "sigma" in result:
        pretty_result["sigma"] = {k: float(v) for k, v in
                                  zip(result["names"], result["sigma"])}

    return pretty_result
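A hypothetical call of the helper above, shown only to illustrate the flow (the `lake_model` object and the response name "reliability" are placeholders; any Rhodium-style model exposing `uncertainties` would be driven the same way):

# Placeholder model and response name; keyword arguments are forwarded to the
# SALib sampler and analyzer for the chosen method after _cleanup_kwargs filtering.
result = sa(lake_model, "reliability", method="ff", nsamples=1000, second_order=True)
print(result)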
import sys

from SALib.sample.ff import sample
from SALib.analyze.ff import analyze
from SALib.test_functions import Ishigami
from SALib.util import read_param_file

sys.path.append('../..')

# Read the parameter range file and generate samples
problem = read_param_file('../../SALib/test_functions/params/Ishigami.txt')
# or define manually without a parameter file:
# problem = {
#     'num_vars': 3,
#     'names': ['x1', 'x2', 'x3'],
#     'groups': None,
#     'bounds': [[-3.14159265359, 3.14159265359],
#                [-3.14159265359, 3.14159265359],
#                [-3.14159265359, 3.14159265359]]
# }

# Generate samples
X = sample(problem)

# Run the "model" -- this will happen offline for external models
Y = X[:, 0] + (0.1 * X[:, 1]) + ((1.2 * X[:, 2]) * (0.2 + X[:, 0]))

# Perform the sensitivity analysis using the model output
# Specify which column of the output file to analyze (zero-indexed)
analyze(problem, X, Y, second_order=True, print_to_console=True)

# Returns a dictionary with keys 'ME' (main effect) and 'IE' (interaction effect)
# The technique bulks out the number of parameters with dummy parameters to the
# nearest 2**n. Any results involving dummy parameters should be treated with
# a sceptical eye.
import sys

from SALib.analyze.ff import analyze
from SALib.sample.ff import sample
from SALib.util import read_param_file

sys.path.append('../..')

# Read the parameter range file and generate samples
problem = read_param_file('../../src/SALib/test_functions/params/Ishigami.txt')
# or define manually without a parameter file:
# problem = {
#     'num_vars': 3,
#     'names': ['x1', 'x2', 'x3'],
#     'groups': None,
#     'bounds': [[-3.14159265359, 3.14159265359],
#                [-3.14159265359, 3.14159265359],
#                [-3.14159265359, 3.14159265359]]
# }

# Generate samples
X = sample(problem)

# Run the "model" -- this will happen offline for external models
Y = X[:, 0] + (0.1 * X[:, 1]) + ((1.2 * X[:, 2]) * (0.2 + X[:, 0]))

# Perform the sensitivity analysis using the model output
analyze(problem, X, Y, second_order=True, print_to_console=True)

# Returns a dictionary with keys 'ME' (main effect) and 'IE' (interaction effect)
# The technique bulks out the number of parameters with dummy parameters to the
# nearest 2**n. Any results involving dummy parameters should be treated with
# a sceptical eye.
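Following the caveat about dummy parameters, one way to screen them out of the results is the to_df() conversion exercised in test_ff_to_df above. A sketch, assuming the index layout shown there ('dummy_0' entries in the main-effect index and ('x1', 'dummy_0')-style tuples in the interaction index):

Si = analyze(problem, X, Y, second_order=True, print_to_console=False)
main_effect, inter_effect = Si.to_df()

# Drop rows that involve a dummy parameter before interpreting the effects.
main_effect = main_effect[~main_effect.index.astype(str).str.startswith('dummy_')]
mask = [not any(str(p).startswith('dummy_') for p in pair)
        for pair in inter_effect.index]
inter_effect = inter_effect[mask]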
def run(self, input_ids=None, output_ids=None, method=None,
        calc_second_order=None, conf_level=None, **kwargs):
    self._update_parameters(method, calc_second_order, conf_level)
    self.other_parameters = kwargs

    if input_ids is None:
        input_ids = range(self.n_inputs)

    self.problem = {
        "num_vars": len(input_ids),
        "names": np.array(self.input_names)[input_ids].tolist(),
        "bounds": np.array(self.input_bounds)[input_ids].tolist()
    }

    if output_ids is None:
        output_ids = range(self.n_outputs)
    n_outputs = len(output_ids)

    if self.method.lower() == "sobol":
        self.logger.warning("'sobol' method requires 'saltelli' sampling scheme!")
        # Additional keyword parameters and their defaults:
        # calc_second_order (bool): Calculate second-order sensitivities (default True)
        # num_resamples (int): The number of resamples used to compute the confidence intervals (default 1000)
        # conf_level (float): The confidence interval level (default 0.95)
        # print_to_console (bool): Print results directly to console (default False)
        # parallel: False,
        # n_processors: None
        self.analyzer = lambda output: sobol.analyze(
            self.problem, output,
            calc_second_order=self.calc_second_order,
            conf_level=self.conf_level,
            num_resamples=self.other_parameters.get("num_resamples", 1000),
            parallel=self.other_parameters.get("parallel", False),
            n_processors=self.other_parameters.get("n_processors", None),
            print_to_console=self.other_parameters.get("print_to_console", False))

    elif np.in1d(self.method.lower(), ["latin", "delta"]):
        self.logger.warning("'latin' sampling scheme is recommended for 'delta' method!")
        # Additional keyword parameters and their defaults:
        # num_resamples (int): The number of resamples used to compute the confidence intervals (default 1000)
        # conf_level (float): The confidence interval level (default 0.95)
        # print_to_console (bool): Print results directly to console (default False)
        self.analyzer = lambda output: delta.analyze(
            self.problem, self.input_samples[:, input_ids], output,
            conf_level=self.conf_level,
            num_resamples=self.other_parameters.get("num_resamples", 1000),
            print_to_console=self.other_parameters.get("print_to_console", False))

    elif np.in1d(self.method.lower(), ["fast", "fast_sampler"]):
        self.logger.warning("'fast' method requires 'fast_sampler' sampling scheme!")
        # Additional keyword parameters and their defaults:
        # M (int): The interference parameter, i.e., the number of harmonics to sum in
        #          the Fourier series decomposition (default 4)
        # print_to_console (bool): Print results directly to console (default False)
        self.analyzer = lambda output: fast.analyze(
            self.problem, output,
            M=self.other_parameters.get("M", 4),
            print_to_console=self.other_parameters.get("print_to_console", False))

    elif np.in1d(self.method.lower(), ["ff", "fractional_factorial"]):
        # Additional keyword parameters and their defaults:
        # second_order (bool, default=False): Include interaction effects
        # print_to_console (bool, default=False): Print results directly to console
        self.logger.warning(
            "'fractional_factorial' method requires 'fractional_factorial' sampling scheme!")
        # ff.analyze only accepts second_order and print_to_console, so the
        # calc_second_order setting is mapped onto second_order here.
        self.analyzer = lambda output: ff.analyze(
            self.problem, self.input_samples[:, input_ids], output,
            second_order=self.calc_second_order,
            print_to_console=self.other_parameters.get("print_to_console", False))

    elif self.method.lower() == "morris":
        self.logger.warning("'morris' method requires 'morris' sampling scheme!")
        # Additional keyword parameters and their defaults:
        # num_resamples (int): The number of resamples used to compute the confidence intervals (default 1000)
        # conf_level (float): The confidence interval level (default 0.95)
        # print_to_console (bool): Print results directly to console (default False)
        # grid_jump (int): The grid jump size, must be identical to the value passed to
        #                  SALib.sample.morris.sample() (default 2)
        # num_levels (int): The number of grid levels, must be identical to the value passed to
        #                   SALib.sample.morris (default 4)
        self.analyzer = lambda output: morris.analyze(
            self.problem, self.input_samples[:, input_ids], output,
            conf_level=self.conf_level,
            grid_jump=self.other_parameters.get("grid_jump", 2),
            num_levels=self.other_parameters.get("num_levels", 4),
            num_resamples=self.other_parameters.get("num_resamples", 1000),
            print_to_console=self.other_parameters.get("print_to_console", False))

    elif self.method.lower() == "dgsm":
        # num_resamples (int): The number of resamples used to compute the confidence intervals (default 1000)
        # conf_level (float): The confidence interval level (default 0.95)
        # print_to_console (bool): Print results directly to console (default False)
        self.analyzer = lambda output: dgsm.analyze(
            self.problem, self.input_samples[:, input_ids], output,
            conf_level=self.conf_level,
            num_resamples=self.other_parameters.get("num_resamples", 1000),
            print_to_console=self.other_parameters.get("print_to_console", False))

    else:
        raise_value_error("Method " + str(self.method) +
                          " is not one of the available methods " + str(METHODS) + " !")

    output_names = []
    results = []
    for io in output_ids:
        output_names.append(self.output_names[io])
        results.append(self.analyzer(self.output_values[:, io]))

    # TODO: Adjust list_of_dicts_to_dicts_of_ndarrays to handle ndarray concatenation
    results = list_of_dicts_to_dicts_of_ndarrays(results)
    results.update({"output_names": output_names})
    return results