Example 1
def test_finite_diff_sample_seed():
    N, problem = problem_setup()

    sample1 = finite_diff.sample(problem, N, skip_values=1001)
    sample2 = finite_diff.sample(problem, N, skip_values=1002)

    np.testing.assert_equal(np.any(np.not_equal(sample1, sample2)), True)
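
A minimal companion sketch, assuming the same SALib version as the test above and NumPy (the inline problem dict stands in for the problem_setup() helper): calls with identical arguments reproduce the same design, while changing skip_values shifts the underlying low-discrepancy sequence, which is what the assertion above relies on.

import numpy as np
from SALib.sample import finite_diff

# Ishigami-style problem, standing in for problem_setup() (assumption)
problem = {
    "num_vars": 3,
    "names": ["x1", "x2", "x3"],
    "bounds": [[-3.14159265359, 3.14159265359]] * 3,
}

# The sampler is deterministic for fixed arguments, so repeated calls match...
a = finite_diff.sample(problem, 100, skip_values=1024)
b = finite_diff.sample(problem, 100, skip_values=1024)
np.testing.assert_array_equal(a, b)

# ...while a different skip value yields a different design, as the test asserts.
c = finite_diff.sample(problem, 100, skip_values=2048)
assert np.any(np.not_equal(a, c))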
Example 2
    def redraw(self, random_method: str):
        """Redraw the random number with the given method

        :param random_method: the random method to use

        """
        problem = {
            "num_vars": self.num_dim,
            "names": list(range(self.num_dim)),
            "bounds": [[0, 1]] * self.num_dim,
        }
        if random_method == "pseudo_random":
            seq = np.random.random((self.bucket_size, 2))
        elif random_method == "sobol_sequence":
            seq = sobol_sequence.sample(self.bucket_size, 2)
        elif random_method == "saltelli":
            seq = saltelli.sample(problem, self.bucket_size, calc_second_order=False)
        elif random_method == "latin_hypercube":
            seq = latin.sample(problem, self.bucket_size)
        elif random_method == "finite_differences":
            seq = finite_diff.sample(problem, self.bucket_size)
        elif random_method == "fast":
            seq = fast_sampler.sample(problem, self.bucket_size, M=45)
        else:
            raise ValueError(f"Unknown random method {random_method}")
        self.random_draws[random_method] = seq
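
A standalone sketch with assumed imports (not part of the class above): the samplers dispatched in redraw do not all return bucket_size rows, because Saltelli and finite-difference designs expand the base N by a method-dependent factor, so the stored draws can differ in length.

from SALib.sample import saltelli, latin, finite_diff

problem = {"num_vars": 2, "names": ["x0", "x1"], "bounds": [[0, 1]] * 2}
n = 8  # small base sample for illustration

print(latin.sample(problem, n).shape)                              # (8, 2)
print(finite_diff.sample(problem, n).shape)                        # (24, 2): N * (D + 1)
print(saltelli.sample(problem, n, calc_second_order=False).shape)  # (32, 2): N * (D + 2)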
Example 3
def test_dgsm_to_df():
    params = ['x1', 'x2', 'x3']
    problem = {
        'num_vars': 3,
        'names': params,
        'groups': None,
        'bounds': [[-3.14159265359, 3.14159265359],
                   [-3.14159265359, 3.14159265359],
                   [-3.14159265359, 3.14159265359]]
    }

    param_values = finite_diff.sample(problem, 1000, delta=0.001)
    Y = Ishigami.evaluate(param_values)

    Si = dgsm.analyze(problem, param_values, Y, print_to_console=False)
    Si_df = Si.to_df()

    assert isinstance(Si_df, pd.DataFrame), \
        "DGSM Si: Expected DataFrame, got {}".format(type(Si_df))
    assert set(Si_df.index) == set(params), "Incorrect index in DataFrame"

    col_names = ['vi', 'vi_std', 'dgsm', 'dgsm_conf']
    assert set(Si_df.columns) == set(col_names), \
        "Unexpected column names in DataFrame"
Example 4
def test_regression_dgsm():
    param_file = 'src/SALib/test_functions/params/Ishigami.txt'
    problem = read_param_file(param_file)
    param_values = finite_diff.sample(problem, 10000, delta=0.001)

    Y = Ishigami.evaluate(param_values)

    Si = dgsm.analyze(problem, param_values, Y,
                      conf_level=0.95, print_to_console=False)

    assert_allclose(Si['dgsm'], [2.229, 7.066, 3.180], atol=5e-2, rtol=1e-1)
Example 5
    def redraw(self, random_method):
        problem = {'num_vars': 2, 'names': ['x', 'y'], 'bounds': [[0, 1]] * 2}
        if random_method == 'pseudo_random':
            seq = np.random.random((NUM_DATA_POINTS, 2))
        elif random_method == 'sobol_sequence':
            seq = sobol_sequence.sample(NUM_DATA_POINTS, 2)
        elif random_method == 'saltelli':
            seq = saltelli.sample(problem,
                                  NUM_DATA_POINTS,
                                  calc_second_order=False)
        elif random_method == 'latin_hypercube':
            seq = latin.sample(problem, NUM_DATA_POINTS)
        elif random_method == 'finite_differences':
            seq = finite_diff.sample(problem, NUM_DATA_POINTS)
        elif random_method == 'fast':
            seq = fast_sampler.sample(problem, NUM_DATA_POINTS, M=45)
        self.random_draws[random_method] = seq
Example 6
    def analyze(self):
        """Initiate the analysis, and stores the result at data directory.

        Generates:
            Analysis result at 'acbm/data/output/dgsm.txt'.

        """

        X = finite_diff.sample(self.problem,
                               self.n_samples,
                               delta=self.delta,
                               seed=self.seed_sample)
        Y = ACBM.evaluate(X)
        si = dgsm.analyze(self.problem, X, Y, seed=self.seed_analyze)

        # scale down the values of vi
        si['vi'] = [x**(1 / 16) for x in si['vi']]
        pickle.dump(si, open(self.path_output + 'dgsm.txt', 'wb'))
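
A small follow-up sketch (hypothetical, not part of the class; it assumes the output path named in the docstring above): the pickled result can be reloaded later for inspection.

import pickle

path_output = 'acbm/data/output/'  # assumed to match self.path_output above
with open(path_output + 'dgsm.txt', 'rb') as fh:
    si = pickle.load(fh)

print(si['dgsm'])    # one DGSM measure per parameter
print(si['vi'])      # the rescaled vi values stored above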
Example 7
def test_dgsm_to_df():
    params = ['x1', 'x2', 'x3']
    problem = {
        'num_vars': 3,
        'names': params,
        'groups': None,
        'bounds': [[-3.14159265359, 3.14159265359],
                   [-3.14159265359, 3.14159265359],
                   [-3.14159265359, 3.14159265359]]
    }

    param_values = finite_diff.sample(problem, 1000, delta=0.001)
    Y = Ishigami.evaluate(param_values)

    Si = dgsm.analyze(problem, param_values, Y, print_to_console=False)
    Si_df = Si.to_df()

    assert isinstance(Si_df, pd.DataFrame), \
        "DGSM Si: Expected DataFrame, got {}".format(type(Si_df))
    assert set(Si_df.index) == set(params), "Incorrect index in DataFrame"

    col_names = ['vi', 'vi_std', 'dgsm', 'dgsm_conf']
    assert set(Si_df.columns) == set(col_names), \
        "Unexpected column names in DataFrame"
Example 8
}
t0 = time.time()
for obj in paramSet['input']:
    problem['names'].append(str(obj['ID']))
    if min(obj['Sample Values']) == 1e-8 and max(obj['Sample Values']) == 1e-8:
        problem['bounds'].append([0, 0.1])
    else:
        problem['bounds'].append([min(obj['Sample Values']), max(obj['Sample Values'])])
    problem['num_vars'] += 1
problem_load = time.time() - t0
print('=== problem formulated in %d seconds ===' % problem_load)

'''Generate samples'''
N = 5
t0 = time.time()
param_values = finite_diff.sample(problem, N)
param_calc = time.time() - t0
print('=== parameters generated in %d seconds ===' % param_calc)
print(param_values.shape)
'''load Gaussian process models'''
gpFEE = load('gpFEEMetaModel4000.joblib')
gpPMV = load('gpPMVMetaModel4000.joblib')

'''run analysis for both models'''
try:
    # test = np.load('NA')
    outFEE = np.load('outFEE%d.npy' % N)
    outPMV = np.load('outPMV%d.npy' % N)
    print('=== saved outputs loaded ===')
except FileNotFoundError:
    outFEE = np.zeros([param_values.shape[0]])
Example 9
import sys

from SALib.analyze import dgsm
from SALib.sample import finite_diff
from SALib.test_functions import Ishigami
from SALib.util import read_param_file

sys.path.append('../..')

# Read the parameter range file and generate samples
problem = read_param_file('../../src/SALib/test_functions/params/Ishigami.txt')

# Generate samples
param_values = finite_diff.sample(problem, 1000, delta=0.001)

# Run the "model" -- this will happen offline for external models
Y = Ishigami.evaluate(param_values)

# Perform the sensitivity analysis using the model output
# Specify which column of the output file to analyze (zero-indexed)
Si = dgsm.analyze(problem,
                  param_values,
                  Y,
                  conf_level=0.95,
                  print_to_console=True)
# Returns a dictionary with keys 'vi', 'vi_std', 'dgsm', and 'dgsm_conf'
# e.g. Si['vi'] contains the sensitivity measure for each parameter, in
# the same order as the parameter file

# For comparison, Morris mu* < sqrt(v_i)
# and total order S_tot <= dgsm, following Sobol and Kucherenko (2009)
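
A brief follow-up sketch using the problem and Si from the script above: the returned arrays are ordered like problem['names'], so the bounds mentioned in the comments can be checked directly.

import numpy as np

for name, vi, nu in zip(problem['names'], Si['vi'], Si['dgsm']):
    # sqrt(vi) is an upper bound on Morris' mu*, and the DGSM measure bounds
    # the total-order index (Sobol' and Kucherenko, 2009).
    print(f"{name}: sqrt(vi) = {np.sqrt(vi):.3f}, dgsm = {nu:.3f}")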
Example 10
    def sample(self, num_samples=10000):
        param_values = finite_diff.sample(self.problem, num_samples)
        return param_values
Example 11
def sa(model, response, policy={}, method="sobol", nsamples=1000, **kwargs):
    if len(model.uncertainties) == 0:
        raise ValueError("no uncertainties defined in model")

    problem = {
        'num_vars': len(model.uncertainties),
        'names': model.uncertainties.keys(),
        'bounds': [[0.0, 1.0] for u in model.uncertainties],
        'groups': kwargs.get("groups", None)
    }

    # estimate the argument N passed to the sampler that produces the requested
    # number of samples
    N = _predict_N(method, nsamples, problem["num_vars"], kwargs)

    # generate the samples
    if method == "sobol":
        samples = saltelli.sample(problem, N,
                                  **_cleanup_kwargs(saltelli.sample, kwargs))
    elif method == "morris":
        samples = morris_sampler.sample(
            problem, N, **_cleanup_kwargs(morris_sampler.sample, kwargs))
    elif method == "fast":
        samples = fast_sampler.sample(
            problem, N, **_cleanup_kwargs(fast_sampler.sample, kwargs))
    elif method == "ff":
        samples = ff_sampler.sample(
            problem, **_cleanup_kwargs(ff_sampler.sample, kwargs))
    elif method == "dgsm":
        samples = finite_diff.sample(
            problem, N, **_cleanup_kwargs(finite_diff.sample, kwargs))
    elif method == "delta":
        if "samples" in kwargs:
            samples = kwargs["samples"]
        else:
            samples = latin.sample(problem, N,
                                   **_cleanup_kwargs(latin.sample, kwargs))

    # convert from samples in [0, 1] to uncertainty domain
    for i, u in enumerate(model.uncertainties):
        samples[:, i] = u.ppf(samples[:, i])

    # run the model and collect the responses
    responses = np.empty(samples.shape[0])

    for i in range(samples.shape[0]):
        sample = {k: v for k, v in zip(model.uncertainties.keys(), samples[i])}
        responses[i] = evaluate(model, overwrite(sample, policy))[response]

    # run the sensitivity analysis method
    if method == "sobol":
        result = sobol.analyze(problem, responses,
                               **_cleanup_kwargs(sobol.analyze, kwargs))
    elif method == "morris":
        result = morris_analyzer.analyze(
            problem, samples, responses,
            **_cleanup_kwargs(morris_analyzer.analyze, kwargs))
    elif method == "fast":
        result = fast.analyze(problem, responses,
                              **_cleanup_kwargs(fast.analyze, kwargs))
    elif method == "ff":
        result = ff_analyzer.analyze(
            problem, samples, responses,
            **_cleanup_kwargs(ff_analyzer.analyze, kwargs))
    elif method == "dgsm":
        result = dgsm.analyze(problem, samples, responses,
                              **_cleanup_kwargs(dgsm.analyze, kwargs))
    elif method == "delta":
        result = delta.analyze(problem, samples, responses,
                               **_cleanup_kwargs(delta.analyze, kwargs))

    # convert the SALib results into a form allowing pretty printing and
    # lookups using the parameter name
    pretty_result = SAResult(
        list(result["names"] if "names" in result else problem["names"]))

    if "S1" in result:
        pretty_result["S1"] = {
            k: float(v)
            for k, v in zip(problem["names"], result["S1"])
        }
    if "S1_conf" in result:
        pretty_result["S1_conf"] = {
            k: float(v)
            for k, v in zip(problem["names"], result["S1_conf"])
        }
    if "ST" in result:
        pretty_result["ST"] = {
            k: float(v)
            for k, v in zip(problem["names"], result["ST"])
        }
    if "ST_conf" in result:
        pretty_result["ST_conf"] = {
            k: float(v)
            for k, v in zip(problem["names"], result["ST_conf"])
        }
    if "S2" in result:
        pretty_result["S2"] = _S2_to_dict(result["S2"], problem)
    if "S2_conf" in result:
        pretty_result["S2_conf"] = _S2_to_dict(result["S2_conf"], problem)
    if "delta" in result:
        pretty_result["delta"] = {
            k: float(v)
            for k, v in zip(problem["names"], result["delta"])
        }
    if "delta_conf" in result:
        pretty_result["delta_conf"] = {
            k: float(v)
            for k, v in zip(problem["names"], result["delta_conf"])
        }
    if "vi" in result:
        pretty_result["vi"] = {
            k: float(v)
            for k, v in zip(problem["names"], result["vi"])
        }
    if "vi_std" in result:
        pretty_result["vi_std"] = {
            k: float(v)
            for k, v in zip(problem["names"], result["vi_std"])
        }
    if "dgsm" in result:
        pretty_result["dgsm"] = {
            k: float(v)
            for k, v in zip(problem["names"], result["dgsm"])
        }
    if "dgsm_conf" in result:
        pretty_result["dgsm_conf"] = {
            k: float(v)
            for k, v in zip(problem["names"], result["dgsm_conf"])
        }
    if "mu" in result:
        pretty_result["mu"] = {
            k: float(v)
            for k, v in zip(result["names"], result["mu"])
        }
    if "mu_star" in result:
        pretty_result["mu_star"] = {
            k: float(v)
            for k, v in zip(result["names"], result["mu_star"])
        }
    if "mu_star_conf" in result:
        pretty_result["mu_star_conf"] = {
            k: float(v)
            for k, v in zip(result["names"], result["mu_star_conf"])
        }
    if "sigma" in result:
        pretty_result["sigma"] = {
            k: float(v)
            for k, v in zip(result["names"], result["sigma"])
        }

    return pretty_result
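
The comment about _predict_N above glosses over the arithmetic. The sketch below is a hypothetical illustration (not the actual helper used by this code) of how SALib's documented sample-size formulas could be inverted so that the chosen sampler returns roughly nsamples model evaluations.

def _predict_N_sketch(method, nsamples, nvars, kwargs):
    # Hypothetical helper, shown only to illustrate the sample-size arithmetic.
    if method == "sobol":
        if kwargs.get("calc_second_order", True):
            return max(1, nsamples // (2 * nvars + 2))  # saltelli: N*(2D+2) rows
        return max(1, nsamples // (nvars + 2))          # saltelli: N*(D+2) rows
    elif method in ("morris", "dgsm"):
        return max(1, nsamples // (nvars + 1))          # N*(D+1) rows
    elif method == "fast":
        return max(1, nsamples // nvars)                # fast_sampler: N*D rows
    else:
        return nsamples                                 # latin/delta: N rows; ff ignores N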
Example 12
        [lob_search, upb_search],
        [lob_search, upb_search],
        [lob_search, upb_search],
        [lob_search, upb_search],
    ]
}

## Generate samples
if method_flag == 1:
    param_values = saltelli.sample(problem, sample_number)
    parm = (param_values + 1) * 7e10
elif method_flag == 2:
    param_values = latin.sample(problem, sample_number)
    parm = (param_values + 1) * 7e10
elif method_flag == 3:
    param_values = finite_diff.sample(problem, sample_number, delta=0.001)
    parm = (param_values + 1) * 7e10
elif method_flag == 4:
    param_values = fast_sampler.sample(problem, sample_number)
    parm = (param_values + 1) * 7e10
elif method_flag == 5:
    param_values = ff_sample(problem)
    parm = (param_values[:, :21] + 1) * 7e10
elif method_flag == 6:
    param_values = morris_sample(problem, N=sample_number, num_levels=4, grid_jump=2, \
                      optimal_trajectories=None)
    parm = (param_values + 1) * 7e10
elif method_flag == 7:
    param_values = saltelli.sample(problem, sample_number)
    parm = (param_values + 1) * 7e10
Example 13
import sys

from SALib.analyze import dgsm
from SALib.sample import finite_diff
from SALib.test_functions import Ishigami
from SALib.util import read_param_file


sys.path.append('../..')


# Read the parameter range file and generate samples
problem = read_param_file('../../SALib/test_functions/params/Ishigami.txt')

# Generate samples
param_values = finite_diff.sample(problem, 1000, delta=0.001)

# Run the "model" -- this will happen offline for external models
Y = Ishigami.evaluate(param_values)

# Perform the sensitivity analysis using the model output
# Specify which column of the output file to analyze (zero-indexed)
Si = dgsm.analyze(problem, param_values, Y, conf_level=0.95, print_to_console=False)
# Returns a dictionary with keys 'vi', 'vi_std', 'dgsm', and 'dgsm_conf'
# e.g. Si['vi'] contains the sensitivity measure for each parameter, in
# the same order as the parameter file

# For comparison, Morris mu* < sqrt(v_i)
# and total order S_tot <= dgsm, following Sobol and Kucherenko (2009)
Example 14
def sa(model, response, policy={}, method="sobol", nsamples=1000, **kwargs):
    if len(model.uncertainties) == 0:
        raise ValueError("no uncertainties defined in model")
    
    problem = { 'num_vars' : len(model.uncertainties),
                'names' : model.uncertainties.keys(),
                'bounds' : [[0.0, 1.0] for u in model.uncertainties],
                'groups' : kwargs.get("groups", None) }

    # estimate the argument N passed to the sampler that produces the requested
    # number of samples
    N = _predict_N(method, nsamples, problem["num_vars"], kwargs)
    
    # generate the samples
    if method == "sobol":
        samples = saltelli.sample(problem, N, **_cleanup_kwargs(saltelli.sample, kwargs))
    elif method == "morris":
        samples = morris_sampler.sample(problem, N, **_cleanup_kwargs(morris_sampler.sample, kwargs))
    elif method == "fast":
        samples = fast_sampler.sample(problem, N, **_cleanup_kwargs(fast_sampler.sample, kwargs))
    elif method == "ff":
        samples = ff_sampler.sample(problem, **_cleanup_kwargs(ff_sampler.sample, kwargs))
    elif method == "dgsm":
        samples = finite_diff.sample(problem, N, **_cleanup_kwargs(finite_diff.sample, kwargs))
    elif method == "delta":
        if "samples" in kwargs:
            samples = kwargs["samples"]
        else:
            samples = latin.sample(problem, N, **_cleanup_kwargs(latin.sample, kwargs))
            
    # convert from samples in [0, 1] to uncertainty domain
    for i, u in enumerate(model.uncertainties):
        samples[:,i] = u.ppf(samples[:,i])
        
    # run the model and collect the responses
    responses = np.empty(samples.shape[0])
    
    for i in range(samples.shape[0]):
        sample = {k : v for k, v in zip(model.uncertainties.keys(), samples[i])}
        responses[i] = evaluate(model, overwrite(sample, policy))[response]
    
    # run the sensitivity analysis method
    if method == "sobol":
        result = sobol.analyze(problem, responses, **_cleanup_kwargs(sobol.analyze, kwargs))
    elif method == "morris":
        result = morris_analyzer.analyze(problem, samples, responses, **_cleanup_kwargs(morris_analyzer.analyze, kwargs))
    elif method == "fast":
        result = fast.analyze(problem, responses, **_cleanup_kwargs(fast.analyze, kwargs))
    elif method == "ff":
        result = ff_analyzer.analyze(problem, samples, responses, **_cleanup_kwargs(ff_analyzer.analyze, kwargs))
    elif method == "dgsm":
        result = dgsm.analyze(problem, samples, responses, **_cleanup_kwargs(dgsm.analyze, kwargs))
    elif method == "delta":
        result = delta.analyze(problem, samples, responses, **_cleanup_kwargs(delta.analyze, kwargs))
         
    # convert the SALib results into a form allowing pretty printing and
    # lookups using the parameter name
    pretty_result = SAResult(result["names"] if "names" in result else problem["names"])
    
    if "S1" in result:
        pretty_result["S1"] = {k : float(v) for k, v in zip(problem["names"], result["S1"])}
    if "S1_conf" in result:
        pretty_result["S1_conf"] = {k : float(v) for k, v in zip(problem["names"], result["S1_conf"])}
    if "ST" in result:
        pretty_result["ST"] = {k : float(v) for k, v in zip(problem["names"], result["ST"])}
    if "ST_conf" in result:
        pretty_result["ST_conf"] = {k : float(v) for k, v in zip(problem["names"], result["ST_conf"])}
    if "S2" in result:
        pretty_result["S2"] = _S2_to_dict(result["S2"], problem)
    if "S2_conf" in result:
        pretty_result["S2_conf"] = _S2_to_dict(result["S2_conf"], problem)
    if "delta" in result:
        pretty_result["delta"] = {k : float(v) for k, v in zip(problem["names"], result["delta"])}
    if "delta_conf" in result:
        pretty_result["delta_conf"] = {k : float(v) for k, v in zip(problem["names"], result["delta_conf"])}
    if "vi" in result:
        pretty_result["vi"] = {k : float(v) for k, v in zip(problem["names"], result["vi"])}
    if "vi_std" in result:
        pretty_result["vi_std"] = {k : float(v) for k, v in zip(problem["names"], result["vi_std"])}
    if "dgsm" in result:
        pretty_result["dgsm"] = {k : float(v) for k, v in zip(problem["names"], result["dgsm"])}
    if "dgsm_conf" in result:
        pretty_result["dgsm_conf"] = {k : float(v) for k, v in zip(problem["names"], result["dgsm_conf"])}
    if "mu" in result:
        pretty_result["mu"] = {k : float(v) for k, v in zip(result["names"], result["mu"])}
    if "mu_star" in result:
        pretty_result["mu_star"] = {k : float(v) for k, v in zip(result["names"], result["mu_star"])}
    if "mu_star_conf" in result:
        pretty_result["mu_star_conf"] = {k : float(v) for k, v in zip(result["names"], result["mu_star_conf"])}
    if "sigma" in result:
        pretty_result["sigma"] = {k : float(v) for k, v in zip(result["names"], result["sigma"])}

    return pretty_result
Example 15
File: dgsm.py Project: amicol/SALib
import sys
sys.path.append('../..')

from SALib.sample import finite_diff
from SALib.analyze import dgsm
from SALib.test_functions import Ishigami
import numpy as np

# Read the parameter range file and generate samples
param_file = '../../SALib/test_functions/params/Ishigami.txt'

# Generate samples
param_values = finite_diff.sample(1000, param_file, delta=0.001)

# Save the parameter values in a file (they are needed in the analysis)
np.savetxt('model_input.txt', param_values, delimiter=' ')

# Run the "model" and save the output in a text file
# This will happen offline for external models
Y = Ishigami.evaluate(param_values)
np.savetxt('model_output.txt', Y, delimiter=' ')

# Perform the sensitivity analysis using the model output
# Specify which column of the output file to analyze (zero-indexed)
Si = dgsm.analyze(param_file,
                  'model_input.txt',
                  'model_output.txt',
                  column=0,
                  conf_level=0.95,
                  print_to_console=False)
# Returns a dictionary with keys 'vi', 'vi_std', 'dgsm', and 'dgsm_conf'
Example 16
File: dgsm.py Project: amicol/SALib
import sys
sys.path.append('../..')

from SALib.sample import finite_diff
from SALib.analyze import dgsm
from SALib.test_functions import Ishigami
import numpy as np

# Read the parameter range file and generate samples
param_file = '../../SALib/test_functions/params/Ishigami.txt'

# Generate samples
param_values = finite_diff.sample(1000, param_file, delta=0.001)

# Save the parameter values in a file (they are needed in the analysis)
np.savetxt('model_input.txt', param_values, delimiter=' ')

# Run the "model" and save the output in a text file
# This will happen offline for external models
Y = Ishigami.evaluate(param_values)
np.savetxt('model_output.txt', Y, delimiter=' ')

# Perform the sensitivity analysis using the model output
# Specify which column of the output file to analyze (zero-indexed)
Si = dgsm.analyze(param_file, 'model_input.txt', 'model_output.txt',
                  column=0, conf_level=0.95, print_to_console=False)
# Returns a dictionary with keys 'vi', 'vi_std', 'dgsm', and 'dgsm_conf'
# e.g. Si['vi'] contains the sensitivity measure for each parameter, in
# the same order as the parameter file

# For comparison, Morris mu* < sqrt(v_i)