Example #1
File: test_to_df.py Project: SALib/SALib
import pandas as pd

from SALib.analyze import ff
from SALib.sample import ff as ff_sample


def test_ff_to_df():
    params = ['x1', 'x2', 'x3']
    main_index = params + ['dummy_0']

    problem = {
        'num_vars': 3,
        'names': params,
        'groups': None,
        'bounds': [[-3.14159265359, 3.14159265359],
                   [-3.14159265359, 3.14159265359],
                   [-3.14159265359, 3.14159265359]]
    }

    X = ff_sample.sample(problem)
    Y = X[:, 0] + (0.1 * X[:, 1]) + ((1.2 * X[:, 2]) * (0.2 + X[:, 0]))
    Si = ff.analyze(problem, X, Y, second_order=True, print_to_console=False)
    main_effect, inter_effect = Si.to_df()

    assert isinstance(main_effect, pd.DataFrame), \
        "FF ME: Expected DataFrame, got {}".format(type(main_effect))
    assert isinstance(inter_effect, pd.DataFrame), \
        "FF IE: Expected DataFrame, got {}".format(type(inter_effect))
    assert set(main_effect.index) == set(main_index), \
        "Incorrect index in Main Effect DataFrame"

    inter_index = set([('x1', 'x2'),
                       ('x1', 'x3'),
                       ('x2', 'x3'),
                       ('x1', 'dummy_0'),
                       ('x2', 'dummy_0'),
                       ('x3', 'dummy_0')])
    assert set(inter_effect.index) == inter_index, \
        "Incorrect index in Interaction Effect DataFrame"
Example #2
def test_ff_to_df():
    params = ['x1', 'x2', 'x3']
    main_index = params + ['dummy_0']

    problem = {
        'num_vars': 3,
        'names': params,
        'groups': None,
        'bounds': [[-3.14159265359, 3.14159265359],
                   [-3.14159265359, 3.14159265359],
                   [-3.14159265359, 3.14159265359]]
    }

    X = ff_sample.sample(problem)
    Y = X[:, 0] + (0.1 * X[:, 1]) + ((1.2 * X[:, 2]) * (0.2 + X[:, 0]))
    Si = ff.analyze(problem, X, Y, second_order=True, print_to_console=False)
    main_effect, inter_effect = Si.to_df()

    assert isinstance(main_effect, pd.DataFrame), \
        "FF ME: Expected DataFrame, got {}".format(type(main_effect))
    assert isinstance(inter_effect, pd.DataFrame), \
        "FF IE: Expected DataFrame, got {}".format(type(inter_effect))
    assert set(main_effect.index) == set(main_index), \
        "Incorrect index in Main Effect DataFrame"

    inter_index = set([('x1', 'x2'), ('x1', 'x3'), ('x2', 'x3'),
                       ('x1', 'dummy_0'), ('x2', 'dummy_0'),
                       ('x3', 'dummy_0')])
    assert set(inter_effect.index) == inter_index, \
        "Incorrect index in Interaction Effect DataFrame"
Example #3
import numpy as np
from numpy.testing import assert_equal

from SALib.analyze.ff import analyze
from SALib.sample.ff import sample


def test_interactions_from_saltelli():
    '''Check the interaction effects recovered for a fixed response vector.'''
    problem = {
        'bounds': np.repeat([-1, 1], 12).reshape(2, 12).T,
        'num_vars': 12,
        'names': ["x" + str(x + 1) for x in range(12)]
    }

    X = sample(problem)

    Y = np.array([
        10, -2, 4, -8, 2, 6, -4, 0, 2, 6, -4, 0, 10, -2, 4, -8, -2, -6, 4, 0,
        -10, 2, -4, 8, -10, 2, -4, 8, -2, -6, 4, 0
    ])

    Si = analyze(problem, X, Y, second_order=True)
    actual = Si['IE']
    expected = [
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 4.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
    ]

    assert_equal(actual, expected)
Example #4
def test_ff_example():
    '''Check the main effects recovered for a linear model with one interaction.'''

    problem = {
        'bounds': np.repeat([-1, 1], 12).reshape(2, 12).T,
        'num_vars': 12,
        'names': ["x" + str(x + 1) for x in range(12)]
    }

    X = sample(problem)
    Y = X[:, 0] + 2 * X[:, 1] + 3 * X[:, 2] + 4 * X[:, 6] * X[:, 11]

    expected = np.array([
        10, -2, 4, -8, 2, 6, -4, 0, 2, 6, -4, 0, 10, -2, 4, -8, -2, -6, 4, 0,
        -10, 2, -4, 8, -10, 2, -4, 8, -2, -6, 4, 0
    ])

    assert_equal(Y, expected)

    Si = analyze(problem, X, Y)

    expected = np.array([1, 2, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                        dtype=float)
    assert_equal(expected, Si['ME'])
Example #5
def test_ff_sample():
    problem = {
        'bounds': [[0., 1.], [0., 1.], [0., 1.], [0., 1.]],
        'num_vars': 4,
        'names': ['x1', 'x2', 'x3', 'x4']
    }
    actual = sample(problem)
    expected = np.array(
        [[1, 1, 1, 1], [1, 0, 1, 0], [1, 1, 0, 0], [1, 0, 0, 1], [0, 0, 0, 0],
         [0, 1, 0, 1], [0, 0, 1, 1], [0, 1, 1, 0]],
        dtype=float)
    assert_equal(actual, expected)
Example #6
def _gen_muestrea(símismo, n, ops):
    if símismo.método == 'sobol':
        return saltelli.sample(problem=símismo.problema, N=n, **ops)
    elif símismo.método == 'fast':
        return fast_sampler.sample(problem=símismo.problema, N=n, **ops)
    elif símismo.método == 'morris':
        return morris_muestra.sample(problem=símismo.problema, N=n, **ops)
    elif símismo.método == 'dmim':
        return latin.sample(problem=símismo.problema, N=n)
    elif símismo.método == 'dgsm':
        return saltelli.sample(problem=símismo.problema, N=n)
    elif símismo.método == 'ff':
        return ff_muestra.sample(problem=símismo.problema)
    else:
        raise ValueError('Método de análisis de sensibilidad "{}" no reconocido.'.format(símismo.método))
Example #7
def sa(model, response, policy={}, method="sobol", nsamples=1000, **kwargs):
    if len(model.uncertainties) == 0:
        raise ValueError("no uncertainties defined in model")

    problem = {
        'num_vars': len(model.uncertainties),
        'names': model.uncertainties.keys(),
        'bounds': [[0.0, 1.0] for u in model.uncertainties],
        'groups': kwargs.get("groups", None)
    }

    # estimate the argument N passed to the sampler that produces the requested
    # number of samples
    N = _predict_N(method, nsamples, problem["num_vars"], kwargs)

    # generate the samples
    if method == "sobol":
        samples = saltelli.sample(problem, N,
                                  **_cleanup_kwargs(saltelli.sample, kwargs))
    elif method == "morris":
        samples = morris_sampler.sample(
            problem, N, **_cleanup_kwargs(morris_sampler.sample, kwargs))
    elif method == "fast":
        samples = fast_sampler.sample(
            problem, N, **_cleanup_kwargs(fast_sampler.sample, kwargs))
    elif method == "ff":
        samples = ff_sampler.sample(
            problem, **_cleanup_kwargs(ff_sampler.sample, kwargs))
    elif method == "dgsm":
        samples = finite_diff.sample(
            problem, N, **_cleanup_kwargs(finite_diff.sample, kwargs))
    elif method == "delta":
        if "samples" in kwargs:
            samples = kwargs["samples"]
        else:
            samples = latin.sample(problem, N,
                                   **_cleanup_kwargs(latin.sample, kwargs))

    # convert from samples in [0, 1] to uncertainty domain
    for i, u in enumerate(model.uncertainties):
        samples[:, i] = u.ppf(samples[:, i])

    # run the model and collect the responses
    responses = np.empty(samples.shape[0])

    for i in range(samples.shape[0]):
        sample = {k: v for k, v in zip(model.uncertainties.keys(), samples[i])}
        responses[i] = evaluate(model, overwrite(sample, policy))[response]

    # run the sensitivity analysis method
    if method == "sobol":
        result = sobol.analyze(problem, responses,
                               **_cleanup_kwargs(sobol.analyze, kwargs))
    elif method == "morris":
        result = morris_analyzer.analyze(
            problem, samples, responses,
            **_cleanup_kwargs(morris_analyzer.analyze, kwargs))
    elif method == "fast":
        result = fast.analyze(problem, responses,
                              **_cleanup_kwargs(fast.analyze, kwargs))
    elif method == "ff":
        result = ff_analyzer.analyze(
            problem, samples, responses,
            **_cleanup_kwargs(ff_analyzer.analyze, kwargs))
    elif method == "dgsm":
        result = dgsm.analyze(problem, samples, responses,
                              **_cleanup_kwargs(dgsm.analyze, kwargs))
    elif method == "delta":
        result = delta.analyze(problem, samples, responses,
                               **_cleanup_kwargs(delta.analyze, kwargs))

    # convert the SALib results into a form allowing pretty printing and
    # lookups using the parameter name
    pretty_result = SAResult(
        list(result["names"] if "names" in result else problem["names"]))

    if "S1" in result:
        pretty_result["S1"] = {
            k: float(v)
            for k, v in zip(problem["names"], result["S1"])
        }
    if "S1_conf" in result:
        pretty_result["S1_conf"] = {
            k: float(v)
            for k, v in zip(problem["names"], result["S1_conf"])
        }
    if "ST" in result:
        pretty_result["ST"] = {
            k: float(v)
            for k, v in zip(problem["names"], result["ST"])
        }
    if "ST_conf" in result:
        pretty_result["ST_conf"] = {
            k: float(v)
            for k, v in zip(problem["names"], result["ST_conf"])
        }
    if "S2" in result:
        pretty_result["S2"] = _S2_to_dict(result["S2"], problem)
    if "S2_conf" in result:
        pretty_result["S2_conf"] = _S2_to_dict(result["S2_conf"], problem)
    if "delta" in result:
        pretty_result["delta"] = {
            k: float(v)
            for k, v in zip(problem["names"], result["delta"])
        }
    if "delta_conf" in result:
        pretty_result["delta_conf"] = {
            k: float(v)
            for k, v in zip(problem["names"], result["delta_conf"])
        }
    if "vi" in result:
        pretty_result["vi"] = {
            k: float(v)
            for k, v in zip(problem["names"], result["vi"])
        }
    if "vi_std" in result:
        pretty_result["vi_std"] = {
            k: float(v)
            for k, v in zip(problem["names"], result["vi_std"])
        }
    if "dgsm" in result:
        pretty_result["dgsm"] = {
            k: float(v)
            for k, v in zip(problem["names"], result["dgsm"])
        }
    if "dgsm_conf" in result:
        pretty_result["dgsm_conf"] = {
            k: float(v)
            for k, v in zip(problem["names"], result["dgsm_conf"])
        }
    if "mu" in result:
        pretty_result["mu"] = {
            k: float(v)
            for k, v in zip(result["names"], result["mu"])
        }
    if "mu_star" in result:
        pretty_result["mu_star"] = {
            k: float(v)
            for k, v in zip(result["names"], result["mu_star"])
        }
    if "mu_star_conf" in result:
        pretty_result["mu_star_conf"] = {
            k: float(v)
            for k, v in zip(result["names"], result["mu_star_conf"])
        }
    if "sigma" in result:
        pretty_result["sigma"] = {
            k: float(v)
            for k, v in zip(result["names"], result["sigma"])
        }

    return pretty_result
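
The u.ppf call above is what maps the sampler's output from the unit hypercube onto each uncertainty's real range. Below is a minimal, self-contained sketch of that inverse-CDF transform; the scipy.stats distributions stand in for Rhodium's uncertainty objects and are not part of the original module.

import numpy as np
from scipy import stats

# Illustrative stand-ins for model.uncertainties: name -> frozen distribution
uncertainties = {
    "flow": stats.uniform(loc=10.0, scale=5.0),   # uniform on [10, 15]
    "loss": stats.norm(loc=0.02, scale=0.005),    # normal(mean=0.02, sd=0.005)
}

# Samples as SALib produces them for bounds [0, 1]
samples = np.array([[0.10, 0.50],
                    [0.90, 0.25]])

# Inverse-CDF transform: column i is pushed through the i-th distribution's ppf
for i, dist in enumerate(uncertainties.values()):
    samples[:, i] = dist.ppf(samples[:, i])

print(samples)  # each column now lies in its own uncertainty's domain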
Example #8
File: sa.py Project: arita37/Rhodium
def sa(model, response, policy={}, method="sobol", nsamples=1000, **kwargs):
    if len(model.uncertainties) == 0:
        raise ValueError("no uncertainties defined in model")
    
    problem = { 'num_vars' : len(model.uncertainties),
                'names' : model.uncertainties.keys(),
                'bounds' : [[0.0, 1.0] for u in model.uncertainties],
                'groups' : kwargs.get("groups", None) }

    # estimate the argument N passed to the sampler that produces the requested
    # number of samples
    N = _predict_N(method, nsamples, problem["num_vars"], kwargs)
    
    # generate the samples
    if method == "sobol":
        samples = saltelli.sample(problem, N, **_cleanup_kwargs(saltelli.sample, kwargs))
    elif method == "morris":
        samples = morris_sampler.sample(problem, N, **_cleanup_kwargs(morris_sampler.sample, kwargs))
    elif method == "fast":
        samples = fast_sampler.sample(problem, N, **_cleanup_kwargs(fast_sampler.sample, kwargs))
    elif method == "ff":
        samples = ff_sampler.sample(problem, **_cleanup_kwargs(ff_sampler.sample, kwargs))
    elif method == "dgsm":
        samples = finite_diff.sample(problem, N, **_cleanup_kwargs(finite_diff.sample, kwargs))
    elif method == "delta":
        if "samples" in kwargs:
            samples = kwargs["samples"]
        else:
            samples = latin.sample(problem, N, **_cleanup_kwargs(latin.sample, kwargs))
            
    # convert from samples in [0, 1] to uncertainty domain
    for i, u in enumerate(model.uncertainties):
        samples[:,i] = u.ppf(samples[:,i])
        
    # run the model and collect the responses
    responses = np.empty(samples.shape[0])
    
    for i in range(samples.shape[0]):
        sample = {k : v for k, v in zip(model.uncertainties.keys(), samples[i])}
        responses[i] = evaluate(model, overwrite(sample, policy))[response]
    
    # run the sensitivity analysis method
    if method == "sobol":
        result = sobol.analyze(problem, responses, **_cleanup_kwargs(sobol.analyze, kwargs))
    elif method == "morris":
        result = morris_analyzer.analyze(problem, samples, responses, **_cleanup_kwargs(morris_analyzer.analyze, kwargs))
    elif method == "fast":
        result = fast.analyze(problem, responses, **_cleanup_kwargs(fast.analyze, kwargs))
    elif method == "ff":
        result = ff_analyzer.analyze(problem, samples, responses, **_cleanup_kwargs(ff_analyzer.analyze, kwargs))
    elif method == "dgsm":
        result = dgsm.analyze(problem, samples, responses, **_cleanup_kwargs(dgsm.analyze, kwargs))
    elif method == "delta":
        result = delta.analyze(problem, samples, responses, **_cleanup_kwargs(delta.analyze, kwargs))
         
    # convert the SALib results into a form allowing pretty printing and
    # lookups using the parameter name
    pretty_result = SAResult(result["names"] if "names" in result else problem["names"])
    
    if "S1" in result:
        pretty_result["S1"] = {k : float(v) for k, v in zip(problem["names"], result["S1"])}
    if "S1_conf" in result:
        pretty_result["S1_conf"] = {k : float(v) for k, v in zip(problem["names"], result["S1_conf"])}
    if "ST" in result:
        pretty_result["ST"] = {k : float(v) for k, v in zip(problem["names"], result["ST"])}
    if "ST_conf" in result:
        pretty_result["ST_conf"] = {k : float(v) for k, v in zip(problem["names"], result["ST_conf"])}
    if "S2" in result:
        pretty_result["S2"] = _S2_to_dict(result["S2"], problem)
    if "S2_conf" in result:
        pretty_result["S2_conf"] = _S2_to_dict(result["S2_conf"], problem)
    if "delta" in result:
        pretty_result["delta"] = {k : float(v) for k, v in zip(problem["names"], result["delta"])}
    if "delta_conf" in result:
        pretty_result["delta_conf"] = {k : float(v) for k, v in zip(problem["names"], result["delta_conf"])}
    if "vi" in result:
        pretty_result["vi"] = {k : float(v) for k, v in zip(problem["names"], result["vi"])}
    if "vi_std" in result:
        pretty_result["vi_std"] = {k : float(v) for k, v in zip(problem["names"], result["vi_std"])}
    if "dgsm" in result:
        pretty_result["dgsm"] = {k : float(v) for k, v in zip(problem["names"], result["dgsm"])}
    if "dgsm_conf" in result:
        pretty_result["dgsm_conf"] = {k : float(v) for k, v in zip(problem["names"], result["dgsm_conf"])}
    if "mu" in result:
        pretty_result["mu"] = {k : float(v) for k, v in zip(result["names"], result["mu"])}
    if "mu_star" in result:
        pretty_result["mu_star"] = {k : float(v) for k, v in zip(result["names"], result["mu_star"])}
    if "mu_star_conf" in result:
        pretty_result["mu_star_conf"] = {k : float(v) for k, v in zip(result["names"], result["mu_star_conf"])}
    if "sigma" in result:
        pretty_result["sigma"] = {k : float(v) for k, v in zip(result["names"], result["sigma"])}

    return pretty_result
Example #9
File: ff.py Project: calvinwhealton/SALib
import sys

from SALib.analyze.ff import analyze
from SALib.sample.ff import sample
from SALib.test_functions import Ishigami
from SALib.util import read_param_file

sys.path.append('../..')

# Read the parameter range file and generate samples
problem = read_param_file('../../SALib/test_functions/params/Ishigami.txt')
# or define manually without a parameter file:
# problem = {
#  'num_vars': 3, 
#  'names': ['x1', 'x2', 'x3'], 
#  'groups': None, 
#  'bounds': [[-3.14159265359, 3.14159265359], 
#             [-3.14159265359, 3.14159265359], 
#             [-3.14159265359, 3.14159265359]]
# }

# Generate samples
X = sample(problem)

# Run the "model" -- this will happen offline for external models
Y = X[:, 0] + (0.1 * X[:, 1]) + ((1.2 * X[:, 2]) * (0.2 + X[:, 0]))

# Perform the sensitivity analysis using the model output
analyze(problem, X, Y, second_order=True, print_to_console=True)
# Returns a dictionary with keys 'ME' (main effect) and 'IE' (interaction effect).
# The technique bulks out the number of parameters with dummy parameters to the
# nearest 2**n.  Any results involving dummy parameters should be treated with
# a sceptical eye.
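
The call above prints the results but discards the returned dictionary. A short follow-on sketch (the variable handling is ours, not part of the original example) showing how the 'ME' and 'IE' entries can be read back:

Si = analyze(problem, X, Y, second_order=True, print_to_console=False)

# Main effects: the real parameters come first, followed by any dummy
# parameters added to pad the design to the next power of two
for name, me in zip(problem['names'], Si['ME']):
    print(name, me)

# Interaction effects for every parameter pair (requested via second_order=True)
print(Si['IE'])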
Example #10
File: ff.py Project: zha/SALib
import sys

from SALib.analyze.ff import analyze
from SALib.sample.ff import sample
from SALib.util import read_param_file

sys.path.append('../..')

# Read the parameter range file and generate samples
problem = read_param_file('../../src/SALib/test_functions/params/Ishigami.txt')
# or define manually without a parameter file:
# problem = {
#  'num_vars': 3,
#  'names': ['x1', 'x2', 'x3'],
#  'groups': None,
#  'bounds': [[-3.14159265359, 3.14159265359],
#             [-3.14159265359, 3.14159265359],
#             [-3.14159265359, 3.14159265359]]
# }

# Generate samples
X = sample(problem)

# Run the "model" -- this will happen offline for external models
Y = X[:, 0] + (0.1 * X[:, 1]) + ((1.2 * X[:, 2]) * (0.2 + X[:, 0]))

# Perform the sensitivity analysis using the model output
analyze(problem, X, Y, second_order=True, print_to_console=True)
# Returns a dictionary with keys 'ME' (main effect) and 'IE' (interaction effect)
# The technique bulks out the number of parameters with dummy parameters to the
# nearest 2**n.  Any results involving dummy parameters should be treated with
# a sceptical eye.
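
As Examples #1 and #2 show, the returned object also exposes to_df(); a short sketch of using it here (the assignment names are ours):

Si = analyze(problem, X, Y, second_order=True, print_to_console=False)

# to_df() splits the results into a main-effect table and an interaction-effect
# table, indexed by parameter name (plus 'dummy_0' for the padding parameter)
main_effect, inter_effect = Si.to_df()
print(main_effect)
print(inter_effect)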
Example #11
File: Sensib.py Project: agronholm/Tikon
def prep_anal_sensib(método, n, problema, opciones):
    """

    :param método:
    :type método: str
    :param n:
    :type n: int
    :param problema:
    :type problema: dict
    :param opciones:
    :type opciones: dict
    :return:
    :rtype:
    """

    método_mín = método.lower()

    if método_mín == 'sobol':
        # Prepare options
        conv_ops_muestrear = {'calc_segundo_orden': 'calc_second_order'}
        conv_ops_anlz = {'calc_segundo_orden': 'calc_second_order', 'núm_remuestreos': 'num_resamples',
                         'nivel_conf': 'conf_level', 'paralelo': 'parallel', 'n_procesadores': 'n_processors'}

        # Options for the sampling and analysis functions
        ops_muestrear = {conv_ops_muestrear[a]: val for a, val in opciones.items() if a in conv_ops_muestrear}
        ops_anlz = {conv_ops_anlz[a]: val for a, val in opciones.items() if a in conv_ops_anlz}

        # Work out which parameter values we need to run for the Sobol analysis
        vals_paráms = saltelli.sample(problem=problema, N=n, **ops_muestrear)

        # The analysis function
        fun_anlz = sobol.analyze

    elif método_mín == 'fast':
        # Prepare options
        if 'M' in opciones:
            ops_muestrear = ops_anlz = {'M': opciones['M']}
        else:
            ops_muestrear = ops_anlz = {}

        # Generate the sample for FAST
        vals_paráms = fast_sampler.sample(problem=problema, N=n, **ops_muestrear)

        # The analysis function
        fun_anlz = fast.analyze

    elif método_mín == 'morris':
        # Prepare options
        conv_ops_muestrear = {'núm_niveles': 'num_levels', 'salto_cuadr': 'grid_jump',
                              'traj_optimal': 'optimal_trajectories', 'opt_local': 'local_optimization'}
        conv_ops_anlz = {'núm_remuestreos': 'num_resamples', 'nivel_conf': 'conf_level',
                         'salto_cuadr': 'grid_jump', 'núm_niveles': 'num_levels'}

        # Options for the sampling and analysis functions
        ops_muestrear = {conv_ops_muestrear[a]: val for a, val in opciones.items() if a in conv_ops_muestrear}
        ops_anlz = {conv_ops_anlz[a]: val for a, val in opciones.items() if a in conv_ops_anlz}

        # Generate the sample for Morris
        vals_paráms = morris_muestra.sample(problem=problema, N=n, **ops_muestrear)
        ops_anlz['X'] = vals_paráms

        # The analysis function
        fun_anlz = morris_anlz

    elif método_mín == 'dmim':
        # Prepare options
        conv_ops_anlz = {'núm_remuestreos': 'num_resamples', 'nivel_conf': 'conf_level'}
        ops_anlz = {conv_ops_anlz[a]: val for a, val in opciones.items() if a in conv_ops_anlz}

        # Generate the sample for DMIM
        vals_paráms = latin.sample(problem=problema, N=n)
        ops_anlz['X'] = vals_paráms

        # The analysis function
        fun_anlz = delta.analyze

    elif método_mín == 'dgsm':  # TODO: verify
        # Prepare options
        conv_ops_anlz = {'núm_remuestreos': 'num_resamples', 'nivel_conf': 'conf_level'}
        ops_anlz = {conv_ops_anlz[a]: val for a, val in opciones.items() if a in conv_ops_anlz}

        # Generate the sample for DGSM
        vals_paráms = saltelli.sample(problem=problema, N=n)
        ops_anlz['X'] = vals_paráms

        # The analysis function
        fun_anlz = dgsm

    elif método_mín == 'ff':

        # Prepare options
        if 'segundo_orden' in opciones:
            ops_anlz = {'second_order': opciones['segundo_orden']}
        else:
            ops_anlz = {}

        # Generate the sample for FF
        vals_paráms = ff_muestra.sample(problem=problema)
        ops_anlz['X'] = vals_paráms

        # The analysis function
        fun_anlz = ff_anlz

    else:
        raise ValueError('Método de análisis de sensibilidad "{}" no reconocido.'.format(método))

    return vals_paráms, fun_anlz, ops_anlz
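
To make the three return values concrete, here is a minimal usage sketch of the Sobol branch; the problem dict and toy model are illustrative only and assume the module-level imports of Sensib.py (saltelli, sobol, ...) are in scope.

problema = {
    'num_vars': 3,
    'names': ['x1', 'x2', 'x3'],
    'bounds': [[-1, 1], [-1, 1], [-1, 1]]
}

vals_paráms, fun_anlz, ops_anlz = prep_anal_sensib(
    'sobol', n=1024, problema=problema, opciones={'calc_segundo_orden': False})

# Evaluate a toy model on every sampled parameter set...
Y = vals_paráms[:, 0] + 2 * vals_paráms[:, 1] * vals_paráms[:, 2]

# ...then hand the outputs to the analysis function chosen above
resultado = fun_anlz(problema, Y, **ops_anlz)
print(resultado['S1'])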