Example 1
 def test_piston_gradient(self):
     benchmark = setup_benchmark("piston")
     sample = pya.generate_independent_random_samples(benchmark.variable, 1)
     print(benchmark.jac(sample))
     errors = pya.check_gradients(benchmark.fun, benchmark.jac, sample)
     errors = errors[np.isfinite(errors)]
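     # check_gradients compares the analytical Jacobian with finite-difference
     # approximations over a range of step sizes, so the errors are expected
     # to span several orders of magnitude (hence the max and min checks)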
     assert errors.max() > 0.1 and errors.min() <= 6e-7
Example 2
    def test_approximate_sparse_grid_user_options(self):
        nvars = 3
        benchmark = setup_benchmark('ishigami', a=7, b=0.1)
        univariate_variables = [stats.uniform(0, 1)] * nvars
        errors = []

        def callback(approx):
            nsamples = 1000
            error = compute_l2_error(approx, benchmark.fun,
                                     approx.variable_transformation.variable,
                                     nsamples)
            errors.append(error)

        univariate_quad_rule_info = [
            pya.clenshaw_curtis_in_polynomial_order,
            pya.clenshaw_curtis_rule_growth
        ]
        options = {
            'univariate_quad_rule_info': univariate_quad_rule_info,
            'max_nsamples': 110,
            'tol': 0,
            'verbose': False
        }
        approx = approximate(benchmark.fun, univariate_variables,
                             'sparse-grid', callback, options)
        assert np.min(errors) < 1e-12
Example 3
 def test_approximate_polynomial_chaos_custom_poly_type(self):
     benchmark = setup_benchmark('ishigami', a=7, b=0.1)
     nvars = benchmark.variable.num_vars()
     # this test purposefully selects the wrong variable to make sure the
     # poly_type override is activated
     univariate_variables = [stats.beta(5, 5, -np.pi, 2 * np.pi)] * nvars
     variable = IndependentMultivariateRandomVariable(univariate_variables)
     var_trans = AffineRandomVariableTransformation(variable)
     # specify correct basis so it is not chosen from variable
     poly_opts = {'poly_type': 'legendre', 'var_trans': var_trans}
     options = {
         'poly_opts': poly_opts,
         'variable': variable,
         'options': {
             'max_num_step_increases': 1
         }
     }
     ntrain_samples = 400
     train_samples = np.random.uniform(-np.pi, np.pi,
                                       (nvars, ntrain_samples))
     train_vals = benchmark.fun(train_samples)
     approx = approximate(train_samples,
                          train_vals,
                          method='polynomial_chaos',
                          options=options).approx
     nsamples = 100
     error = compute_l2_error(approx,
                              benchmark.fun,
                              approx.var_trans.variable,
                              nsamples,
                              rel=True)
     print(error)
     assert error < 1e-4
     assert np.allclose(approx.mean(), benchmark.mean, atol=error)
Example 4
    def test_qmc_sobol_sensitivity_analysis_oakley(self):
        from pyapprox.benchmarks.benchmarks import setup_benchmark
        from pyapprox.approximate import approximate
        benchmark = setup_benchmark("oakley")

        nsamples = 100000
        nvars = benchmark.variable.num_vars()
        order = 1
        interaction_terms = compute_hyperbolic_indices(nvars, order)
        interaction_terms = interaction_terms[:,
                                              np.where(
                                                  interaction_terms.max(
                                                      axis=0) == 1)[0]]
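        # keep only the columns whose largest entry is 1, i.e. multi-indices
        # that select a subset of variables without higher powers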

        fun = benchmark.fun
        for sampling_method in ['sobol', 'random', 'halton']:
            sobol_indices, total_effect_indices, var, mean = \
                sampling_based_sobol_indices(
                    fun, benchmark.variable, interaction_terms, nsamples,
                    sampling_method)

            # print(mean-benchmark.mean)
            assert np.allclose(mean, benchmark.mean, atol=2e-2)
            # print(var-benchmark.variance)
            assert np.allclose(benchmark.variance, var, rtol=2e-2)
            main_effects = sobol_indices[:nvars]
            # print(main_effects-benchmark.main_effects)
            assert np.allclose(main_effects, benchmark.main_effects, atol=2e-2)
Example 5
 def test_random_oscillator_analytical_solution(self):
     benchmark = setup_benchmark("random_oscillator")
     time = benchmark.fun.t
     sample = benchmark.variable.get_statistics("mean")
     asol = benchmark.fun.analytical_solution(sample, time)
     nsol = benchmark.fun.numerical_solution(sample.squeeze())
     assert np.allclose(asol, nsol[:, 0])
Example 6
    def test_cantilever_beam_gradients(self):
        benchmark = setup_benchmark('cantilever_beam')
        from pyapprox.models.wrappers import ActiveSetVariableModel
        fun = ActiveSetVariableModel(
            benchmark.fun,
            benchmark.variable.num_vars() +
            benchmark.design_variable.num_vars(),
            benchmark.variable.get_statistics('mean'),
            benchmark.design_var_indices)
        jac = ActiveSetVariableModel(
            benchmark.jac,
            benchmark.variable.num_vars() +
            benchmark.design_variable.num_vars(),
            benchmark.variable.get_statistics('mean'),
            benchmark.design_var_indices)
        init_guess = 2 * np.ones((2, 1))
        errors = pya.check_gradients(fun, jac, init_guess, disp=True)
        assert errors.min() < 4e-7

        constraint_fun = ActiveSetVariableModel(
            benchmark.constraint_fun,
            benchmark.variable.num_vars() +
            benchmark.design_variable.num_vars(),
            benchmark.variable.get_statistics('mean'),
            benchmark.design_var_indices)
        constraint_jac = ActiveSetVariableModel(
            benchmark.constraint_jac,
            benchmark.variable.num_vars() +
            benchmark.design_variable.num_vars(),
            benchmark.variable.get_statistics('mean'),
            benchmark.design_var_indices)
        init_guess = 2 * np.ones((2, 1))
        errors = pya.check_gradients(constraint_fun,
                                     constraint_jac,
                                     init_guess,
                                     disp=True)
        assert errors.min() < 4e-7

        nsamples = 10
        samples = pya.generate_independent_random_samples(
            benchmark.variable, nsamples)
        constraint_fun = ActiveSetVariableModel(
            benchmark.constraint_fun,
            benchmark.variable.num_vars() +
            benchmark.design_variable.num_vars(), samples,
            benchmark.design_var_indices)
        constraint_jac = ActiveSetVariableModel(
            benchmark.constraint_jac,
            benchmark.variable.num_vars() +
            benchmark.design_variable.num_vars(), samples,
            benchmark.design_var_indices)
        init_guess = 2 * np.ones((2, 1))
        errors = pya.check_gradients(
            lambda x: constraint_fun(x).flatten(order='F'),
            constraint_jac,
            init_guess,
            disp=True)
        assert errors.min() < 4e-7
Example 7
 def test_approximate_sparse_grid_default_options(self):
     nvars = 3
     benchmark = setup_benchmark("ishigami", a=7, b=0.1)
     univariate_variables = [stats.uniform(0, 1)] * nvars
     approx = adaptive_approximate(benchmark.fun, univariate_variables,
                                   "sparse_grid").approx
     nsamples = 100
     error = compute_l2_error(approx, benchmark.fun,
                              approx.variable_transformation.variable,
                              nsamples)
     assert error < 1e-12
Example 8
 def test_approximate_polynomial_chaos_default_options(self):
     nvars = 3
     benchmark = setup_benchmark('ishigami', a=7, b=0.1)
     univariate_variables = [stats.uniform(0, 1)] * nvars
     approx = approximate(benchmark.fun,
                          univariate_variables,
                          method='polynomial-chaos')
     nsamples = 100
     error = compute_l2_error(approx, benchmark.fun,
                              approx.variable_transformation.variable,
                              nsamples)
     assert error < 1e-12
Example 9
    def test_analyze_sensitivity_sparse_grid(self):
        from pyapprox.benchmarks.benchmarks import setup_benchmark
        from pyapprox.adaptive_sparse_grid import isotropic_refinement_indicator
        benchmark = setup_benchmark("oakley")
        options = {
            'max_nsamples': 2000,
            # 'refinement_indicator': isotropic_refinement_indicator
        }
        res = analyze_sensitivity(
            benchmark.fun, benchmark.variable.all_variables(), "sparse-grid",
            options=options)

        # print(res.main_effects - benchmark.main_effects)
        assert np.allclose(res.main_effects, benchmark.main_effects, atol=2e-4)
Example 10
    def test_analyze_sensitivity_polynomial_chaos(self):
        from pyapprox.benchmarks.benchmarks import setup_benchmark
        from pyapprox.adaptive_sparse_grid import isotropic_refinement_indicator
        benchmark = setup_benchmark("ishigami",a=7,b=0.1)
        options = {'max_nsamples':500}
        #'refinement_indicator':isotropic_refinement_indicator}
        res = analyze_sensitivity(
            benchmark.fun,benchmark.variable.all_variables(),"polynomial-chaos",
            options=options)

        #print(res.main_effects-benchmark.main_effects)
        assert np.allclose(res.main_effects,benchmark.main_effects,atol=2e-4)
Example 11
 def test_rosenbrock_function_gradient_and_hessian_prod(self):
     benchmark = setup_benchmark("rosenbrock", nvars=2)
     init_guess = benchmark.variable.get_statistics('mean') +\
         benchmark.variable.get_statistics('std')
     errors = pya.check_gradients(benchmark.fun,
                                  benchmark.jac,
                                  init_guess,
                                  disp=False)
     assert errors.min() < 1e-5
     errors = pya.check_hessian(benchmark.jac,
                                benchmark.hessp,
                                init_guess,
                                disp=False)
     assert errors.min() < 1e-5
Example 12
 def test_approximate_polynomial_chaos_default_options(self):
     nvars = 3
     benchmark = setup_benchmark('ishigami', a=7, b=0.1)
     # we can use univariate variables different from those specified by
     # the benchmark
     univariate_variables = [stats.uniform(0, 1)] * nvars
     approx = adaptive_approximate(benchmark.fun,
                                   univariate_variables,
                                   method='polynomial_chaos').approx
     nsamples = 100
     error = compute_l2_error(approx, benchmark.fun,
                              approx.variable_transformation.variable,
                              nsamples)
     assert error < 1e-12
Example 13
    def test_approximate_neural_network(self):
        np.random.seed(2)
        benchmark = setup_benchmark("ishigami", a=7, b=0.1)
        nvars = benchmark.variable.num_vars()
        nqoi = 1
        maxiter = 30000
        print(benchmark.variable)
        # var_trans = pya.AffineRandomVariableTransformation(
        #      [stats.uniform(-2, 4)]*nvars)
        var_trans = pya.AffineRandomVariableTransformation(benchmark.variable)
        network_opts = {
            "activation_func": "sigmoid",
            "layers": [nvars, 75, nqoi],
            "loss_func": "squared_loss",
            "var_trans": var_trans,
            "lag_mult": 0
        }
        optimizer_opts = {
            "method": "L-BFGS-B",
            "options": {
                "maxiter": maxiter,
                "iprint": -1,
                "gtol": 1e-6
            }
        }
        opts = {
            "network_opts": network_opts,
            "verbosity": 3,
            "optimizer_opts": optimizer_opts
        }
        ntrain_samples = 500
        train_samples = pya.generate_independent_random_samples(
            var_trans.variable, ntrain_samples)
        train_samples = var_trans.map_from_canonical_space(
            np.cos(np.random.uniform(0, np.pi, (nvars, ntrain_samples))))
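        # taking cos of uniform angles yields arcsine (Chebyshev) distributed
        # points in [-1, 1], which are then mapped to the variable's range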
        train_vals = benchmark.fun(train_samples)

        opts = {
            "network_opts": network_opts,
            "verbosity": 3,
            "optimizer_opts": optimizer_opts,
            "x0": 10
        }
        approx = approximate(train_samples, train_vals, "neural_network",
                             opts).approx
        nsamples = 100
        error = compute_l2_error(approx, benchmark.fun, var_trans.variable,
                                 nsamples)
        print(error)
        assert error < 6e-2
Example 14
    def test_repeat_qmc_sobol_sensitivity_analysis_ishigami(self):
        from pyapprox.benchmarks.benchmarks import setup_benchmark
        from pyapprox.approximate import approximate
        benchmark = setup_benchmark("ishigami", a=7, b=0.1)

        nsamples = 10000
        nvars = benchmark.variable.num_vars()
        order = 3
        interaction_terms = compute_hyperbolic_indices(nvars, order)
        interaction_terms = interaction_terms[:,
                                              np.where(
                                                  interaction_terms.max(
                                                      axis=0) == 1)[0]]

        sampling_method = 'sobol'
        nsobol_realizations = 5
        rep_sobol_indices, rep_total_effect_indices, rep_var, rep_mean = \
            repeat_sampling_based_sobol_indices(
                benchmark.fun, benchmark.variable, interaction_terms, nsamples,
                sampling_method, nsobol_realizations=nsobol_realizations)

        assert np.allclose(rep_mean.mean(axis=0), benchmark.mean, atol=2e-3)
        # check that there is variation in the output. If not, then we are not
        # generating different sample sets for each Sobol realization
        assert rep_mean.std(axis=0) > 0

        sobol_indices = rep_sobol_indices.mean(axis=0)
        main_effects = sobol_indices[:nvars]
        assert np.allclose(main_effects, benchmark.main_effects, atol=2e-3)

        total_effect_indices = rep_total_effect_indices.mean(axis=0)
        assert np.allclose(total_effect_indices,
                           benchmark.total_effects,
                           atol=2e-3)

        for ii in range(interaction_terms.shape[1]):
            index = interaction_terms[:, ii]
            assert np.allclose(
                np.where(index > 0)[0],
                benchmark.sobol_interaction_indices[ii])

        sobol_indices = rep_sobol_indices.mean(axis=0)
        assert np.allclose(sobol_indices,
                           benchmark.sobol_indices,
                           rtol=5e-3,
                           atol=1e-3)
Example 15
    def test_ishigami_function_gradient_and_hessian(self):
        benchmark = setup_benchmark("ishigami", a=7, b=0.1)
        init_guess = benchmark.variable.get_statistics('mean') +\
            benchmark.variable.get_statistics('std')
        errors = pya.check_gradients(benchmark.fun,
                                     benchmark.jac,
                                     init_guess,
                                     disp=False)
        # print(errors.min())
        assert errors.min() < 2e-6

        def hess_matvec(x, v):
            return np.dot(benchmark.hess(x), v)

        errors = pya.check_hessian(benchmark.jac,
                                   hess_matvec,
                                   init_guess,
                                   disp=False)
        assert errors.min() < 2e-7
Example 16
    def test_approximate_polynomial_chaos_induced(self):
        nvars = 3
        benchmark = setup_benchmark("ishigami", a=7, b=0.1)
        # we can use univariate variables different from those specified by
        # the benchmark. In this case we use the same ones, but set them up
        # here to demonstrate this functionality
        univariate_variables = [stats.uniform(0, 1)] * nvars
        # approx = adaptive_approximate(
        #     benchmark.fun, univariate_variables,
        #     method="polynomial_chaos",
        #     options={"method": "induced",
        #              "options": {"max_nsamples": 200,
        #                          "induced_sampling": True,
        #                          "cond_tol": 1e8}}).approx
        # nsamples = 100
        # error = compute_l2_error(
        #     approx, benchmark.fun, approx.variable_transformation.variable,
        #     nsamples)
        # print(error)
        # assert error < 1e-5

        # probability sampling
        approx = adaptive_approximate(benchmark.fun,
                                      univariate_variables,
                                      method="polynomial_chaos",
                                      options={
                                          "method": "induced",
                                          "options": {
                                              "max_nsamples": 100,
                                              "induced_sampling": False,
                                              "cond_tol": 1e4,
                                              "max_level_1d": 4,
                                              "verbose": 3
                                          }
                                      }).approx
        nsamples = 100
        error = compute_l2_error(approx, benchmark.fun,
                                 approx.variable_transformation.variable,
                                 nsamples)
        print(error)
        assert error < 1e-5
Example 17
    def test_analyze_sensitivity_polynomial_chaos(self):
        from pyapprox.benchmarks.benchmarks import setup_benchmark
        from pyapprox.approximate import approximate
        benchmark = setup_benchmark("ishigami", a=7, b=0.1)

        num_samples = 1000
        train_samples = pya.generate_independent_random_samples(
            benchmark.variable, num_samples)
        train_vals = benchmark.fun(train_samples)

        pce = approximate(
            train_samples, train_vals, 'polynomial_chaos', {
                'basis_type': 'hyperbolic_cross',
                'variable': benchmark.variable,
                'options': {
                    'max_degree': 8
                }
            }).approx

        res = analyze_sensitivity_polynomial_chaos(pce)
        assert np.allclose(res.main_effects, benchmark.main_effects, atol=2e-3)
Example 18
 def test_approximate_polynomial_chaos_leja(self):
     nvars = 3
     benchmark = setup_benchmark("ishigami", a=7, b=0.1)
     # we can use univariate variables different from those specified by
     # the benchmark. In this case we use the same ones, but set them up
     # here to demonstrate this functionality
     univariate_variables = [stats.uniform(0, 1)] * nvars
     approx = adaptive_approximate(benchmark.fun,
                                   univariate_variables,
                                   method="polynomial_chaos",
                                   options={
                                       "method": "leja",
                                       "options": {
                                           "max_nsamples": 100
                                       }
                                   }).approx
     nsamples = 100
     error = compute_l2_error(approx, benchmark.fun,
                              approx.variable_transformation.variable,
                              nsamples)
     assert error < 1e-12
Example 19
 def test_approximate_polynomial_chaos_custom_poly_type(self):
     benchmark = setup_benchmark("ishigami", a=7, b=0.1)
     nvars = benchmark.variable.num_vars()
     # this test purposefully selects the wrong variable to make sure the
     # poly_type override is activated
     univariate_variables = [stats.beta(5, 5, -np.pi, 2 * np.pi)] * nvars
     variable = pya.IndependentMultivariateRandomVariable(
         univariate_variables)
     var_trans = pya.AffineRandomVariableTransformation(variable)
     # specify the correct basis directly so it is not chosen from
     # var_trans.variable,
     poly_opts = {"var_trans": var_trans}
     # but rather from another variable, which will invoke Legendre
     # polynomials
     basis_opts = pya.define_poly_options_from_variable(
         pya.IndependentMultivariateRandomVariable([stats.uniform()] *
                                                   nvars))
     poly_opts["poly_types"] = basis_opts
     options = {
         "poly_opts": poly_opts,
         "variable": variable,
         "options": {
             "max_num_step_increases": 1
         }
     }
     ntrain_samples = 400
     train_samples = np.random.uniform(-np.pi, np.pi,
                                       (nvars, ntrain_samples))
     train_vals = benchmark.fun(train_samples)
     approx = approximate(train_samples,
                          train_vals,
                          method="polynomial_chaos",
                          options=options).approx
     nsamples = 100
     error = compute_l2_error(approx,
                              benchmark.fun,
                              approx.var_trans.variable,
                              nsamples,
                              rel=True)
     # print(error)
     assert error < 1e-4
     assert np.allclose(approx.mean(), benchmark.mean, atol=error)
Example 20
    def test_analyze_sensitivity_sparse_grid(self):
        from pyapprox.benchmarks.benchmarks import setup_benchmark
        from pyapprox.approximate import adaptive_approximate
        benchmark = setup_benchmark("oakley")

        options = {'max_nsamples': 2000, 'verbose': 0}
        approx = adaptive_approximate(benchmark.fun,
                                      benchmark.variable.all_variables(),
                                      'sparse_grid', options).approx

        from pyapprox.approximate import compute_l2_error
        nsamples = 100
        error = compute_l2_error(approx,
                                 benchmark.fun,
                                 approx.variable_transformation.variable,
                                 nsamples,
                                 rel=True)
        # print(error)
        assert error < 3e-3

        res = analyze_sensitivity_sparse_grid(approx)
        assert np.allclose(res.main_effects, benchmark.main_effects, atol=2e-4)
Example 21
    def test_approximate_sparse_grid_user_options(self):
        nvars = 3
        benchmark = setup_benchmark('ishigami', a=7, b=0.1)
        univariate_variables = benchmark['variable'].all_variables()
        errors = []

        def callback(approx):
            nsamples = 1000
            error = compute_l2_error(approx, benchmark.fun,
                                     approx.variable_transformation.variable,
                                     nsamples)
            errors.append(error)

        univariate_quad_rule_info = [
            pya.clenshaw_curtis_in_polynomial_order,
            pya.clenshaw_curtis_rule_growth
        ]
        # the Ishigami function takes the same value at the first 3 points of
        # the Clenshaw-Curtis rule, so adaptivity will not work there; use a
        # different rule, e.g.
        # growth_rule = partial(pya.constant_increment_growth_rule, 4)
        # univariate_quad_rule_info = [
        #     pya.get_univariate_leja_quadrature_rule(
        #         univariate_variables[0], growth_rule), growth_rule]
        refinement_indicator = partial(variance_refinement_indicator,
                                       convex_param=0.5)
        options = {
            'univariate_quad_rule_info': univariate_quad_rule_info,
            'max_nsamples': 300,
            'tol': 0,
            'callback': callback,
            'verbose': 0,
            'refinement_indicator': refinement_indicator
        }
        approx = adaptive_approximate(benchmark.fun, univariate_variables,
                                      'sparse_grid', options)
        # print(np.min(errors))
        assert np.min(errors) < 1e-3
Example 22
    def test_qmc_sobol_sensitivity_analysis_ishigami(self):
        from pyapprox.benchmarks.benchmarks import setup_benchmark
        from pyapprox.approximate import approximate
        benchmark = setup_benchmark("ishigami", a=7, b=0.1)

        nsamples = 10000
        nvars = benchmark.variable.num_vars()
        order = 3
        interaction_terms = compute_hyperbolic_indices(nvars, order)
        interaction_terms = interaction_terms[:,
                                              np.where(
                                                  interaction_terms.max(
                                                      axis=0) == 1)[0]]

        sampling_method = 'sobol'
        sobol_indices, total_effect_indices, var, mean = \
            sampling_based_sobol_indices(
                benchmark.fun, benchmark.variable, interaction_terms, nsamples,
                sampling_method, qmc_start_index=100)

        assert np.allclose(mean, benchmark.mean, atol=2e-3)

        main_effects = sobol_indices[:nvars]
        assert np.allclose(main_effects, benchmark.main_effects, atol=2e-3)

        assert np.allclose(total_effect_indices,
                           benchmark.total_effects,
                           atol=2e-3)

        for ii in range(interaction_terms.shape[1]):
            index = interaction_terms[:, ii]
            assert np.allclose(
                np.where(index > 0)[0],
                benchmark.sobol_interaction_indices[ii])
        assert np.allclose(sobol_indices,
                           benchmark.sobol_indices,
                           rtol=5e-3,
                           atol=1e-3)
Example 23
    def test_approximate_sparse_grid_user_options(self):
        benchmark = setup_benchmark("ishigami", a=7, b=0.1)
        univariate_variables = benchmark["variable"].all_variables()
        errors = []

        def callback(approx):
            nsamples = 1000
            error = compute_l2_error(approx, benchmark.fun,
                                     approx.variable_transformation.variable,
                                     nsamples)
            errors.append(error)

        univariate_quad_rule_info = [
            pya.clenshaw_curtis_in_polynomial_order,
            pya.clenshaw_curtis_rule_growth, None, None
        ]
        # the Ishigami function takes the same value at the first 3 points of
        # the Clenshaw-Curtis rule, so adaptivity will not work there; use a
        # different rule, e.g.
        # growth_rule = partial(pya.constant_increment_growth_rule, 4)
        # univariate_quad_rule_info = [
        #     pya.get_univariate_leja_quadrature_rule(
        #         univariate_variables[0], growth_rule), growth_rule]
        refinement_indicator = partial(pya.variance_refinement_indicator,
                                       convex_param=0.5)
        options = {
            "univariate_quad_rule_info": univariate_quad_rule_info,
            "max_nsamples": 300,
            "tol": 0,
            "callback": callback,
            "verbose": 0,
            "refinement_indicator": refinement_indicator
        }
        adaptive_approximate(benchmark.fun, univariate_variables,
                             "sparse_grid", options).approx
        # print(np.min(errors))
        assert np.min(errors) < 1e-3
Example 24
This tutorial describes how to set up a function with random inputs. It also provides examples of how to use model wrappers to time function calls and to evaluate a function at multiple samples in parallel.

We start by defining a function of two random variables. We will use the Rosenbrock benchmark. See :func:`pyapprox.benchmarks.benchmarks.setup_rosenbrock_function`
"""
from pyapprox.models.wrappers import TimerModelWrapper, WorkTrackingModel
from pyapprox.models.wrappers import evaluate_1darray_function_on_2d_array
import os
from pyapprox.models.wrappers import PoolModel
from pyapprox.control_variate_monte_carlo import ModelEnsemble
import time
import numpy as np
from scipy import stats
import pyapprox as pya
from pyapprox.benchmarks.benchmarks import setup_benchmark
benchmark = setup_benchmark('rosenbrock', nvars=2)

#%%
#Print the attributes of the benchmark with
print(benchmark.keys())
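# e.g. dict_keys(['fun', 'jac', 'hessp', 'variable', ...]) -- the exact
# attributes depend on the benchmark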

#%%
#Any of these attributes can be accessed, e.g. the Rosenbrock function via ``benchmark.fun`` (i.e. the attribute ``fun``).
#
#Now let's define the inputs to the function of interest. For independent random variables we use SciPy random variables to represent each one-dimensional variable. For documentation refer to the `scipy.stats module <https://docs.scipy.org/doc/scipy/reference/stats.html>`_.
#
#We define multivariate random variables by specifying each 1D variable in a list. Here we will set up a 2D variable which is the tensor product of two independent and identically distributed uniform random variables

univariate_variables = [stats.uniform(-2, 4), stats.uniform(-2, 4)]
variable = pya.IndependentMultivariateRandomVariable(univariate_variables)
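
#%%
#As a quick sanity check (a minimal sketch, not part of the original
#tutorial), we can draw random samples from this variable and evaluate the
#benchmark function at them. Samples are stored column-wise, i.e. with shape
#(nvars, nsamples).

samples = pya.generate_independent_random_samples(variable, 10)
values = benchmark.fun(samples)
print(samples.shape, values.shape)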
Example 25
Let's first consider a simple example with one unknown parameter. The following sets up the problem
"""
import numpy as np
import pyapprox as pya
from scipy.stats import uniform
from pyapprox.models.wrappers import MultiLevelWrapper
import matplotlib.pyplot as plt

nmodels = 3
nrandom_vars, corr_len = 1, 1 / 2
max_eval_concurrency = 1
from pyapprox.benchmarks.benchmarks import setup_benchmark

benchmark = setup_benchmark('multi_level_advection_diffusion',
                            nvars=nrandom_vars,
                            corr_len=corr_len,
                            max_eval_concurrency=max_eval_concurrency)
model = benchmark.fun
variable = benchmark.variable

#%%
#Now let us plot each model as a function of the random variable
lb, ub = variable.get_statistics('interval', alpha=1)[0]
nsamples = 10
random_samples = np.linspace(lb, ub, nsamples)[np.newaxis, :]
config_vars = np.arange(nmodels)[np.newaxis, :]
samples = pya.get_all_sample_combinations(random_samples, config_vars)
values = model(samples)
values = np.reshape(values, (nsamples, nmodels))
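
#%%
#A minimal plotting sketch for the step above (the tutorial's own plotting
#code is not shown here): each column of ``values`` is one model level
#evaluated at the random samples.
for ii in range(nmodels):
    plt.plot(random_samples[0, :], values[:, ii], label='model %d' % ii)
plt.legend()
plt.show()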

import dolfin as dl
Example 26
   Uncertainty     Symbol    Prior
   =============== ========= =======================
   Yield stress    :math:`R` :math:`N(40000,2000)`
   Young's modulus :math:`E` :math:`N(2.9e7,1.45e6)`
   Horizontal load :math:`X` :math:`N(500,100)`
   Vertical Load   :math:`Y` :math:`N(1000,100)`
   =============== ========= =======================

First we must specify the distribution of the random variables
"""
import numpy as np
import pyapprox as pya
from pyapprox.benchmarks.benchmarks import setup_benchmark
from functools import partial
from pyapprox.optimization import *
benchmark = setup_benchmark('cantilever_beam')
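
#%%
#The cantilever-beam benchmark bundles the priors in the table above. For
#illustration only (a sketch; prefer ``benchmark.variable``), the equivalent
#hand-written definition would be
from scipy import stats
univariate_variables = [
    stats.norm(40000, 2000),    # yield stress R
    stats.norm(2.9e7, 1.45e6),  # Young's modulus E
    stats.norm(500, 100),       # horizontal load X
    stats.norm(1000, 100)       # vertical load Y
]
variable = pya.IndependentMultivariateRandomVariable(univariate_variables)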

from pyapprox.models.wrappers import ActiveSetVariableModel
nsamples = 10
samples = pya.generate_independent_random_samples(benchmark.variable, nsamples)
fun = ActiveSetVariableModel(
    benchmark.fun,
    benchmark.variable.num_vars() + benchmark.design_variable.num_vars(),
    samples, benchmark.design_var_indices)
jac = ActiveSetVariableModel(
    benchmark.jac,
    benchmark.variable.num_vars() + benchmark.design_variable.num_vars(),
    samples, benchmark.design_var_indices)

generate_random_samples = partial(pya.generate_independent_random_samples,
                                  benchmark.variable, 100)
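
#%%
#A minimal usage sketch (an assumption based on the gradient tests above):
#with the random variables fixed at ``samples``, the wrapped model is
#evaluated at a design point, giving one value per fixed random sample.
design_point = 2 * np.ones((2, 1))
print(fun(design_point))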
Example 27
def generate_random_samples(m, n):
    samples = pya.halton_sequence(m, 0, n)
    samples = samples * 2 * np.sqrt(3) - np.sqrt(3)
    return samples


from functools import partial
import matplotlib.pyplot as plt
nvars, corr_len = 2, 0.1
# model = setup_model(nvars, corr_len, max_eval_concurrency=1)
from pyapprox.benchmarks.benchmarks import setup_benchmark
benchmark = setup_benchmark('advection-diffusion',
                            nvars=nvars,
                            corr_len=corr_len,
                            max_eval_concurrency=4)
model = benchmark.fun
validation_levels = [5] * 3
data = error_vs_cost(
    model, partial(generate_random_samples, benchmark.variable.num_vars()),
    validation_levels)
plot_error_vs_cost(data, 'time')
plt.show()

#%%
#The above figure depicts the changes induced in the error of the mean of the QoI, i.e. :math:`\mathbb{E}[f_\ai]`, as the mesh and temporal discretizations are changed.  The legend labels denote the mesh discretization parameter values :math:`(\alpha_1,\alpha_2,\alpha_3)` used to solve the advection diffusion equation. Numeric values represent discretization parameters that are held fixed while the symbol :math:`\cdot` denotes that the corresponding parameter is varying. The reference solution is obtained using the model indexed by :math:`(6,6,6)`.
#
#The dashed lines represent the theoretical rates of the convergence of the deterministic error when refining :math:`h_1` (left), :math:`h_2` (middle), and :math:`\Delta t` (right).
#The error decreases quadratically with both :math:`h_1` and :math:`h_2` and linearly with :math:`\Delta t` until a saturation point is reached. These saturation points occur when the error induced by a coarse resolution in one mesh parameter dominates the others. For example the left plot shows that when refining :math:`h_1` the final error in :math:`\mathbb{E}[f]` is dictated by the error induced by using the mesh size :math:`h_2`, provided :math:`\Delta t` is small enough. Similarly the right plot shows, that for fixed :math:`h_2`, the highest accuracy that can be obtained by refining :math:`\Delta t` is dependent on the resolution of :math:`h_1`.
#
Example 28
    def test_analytic_sobol_indices_from_gaussian_process(self):
        from pyapprox.benchmarks.benchmarks import setup_benchmark
        from pyapprox.approximate import approximate
        benchmark = setup_benchmark("ishigami", a=7, b=0.1)
        nvars = benchmark.variable.num_vars()

        ntrain_samples = 500
        # train_samples = pya.generate_independent_random_samples(
        #     benchmark.variable, ntrain_samples)
        train_samples = pya.sobol_sequence(nvars,
                                           ntrain_samples,
                                           variable=benchmark.variable)

        train_vals = benchmark.fun(train_samples)
        approx = approximate(train_samples, train_vals, 'gaussian_process', {
            'nu': np.inf,
            'normalize_y': True,
            'alpha': 1e-10
        }).approx

        nsobol_samples = int(1e4)
        from pyapprox.approximate import compute_l2_error
        error = compute_l2_error(approx,
                                 benchmark.fun,
                                 benchmark.variable,
                                 nsobol_samples,
                                 rel=True)
        print(error)

        order = 2
        interaction_terms = compute_hyperbolic_indices(nvars, order)
        interaction_terms = interaction_terms[:,
                                              np.where(
                                                  interaction_terms.max(
                                                      axis=0) == 1)[0]]

        result = analytic_sobol_indices_from_gaussian_process(
            approx,
            benchmark.variable,
            interaction_terms,
            ngp_realizations=1000,
            stat_functions=(np.mean, np.std),
            ninterpolation_samples=2000,
            ncandidate_samples=3000,
            use_cholesky=False,
            alpha=1e-8)

        mean_mean = result['mean']['mean']
        mean_sobol_indices = result['sobol_indices']['mean']
        mean_total_effects = result['total_effects']['mean']
        mean_main_effects = mean_sobol_indices[:nvars]

        print(result['mean']['values'][-1])
        print(result['variance']['values'][-1])
        print(benchmark.main_effects[:, 0] - mean_main_effects)
        print(benchmark.total_effects[:, 0] - mean_total_effects)
        print(benchmark.sobol_indices[:-1, 0] - mean_sobol_indices)
        assert np.allclose(mean_mean, benchmark.mean, rtol=1e-3, atol=3e-3)
        assert np.allclose(mean_main_effects,
                           benchmark.main_effects[:, 0],
                           rtol=1e-3,
                           atol=3e-3)
        assert np.allclose(mean_total_effects,
                           benchmark.total_effects[:, 0],
                           rtol=1e-3,
                           atol=3e-3)
        assert np.allclose(mean_sobol_indices,
                           benchmark.sobol_indices[:-1, 0],
                           rtol=1e-3,
                           atol=3e-3)
Example 29
    def test_sampling_based_sobol_indices_from_gaussian_process(self):
        from pyapprox.benchmarks.benchmarks import setup_benchmark
        from pyapprox.approximate import approximate
        benchmark = setup_benchmark("ishigami", a=7, b=0.1)
        nvars = benchmark.variable.num_vars()

        # nsobol_samples and ntrain_samples affect the assert tolerances
        ntrain_samples = 500
        nsobol_samples = int(1e4)
        train_samples = pya.generate_independent_random_samples(
            benchmark.variable, ntrain_samples)
        # from pyapprox import CholeskySampler
        # sampler = CholeskySampler(nvars, 10000, benchmark.variable)
        # kernel = pya.Matern(
        #     np.array([1]*nvars), length_scale_bounds='fixed', nu=np.inf)
        # sampler.set_kernel(kernel)
        # train_samples = sampler(ntrain_samples)[0]

        train_vals = benchmark.fun(train_samples)
        approx = approximate(train_samples, train_vals, 'gaussian_process', {
            'nu': np.inf,
            'normalize_y': True
        }).approx

        from pyapprox.approximate import compute_l2_error
        error = compute_l2_error(approx,
                                 benchmark.fun,
                                 benchmark.variable,
                                 nsobol_samples,
                                 rel=True)
        print('error', error)
        # assert error < 4e-2

        order = 2
        interaction_terms = compute_hyperbolic_indices(nvars, order)
        interaction_terms = interaction_terms[:,
                                              np.where(
                                                  interaction_terms.max(
                                                      axis=0) == 1)[0]]

        result = sampling_based_sobol_indices_from_gaussian_process(
            approx,
            benchmark.variable,
            interaction_terms,
            nsobol_samples,
            sampling_method='sobol',
            ngp_realizations=1000,
            normalize=True,
            nsobol_realizations=3,
            stat_functions=(np.mean, np.std),
            ninterpolation_samples=1000,
            ncandidate_samples=2000)

        mean_mean = result['mean']['mean']
        mean_sobol_indices = result['sobol_indices']['mean']
        mean_total_effects = result['total_effects']['mean']
        mean_main_effects = mean_sobol_indices[:nvars]

        print(benchmark.mean - mean_mean)
        print(benchmark.main_effects[:, 0] - mean_main_effects)
        print(benchmark.total_effects[:, 0] - mean_total_effects)
        print(benchmark.sobol_indices[:-1, 0] - mean_sobol_indices)
        assert np.allclose(mean_mean, benchmark.mean, atol=3e-2)
        assert np.allclose(mean_main_effects,
                           benchmark.main_effects[:, 0],
                           atol=1e-2)
        assert np.allclose(mean_total_effects,
                           benchmark.total_effects[:, 0],
                           atol=1e-2)
        assert np.allclose(mean_sobol_indices,
                           benchmark.sobol_indices[:-1, 0],
                           atol=1e-2)
Example 30

def generate_random_samples(m, n):
    samples = pya.halton_sequence(m, 0, n)
    samples = samples*2*np.sqrt(3)-np.sqrt(3)
    return samples


nvars, corr_len = 2, 0.1
# model = setup_model(nvars, corr_len, max_eval_concurrency=1)
benchmark = setup_benchmark(
    'multi_index_advection_diffusion', nvars=nvars, corr_len=corr_len,
    max_eval_concurrency=1)
model = benchmark.fun
validation_levels = [5]*3
data = error_vs_cost(
    model, partial(generate_random_samples, benchmark.variable.num_vars()),
    validation_levels)
plot_error_vs_cost(data, 'time')
plt.show()

#%%
#The above figure depicts the changes induced in the error of the mean of the QoI, i.e. :math:`\mathbb{E}[f_\ai]`, as the mesh and temporal discretizations are changed.  The legend labels denote the mesh discretization parameter values :math:`(\alpha_1,\alpha_2,\alpha_3)` used to solve the advection diffusion equation. Numeric values represent discretization parameters that are held fixed while the symbol :math:`\cdot` denotes that the corresponding parameter is varying. The reference solution is obtained using the model indexed by :math:`(6,6,6)`.
#
#The dashed lines represent the theoretical rates of the convergence of the deterministic error when refining :math:`h_1` (left), :math:`h_2` (middle), and :math:`\Delta t` (right).
#The error decreases quadratically with both :math:`h_1` and :math:`h_2` and linearly with :math:`\Delta t` until a saturation point is reached. These saturation points occur when the error induced by a coarse resolution in one mesh parameter dominates the others. For example the left plot shows that when refining :math:`h_1` the final error in :math:`\mathbb{E}[f]` is dictated by the error induced by using the mesh size :math:`h_2`, provided :math:`\Delta t` is small enough. Similarly the right plot shows, that for fixed :math:`h_2`, the highest accuracy that can be obtained by refining :math:`\Delta t` is dependent on the resolution of :math:`h_1`.
#