Example #1
    def test_adaptive_approximate_gaussian_process_normalize_inputs(self):
        from sklearn.gaussian_process.kernels import Matern
        num_vars = 1
        univariate_variables = [stats.beta(5, 10, 0, 2)] * num_vars

        # Generate random function
        nu = np.inf  # 2.5
        kernel = Matern(0.1, nu=nu)
        X = np.linspace(-1, 1, 1000)[np.newaxis, :]
        alpha = np.random.normal(0, 1, X.shape[1])

        def fun(x):
            return kernel(x.T, X.T).dot(alpha)[:, np.newaxis]
            # return np.cos(2*np.pi*x.sum(axis=0)/num_vars)[:, np.newaxis]

        errors = []
        validation_samples = pya.generate_independent_random_samples(
            pya.IndependentMultivariateRandomVariable(univariate_variables),
            100)
        validation_values = fun(validation_samples)

        def callback(gp):
            gp_vals = gp(validation_samples)
            assert gp_vals.shape == validation_values.shape
            error = np.linalg.norm(gp_vals - validation_values
                                   ) / np.linalg.norm(validation_values)
            print(error, gp.y_train_.shape[0])
            errors.append(error)

        weight_function = partial(
            pya.tensor_product_pdf,
            univariate_pdfs=[v.pdf for v in univariate_variables])

        gp = adaptive_approximate(
            fun, univariate_variables, "gaussian_process", {
                "nu": nu,
                "noise_level": None,
                "normalize_y": True,
                "alpha": 1e-10,
                "normalize_inputs": True,
                "weight_function": weight_function,
                "ncandidate_samples": 1e3,
                "callback": callback
            }).approx

        # import matplotlib.pyplot as plt
        # plt.plot(gp.X_train_.T[0, :], 0*gp.X_train_.T[0, :], "s")
        # plt.plot(gp.get_training_samples()[0, :], 0*gp.get_training_samples()[0, :], "x")
        # plt.plot(gp.sampler.candidate_samples[0, :], 0*gp.sampler.candidate_samples[0, :], "^")
        # plt.plot(validation_samples[0, :], validation_values[:, 0], "o")
        # var = univariate_variables[0]
        # lb, ub = var.interval(1)
        # xx = np.linspace(lb, ub, 101)
        # plt.plot(xx, var.pdf(xx), "r-")
        # plt.show()
        print(errors[-1])
        assert errors[-1] < 1e-7
Example #2
    def test_approximate_sparse_grid_default_options(self):
        nvars = 3
        benchmark = setup_benchmark("ishigami", a=7, b=0.1)
        univariate_variables = [stats.uniform(0, 1)] * nvars
        approx = adaptive_approximate(benchmark.fun, univariate_variables,
                                      "sparse_grid").approx
        nsamples = 100
        error = compute_l2_error(approx, benchmark.fun,
                                 approx.variable_transformation.variable,
                                 nsamples)
        assert error < 1e-12
Example #3
def adaptive_approximate_multi_index_sparse_grid(fun, variable, options):
    """
    A lightweight wrapper for building multi-index approximations.
    Checks are made to ensure the required options ('config_variables_idx'
    and 'config_var_trans') have been provided.
    See :func:`pyapprox.approximate.adaptive_approximate_sparse_grid` for more
    details.
    """
    assert 'config_variables_idx' in options
    assert 'config_var_trans' in options
    sparse_grid = adaptive_approximate(fun, variable, 'sparse_grid', options)
    return sparse_grid
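A minimal call sketch of this wrapper follows, to show the shape of the two
required options. The model, the configure values, and the use of
pya.ConfigureVariableTransformation are illustrative assumptions, not code
taken from the examples in this listing.

# Hedged sketch only: the model and the configure-variable transformation
# below are assumed for illustration.
import numpy as np
from scipy import stats
import pyapprox as pya

univariate_variables = [stats.uniform(-1, 2)] * 2
variable = pya.IndependentMultivariateRandomVariable(univariate_variables)


def fun(samples):
    # Assumed model: rows are the two random variables followed by one
    # configure (fidelity) variable that controls a resolution-like bias
    random_samples, config_samples = samples[:2], samples[2:]
    vals = np.cos(random_samples.sum(axis=0)) + 1. / (1. + config_samples[0])
    return vals[:, np.newaxis]


# Assumed API: a transformation listing the discrete values the single
# configure variable can take (here 5 levels)
config_var_trans = pya.ConfigureVariableTransformation([np.arange(1, 6)])
options = {
    "config_variables_idx": variable.num_vars(),  # configure vars start here
    "config_var_trans": config_var_trans,
    "max_nsamples": 100
}
approx = adaptive_approximate_multi_index_sparse_grid(
    fun, univariate_variables, options).approx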
Example #4
    def test_adaptive_approximate_gaussian_process(self):
        from sklearn.gaussian_process.kernels import Matern
        num_vars = 1
        univariate_variables = [stats.uniform(-1, 2)] * num_vars

        # Generate random function
        nu = np.inf  # 2.5
        kernel = Matern(0.1, nu=nu)
        X = np.linspace(-1, 1, 1000)[np.newaxis, :]
        alpha = np.random.normal(0, 1, X.shape[1])

        def fun(x):
            return kernel(x.T, X.T).dot(alpha)[:, np.newaxis]
            # return np.cos(2*np.pi*x.sum(axis=0)/num_vars)[:, np.newaxis]

        errors = []
        validation_samples = np.random.uniform(-1, 1, (num_vars, 100))
        validation_values = fun(validation_samples)

        def callback(gp):
            gp_vals = gp(validation_samples)
            assert gp_vals.shape == validation_values.shape
            error = np.linalg.norm(gp_vals - validation_values
                                   ) / np.linalg.norm(validation_values)
            print(error, gp.y_train_.shape[0])
            errors.append(error)

        adaptive_approximate(
            fun, univariate_variables, "gaussian_process", {
                "nu": nu,
                "noise_level": None,
                "normalize_y": True,
                "alpha": 1e-10,
                "ncandidate_samples": 1e3,
                "callback": callback
            }).approx
        assert errors[-1] < 1e-8
Example #5
    def test_approximate_sparse_grid_user_options(self):
        benchmark = setup_benchmark("ishigami", a=7, b=0.1)
        univariate_variables = benchmark["variable"].all_variables()
        errors = []

        def callback(approx):
            nsamples = 1000
            error = compute_l2_error(approx, benchmark.fun,
                                     approx.variable_transformation.variable,
                                     nsamples)
            errors.append(error)

        univariate_quad_rule_info = [
            pya.clenshaw_curtis_in_polynomial_order,
            pya.clenshaw_curtis_rule_growth, None, None
        ]
        # The Ishigami function has the same value at the first 3 points of
        # the Clenshaw-Curtis rule, and so adaptivity will not work; use a
        # different rule
        # growth_rule=partial(pya.constant_increment_growth_rule,4)
        # univariate_quad_rule_info = [
        #    pya.get_univariate_leja_quadrature_rule(
        #        univariate_variables[0],growth_rule),growth_rule]
        refinement_indicator = partial(pya.variance_refinement_indicator,
                                       convex_param=0.5)
        options = {
            "univariate_quad_rule_info": univariate_quad_rule_info,
            "max_nsamples": 300,
            "tol": 0,
            "callback": callback,
            "verbose": 0,
            "refinement_indicator": refinement_indicator
        }
        adaptive_approximate(benchmark.fun, univariate_variables,
                             "sparse_grid", options).approx
        # print(np.min(errors))
        assert np.min(errors) < 1e-3
Example #6
    def test_approximate_polynomial_chaos_induced(self):
        nvars = 3
        benchmark = setup_benchmark("ishigami", a=7, b=0.1)
        # We can use univariate variables different from those specified by
        # the benchmark. In this case we use the same ones, but set them up
        # here to demonstrate this functionality.
        univariate_variables = [stats.uniform(0, 1)] * nvars
        # approx = adaptive_approximate(
        #     benchmark.fun, univariate_variables,
        #     method="polynomial_chaos",
        #     options={"method": "induced",
        #              "options": {"max_nsamples": 200,
        #                          "induced_sampling": True,
        #                          "cond_tol": 1e8}}).approx
        # nsamples = 100
        # error = compute_l2_error(
        #     approx, benchmark.fun, approx.variable_transformation.variable,
        #     nsamples)
        # print(error)
        # assert error < 1e-5

        # probability sampling
        approx = adaptive_approximate(benchmark.fun,
                                      univariate_variables,
                                      method="polynomial_chaos",
                                      options={
                                          "method": "induced",
                                          "options": {
                                              "max_nsamples": 100,
                                              "induced_sampling": False,
                                              "cond_tol": 1e4,
                                              "max_level_1d": 4,
                                              "verbose": 3
                                          }
                                      }).approx
        nsamples = 100
        error = compute_l2_error(approx, benchmark.fun,
                                 approx.variable_transformation.variable,
                                 nsamples)
        print(error)
        assert error < 1e-5
Example #7
    def test_approximate_polynomial_chaos_leja(self):
        nvars = 3
        benchmark = setup_benchmark("ishigami", a=7, b=0.1)
        # We can use univariate variables different from those specified by
        # the benchmark. In this case we use the same ones, but set them up
        # here to demonstrate this functionality.
        univariate_variables = [stats.uniform(0, 1)] * nvars
        approx = adaptive_approximate(benchmark.fun,
                                      univariate_variables,
                                      method="polynomial_chaos",
                                      options={
                                          "method": "leja",
                                          "options": {
                                              "max_nsamples": 100
                                          }
                                      }).approx
        nsamples = 100
        error = compute_l2_error(approx, benchmark.fun,
                                 approx.variable_transformation.variable,
                                 nsamples)
        assert error < 1e-12
Example #8
    def test_approximate_sparse_grid_discrete(self):
        def fun(samples):
            return np.cos(samples.sum(axis=0) / 20)[:, None]

        nvars = 2
        univariate_variables = [stats.binom(20, 0.5)] * nvars
        approx = adaptive_approximate(fun, univariate_variables,
                                      "sparse_grid").approx
        nsamples = 100
        error = compute_l2_error(approx, fun,
                                 approx.variable_transformation.variable,
                                 nsamples)
        assert error < 1e-12
        # Check that the Leja samples are nested. The sparse grid uses a
        # Christoffel-Leja sequence that does not change the preconditioner
        # every time an LU pivot is performed, but we can still enforce
        # nestedness by specifying the initial points. This test makes sure
        # this is done correctly.
        for ll in range(1, len(approx.samples_1d[0])):
            n = approx.samples_1d[0][ll - 1].shape[0]
            assert np.allclose(approx.samples_1d[0][ll][:n],
                               approx.samples_1d[0][ll - 1])
Example #9
    def test_analyze_sensitivity_sparse_grid(self):
        from pyapprox.benchmarks.benchmarks import setup_benchmark
        from pyapprox.approximate import adaptive_approximate
        benchmark = setup_benchmark("oakley")

        options = {'max_nsamples': 2000, 'verbose': 0}
        approx = adaptive_approximate(benchmark.fun,
                                      benchmark.variable.all_variables(),
                                      'sparse_grid', options).approx

        from pyapprox.approximate import compute_l2_error
        nsamples = 100
        error = compute_l2_error(approx,
                                 benchmark.fun,
                                 approx.variable_transformation.variable,
                                 nsamples,
                                 rel=True)
        # print(error)
        assert error < 3e-3

        res = analyze_sensitivity_sparse_grid(approx)
        assert np.allclose(res.main_effects, benchmark.main_effects, atol=2e-4)