    def test_approximate_polynomial_chaos_induced(self):
        nvars = 3
        benchmark = setup_benchmark("ishigami", a=7, b=0.1)
        # we can use univariate variables different from those specified by
        # the benchmark. In this case we use the same ones but set them up
        # here to demonstrate this functionality
        univariate_variables = [stats.uniform(0, 1)] * nvars
        # approx = adaptive_approximate(
        #     benchmark.fun, univariate_variables,
        #     method="polynomial_chaos",
        #     options={"method": "induced",
        #              "options": {"max_nsamples": 200,
        #                          "induced_sampling": True,
        #                          "cond_tol": 1e8}}).approx
        # nsamples = 100
        # error = compute_l2_error(
        #     approx, benchmark.fun, approx.variable_transformation.variable,
        #     nsamples)
        # print(error)
        # assert error < 1e-5
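        # NOTE: the commented-out variant above (induced_sampling=True)
        # presumably draws samples from the induced measure of the
        # polynomial basis; the variant below (induced_sampling=False)
        # samples from the probability measure instead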

        # probability sampling
        approx = adaptive_approximate(benchmark.fun,
                                      univariate_variables,
                                      method="polynomial_chaos",
                                      options={
                                          "method": "induced",
                                          "options": {
                                              "max_nsamples": 100,
                                              "induced_sampling": False,
                                              "cond_tol": 1e4,
                                              "max_level_1d": 4,
                                              "verbose": 3
                                          }
                                      }).approx
        nsamples = 100
        error = compute_l2_error(approx, benchmark.fun,
                                 approx.variable_transformation.variable,
                                 nsamples)
        print(error)
        assert error < 1e-5
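
        # For reference, compute_l2_error is (roughly) a Monte Carlo
        # estimate of the (relative) L2 difference between approx and fun;
        # a minimal sketch of the idea, with a hypothetical helper name:
        # def _l2_error_sketch(approx, fun, variable, nsamples, rel=False):
        #     samples = pya.generate_independent_random_samples(
        #         variable, nsamples)
        #     approx_vals, true_vals = approx(samples), fun(samples)
        #     error = np.linalg.norm(approx_vals - true_vals, axis=0)
        #     if rel:
        #         return error / np.linalg.norm(true_vals, axis=0)
        #     return error / np.sqrt(nsamples)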
    def test_approximate_polynomial_chaos_leja(self):
        nvars = 3
        benchmark = setup_benchmark("ishigami", a=7, b=0.1)
        # we can use univariate variables different from those specified by
        # the benchmark. In this case we use the same ones but set them up
        # here to demonstrate this functionality
        univariate_variables = [stats.uniform(0, 1)] * nvars
        approx = adaptive_approximate(benchmark.fun,
                                      univariate_variables,
                                      method="polynomial_chaos",
                                      options={
                                          "method": "leja",
                                          "options": {
                                              "max_nsamples": 100
                                          }
                                      }).approx
        nsamples = 100
        error = compute_l2_error(approx, benchmark.fun,
                                 approx.variable_transformation.variable,
                                 nsamples)
        assert error < 1e-12
    def test_approximate_polynomial_chaos_custom_poly_type(self):
        benchmark = setup_benchmark("ishigami", a=7, b=0.1)
        nvars = benchmark.variable.num_vars()
        # this test purposely selects the wrong variable to make sure the
        # poly_type override is activated
        univariate_variables = [stats.beta(5, 5, -np.pi, 2 * np.pi)] * nvars
        variable = pya.IndependentMultivariateRandomVariable(
            univariate_variables)
        var_trans = pya.AffineRandomVariableTransformation(variable)
        # specify the basis directly so it is not chosen from
        # var_trans.variable
        poly_opts = {"var_trans": var_trans}
        # but rather from another variable, which will invoke Legendre
        # polynomials
        basis_opts = pya.define_poly_options_from_variable(
            pya.IndependentMultivariateRandomVariable([stats.uniform()] *
                                                      nvars))
        poly_opts["poly_types"] = basis_opts
        options = {
            "poly_opts": poly_opts,
            "variable": variable,
            "options": {
                "max_num_step_increases": 1
            }
        }
        ntrain_samples = 400
        train_samples = np.random.uniform(-np.pi, np.pi,
                                          (nvars, ntrain_samples))
        train_vals = benchmark.fun(train_samples)
        approx = approximate(train_samples,
                             train_vals,
                             method="polynomial_chaos",
                             options=options).approx
        nsamples = 100
        error = compute_l2_error(approx,
                                 benchmark.fun,
                                 approx.var_trans.variable,
                                 nsamples,
                                 rel=True)
        # print(error)
        assert error < 1e-4
        assert np.allclose(approx.mean(), benchmark.mean, atol=error)
    def test_approximate_sparse_grid_discrete(self):
        def fun(samples):
            return np.cos(samples.sum(axis=0) / 20)[:, None]

        nvars = 2
        univariate_variables = [stats.binom(20, 0.5)] * nvars
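        # binomial (discrete) variables: the sparse grid builds univariate
        # Leja rules for the discrete measure (see the nestedness check
        # below)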
        approx = adaptive_approximate(fun, univariate_variables,
                                      "sparse_grid").approx
        nsamples = 100
        error = compute_l2_error(approx, fun,
                                 approx.variable_transformation.variable,
                                 nsamples)
        assert error < 1e-12
        # check the Leja samples are nested. The sparse grid uses a
        # Christoffel Leja sequence that does not change the preconditioner
        # every time an LU pivot is performed, but we can still enforce
        # nestedness by specifying initial points. This test makes sure
        # this is done correctly
        for ll in range(1, len(approx.samples_1d[0])):
            n = approx.samples_1d[0][ll - 1].shape[0]
            assert np.allclose(approx.samples_1d[0][ll][:n],
                               approx.samples_1d[0][ll - 1])
    def test_analyze_sensitivity_sparse_grid(self):
        from pyapprox.benchmarks.benchmarks import setup_benchmark
        from pyapprox.approximate import adaptive_approximate
        benchmark = setup_benchmark("oakley")

        options = {'max_nsamples': 2000, 'verbose': 0}
        approx = adaptive_approximate(benchmark.fun,
                                      benchmark.variable.all_variables(),
                                      'sparse_grid', options).approx

        from pyapprox.approximate import compute_l2_error
        nsamples = 100
        error = compute_l2_error(approx,
                                 benchmark.fun,
                                 approx.variable_transformation.variable,
                                 nsamples,
                                 rel=True)
        # print(error)
        assert error < 3e-3

        res = analyze_sensitivity_sparse_grid(approx)
        assert np.allclose(res.main_effects, benchmark.main_effects, atol=2e-4)
        # a nested helper (fragment): defined inside a test like the one
        # above, it closes over `benchmark` and an `errors` list from the
        # enclosing scope
        def callback(approx):
            nsamples = 1000
            error = compute_l2_error(approx, benchmark.fun,
                                     approx.variable_transformation.variable,
                                     nsamples)
            errors.append(error)
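        # a minimal usage sketch, assuming adaptive_approximate accepts a
        # callback keyword that is invoked with the current approximation
        # after each refinement step:
        # errors = []
        # approx = adaptive_approximate(
        #     benchmark.fun, benchmark.variable.all_variables(),
        #     "sparse_grid", callback=callback).approx
        # print(errors)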
    def test_analytic_sobol_indices_from_gaussian_process(self):
        from pyapprox.benchmarks.benchmarks import setup_benchmark
        from pyapprox.approximate import approximate
        benchmark = setup_benchmark("ishigami", a=7, b=0.1)
        nvars = benchmark.variable.num_vars()

        ntrain_samples = 500
        # train_samples = pya.generate_independent_random_samples(
        #     benchmark.variable, ntrain_samples)
        train_samples = pya.sobol_sequence(nvars,
                                           ntrain_samples,
                                           variable=benchmark.variable)
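        # quasi-Monte Carlo training design: a Sobol sequence mapped
        # through the distribution of benchmark.variable (instead of the
        # commented-out i.i.d. random design above)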

        train_vals = benchmark.fun(train_samples)
        approx = approximate(train_samples, train_vals, 'gaussian_process', {
            'nu': np.inf,
            'normalize_y': True,
            'alpha': 1e-10
        }).approx
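        # nu=np.inf selects the squared-exponential (RBF) limit of the
        # Matern kernel; normalize_y rescales the training values before
        # fitting; alpha adds a small diagonal nugget for numerical
        # stability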

        nsobol_samples = int(1e4)
        from pyapprox.approximate import compute_l2_error
        error = compute_l2_error(approx,
                                 benchmark.fun,
                                 benchmark.variable,
                                 nsobol_samples,
                                 rel=True)
        print(error)

        order = 2
        interaction_terms = compute_hyperbolic_indices(nvars, order)
        interaction_terms = interaction_terms[:,
                                              np.where(
                                                  interaction_terms.max(
                                                      axis=0) == 1)[0]]
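        # keep only the index columns whose entries are all zero or one,
        # i.e. the interaction subsets in which each variable appears at
        # most once (and at least one variable appears)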

        result = analytic_sobol_indices_from_gaussian_process(
            approx,
            benchmark.variable,
            interaction_terms,
            ngp_realizations=1000,
            stat_functions=(np.mean, np.std),
            ninterpolation_samples=2000,
            ncandidate_samples=3000,
            use_cholesky=False,
            alpha=1e-8)
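        # result is a nested dictionary indexed first by the quantity
        # ('mean', 'variance', 'sobol_indices', 'total_effects') and then
        # by the statistic computed over the GP realizations (e.g. 'mean',
        # 'values')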

        mean_mean = result['mean']['mean']
        mean_sobol_indices = result['sobol_indices']['mean']
        mean_total_effects = result['total_effects']['mean']
        mean_main_effects = mean_sobol_indices[:nvars]

        print(result['mean']['values'][-1])
        print(result['variance']['values'][-1])
        print(benchmark.main_effects[:, 0] - mean_main_effects)
        print(benchmark.total_effects[:, 0] - mean_total_effects)
        print(benchmark.sobol_indices[:-1, 0] - mean_sobol_indices)
        assert np.allclose(mean_mean, benchmark.mean, rtol=1e-3, atol=3e-3)
        assert np.allclose(mean_main_effects,
                           benchmark.main_effects[:, 0],
                           rtol=1e-3,
                           atol=3e-3)
        assert np.allclose(mean_total_effects,
                           benchmark.total_effects[:, 0],
                           rtol=1e-3,
                           atol=3e-3)
        assert np.allclose(mean_sobol_indices,
                           benchmark.sobol_indices[:-1, 0],
                           rtol=1e-3,
                           atol=3e-3)
    def test_sampling_based_sobol_indices_from_gaussian_process(self):
        from pyapprox.benchmarks.benchmarks import setup_benchmark
        from pyapprox.approximate import approximate
        benchmark = setup_benchmark("ishigami", a=7, b=0.1)
        nvars = benchmark.variable.num_vars()

        # nsobol_samples and ntrain_samples affect the assert tolerances
        ntrain_samples = 500
        nsobol_samples = int(1e4)
        train_samples = pya.generate_independent_random_samples(
            benchmark.variable, ntrain_samples)
        # from pyapprox import CholeskySampler
        # sampler = CholeskySampler(nvars, 10000, benchmark.variable)
        # kernel = pya.Matern(
        #     np.array([1]*nvars), length_scale_bounds='fixed', nu=np.inf)
        # sampler.set_kernel(kernel)
        # train_samples = sampler(ntrain_samples)[0]

        train_vals = benchmark.fun(train_samples)
        approx = approximate(train_samples, train_vals, 'gaussian_process', {
            'nu': np.inf,
            'normalize_y': True
        }).approx

        from pyapprox.approximate import compute_l2_error
        error = compute_l2_error(approx,
                                 benchmark.fun,
                                 benchmark.variable,
                                 nsobol_samples,
                                 rel=True)
        print('error', error)
        # assert error < 4e-2

        order = 2
        interaction_terms = compute_hyperbolic_indices(nvars, order)
        interaction_terms = interaction_terms[:,
                                              np.where(
                                                  interaction_terms.max(
                                                      axis=0) == 1)[0]]

        result = sampling_based_sobol_indices_from_gaussian_process(
            approx,
            benchmark.variable,
            interaction_terms,
            nsobol_samples,
            sampling_method='sobol',
            ngp_realizations=1000,
            normalize=True,
            nsobol_realizations=3,
            stat_functions=(np.mean, np.std),
            ninterpolation_samples=1000,
            ncandidate_samples=2000)
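        # nsobol_realizations=3 repeats the sampling-based estimator with
        # three independent Sobol-sequence sample sets, presumably to
        # average out sampling noise in addition to the GP uncertainty
        # captured by ngp_realizations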

        mean_mean = result['mean']['mean']
        mean_sobol_indices = result['sobol_indices']['mean']
        mean_total_effects = result['total_effects']['mean']
        mean_main_effects = mean_sobol_indices[:nvars]

        print(benchmark.mean - mean_mean)
        print(benchmark.main_effects[:, 0] - mean_main_effects)
        print(benchmark.total_effects[:, 0] - mean_total_effects)
        print(benchmark.sobol_indices[:-1, 0] - mean_sobol_indices)
        assert np.allclose(mean_mean, benchmark.mean, atol=3e-2)
        assert np.allclose(mean_main_effects,
                           benchmark.main_effects[:, 0],
                           atol=1e-2)
        assert np.allclose(mean_total_effects,
                           benchmark.total_effects[:, 0],
                           atol=1e-2)
        assert np.allclose(mean_sobol_indices,
                           benchmark.sobol_indices[:-1, 0],
                           atol=1e-2)