Example #1
    def test_approximate_gaussian_process(self):
        from sklearn.gaussian_process.kernels import Matern
        num_vars = 1
        univariate_variables = [stats.uniform(-1, 2)] * num_vars
        variable = pya.IndependentMultivariateRandomVariable(
            univariate_variables)
        num_samples = 100
        train_samples = pya.generate_independent_random_samples(
            variable, num_samples)

        # Generate a random function as a kernel expansion with
        # Gaussian random weights
        nu = np.inf  # kernel smoothness; e.g. 2.5 gives a rougher kernel
        kernel = Matern(0.5, nu=nu)
        X = np.linspace(-1, 1, 1000)[np.newaxis, :]
        alpha = np.random.normal(0, 1, X.shape[1])
        train_vals = kernel(train_samples.T, X.T).dot(alpha)[:, np.newaxis]

        gp = approximate(train_samples, train_vals, 'gaussian_process', {
            'nu': nu,
            'noise_level': 1e-8
        }).approx

        error = np.linalg.norm(gp(X)[:, 0] -
                               kernel(X.T, X.T).dot(alpha)) / np.sqrt(
                                   X.shape[1])
        assert error < 1e-5
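
These test methods are lifted from the PyApprox test suite, so they omit their module-level imports. A minimal sketch of the imports the snippets assume (module paths taken from PyApprox; exact locations may differ between versions):

import copy
import numpy as np
from scipy import stats
import pyapprox as pya
from pyapprox import (IndependentMultivariateRandomVariable,
                      AffineRandomVariableTransformation)
from pyapprox.approximate import (approximate, compute_l2_error,
                                  cross_validate_pce_degree,
                                  cross_validate_approximation)
from pyapprox.benchmarks.benchmarks import setup_benchmark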
Example #2
    def test_approximate_polynomial_chaos_custom_poly_type(self):
        benchmark = setup_benchmark('ishigami', a=7, b=0.1)
        nvars = benchmark.variable.num_vars()
        # for this test purposefully select the wrong variable to make sure
        # the poly_type override is activated
        univariate_variables = [stats.beta(5, 5, -np.pi, 2 * np.pi)] * nvars
        variable = IndependentMultivariateRandomVariable(univariate_variables)
        var_trans = AffineRandomVariableTransformation(variable)
        # specify the correct basis so it is not chosen from the variable
        poly_opts = {'poly_type': 'legendre', 'var_trans': var_trans}
        options = {
            'poly_opts': poly_opts,
            'variable': variable,
            'options': {
                'max_num_step_increases': 1
            }
        }
        ntrain_samples = 400
        train_samples = np.random.uniform(-np.pi, np.pi,
                                          (nvars, ntrain_samples))
        train_vals = benchmark.fun(train_samples)
        approx = approximate(train_samples,
                             train_vals,
                             method='polynomial_chaos',
                             options=options).approx
        nsamples = 100
        error = compute_l2_error(approx,
                                 benchmark.fun,
                                 approx.var_trans.variable,
                                 nsamples,
                                 rel=True)
        print(error)
        assert error < 1e-4
        assert np.allclose(approx.mean(), benchmark.mean, atol=error)
Example #3
    def test_approximate_sparse_grid_user_options(self):
        nvars = 3
        benchmark = setup_benchmark('ishigami', a=7, b=0.1)
        univariate_variables = [stats.uniform(0, 1)] * nvars
        errors = []

        def callback(approx):
            nsamples = 1000
            error = compute_l2_error(approx, benchmark.fun,
                                     approx.variable_transformation.variable,
                                     nsamples)
            errors.append(error)

        univariate_quad_rule_info = [
            pya.clenshaw_curtis_in_polynomial_order,
            pya.clenshaw_curtis_rule_growth
        ]
        options = {
            'univariate_quad_rule_info': univariate_quad_rule_info,
            'max_nsamples': 110,
            'tol': 0,
            'verbose': False
        }
        approx = approximate(benchmark.fun, univariate_variables,
                             'sparse-grid', callback, options)
        assert np.min(errors) < 1e-12
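
The callback above is invoked by the adaptive algorithm as the sparse grid is refined, so errors records the convergence history. A small sketch of inspecting it once approximate returns (assuming one callback call per refinement step):

for step, err in enumerate(errors):
    print(f"refinement step {step}: l2 error {err:.2e}")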
Example #4
    def help_cross_validate_pce_degree(self, solver_type, solver_options):
        print(solver_type, solver_options)
        num_vars = 2
        univariate_variables = [stats.uniform(-1, 2)] * num_vars
        variable = pya.IndependentMultivariateRandomVariable(
            univariate_variables)
        var_trans = pya.AffineRandomVariableTransformation(variable)
        poly = pya.PolynomialChaosExpansion()
        poly_opts = pya.define_poly_options_from_variable_transformation(
            var_trans)
        poly.configure(poly_opts)

        degree = 3
        poly.set_indices(pya.compute_hyperbolic_indices(num_vars, degree, 1.0))
        # an oversampling factor of 2 does not pass the test but 2.2 does
        num_samples = int(poly.num_terms() * 2.2)
        coef = np.random.normal(0, 1, (poly.indices.shape[1], 2))
        # zero all coefficients above degree 2 for the first QoI so that
        # degree 2 is the best degree for that QoI
        coef[pya.nchoosek(num_vars + 2, 2):, 0] = 0
        poly.set_coefficients(coef)

        train_samples = pya.generate_independent_random_samples(
            variable, num_samples)
        train_vals = poly(train_samples)
        true_poly = poly

        poly = approximate(
            train_samples, train_vals, 'polynomial_chaos', {
                'basis_type': 'hyperbolic_cross',
                'variable': variable,
                'options': {
                    'verbose': 3,
                    'solver_type': solver_type,
                    'min_degree': 1,
                    'max_degree': degree + 1,
                    'linear_solver_options': solver_options
                }
            }).approx

        num_validation_samples = 10
        validation_samples = pya.generate_independent_random_samples(
            variable, num_validation_samples)
        assert np.allclose(poly(validation_samples),
                           true_poly(validation_samples))

        poly = copy.deepcopy(true_poly)
        approx_res = cross_validate_pce_degree(
            poly,
            train_samples,
            train_vals,
            1,
            degree + 1,
            solver_type=solver_type,
            linear_solver_options=solver_options)
        assert np.allclose(approx_res.degrees, [2, 3])
Example #5
    def test_approximate_sparse_grid_default_options(self):
        nvars = 3
        benchmark = setup_benchmark('ishigami', a=7, b=0.1)
        univariate_variables = [stats.uniform(0, 1)] * nvars
        approx = approximate(benchmark.fun, univariate_variables,
                             'sparse-grid')
        nsamples = 100
        error = compute_l2_error(approx, benchmark.fun,
                                 approx.variable_transformation.variable,
                                 nsamples)
        assert error < 1e-12
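
A short usage sketch for the returned sparse-grid object, assuming it is callable on a (nvars, nsamples) array as the compute_l2_error call above implies:

variable = approx.variable_transformation.variable
samples = pya.generate_independent_random_samples(variable, 10)
values = approx(samples)  # one row per sample, one column per QoI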
Example #6
    def test_pce_basis_expansion(self):
        num_vars = 2
        univariate_variables = [stats.uniform(-1, 2)] * num_vars
        variable = pya.IndependentMultivariateRandomVariable(
            univariate_variables)
        var_trans = pya.AffineRandomVariableTransformation(variable)
        poly = pya.PolynomialChaosExpansion()
        poly_opts = pya.define_poly_options_from_variable_transformation(
            var_trans)
        poly.configure(poly_opts)

        degree, hcross_strength = 7, 0.4
        poly.set_indices(
            pya.compute_hyperbolic_indices(num_vars, degree, hcross_strength))
        num_samples = poly.num_terms() * 2
        degrees = poly.indices.sum(axis=0)
        coef = np.random.normal(
            0, 1, (poly.indices.shape[1], 2)) / (degrees[:, np.newaxis] + 1)**2
        # set some coefficients to zero to make sure that different QoIs
        # are treated correctly
        idx = np.random.permutation(coef.shape[0])[:coef.shape[0] // 2]
        coef[idx, 0] = 0
        idx = np.random.permutation(coef.shape[0])[:coef.shape[0] // 2]
        coef[idx, 1] = 0
        poly.set_coefficients(coef)
        train_samples = pya.generate_independent_random_samples(
            variable, num_samples)
        train_vals = poly(train_samples)
        true_poly = poly

        poly = approximate(
            train_samples, train_vals, 'polynomial_chaos', {
                'basis_type': 'expanding_basis',
                'variable': variable,
                'options': {
                    'max_num_expansion_steps_iter': 1,
                    'verbose': 3,
                    'max_num_terms': 1000,
                    'max_num_step_increases': 2,
                    'max_num_init_terms': 33
                }
            }).approx

        num_validation_samples = 100
        validation_samples = pya.generate_independent_random_samples(
            variable, num_validation_samples)
        error = np.linalg.norm(
            poly(validation_samples) -
            true_poly(validation_samples)) / np.sqrt(num_validation_samples)
        assert np.allclose(poly(validation_samples),
                           true_poly(validation_samples),
                           atol=1e-8), error
Example #7
    def test_approximate_polynomial_chaos_default_options(self):
        nvars = 3
        benchmark = setup_benchmark('ishigami', a=7, b=0.1)
        univariate_variables = [stats.uniform(0, 1)] * nvars
        approx = approximate(benchmark.fun,
                             univariate_variables,
                             method='polynomial-chaos')
        nsamples = 100
        error = compute_l2_error(approx, benchmark.fun,
                                 approx.variable_transformation.variable,
                                 nsamples)
        assert error < 1e-12
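
Note the two calling conventions used across these snippets (taken from the snippets themselves; exact signatures may vary between PyApprox versions): the model-driven interface takes the function and its variables and samples adaptively, while the data-driven interface takes precomputed training data.

# model-driven, adaptive sampling (examples #3, #5, #7)
approx = approximate(benchmark.fun, univariate_variables, method='sparse-grid')
# data-driven, fixed training data (the remaining examples)
result = approximate(train_samples, train_vals, method='polynomial_chaos',
                     options=options)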
Example #8
    def test_pce_basis_expansion(self):
        num_vars = 2
        univariate_variables = [stats.uniform(-1, 2)] * num_vars
        variable = pya.IndependentMultivariateRandomVariable(
            univariate_variables)
        var_trans = pya.AffineRandomVariableTransformation(variable)
        poly = pya.PolynomialChaosExpansion()
        poly_opts = pya.define_poly_options_from_variable_transformation(
            var_trans)
        poly.configure(poly_opts)

        degree, hcross_strength = 7, 0.4
        poly.set_indices(
            pya.compute_hyperbolic_indices(num_vars, degree, hcross_strength))
        num_samples = poly.num_terms() * 2
        degrees = poly.indices.sum(axis=0)
        poly.set_coefficients((np.random.normal(0, 1, poly.indices.shape[1]) /
                               (degrees + 1)**2)[:, np.newaxis])
        train_samples = pya.generate_independent_random_samples(
            variable, num_samples)
        train_vals = poly(train_samples)
        true_poly = poly

        poly = approximate(train_samples, train_vals, 'polynomial_chaos', {
            'basis_type': 'expanding_basis',
            'variable': variable
        })

        num_validation_samples = 100
        validation_samples = pya.generate_independent_random_samples(
            variable, num_validation_samples)
        error = np.linalg.norm(
            poly(validation_samples) -
            true_poly(validation_samples)) / np.sqrt(num_validation_samples)
        assert np.allclose(poly(validation_samples),
                           true_poly(validation_samples),
                           atol=1e-8), error
Example #9
    def test_cross_validate_pce_degree(self):
        num_vars = 2
        univariate_variables = [stats.uniform(-1, 2)] * num_vars
        variable = pya.IndependentMultivariateRandomVariable(
            univariate_variables)
        var_trans = pya.AffineRandomVariableTransformation(variable)
        poly = pya.PolynomialChaosExpansion()
        poly_opts = pya.define_poly_options_from_variable_transformation(
            var_trans)
        poly.configure(poly_opts)

        degree = 3
        poly.set_indices(pya.compute_hyperbolic_indices(num_vars, degree, 1.0))
        num_samples = poly.num_terms() * 2
        poly.set_coefficients(
            np.random.normal(0, 1, (poly.indices.shape[1], 1)))
        train_samples = pya.generate_independent_random_samples(
            variable, num_samples)
        train_vals = poly(train_samples)
        true_poly = poly

        poly = approximate(train_samples, train_vals, 'polynomial_chaos', {
            'basis_type': 'hyperbolic_cross',
            'variable': variable
        })

        num_validation_samples = 10
        validation_samples = pya.generate_independent_random_samples(
            variable, num_validation_samples)
        assert np.allclose(poly(validation_samples),
                           true_poly(validation_samples))

        poly = copy.deepcopy(true_poly)
        poly, best_degree = cross_validate_pce_degree(poly, train_samples,
                                                      train_vals, 1,
                                                      degree + 2)
        assert best_degree == degree
Example #10
    def test_cross_validate_approximation_after_regularization_selection(self):
        """
        This test is useful as it shows how to use cross_validate_approximation
        to produce a list of approximations on each cross validation fold
        once regularization parameters have been chosen.
        These can be used to show variance in predictions of values, sensitivity
        indices, etc.

        Ideally this could be avoided if sklearn stored the coefficients 
        and alphas for each fold and then we can just find the coefficients
        that correspond to the first time the path drops below the best_alpha
        """
        num_vars = 2
        univariate_variables = [stats.uniform(-1, 2)] * num_vars
        variable = pya.IndependentMultivariateRandomVariable(
            univariate_variables)
        var_trans = pya.AffineRandomVariableTransformation(variable)
        poly = pya.PolynomialChaosExpansion()
        poly_opts = pya.define_poly_options_from_variable_transformation(
            var_trans)
        poly.configure(poly_opts)

        degree, hcross_strength = 7, 0.4
        poly.set_indices(
            pya.compute_hyperbolic_indices(num_vars, degree, hcross_strength))
        num_samples = poly.num_terms() * 2
        degrees = poly.indices.sum(axis=0)
        coef = np.random.normal(
            0, 1, (poly.indices.shape[1], 2)) / (degrees[:, np.newaxis] + 1)**2
        # set some coefficients to zero to make sure that different QoIs
        # are treated correctly
        idx = np.random.permutation(coef.shape[0])[:coef.shape[0] // 2]
        coef[idx, 0] = 0
        idx = np.random.permutation(coef.shape[0])[:coef.shape[0] // 2]
        coef[idx, 1] = 0
        poly.set_coefficients(coef)
        train_samples = pya.generate_independent_random_samples(
            variable, num_samples)
        train_vals = poly(train_samples)
        true_poly = poly

        result = approximate(train_samples, train_vals, 'polynomial_chaos', {
            'basis_type': 'expanding_basis',
            'variable': variable
        })

        # Even with the same folds, iterative methods such as Lars, LarsLasso
        # and OMP will not produce exactly the same cv_score from approximate
        # and cross_validate_approximation, because iterative methods
        # interpolate residuals to compute cross-validation scores
        nfolds = 10
        linear_solver_options = [{
            'alpha': result.reg_params[0]
        }, {
            'alpha': result.reg_params[1]
        }]
        indices = [
            result.approx.indices[:, np.where(np.absolute(c) > 0)[0]]
            for c in result.approx.coefficients.T
        ]
        options = {
            'basis_type': 'fixed',
            'variable': variable,
            'options': {
                'linear_solver_options': linear_solver_options,
                'indices': indices
            }
        }
        approx_list, residues_list, cv_score = cross_validate_approximation(
            train_samples,
            train_vals,
            options,
            nfolds,
            'polynomial_chaos',
            random_folds='sklearn')

        assert (np.all(cv_score < 1e-14) and np.all(result.scores < 1e-14))
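
As the docstring notes, the per-fold approximations can be used to quantify the spread in predictions. A minimal sketch of that usage (variable names reuse those in the test above; nothing here is a PyApprox API):

samples = pya.generate_independent_random_samples(variable, 1000)
# stack the first-QoI predictions of every fold's approximation
fold_vals = np.hstack([fold_approx(samples)[:, :1] for fold_approx in approx_list])
print("max prediction std across folds:", fold_vals.std(axis=1).max())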