Example #1
0
    def test_compute_moment_matrix_using_tensor_product_quadrature(self):
        """
        Test use of density_function in
        compute_moment_matrix_using_tensor_product_quadrature().

        Builds an APC basis for a beta(2, 5) variable using a uniform
        tensor-product quadrature rule weighted by the beta density, then
        checks the resulting basis is orthonormal under Gauss-Jacobi
        quadrature for the true beta measure.
        """
        num_vars = 2
        alpha_stat = 2
        beta_stat = 5
        degree = 3

        # Canonical PCE: Legendre (jacobi with alpha=beta=0) on uniform.
        pce_var_trans = define_iid_random_variable_transformation(
            uniform(), num_vars)
        pce_opts = {
            'alpha_poly': 0,
            'beta_poly': 0,
            'var_trans': pce_var_trans,
            'poly_type': 'jacobi'
        }

        random_var_trans = define_iid_random_variable_transformation(
            beta(alpha_stat, beta_stat), num_vars)

        # Joint density is the tensor product of univariate beta pdfs.
        def univariate_pdf(x):
            return beta.pdf(x, a=alpha_stat, b=beta_stat)

        density_function = partial(tensor_product_pdf,
                                   univariate_pdfs=univariate_pdf)

        def uniform_univariate_quadrature_rule(n):
            # Gauss-Legendre rule mapped from [-1, 1] to [0, 1].
            x, w = gauss_jacobi_pts_wts_1D(n, 0, 0)
            x = (x + 1.) / 2.
            return x, w

        # Gauss-Jacobi rule matched to the beta(alpha_stat, beta_stat) weight.
        true_univariate_quadrature_rule = partial(gauss_jacobi_pts_wts_1D,
                                                  alpha_poly=beta_stat - 1,
                                                  beta_poly=alpha_stat - 1)

        compute_moment_matrix_function = partial(
            compute_moment_matrix_using_tensor_product_quadrature,
            num_samples=10 * degree,
            num_vars=num_vars,
            univariate_quadrature_rule=uniform_univariate_quadrature_rule,
            density_function=density_function)

        samples, weights = get_tensor_product_quadrature_rule(
            degree + 1,
            num_vars,
            true_univariate_quadrature_rule,
            transform_samples=random_var_trans.map_from_canonical_space)

        indices = compute_hyperbolic_indices(num_vars, degree, 1.0)

        apc = APC(compute_moment_matrix_function)
        apc.configure(pce_opts)
        apc.set_indices(indices)

        apc_basis_matrix = apc.basis_matrix(samples)

        # Orthonormality: B^T W B should be the identity.
        assert np.allclose(
            np.dot(apc_basis_matrix.T * weights, apc_basis_matrix),
            np.eye(apc_basis_matrix.shape[1]))
    def test_sample_based_apc_orthonormality(self):
        """
        Verify that an APC basis built from a Monte-Carlo estimate of the
        moment matrix is approximately orthonormal with respect to the
        sampling measure.
        """
        num_vars = 1
        alpha_stat = 2
        beta_stat = 5
        degree = 2

        pce_var_trans = define_iid_random_variable_transformation(
            stats.uniform(0, 1), num_vars)
        pce_opts = define_poly_options_from_variable_transformation(
            pce_var_trans)

        random_var_trans = define_iid_random_variable_transformation(
            stats.beta(alpha_stat, beta_stat), num_vars)

        # Samples used to estimate the moment (Gram) matrix.
        num_moment_samples = 10000
        moment_matrix_samples = generate_independent_random_samples(
            random_var_trans.variable, num_moment_samples)

        # Independent test samples used to check orthonormality.
        num_samples = 10000
        samples = generate_independent_random_samples(
            random_var_trans.variable, num_samples)

        pce = APC(partial(compute_moment_matrix_from_samples,
                          samples=moment_matrix_samples))
        pce.configure(pce_opts)
        pce.set_indices(compute_hyperbolic_indices(num_vars, degree, 1.0))

        basis_matrix = pce.basis_matrix(samples)
        gram = np.dot(basis_matrix.T, basis_matrix) / num_samples
        # Loose tolerance: both the basis and the check are sample based.
        assert np.allclose(gram, np.eye(basis_matrix.shape[1]), atol=1e-1)
Example #3
0
    def test_analytical_moment_based_apc_orthonormality_identity(self):
        """
        Test that when the correct orthonormal basis is used and integrated
        using quadrature that the rotation matrix is the identity. Test sets
        user domain to be different to canonical domain.
        """
        num_vars = 1
        # beta(1, 1) is the uniform distribution, so the canonical Legendre
        # basis is already orthonormal and no rotation should be needed.
        alpha_stat = 1
        beta_stat = 1
        degree = 2

        pce_var_trans = define_iid_random_variable_transformation(
            uniform(), num_vars)
        pce_opts = {
            'alpha_poly': 0,
            'beta_poly': 0,
            'var_trans': pce_var_trans,
            'poly_type': 'jacobi'
        }

        random_var_trans = define_iid_random_variable_transformation(
            beta(alpha_stat, beta_stat), num_vars)

        def univariate_quadrature_rule(n):
            # Gauss-Jacobi rule for the beta weight, with abscissas mapped
            # from the canonical domain to the user domain.
            x, w = gauss_jacobi_pts_wts_1D(n, beta_stat - 1, alpha_stat - 1)
            x = random_var_trans.map_from_canonical_space(
                x[np.newaxis, :])[0, :]
            return x, w

        # Test qr factorization to compute rotation matrix
        compute_moment_matrix_function = partial(
            compute_moment_matrix_using_tensor_product_quadrature,
            num_samples=10 * degree,
            num_vars=num_vars,
            univariate_quadrature_rule=univariate_quadrature_rule)

        pce = APC(compute_moment_matrix_function)
        pce.configure(pce_opts)
        indices = compute_hyperbolic_indices(num_vars, degree, 1.0)
        pce.set_indices(indices)

        # For an already-orthonormal basis the rotation must be the identity.
        assert np.allclose(pce.R_inv, np.eye(pce.R_inv.shape[0]))

        # Test cholesky factorization to compute rotation matrix
        def compute_grammian_function(basis_matrix_function, indices):
            # NOTE(review): the indices argument is unused here; the grammian
            # is formed as M^T M from the quadrature moment matrix.
            num_samples = 10 * degree
            basis_matrix = compute_moment_matrix_using_tensor_product_quadrature(
                basis_matrix_function,
                num_samples,
                num_vars,
                univariate_quadrature_rule=univariate_quadrature_rule)
            return basis_matrix.T.dot(basis_matrix)

        pce_chol = APC(compute_grammian_function=compute_grammian_function)
        pce_chol.configure(pce_opts)
        indices = compute_hyperbolic_indices(num_vars, degree, 1.0)
        pce_chol.set_indices(indices)

        assert np.allclose(pce_chol.R_inv, np.eye(pce_chol.R_inv.shape[0]))
Example #4
0
    def test_get_unrotated_basis_coefficients(self):
        """
        Check that coefficients of a rotated (APC) expansion can be mapped
        to coefficients of the unrotated basis: evaluating the unrotated
        basis with the mapped coefficients reproduces the APC values.
        """
        num_vars = 2
        alpha_stat = 2
        beta_stat = 5
        degree = 3

        pce_var_trans = define_iid_random_variable_transformation(
            uniform(), num_vars)
        pce_opts = {
            'alpha_poly': 0,
            'beta_poly': 0,
            'var_trans': pce_var_trans,
            'poly_type': 'jacobi'
        }

        random_var_trans = define_iid_random_variable_transformation(
            beta(alpha_stat, beta_stat), num_vars)

        # Joint density is the tensor product of univariate beta pdfs.
        def univariate_pdf(x):
            return beta.pdf(x, a=alpha_stat, b=beta_stat)

        density_function = partial(tensor_product_pdf,
                                   univariate_pdfs=univariate_pdf)

        def uniform_univariate_quadrature_rule(n):
            # Gauss-Legendre rule mapped from [-1, 1] to [0, 1].
            x, w = gauss_jacobi_pts_wts_1D(n, 0, 0)
            x = (x + 1.) / 2.
            return x, w

        compute_moment_matrix_function = partial(
            compute_moment_matrix_using_tensor_product_quadrature,
            num_samples=10 * degree,
            num_vars=num_vars,
            univariate_quadrature_rule=uniform_univariate_quadrature_rule,
            density_function=density_function)

        pce = APC(compute_moment_matrix_function)
        pce.configure(pce_opts)
        indices = compute_hyperbolic_indices(num_vars, degree, 1.0)
        pce.set_indices(indices)

        # set pce coefficients randomly
        pce.coefficients = np.random.normal(0., 1., (indices.shape[1], 1))

        unrotated_basis_coefficients = compute_coefficients_of_unrotated_basis(
            pce.coefficients, pce.R_inv)

        num_samples = 10
        samples = np.random.uniform(0., 1., (num_vars, num_samples))
        true_values = pce(samples)
        values = np.dot(pce.unrotated_basis_matrix(samples),
                        unrotated_basis_coefficients)
        assert np.allclose(values, true_values)
Example #5
0
    def test_compute_grammian_using_sparse_grid_quadrature(self):
        """
        Test compute_grammian_of_mixture_models_using_sparse_grid_quadrature().

        A single beta(2, 5) mixture component is integrated with sparse-grid
        Leja quadrature to build the APC grammian; the resulting basis must
        be orthonormal under Gauss-Jacobi quadrature for the true measure.
        """
        num_vars = 2
        alpha_stat = 2
        beta_stat = 5
        degree = 3

        pce_var_trans = define_iid_random_variable_transformation(
            uniform(), num_vars)
        pce_opts = {
            'alpha_poly': 0,
            'beta_poly': 0,
            'var_trans': pce_var_trans,
            'poly_type': 'jacobi'
        }

        random_var_trans = define_iid_random_variable_transformation(
            beta(alpha_stat, beta_stat, -1, 2), num_vars)

        from pyapprox.mixture_model import \
            get_leja_univariate_quadrature_rules_of_beta_mixture,\
            compute_grammian_of_mixture_models_using_sparse_grid_quadrature
        from pyapprox.univariate_quadrature import leja_growth_rule
        rv_params = [[alpha_stat, beta_stat]]
        mixtures, mixture_univariate_quadrature_rules = \
            get_leja_univariate_quadrature_rules_of_beta_mixture(
                rv_params, leja_growth_rule, None)

        # One growth rule per mixture component.
        mixture_univariate_growth_rules = [leja_growth_rule]
        compute_grammian_function = partial(
            compute_grammian_of_mixture_models_using_sparse_grid_quadrature,
            mixture_univariate_quadrature_rules=(
                mixture_univariate_quadrature_rules),
            mixture_univariate_growth_rules=mixture_univariate_growth_rules,
            num_vars=num_vars)

        pce = APC(compute_grammian_function=compute_grammian_function)
        pce.configure(pce_opts)
        indices = compute_hyperbolic_indices(num_vars, degree, 1.0)
        pce.set_indices(indices)

        # use Gauss quadrature for true distribution to integrate APC basis
        def univariate_quadrature_rule(n):
            return gauss_jacobi_pts_wts_1D(n, beta_stat - 1, alpha_stat - 1)

        samples, weights = get_tensor_product_quadrature_rule(
            degree + 1, num_vars, univariate_quadrature_rule)

        basis_matrix = pce.basis_matrix(samples)
        # Orthonormality: B^T W B should be the identity.
        assert np.allclose(np.dot(basis_matrix.T * weights, basis_matrix),
                           np.eye(basis_matrix.shape[1]))
Example #6
0
    def setup_sd_opt_problem(self, SDOptProblem):
        """
        Construct an SDOptProblem instance for the lognormal example.

        A degree-2 Hermite PCE basis is evaluated at a handful of standard
        normal samples; the problem is built from that basis matrix, the
        function values and equal sample probabilities.
        """
        from pyapprox.multivariate_polynomials import PolynomialChaosExpansion
        from pyapprox.variable_transformations import \
            define_iid_random_variable_transformation
        from pyapprox.indexing import compute_hyperbolic_indices

        num_vars = 1
        mu, sigma = 0, 1
        f, f_cdf, f_pdf, VaR, CVaR, ssd, ssd_disutil = \
            get_lognormal_example_exact_quantities(mu, sigma)

        nsamples = 4
        degree = 2
        samples = np.random.normal(0, 1, (1, nsamples))
        values = f(samples[0, :])[:, np.newaxis]

        var_trans = define_iid_random_variable_transformation(
            normal_rv(mu, sigma), num_vars)
        pce = PolynomialChaosExpansion()
        pce.configure({'poly_type': 'hermite', 'var_trans': var_trans})
        pce.set_indices(compute_hyperbolic_indices(1, degree, 1.))

        basis_matrix = pce.basis_matrix(samples)
        # Equal weighting of the Monte-Carlo samples.
        probabilities = np.ones((nsamples)) / nsamples

        return SDOptProblem(basis_matrix, values[:, 0], values[:, 0],
                            probabilities)
Example #7
0
    def test_uniform_2d_user_domain(self):
        """
        Interpolate x in U[0,1]^2: no initial pts, no candidate basis,
        no preconditioning, no pivot weights, no subset of points returned.
        """
        # Set PCE options
        num_vars = 2
        var_trans = define_iid_random_variable_transformation(
            uniform(0, 1), num_vars)
        pce_opts = define_poly_options_from_variable_transformation(var_trans)

        # Set oli options
        oli_opts = {'verbosity': 0, 'assume_non_degeneracy': False}

        # Total-degree basis of the given degree for each level.
        def basis_generator(num_vars, degree):
            return (degree + 1,
                    compute_hyperbolic_level_indices(num_vars, degree, 1.0))

        # define target function
        def model(x):
            return np.asarray([x[0]**2 + x[1]**2 + x[0] * x[1]]).T

        # define points to interpolate
        pts = get_tensor_product_points(1, var_trans, 'CC')
        helper_least_factorization(pts,
                                   model,
                                   var_trans,
                                   pce_opts,
                                   oli_opts,
                                   basis_generator,
                                   exact_mean=11. / 12.)
Example #8
0
    def test_identity_map_subset(self):
        """
        Check that variables flagged via set_identity_maps pass through the
        canonical-space maps unchanged, for both iid and mixed variables.
        """
        # iid case: mark variable 1 as an identity map
        num_vars = 3
        var_trans = define_iid_random_variable_transformation(
            stats.uniform(0, 1), num_vars)
        var_trans.set_identity_maps([1])

        samples = np.random.uniform(0, 1, (num_vars, 4))
        canonical_samples = var_trans.map_to_canonical_space(samples)
        # identity-mapped row is untouched by the forward map
        assert np.allclose(canonical_samples[1, :], samples[1, :])
        # round trip recovers the original samples
        assert np.allclose(
            var_trans.map_from_canonical_space(canonical_samples), samples)

        # mixed-variable case: mark variables 2 and 4 as identity maps
        univariate_variables = [
            stats.uniform(-1, 2),
            stats.beta(1, 1, -1, 2),
            stats.norm(-1, np.sqrt(4)),
            stats.uniform(),
            stats.uniform(-1, 2),
            stats.beta(2, 1, -2, 3)
        ]
        var_trans = AffineRandomVariableTransformation(univariate_variables)
        var_trans.set_identity_maps([4, 2])

        from pyapprox.probability_measure_sampling import \
            generate_independent_random_samples
        samples = generate_independent_random_samples(var_trans.variable, 10)
        canonical_samples = var_trans.map_to_canonical_space(samples)
        assert np.allclose(canonical_samples[[2, 4], :], samples[[2, 4], :])
        assert np.allclose(
            var_trans.map_from_canonical_space(canonical_samples), samples)
Example #9
0
    def test_uniform_3d_user_domain(self):
        """
        Interpolate x in U[0,1]^3: no initial pts, no candidate basis,
        no preconditioning, no pivot weights, no subset of points returned.
        """
        # Set PCE options
        num_vars = 3
        var_trans = define_iid_random_variable_transformation(
            uniform(), num_vars)
        pce_opts = {'poly_type': 'jacobi', 'alpha_poly': 0, 'beta_poly': 0,
                    'var_trans': var_trans}

        # Set oli options
        oli_opts = {'verbosity': 0,
                    'assume_non_degeneracy': False}

        # Total-degree basis of the given degree for each level.
        def basis_generator(num_vars, degree):
            return (degree + 1,
                    compute_hyperbolic_level_indices(num_vars, degree, 1.0))

        # define target function
        def model(x):
            return np.array(
                [np.sum(x**2, axis=0) + x[0]*x[1] + x[1]*x[2] +
                 x[0]*x[1]*x[2]]).T

        # define points to interpolate
        pts = get_tensor_product_points(2, var_trans, 'CC')
        helper_least_factorization(
            pts, model, var_trans, pce_opts, oli_opts,
            basis_generator, exact_mean=13./8.)
Example #10
0
    def test_christoffel_function(self):
        """
        Check christoffel_function against a direct computation of
        1 / sum_i phi_i(x)^2 and against Gauss quadrature weights.
        """
        num_vars = 1
        degree = 2
        alpha_poly = 0
        beta_poly = 0
        poly = PolynomialChaosExpansion()
        var_trans = define_iid_random_variable_transformation(
            uniform(-1, 2), num_vars)
        opts = define_poly_options_from_variable_transformation(var_trans)
        poly.configure(opts)
        indices = compute_hyperbolic_indices(num_vars, degree, 1.0)
        poly.set_indices(indices)

        num_samples = 11
        samples = np.linspace(-1., 1., num_samples)[np.newaxis, :]
        basis_matrix = poly.basis_matrix(samples)
        # Reciprocal of the squared row norms of the basis matrix.
        true_weights = 1. / np.linalg.norm(basis_matrix, axis=1)**2
        weights = 1. / christoffel_function(samples, poly.basis_matrix)
        assert weights.shape[0] == num_samples
        assert np.allclose(true_weights, weights)

        # For a Gaussian quadrature rule of degree p that exactly
        # integrates all polynomials up to and including degree 2p-1
        # the quadrature weights are the christoffel function
        # evaluated at the quadrature samples
        quad_samples, quad_weights = gauss_jacobi_pts_wts_1D(
            degree, alpha_poly, beta_poly)
        quad_samples = quad_samples[np.newaxis, :]
        weights = 1. / christoffel_function(quad_samples, poly.basis_matrix)
        assert np.allclose(weights, quad_weights)
Example #11
0
    def setup(self, num_vars, alpha_stat, beta_stat):
        """
        Build tensor-product beta weight functions (value and derivative)
        on the canonical domain and a PCE configured from a uniform
        variable transformation.

        Returns (weight_function, weight_function_deriv, poly).
        """
        def univariate_weight_function(x):
            return beta_pdf_on_ab(alpha_stat, beta_stat, -1, 1, x)

        def univariate_weight_function_deriv(x):
            return beta_pdf_derivative(alpha_stat, beta_stat, (x + 1) / 2) / 4

        univariate_fns = [univariate_weight_function] * num_vars
        univariate_derivs = [univariate_weight_function_deriv] * num_vars

        weight_function = partial(
            evaluate_tensor_product_function, univariate_fns)
        weight_function_deriv = partial(
            gradient_of_tensor_product_function,
            univariate_fns, univariate_derivs)

        # Sanity check the derivative with a forward finite difference.
        fd_deriv = (univariate_weight_function(0.5 + 1e-6) -
                    univariate_weight_function(0.5)) / 1e-6
        assert np.allclose(
            fd_deriv, univariate_weight_function_deriv(0.5), atol=1e-6)

        poly = PolynomialChaosExpansion()
        var_trans = define_iid_random_variable_transformation(
            stats.uniform(-2, 1), num_vars)
        poly.configure(
            define_poly_options_from_variable_transformation(var_trans))

        return weight_function, weight_function_deriv, poly
Example #12
0
    def test_uniform_2d_subset_of_points(self):
        """
        Interpolate x in U[0,1]^2 returning only a subset of the points:
        no initial pts, no candidate basis, no preconditioning,
        no pivot weights.
        """
        num_vars = 2
        var_trans = define_iid_random_variable_transformation(
            uniform(), num_vars)
        pce_opts = {'poly_type': 'jacobi', 'alpha_poly': 0, 'beta_poly': 0,
                    'var_trans': var_trans}

        # Set oli options
        oli_opts = {'verbosity': 0,
                    'assume_non_degeneracy': False}

        # Total-degree basis of the given degree for each level.
        def basis_generator(num_vars, degree):
            return (degree + 1,
                    compute_hyperbolic_level_indices(num_vars, degree, 1.0))

        # define target function
        def model(x):
            return np.asarray([x[0]**2 + x[1]**2 + x[0]*x[1]]).T

        # define points to interpolate
        pts = get_tensor_product_points(1, var_trans, 'CC')
        helper_least_factorization(
            pts, model, var_trans, pce_opts, oli_opts, basis_generator,
            max_num_pts=6, exact_mean=11./12.)
Example #13
0
    def test_fekete_gauss_lobatto(self):
        """
        Fekete points of a degree-3 Legendre basis selected from a dense
        grid on [-1, 1] should approximate the Gauss-Lobatto points.
        """
        num_vars = 1
        degree = 3
        num_candidate_samples = 10000

        def generate_candidate_samples(n):
            # Dense equispaced candidate grid on [-1, 1].
            return np.linspace(-1., 1., n)[np.newaxis, :]

        poly = PolynomialChaosExpansion()
        var_trans = define_iid_random_variable_transformation(
            uniform(-1, 2), num_vars)
        opts = define_poly_options_from_variable_transformation(var_trans)
        poly.configure(opts)
        indices = compute_hyperbolic_indices(num_vars, degree, 1.0)
        poly.set_indices(indices)

        def precond_func(matrix, samples):
            # Constant preconditioner (no reweighting of candidates).
            return 0.25 * np.ones(matrix.shape[0])

        samples, _ = get_fekete_samples(
            poly.basis_matrix, generate_candidate_samples,
            num_candidate_samples, preconditioning_function=precond_func)
        assert samples.shape[1] == degree + 1

        # The samples should be close to the Gauss-Lobatto samples
        gauss_lobatto_samples = np.asarray(
            [-1.0, -0.447213595499957939281834733746,
             0.447213595499957939281834733746, 1.0])
        assert np.allclose(np.sort(samples), gauss_lobatto_samples, atol=1e-1)
Example #14
0
    def test_fekete_rosenblatt_interpolation(self):
        """
        Build Fekete samples in the canonical space of a Rosenblatt-mapped
        2d density, interpolate a Genz function at the mapped samples and
        check the interpolant and its quadrature-based mean.
        """
        np.random.seed(2)
        degree = 3

        __, __, joint_density, limits = rosenblatt_example_2d(num_samples=1)
        num_vars = len(limits)//2

        rosenblatt_opts = {'limits': limits, 'num_quad_samples_1d': 20}
        var_trans_1 = RosenblattTransformation(
            joint_density, num_vars, rosenblatt_opts)
        # rosenblatt maps to [0,1] but polynomials of bounded variables
        # are in [-1,1] so add second transformation for this second mapping
        var_trans_2 = define_iid_random_variable_transformation(
            uniform(), num_vars)
        var_trans = TransformationComposition([var_trans_1, var_trans_2])

        poly = PolynomialChaosExpansion()
        poly.configure({'poly_type': 'jacobi', 'alpha_poly': 0.,
                        'beta_poly': 0., 'var_trans': var_trans})
        indices = compute_hyperbolic_indices(num_vars, degree, 1.0)
        poly.set_indices(indices)

        num_candidate_samples = 10000

        def generate_candidate_samples(n):
            # Arcsine-distributed candidates in [-1, 1]^num_vars.
            return np.cos(np.random.uniform(0., np.pi, (num_vars, n)))

        def precond_func(matrix, samples):
            return christoffel_weights(matrix)

        canonical_samples, data_structures = get_fekete_samples(
            poly.canonical_basis_matrix, generate_candidate_samples,
            num_candidate_samples, preconditioning_function=precond_func)
        samples = var_trans.map_from_canonical_space(canonical_samples)
        # The composed map must be invertible on the selected samples.
        assert np.allclose(
            canonical_samples, var_trans.map_to_canonical_space(samples))

        assert samples.max() <= 1 and samples.min() >= 0.

        c = np.random.uniform(0., 1., num_vars)
        c *= 20/c.sum()
        w = np.zeros_like(c)
        w[0] = np.random.uniform(0., 1., 1)
        genz_function = GenzFunction('oscillatory', num_vars, c=c, w=w)
        values = genz_function(samples)

        # Ensure coef produce an interpolant
        coef = interpolate_fekete_samples(
            canonical_samples, values, data_structures)
        poly.set_coefficients(coef)

        assert np.allclose(poly(samples), values)

        # compare mean computed using quadrature and mean computed using
        # first coefficient of expansion. This is not testing that mean
        # is correct because rosenblatt transformation introduces large error
        # which makes it hard to compute accurate mean from pce or quadrature
        quad_w = get_quadrature_weights_from_fekete_samples(
            canonical_samples, data_structures)
        values_at_quad_x = values[:, 0]
        assert np.allclose(
            np.dot(values_at_quad_x, quad_w), poly.mean())
    def setup(self, num_vars, alpha_stat, beta_stat):
        """
        Build tensor-product beta weight functions (value and derivative)
        and a PCE using Jacobi polynomials matched to the beta weight.

        Returns (weight_function, weight_function_deriv, poly).
        """
        def univariate_weight_function(x):
            # beta pdf rescaled to the canonical interval [-1, 1]
            return beta_pdf_on_ab(alpha_stat, beta_stat, -1, 1, x)

        def univariate_weight_function_deriv(x):
            # NOTE(review): the 1/4 presumably combines the chain-rule and
            # normalization factors of the affine map from [-1, 1] to
            # [0, 1] -- confirm against beta_pdf_derivative's conventions
            return beta_pdf_derivative(alpha_stat, beta_stat, (x + 1) / 2) / 4

        weight_function = partial(evaluate_tensor_product_function,
                                  [univariate_weight_function] * num_vars)

        weight_function_deriv = partial(
            gradient_of_tensor_product_function,
            [univariate_weight_function] * num_vars,
            [univariate_weight_function_deriv] * num_vars)

        # sanity check the derivative with a forward finite difference
        assert np.allclose((univariate_weight_function(0.5 + 1e-6) -
                            univariate_weight_function(0.5)) / 1e-6,
                           univariate_weight_function_deriv(0.5),
                           atol=1e-6)

        poly = PolynomialChaosExpansion()
        var_trans = define_iid_random_variable_transformation(
            uniform(-2, 1), num_vars)
        # Jacobi parameters chosen to match the beta(alpha_stat, beta_stat)
        # weight (alpha_poly = beta_stat - 1, beta_poly = alpha_stat - 1)
        poly_opts = {
            'alpha_poly': beta_stat - 1,
            'beta_poly': alpha_stat - 1,
            'var_trans': var_trans,
            'poly_type': 'jacobi'
        }
        poly.configure(poly_opts)

        return weight_function, weight_function_deriv, poly
Example #16
0
    def test_compute_grammian_of_mixture_models_using_sparse_grid_quadrature(
            self):
        """
        For a single uniform mixture component the grammian of a Legendre
        basis computed with sparse-grid Leja quadrature must be finite and
        diagonal.
        """
        num_vars = 2
        degree = 3
        # rv_params = [[6,2],[2,6]]
        rv_params = [[1, 1]]
        leja_basename = None
        mixtures, mixture_univariate_quadrature_rules = \
            get_leja_univariate_quadrature_rules_of_beta_mixture(
                rv_params, leja_growth_rule, leja_basename)

        var_trans = define_iid_random_variable_transformation(
            stats.uniform(-1, 2), num_vars)
        poly = PolynomialChaosExpansion()
        poly.configure(
            define_poly_options_from_variable_transformation(var_trans))
        indices = compute_hyperbolic_indices(num_vars, degree, 1.0)
        poly.set_indices(indices)

        # one growth rule per mixture component
        num_mixtures = len(rv_params)
        grammian_matrix = \
            compute_grammian_of_mixture_models_using_sparse_grid_quadrature(
                poly.basis_matrix, indices,
                mixture_univariate_quadrature_rules,
                [leja_growth_rule] * num_mixtures, num_vars)

        assert np.all(np.isfinite(grammian_matrix))

        if num_mixtures == 1:
            # only entries on the diagonal (an index integrated against
            # itself) may be non-zero
            nonzero = np.where(abs(grammian_matrix) > 1e-8)
            assert np.allclose(
                nonzero, np.tile(np.arange(indices.shape[1]), (2, 1)))
Example #17
0
    def test_compute_moment_matrix_using_tensor_product_quadrature(self):
        """
        Test FPC (with a truncation tolerance) using a moment matrix built
        by tensor-product Gauss-Jacobi quadrature for the true beta measure;
        the resulting basis should be orthonormal under that measure.
        """
        num_vars = 2
        alpha_stat = 2
        beta_stat = 5
        degree = 3

        # canonical PCE: Legendre on the uniform canonical domain
        pce_var_trans = define_iid_random_variable_transformation(
            uniform(), num_vars)
        pce_opts = {
            'alpha_poly': 0,
            'beta_poly': 0,
            'var_trans': pce_var_trans,
            # tolerance used by FPC when truncating the factorization
            'truncation_tol': 1e-5,
            'poly_type': 'jacobi'
        }

        # NOTE(review): random_var_trans is unused below
        random_var_trans = define_iid_random_variable_transformation(
            beta(alpha_stat, beta_stat), num_vars)

        def univariate_quadrature_rule(n):
            # Gauss-Jacobi rule for the beta weight mapped to [0, 1]
            x, w = gauss_jacobi_pts_wts_1D(n, beta_stat - 1, alpha_stat - 1)
            x = (x + 1) / 2.
            return x, w

        compute_moment_matrix_function = partial(
            compute_moment_matrix_using_tensor_product_quadrature,
            num_samples=10 * degree,
            num_vars=num_vars,
            univariate_quadrature_rule=univariate_quadrature_rule)

        pce = FPC(compute_moment_matrix_function)
        pce.configure(pce_opts)
        indices = compute_hyperbolic_indices(num_vars, degree, 1.0)
        pce.set_indices(indices)

        samples, weights = get_tensor_product_quadrature_rule(
            degree + 1, num_vars, univariate_quadrature_rule)

        basis_matrix = pce.basis_matrix(samples)
        # orthonormality check: B^T W B == I
        assert np.allclose(np.dot(basis_matrix.T * weights, basis_matrix),
                           np.eye(basis_matrix.shape[1]))
Example #18
0
    def test_beta_2d_preconditioning(self):
        """
        Interpolate a set of points using preconditioning. First select
        all initial points then add a subset of the remaining points.

        x in Beta(2,5)[0,1]^2
        """
        num_vars = 2
        alpha_stat = 2
        beta_stat = 5
        var_trans = define_iid_random_variable_transformation(
            beta(alpha_stat, beta_stat, -1, 2), num_vars)
        pce_opts = define_poly_options_from_variable_transformation(var_trans)

        # Set oli options
        oli_opts = {'verbosity': 0, 'assume_non_degeneracy': False}

        # Total-degree basis of the given degree for each level.
        def basis_generator(num_vars, degree):
            return (degree + 1,
                    compute_hyperbolic_level_indices(num_vars, degree, 1.0))

        univariate_beta_pdf = partial(beta.pdf, a=alpha_stat, b=beta_stat)

        def univariate_pdf(x):
            # Map the canonical domain [-1, 1] onto the pdf support [0, 1].
            return univariate_beta_pdf((x + 1.) / 2.) / 2.

        preconditioning_function = partial(tensor_product_pdf,
                                           univariate_pdfs=univariate_pdf)

        # define target function
        def model(x):
            return np.asarray([(x[0]**2 - 1) + (x[1]**2 - 1) + x[0] * x[1]]).T

        # define points to interpolate
        pts = generate_independent_random_samples(var_trans.variable, 12)
        initial_pts = np.array([pts[:, 0]]).T

        helper_least_factorization(
            pts,
            model,
            var_trans,
            pce_opts,
            oli_opts,
            basis_generator,
            initial_pts=initial_pts,
            max_num_pts=12,
            preconditioning_function=preconditioning_function)
    def test_lu_leja_interpolation(self):
        """
        Build LU-Leja samples for a degree-15 total-degree Legendre basis,
        interpolate a Genz oscillatory function at the mapped samples and
        check the interpolant and its quadrature-based integral.
        """
        num_vars = 2
        degree = 15

        poly = PolynomialChaosExpansion()
        var_trans = define_iid_random_variable_transformation(
            stats.uniform(), num_vars)
        opts = define_poly_options_from_variable_transformation(var_trans)
        poly.configure(opts)
        indices = compute_hyperbolic_indices(num_vars, degree, 1.0)
        poly.set_indices(indices)

        # candidates must be generated in canonical PCE space
        num_candidate_samples = 10000
        def generate_candidate_samples(n): return np.cos(
            np.random.uniform(0., np.pi, (num_vars, n)))

        # must use canonical_basis_matrix to generate basis matrix
        num_leja_samples = indices.shape[1]-1
        def precond_func(matrix, samples): return christoffel_weights(matrix)
        samples, data_structures = get_lu_leja_samples(
            poly.canonical_basis_matrix, generate_candidate_samples,
            num_candidate_samples, num_leja_samples,
            preconditioning_function=precond_func)
        samples = var_trans.map_from_canonical_space(samples)

        # mapped samples must lie in the uniform variable's support [0, 1]
        assert samples.max() <= 1 and samples.min() >= 0.

        c = np.random.uniform(0., 1., num_vars)
        c *= 20/c.sum()
        w = np.zeros_like(c)
        w[0] = np.random.uniform(0., 1., 1)
        genz_function = GenzFunction('oscillatory', num_vars, c=c, w=w)
        values = genz_function(samples)

        # Ensure coef produce an interpolant
        coef = interpolate_lu_leja_samples(samples, values, data_structures)

        # Ignore basis functions (columns) that were not considered during the
        # incomplete LU factorization
        poly.set_indices(poly.indices[:, :num_leja_samples])
        poly.set_coefficients(coef)

        assert np.allclose(poly(samples), values)

        quad_w = get_quadrature_weights_from_lu_leja_samples(
            samples, data_structures)
        values_at_quad_x = values[:, 0]

        # will get closer if degree is increased
        assert np.allclose(
            np.dot(values_at_quad_x, quad_w), genz_function.integrate(),
            atol=1e-4)
    def test_random_christoffel_sampling(self):
        """Sample the Christoffel-induced measure of a beta PCE and compare
        the sample mean against a tensor-product quadrature estimate."""
        num_vars = 2
        degree = 10

        # Jacobi(alpha_poly, beta_poly) polynomials pair with
        # Beta(beta_poly+1, alpha_poly+1) statistics
        alpha_poly, beta_poly = 1, 1
        alpha_stat, beta_stat = beta_poly + 1, alpha_poly + 1

        num_samples = int(1e4)
        poly = PolynomialChaosExpansion()
        var_trans = define_iid_random_variable_transformation(
            stats.beta(alpha_stat, beta_stat), num_vars)
        poly.configure(
            define_poly_options_from_variable_transformation(var_trans))
        poly.set_indices(compute_hyperbolic_indices(num_vars, degree, 1.0))

        univariate_pdf = partial(stats.beta.pdf, a=alpha_stat, b=beta_stat)
        probability_density = partial(
            tensor_product_pdf, univariate_pdfs=univariate_pdf)

        envelope_factor = 10

        def generate_proposal_samples(n):
            return np.random.uniform(0., 1., size=(num_vars, n))

        def proposal_density(x):
            # uniform proposal on [0, 1]^d
            return np.ones(x.shape[1])

        # unlike fekete and leja sampling can and should use
        # pce.basis_matrix here. If use canonical_basis_matrix then
        # densities must be mapped to this space also which can be difficult
        samples = random_induced_measure_sampling(
            num_samples, num_vars, poly.basis_matrix, probability_density,
            proposal_density, generate_proposal_samples, envelope_factor)

        def univariate_quadrature_rule(n):
            # Gauss-Jacobi rule mapped from [-1, 1] to [0, 1]
            pts, wts = gauss_jacobi_pts_wts_1D(n, alpha_poly, beta_poly)
            return (pts + 1) / 2, wts

        x, w = get_tensor_product_quadrature_rule(
            degree * 2 + 1, num_vars, univariate_quadrature_rule)
        # the Christoffel function integrates to one against the measure
        assert np.allclose(
            christoffel_function(x, poly.basis_matrix, True).dot(w), 1.0)
        assert np.allclose(x.dot(w), samples.mean(axis=1), atol=1e-2)
Example #21
0
    def test_uniform_2d_degenerate_initial_and_subset_points(self):
        """
        Interpolate a set of points, by first selecting all initial points
        which are degenerate then adding a subset of the remaining points.

        CHECK: Orthogonal least interpolation produces an interpolant but does
        not approximate the function exactly.

        x in U[0,1]^2
        """
        num_vars = 2
        var_trans = define_iid_random_variable_transformation(
            uniform(), num_vars)
        pce_opts = define_poly_options_from_variable_transformation(var_trans)

        # Set oli options
        oli_opts = {
            'verbosity': 0,
            'assume_non_degeneracy': False,
            'enforce_all_initial_points_used': True,
            'enforce_ordering_of_initial_points': True
        }

        def basis_generator(num_vars, degree):
            return (degree + 1,
                    compute_hyperbolic_level_indices(num_vars, degree, 1.0))

        # target: sum of degree-2 Legendre polynomials plus a cross term
        def model(x):
            return np.asarray([
                0.5 * (3 * x[0]**2 - 1) + 0.5 * (3 * x[1]**2 - 1)
                + x[0] * x[1]]).T

        # define points to interpolate
        pts = get_tensor_product_points(2, var_trans, 'CC')
        initial_pts = get_tensor_product_points(1, var_trans, 'CC')
        # degenerate initial points are expected to trigger a failure
        self.assertRaises(
            Exception, helper_least_factorization, pts, model, var_trans,
            pce_opts, oli_opts, basis_generator, initial_pts=initial_pts,
            max_num_pts=12, use_preconditioning=1)
Example #22
0
def bivariate_uniform_example():
    """Plot parameter sweeps of an oscillatory Genz function on U[0,1]^2."""
    num_vars = 2
    var_trans = define_iid_random_variable_transformation(
        uniform(), num_vars)
    coeffs = np.random.uniform(0., 1., num_vars)
    coeffs *= 20 / coeffs.sum()
    weights = np.zeros_like(coeffs)
    weights[0] = np.random.uniform(0., 1., 1)
    model = GenzFunction("oscillatory", num_vars, c=coeffs, w=weights)
    generate_parameter_sweeps_and_plot(
        model, {'ranges': var_trans.get_ranges()},
        "parameter-sweeps-test-dir/genz-parameter-sweeps-test.npz",
        'hypercube', num_sweeps=2, show=False)
    # png file save in test-dir do not remove dir if want to check png file
    shutil.rmtree('parameter-sweeps-test-dir/')
    plt.show()
    def test_oli_leja_interpolation(self):
        """Orthogonal-least-interpolation Leja samples interpolate data."""
        num_vars = 2
        degree = 5

        poly = PolynomialChaosExpansion()
        var_trans = define_iid_random_variable_transformation(
            stats.uniform(), num_vars)
        poly.configure(
            define_poly_options_from_variable_transformation(var_trans))
        indices = compute_hyperbolic_indices(num_vars, degree, 1.0)
        poly.set_indices(indices)

        num_candidate_samples = 10000

        # oli_leja requires candidates in user space [0, 1]^d
        def generate_candidate_samples(n):
            return (np.cos(
                np.random.uniform(0., np.pi, (num_vars, n))) + 1) / 2.

        num_leja_samples = indices.shape[1] - 3

        def precond_func(samples):
            return 1. / christoffel_function(samples, poly.basis_matrix)

        samples, data_structures = get_oli_leja_samples(
            poly, generate_candidate_samples, num_candidate_samples,
            num_leja_samples, preconditioning_function=precond_func)

        assert samples.max() <= 1 and samples.min() >= 0.

        # interpolate a simple polynomial: the sum of squared coordinates
        values = np.sum(samples**2, axis=0)[:, None]

        # Ensure we have produced an interpolant
        oli_solver = data_structures[0]
        poly = oli_solver.get_current_interpolant(samples, values)
        assert np.allclose(poly(samples), values)
Example #24
0
    def test_fekete_interpolation(self):
        """Fekete samples of a uniform PCE interpolate a Genz function."""
        num_vars = 2
        degree = 15

        poly = PolynomialChaosExpansion()
        var_trans = define_iid_random_variable_transformation(
            uniform(), num_vars)
        poly.configure(
            define_poly_options_from_variable_transformation(var_trans))
        poly.set_indices(compute_hyperbolic_indices(num_vars, degree, 1.0))

        # candidates must be generated in canonical PCE space
        num_candidate_samples = 10000

        def generate_candidate_samples(n):
            return np.cos(np.random.uniform(0., np.pi, (num_vars, n)))

        # must use canonical_basis_matrix to generate basis matrix
        def precond_func(matrix, samples):
            return christoffel_weights(matrix)

        samples, data_structures = get_fekete_samples(
            poly.canonical_basis_matrix, generate_candidate_samples,
            num_candidate_samples, preconditioning_function=precond_func)
        samples = var_trans.map_from_canonical_space(samples)

        assert samples.max() <= 1 and samples.min() >= 0.

        c = np.random.uniform(0., 1., num_vars)
        c *= 20 / c.sum()
        w = np.zeros_like(c)
        w[0] = np.random.uniform(0., 1., 1)
        genz_function = GenzFunction('oscillatory', num_vars, c=c, w=w)
        values = genz_function(samples)

        # Ensure coef produce an interpolant
        coef = interpolate_fekete_samples(samples, values, data_structures)
        poly.set_coefficients(coef)
        assert np.allclose(poly(samples), values)

        quad_w = get_quadrature_weights_from_fekete_samples(
            samples, data_structures)
        # increase degree if want smaller atol
        assert np.allclose(
            values[:, 0].dot(quad_w), genz_function.integrate(), atol=1e-4)
Example #25
0
    def test_pickle_affine_random_variable_transformation(self):
        """Check an iid beta variable transformation survives a pickle
        round trip (dump then load without raising)."""
        import pickle
        import os

        num_vars = 2
        alpha_stat = 2
        beta_stat = 10
        var_trans = define_iid_random_variable_transformation(
            stats.beta(alpha_stat, beta_stat, 0, 1), num_vars)

        filename = 'rv_trans.pkl'
        try:
            # serialize then deserialize; any failure raises and fails
            # the test
            with open(filename, 'wb') as f:
                pickle.dump(var_trans, f)
            with open(filename, 'rb') as f:
                pickle.load(f)
        finally:
            # previously the file leaked when dump/load raised; always
            # clean up the temporary file
            if os.path.exists(filename):
                os.remove(filename)
Example #26
0
    def test_solve_linear_system_method(self):
        """Rotation from a linear-system solve matches both the
        Gram-Schmidt construction and the APC moment-matrix path."""
        num_vars = 1
        alpha_stat = 2
        beta_stat = 2
        degree = 2

        pce_var_trans = define_iid_random_variable_transformation(
            uniform(), num_vars)
        pce_opts = {
            'alpha_poly': 0,
            'beta_poly': 0,
            'var_trans': pce_var_trans,
            'poly_type': 'jacobi'
        }

        pce = PolynomialChaosExpansion()
        pce.configure(pce_opts)
        indices = compute_hyperbolic_indices(num_vars, degree, 1.0)
        pce.set_indices(indices)

        def univariate_quadrature_rule(n):
            # Gauss-Jacobi rule mapped from [-1, 1] to [0, 1]
            pts, wts = gauss_jacobi_pts_wts_1D(
                n, beta_stat - 1, alpha_stat - 1)
            return (pts + 1) / 2., wts

        poly_moments = \
            compute_polynomial_moments_using_tensor_product_quadrature(
                pce.basis_matrix, 2 * degree, num_vars,
                univariate_quadrature_rule)

        # linear-system and Gram-Schmidt rotations must agree
        R_inv = compute_rotation_from_moments_linear_system(poly_moments)
        R_inv_gs = compute_rotation_from_moments_gram_schmidt(poly_moments)
        assert np.allclose(R_inv, R_inv_gs)

        compute_moment_matrix_function = partial(
            compute_moment_matrix_using_tensor_product_quadrature,
            num_samples=10 * degree,
            num_vars=num_vars,
            univariate_quadrature_rule=univariate_quadrature_rule)

        apc = APC(compute_moment_matrix_function)
        apc.configure(pce_opts)
        apc.set_indices(indices)
        assert np.allclose(R_inv, apc.R_inv)
Example #27
0
    def test_oli_leja_interpolation(self):
        """Orthogonal-least-interpolation Leja samples of a uniform PCE
        interpolate an oscillatory Genz function exactly."""
        num_vars = 2
        degree = 5

        poly = PolynomialChaosExpansion()
        var_trans = define_iid_random_variable_transformation(
            uniform(), num_vars)
        opts = define_poly_options_from_variable_transformation(var_trans)
        poly.configure(opts)
        indices = compute_hyperbolic_indices(num_vars, degree, 1.0)
        poly.set_indices(indices)

        num_candidate_samples = 10000

        # oli_leja requires candidates in the user space [0, 1]^d.
        # NOTE: a dead duplicate lambda that generated canonical-space
        # candidates and was immediately overwritten has been removed.
        def generate_candidate_samples(n):
            return (np.cos(
                np.random.uniform(0., np.pi, (num_vars, n))) + 1) / 2.

        num_leja_samples = indices.shape[1] - 1

        def precond_func(samples):
            return 1. / christoffel_function(samples, poly.basis_matrix)

        samples, data_structures = get_oli_leja_samples(
            poly, generate_candidate_samples,
            num_candidate_samples, num_leja_samples,
            preconditioning_function=precond_func)

        assert samples.max() <= 1 and samples.min() >= 0.

        c = np.random.uniform(0., 1., num_vars)
        c *= 20 / c.sum()
        w = np.zeros_like(c)
        w[0] = np.random.uniform(0., 1., 1)
        genz_function = GenzFunction('oscillatory', num_vars, c=c, w=w)
        values = genz_function(samples)

        # Ensure we have produced an interpolant
        oli_solver = data_structures[0]
        poly = oli_solver.get_current_interpolant(samples, values)
        assert np.allclose(poly(samples), values)
Example #28
0
    def test_transformation_composition_I(self):
        """Composing a Rosenblatt map with a uniform affine map reproduces
        the reference samples in both directions."""
        np.random.seed(2)
        true_samples, true_canonical_samples, joint_density, limits = \
            rosenblatt_example_2d(num_samples=10)

        #  rosenblatt_example_2d is defined on [0,1] remap to [-1,1]
        true_canonical_samples = true_canonical_samples * 2 - 1

        num_vars = 2
        opts = {'limits': limits, 'num_quad_samples_1d': 100}
        var_trans = TransformationComposition([
            RosenblattTransformation(joint_density, num_vars, opts),
            define_iid_random_variable_transformation(
                stats.uniform(0, 1), num_vars)])

        # forward: canonical -> user space
        samples = var_trans.map_from_canonical_space(true_canonical_samples)
        assert np.allclose(true_samples, samples)

        # inverse: user -> canonical space
        canonical_samples = var_trans.map_to_canonical_space(samples)
        assert np.allclose(true_canonical_samples, canonical_samples)
    def test_compute_rotation_using_moments(self):
        """APC built from precomputed moments matches the APC built from a
        moment-matrix callback."""
        num_vars = 1
        alpha_stat = 2
        beta_stat = 2
        degree = 2

        pce_var_trans = define_iid_random_variable_transformation(
            stats.uniform(), num_vars)
        pce_opts = define_poly_options_from_variable_transformation(
            pce_var_trans)

        pce = PolynomialChaosExpansion()
        pce.configure(pce_opts)
        indices = compute_hyperbolic_indices(num_vars, degree, 1.0)
        pce.set_indices(indices)

        def univariate_quadrature_rule(n):
            # Gauss-Jacobi rule mapped from [-1, 1] to [0, 1]
            pts, wts = gauss_jacobi_pts_wts_1D(
                n, beta_stat - 1, alpha_stat - 1)
            return (pts + 1) / 2., wts

        poly_moments = \
            compute_polynomial_moments_using_tensor_product_quadrature(
                pce.basis_matrix, 2 * degree, num_vars,
                univariate_quadrature_rule)

        # APC constructed directly from the moments
        apc1 = APC(moments=poly_moments)
        apc1.configure(pce_opts)
        apc1.set_indices(indices)

        # APC constructed from a moment-matrix callback
        apc2 = APC(partial(
            compute_moment_matrix_using_tensor_product_quadrature,
            num_samples=10 * degree, num_vars=num_vars,
            univariate_quadrature_rule=univariate_quadrature_rule))
        apc2.configure(pce_opts)
        apc2.set_indices(indices)

        assert np.allclose(apc1.R_inv, apc2.R_inv)
Example #30
0
    def test_uniform_2d_initial_and_subset_points(self):
        """
        Interpolate a set of points, by first selecting all initial points
        which are NOT degenerate then adding a subset of the remaining points.

        CHECK: Orthogonal least interpolation produces an interpolant but does
        not approximate the function exactly.

        x in U[0,1]^2
        """
        num_vars = 2
        var_trans = define_iid_random_variable_transformation(
            uniform(), num_vars)
        pce_opts = {'poly_type': 'jacobi', 'alpha_poly': 0, 'beta_poly': 0,
                    'var_trans': var_trans}

        # Set oli options
        oli_opts = {'verbosity': 0,
                    'assume_non_degeneracy': False,
                    'enforce_all_initial_points_used': False,
                    'enforce_ordering_of_initial_points': False}

        def basis_generator(num_vars, degree):
            return (degree + 1,
                    compute_tensor_product_level_indices(num_vars, degree))

        # target: sum of degree-2 Legendre polynomials plus a cross term
        def model(x):
            return np.asarray(
                [0.5 * (3 * x[0]**2 - 1) + 0.5 * (3 * x[1]**2 - 1)
                 + x[0] * x[1]]).T

        # define points to interpolate
        pts = get_tensor_product_points(2, var_trans, 'CC')
        initial_pts = get_tensor_product_points(1, var_trans, 'CC')
        helper_least_factorization(
            pts, model, var_trans, pce_opts, oli_opts, basis_generator,
            initial_pts=initial_pts, max_num_pts=12)