def test_group_like_terms(self):
        num_vars = 2
        degree = 2

        # define two sets of indices that have a non-empty intersection
        indices1 = compute_hyperbolic_indices(num_vars, degree, 1.0)
        indices2 = compute_hyperbolic_indices(num_vars, degree-1, 1.0)
        num_indices1 = indices1.shape[1]
        coeffs = np.arange(num_indices1+indices2.shape[1])
        indices1 = np.hstack((indices1,indices2))

        # make it so coefficients increase by 1 with the lexicographical
        # order of the combined indices
        indices = indices1[:,argsort_indices_leixographically(indices1)]
        coeffs, indices = group_like_terms(coeffs, indices)

        # Check that only unique indices remain
        assert indices.shape[1] == num_indices1
        #print_indices(indices,num_vars)
        true_indices = np.asarray([[0,0],[0,1],[1,0],[0,2],[1,1],[2,0]]).T
        sorted_idx = argsort_indices_leixographically(indices)
        assert np.allclose(true_indices,indices[:,sorted_idx])

        # check that the coefficient of each unique index is the sum of the
        # coefficients of all matching original indices
        true_coeffs = [1, 5, 9, 6, 7, 8]
        assert np.allclose(coeffs[sorted_idx][:, 0], true_coeffs)
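A note on the operation under test: group_like_terms merges duplicate multi-indices and sums their coefficients. A minimal sketch of that behaviour, assuming indices is a (num_vars, num_terms) integer array with one multi-index per column and coeffs aligned with its columns (the helper name is hypothetical, not the pyapprox API):

import numpy as np

def group_like_terms_reference(coeffs, indices):
    # merge duplicate columns and sum the coefficients of matching terms
    unique_indices, inverse = np.unique(indices, axis=1, return_inverse=True)
    grouped_coeffs = np.zeros(unique_indices.shape[1])
    np.add.at(grouped_coeffs, inverse, np.asarray(coeffs, dtype=float))
    return grouped_coeffs, unique_indices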
Example #2
    def test_multiply_pce(self):
        np.random.seed(1)
        np.set_printoptions(precision=16)
        univariate_variables = [stats.norm(), stats.uniform()]
        variable = IndependentMultivariateRandomVariable(univariate_variables)
        degree1, degree2 = 1, 2
        poly1 = get_polynomial_from_variable(variable)
        poly1.set_indices(
            compute_hyperbolic_indices(variable.num_vars(), degree1))
        poly2 = get_polynomial_from_variable(variable)
        poly2.set_indices(
            compute_hyperbolic_indices(variable.num_vars(), degree2))

        # coef1 = np.random.normal(0,1,(poly1.indices.shape[1],1))
        # coef2 = np.random.normal(0,1,(poly2.indices.shape[1],1))
        coef1 = np.arange(poly1.indices.shape[1])[:, np.newaxis]
        coef2 = np.arange(poly2.indices.shape[1])[:, np.newaxis]
        poly1.set_coefficients(coef1)
        poly2.set_coefficients(coef2)

        poly3 = poly1 * poly2
        samples = generate_independent_random_samples(variable, 10)
        assert np.allclose(poly3(samples), poly1(samples) * poly2(samples))

        for order in range(4):
            poly = poly1**order
            assert np.allclose(poly(samples), poly1(samples)**order)
Example #3
    def test_multiply_multivariate_polynomials(self):
        num_vars = 2
        degree1 = 1
        degree2 = 2

        indices1 = compute_hyperbolic_indices(num_vars, degree1, 1.0)
        coeffs1 = np.ones((indices1.shape[1], 1), dtype=float)
        indices2 = compute_hyperbolic_indices(num_vars, degree2, 1.0)
        coeffs2 = 2.0*np.ones((indices2.shape[1], 1), dtype=float)

        indices, coeffs = multiply_multivariate_polynomials(
            indices1, coeffs1, indices2, coeffs2)

        samples = np.random.uniform(-1, 1, (num_vars, indices.shape[1]*3))
        values = monomial_basis_matrix(indices1, samples).dot(coeffs1) * \
            monomial_basis_matrix(indices2, samples).dot(coeffs2)
        
        true_indices = compute_hyperbolic_indices(
            num_vars, degree1+degree2, 1.0)
        basis_mat = monomial_basis_matrix(true_indices, samples)
        true_coeffs = np.linalg.lstsq(basis_mat, values, rcond=None)[0]
        true_indices, true_coeffs = compress_and_sort_polynomial(
            true_coeffs, true_indices)
        indices, coeffs = compress_and_sort_polynomial(coeffs, indices)
        assert np.allclose(true_indices, indices)
        assert np.allclose(true_coeffs, coeffs)
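The product verified above by least squares can also be built directly: in the monomial basis, exponents add and coefficients multiply for every pair of terms. A minimal sketch under the same (num_vars, num_terms) index convention with (num_terms, 1) coefficients; the helper name is hypothetical and like terms are left ungrouped (group_like_terms would merge them):

import numpy as np

def multiply_monomials_reference(indices1, coeffs1, indices2, coeffs2):
    num_vars = indices1.shape[0]
    # exponents add for every pair of terms ...
    new_indices = (indices1[:, :, None] + indices2[:, None, :]).reshape(
        num_vars, -1)
    # ... and the corresponding coefficients multiply
    new_coeffs = (coeffs1[:, 0][:, None]*coeffs2[:, 0][None, :]).reshape(-1, 1)
    return new_indices, new_coeffs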
Example #4
    def test_compute_downward_closed_indices(self):
        num_vars, degree = [2, 5]
        downward_closed_indices = compute_downward_closed_indices(
            num_vars, partial(total_degree_admissibility_criteria, degree))
        total_degree_indices = compute_hyperbolic_indices(num_vars, degree, 1)
        assert np.allclose(
            sort_indices_lexiographically(total_degree_indices),
            sort_indices_lexiographically(downward_closed_indices))

        num_vars, degree = [5, 5]
        downward_closed_indices = compute_downward_closed_indices(
            num_vars, partial(pnorm_admissibility_criteria, degree, 0.4))
        pnorm_indices = compute_hyperbolic_indices(num_vars, degree, 0.4)
        assert np.allclose(
            sort_indices_lexiographically(pnorm_indices),
            sort_indices_lexiographically(downward_closed_indices))

        num_vars, degree = [2, 5]
        anisotropic_weights = np.asarray([1, 2])
        min_weight = np.asarray(anisotropic_weights).min()
        admissibility_criteria = partial(anisotropic_admissibility_criteria,
                                         anisotropic_weights, min_weight,
                                         degree)
        downward_closed_indices = compute_downward_closed_indices(
            num_vars, admissibility_criteria)
        anisotropic_indices = compute_anisotropic_indices(
            num_vars, degree, anisotropic_weights)
        assert np.allclose(
            sort_indices_lexiographically(anisotropic_indices),
            sort_indices_lexiographically(downward_closed_indices))
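The admissibility criteria passed above decide, index by index, whether a candidate multi-index belongs to the set. A minimal sketch of the total-degree case, assuming the criterion receives the fixed degree (bound via partial) and a single multi-index; the actual pyapprox signature may differ:

import numpy as np

def total_degree_admissibility_reference(degree, index):
    # admit a multi-index if its total degree does not exceed the bound
    return np.asarray(index).sum() <= degree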
Example #5
    def test_multiply_multivariate_orthonormal_polynomial_expansions(self):
        univariate_variables = [stats.norm(), stats.uniform()]
        variable = IndependentMultivariateRandomVariable(univariate_variables)

        degree1, degree2 = 3, 2
        poly1 = get_polynomial_from_variable(variable)
        poly1.set_indices(
            compute_hyperbolic_indices(variable.num_vars(), degree1))
        poly1.set_coefficients(
            np.random.normal(0, 1, (poly1.indices.shape[1], 1)))
        poly2 = get_polynomial_from_variable(variable)
        poly2.set_indices(
            compute_hyperbolic_indices(variable.num_vars(), degree2))
        poly2.set_coefficients(
            np.random.normal(0, 1, (poly2.indices.shape[1], 1)))

        max_degrees1 = poly1.indices.max(axis=1)
        max_degrees2 = poly2.indices.max(axis=1)
        product_coefs_1d = compute_product_coeffs_1d_for_each_variable(
            poly1, max_degrees1, max_degrees2)

        indices, coefs = \
            multiply_multivariate_orthonormal_polynomial_expansions(
                product_coefs_1d, poly1.get_indices(),
                poly1.get_coefficients(), poly2.get_indices(),
                poly2.get_coefficients())

        poly3 = get_polynomial_from_variable(variable)
        poly3.set_indices(indices)
        poly3.set_coefficients(coefs)

        samples = generate_independent_random_samples(variable, 10)
        # print(poly3(samples),poly1(samples)*poly2(samples))
        assert np.allclose(poly3(samples), poly1(samples) * poly2(samples))
Example #6
    def test_analytical_moment_based_apc_orthonormality_identity(self):
        """
        Test that when the correct orthonormal basis is used and integrated
        using quadrature that the rotation matrix is the identity. Test sets 
        user domain to be different to canonical domain
        """
        num_vars = 1
        alpha_stat = 1
        beta_stat = 1
        degree = 2

        pce_var_trans = define_iid_random_variable_transformation(
            uniform(), num_vars)
        pce_opts = {
            'alpha_poly': 0,
            'beta_poly': 0,
            'var_trans': pce_var_trans,
            'poly_type': 'jacobi'
        }

        random_var_trans = define_iid_random_variable_transformation(
            beta(alpha_stat, beta_stat), num_vars)

        def univariate_quadrature_rule(n):
            x, w = gauss_jacobi_pts_wts_1D(n, beta_stat - 1, alpha_stat - 1)
            x = random_var_trans.map_from_canonical_space(
                x[np.newaxis, :])[0, :]
            return x, w

        # Test QR factorization to compute the rotation matrix
        compute_moment_matrix_function = partial(
            compute_moment_matrix_using_tensor_product_quadrature,
            num_samples=10 * degree,
            num_vars=num_vars,
            univariate_quadrature_rule=univariate_quadrature_rule)

        pce = APC(compute_moment_matrix_function)
        pce.configure(pce_opts)
        indices = compute_hyperbolic_indices(num_vars, degree, 1.0)
        pce.set_indices(indices)

        assert np.allclose(pce.R_inv, np.eye(pce.R_inv.shape[0]))

        # Test Cholesky factorization to compute the rotation matrix
        def compute_grammian_function(basis_matrix_function, indices):
            num_samples = 10 * degree
            basis_matrix = compute_moment_matrix_using_tensor_product_quadrature(
                basis_matrix_function,
                num_samples,
                num_vars,
                univariate_quadrature_rule=univariate_quadrature_rule)
            return basis_matrix.T.dot(basis_matrix)

        pce_chol = APC(compute_grammian_function=compute_grammian_function)
        pce_chol.configure(pce_opts)
        indices = compute_hyperbolic_indices(num_vars, degree, 1.0)
        pce_chol.set_indices(indices)

        assert np.allclose(pce_chol.R_inv, np.eye(pce_chol.R_inv.shape[0]))
Example #7
    def test_moments_of_active_subspace(self):
        num_vars = 3
        num_active_vars = 2
        degree = 2
        A = np.random.normal(0, 1, (num_vars, num_vars))
        Q, R = np.linalg.qr(A)
        W1 = Q[:, :num_active_vars]

        as_poly_indices = compute_hyperbolic_indices(num_active_vars, degree,
                                                     1.0)
        moments = moments_of_active_subspace(W1.T, as_poly_indices,
                                             monomial_mean_uniform_variables)

        sorted_idx = argsort_indices_leixographically(as_poly_indices)
        # (ax+by+cz)^2 = a^2 x^2 + 2ab xy + 2ac xz + b^2 y^2 + 2bc yz + c^2 z^2
        # With x, y, z iid uniform on [-1, 1] the cross terms have zero mean,
        # so E[(ax+by+cz)^2] = (a^2+b^2+c^2)/3
        true_moments = [1, 0, 0,
                        # note: if W1 has columns with unit norm then
                        # np.sum(W1[:, 1]**2) is always one
                        np.sum(W1[:, 1]**2)*1./3.,
                        1./3.*(W1[0, 0]*W1[0, 1]+W1[1, 0]*W1[1, 1] +
                               W1[2, 0]*W1[2, 1]),
                        np.sum(W1[:, 0]**2)*1./3.]
        assert np.allclose(moments[sorted_idx], true_moments)

        num_vars = 3
        num_active_vars = 2
        degree = 4
        A = np.random.normal(0, 1, (num_vars, num_vars))
        Q, R = np.linalg.qr(A)
        W1 = Q[:, :num_active_vars]
        # overwrite W1 with fixed integer values so the true moments below
        # can be written explicitly
        W1[:, 0] = [1, 2, 3]
        W1[:, 1] = [4, 5, 6]
        as_poly_indices = compute_hyperbolic_indices(num_active_vars, degree,
                                                     1.0)
        moments = moments_of_active_subspace(W1.T, as_poly_indices,
                                             monomial_mean_uniform_variables)
        sorted_idx = argsort_indices_leixographically(as_poly_indices)
        a, b, c = W1[:, 0]
        d, e, f = W1[:, 1]
        dummy = np.inf  # yet to analytically compute true moments for these indices
        true_moments = np.array([
            1, 0, 0,
            np.sum(W1[:, 1]**2) * 1. / 3., 1. / 3. * (c * f + b * e + a * d),
            np.sum(W1[:, 0]**2) * 1. / 3., 0., 0., 0., 0.,
            (3 * d**4 + 3 * e**4 + 10 * e**2 * f**2 + 3 * f**4 + 10 * d**2 *
             (e**2 + f**2)) / 15., dummy, dummy, dummy,
            (3 * a**4 + 3 * b**4 + 10 * b**2 * c**2 + 3 * c**4 + 10 * a**2 *
             (b**2 + c**2)) / 15.
        ])
        moments = moments[sorted_idx]
        # ignore dummy values until they are computed analytically
        I = np.where(true_moments != np.inf)[0]
        assert np.allclose(moments[I], true_moments[I])
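A hedged symbolic cross-check of the degree-2 moment used in the comments above, computed with sympy (not part of the original test):

import sympy as sp

x, y, z, a, b, c = sp.symbols('x y z a b c')
# E[(a*x+b*y+c*z)^2] with x, y, z iid uniform on [-1, 1], density (1/2)^3
moment = sp.integrate((a*x + b*y + c*z)**2*sp.Rational(1, 8),
                      (x, -1, 1), (y, -1, 1), (z, -1, 1))
assert sp.simplify(moment - (a**2 + b**2 + c**2)/3) == 0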
Example #8
    def test_set_difference_2d_array(self):
        num_vars = 2
        level1 = 1
        p = 1.0
        indices1 = compute_hyperbolic_indices(num_vars, level1, p)

        level2 = 2
        indices2 = compute_hyperbolic_indices(num_vars, level2, p)

        indices = set_difference(indices1, indices2)

        true_indices = np.asarray([[2, 0], [0, 2], [1, 1]]).T
        assert np.allclose(indices, true_indices)
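A minimal sketch of the operation being tested, assuming set_difference(A, B) keeps the columns of B that do not occur among the columns of A, preserving their order (the ordering convention of the real implementation may differ):

import numpy as np

def set_difference_reference(indices1, indices2):
    existing = {tuple(col) for col in indices1.T}
    keep = [col for col in indices2.T if tuple(col) not in existing]
    return np.asarray(keep).T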
Example #9
    def test_monomial_variance_uniform_variables(self):
        num_vars = 2
        degree = 1
        indices = compute_hyperbolic_indices(num_vars, degree, 1.0)
        coeffs = np.ones((indices.shape[1]))
        squared_indices = compute_hyperbolic_indices(num_vars, 2*degree, 1.0)
        squared_coeffs = np.ones((squared_indices.shape[1]))
        true_variance = monomial_mean_uniform_variables(
            squared_indices, squared_coeffs) - monomial_mean_uniform_variables(
                indices, coeffs)**2
        assert np.allclose(
            monomial_variance_uniform_variables(indices, coeffs),
            true_variance)
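For iid uniform variables on [-1, 1], E[x^i] is 0 for odd i and 1/(i+1) for even i, which is presumably what monomial_mean_uniform_variables evaluates term by term. A minimal sketch under that assumption (hypothetical helper, not the pyapprox API):

import numpy as np

def monomial_mean_uniform_reference(indices, coeffs):
    # E[x^i] over U(-1, 1) is 1/(i+1) for even i and 0 for odd i
    term_means = np.prod(
        np.where(indices % 2 == 0, 1.0/(indices + 1.0), 0.0), axis=0)
    return term_means.dot(coeffs)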
Example #10
    def test_add_polynomials(self):
        num_vars = 2
        degree = 2

        # define two sets of indices that have a non-empty intersection
        indices1 = compute_hyperbolic_indices(num_vars, degree, 1.0)
        indices1 = indices1[:, argsort_indices_leixographically(indices1)]
        coeffs1 = np.arange(indices1.shape[1])[:, np.newaxis]
        indices2 = compute_hyperbolic_indices(num_vars, degree-1, 1.0)
        indices2 = indices2[:, argsort_indices_leixographically(indices2)]
        coeffs2 = np.arange(indices2.shape[1])[:, np.newaxis]

        indices, coeffs = add_polynomials(
            [indices2, indices1], [coeffs2, coeffs1])

        # check that the indices of the new polynomial are the union
        # of the indices of the original polynomials
        true_indices = np.asarray(
            [[0, 0], [0, 1], [1, 0], [0, 2], [1, 1], [2, 0]]).T
        sorted_idx = argsort_indices_leixographically(indices)
        assert np.allclose(true_indices, indices[:, sorted_idx])

        # check that each coefficient of the new polynomial is the sum of
        # the matching coefficients of the original polynomials
        true_coeffs = np.asarray([[0, 2, 4, 3, 4, 5]]).T
        assert np.allclose(coeffs[sorted_idx], true_coeffs)

        num_vars = 2
        degree = 2

        # define a third set of indices that has a non-empty intersection
        # with the previous two
        indices3 = compute_hyperbolic_indices(num_vars, degree+1, 1.0)
        indices3 = indices3[:, argsort_indices_leixographically(indices3)]
        coeffs3 = np.arange(indices3.shape[1])[:, np.newaxis]

        indices, coeffs = add_polynomials(
            [indices2, indices1, indices3], [coeffs2, coeffs1, coeffs3])

        # check that the indices of the new polynomial are the union
        # of the indices of the original polynomials
        true_indices = np.asarray(
            [[0, 0], [0, 1], [1, 0], [0, 2], [1, 1], [2, 0], [0, 3], [1, 2], [2, 1], [3, 0]]).T
        sorted_idx = argsort_indices_leixographically(indices)
        assert np.allclose(true_indices, indices[:, sorted_idx])

        # check that each coefficient of the new polynomial is the sum of
        # the matching coefficients of the original polynomials
        true_coeffs = np.asarray([[0, 3, 6, 6, 8, 10, 6, 7, 8, 9]]).T
        assert np.allclose(coeffs[sorted_idx], true_coeffs)
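A minimal sketch of the addition being tested: stack the terms of all polynomials, then merge duplicate multi-indices by summing their coefficients (hypothetical reference, not the pyapprox implementation):

import numpy as np

def add_polynomials_reference(indices_list, coeffs_list):
    indices = np.hstack(indices_list)
    coeffs = np.vstack(coeffs_list)
    unique_indices, inverse = np.unique(indices, axis=1, return_inverse=True)
    summed_coeffs = np.zeros((unique_indices.shape[1], coeffs.shape[1]))
    # accumulate the coefficients of all terms sharing a multi-index
    np.add.at(summed_coeffs, inverse, coeffs)
    return unique_indices, summed_coeffs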
Example #11
    def setup_sd_opt_problem(self, SDOptProblem):
        from pyapprox.multivariate_polynomials import PolynomialChaosExpansion
        from pyapprox.variable_transformations import \
            define_iid_random_variable_transformation
        from pyapprox.indexing import compute_hyperbolic_indices

        num_vars = 1
        mu, sigma = 0, 1
        f, f_cdf, f_pdf, VaR, CVaR, ssd, ssd_disutil = \
            get_lognormal_example_exact_quantities(mu, sigma)

        nsamples = 4
        degree = 2
        samples = np.random.normal(0, 1, (1, nsamples))
        values = f(samples[0, :])[:, np.newaxis]

        pce = PolynomialChaosExpansion()
        var_trans = define_iid_random_variable_transformation(
            normal_rv(mu, sigma), num_vars)
        pce.configure({'poly_type': 'hermite', 'var_trans': var_trans})
        indices = compute_hyperbolic_indices(1, degree, 1.)
        pce.set_indices(indices)

        basis_matrix = pce.basis_matrix(samples)
        probabilities = np.ones((nsamples)) / nsamples

        sd_opt_problem = SDOptProblem(basis_matrix, values[:, 0], values[:, 0],
                                      probabilities)
        return sd_opt_problem
Example #12
    def test_compute_moment_matrix_using_tensor_product_quadrature(self):
        """
        Test use of density_function in 
        compute_moment_matrix_using_tensor_product_quadrature()
        """
        num_vars = 2
        alpha_stat = 2
        beta_stat = 5
        degree = 3

        pce_var_trans = define_iid_random_variable_transformation(
            uniform(), num_vars)
        pce_opts = {
            'alpha_poly': 0,
            'beta_poly': 0,
            'var_trans': pce_var_trans,
            'poly_type': 'jacobi'
        }

        random_var_trans = define_iid_random_variable_transformation(
            beta(alpha_stat, beta_stat), num_vars)

        univariate_pdf = lambda x: beta.pdf(x, a=alpha_stat, b=beta_stat)
        density_function = partial(tensor_product_pdf,
                                   univariate_pdfs=univariate_pdf)

        def uniform_univariate_quadrature_rule(n):
            x, w = gauss_jacobi_pts_wts_1D(n, 0, 0)
            x = (x + 1.) / 2.
            return x, w

        true_univariate_quadrature_rule = partial(gauss_jacobi_pts_wts_1D,
                                                  alpha_poly=beta_stat - 1,
                                                  beta_poly=alpha_stat - 1)

        compute_moment_matrix_function = partial(
            compute_moment_matrix_using_tensor_product_quadrature,
            num_samples=10 * degree,
            num_vars=num_vars,
            univariate_quadrature_rule=uniform_univariate_quadrature_rule,
            density_function=density_function)

        samples, weights = get_tensor_product_quadrature_rule(
            degree + 1,
            num_vars,
            true_univariate_quadrature_rule,
            transform_samples=random_var_trans.map_from_canonical_space)

        indices = compute_hyperbolic_indices(num_vars, degree, 1.0)

        apc = APC(compute_moment_matrix_function)
        apc.configure(pce_opts)
        apc.set_indices(indices)

        apc_basis_matrix = apc.basis_matrix(samples)

        #print(np.dot(apc_basis_matrix.T*weights,apc_basis_matrix))
        assert np.allclose(
            np.dot(apc_basis_matrix.T * weights, apc_basis_matrix),
            np.eye(apc_basis_matrix.shape[1]))
Example #13
    def test_leja_objective_2d(self):
        num_vars = 2
        alpha_stat, beta_stat = [2, 2]
        #alpha_stat,beta_stat = [1,1]

        weight_function, weight_function_deriv, poly = self.setup(
            num_vars, alpha_stat, beta_stat)

        leja_sequence = np.array([[-1.0, -1.0], [1.0, 1.0]]).T
        degree = 1
        indices = compute_hyperbolic_indices(num_vars, degree, 1.0)
        # sort lexicographically to make testing easier
        I = np.lexsort((indices[0, :], indices[1, :], indices.sum(axis=0)))
        indices = indices[:, I]
        poly.set_indices(indices[:, :2])
        new_indices = indices[:, 2:3]

        coeffs = compute_coefficients_of_leja_interpolant(
            leja_sequence, poly, new_indices, weight_function)

        sample = np.asarray([0.5, -0.5])[:, np.newaxis]
        func = partial(leja_objective,
                       leja_sequence=leja_sequence,
                       poly=poly,
                       new_indices=new_indices,
                       coeff=coeffs,
                       weight_function=weight_function,
                       weight_function_deriv=weight_function_deriv)
        fd_eps = 1e-7
        fd_deriv = compute_finite_difference_derivative(func,
                                                        sample,
                                                        fd_eps=fd_eps)

        residual, jacobian = leja_objective_and_gradient(sample,
                                                         leja_sequence,
                                                         poly,
                                                         new_indices,
                                                         coeffs,
                                                         weight_function,
                                                         weight_function_deriv,
                                                         deriv_order=1)

        grad = np.dot(jacobian.T, residual)
        assert np.allclose(fd_deriv, grad, atol=fd_eps * 100)

        num_samples = 20
        samples = np.linspace(-1, 1, num_samples)
        samples = cartesian_product([samples] * num_vars)
        objective_vals = func(samples)
        f, ax = plt.subplots(1, 1, figsize=(8, 6))
        X = samples[0, :].reshape(num_samples, num_samples)
        Y = samples[1, :].reshape(num_samples, num_samples)
        Z = objective_vals.reshape(num_samples, num_samples)
        cset = ax.contourf(X,
                           Y,
                           Z,
                           levels=np.linspace(Z.min(), Z.max(), 30),
                           cmap=None)
        plt.colorbar(cset)
        plt.plot(leja_sequence[0, :], leja_sequence[1, :], 'ko', ms=20)
Example #14
    def test_sample_based_apc_orthonormality(self):
        num_vars = 1
        alpha_stat = 2
        beta_stat = 5
        degree = 2

        pce_var_trans = define_iid_random_variable_transformation(
            stats.uniform(0, 1), num_vars)
        pce_opts = define_poly_options_from_variable_transformation(
            pce_var_trans)

        random_var_trans = define_iid_random_variable_transformation(
            stats.beta(alpha_stat, beta_stat), num_vars)

        num_moment_samples = 10000
        moment_matrix_samples = generate_independent_random_samples(
            random_var_trans.variable, num_moment_samples)

        compute_moment_matrix_function = partial(
            compute_moment_matrix_from_samples, samples=moment_matrix_samples)

        pce = APC(compute_moment_matrix_function)
        pce.configure(pce_opts)

        num_samples = 10000
        samples = generate_independent_random_samples(
            random_var_trans.variable, num_samples)

        indices = compute_hyperbolic_indices(num_vars, degree, 1.0)
        pce.set_indices(indices)
        basis_matrix = pce.basis_matrix(samples)
        assert np.allclose(np.dot(basis_matrix.T, basis_matrix) / num_samples,
                           np.eye(basis_matrix.shape[1]),
                           atol=1e-1)
Example #15
    def test_christoffel_function(self):
        num_vars = 1
        degree = 2
        alpha_poly = 0
        beta_poly = 0
        poly = PolynomialChaosExpansion()
        var_trans = define_iid_random_variable_transformation(
            uniform(-1, 2), num_vars)
        opts = define_poly_options_from_variable_transformation(var_trans)
        poly.configure(opts)
        indices = compute_hyperbolic_indices(num_vars, degree, 1.0)
        poly.set_indices(indices)

        num_samples = 11
        samples = np.linspace(-1., 1., num_samples)[np.newaxis, :]
        basis_matrix = poly.basis_matrix(samples)
        true_weights = 1./np.linalg.norm(basis_matrix, axis=1)**2
        weights = 1./christoffel_function(samples, poly.basis_matrix)
        assert weights.shape[0] == num_samples
        assert np.allclose(true_weights, weights)

        # For an n-point Gauss quadrature rule, which exactly integrates all
        # polynomials up to and including degree 2n-1, the quadrature weights
        # are the (classical) Christoffel function evaluated at the
        # quadrature samples; christoffel_function here returns its
        # reciprocal, hence the 1./ below
        quad_samples, quad_weights = gauss_jacobi_pts_wts_1D(
            degree, alpha_poly, beta_poly)
        quad_samples = quad_samples[np.newaxis, :]
        basis_matrix = poly.basis_matrix(quad_samples)
        weights = 1./christoffel_function(quad_samples, poly.basis_matrix)
        assert np.allclose(weights, quad_weights)
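The first assertion above fixes what christoffel_function must return: the sum of squared orthonormal basis values at each sample. A minimal sketch under that assumption (hypothetical reference, not the pyapprox implementation):

import numpy as np

def christoffel_function_reference(samples, basis_matrix_fun):
    basis_matrix = basis_matrix_fun(samples)
    # sum of squared orthonormal basis values at each sample
    return np.sum(basis_matrix**2, axis=1)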
Example #16
    def test_substitute_polynomial_for_variables_in_single_basis_term(self):
        """
        Substitute
          y1 = (1+x1+x2+x1*x2)
          y2 = (2+2*x1+2*x1*x3)
        into
          y3 = y1**2*x4**3*y2

        The global ordering of the variables in y3 is
        [y1, x4, y2] = [x1, x2, x4, x1, x3].
        Only the unique variables are wanted, so this reduces to
        [x1, x2, x4, x3].
        """
        def y1(x):
            x1, x2 = x[:2, :]
            return 1+x1+x2+x1*x2

        def y2(x):
            x1, x3 = x[[0, 2], :]
            return 2+2*x1+2*x1*x3

        def y3(x):
            x4 = x[3, :]
            y1, y2 = x[4:, :]
            return y1**2*x4**3*y2
        
        global_var_idx = [np.array([0, 1]), np.array([0, 2])]
        indices_in = [np.array([[0, 0], [1, 0], [0, 1], [1, 1]]).T,
                      np.array([[0, 0], [1, 0], [1, 1]]).T]
        coeffs_in = [np.ones((indices_in[0].shape[1], 1)),
                     2*np.ones((indices_in[1].shape[1], 1))]

        basis_index = np.array([[2, 3, 1]]).T
        basis_coeff = np.array([[1]])
        var_indices = np.array([0, 2])
        new_indices, new_coeffs = \
            substitute_polynomials_for_variables_in_single_basis_term(
                indices_in, coeffs_in, basis_index, basis_coeff, var_indices,
                global_var_idx)
        assert new_coeffs.shape[0] == new_indices.shape[1]
        assert new_indices.shape[1] == 21

        nvars = 4
        # degree needs to be high enough to exactly represent y3, which is
        # a composition of lower-degree polynomials
        degree = 10
        true_indices = compute_hyperbolic_indices(nvars, degree, 1)
        nsamples = true_indices.shape[1]*3
        samples = np.random.uniform(-1, 1, (nvars, nsamples))
        values1 = y1(samples)
        values2 = y2(samples)
        values = y3(np.vstack([samples, values1[None, :], values2[None, :]]))
        basis_mat = monomial_basis_matrix(true_indices, samples)
        true_coef = np.linalg.lstsq(basis_mat, values[:, None], rcond=None)[0]
        true_indices, true_coef = compress_and_sort_polynomial(
            true_coef, true_indices)
        new_indices, new_coeffs = compress_and_sort_polynomial(
            new_coeffs, new_indices)
        assert np.allclose(new_indices, true_indices)
        # print((true_coef, new_coeffs))
        assert np.allclose(true_coef, new_coeffs)
Example #17
    def test_fekete_rosenblatt_interpolation(self):
        np.random.seed(2)
        degree = 3

        __, __, joint_density, limits = rosenblatt_example_2d(num_samples=1)
        num_vars = len(limits)//2

        rosenblatt_opts = {'limits': limits, 'num_quad_samples_1d': 20}
        var_trans_1 = RosenblattTransformation(
            joint_density, num_vars, rosenblatt_opts)
        # the Rosenblatt transformation maps to [0, 1] but polynomials of
        # bounded variables are defined on [-1, 1], so add a second
        # transformation for this mapping
        var_trans_2 = define_iid_random_variable_transformation(
            uniform(), num_vars)
        var_trans = TransformationComposition([var_trans_1, var_trans_2])

        poly = PolynomialChaosExpansion()
        poly.configure({'poly_type': 'jacobi', 'alpha_poly': 0.,
                        'beta_poly': 0., 'var_trans': var_trans})
        indices = compute_hyperbolic_indices(num_vars, degree, 1.0)
        poly.set_indices(indices)

        num_candidate_samples = 10000
        generate_candidate_samples = lambda n: np.cos(
            np.random.uniform(0., np.pi, (num_vars, n)))

        precond_func = lambda matrix, samples: christoffel_weights(matrix)
        canonical_samples, data_structures = get_fekete_samples(
            poly.canonical_basis_matrix, generate_candidate_samples,
            num_candidate_samples, preconditioning_function=precond_func)
        samples = var_trans.map_from_canonical_space(canonical_samples)
        assert np.allclose(
            canonical_samples, var_trans.map_to_canonical_space(samples))

        assert samples.max() <= 1 and samples.min() >= 0.

        c = np.random.uniform(0., 1., num_vars)
        c *= 20/c.sum()
        w = np.zeros_like(c)
        w[0] = np.random.uniform(0., 1., 1)
        genz_function = GenzFunction('oscillatory', num_vars, c=c, w=w)
        values = genz_function(samples)
        # function = lambda x: np.sum(x**2,axis=0)[:,np.newaxis]
        # values = function(samples)
        
        # Ensure the coefficients produce an interpolant
        coef = interpolate_fekete_samples(
            canonical_samples, values, data_structures)
        poly.set_coefficients(coef)

        assert np.allclose(poly(samples), values)

        # compare the mean computed using quadrature with the mean computed
        # using the first coefficient of the expansion. This does not test
        # that the mean is correct, because the Rosenblatt transformation
        # introduces a large error, which makes it hard to compute an
        # accurate mean from the PCE or from quadrature
        quad_w = get_quadrature_weights_from_fekete_samples(
            canonical_samples, data_structures)
        values_at_quad_x = values[:, 0]
        assert np.allclose(
            np.dot(values_at_quad_x, quad_w), poly.mean())
Example #18
    def test_compute_grammian_of_mixture_models_using_sparse_grid_quadrature(
            self):
        num_vars = 2
        degree = 3
        # rv_params = [[6,2],[2,6]]
        rv_params = [[1, 1]]
        leja_basename = None
        mixtures, mixture_univariate_quadrature_rules = \
            get_leja_univariate_quadrature_rules_of_beta_mixture(
                rv_params, leja_growth_rule, leja_basename)

        poly = PolynomialChaosExpansion()
        var_trans = define_iid_random_variable_transformation(
            stats.uniform(-1, 2), num_vars)
        poly_opts = define_poly_options_from_variable_transformation(var_trans)
        indices = compute_hyperbolic_indices(num_vars, degree, 1.0)
        poly.configure(poly_opts)
        poly.set_indices(indices)

        num_mixtures = len(rv_params)
        mixture_univariate_growth_rules = [leja_growth_rule] * num_mixtures
        grammian_matrix = \
            compute_grammian_of_mixture_models_using_sparse_grid_quadrature(
                poly.basis_matrix, indices,
                mixture_univariate_quadrature_rules,
                mixture_univariate_growth_rules, num_vars)

        assert (np.all(np.isfinite(grammian_matrix)))

        if num_mixtures == 1:
            II = np.where(abs(grammian_matrix) > 1e-8)
            # check only non-zero inner-products are along diagonal, i.e.
            # for integrals of indices multiplied by themselves
            assert np.allclose(II, np.tile(np.arange(indices.shape[1]),
                                           (2, 1)))
Example #19
    def test_fekete_gauss_lobatto(self):
        num_vars = 1
        degree = 3
        num_candidate_samples = 10000
        generate_candidate_samples = lambda n: np.linspace(
            -1., 1., n)[np.newaxis, :]

        poly = PolynomialChaosExpansion()
        var_trans = define_iid_random_variable_transformation(
            uniform(-1, 2), num_vars)
        opts = define_poly_options_from_variable_transformation(var_trans)
        poly.configure(opts)
        indices = compute_hyperbolic_indices(num_vars, degree, 1.0)
        poly.set_indices(indices)

        precond_func = lambda matrix, samples: 0.25*np.ones(matrix.shape[0])
        samples, _ = get_fekete_samples(
            poly.basis_matrix, generate_candidate_samples,
            num_candidate_samples, preconditioning_function=precond_func)
        assert samples.shape[1] == degree+1

        # The samples should be close to the Gauss-Lobatto samples
        gauss_lobatto_samples = np.asarray(
            [-1.0, -0.447213595499957939281834733746,
             0.447213595499957939281834733746, 1.0])
        assert np.allclose(np.sort(samples), gauss_lobatto_samples, atol=1e-1)
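A hedged cross-check of the reference values above (not part of the original test): the interior nodes of the 4-point Gauss-Lobatto rule on [-1, 1] are the roots of P3'(x), which is proportional to 15x^2 - 3, i.e. +/- 1/sqrt(5):

import numpy as np

# roots of 15*x**2 - 3 are +/- 1/sqrt(5) = +/- 0.44721359...
interior_nodes = np.sort(np.roots([15.0, 0.0, -3.0]))
assert np.allclose(interior_nodes,
                   [-1.0/np.sqrt(5.0), 1.0/np.sqrt(5.0)])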
Example #20
    def test_pce_jacobian(self):
        degree = 2

        alpha_stat, beta_stat = 2, 3
        univariate_variables = [
            stats.beta(alpha_stat, beta_stat, 0, 1),
            stats.norm(-1, 2)
        ]
        variable = IndependentMultivariateRandomVariable(univariate_variables)
        var_trans = AffineRandomVariableTransformation(variable)
        num_vars = len(univariate_variables)

        poly = PolynomialChaosExpansion()
        poly_opts = define_poly_options_from_variable_transformation(var_trans)
        poly.configure(poly_opts)

        indices = compute_hyperbolic_indices(num_vars, degree, 1.0)
        poly.set_indices(indices)

        sample = generate_independent_random_samples(variable, 1)

        coef = np.ones((indices.shape[1], 2))
        coef[:, 1] *= 2
        poly.set_coefficients(coef)

        jac = poly.jacobian(sample)
        fd_jac = approx_jacobian(lambda x: poly(x[:, np.newaxis])[0, :],
                                 sample[:, 0])
        assert np.allclose(jac, fd_jac)
Example #21
    def test_get_unrotated_basis_coefficients(self):
        num_vars = 2
        alpha_stat = 2
        beta_stat = 5
        degree = 3

        pce_var_trans = define_iid_random_variable_transformation(
            uniform(), num_vars)
        pce_opts = {
            'alpha_poly': 0,
            'beta_poly': 0,
            'var_trans': pce_var_trans,
            'poly_type': 'jacobi'
        }

        random_var_trans = define_iid_random_variable_transformation(
            beta(alpha_stat, beta_stat), num_vars)

        univariate_pdf = lambda x: beta.pdf(x, a=alpha_stat, b=beta_stat)
        density_function = partial(tensor_product_pdf,
                                   univariate_pdfs=univariate_pdf)

        def uniform_univariate_quadrature_rule(n):
            x, w = gauss_jacobi_pts_wts_1D(n, 0, 0)
            x = (x + 1.) / 2.
            return x, w

        def univariate_quadrature_rule(n):
            x, w = gauss_jacobi_pts_wts_1D(n, beta_stat - 1, alpha_stat - 1)
            x = (x + 1.) / 2.
            return x, w

        compute_moment_matrix_function = partial(
            compute_moment_matrix_using_tensor_product_quadrature,
            num_samples=10 * degree,
            num_vars=num_vars,
            univariate_quadrature_rule=uniform_univariate_quadrature_rule,
            density_function=density_function)

        pce = APC(compute_moment_matrix_function)
        pce.configure(pce_opts)
        indices = compute_hyperbolic_indices(num_vars, degree, 1.0)
        pce.set_indices(indices)

        # set pce coefficients randomly
        pce.coefficients = np.random.normal(0., 1., (indices.shape[1], 1))

        unrotated_basis_coefficients = compute_coefficients_of_unrotated_basis(
            pce.coefficients, pce.R_inv)

        num_samples = 10
        samples = np.random.uniform(0., 1., (num_vars, num_samples))
        true_values = pce(samples)
        values = np.dot(pce.unrotated_basis_matrix(samples),
                        unrotated_basis_coefficients)
        assert np.allclose(values, true_values)
Example #22
    def test_compute_grammian_using_sparse_grid_quadrature(self):
        """
        Test compute_grammian_of_mixture_models_using_sparse_grid_quadrature()
        """
        num_vars = 2
        alpha_stat = 2
        beta_stat = 5
        degree = 3

        pce_var_trans = define_iid_random_variable_transformation(
            uniform(), num_vars)
        pce_opts = {
            'alpha_poly': 0,
            'beta_poly': 0,
            'var_trans': pce_var_trans,
            'poly_type': 'jacobi'
        }

        random_var_trans = define_iid_random_variable_transformation(
            beta(alpha_stat, beta_stat, -1, 2), num_vars)

        from pyapprox.mixture_model import \
            get_leja_univariate_quadrature_rules_of_beta_mixture,\
            compute_grammian_of_mixture_models_using_sparse_grid_quadrature
        from pyapprox.univariate_quadrature import leja_growth_rule
        rv_params = [[alpha_stat, beta_stat]]
        mixtures, mixture_univariate_quadrature_rules = \
            get_leja_univariate_quadrature_rules_of_beta_mixture(
                rv_params, leja_growth_rule, None)

        mixture_univariate_growth_rules = [leja_growth_rule]
        compute_grammian_function = partial(
            compute_grammian_of_mixture_models_using_sparse_grid_quadrature,
            mixture_univariate_quadrature_rules=(
                mixture_univariate_quadrature_rules),
            mixture_univariate_growth_rules=mixture_univariate_growth_rules,
            num_vars=num_vars)

        pce = APC(compute_grammian_function=compute_grammian_function)
        pce.configure(pce_opts)
        indices = compute_hyperbolic_indices(num_vars, degree, 1.0)
        pce.set_indices(indices)

        # use Gauss quadrature for true distribution to integrate APC basis
        def univariate_quadrature_rule(n):
            x, w = gauss_jacobi_pts_wts_1D(n, beta_stat - 1, alpha_stat - 1)
            return x, w

        samples, weights = get_tensor_product_quadrature_rule(
            degree + 1, num_vars, univariate_quadrature_rule)

        basis_matrix = pce.basis_matrix(samples)
        #print (np.dot(basis_matrix.T*weights,basis_matrix))
        assert np.allclose(np.dot(basis_matrix.T * weights, basis_matrix),
                           np.eye(basis_matrix.shape[1]))
Example #23
    def test_coeffs_of_active_subspace_polynomial(self):
        num_vars = 3
        num_active_vars = 2
        degree = 4
        A = np.random.normal(0, 1, (num_vars, num_vars))
        Q, R = np.linalg.qr(A)
        W1 = Q[:, :num_active_vars]
        # overwrite W1 with fixed integer values so the true coefficients
        # below can be written explicitly
        W1 = np.array([[1, 2, 3], [4, 5, 6]]).T
        as_poly_indices = compute_hyperbolic_indices(
            num_active_vars, degree, 1.0)
        sorted_as_poly_idx = argsort_indices_leixographically(as_poly_indices)

        # (dx+ey+fz)^2
        coeffs, indices = coeffs_of_active_subspace_polynomial(
            W1.T, as_poly_indices[:, sorted_as_poly_idx[3]])
        sorted_idx = argsort_indices_leixographically(indices)
        true_coeffs = np.array([W1[2, 1]**2, 2*W1[1, 1]*W1[2, 1], W1[1, 1]**2,
                                2*W1[0, 1]*W1[2, 1], 2*W1[0, 1]*W1[1, 1],
                                W1[0, 1]**2])
        assert np.allclose(true_coeffs, coeffs[sorted_idx])

        # (ax+by+cz)*(dx+ey+fz)=
        # a d x^2+a e x y+a f x z+b d x y+b e y^2+b f y z+c d x z+c e y z + c f z^2=
        # cfz^2 + (ce+bf)yz + bey^2 + (af+cd)xz + (ae+bd)xy + adx^2
        coeffs, indices = coeffs_of_active_subspace_polynomial(
            W1.T, as_poly_indices[:, sorted_as_poly_idx[4]])
        indices, coeffs = group_like_terms(coeffs, indices)
        sorted_idx = argsort_indices_leixographically(indices)
        a, b, c = W1[:, 0]
        d, e, f = W1[:, 1]
        true_coeffs = np.array([c*f, c*e+b*f, b*e, a*f+c*d, a*e+b*d, a*d])
        assert np.allclose(true_coeffs, coeffs[sorted_idx].squeeze())

        # (ax+by+cz)^4 =
        #   a^4 x^4 + 4 a^3 c x^3 z + 4 b a^3 x^3 y + 6 a^2 c^2 x^2 z^2 +
        #   12 b a^2 c x^2 y z + 6 b^2 a^2 x^2 y^2 + 4 a c^3 x z^3 +
        #   12 b a c^2 x y z^2 + 12 b^2 a c x y^2 z + 4 b^3 a x y^3 +
        #   c^4 z^4 + 4 b c^3 y z^3 + 6 b^2 c^2 y^2 z^2 + 4 b^3 c y^3 z +
        #   b^4 y^4
        coeffs, indices = coeffs_of_active_subspace_polynomial(
            W1.T, as_poly_indices[:, sorted_as_poly_idx[14]])
        sorted_idx = argsort_indices_leixographically(indices)
        #print_sorted_indices(indices, num_vars, sorted_idx)
        true_coeffs = np.array(
            [c**4, 4.*b*c**3, 6.*b**2*c**2, 4.*b**3*c, b**4, 4*a*c**3,
             12.*b*a*c**2, 12.*b**2*a*c, 4.*b**3*a, 6.*a**2*c**2,
             12.*b*a**2*c, 6*b**2*a**2, 4*a**3*c, 4*b*a**3, a**4])
        assert np.allclose(true_coeffs, coeffs[sorted_idx])

        # (dx+ey+fz)^4 =
        #   d^4 x^4 + 4 d^3 f x^3 z + 4 e d^3 x^3 y + 6 d^2 f^2 x^2 z^2 +
        #   12 e d^2 f x^2 y z + 6 e^2 d^2 x^2 y^2 + 4 d f^3 x z^3 +
        #   12 e d f^2 x y z^2 + 12 e^2 d f x y^2 z + 4 e^3 d x y^3 +
        #   f^4 z^4 + 4 e f^3 y z^3 + 6 e^2 f^2 y^2 z^2 + 4 e^3 f y^3 z +
        #   e^4 y^4
        coeffs, indices = coeffs_of_active_subspace_polynomial(
            W1.T, as_poly_indices[:, sorted_as_poly_idx[10]])
        sorted_idx = argsort_indices_leixographically(indices)
        true_coeffs = np.array(
            [f**4, 4.*e*f**3, 6.*e**2*f**2, 4.*e**3*f, e**4, 4*d*f**3,
             12.*e*d*f**2, 12.*e**2*d*f, 4.*e**3*d, 6.*d**2*f**2,
             12.*e*d**2*f, 6*e**2*d**2, 4*d**3*f, 4*e*d**3, d**4])
        assert np.allclose(true_coeffs, coeffs[sorted_idx])
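A hedged symbolic cross-check of two of the quartic expansion coefficients used above, computed with sympy (not part of the original test):

import sympy as sp

x, y, z, a, b, c = sp.symbols('x y z a b c')
expansion = sp.expand((a*x + b*y + c*z)**4)
# the multinomial coefficient of x^2*y*z is 4!/(2!*1!*1!) = 12
assert expansion.coeff(x**2*y*z) == 12*a**2*b*c
assert expansion.coeff(y**2*z**2) == 6*b**2*c**2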
Example #24
    def test_lu_leja_interpolation(self):
        num_vars = 2
        degree = 15

        poly = PolynomialChaosExpansion()
        var_trans = define_iid_random_variable_transformation(
            stats.uniform(), num_vars)
        opts = define_poly_options_from_variable_transformation(var_trans)
        poly.configure(opts)
        indices = compute_hyperbolic_indices(num_vars, degree, 1.0)
        poly.set_indices(indices)

        # candidates must be generated in canonical PCE space
        num_candidate_samples = 10000
        def generate_candidate_samples(n):
            return np.cos(np.random.uniform(0., np.pi, (num_vars, n)))

        # must use canonical_basis_matrix to generate basis matrix
        num_leja_samples = indices.shape[1]-1
        def precond_func(matrix, samples):
            return christoffel_weights(matrix)
        samples, data_structures = get_lu_leja_samples(
            poly.canonical_basis_matrix, generate_candidate_samples,
            num_candidate_samples, num_leja_samples,
            preconditioning_function=precond_func)
        samples = var_trans.map_from_canonical_space(samples)

        assert samples.max() <= 1 and samples.min() >= 0.

        c = np.random.uniform(0., 1., num_vars)
        c *= 20/c.sum()
        w = np.zeros_like(c)
        w[0] = np.random.uniform(0., 1., 1)
        genz_function = GenzFunction('oscillatory', num_vars, c=c, w=w)
        values = genz_function(samples)

        # Ensure coef produce an interpolant
        coef = interpolate_lu_leja_samples(samples, values, data_structures)

        # Ignore basis functions (columns) that were not considered during the
        # incomplete LU factorization
        poly.set_indices(poly.indices[:, :num_leja_samples])
        poly.set_coefficients(coef)

        assert np.allclose(poly(samples), values)

        quad_w = get_quadrature_weights_from_lu_leja_samples(
            samples, data_structures)
        values_at_quad_x = values[:, 0]

        # will get closer if degree is increased
        # print (np.dot(values_at_quad_x,quad_w),genz_function.integrate())
        assert np.allclose(
            np.dot(values_at_quad_x, quad_w), genz_function.integrate(),
            atol=1e-4)
Example #25
    def test_multiply_multivariate_polynomials(self):
        num_vars = 2
        degree1 = 1
        degree2 = 2

        indices1 = compute_hyperbolic_indices(num_vars, degree1, 1.0)
        coeffs1 = np.ones((indices1.shape[1]), dtype=float)
        indices2 = compute_hyperbolic_indices(num_vars, degree2, 1.0)
        coeffs2 = 2.0 * np.ones((indices2.shape[1]), dtype=float)

        indices, coeffs = multiply_multivariate_polynomials(
            indices1, coeffs1, indices2, coeffs2)
        indices = indices[:, argsort_indices_leixographically(indices)]

        true_indices = compute_hyperbolic_indices(num_vars, degree1 + degree2,
                                                  1.0)
        true_indices = \
            true_indices[:, argsort_indices_leixographically(true_indices)]
        assert np.allclose(true_indices, indices)
        true_coeffs = np.array([2, 4, 4, 4, 4, 6, 2, 4, 4, 2])
        assert np.allclose(true_coeffs, coeffs)
Example #26
    def test_argsort_indices_leixographically(self):
        num_vars = 2
        degree = 2
        indices = compute_hyperbolic_indices(num_vars, degree, 1.0)
        sorted_idx = argsort_indices_leixographically(indices)

        sorted_indices = indices[:, sorted_idx]
        true_sorted_indices = np.array([[0, 0], [0, 1], [1, 0], [0, 2], [1, 1],
                                        [2, 0]]).T
        assert np.allclose(sorted_indices, true_sorted_indices)

        num_vars = 3
        degree = 2
        indices = compute_hyperbolic_indices(num_vars, degree, 1.0)
        sorted_idx = argsort_indices_leixographically(indices)

        sorted_indices = indices[:, sorted_idx]
        true_sorted_indices = np.array([[0, 0, 0], [0, 0, 1], [0, 1, 0],
                                        [1, 0, 0], [0, 0, 2], [0, 1, 1],
                                        [0, 2, 0], [1, 0, 1], [1, 1, 0],
                                        [2, 0, 0]]).T
        assert np.allclose(sorted_indices, true_sorted_indices)
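The ordering checked above (total degree first, then lexicographic on the exponents) can be reproduced with np.lexsort. A minimal sketch (hypothetical reference, not the pyapprox implementation):

import numpy as np

def argsort_indices_lexicographically_reference(indices):
    # np.lexsort treats its last key as primary: sort by total degree,
    # then by the exponent of the first variable, then the second, ...
    keys = tuple(indices[::-1]) + (indices.sum(axis=0),)
    return np.lexsort(keys)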
Example #27
    def test_substitute_polynomials_for_variables_in_another_polynomial_II(self):
        global_var_idx = [np.array([0, 1, 2, 3, 4, 5]), np.array([0, 1, 2])]
        indices_in = [
            compute_hyperbolic_indices(global_var_idx[0].shape[0], 2, 1),
            compute_hyperbolic_indices(global_var_idx[1].shape[0], 3, 1)]
        coeffs_in = [np.ones((indices_in[0].shape[1], 1)),
                     2*np.ones((indices_in[1].shape[1], 1))]
        # var_idx must be related to how the variables enter the indices
        # defined below
        var_idx = np.array([0, 1])
        indices = compute_hyperbolic_indices(3, 5, 1)
        coeffs = np.ones((indices.shape[1], 1))
        new_indices, new_coef = \
            substitute_polynomials_for_variables_in_another_polynomial(
                indices_in, coeffs_in, indices, coeffs, var_idx, global_var_idx)

        nsamples = 100
        nvars = np.unique(np.concatenate(global_var_idx)).shape[0] + (
            indices.shape[0] - var_idx.shape[0])
        samples = np.random.uniform(-1, 1, (nvars, nsamples))
        validation_samples = np.random.uniform(-1, 1, (nvars, 1000))
        validation_values1 = monomial_basis_matrix(
            indices_in[0], validation_samples[global_var_idx[0], :]).dot(
                coeffs_in[0])
        validation_values2 = monomial_basis_matrix(
            indices_in[1], validation_samples[global_var_idx[1], :]).dot(
                coeffs_in[1])
        # inputs to polynomial which are not themselves polynomials
        other_global_var_idx = np.setdiff1d(
            np.arange(nvars), np.unique(np.concatenate(global_var_idx)))
        # print(other_global_var_idx)
        validation_values = np.dot(
            monomial_basis_matrix(
                indices,
                np.vstack([validation_values1.T, validation_values2.T,
                           validation_samples[other_global_var_idx, :], ])),
            coeffs)
        assert np.allclose(validation_values, monomial_basis_matrix(
            new_indices, validation_samples).dot(new_coef))
Example #28
    def test_random_christoffel_sampling(self):
        num_vars = 2
        degree = 10

        alpha_poly = 1
        beta_poly = 1

        alpha_stat = beta_poly + 1
        beta_stat = alpha_poly + 1

        num_samples = int(1e4)
        poly = PolynomialChaosExpansion()
        var_trans = define_iid_random_variable_transformation(
            stats.beta(alpha_stat, beta_stat), num_vars)
        opts = define_poly_options_from_variable_transformation(var_trans)
        poly.configure(opts)
        indices = compute_hyperbolic_indices(num_vars, degree, 1.0)
        poly.set_indices(indices)

        univariate_pdf = partial(stats.beta.pdf, a=alpha_stat, b=beta_stat)
        probability_density = partial(tensor_product_pdf,
                                      univariate_pdfs=univariate_pdf)

        envelope_factor = 10

        def generate_proposal_samples(n):
            return np.random.uniform(0., 1., size=(num_vars, n))

        def proposal_density(x):
            return np.ones(x.shape[1])

        # unlike Fekete and Leja sampling, we can and should use
        # poly.basis_matrix here. If we used canonical_basis_matrix then
        # the densities would also have to be mapped to the canonical
        # space, which can be difficult
        samples = random_induced_measure_sampling(
            num_samples, num_vars, poly.basis_matrix, probability_density,
            proposal_density, generate_proposal_samples, envelope_factor)

        def univariate_quadrature_rule(n):
            x, w = gauss_jacobi_pts_wts_1D(n, alpha_poly, beta_poly)
            x = (x + 1) / 2
            return x, w

        x, w = get_tensor_product_quadrature_rule(degree * 2 + 1, num_vars,
                                                  univariate_quadrature_rule)
        # print(samples.mean(axis=1),x.dot(w))
        assert np.allclose(
            christoffel_function(x, poly.basis_matrix, True).dot(w), 1.0)
        assert np.allclose(x.dot(w), samples.mean(axis=1), atol=1e-2)
Example #29
    def test_add_pce(self):
        univariate_variables = [stats.norm(), stats.uniform()]
        variable = IndependentMultivariateRandomVariable(univariate_variables)
        degree1, degree2 = 2, 3
        poly1 = get_polynomial_from_variable(variable)
        poly1.set_indices(
            compute_hyperbolic_indices(variable.num_vars(), degree1))
        poly1.set_coefficients(
            np.random.normal(0, 1, (poly1.indices.shape[1], 1)))
        poly2 = get_polynomial_from_variable(variable)
        poly2.set_indices(
            compute_hyperbolic_indices(variable.num_vars(), degree2))
        poly2.set_coefficients(
            np.random.normal(0, 1, (poly2.indices.shape[1], 1)))

        poly3 = poly1 + poly2 + poly2
        samples = generate_independent_random_samples(variable, 10)
        # print(poly3(samples), poly1(samples) + 2*poly2(samples))
        assert np.allclose(poly3(samples), poly1(samples) + 2 * poly2(samples))

        poly4 = poly1 - poly2
        samples = generate_independent_random_samples(variable, 10)
        # print(poly4(samples), poly1(samples) - poly2(samples))
        assert np.allclose(poly4(samples), poly1(samples) - poly2(samples))
Example #30
    def test_compute_hyperbolic_indices(self):
        num_vars = 3
        level = 3
        p = 1.0
        indices = compute_hyperbolic_indices(num_vars, level, p)
        assert indices.shape[1] == nchoosek(num_vars + level, num_vars)

        num_vars = 4
        level = 3
        p = 0.5
        indices = compute_hyperbolic_indices(num_vars, level, p)
        assert np.all(np.sum(indices**p, axis=0)**(1.0 / float(p)) <= level)

        num_vars = 3
        level = 3
        p = 1.0
        indices = compute_hyperbolic_indices_itertools(num_vars, level, p)
        assert indices.shape[1] == nchoosek(num_vars + level, num_vars)

        num_vars = 4
        level = 3
        p = 0.5
        indices = compute_hyperbolic_indices_itertools(num_vars, level, p)
        assert np.all(np.sum(indices**p, axis=0)**(1.0 / float(p)) <= level)
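A minimal sketch of the hyperbolic-cross criterion both assertions above rely on: keep every multi-index whose p-norm is at most the level (hypothetical helper; for p = 1 this reduces to the total-degree set of size nchoosek(num_vars+level, num_vars)):

from itertools import product

import numpy as np

def hyperbolic_indices_reference(num_vars, level, p):
    # no single exponent can exceed the level, so enumerating the full
    # tensor grid of candidates up to level per variable is sufficient
    candidates = product(range(level + 1), repeat=num_vars)
    keep = [index for index in candidates
            if np.sum(np.asarray(index, dtype=float)**p)**(1.0/p)
            <= level + 1e-12]
    return np.asarray(keep, dtype=int).T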