예제 #1
0
    def test_christoffel_function(self):
        """Check the christoffel function against basis-row norms and
        against Gauss-Jacobi quadrature weights."""
        num_vars = 1
        degree = 2
        alpha_poly = 0
        beta_poly = 0

        poly = PolynomialChaosExpansion()
        var_trans = define_iid_random_variable_transformation(
            uniform(-1, 2), num_vars)
        opts = define_poly_options_from_variable_transformation(var_trans)
        poly.configure(opts)
        poly.set_indices(compute_hyperbolic_indices(num_vars, degree, 1.0))

        num_samples = 11
        samples = np.linspace(-1., 1., num_samples)[np.newaxis, :]
        basis_matrix = poly.basis_matrix(samples)
        # 1/christoffel is the inverse squared row norm of the basis matrix
        true_weights = 1./np.linalg.norm(basis_matrix, axis=1)**2
        weights = 1./christoffel_function(samples, poly.basis_matrix)
        assert weights.shape[0] == num_samples
        assert np.allclose(true_weights, weights)

        # For a Gaussian quadrature rule of degree p that exactly
        # integrates all polynomials up to and including degree 2p-1
        # the quadrature weights are the christoffel function
        # evaluated at the quadrature samples
        quad_samples, quad_weights = gauss_jacobi_pts_wts_1D(
            degree, alpha_poly, beta_poly)
        quad_samples = quad_samples[np.newaxis, :]
        basis_matrix = poly.basis_matrix(quad_samples)
        weights = 1./christoffel_function(quad_samples, poly.basis_matrix)
        assert np.allclose(weights, quad_weights)
예제 #2
0
    def setup_sd_opt_problem(self, SDOptProblem):
        """Construct a small stochastic-dominance optimization problem from a
        degree-2 Hermite PCE fit to a lognormal example model.

        Parameters
        ----------
        SDOptProblem : callable
            Problem class taking (basis_matrix, values, values, probabilities).
        """
        from pyapprox.multivariate_polynomials import PolynomialChaosExpansion
        from pyapprox.variable_transformations import \
            define_iid_random_variable_transformation
        from pyapprox.indexing import compute_hyperbolic_indices

        num_vars = 1
        mu, sigma = 0, 1
        # Only the forward model f is used below; the other exact
        # quantities returned are ignored here.
        f, f_cdf, f_pdf, VaR, CVaR, ssd, ssd_disutil = \
            get_lognormal_example_exact_quantities(mu, sigma)

        nsamples = 4
        degree = 2
        samples = np.random.normal(0, 1, (1, nsamples))
        values = f(samples[0, :])[:, np.newaxis]

        pce = PolynomialChaosExpansion()
        var_trans = define_iid_random_variable_transformation(
            normal_rv(mu, sigma), num_vars)
        pce.configure({'poly_type': 'hermite', 'var_trans': var_trans})
        pce.set_indices(compute_hyperbolic_indices(1, degree, 1.))

        basis_matrix = pce.basis_matrix(samples)
        # Equal weighting of the Monte Carlo samples
        probabilities = np.ones(nsamples) / nsamples

        return SDOptProblem(basis_matrix, values[:, 0], values[:, 0],
                            probabilities)
    def test_compute_moment_matrix_combination_sparse_grid(self):
        """
        Test use of density_function in
        compute_moment_matrix_using_tensor_product_quadrature()
        """
        num_vars = 2
        alpha_stat, beta_stat = 2, 5
        degree = 2

        pce_var_trans = define_iid_random_variable_transformation(
            stats.uniform(), num_vars)
        pce_opts = define_poly_options_from_variable_transformation(
            pce_var_trans)

        random_var_trans = define_iid_random_variable_transformation(
            stats.beta(alpha_stat, beta_stat), num_vars)

        def univariate_pdf(x):
            return stats.beta.pdf(x, a=alpha_stat, b=beta_stat)

        density_function = partial(
            tensor_product_pdf, univariate_pdfs=univariate_pdf)

        # Gauss-Jacobi rule matching the beta(alpha_stat, beta_stat) weight
        true_univariate_quadrature_rule = partial(
            gauss_jacobi_pts_wts_1D, alpha_poly=beta_stat - 1,
            beta_poly=alpha_stat - 1)

        from pyapprox.univariate_quadrature import \
            clenshaw_curtis_in_polynomial_order, clenshaw_curtis_rule_growth
        quad_rule_opts = {
            'quad_rules': clenshaw_curtis_in_polynomial_order,
            'growth_rules': clenshaw_curtis_rule_growth,
            'unique_quadrule_indices': None}

        compute_grammian_function = partial(
            compute_grammian_matrix_using_combination_sparse_grid,
            var_trans=pce_var_trans, max_num_samples=100,
            density_function=density_function,
            quad_rule_opts=quad_rule_opts)

        samples, weights = get_tensor_product_quadrature_rule(
            degree + 1, num_vars, true_univariate_quadrature_rule,
            transform_samples=random_var_trans.map_from_canonical_space)

        indices = compute_hyperbolic_indices(num_vars, degree, 1.0)

        pce = PolynomialChaosExpansion()
        pce.configure(pce_opts)
        pce.set_indices(indices)
        basis_matrix = pce.basis_matrix(samples)
        # Sparse-grid grammian must agree with the tensor-product quadrature
        assert np.allclose(np.dot(basis_matrix.T * weights, basis_matrix),
                           compute_grammian_function(pce.basis_matrix, None))

        apc = APC(compute_grammian_function=compute_grammian_function)
        apc.configure(pce_opts)
        apc.set_indices(indices)

        apc_basis_matrix = apc.basis_matrix(samples)

        # The APC basis must be orthonormal under the quadrature rule
        assert np.allclose(
            np.dot(apc_basis_matrix.T * weights, apc_basis_matrix),
            np.eye(apc_basis_matrix.shape[1]))
예제 #4
0
    def test_least_interpolation_lu_equivalence_in_1d(self):
        """In 1D least orthogonal interpolation must select the same points
        as a christoffel-preconditioned truncated pivoted LU factorization,
        and the two factorizations must reproduce the same matrix."""
        num_vars = 1
        alpha_stat, beta_stat = 2, 5
        max_num_pts = 100

        var_trans = define_iid_random_variable_transformation(
            beta(alpha_stat, beta_stat), num_vars)
        pce_opts = {'alpha_poly': beta_stat-1, 'beta_poly': alpha_stat-1,
                    'var_trans': var_trans, 'poly_type': 'jacobi'}

        # Set oli options
        oli_opts = {'verbosity': 0, 'assume_non_degeneracy': False}

        def basis_generator(num_vars, degree):
            return (degree+1,
                    compute_hyperbolic_level_indices(num_vars, degree, 1.0))

        pce = PolynomialChaosExpansion()
        pce.configure(pce_opts)

        oli_solver = LeastInterpolationSolver()
        oli_solver.configure(oli_opts)
        oli_solver.set_pce(pce)

        from pyapprox.indexing import get_total_degree
        max_degree = get_total_degree(num_vars, max_num_pts)
        pce.set_indices(compute_hyperbolic_indices(num_vars, max_degree, 1.))

        from pyapprox.polynomial_sampling import christoffel_function

        def preconditioning_function(samples):
            # Christoffel weights keep the weighted basis well conditioned
            return 1./christoffel_function(samples, pce.basis_matrix)

        oli_solver.set_preconditioning_function(preconditioning_function)
        oli_solver.set_basis_generator(basis_generator)

        initial_pts = None
        candidate_samples = np.linspace(0., 1., 1000)[np.newaxis, :]

        oli_solver.factorize(
            candidate_samples, initial_pts, num_selected_pts=max_num_pts)

        oli_samples = oli_solver.get_current_points()

        # Reproduce the point selection with a pivoted, truncated LU
        # factorization of the preconditioned basis matrix
        from pyapprox.utilities import truncated_pivoted_lu_factorization
        pce.set_indices(oli_solver.selected_basis_indices)
        basis_matrix = pce.basis_matrix(candidate_samples)
        weights = np.sqrt(preconditioning_function(candidate_samples))
        basis_matrix = np.dot(np.diag(weights), basis_matrix)
        L, U, p = truncated_pivoted_lu_factorization(
            basis_matrix, max_num_pts)
        assert p.shape[0] == max_num_pts
        lu_samples = candidate_samples[:, p]

        assert np.allclose(lu_samples, oli_samples)

        L1, U1, H1 = oli_solver.get_current_LUH_factors()

        true_permuted_matrix = (pce.basis_matrix(lu_samples).T*weights[p]).T
        assert np.allclose(np.dot(L, U), true_permuted_matrix)
        assert np.allclose(np.dot(L1, np.dot(U1, H1)), true_permuted_matrix)
예제 #5
0
    def test_factorization_using_exact_algebra(self):
        """Verify the incremental LUH factorization of LeastInterpolationSolver
        against factors computed by hand with exact algebra.

        Four candidate points are chosen so no pivoting occurs; the expected
        (L, U) factors after each update step S=1,2,3 are built explicitly
        from the Vandermonde matrix V and recorded in ``factor_history``,
        then compared against the solver's factors as points are added.
        """

        num_vars = 2
        alpha_stat = 2; beta_stat  = 5
        var_trans = define_iid_random_variable_transformation(
            beta(alpha_stat,beta_stat,-2,1),num_vars)
        pce_opts = {'alpha_poly':beta_stat-1,'beta_poly':alpha_stat-1,
                    'var_trans':var_trans,'poly_type':'jacobi'}

        pce = PolynomialChaosExpansion()
        pce.configure(pce_opts)

        oli_opts = {'verbosity':0,
                    'assume_non_degeneracy':False}

        # Returns (num_indices, indices) for a total-degree level
        basis_generator = \
          lambda num_vars,degree: (degree+1,compute_hyperbolic_level_indices(
              num_vars,degree,1.0))
            
        oli_solver = LeastInterpolationSolver()
        oli_solver.configure(oli_opts)
        oli_solver.set_pce(pce)
        oli_solver.set_basis_generator(basis_generator)
        

        # Define 4 candidate points so no pivoting is necessary
        from numpy import sqrt, dot, sum, array, zeros
        from numpy.linalg import norm
        candidate_pts = array([[-1.,1./sqrt(2.),-1./sqrt(2.),0.],
                                   [-1.,-1./sqrt(2.),0.,0.]] )

        U = np.zeros((4,4))

        # Expected (L, U) factors after each update step, in order
        factor_history = []

        # Build vandermonde matrix for all degrees ahead of time
        degree = 2
        indices = compute_hyperbolic_indices(num_vars,degree,1.)
        pce.set_indices(indices)
        V = pce.basis_matrix(candidate_pts)

        ##--------------------- ##
        ## S=1                  ##
        ##--------------------- ##

        #print 'V\n',V

        #print '################################'
        # Degree-1 basis columns with the first row subtracted
        U1 = array([[V[0,1],V[0,2]],
                     [V[1,1]-V[0,1],V[1,2]-V[0,2]],
                     [V[2,1]-V[0,1],V[2,2]-V[0,2]],
                     [V[3,1]-V[0,1],V[3,2]-V[0,2]]])

        norms = [sqrt((V[1,1]-V[0,1])**2+(V[1,2]-V[0,2])**2),
                  sqrt((V[2,1]-V[0,1])**2+(V[2,2]-V[0,2])**2),
                  sqrt((V[3,1]-V[0,1])**2+(V[3,2]-V[0,2])**2)]
        # Normalize the pivot row
        U1[1,:] /= norms[0]
        #print 'U1\n',U1

        #print 'norms\n', norms

        magic_row = array([[(V[1,1]-V[0,1])/norms[0],(V[1,2]-V[0,2])/norms[0]]])
        #print 'magic_row\n',magic_row

        # Inner products of each residual row with the normalized pivot row
        inner_products = array([(V[1,1]-V[0,1])*(V[1,1]-V[0,1])/norms[0]+
                                 (V[1,2]-V[0,2])*(V[1,2]-V[0,2])/norms[0],
                                 (V[2,1]-V[0,1])*(V[1,1]-V[0,1])/norms[0]+
                                 (V[2,2]-V[0,2])*(V[1,2]-V[0,2])/norms[0],
                                 (V[3,1]-V[0,1])*(V[1,1]-V[0,1])/norms[0]+
                                 (V[3,2]-V[0,2])*(V[1,2]-V[0,2])/norms[0]])
        #print 'inner_products\n', inner_products

        v1 = inner_products
        L = np.array([[1,0,0,0],[0,v1[0],v1[1],v1[2]]]).T
        #print 'L\n',L

        Z = array([[V[0,1]*(V[1,1]-V[0,1])/norms[0]+V[0,2]*(V[1,2]-V[0,2])/norms[0]]])
        #print 'Z\n',Z

        U=array([[1,Z[0,0]],[0,1]])
        #print 'U\n',U

        factor_history.append((L,U))

        ##--------------------- ##
        ## S=2                  ##
        ##--------------------- ##

        #print '################################'
        # Second elimination step applied to the degree-1 columns
        U2 = array([[V[0,1],V[0,2]],
                     [(V[1,1]-V[0,1])/L[1,1],(V[1,2]-V[0,2])/L[1,1]],
                     [(V[2,1]-V[0,1])-L[2,1]*(V[1,1]-V[0,1])/L[1,1],
                      (V[2,2]-V[0,2])-L[2,1]*(V[1,2]-V[0,2])/L[1,1]],
                     [(V[3,1]-V[0,1])-L[3,1]*(V[1,1]-V[0,1])/L[1,1],
                      (V[3,2]-V[0,2])-L[3,1]*(V[1,2]-V[0,2])/L[1,1]]])

        #print 'U2\n',U2

        norms = [sqrt(((V[2,1]-V[0,1])-L[2,1]*(V[1,1]-V[0,1])/L[1,1])**2+
                   ((V[2,2]-V[0,2])-L[2,1]*(V[1,2]-V[0,2])/L[1,1])**2),
                 sqrt(((V[3,1]-V[0,1])-L[3,1]*(V[1,1]-V[0,1])/L[1,1])**2+
                   ((V[3,2]-V[0,2])-L[3,1]*(V[1,2]-V[0,2])/L[1,1])**2)]
        U2[2,:] /= norms[0]
        #print 'U2\n',U2

        #print 'norms\n', norms

        magic_row = array([(V[2,1]-V[0,1])-L[2,1]*(V[1,1]-V[0,1])/L[1,1],
                            (V[2,2]-V[0,2])-L[2,1]*(V[1,2]-V[0,2])/L[1,1]])/norms[0]
        #print 'magic_row', magic_row

        inner_products = [norms[0],((V[2,1]-V[0,1])-L[2,1]*(V[1,1]-V[0,1])/L[1,1])*((V[3,1]-V[0,1])-L[3,1]*(V[1,1]-V[0,1])/L[1,1])/norms[0]+((V[2,2]-V[0,2])-L[2,1]*(V[1,2]-V[0,2])/L[1,1])*((V[3,2]-V[0,2])-L[3,1]*(V[1,2]-V[0,2])/L[1,1])/norms[0]]
        #print 'inner_products',inner_products

        v2 = inner_products
        # Append the new column of inner products to L
        L = np.array([[1,0,0,0],[0,v1[0],v1[1],v1[2]],[0,0,v2[0],v2[1]]]).T
        #print 'L\n',L

        Z = [V[0,1]/norms[0]*((V[2,1]-V[0,1])-L[2,1]*(V[1,1]-V[0,1])/L[1,1])+
              V[0,2]/norms[0]*((V[2,2]-V[0,2])-L[2,1]*(V[1,2]-V[0,2])/L[1,1]),
              (V[1,1]-V[0,1])/(L[1,1]*norms[0])*((V[2,1]-V[0,1])-L[2,1]*(V[1,1]-V[0,1])/L[1,1])+
              (V[1,2]-V[0,2])/(L[1,1]*norms[0])*((V[2,2]-V[0,2])-L[2,1]*(V[1,2]-V[0,2])/L[1,1])]
        #print 'Z\n',Z

        # Grow U by one row/column, placing Z in the new column
        U_prev = U.copy(); U = zeros( ( 3,3 ) ); U[:2,:2] = U_prev
        U[:2,2] = Z; U[2,2]=1
        #print 'U\n', U

        factor_history.append((L,U))

        ##--------------------- ##
        ## S=3                  ##
        ##--------------------- ##

        #print '################################'
        # Third step operates on the degree-2 basis columns (3,4,5)
        U3 = array([[V[0,3],V[0,4],V[0,5]],
                     [(V[1,3]-V[0,3])/L[1,1],(V[1,4]-V[0,4])/L[1,1],(V[1,5]-V[0,5])/L[1,1]],
                     [((V[2,3]-V[0,3])-L[2,1]*(V[1,3]-V[0,3])/L[1,1])/L[2,2],((V[2,4]-V[0,4])-L[2,1]*(V[1,4]-V[0,4])/L[1,1])/L[2,2],((V[2,5]-V[0,5])-L[2,1]*(V[1,5]-V[0,5])/L[1,1])/L[2,2]],
                     [(V[3,3]-V[0,3])-L[3,1]*(V[1,3]-V[0,3])/L[1,1]-L[3,2]/L[2,2]*(V[2,3]-V[0,3]-L[2,1]/L[1,1]*(V[1,3]-V[0,3])),(V[3,4]-V[0,4])-L[3,1]*(V[1,4]-V[0,4])/L[1,1]-L[3,2]/L[2,2]*(V[2,4]-V[0,4]-L[2,1]/L[1,1]*(V[1,4]-V[0,4])),(V[3,5]-V[0,5])-L[3,1]*(V[1,5]-V[0,5])/L[1,1]-L[3,2]/L[2,2]*(V[2,5]-V[0,5]-L[2,1]/L[1,1]*(V[1,5]-V[0,5]))]])

        norms = [norm(U3[3,:])]

        U3[3,:] /= norms[0]
        #print 'U3\n', U3

        #print 'norms\n', norms

        magic_row = array([U3[3,:]])
        #print 'magic_row', magic_row

        inner_products = [norms[0]]
        #print 'inner_products\n', inner_products

        # Grow L by one row/column for the newly added point
        L_prev = L.copy(); L = zeros( (4,4) ); L[:,:3] = L_prev;
        L[3,3] = inner_products[0]
        #print 'L\n', L

        Z = dot( U3[:3,:3], magic_row.T )
        #print 'Z\n',Z

        U_prev = U.copy(); U = zeros( ( 4,4 ) ); U[:3,:3] = U_prev
        U[:3,3] = Z.squeeze(); U[3,3]=1
        #print 'U\n',U
        #assert False

        factor_history.append((L,U))


        candidate_pts = array([[-1.,1./sqrt(2.),-1./sqrt(2.),0.],
                                [-1.,-1./sqrt(2.),0.,0.]] )

        # define target function
        model = lambda x: np.asarray([x[0]**2 + x[1]**2 +  x[0]*x[1]]).T

        #num_starting_pts = 5
        num_starting_pts = 1
        initial_pts = None
        oli_solver.factorize(
            candidate_pts, initial_pts, num_selected_pts=num_starting_pts )


        L,U,H=oli_solver.get_current_LUH_factors()
        #print 'L\n',L
        #print 'U\n',U
        #print 'H\n',H
        it = 0
        # NOTE(review): return values of np.allclose are not asserted here
        # (or in the loop below) — presumably intentional smoke checks, but
        # confirm; wrapping in assert would make the comparisons binding.
        np.allclose(L[:1,:1],factor_history[it][0])
        np.allclose(U[:1,:1],factor_history[it][0])

        current_pts = oli_solver.get_current_points()
        current_vals = model(current_pts)

        num_pts = current_pts.shape[1]
        num_pts_prev = current_pts.shape[1]
        max_num_pts =  candidate_pts.shape[1]
        finalize = False
        # Add one point per iteration, comparing solver factors against the
        # hand-computed history, until all candidates are used
        while not finalize:
            if ( ( num_pts == max_num_pts-1) or
                (num_pts == candidate_pts.shape[1]) ):
                 finalize = True

            oli_solver.update_factorization(1)


            L,U,H=oli_solver.get_current_LUH_factors()
            #print '###########'
            #print 'L\n',L
            #print 'U\n',U
            #print 'H\n',H
            np.allclose(L,
                    factor_history[it][0][:L.shape[0],:L.shape[1]])
            np.allclose(U,
                    factor_history[it][1][:U.shape[0],:U.shape[1]])
            it += 1

            num_pts_prev = num_pts
            num_pts = oli_solver.num_points_added()
            if  ( num_pts > num_pts_prev ):
                #print 'number of points', num_pts
                current_pt = oli_solver.get_last_point_added()
                current_val = model(current_pt)
                current_pts = np.hstack(
                    ( current_pts, current_pt.reshape( current_pt.shape[0], 1 ) ) )
                current_vals = np.vstack( ( current_vals, current_val ) )
                # Interpolant built on selected points must reproduce the
                # model values at those points
                pce = oli_solver.get_current_interpolant(
                    current_pts, current_vals)
                current_pce_vals = pce.value(current_pts)
                assert np.allclose(current_pce_vals, current_vals)