Esempio n. 1
0
def multinomial_coeffs_of_power_of_nd_linear_monomial(num_vars, degree):
    """ Compute the multinomial coefficients of the individual terms
    obtained when taking the power of a linear polynomial
    (without constant term).

    Given a linear multivariate polynomial, e.g.
    (x1+x2+x3)**2 = x1**2+2*x1*x2+2*x1*x3+x2**2+2*x2*x3+x3**2
    return the coefficients of each quadratic term, i.e.
    [1,2,2,1,2,1]

    Parameters
    ----------
    num_vars : integer
        the dimension of the multivariate polynomial
    degree : integer
        the power of the linear polynomial

    Returns
    -------
    coeffs: np.ndarray (num_terms)
        the multinomial coefficients of the polynomial obtained when
        raising the linear multivariate polynomial to the power=degree

    indices: np.ndarray (num_vars, num_terms)
        the multi-indices of the polynomial terms obtained when
        raising the linear multivariate polynomial to the power=degree
    """
    # all multi-indices with total degree exactly ``degree``
    indices = compute_hyperbolic_level_indices(num_vars, degree, 1.0)
    coeffs = multinomial_coefficients(indices)
    return coeffs, indices
Esempio n. 2
0
def total_degree_basis_generator(num_vars, degree):
    """
    Generate all multi-indices i satisfying ||i||_1 = degree.

    Useful as a basis generator when computing oli_leja sequences.
    """
    indices = compute_hyperbolic_level_indices(num_vars, degree, 1.0)
    return degree + 1, indices
Esempio n. 3
0
    def test_uniform_2d_subset_of_points(self):
        """x in U[0,1]^2: no initial points, no candidate basis, no
        preconditioning, no pivot weights, but DO return a subset of
        the candidate points."""
        num_vars = 2
        var_trans = define_iid_random_variable_transformation(
            uniform(), num_vars)
        pce_opts = {
            'poly_type': 'jacobi',
            'alpha_poly': 0,
            'beta_poly': 0,
            'var_trans': var_trans,
        }

        # orthogonal least interpolation options
        oli_opts = {'verbosity': 0, 'assume_non_degeneracy': False}

        def basis_generator(num_vars, degree):
            # all total-degree indices of the requested degree
            return (degree + 1,
                    compute_hyperbolic_level_indices(num_vars, degree, 1.0))

        # quadratic target with a cross term
        def model(x):
            return np.asarray([x[0]**2 + x[1]**2 + x[0]*x[1]]).T

        # points to interpolate
        pts = get_tensor_product_points(1, var_trans, 'CC')
        helper_least_factorization(
            pts, model, var_trans, pce_opts, oli_opts, basis_generator,
            max_num_pts=6, exact_mean=11./12.)
Esempio n. 4
0
    def test_uniform_3d_user_domain(self):
        """x in U[0,1]^3: no initial points, no candidate basis, no
        preconditioning, no pivot weights, no subset of points returned."""
        # PCE options
        num_vars = 3
        var_trans = define_iid_random_variable_transformation(
            uniform(), num_vars)
        pce_opts = {
            'poly_type': 'jacobi',
            'alpha_poly': 0,
            'beta_poly': 0,
            'var_trans': var_trans,
        }

        # orthogonal least interpolation options
        oli_opts = {'verbosity': 0, 'assume_non_degeneracy': False}

        def basis_generator(num_vars, degree):
            # all total-degree indices of the requested degree
            return (degree + 1,
                    compute_hyperbolic_level_indices(num_vars, degree, 1.0))

        # target: sum of squares plus pairwise and triple cross terms
        def model(x):
            return np.array(
                [np.sum(x**2, axis=0) + x[0]*x[1] + x[1]*x[2] +
                 x[0]*x[1]*x[2]]).T

        # points to interpolate
        pts = get_tensor_product_points(2, var_trans, 'CC')
        helper_least_factorization(
            pts, model, var_trans, pce_opts, oli_opts,
            basis_generator, exact_mean=13./8.)
Esempio n. 5
0
    def test_uniform_2d_user_domain(self):
        """x in U[0,1]^2: no initial points, no candidate basis, no
        preconditioning, no pivot weights, no subset of points returned."""
        # PCE options
        num_vars = 2
        var_trans = define_iid_random_variable_transformation(
            uniform(0, 1), num_vars)
        pce_opts = define_poly_options_from_variable_transformation(var_trans)

        # orthogonal least interpolation options
        oli_opts = {'verbosity': 0, 'assume_non_degeneracy': False}

        def basis_generator(num_vars, degree):
            # all total-degree indices of the requested degree
            return (degree + 1,
                    compute_hyperbolic_level_indices(num_vars, degree, 1.0))

        # quadratic target with a cross term
        def model(x):
            return np.asarray([x[0]**2 + x[1]**2 + x[0] * x[1]]).T

        # points to interpolate
        pts = get_tensor_product_points(1, var_trans, 'CC')
        helper_least_factorization(
            pts, model, var_trans, pce_opts, oli_opts, basis_generator,
            exact_mean=11. / 12.)
Esempio n. 6
0
    def test_beta_2d_preconditioning(self):
        """
        Interpolate a set of points using preconditioning. First select
        all initial points then add a subset of the remaining points.

        x in Beta(2,5)[0,1]^2
        """
        num_vars = 2
        alpha_stat, beta_stat = 2, 5
        var_trans = define_iid_random_variable_transformation(
            beta(alpha_stat, beta_stat, -1, 2), num_vars)
        pce_opts = define_poly_options_from_variable_transformation(var_trans)

        # orthogonal least interpolation options
        oli_opts = {'verbosity': 0, 'assume_non_degeneracy': False}

        def basis_generator(num_vars, degree):
            # all total-degree indices of the requested degree
            return (degree + 1,
                    compute_hyperbolic_level_indices(num_vars, degree, 1.0))

        # precondition with the tensor-product Beta density, mapped from
        # the canonical domain [-1, 1] back onto [0, 1]
        univariate_beta_pdf = partial(beta.pdf, a=alpha_stat, b=beta_stat)

        def univariate_pdf(x):
            return univariate_beta_pdf((x + 1.) / 2.) / 2.

        preconditioning_function = partial(
            tensor_product_pdf, univariate_pdfs=univariate_pdf)

        # target function
        def model(x):
            return np.asarray([(x[0]**2 - 1) + (x[1]**2 - 1) + x[0]*x[1]]).T

        # points to interpolate; seed the factorization with the first sample
        pts = generate_independent_random_samples(var_trans.variable, 12)
        initial_pts = np.array([pts[:, 0]]).T

        helper_least_factorization(
            pts, model, var_trans, pce_opts, oli_opts, basis_generator,
            initial_pts=initial_pts, max_num_pts=12,
            preconditioning_function=preconditioning_function)
Esempio n. 7
0
def get_isotropic_sparse_grid_subspace_indices(num_vars, level):
    """Return the subspace indices of an isotropic sparse grid and the
    associated Smolyak (combination-technique) coefficients.

    The coefficient of every subspace with |i|_1 = level - dd is
    (-1)**dd * C(num_vars-1, dd).
    """
    # seed with correctly-typed empty arrays so the result dtypes match
    # even when the loop adds nothing
    index_arrays = [np.empty((num_vars, 0), dtype=int)]
    coef_arrays = [np.empty((0), dtype=float)]
    for dd in range(min(num_vars, level + 1)):
        level_indices = compute_hyperbolic_level_indices(
            num_vars, level - dd, 1.0)
        index_arrays.append(level_indices)
        coef = (-1.0)**(dd) * nchoosek(num_vars - 1, dd)
        coef_arrays.append(coef * np.ones(level_indices.shape[1]))
    return np.hstack(index_arrays), np.hstack(coef_arrays)
Esempio n. 8
0
    def test_uniform_2d_degenerate_initial_and_subset_points(self):
        """
        Interpolate a set of points, by first selecting all initial points
        which are degenerate then adding a subset of the remaining points.

        CHECK: Orthogonal least interpolation produces an interpolant but
        does not approximate the function exactly.

        x in U[0,1]^2
        """
        num_vars = 2
        var_trans = define_iid_random_variable_transformation(
            uniform(), num_vars)
        pce_opts = define_poly_options_from_variable_transformation(var_trans)

        # force the solver to honor all (degenerate) initial points, in
        # order, so that the factorization must raise
        oli_opts = {
            'verbosity': 0,
            'assume_non_degeneracy': False,
            'enforce_all_initial_points_used': True,
            'enforce_ordering_of_initial_points': True,
        }

        def basis_generator(num_vars, degree):
            # all total-degree indices of the requested degree
            return (degree + 1,
                    compute_hyperbolic_level_indices(num_vars, degree, 1.0))

        # target function
        def model(x):
            return np.asarray([
                0.5 * (3 * x[0]**2 - 1) + 0.5 * (3 * x[1]**2 - 1) +
                x[0] * x[1]]).T

        # points to interpolate
        pts = get_tensor_product_points(2, var_trans, 'CC')
        initial_pts = get_tensor_product_points(1, var_trans, 'CC')
        with self.assertRaises(Exception):
            helper_least_factorization(
                pts, model, var_trans, pce_opts, oli_opts, basis_generator,
                initial_pts=initial_pts, max_num_pts=12,
                use_preconditioning=1)
Esempio n. 9
0
    def test_adaptive_multivariate_sampling_jacobi(self):
        """Grow an induced sample set one total-degree level at a time and
        check the final preconditioned basis matrix stays well conditioned."""
        num_vars = 2
        degree = 6
        alph = 5
        bet = 5.

        var_trans = AffineRandomVariableTransformation(
            IndependentMultivariateRandomVariable(
                [beta(alph, bet, -1, 3)], [np.arange(num_vars)]))
        pce_opts = define_poly_options_from_variable_transformation(var_trans)

        pce = PolynomialChaosExpansion()
        pce.configure(pce_opts)

        # start from the linear basis with a condition-number tolerance
        indices = compute_hyperbolic_indices(num_vars, 1, 1.0)
        pce.set_indices(indices)
        cond_tol = 1e2
        samples = generate_induced_samples_migliorati_tolerance(pce, cond_tol)

        # add one total-degree level at a time, enriching the sample set
        for dd in range(2, degree):
            new_indices = compute_hyperbolic_level_indices(num_vars, dd, 1.)
            samples = increment_induced_samples_migliorati(
                pce, cond_tol, samples, indices, new_indices)
            indices = np.hstack((indices, new_indices))
            pce.set_indices(indices)

        samples = var_trans.map_from_canonical_space(samples)
        cond = compute_preconditioned_basis_matrix_condition_number(
            pce.basis_matrix, samples)
        assert cond < cond_tol
Esempio n. 10
0
def get_coefficients_for_plotting(pce, qoi_idx):
    """Return the PCE coefficients of one QoI grouped by total degree.

    Parameters
    ----------
    pce : PolynomialChaosExpansion
        A configured expansion with coefficients set.
    qoi_idx : integer
        The column (quantity of interest) of the coefficient array.

    Returns
    -------
    coeff_sorted : np.ndarray (num_terms)
        Coefficients grouped by total degree. Terms not present in
        ``pce.indices`` get a coefficient of 0.
    degree_indices_set : np.ndarray (num_vars, num_terms)
        The multi-indices, column-aligned with ``coeff_sorted``.
    degree_breaks : list of integer
        Positions in ``coeff_sorted`` where each degree group ends.
    """
    coeff = pce.get_coefficients()[:, qoi_idx]
    indices = pce.indices.copy()
    assert coeff.shape[0] == indices.shape[1]

    num_vars = pce.num_vars()
    max_degree = indices.sum(axis=0).max()

    # map each multi-index of the expansion to its coefficient column
    indices_dict = dict()
    for ii in range(indices.shape[1]):
        key = hash_array(indices[:, ii])
        indices_dict[key] = ii

    i = 0
    degree_breaks = []
    coeff_sorted = []
    degree_indices_set = np.empty((num_vars, 0))
    for degree in range(max_degree+1):
        nterms = nchoosek(num_vars+degree, degree)
        if nterms >= 1e6:
            # BUG FIX: this message was a bare-string no-op; report it
            print('Could not plot coefficients of terms with degree >= %d'
                  % degree)
            break
        degree_indices = compute_hyperbolic_level_indices(
            num_vars, degree, 1.)
        # BUG FIX: previously hstacked `indices` (the full pce index set)
        # instead of this degree's indices, so the returned index set did
        # not match coeff_sorted. Columns are reversed so they stay aligned
        # with the reversed coefficient loop below.
        degree_indices_set = np.hstack(
            (degree_indices_set, degree_indices[:, ::-1]))
        for ii in range(degree_indices.shape[1]-1, -1, -1):
            index = degree_indices[:, ii]
            key = hash_array(index)
            if key in indices_dict:
                coeff_sorted.append(coeff[indices_dict[key]])
            else:
                # term absent from the expansion: plot with zero coefficient
                coeff_sorted.append(0.0)
            i += 1
        degree_breaks.append(i)

    return np.array(coeff_sorted), degree_indices_set, degree_breaks
Esempio n. 11
0
    def test_least_interpolation_lu_equivalence_in_1d(self):
        """
        Check that in 1D orthogonal least interpolation selects the same
        points as a truncated pivoted LU factorization of the
        preconditioned Vandermonde matrix, and that the LU and LUH
        factorizations both reproduce that matrix.
        """
        num_vars = 1
        alpha_stat = 2; beta_stat  = 5
        max_num_pts = 100

        var_trans = define_iid_random_variable_transformation(
            beta(alpha_stat,beta_stat),num_vars)
        pce_opts = {'alpha_poly':beta_stat-1,'beta_poly':alpha_stat-1,
                    'var_trans':var_trans,'poly_type':'jacobi',}

        # Set oli options
        oli_opts = {'verbosity':0,
                    'assume_non_degeneracy':False}

        basis_generator = \
          lambda num_vars,degree: (degree+1,compute_hyperbolic_level_indices(
              num_vars,degree,1.0))

        pce = PolynomialChaosExpansion()
        pce.configure(pce_opts)

        oli_solver = LeastInterpolationSolver()
        oli_solver.configure(oli_opts)
        oli_solver.set_pce(pce)

        # univariate_beta_pdf = partial(beta.pdf,a=alpha_stat,b=beta_stat)
        # univariate_pdf = lambda x: univariate_beta_pdf(x)
        # preconditioning_function = partial(
        #     tensor_product_pdf,univariate_pdfs=univariate_pdf)
        # use a basis large enough to absorb max_num_pts interpolation points
        from pyapprox.indexing import get_total_degree
        max_degree = get_total_degree(num_vars,max_num_pts)
        indices = compute_hyperbolic_indices(num_vars, max_degree, 1.)
        pce.set_indices(indices)

        # precondition with the inverse Christoffel function of the basis
        from pyapprox.polynomial_sampling import christoffel_function
        preconditioning_function = lambda samples: 1./christoffel_function(
            samples,pce.basis_matrix)

        oli_solver.set_preconditioning_function(preconditioning_function)
        oli_solver.set_basis_generator(basis_generator)

        initial_pts = None
        candidate_samples = np.linspace(0.,1.,1000)[np.newaxis,:]

        oli_solver.factorize(
            candidate_samples, initial_pts,
            num_selected_pts = max_num_pts)

        oli_samples = oli_solver.get_current_points()

        # factorize the same preconditioned Vandermonde matrix with a
        # truncated pivoted LU and compare the selected points
        from pyapprox.utilities import truncated_pivoted_lu_factorization
        pce.set_indices(oli_solver.selected_basis_indices)
        basis_matrix = pce.basis_matrix(candidate_samples)
        weights = np.sqrt(preconditioning_function(candidate_samples))
        basis_matrix = np.dot(np.diag(weights),basis_matrix)
        L,U,p = truncated_pivoted_lu_factorization(
            basis_matrix,max_num_pts)
        assert p.shape[0]==max_num_pts
        lu_samples = candidate_samples[:,p]

        # LU pivoting must select exactly the oli points, in the same order
        assert np.allclose(lu_samples,oli_samples)

        L1,U1,H1 = oli_solver.get_current_LUH_factors()

        # both factorizations must reproduce the weighted Vandermonde rows
        true_permuted_matrix = (pce.basis_matrix(lu_samples).T*weights[p]).T
        assert np.allclose(np.dot(L,U),true_permuted_matrix)
        assert np.allclose(np.dot(L1,np.dot(U1,H1)),true_permuted_matrix)
Esempio n. 12
0
    def test_factorization_using_exact_algebra(self):
        """
        Check each step of the incremental least-interpolation
        factorization against L, U factors derived by hand with exact
        algebra for four fixed candidate points in 2D.
        """

        num_vars = 2
        alpha_stat = 2; beta_stat  = 5
        var_trans = define_iid_random_variable_transformation(
            beta(alpha_stat,beta_stat,-2,1),num_vars)
        pce_opts = {'alpha_poly':beta_stat-1,'beta_poly':alpha_stat-1,
                    'var_trans':var_trans,'poly_type':'jacobi'}

        pce = PolynomialChaosExpansion()
        pce.configure(pce_opts)

        oli_opts = {'verbosity':0,
                    'assume_non_degeneracy':False}

        basis_generator = \
          lambda num_vars,degree: (degree+1,compute_hyperbolic_level_indices(
              num_vars,degree,1.0))

        oli_solver = LeastInterpolationSolver()
        oli_solver.configure(oli_opts)
        oli_solver.set_pce(pce)
        oli_solver.set_basis_generator(basis_generator)


        # Define 4 candidate points so no pivoting is necessary
        # NOTE(review): these imports shadow the builtins `sum` and the
        # earlier module-level numpy import inside this method scope
        from numpy import sqrt, dot, sum, array, zeros
        from numpy.linalg import norm
        candidate_pts = array([[-1.,1./sqrt(2.),-1./sqrt(2.),0.],
                                   [-1.,-1./sqrt(2.),0.,0.]] )

        U = np.zeros((4,4))

        # hand-computed (L, U) pairs, one per factorization step
        factor_history = []

        # Build vandermonde matrix for all degrees ahead of time
        degree = 2
        indices = compute_hyperbolic_indices(num_vars,degree,1.)
        pce.set_indices(indices)
        V = pce.basis_matrix(candidate_pts)

        ##--------------------- ##
        ## S=1                  ##
        ##--------------------- ##

        #print 'V\n',V

        #print '################################'
        # residuals of the linear basis after removing the first row
        U1 = array([[V[0,1],V[0,2]],
                     [V[1,1]-V[0,1],V[1,2]-V[0,2]],
                     [V[2,1]-V[0,1],V[2,2]-V[0,2]],
                     [V[3,1]-V[0,1],V[3,2]-V[0,2]]])

        norms = [sqrt((V[1,1]-V[0,1])**2+(V[1,2]-V[0,2])**2),
                  sqrt((V[2,1]-V[0,1])**2+(V[2,2]-V[0,2])**2),
                  sqrt((V[3,1]-V[0,1])**2+(V[3,2]-V[0,2])**2)]
        U1[1,:] /= norms[0]
        #print 'U1\n',U1

        #print 'norms\n', norms

        magic_row = array([[(V[1,1]-V[0,1])/norms[0],(V[1,2]-V[0,2])/norms[0]]])
        #print 'magic_row\n',magic_row

        inner_products = array([(V[1,1]-V[0,1])*(V[1,1]-V[0,1])/norms[0]+
                                 (V[1,2]-V[0,2])*(V[1,2]-V[0,2])/norms[0],
                                 (V[2,1]-V[0,1])*(V[1,1]-V[0,1])/norms[0]+
                                 (V[2,2]-V[0,2])*(V[1,2]-V[0,2])/norms[0],
                                 (V[3,1]-V[0,1])*(V[1,1]-V[0,1])/norms[0]+
                                 (V[3,2]-V[0,2])*(V[1,2]-V[0,2])/norms[0]])
        #print 'inner_products\n', inner_products

        v1 = inner_products
        L = np.array([[1,0,0,0],[0,v1[0],v1[1],v1[2]]]).T
        #print 'L\n',L

        Z = array([[V[0,1]*(V[1,1]-V[0,1])/norms[0]+V[0,2]*(V[1,2]-V[0,2])/norms[0]]])
        #print 'Z\n',Z

        U=array([[1,Z[0,0]],[0,1]])
        #print 'U\n',U

        factor_history.append((L,U))

        ##--------------------- ##
        ## S=2                  ##
        ##--------------------- ##

        #print '################################'
        U2 = array([[V[0,1],V[0,2]],
                     [(V[1,1]-V[0,1])/L[1,1],(V[1,2]-V[0,2])/L[1,1]],
                     [(V[2,1]-V[0,1])-L[2,1]*(V[1,1]-V[0,1])/L[1,1],
                      (V[2,2]-V[0,2])-L[2,1]*(V[1,2]-V[0,2])/L[1,1]],
                     [(V[3,1]-V[0,1])-L[3,1]*(V[1,1]-V[0,1])/L[1,1],
                      (V[3,2]-V[0,2])-L[3,1]*(V[1,2]-V[0,2])/L[1,1]]])

        #print 'U2\n',U2

        norms = [sqrt(((V[2,1]-V[0,1])-L[2,1]*(V[1,1]-V[0,1])/L[1,1])**2+
                   ((V[2,2]-V[0,2])-L[2,1]*(V[1,2]-V[0,2])/L[1,1])**2),
                 sqrt(((V[3,1]-V[0,1])-L[3,1]*(V[1,1]-V[0,1])/L[1,1])**2+
                   ((V[3,2]-V[0,2])-L[3,1]*(V[1,2]-V[0,2])/L[1,1])**2)]
        U2[2,:] /= norms[0]
        #print 'U2\n',U2

        #print 'norms\n', norms

        magic_row = array([(V[2,1]-V[0,1])-L[2,1]*(V[1,1]-V[0,1])/L[1,1],
                            (V[2,2]-V[0,2])-L[2,1]*(V[1,2]-V[0,2])/L[1,1]])/norms[0]
        #print 'magic_row', magic_row

        inner_products = [norms[0],((V[2,1]-V[0,1])-L[2,1]*(V[1,1]-V[0,1])/L[1,1])*((V[3,1]-V[0,1])-L[3,1]*(V[1,1]-V[0,1])/L[1,1])/norms[0]+((V[2,2]-V[0,2])-L[2,1]*(V[1,2]-V[0,2])/L[1,1])*((V[3,2]-V[0,2])-L[3,1]*(V[1,2]-V[0,2])/L[1,1])/norms[0]]
        #print 'inner_products',inner_products

        v2 = inner_products
        L = np.array([[1,0,0,0],[0,v1[0],v1[1],v1[2]],[0,0,v2[0],v2[1]]]).T
        #print 'L\n',L

        Z = [V[0,1]/norms[0]*((V[2,1]-V[0,1])-L[2,1]*(V[1,1]-V[0,1])/L[1,1])+
              V[0,2]/norms[0]*((V[2,2]-V[0,2])-L[2,1]*(V[1,2]-V[0,2])/L[1,1]),
              (V[1,1]-V[0,1])/(L[1,1]*norms[0])*((V[2,1]-V[0,1])-L[2,1]*(V[1,1]-V[0,1])/L[1,1])+
              (V[1,2]-V[0,2])/(L[1,1]*norms[0])*((V[2,2]-V[0,2])-L[2,1]*(V[1,2]-V[0,2])/L[1,1])]
        #print 'Z\n',Z

        # grow U by one row/column, keeping the previous block
        U_prev = U.copy(); U = zeros( ( 3,3 ) ); U[:2,:2] = U_prev
        U[:2,2] = Z; U[2,2]=1
        #print 'U\n', U

        factor_history.append((L,U))

        ##--------------------- ##
        ## S=3                  ##
        ##--------------------- ##

        #print '################################'
        # move to the quadratic basis columns (3,4,5) of V
        U3 = array([[V[0,3],V[0,4],V[0,5]],
                     [(V[1,3]-V[0,3])/L[1,1],(V[1,4]-V[0,4])/L[1,1],(V[1,5]-V[0,5])/L[1,1]],
                     [((V[2,3]-V[0,3])-L[2,1]*(V[1,3]-V[0,3])/L[1,1])/L[2,2],((V[2,4]-V[0,4])-L[2,1]*(V[1,4]-V[0,4])/L[1,1])/L[2,2],((V[2,5]-V[0,5])-L[2,1]*(V[1,5]-V[0,5])/L[1,1])/L[2,2]],
                     [(V[3,3]-V[0,3])-L[3,1]*(V[1,3]-V[0,3])/L[1,1]-L[3,2]/L[2,2]*(V[2,3]-V[0,3]-L[2,1]/L[1,1]*(V[1,3]-V[0,3])),(V[3,4]-V[0,4])-L[3,1]*(V[1,4]-V[0,4])/L[1,1]-L[3,2]/L[2,2]*(V[2,4]-V[0,4]-L[2,1]/L[1,1]*(V[1,4]-V[0,4])),(V[3,5]-V[0,5])-L[3,1]*(V[1,5]-V[0,5])/L[1,1]-L[3,2]/L[2,2]*(V[2,5]-V[0,5]-L[2,1]/L[1,1]*(V[1,5]-V[0,5]))]])

        norms = [norm(U3[3,:])]

        U3[3,:] /= norms[0]
        #print 'U3\n', U3

        #print 'norms\n', norms

        magic_row = array([U3[3,:]])
        #print 'magic_row', magic_row

        inner_products = [norms[0]]
        #print 'inner_products\n', inner_products

        L_prev = L.copy(); L = zeros( (4,4) ); L[:,:3] = L_prev;
        L[3,3] = inner_products[0]
        #print 'L\n', L

        Z = dot( U3[:3,:3], magic_row.T )
        #print 'Z\n',Z

        U_prev = U.copy(); U = zeros( ( 4,4 ) ); U[:3,:3] = U_prev
        U[:3,3] = Z.squeeze(); U[3,3]=1
        #print 'U\n',U
        #assert False

        factor_history.append((L,U))


        candidate_pts = array([[-1.,1./sqrt(2.),-1./sqrt(2.),0.],
                                [-1.,-1./sqrt(2.),0.,0.]] )

        # define target function
        model = lambda x: np.asarray([x[0]**2 + x[1]**2 +  x[0]*x[1]]).T

        # now run the solver one point at a time and compare against the
        # hand-computed factors
        #num_starting_pts = 5
        num_starting_pts = 1
        initial_pts = None
        oli_solver.factorize(
            candidate_pts, initial_pts, num_selected_pts=num_starting_pts )


        L,U,H=oli_solver.get_current_LUH_factors()
        #print 'L\n',L
        #print 'U\n',U
        #print 'H\n',H
        it = 0
        # NOTE(review): these np.allclose results are discarded (no assert),
        # so the comparisons never fail; also the second line compares U
        # against factor_history[it][0] (the L factor) — looks like it
        # should be [it][1]. Confirm intent before tightening.
        np.allclose(L[:1,:1],factor_history[it][0])
        np.allclose(U[:1,:1],factor_history[it][0])

        current_pts = oli_solver.get_current_points()
        current_vals = model(current_pts)

        num_pts = current_pts.shape[1]
        num_pts_prev = current_pts.shape[1]
        max_num_pts =  candidate_pts.shape[1]
        finalize = False
        while not finalize:
            if ( ( num_pts == max_num_pts-1) or
                (num_pts == candidate_pts.shape[1]) ):
                 finalize = True

            oli_solver.update_factorization(1)


            L,U,H=oli_solver.get_current_LUH_factors()
            #print '###########'
            #print 'L\n',L
            #print 'U\n',U
            #print 'H\n',H
            # NOTE(review): results discarded here too — no assert
            np.allclose(L,
                    factor_history[it][0][:L.shape[0],:L.shape[1]])
            np.allclose(U,
                    factor_history[it][1][:U.shape[0],:U.shape[1]])
            it += 1

            num_pts_prev = num_pts
            num_pts = oli_solver.num_points_added()
            if  ( num_pts > num_pts_prev ):
                #print 'number of points', num_pts
                # a new point was accepted: the interpolant built from all
                # selected points must reproduce the model values exactly
                current_pt = oli_solver.get_last_point_added()
                current_val = model(current_pt)
                current_pts = np.hstack(
                    ( current_pts, current_pt.reshape( current_pt.shape[0], 1 ) ) )
                current_vals = np.vstack( ( current_vals, current_val ) )
                pce = oli_solver.get_current_interpolant(
                    current_pts, current_vals)
                current_pce_vals = pce.value(current_pts)
                assert np.allclose(current_pce_vals, current_vals)
0
 def basis_generator(num_vars, degree):
     """Return the number of terms hint and all multi-indices with
     total degree exactly ``degree``."""
     indices = compute_hyperbolic_level_indices(num_vars, degree, 1.0)
     return degree + 1, indices