Example #1
0
    def test_pseudospectral_approximation_tensor(self):
        """Visually compare a 2-D test function with its pseudospectral
        polynomial approximation built on a 5x5 tensor-grid index set.

        Produces a 3-D surface of the approximation overlaid with scatter
        points of the true function values; no assertion is made (this is
        a visual check).
        """
        def expfun(x):
            # Test function: exp(x0 + x1) plus an oscillatory cosine term in x0.
            return np.exp(x[0] + x[1]) + 0.5 * np.cos(x[0] * 2 * np.pi)

        # Compare actual function with polynomial approximation
        s = Parameter(lower=-1, upper=1, points=6)
        T = IndexSet('Tensor grid', [5, 5])
        uq = Polynomial([s, s], T)
        num_elements = 10
        coefficients, index_set, evaled_pts = uq.getPolynomialCoefficients(
            expfun)
        # NOTE(review): meshgrid here is a project helper (not numpy's) that
        # returns the flattened evaluation points plus the two grid axes.
        pts, x1, x2 = meshgrid(-1.0, 1.0, num_elements, num_elements)
        Approx = uq.getPolynomialApproximation(expfun, pts, coefficients)
        A = np.reshape(Approx, (num_elements, num_elements))
        fun = evalfunction(pts, expfun)

        # Now plot this surface.
        fig = plt.figure()
        # fig.gca(projection='3d') was deprecated in Matplotlib 3.4 and
        # removed in 3.7; add_subplot(projection='3d') is the supported form.
        ax = fig.add_subplot(projection='3d')
        surf = ax.plot_surface(x1,
                               x2,
                               A,
                               rstride=1,
                               cstride=1,
                               cmap=cm.winter,
                               linewidth=0,
                               antialiased=False,
                               alpha=0.5)
        ax.scatter(x1, x2, fun, 'ko')
        ax.set_zlim(0, 10)
        ax.set_xlabel('x1')
        ax.set_ylabel('x2')
        ax.set_zlabel('Response')

        fig.colorbar(surf, shrink=0.5, aspect=5)
Example #2
0
def gradients_univariate():
    """Gradient-enhanced least squares on a full (unsubsampled) grid.

    Builds the design matrix A (function values) and gradient matrix C for
    two uniform parameters on [-1, 1], stacks them, and solves the weighted
    least-squares system. Prints the matrix sizes and the solution.

    Relies on module-level names: Parameter, IndexSet, EffectiveSubsampling,
    evalfunction, evalgradients, qr, fun, fungrad.
    """
    # Parameters: 8-point uniform rules with gradient evaluation enabled.
    pt = 8
    x1 = Parameter(param_type="Uniform", lower=-1.0, upper=1.0, points=pt, derivative_flag=1)
    x2 = Parameter(param_type="Uniform", lower=-1.0, upper=1.0, points=pt, derivative_flag=1)
    parameters = [x1, x2]
    dims = len(parameters)

    # Basis selection: a total-order index set. (Renamed the misleading
    # local 'hyperbolic_cross' -- the basis requested is "Total order".)
    basis = IndexSet("Total order", orders=[pt - 1, pt - 1])
    esq = EffectiveSubsampling(parameters, basis)
    A, p, w = esq.getAmatrix()
    C = esq.getCmatrix()

    # Matrix sizes. Single-argument print() parses and prints identically
    # under Python 2 and 3 (the original 'print m, n' is Python-2-only).
    m, n = A.shape
    print('%s %s' % (m, n))
    print('*****************')
    m, n = C.shape
    print('%s %s' % (m, n))

    # Weighted least squares on the stacked [A; C] system.
    W = np.mat(np.diag(np.sqrt(w)))
    b = W.T * evalfunction(p, fun)
    d = evalgradients(p, fungrad, 'vector')
    x = qr.solveLSQ(np.vstack([A, C]), np.vstack([b, d]))
    print(x)
Example #3
0
def gradients_univariate_subsampled():
    """Gradient-enhanced least squares on a QR-pivot-subsampled grid (1-D).

    Subsamples the quadrature points via modified-Gram-Schmidt column
    pivoting of A^T, keeps just enough points to determine all basis
    coefficients from function + gradient data, then solves the
    constrained least-squares problem by direct elimination.

    Relies on module-level names: Parameter, IndexSet, EffectiveSubsampling,
    evalfunction, evalgradients, getRows, qr, fun, fungrad.
    """
    # Parameters: an 8-point uniform rule with gradients enabled.
    pt = 8
    x1 = Parameter(param_type="Uniform", lower=-1.0, upper=1.0, points=pt, derivative_flag=1)
    parameters = [x1]
    dims = len(parameters)

    # Basis selection: total-order index set of degree pt-1.
    basis = IndexSet("Total order", orders=[pt - 1])
    esq = EffectiveSubsampling(parameters, basis)
    A, p, w = esq.getAmatrix()
    C = esq.getCmatrix()

    # QR column pivoting: ranks quadrature points by how well they
    # condition A.
    P = qr.mgs_pivoting(A.T)

    # Each subsampled point supplies 1 function value + dims gradient
    # entries, so ceil-ish (terms + dims)/(dims + 1) points suffice.
    basis_terms_required = basis.getCardinality()
    # np.int was removed in NumPy 1.24 (deprecated since 1.20); the
    # builtin int() truncates identically here.
    minimum_points = int((basis_terms_required + dims) / (dims + 1.))
    nodes = P[0:minimum_points]
    A = getRows(A, nodes)
    C = getRows(C, nodes)

    # Subselect the matching weights and quadrature points.
    w = w[nodes]
    p = p[nodes, :]

    W = np.mat(np.diag(np.sqrt(w)))
    b = W.T * evalfunction(p, fun)
    d = evalgradients(p, fungrad, 'vector')

    # Direct-elimination constrained least squares.
    x = qr.solveCLSQ(A, b, C, d)
Example #4
0
def nogradients_univariate():
    """Plain (no-gradient) weighted least-squares coefficient fit.

    Solves W^T b = A x for the polynomial coefficients of two identical
    uniform parameters and prints the solution.

    Relies on module-level names: Parameter, EffectiveSubsampling,
    evalfunction, qr, fun.
    """
    # Parameters: a 6-point uniform rule on [-1, 1].
    pt = 6
    x1 = Parameter(param_type="Uniform", lower=-1.0, upper=1.0, points=pt)
    # NOTE(review): both dimensions share the SAME Parameter object;
    # presumably fine since they are identical -- confirm Parameter is
    # stateless across dimensions.
    parameters = [x1, x1]

    # Effective subsampling object (default basis).
    esq = EffectiveSubsampling(parameters)

    # Solve the weighted least-squares problem.
    A, p, w = esq.getAmatrix()  # Is this always square??
    W = np.mat(np.diag(np.sqrt(w)))
    b = W.T * evalfunction(p, fun)
    x = qr.solveLSQ(A, b)
    # Single-argument print() behaves identically on Python 2 and 3
    # (original 'print x' is Python-2-only syntax).
    print(x)
Example #5
0
def gradients_multivariate_subsampled():
    """Gradient-enhanced least squares on a QR-pivot-subsampled grid (2-D).

    Same pipeline as the univariate version, but with two parameters and a
    small over-sampling margin (+5 points); prints the condition number of
    the stacked system and the least-squares solution.

    Relies on module-level names: Parameter, IndexSet, EffectiveSubsampling,
    evalfunction, evalgradients, getRows, getRowsC, qr, fun, fungrad.
    """
    # Parameters: 3-point uniform rules with gradients enabled.
    pt = 3
    x1 = Parameter(param_type="Uniform", lower=-1.0, upper=1.0, points=pt, derivative_flag=1)
    x2 = Parameter(param_type="Uniform", lower=-1.0, upper=1.0, points=pt, derivative_flag=1)
    parameters = [x1, x2]
    dims = len(parameters)

    # Basis selection: total-order index set of degree pt-1 per dimension.
    basis = IndexSet("Total order", orders=[pt - 1, pt - 1])
    esq = EffectiveSubsampling(parameters, basis)
    A, p, w = esq.getAmatrix()
    C = esq.getCmatrix()

    # QR column pivoting on A^T ranks the quadrature points.
    P = qr.mgs_pivoting(A.T)

    # Minimum point count from function + gradient information, padded by
    # 5 extra points for conditioning.
    basis_terms_required = basis.getCardinality()
    # np.int was removed in NumPy 1.24; builtin int() truncates identically.
    minimum_points = int((basis_terms_required + dims) / (dims + 1.)) + 5
    nodes = P[0:minimum_points]
    A = getRows(A, nodes)
    C = getRowsC(C, nodes, dims)

    # Subselect the matching weights and quadrature points.
    w = w[nodes]
    p = p[nodes, :]
    W = np.mat(np.diag(np.sqrt(w)))
    b = W.T * evalfunction(p, fun)
    d = evalgradients(p, fungrad, 'vector')

    # Diagnostics, then solve the stacked least-squares problem.
    # (Single-argument print() is valid Python 2 and 3.)
    R = np.vstack([A, C])
    print(np.linalg.cond(R))
    print(R)
    print(np.vstack([b, d]))
    x = qr.solveLSQ(np.vstack([A, C]), np.vstack([b, d]))
    print('\n')
    print(x)

    """
def run(value):
    def fun(x):
        return np.exp(x[0] + 0.5*x[1])

    def fungrad(x):
        return [ np.exp(x[0] + 0.5*x[1]),  0.5*np.exp(x[0] + 0.5*x[1])  ] 

    ###############################################################################################
    # Tensor grid solution!
    ################################################################################################
    value_large = 80
    x1 = Parameter(param_type="Uniform", lower=-1, upper=1, points=value_large)
    x2 = Parameter(param_type="Uniform", lower=-1, upper=1, points=value_large)
    uq = Polynomial([x1,x2])
    all_coefficients, all_indices, evaled_pts = uq.getPolynomialCoefficients(fun)
    
    """
    x,y,z, max_order = twoDgrid(all_coefficients, all_indices)
    z = np.log10(np.abs(z))
    Zm = np.ma.masked_where(np.isnan(z),z)
    plt.pcolor(y,x, Zm, cmap='jet', vmin=-16, vmax=0)
    plt.title('SPAM coefficients')
    plt.xlabel('i1')
    plt.ylabel('i2')
    plt.colorbar()
    plt.xlim(0,max_order)
    plt.ylim(0,max_order)
    plt.show()
    sys.exit()
    """

    ################################################################################
    # Now with gradients and reducing the number of basis terms
    ################################################################################
    x1 = Parameter(param_type="Uniform", lower=-1, upper=1, points=value, derivative_flag=1)
    x2 = Parameter(param_type="Uniform", lower=-1, upper=1, points=value, derivative_flag=1)
    parameters = [x1, x2]
    hyperbolic_cross = IndexSet("Hyperbolic basis", orders=[value-1,value-1], q=1.0)
    esq = EffectiveSubsampling(parameters, hyperbolic_cross)

    # 1. Determine least number of subsamples required!
    minimum_subsamples = esq.least_no_of_subsamples_reqd() 
    esq.set_no_of_evals(minimum_subsamples)

    # 2. Store function & gradient values!
    fun_values = evalfunction(esq.subsampled_quadrature_points, fun)
    grad_values = evalgradients(esq.subsampled_quadrature_points, fungrad, 'matrix')

    # 3. Compute coefficients & errors using the two techniques!
    #print esq.no_of_basis_terms
    #esq.prune(np.rint( 0.2 * esq.no_of_basis_terms))
    #print esq.no_of_basis_terms
    #print '\n'
    x , cond =  esq.computeCoefficients(fun_values, grad_values, 'weighted')
    x_DE, cond_DE =  esq.computeCoefficients(fun_values, grad_values, 'constrainedDE')
    x_NS, cond_NS =  esq.computeCoefficients(fun_values, grad_values, 'constrainedNS')

    error , notused = compute_errors(all_coefficients, all_indices, x, esq.index_set.getIndexSet() )
    error_DE, notused = compute_errors(all_coefficients, all_indices, x_DE, esq.index_set.getIndexSet() )
    error_NS, notused = compute_errors(all_coefficients, all_indices, x_NS, esq.index_set.getIndexSet() )

    return error, error_DE, error_NS, cond, cond_DE, cond_NS, esq.no_of_basis_terms, minimum_subsamples
def run(value):
    """Compare three gradient-enhanced least-squares strategies.

    Computes a reference coefficient set on a dense 80x80 tensor grid,
    then repeatedly (25 trials, with noisy gradients) fits coefficients via
    'weighted', 'constrainedDE', and 'constrainedNS' strategies, plotting
    the per-basis-term coefficient errors and printing averaged errors.

    :param value: number of quadrature points per dimension for the
        subsampled, gradient-enhanced approximation.
    :return: tuple (error, error_DE, error_NS, cond, cond_DE, cond_NS,
        no_of_basis_terms, minimum_subsamples), taken from the LAST trial.

    Relies on module-level names: Parameter, Polynomial, IndexSet,
    EffectiveSubsampling, evalfunction, evalgradients, compute_errors, plt.
    """
    def fun(x):
        return np.exp(x[0] + 0.5*x[1])

    def fungrad(x):
        # Exact gradient polluted with noise of scale 1e-5 (the comment in
        # the original said "0 mean", though np.random.rand is uniform on
        # [0, 1) and therefore not zero-mean -- preserved as-is).
        return [ np.exp(x[0] + 0.5*x[1])+ 0.00001 * np.random.rand(1) ,  0.5*np.exp(x[0] + 0.5*x[1]) + 0.00001 * np.random.rand(1) ]

    # ------------------------------------------------------------------
    # Reference solution: dense 80x80 tensor-grid pseudospectral fit.
    # ------------------------------------------------------------------
    value_large = 80
    x1 = Parameter(param_type="Uniform", lower=-1, upper=1, points=value_large)
    x2 = Parameter(param_type="Uniform", lower=-1, upper=1, points=value_large)
    uq = Polynomial([x1,x2])
    all_coefficients, all_indices, evaled_pts = uq.getPolynomialCoefficients(fun)

    # ------------------------------------------------------------------
    # Gradient-enhanced approximation with a hyperbolic-cross basis.
    # ------------------------------------------------------------------
    x1 = Parameter(param_type="Uniform", lower=-1, upper=1, points=value, derivative_flag=1)
    x2 = Parameter(param_type="Uniform", lower=-1, upper=1, points=value, derivative_flag=1)
    parameters = [x1, x2]
    hyperbolic_cross = IndexSet("Hyperbolic basis", orders=[value-1,value-1], q=1.0)
    esq = EffectiveSubsampling(parameters, hyperbolic_cross)

    # 1. Determine the least number of subsamples required.
    minimum_subsamples = esq.least_no_of_subsamples_reqd()
    esq.set_no_of_evals(minimum_subsamples)

    # 2. Accumulate errors over repeated noisy-gradient trials.
    error_store = 0
    error_DE_store = 0
    error_NS_store = 0

    for k in range(0, 25):
        fun_values = evalfunction(esq.subsampled_quadrature_points, fun)
        grad_values = evalgradients(esq.subsampled_quadrature_points, fungrad, 'matrix')

        # 3. Coefficients & condition numbers for the three strategies.
        x, cond = esq.computeCoefficients(fun_values, grad_values, 'weighted')
        x_DE, cond_DE = esq.computeCoefficients(fun_values, grad_values, 'constrainedDE')
        x_NS, cond_NS = esq.computeCoefficients(fun_values, grad_values, 'constrainedNS')

        error, error_vec = compute_errors(all_coefficients, all_indices, x, esq.index_set.getIndexSet())
        error_DE, error_vecDE = compute_errors(all_coefficients, all_indices, x_DE, esq.index_set.getIndexSet())
        error_NS, error_vecNS = compute_errors(all_coefficients, all_indices, x_NS, esq.index_set.getIndexSet())

        error_store = error_store + error
        error_DE_store = error_DE_store + error_DE
        error_NS_store = error_NS_store + error_NS

        # One error curve per trial per strategy (labels repeat each pass;
        # harmless since no legend is drawn).
        plt.semilogy(error_vec, 'kx', markersize=15, linewidth=3, label='Weighted')
        plt.semilogy(error_vecDE, 'bo', markersize=14, linewidth=3, label='Constrained-DE', alpha=0.5)
        plt.semilogy(error_vecNS, 'gs', markersize=10, linewidth=3, label='Constrained-NS', alpha=0.4)

    plt.xlabel(r'Basis terms', fontsize=16)
    plt.ylabel(r'Coefficient errors', fontsize=16)
    plt.savefig('Errors.eps', format='eps', dpi=200, bbox_inches='tight')

    # NOTE(review): the loop runs 25 times but the accumulators are divided
    # by 50.0 -- looks like a stale divisor from an earlier 50-trial run;
    # left unchanged to preserve the printed output. Confirm with author.
    # (Single-argument print() is valid Python 2 and 3.)
    print(error_store / 50.0)
    print(error_DE_store / 50.0)
    print(error_NS_store / 50.0)

    return error, error_DE, error_NS, cond, cond_DE, cond_NS, esq.no_of_basis_terms, minimum_subsamples