Example #1
import numpy as np
import scipy.sparse.linalg as sparpack
from scipy.sparse.linalg import LinearOperator

# covTimesV and calculateCovarianceMatrix are helpers defined elsewhere in
# the source project; a sketch of plausible implementations follows below.

def calculateKernelBasisFunctionsMC(kernel, numBasis, mcPoints):
    """
    Calculates basis functions using Monte Carlo sampling and the Nystrom method.

    Parameters
    ----------
    kernel : callable
        covariance kernel function
    numBasis : int
        number of eigenpairs to compute
    mcPoints : ndarray
        Monte Carlo sample points

    Returns
    -------
    eigv : 1darray
        eigenvalues, shape (numBasis,)
    eigve : ndarray
        eigenvectors, shape (nMC, numBasis)
    """
    linearOpVersion = 1
    nMC = mcPoints.shape[0]
    numberEigenVectors = int(min(numBasis, nMC))

    #print "looking for ", numberEigenVectors, "eigenvalues"e
    if linearOpVersion:
        # matrix-free version: apply the covariance operator via covTimesV
        covop = lambda v, k=kernel, p=mcPoints: covTimesV(v, k, p)
        A = LinearOperator((nMC, nMC), matvec=covop, dtype=float)
        eigv, eigve = sparpack.eigsh(A,
                                     k=numberEigenVectors,
                                     maxiter=10 * numberEigenVectors)
    else:
        #print "calculateCovariance Matrix "
        covm = calculateCovarianceMatrix(kernel, mcPoints)
        eigv, eigve = sparpack.eigsh(covm,
                                     k=numberEigenVectors,
                                     maxiter=50 * numberEigenVectors)

    #print "completed eigenvalue decomposition "
    #sort in decending order
    eigv = eigv[::-1]
    eigve = eigve[:, ::-1]

    # Nystrom scaling by the Monte Carlo sample size
    eigve = eigve * np.sqrt(float(nMC))
    eigv = eigv / float(nMC)

    return eigv, eigve
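The helpers covTimesV and calculateCovarianceMatrix are not included in this
example. A minimal sketch of what they plausibly look like for a pointwise
kernel, plus a usage example; everything below is illustrative, not from the
source project:

def calculateCovarianceMatrix(kernel, points):
    # Dense covariance matrix C[i, j] = kernel(points[i], points[j]).
    n = points.shape[0]
    C = np.empty((n, n))
    for i in range(n):
        for j in range(n):
            C[i, j] = kernel(points[i], points[j])
    return C

def covTimesV(v, kernel, points):
    # Matrix-free product C @ v, built one row of C at a time.
    n = points.shape[0]
    out = np.empty(n)
    for i in range(n):
        row = np.array([kernel(points[i], points[j]) for j in range(n)])
        out[i] = row @ v
    return out

# Usage: squared-exponential kernel on 200 uniform Monte Carlo points.
rng = np.random.default_rng(0)
mcPoints = rng.uniform(0.0, 1.0, size=(200, 1))
sqexp = lambda x, y: np.exp(-np.sum((x - y) ** 2) / 0.1)
eigv, eigve = calculateKernelBasisFunctionsMC(sqexp, 10, mcPoints)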
Example #3
def _sparse_spectral(A, dim=2):
    # Input: adjacency matrix A (scipy sparse).
    # Uses the sparse eigenvalue solver from scipy.
    # Could use multilevel methods here; see Koren, "On spectral graph drawing".
    import numpy as np
    import networkx as nx
    from scipy.sparse import spdiags
    from scipy.sparse.linalg import eigsh

    try:
        nnodes, _ = A.shape
    except AttributeError:
        msg = "sparse_spectral() takes an adjacency matrix as input"
        raise nx.NetworkXError(msg)

    # form Laplacian matrix
    data = np.asarray(A.sum(axis=1).T)
    D = spdiags(data, 0, nnodes, nnodes)
    L = D - A

    k = dim + 1
    # number of Lanczos vectors for the ARPACK solver. What is the right scaling?
    ncv = max(2 * k + 1, int(np.sqrt(nnodes)))
    # return smallest k eigenvalues and eigenvectors
    eigenvalues, eigenvectors = eigsh(L, k, which='SM', ncv=ncv)
    index = np.argsort(eigenvalues)[1:k]  # 0 index is zero eigenvalue
    return np.real(eigenvectors[:, index])
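A hedged usage sketch; the cycle-graph input below is illustrative, not from
the source:

import numpy as np
import scipy.sparse as sp

# Adjacency matrix of a 10-node cycle graph.
n = 10
i = np.arange(n)
A = sp.coo_matrix((np.ones(n), (i, (i + 1) % n)), shape=(n, n))
A = (A + A.T).tocsr()   # symmetrize

pos = _sparse_spectral(A, dim=2)   # (10, 2) array of layout coordinates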
Example #4
def _sparse_spectral(A, dim=2):
    # Input: adjacency matrix A (scipy sparse).
    # Uses the sparse eigenvalue solver from scipy.
    # Could use multilevel methods here; see Koren, "On spectral graph drawing".
    try:
        import numpy as np
        from scipy.sparse import spdiags
    except ImportError:
        raise ImportError("_sparse_spectral() requires scipy & numpy: http://scipy.org/")
    try:
        from scipy.sparse.linalg import eigsh
    except ImportError:
        # scipy <0.9.0 names eigsh differently
        from scipy.sparse.linalg import eigen_symmetric as eigsh
    try:
        nnodes, _ = A.shape
    except AttributeError:
        raise nx.NetworkXError("sparse_spectral() takes an adjacency matrix as input")

    # form Laplacian matrix
    data = np.asarray(A.sum(axis=1).T)
    D = spdiags(data, 0, nnodes, nnodes)
    L = D - A

    k = dim + 1
    # number of Lanczos vectors for the ARPACK solver. What is the right scaling?
    ncv = max(2 * k + 1, int(np.sqrt(nnodes)))
    # return smallest k eigenvalues and eigenvectors
    eigenvalues, eigenvectors = eigsh(L, k, which="SM", ncv=ncv)
    index = np.argsort(eigenvalues)[1:k]  # 0 index is zero eigenvalue
    return np.real(eigenvectors[:, index])
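The Laplacian construction above in miniature (illustrative values, not from
the source): for a 3-node path graph the row sums give the degrees, and
L = D - A.

import numpy as np
from scipy.sparse import spdiags, csr_matrix

A = csr_matrix(np.array([[0., 1., 0.],
                         [1., 0., 1.],
                         [0., 1., 0.]]))
data = np.asarray(A.sum(axis=1).T)   # degrees: [[1., 2., 1.]]
D = spdiags(data, 0, 3, 3)
L = D - A
print(L.toarray())
# [[ 1. -1.  0.]
#  [-1.  2. -1.]
#  [ 0. -1.  1.]]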
Example #5
    def get_eigenValsVecs(self):
        # Requires FEniCS (dolfin) for TrialFunction, TestFunction, assemble,
        # dx, and scipy.sparse.linalg.eigsh for the eigensolve.
        nDOF = self.space.dim()  # number of degrees of freedom
        d = self.mesh.geometry().dim()
        c4dof = self.space.tabulate_dof_coordinates().reshape(nDOF, d)

        u = TrialFunction(self.space)
        v = TestFunction(self.space)

        # assemble the finite-element mass matrix
        MassM = assemble(u * v * dx)
        M = MassM.array()

        # pairwise distances between all dof coordinates
        c0 = np.repeat(c4dof, nDOF, axis=0)
        c1 = np.tile(c4dof, [nDOF, 1])
        r = np.linalg.norm(c0 - c1, axis=1)
        C = self.cov_scal * np.exp(-r ** 2 / self.cov_lenght ** 2)  # squared-exponential covariance
        C.shape = [nDOF, nDOF]

        # Galerkin projection: generalized eigenproblem (M C M) x = w M x
        A = np.dot(M, np.dot(C, M))
        w, vecs = eigsh(A, self.nModes, M)

        dof2vert = self.space.dofmap().dofs()
        eigenVectors = np.array([z[dof2vert] for z in vecs.T])

        eigenVectors = list(eigenVectors)
        eigenValues = list(w)

        return eigenValues[::-1], eigenVectors[::-1]
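The same construction without FEniCS, as a minimal numpy sketch: uniform 1-D
nodes with a lumped trapezoid mass matrix; all values below are illustrative,
not from the source.

import numpy as np
from scipy.sparse.linalg import eigsh

n = 200
x = np.linspace(0.0, 1.0, n)
h = x[1] - x[0]
m = np.full(n, h)
m[0] = m[-1] = h / 2.0
M = np.diag(m)                           # lumped mass matrix

r = np.abs(x[:, None] - x[None, :])
C = 1.0 * np.exp(-r ** 2 / 0.2 ** 2)     # squared-exponential covariance

A = M @ C @ M
w, vecs = eigsh(A, 6, M)                 # generalized problem A v = w M v
print(w[::-1])                           # eigenvalues, largest first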
Example #6
This can be done by projection with inner products:

y_1*y_4 = a_1 y_1*y_1 + a_2 y_1*y_2 + a_3 y_1*y_3
y_2*y_4 = a_1 y_2*y_1 + a_2 y_2*y_2 + a_3 y_2*y_3
y_3*y_4 = a_1 y_3*y_1 + a_2 y_3*y_2 + a_3 y_3*y_3

We see that we have to solve a linear system with the
covariance matrix M_ij = y_i*y_j. To find the most relevant features
and reduce dimensionality, we use PCA and keep only the largest eigenvalues.
We center the covariance around the mean, i.e., we subtract ymean beforehand.
"""
ymean = np.mean(ytrain, 0)
dytrain = ytrain - ymean
M = dytrain @ dytrain.T

w, Q = eigsh(M, 10)
plt.figure()
plt.plot(w)

plt.figure()
for i in range(len(w)):
    plt.plot(w[i] * Q[:, i] @ dytrain)
plt.title('10 most significant eigenvectors')
plt.xlabel('Independent variable x')
plt.ylabel('Eigenvectors scaled by eigenvalue')
#%% Testing

utest = 0.7
ytest = f(utest, x)
b = np.empty(ntrain)
for i in range(ntrain):
    ...  # loop body missing from this snippet; it fills b (see the sketch below)
plt.plot(atrain[:, 0], atrain[:, 1], 'x')  # atrain is computed in code omitted here
plt.axis('equal')
plt.title('Weights')
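The omitted projection step can be sketched from the docstring above (a hedged
reconstruction, not the original code): solve M a = b for the weights of the
test sample, adding a small ridge term because M is singular.

b = dytrain @ (ytest - ymean)                       # b_i = y_i . (y_test - ymean)
a = np.linalg.solve(M + 1e-10 * np.eye(ntrain), b)  # projection weights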

# %%
from scipy.sparse.linalg import eigsh

neig = 5

# joint covariance of position (q) and momentum (p) snapshots
M = np.empty([2 * ntrain, 2 * ntrain])

M[:ntrain, :ntrain] = dqtrain @ dqtrain.T
M[:ntrain, ntrain:] = dqtrain @ dptrain.T
M[ntrain:, :ntrain] = dptrain @ dqtrain.T
M[ntrain:, ntrain:] = dptrain @ dptrain.T
w, Q = eigsh(M, neig)
Q = Q[:, :-neig - 1:-1]  # reorder to descending eigenvalues
w = w[:-neig - 1:-1]

plt.figure()
plt.semilogy(w)

for i in range(len(w)):
    plt.figure()
    qeig = Q[:ntrain, i] @ dqtrain
    peig = Q[ntrain:, i] @ dptrain
    plt.plot(qeig, peig)
    plt.xlabel('qeig')
    plt.ylabel('peig')
Example #8
plt.xlabel('Independent variable t')
plt.ylabel('Output f(t;u)')
plt.title('Output f(t;u) for different u')
#%%
utrain, utest, ytrain, ytest = train_test_split(indata,
                                                outdata,
                                                test_size=0.95)

ntrain = utrain.shape[0]
ntest = utest.shape[0]

ymean = np.mean(ytrain, 0)
dytrain = ytrain - ymean
M = dytrain @ dytrain.T

w, Q = eigsh(M, 22)
plt.figure()
plt.semilogy(w[::-1] / w[-1])
plt.xlabel('Number')
plt.ylabel('Relative size')
plt.title('Eigenvalue decay')

neig = 5
Q = Q[:, :-neig - 1:-1]
w = w[:-neig - 1:-1]

plt.figure()
for i in range(len(w)):
    efunc = Q[:, i] @ dytrain
    plt.plot(-efunc * np.sign(efunc[0]))  # Sign-flipped eigenfunctions
    #plt.plot(efunc)  # Eigenfunctions
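A natural continuation (a sketch, not from the source): normalize the
eigenfunctions, since each row of Q.T @ dytrain has squared norm w[i], and
reconstruct a held-out curve from the truncated basis.

efuncs = Q.T @ dytrain                  # (neig, npoints) eigenfunctions
efuncs /= np.sqrt(w)[:, None]           # orthonormalize: row i had norm sqrt(w[i])

y0 = ytest[0]                           # one held-out output curve
coeffs = efuncs @ (y0 - ymean)          # projection coefficients
yrec = ymean + coeffs @ efuncs          # rank-neig reconstruction
print(np.linalg.norm(yrec - y0) / np.linalg.norm(y0))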