Code example #1
def kcca(X, Y, k, evs=5, epsilon=1e-6):
    '''
    Kernel CCA.
    
    :param X:    data matrix, each column represents a data point
    :param Y:    time-lagged data, each column y_i is x_i mapped forward by the dynamical system
    :param k:    kernel
    :param evs:  number of eigenvalues/eigenvectors
    :param epsilon: regularization parameter
    :return:     eigenvalues d and eigenvectors V defining the nonlinear transformation of the data X
    '''
    G_0 = _kernels.gramian(X, k)
    G_1 = _kernels.gramian(Y, k)

    # center Gram matrices
    n = X.shape[1]
    I = _np.eye(n)
    N = I - 1 / n * _np.ones((n, n))
    G_0 = N @ G_0 @ N
    G_1 = N @ G_1 @ N

    A = _sp.linalg.solve(G_0 + epsilon*I, G_0, assume_a='sym') \
      @ _sp.linalg.solve(G_1 + epsilon*I, G_1, assume_a='sym')

    d, V = sortEig(A, evs)
    return (d, V)
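A minimal usage sketch for kcca (the toy data, the kernel bandwidth, and the d3s.kernels import are illustrative assumptions, not part of the original example):

import numpy as np
from d3s import kernels

# hypothetical data: 100 two-dimensional points and their time-lagged images
X = np.random.rand(2, 100)
Y = X + 0.01 * np.random.randn(2, 100)

k = kernels.gaussianKernel(0.5)            # Gaussian kernel with bandwidth 0.5
d, V = kcca(X, Y, k, evs=3, epsilon=1e-6)  # leading three eigenvalues/eigenvectors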
Code example #2
def kpca(X, k, evs=5):
    '''
    Kernel PCA.
    
    :param X:    data matrix, each column represents a data point
    :param k:    kernel
    :param evs:  number of eigenvalues/eigenvectors
    :return:     eigenvalues d and eigenvectors V of the centered Gram matrix, which yield the projection of X onto the principal components
    '''
    G = _kernels.gramian(X, k)  # Gram matrix

    # center Gram matrix
    n = X.shape[1]
    N = _np.eye(n) - 1 / n * _np.ones((n, n))
    G = N @ G @ N
    d, V = sortEig(G, evs)
    return (d, V)
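A usage sketch in the same style (the toy data and the bandwidth are placeholders):

import numpy as np
from d3s import kernels

X = np.random.rand(2, 100)       # hypothetical data, one point per column
k = kernels.gaussianKernel(1.0)
d, V = kpca(X, k, evs=5)         # eigenvalues and eigenvectors of the centered Gram matrix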
Code example #3
File: kgedmdTest.py  Project: sklus/d3s
def gaussianG00G10(X, b, sigma, k):
    # Compute Gram matrices G_00 and G_10, assume sigma is fixed. Faster than standard implementation.
    if k.__class__.__name__ != 'gaussianKernel':
        print('Error: Only implemented for Gaussian kernel.')
        return
    m = X.shape[1]
    G_00 = kernels.gramian(X, k)
    G_10 = np.zeros((m, m))
    B = b(X).T @ X
    s = np.squeeze(sigma(X[:, 0, None]), 2)
    a = s @ s.T
    tra = np.trace(a)
    for i in range(m):
        for j in range(m):
            G_10[i, j] = -1/k.sigma**2 * (B[i, i] - B[i, j]) \
                        + 0.5*(1/k.sigma**4 * np.dot(X[:, i]-X[:, j], a @ (X[:, i]-X[:, j])) - 1/k.sigma**2 * tra )
    G_10 = G_10 * G_00
    return (G_00, G_10)
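A hedged usage sketch; the drift b, the constant diffusion sigma, and the data are made-up placeholders chosen only to match the shapes the function expects (b(X) of shape (d, m), sigma(X) of shape (d, d, m)):

import numpy as np
from d3s import kernels

dim, m = 2, 500
X = np.random.rand(dim, m)

b = lambda x: -x   # linear drift, evaluated column-wise
sigma = lambda x: np.tile(np.sqrt(0.5) * np.eye(dim)[:, :, None], (1, 1, x.shape[1]))  # constant diffusion

k = kernels.gaussianKernel(0.3)
G_00, G_10 = gaussianG00G10(X, b, sigma, k)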
Code example #4
def kedmd(X, Y, k, epsilon=0, evs=5, operator='P'):
    '''
    Kernel EDMD for the Koopman or Perron-Frobenius operator. The matrices X and Y
    contain the input data.

    :param k:        kernel, see d3s.kernels
    :param epsilon:  regularization parameter
    :param evs:      number of eigenvalues/eigenvectors
    :param operator: 'K' for Koopman or 'P' for Perron-Frobenius (note that the default is P here)
    :return:         eigenvalues d and eigenfunctions evaluated in X
    '''
    if isinstance(X, list):  # e.g., for strings
        n = len(X)
    else:
        n = X.shape[1]

    G_0 = _kernels.gramian(X, k)
    G_1 = _kernels.gramian2(X, Y, k)
    if operator == 'K': G_1 = G_1.T

    A = _sp.linalg.pinv(G_0 + epsilon * _np.eye(n), rcond=1e-15) @ G_1
    d, V = sortEig(A, evs)
    if operator == 'K': V = G_0 @ V
    return (d, V)
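A short usage sketch for kedmd (the snapshot matrices and kernel parameters are illustrative only):

import numpy as np
from d3s import kernels

X = np.random.rand(2, 200)                # hypothetical snapshots, one per column
Y = X + 0.01 * np.random.randn(2, 200)    # corresponding time-lagged snapshots
k = kernels.gaussianKernel(0.5)

d, V = kedmd(X, Y, k, epsilon=1e-6, evs=5, operator='K')  # Koopman eigenvalues/eigenfunctions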
Code example #5
File: kgedmdTest.py  Project: sklus/d3s
#%% generate data
m = 1000
X = Omega.rand(m)
Y = b(X)
Z = sigma(X)

# define kernel
#k = kernels.polynomialKernel(7)
k = kernels.gaussianKernel(0.5)

#%% apply kernel generator EDMD
epsilon = 0.1
S = np.einsum('ijk,ljk->ilk', Z, Z)  # sigma \cdot sigma^T

G_00 = kernels.gramian(X, k)
G_10 = np.zeros((m, m))
for i in range(m):
    for j in range(m):
        G_10[i, j] = Y[:, i].T @ k.diff(X[:, i], X[:, j]) + 0.5 * np.sum(
            S[:, :, i] * k.ddiff(X[:, i], X[:, j]), axis=(0, 1))

A, _, _, _ = np.linalg.lstsq(G_00, G_10, rcond=1e-12)
d, V = algorithms.sortEig(A, evs=m, which='LM')
W = kernels.gramian2(Omega.midpointGrid(), X, k) @ V

#%% plot eigenvalues
plt.figure()
plt.plot(d, '.')
plt.title('Spectrum')
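As a follow-up sketch (assuming Omega is a two-dimensional d3s.domain.discretization object, as elsewhere in the script), the leading eigenfunction stored in W can be visualized on the same midpoint grid it was evaluated on:

#%% plot leading eigenfunction on the midpoint grid
c = Omega.midpointGrid()
plt.figure()
plt.scatter(c[0, :], c[1, :], c=np.real(W[:, 0]))
plt.title('Leading eigenfunction')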
Code example #6
m = X.shape[1]

#%% plot boundary
plt.figure(1)
plt.clf()
plt.scatter(X[0, :], X[1, :])
plt.scatter(X[0, isBoundary], X[1, isBoundary], c='red')

#%% define kernel
d = 2
sigma = 0.1
k = kernels.gaussianKernel(sigma)
k_a = akernels.antisymmetrizedKernel(k, d)

#%% set boundary conditions
G_00 = kernels.gramian(X, k_a)
N = G_00[isBoundary, :].T

#%% solve BVP
epsilon = 1e-12

G_10 = np.zeros((m, m))
for i in range(m):
    for j in range(m):
        G_10[i, j] = -h**2/(2*m0) * k_a.laplace(X[:, i], X[:, j]) # no term related to the potential since V is 0 inside the box

d, V = ceig.hcgeig(G_10, G_00 + epsilon*np.eye(m), N)

# evaluate eigenfunctions on the vertices of a finer grid
Omega2 = domain.discretization(bounds, np.array([30, 30]))
c, _ = Omega2.vertexGrid()
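Continuing the script with a sketch (assuming a two-dimensional domain and reusing kernels.gramian2 as in example #5), the computed eigenvectors can be turned into eigenfunction values on the vertex grid c:

# evaluate eigenfunctions on the vertex grid and plot the first one
W = kernels.gramian2(c, X, k_a) @ V

plt.figure()
plt.scatter(c[0, :], c[1, :], c=np.real(W[:, 0]))
plt.title('First eigenfunction')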