def createSurrogateModel(modelData, data):
    X = data['X']
    Y = data['dX']

    # Choose the dictionary of observables
    if modelData.nMonomials == 0:
        psi = Identity
        P = np_.array(range(X[0].shape[1]))
    else:
        psi = observables.monomials(modelData.nMonomials)
        P = np_.array(range(1, X[0].shape[1] + 1))

    for i in range(len(X)):
        PsiX = psi(X[i].T)
        if modelData.nMonomials == 0:
            dPsiY = Y[i].T
        else:
            # Chain rule: d/dt psi(x) = grad psi(x) . dx/dt
            dPsiY = np_.einsum('ijk,jk->ik', psi.diff(X[i].T), Y[i].T)

        # Gram matrix, cross matrix, and matrix approximation for data set i
        Gi = PsiX @ PsiX.T
        Ai = PsiX @ dPsiY.T
        Ki = sp_.linalg.pinv(Gi) @ Ai

        if i == 0:
            # Allocate storage once the matrix dimensions are known
            K = np_.zeros([Ki.shape[0], Ki.shape[1], len(X)], dtype=float)
            A = np_.zeros([Ki.shape[0], Ki.shape[1], len(X)], dtype=float)
            G = np_.zeros([Ki.shape[0], Ki.shape[1], len(X)], dtype=float)
            m = np_.zeros([len(X)], dtype=float)

        K[:, :, i] = Ki.transpose()
        A[:, :, i] = Ai
        G[:, :, i] = Gi
        m[i] = float(X[i].shape[0])  # number of snapshots in data set i

    setattr(modelData, 'psi', psi)
    setattr(modelData, 'K', K)
    setattr(modelData, 'A', A)
    setattr(modelData, 'G', G)
    setattr(modelData, 'm', m)
    setattr(modelData, 'P', P)

    return modelData
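
# Minimal usage sketch, assuming data['X'] and data['dX'] are lists of
# (snapshots x dimension) arrays (inferred from the indexing above) and using a
# SimpleNamespace as a stand-in for the model-data object; the toy data below is
# purely illustrative.
if __name__ == '__main__':
    from types import SimpleNamespace

    import numpy as np_
    import scipy as sp_
    import scipy.linalg

    import d3s.observables as observables

    rng = np_.random.default_rng(0)
    X0 = rng.standard_normal((500, 2))          # 500 snapshots of a 2D system
    dX0 = -X0                                   # time derivatives of a toy linear drift

    modelData = SimpleNamespace(nMonomials=2)   # degree-2 monomial dictionary
    modelData = createSurrogateModel(modelData, {'X': [X0], 'dX': [dX0]})
    print(modelData.K.shape)                    # (n_basis, n_basis, number of data sets)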
#%% plot eigenvalues
plt.figure()
plt.plot(d, '.')
plt.title('Spectrum')

#%% plot eigenfunctions
ind, = np.where(d < -0.1)
for i in ind[:6]:
    plt.figure()
    Omega.plot(np.real(W[:, i] / np.amax(abs(W[:, i]))))
    plt.ylim((-1, 1))
    plt.title('lambda = %f' % np.real(d[i]))

#%% direct kernelization for kernels with finite-dimensional feature space
psi = observables.monomials(5)
PsiX = psi(X)
dPsiY = np.einsum('ijk,jk->ik', psi.diff(X), Y)

if Z is not None:  # stochastic dynamical system
    n = PsiX.shape[0]                    # number of basis functions
    ddPsiX = psi.ddiff(X)                # second-order derivatives
    S = np.einsum('ijk,ljk->ilk', Z, Z)  # sigma \cdot sigma^T
    for i in range(n):
        dPsiY[i, :] += 0.5 * np.sum(ddPsiX[i, :, :, :] * S, axis=(0, 1))

G_00 = PsiX.T @ PsiX
G_10 = dPsiY.T @ PsiX

A = sp.linalg.pinv(G_00 + epsilon * np.eye(m), rcond=1e-15) @ G_10
d, V = algorithms.sortEig(A, evs=m, which='SM')
V = G_00 @ V
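
# Illustrative check of the Ito correction above (reuses the script's numpy and
# observables imports): for dX = b(X) dt + sigma dW, the generator acts on an
# observable f as
#     (Lf)(x) = b(x) . grad f(x) + 0.5 * sum_ij (sigma sigma^T)_ij d^2 f / dx_i dx_j,
# which is what dPsiY collects above. The index 3 below assumes the monomial
# ordering [1, x1, x2, x1^2, x1*x2, x2^2], consistent with the column indexing
# used in data_sde further below.
psi_chk = observables.monomials(2)
X_chk = np.random.rand(2, 5)                       # five test points in 2D
Y_chk = -X_chk                                     # drift b(x) = -x
Z_chk = np.zeros((2, 2, 5))                        # constant diagonal diffusion sigma
Z_chk[0, 0, :] = 0.3
Z_chk[1, 1, :] = 0.5

dPsiY_chk = np.einsum('ijk,jk->ik', psi_chk.diff(X_chk), Y_chk)
S_chk = np.einsum('ijk,ljk->ilk', Z_chk, Z_chk)    # sigma sigma^T at every test point
ddPsiX_chk = psi_chk.ddiff(X_chk)
for j in range(psi_chk(X_chk).shape[0]):
    dPsiY_chk[j, :] += 0.5 * np.sum(ddPsiX_chk[j, :, :, :] * S_chk, axis=(0, 1))

# The row for the observable x1^2 should equal -2*x1^2 + 0.3**2 up to round-off.
print(dPsiY_chk[3, :])
print(-2 * X_chk[0, :]**2 + 0.3**2)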
import os

import numpy as np

import d3s.observables as observables
import auxiliary.algorithms as algorithms
from auxiliary.kramers_moyal import kramersmoyal

# Maximum degree of monomials
degree = 3

data_path = 'data/raw/'
save_path = 'data/processed/matrix_'

# Define observables
psi = observables.monomials(degree)

for filename in os.listdir(data_path):
    if not filename.endswith('npz'):
        continue
    with np.load(data_path + filename) as data:
        print('Available in this data set:', data.files)
        trajectory = data['trajectory']
        x_init = data['x_init']
        num_agents = data['num_agents']
        num_testpoints = data['num_trainingpoints']
        num_samples = data['num_samples']
        T_max = data['T_max']
        gamma = data['gamma']
def __init__(self):
    self.p = observables.monomials(2)
# define domain
bounds = np.array([[-2, 2], [-2, 2]])
boxes = np.array([50, 50])
Omega = domain.discretization(bounds, boxes)

# define system
gamma = -0.8
delta = -0.7

def b(x):
    return np.array([gamma * x[0, :], delta * (x[1, :] - x[0, :]**2)])

# define observables
psi = observables.monomials(8)

# generate data
X = Omega.rand(1000)  # generate test points
Y = b(X)

# apply generator EDMD
evs = 8  # number of eigenvalues/eigenfunctions to be computed
K, d, V = algorithms.gedmd(X, Y, None, psi, evs=evs, operator='K')
# printMatrix(K, 'K_gEDMD')
printVector(np.real(d), 'd_gEDMD')

# normalize eigenvectors for convenience
V[:, 1] /= V[3, 1]
V[:, 3] /= V[10, 3]
V[:, 4] /= V[6, 4]

for i in range(evs):
def data_sde(L, p, x_init, T_max, t_step, seed=None):
    """
    Euler-Maruyama scheme to simulate the reduced SDE

    Parameters
    ----------
    L : ndarray
        Koopman generator approximation matrix.
    p : int
        Degree of highest-order monomial.
    x_init : ndarray
        Initial state.
    T_max : float
        Time horizon for simulation.
    t_step : float
        Time step size.
    seed : int, optional
        Seed of random process. The default is None.

    Returns
    -------
    C : ndarray
        Trajectory of the SDE simulated for the interval [0, T_max].
    """
    if seed is not None:
        np.random.seed(seed)

    num_types = len(x_init)

    # Define observables
    psi = observables.monomials(p)

    # Number of timesteps to be simulated
    num_timesteps = int(np.round(T_max / t_step)) + 1

    C = np.zeros([num_types - 1, num_timesteps])
    C[:, 0] = x_init[0:num_types - 1]

    # Diffusion factor; re-used from the previous step whenever the Cholesky
    # factorization below fails
    sigma = np.zeros([num_types - 1, num_types - 1])

    for k in range(num_timesteps - 1):
        # Get random number
        Wt = np.random.normal(0, 1, num_types - 1)

        # Evaluate drift and diffusion in X given the data-driven L
        X = C[:, k]
        X = np.expand_dims(X, axis=1)
        Psi_c = psi(X)

        # Calculate drift
        b_c = L[:, 1:num_types].T @ Psi_c
        b_c = np.squeeze(b_c)

        # Calculate diffusion (the column indices assume a three-type system,
        # i.e. the generator matrix columns 3-5 correspond to the squared and
        # mixed second-order monomials)
        a_c_11 = L[:, 3].T @ Psi_c - 2 * b_c[0] * X[0, :]
        a_c_12 = L[:, 4].T @ Psi_c - b_c[0] * X[1, :] - b_c[1] * X[0, :]
        a_c_22 = L[:, 5].T @ Psi_c - 2 * b_c[1] * X[1, :]

        A = np.empty([num_types - 1, num_types - 1])
        A[0, 0] = a_c_11
        A[1, 0] = a_c_12
        A[0, 1] = a_c_12
        A[1, 1] = a_c_22

        # The matrix A can have eigenvalues that are numerically (close to)
        # zero. If the Cholesky decomposition cannot be computed, the diffusion
        # term sigma is not updated in this step
        try:
            sigma = sp.linalg.cholesky(A, lower=True)
        except Exception:
            pass

        # Euler-Maruyama step
        C[:, k + 1] = C[:, k] + t_step * b_c + np.sqrt(t_step) * sigma @ Wt

    # Calculate the missing last entry from the conservation constraint
    C = np.vstack((C, np.zeros(num_timesteps)))
    C[2, :] = 1 - C[0, :] - C[1, :]

    return C
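
# Minimal usage sketch, assuming a generator matrix L was estimated beforehand
# (e.g. via gEDMD on degree-p monomials for a three-type system) and that the
# module-level imports used by data_sde (numpy as np, scipy as sp,
# d3s.observables) are available. The file name and key below are placeholders,
# not part of the original code.
if __name__ == '__main__':
    p = 3
    with np.load('data/processed/matrix_example.npz') as matrices:  # hypothetical file
        L = matrices['L']                                           # hypothetical key
    x_init = np.array([0.4, 0.35, 0.25])   # fractions of the three types, summing to 1
    C = data_sde(L, p, x_init, T_max=10.0, t_step=1e-3, seed=0)
    print(C.shape)                         # (3, num_timesteps)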