Example #1
    def test_header(self):
        """test header"""

        # suppress print output
        self._original_stdout = sys.stdout
        sys.stdout = open(os.devnull, 'w')
        utl.header(title='test', subtitle='test')
        sys.stdout.close()
        sys.stdout = self._original_stdout
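
A minimal standalone sketch of the same call, assuming only the title and subtitle keywords used above; contextlib.redirect_stdout from the standard library restores sys.stdout automatically even if the call raises.

import contextlib
import os

import scikit_tt.utils as utl

# send the banner printed by utl.header to os.devnull instead of swapping sys.stdout by hand
with open(os.devnull, 'w') as devnull, contextlib.redirect_stdout(devnull):
    utl.header(title='test', subtitle='test')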
Example #2

def plotrgb(tensor):
    """Plot RGB fractals.

    Parameters
    ----------
    tensor: ndarray
        3-dimensional tensor representing the RGB image
    """

    ax = plt.gca()
    ax.imshow(tensor)
    plt.axis('off')


utl.header(title='Tensor-generated fractals')

# multisponges
# ------------

start_time = utl.progress('Generating multisponges', 0)
multisponge = []
for i in range(2, 4):
    for j in range(1, 4):
        multisponge.append(mdl.multisponge(i, j))
        utl.progress('Generating multisponges', 100 * ((i - 2) * 3 + j) / 6, cpu_time=_time.time() - start_time)

# Cantor dusts
# ------------

start_time = utl.progress('Generating Cantor dusts', 0)
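
The fragment stops as the Cantor-dust stage begins. A sketch of how the loop would typically continue, mirroring the multisponge loop above; the constructor name mdl.cantor_dust and its (dimension, level) signature are assumptions, and the time module is assumed to be imported as _time, as used above.

cantor_dust = []
for i in range(2, 4):
    for j in range(1, 4):
        # build a Cantor dust of dimension i and level j (constructor name assumed)
        cantor_dust.append(mdl.cantor_dust(i, j))
        utl.progress('Generating Cantor dusts', 100 * ((i - 2) * 3 + j) / 6, cpu_time=_time.time() - start_time)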
Example #3
.. [1] P. Gelß. "The Tensor-Train Format and Its Applications: Modeling and Analysis of Chemical Reaction
       Networks, Catalytic Processes, Fluid Flows, and Brownian Dynamics", Freie Universität Berlin, 2017
.. [2] P. Gelß, S. Matera, C. Schütte, "Solving the master equation without kinetic Monte Carlo: Tensor train
       approximations for a CO oxidation model", Journal of Computational Physics 314 (2016) 489–502
.. [3] P. Gelß, S. Klus, S. Matera, C. Schütte, "Nearest-neighbor interaction systems in the tensor-train format",
       Journal of Computational Physics 341 (2017) 140–162
"""

import scikit_tt.tensor_train as tt
import scikit_tt.models as mdl
import scikit_tt.solvers.evp as evp
import scikit_tt.solvers.ode as ode
import scikit_tt.utils as utl
import matplotlib.pyplot as plt

utl.header(title='CO oxidation')

# parameters
order = 20
p_CO_exp = [
    -4, -3.5, -3, -2.5, -2, -1.5, -1, -0.5, 0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6,
    0.7, 0.8, 0.9, 1, 1.5, 2
]

# TT ranks for approximations
#           - EVP -           |            - IEM -
R = [3, 4, 5, 5, 6, 6, 9, 11] + [12]

# TODO: FIND FURTHER RANKS

# define array for turn-over frequencies
Example #4
            # define tensor train to compute mean concentration of jth species
            cores = [
                np.ones([1, series[0].row_dims[k], 1, 1])
                for k in range(series[0].order)
            ]
            cores[j] = np.zeros([1, series[0].row_dims[j], 1, 1])
            cores[j][0, :, 0, 0] = np.arange(series[0].row_dims[j])
            tensor_mean = TT(cores)
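            # contracting a probability tensor with tensor_mean sums x_j * P(x) over all
            # states, i.e. it yields the expected value of the jth coordinate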

            # define entry of mean
            mean[i, j] = series[i].transpose() @ tensor_mean

    return mean


utl.header(title='Signaling cascade')

# parameters
# ----------

order = 20
tt_rank = 4
qtt_rank = 12
step_sizes = [1] * 300
qtt_modes = [[2] * 6] * order
threshold = 1e-14

# operator in TT format
# ---------------------

operator = mdl.signaling_cascade(order)
Example #5
    data = []
    trajectory_lengths = []
    number_of_trajectories = 6

    # loop over trajectories
    for i in range(number_of_trajectories):
        # exclude first and last dihedral pair and downsample data
        trajectory = np.load(directory + "DihedralTimeSeries_" + str(i) + ".npy")[::downsampling_rate, 2:12]
        trajectory_lengths.append(trajectory.shape[0])
        data.append(trajectory.T)
    data = np.hstack(data)

    return data, trajectory_lengths

# title
utl.header(title='Deca-alanine')

# define basis functions
basis_list = []
for i in range(5):
    basis_list.append([tdt.ConstantFunction(), tdt.PeriodicGaussFunction(2 * i, -2, 0.8),
                       tdt.PeriodicGaussFunction(2 * i, 1, 0.5)])
    basis_list.append([tdt.ConstantFunction(), tdt.PeriodicGaussFunction(2 * i + 1, -0.5, 0.8),
                       tdt.PeriodicGaussFunction(2 * i + 1, 0, 4), tdt.PeriodicGaussFunction(2 * i + 1, 2, 0.8)])

# parameters
downsampling_rate = 500
lag_times_int = np.array([1, 2, 4, 8, 10, 12])
lag_times_phy = 1e-3 * downsampling_rate * lag_times_int
lag_times_msm = 1e-3 * np.array([100, 200, 500, 1000, 2000, 4000, 6000])
eps_list = [1e-6, 1e-5, 1e-4, 1e-3, 1e-2]
Example #6
        len_m = len(str_m)
        str_c = '%.2f%%' % classification_rate
        len_c = len(str_c)
        str_t = '%.2fs' % cpu_time
        len_t = len(str_t)
        print(str_m + (20 - len_m) * ' ' + str_c + (27 - len_c - len_t) * ' ' + str_t)
        classification_rates.append(classification_rate)
        cpu_times.append(cpu_time)

    print(' ')

    return classification_rates, cpu_times


utl.header(title='MNIST/FMNIST')

# data paths
mnist_reduced = '/srv/public/data/mnist/MNIST_reduced.npz'
mnist_full = '/srv/public/data/mnist/MNIST_full.npz'
fmnist_reduced = '/srv/public/data/mnist/FMNIST_reduced.npz'
fmnist_full = '/srv/public/data/mnist/FMNIST_full.npz'

print('MNIST(14x14) with kernel-based MANDy:\n')
classification_rates, cpu_times = classification_mandy(mnist_reduced, 5000,
                                                       60001, 5000)

print('MNIST(28x28) with kernel-based MANDy:\n')
classification_rates, cpu_times = classification_mandy(mnist_full, 5000, 60001,
                                                       5000)
Example #7
            x_tmp = np.array([-6 + i * dist, -6 + j * dist])[:, None]
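            # evaluate the approximated eigenfunction at x_tmp: the basis decomposition of
            # the point is matricized and contracted with the corresponding eigenvector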
            z[i, j] = tdt.basis_decomposition(x_tmp, basis_list).matricize().dot(eigenvector)

    plt.imshow(z, cmap='seismic', vmin=-1, vmax=1)
    plt.colorbar()
    plt.xticks([0, (grid_size - 1) / 2, grid_size - 1], [-6, 0, 6])
    plt.yticks([0, (grid_size - 1) / 2, grid_size - 1], [-6, 0, 6])
    plt.xlim([0, grid_size - 1])
    plt.ylim([0, grid_size - 1])
    plt.xlabel(r'$x_1$')
    plt.ylabel(r'$x_2$')
    plt.rcParams.update({'axes.grid': True})


# title
utl.header(title='Radial potential')

# set plot parameters
plt.rc('text', usetex=True)
plt.rc('font', family='sans')
plt.rcParams['mathtext.fontset'] = 'cm'
plt.rcParams.update({'font.size': 20})
plt.rcParams.update({'figure.autolayout': True})
plt.rcParams.update({'axes.grid': True})

# plot potential
plot_potential()

# parameters
directory = os.path.dirname(os.path.realpath(__file__)) + '/data/'
number_of_boxes = 100
Example #8
        # Save results to file:
        dic = {}
        dic["lag_times"] = lag_times
        dic["eigenvalues"] = eigenvalues
        dic["cpu_time"] = cpu_time
        np.savez_compressed(
            directory + "Results_NTL9_HOCUR_d" + str(dimensions[i]) + ".npz",
            **dic)

        utl.progress('Apply AMUSEt (HOCUR)',
                     100 * (i + 1) / len(dimensions),
                     cpu_time=_time.time() - start_time)


# title
utl.header(title='NTL9')

# set plot parameters
plt.rc('text', usetex=True)
plt.rc('font', family='sans')
plt.rcParams["mathtext.fontset"] = "cm"
plt.rcParams.update({'font.size': 20})
plt.rcParams.update({'figure.autolayout': True})
plt.rcParams.update({'axes.grid': True})

# data directory (3GB trajectory data, TICA results, etc. not included)
directory = '/srv/public/data/ntl9/'

# plot basis functions
dimension = 666
downsampling_rate = 1
Example #9
      Networks, Catalytic Processes, Fluid Flows, and Brownian Dynamics", Freie Universität Berlin, 2017
"""

import numpy as np
import scipy.sparse.linalg as splin
from scikit_tt.tensor_train import TT
import scikit_tt.tensor_train as tt
import scikit_tt.data_driven.perron_frobenius as pf
import scikit_tt.solvers.evp as evp
import scikit_tt.utils as utl
import matplotlib.pyplot as plt
# noinspection PyUnresolvedReferences
from mpl_toolkits.mplot3d import Axes3D
import scipy.io as io

utl.header(title='Quadruple-well potential')

# parameters
# ----------

simulations = 100
n_states = 25
number_ev = 3

# load data obtained by applying Ulam's method
# --------------------------------------------


utl.progress('Load data', 0, dots=39)
transitions = io.loadmat("data/QuadrupleWell3D_25x25x25_100.mat")["indices"]  # load data
utl.progress('Load data', 100, dots=39)
Example #10
----------
.. [1] P. Gelß. "The Tensor-Train Format and Its Applications: Modeling and Analysis of Chemical Reaction
      Networks, Catalytic Processes, Fluid Flows, and Brownian Dynamics", Freie Universität Berlin, 2017
"""

import numpy as np
import scipy.sparse.linalg as splin
from scikit_tt.tensor_train import TT
import scikit_tt.tensor_train as tt
import scikit_tt.data_driven.perron_frobenius as pf
import scikit_tt.solvers.evp as evp
import scikit_tt.utils as utl
import matplotlib.pyplot as plt
import scipy.io as io

utl.header(title='Triple-well potential')

# parameters
# ----------

simulations = 500
n_states = 50
number_ev = 3

# load data obtained by applying Ulam's method
# --------------------------------------------

utl.progress('Load data', 0, dots=39)
transitions = io.loadmat("data/TripleWell2D_500.mat")["indices"]
utl.progress('Load data', 100, dots=39)
Example #11
    for j in range(number_of_snapshots):
        # first oscillator (fixed left boundary)
        derivatives[0, j] = snapshots[1, j] - 2 * snapshots[0, j] \
            + 0.7 * ((snapshots[1, j] - snapshots[0, j]) ** 3 - snapshots[0, j] ** 3)
        # inner oscillators
        for i in range(1, number_of_oscillators - 1):
            derivatives[i, j] = snapshots[i + 1, j] - 2 * snapshots[i, j] + snapshots[i - 1, j] \
                + 0.7 * ((snapshots[i + 1, j] - snapshots[i, j]) ** 3
                         - (snapshots[i, j] - snapshots[i - 1, j]) ** 3)
        # last oscillator (fixed right boundary)
        derivatives[-1, j] = -2 * snapshots[-1, j] + snapshots[-2, j] \
            + 0.7 * (-snapshots[-1, j] ** 3 - (snapshots[-1, j] - snapshots[-2, j]) ** 3)

    return snapshots, derivatives


utl.header(title='MANDy - Fermi-Pasta-Ulam problem', subtitle='Example 2')

# model parameters
psi = [lambda t: 1, lambda t: t, lambda t: t**2, lambda t: t**3]
p = len(psi)

# snapshot parameters
snapshots_min = 500
snapshots_max = 6000
snapshots_step = 500

# dimension parameters
d_min = 3
d_max = 20

# define arrays for CPU times and relative errors
Example #12
            # define tensor train to compute average number of cars
            cores = [
                np.ones([1, series[0].row_dims[k], 1, 1])
                for k in range(series[0].order)
            ]
            cores[j] = np.zeros([1, series[0].row_dims[j], 1, 1])
            cores[j][0, :, 0, 0] = np.arange(series[0].row_dims[j])
            tensor_mean = TT(cores)

            # define entry of average_noc
            average_noc[i, j] = series[i].transpose() @ tensor_mean

    return average_noc


utl.header(title='Toll station')

# parameters
# ----------

number_of_lanes = 20
maximum_number_of_cars = 9
initial_number_of_cars = 5
integration_time = 30

# construct operator
# ------------------

operator = mdl.toll_station(number_of_lanes, maximum_number_of_cars)

# construct initial distribution
Example #13
    # find eigenvalues
    eigenvalues, eigenvectors = lin.eig(reduced_matrix, overwrite_a=True, check_finite=False)

    # sort eigenvalues
    ind = np.argsort(eigenvalues)[::-1]
    dmd_eigenvalues = eigenvalues[ind]

    # compute modes
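    # y_data is mapped through the SVD factors v and s, combined with the sorted
    # eigenvectors, and each mode is scaled by the reciprocal of its eigenvalue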
    dmd_modes = y_data @ v.T @ np.diag(np.reciprocal(s)) @ eigenvectors[:, ind] @ np.diag(
        np.reciprocal(dmd_eigenvalues))

    return dmd_eigenvalues, dmd_modes


utl.header(title='TDMD - von Kármán vortex street')

# load data
path = os.path.dirname(os.path.realpath(__file__))
data = np.load(path + "/data/karman_snapshots.npz")['snapshots']
number_of_snapshots = data.shape[-1] - 1

# tensor-based approach
# ---------------------

# thresholds for orthonormalizations
thresholds = [0, 1e-7, 1e-5, 1e-3]

start_time = utl.progress('applying TDMD for different thresholds', 0)

# construct x and y tensors and convert to TT format
Example #14
        for q in range(p):
            cores[q][0, :, 0, 0] = [1] + [psi[q](theta[r]) for r in range(theta.shape[0])]
        psi_x = TT(cores)
        psi_x = psi_x.full().reshape(np.prod(psi_x.row_dims), 1)

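        # the approximated right-hand side is the contraction of the transformed state
        # psi_x with the coefficient tensor xi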
        rhs = psi_x.transpose() @ xi
        rhs = rhs.reshape(rhs.size)
        return rhs

    sol = spint.solve_ivp(approximated_dynamics, [0, time], x_0, method='BDF', t_eval=np.linspace(0, time, m))
    sol = sol.y

    return sol


utl.header(title='MANDy - Kuramoto model')

# model parameters
# ----------------

# number of oscillators
d = 100

# initial distribution
x_0 = 2 * np.pi * np.random.rand(d) - np.pi

# natural frequencies
w = np.linspace(-5, 5, d)

# basis functions
psi = [lambda t: np.sin(t), lambda t: np.cos(t)]
Example #15
            # define tensor train to compute mean concentration of jth species
            cores = [
                np.ones([1, series[0].row_dims[k], 1, 1])
                for k in range(series[0].order)
            ]
            cores[j] = np.zeros([1, series[0].row_dims[j], 1, 1])
            cores[j][0, :, 0, 0] = np.arange(series[0].row_dims[j])
            tensor_mean = TT(cores)

            # define entry of mean
            mean[i, j] = series[i].transpose() @ tensor_mean

    return mean


utl.header(title='Two-step destruction')

# parameters
# ----------

m = 3
step_sizes = [0.001] * 100 + [0.1] * 9 + [1] * 9
qtt_rank = 10
max_rank = 25

# construct operator in TT format and convert to QTT format
# ---------------------------------------------------------

operator = mdl.two_step_destruction(
    1, 2, 1, m).tt2qtt([[2] * m] + [[2] * (m + 1)] + [[2] * m] + [[2] * m],
                       [[2] * m] + [[2] * (m + 1)] + [[2] * m] + [[2] * m],