Example #1
def sod(operator, initial_value, step_sizes, threshold=1e-12, max_rank=50, normalize=2, progress=True):
    """Second order differencing (time-symmetrized explicit Euler) for linear differential equations in the TT format

    Parameters
    ----------
    operator: instance of TT class
        TT operator of the differential equation
    initial_value: instance of TT class
        initial value of the differential equation
    step_sizes: list of floats
        step sizes
    threshold: float, optional
        threshold for reduced SVD decompositions, default is 1e-12
    max_rank: int, optional
        maximum rank of the solution, default is 50
    normalize: int (0, 1, or 2)
        no normalization if 0, otherwise the solution is normalized in terms of Manhattan or Euclidean norm in each step
    progress: bool, optional
        whether to show the progress of the algorithm or not, default is True

    Returns
    -------
    solution: list of instances of the TT class
        numerical solution of the differential equation
    """

    # return current time
    start_time = utl.progress('Running time-symmetrized explicit Euler method', 0, show=progress)

    # initialize solution
    solution = [initial_value]

    # begin loop over time steps
    # --------------------------

    for i in range(len(step_sizes)):

        if i == 0: # initialize: one expl. Euler backwards in time
            solution_prev = (tt.eye(operator.row_dims) - step_sizes[i]*operator).dot(solution[0])
        else:
            solution_prev = solution[i-1].copy()

        # compute next time step from current and previous time step
        tt_tmp = solution_prev + 2*step_sizes[i]*operator.dot(solution[i])

        # truncate ranks of the solution
        tt_tmp = tt_tmp.ortho(threshold=threshold, max_rank=max_rank)

        # normalize solution
        if normalize > 0:
            tt_tmp = (1 / tt_tmp.norm(p=normalize)) * tt_tmp

        # append solution
        solution.append(tt_tmp.copy())

        # print progress
        utl.progress('Running time-symmetrized explicit Euler method', 100 * (i + 1) / len(step_sizes), show=progress,
                     cpu_time=_time.time() - start_time)

    return solution
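A minimal usage sketch (an illustration, not taken from the library's documentation): it assumes the module-level imports this function relies on (scikit_tt.tensor_train as tt, scikit_tt.utils as utl, time as _time) and uses a scaled identity as a toy TT operator.

import scikit_tt.tensor_train as tt

# toy setup: decay equation dx/dt = -x on a 2 x 2 x 2 state space
operator = -1 * tt.eye([2, 2, 2])
initial_value = tt.ones([2, 2, 2], [1, 1, 1])
step_sizes = [0.01] * 100

# the returned list holds the initial value followed by one tensor train per time step
solution = sod(operator, initial_value, step_sizes, normalize=2, progress=False)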
Example #2
def construct_tdm(data, basis_list):
    """Construct transformed data matrices.

    Parameters
    ----------
    data: ndarray
        snapshot matrix
    basis_list: list of lists of lambda functions
        list of basis functions in every mode

    Returns
    -------
    psi_x: instance of TT class
        transformed data matrix corresponding to x
    psi_y: instance of TT class
        transformed data matrix corresponding to y
    """

    # extract snapshots for x and y
    x = data[:, :-1]
    y = data[:, 1:]

    # construct psi_x and psi_y
    start_time = utl.progress('Construct transformed data matrices', 0)
    psi_x = tdt.basis_decomposition(x, basis_list).transpose(cores=2).matricize()
    utl.progress('Construct transformed data matrices', 50, cpu_time=_time.time() - start_time)
    psi_y = tdt.basis_decomposition(y, basis_list).transpose(cores=2).matricize()
    utl.progress('Construct transformed data matrices', 100, cpu_time=_time.time() - start_time)

    return psi_x, psi_y
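A hedged sketch of a possible call on random placeholder data; it assumes tdt refers to scikit_tt.data_driven.transform and borrows the ConstantFunction/GaussFunction basis used by other scripts in this collection, with purely illustrative Gauss parameters.

import numpy as np
import scikit_tt.data_driven.transform as tdt

# two coordinates, 500 random snapshots
data = np.random.rand(2, 500)
basis_list = [[tdt.ConstantFunction(), tdt.GaussFunction(i, 0.0, 1.0)] for i in range(2)]

# transformed data matrices for the shifted snapshot sets x and y
psi_x, psi_y = construct_tdm(data, basis_list)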
Example #3
def explicit_euler(operator, initial_value, step_sizes, threshold=1e-12, max_rank=50, normalize=1, progress=True):
    """Explicit Euler method for linear differential equations in the TT format

    Parameters
    ----------
    operator: instance of TT class
        TT operator of the differential equation
    initial_value: instance of TT class
        initial value of the differential equation
    step_sizes: list of floats
        step sizes for the application of the explicit Euler method
    threshold: float, optional
        threshold for reduced SVD decompositions, default is 1e-12
    max_rank: int, optional
        maximum rank of the solution, default is 50
    normalize: int (0, 1, or 2)
        no normalization if 0, otherwise the solution is normalized in terms of Manhattan or Euclidean norm in each step
    progress: bool, optional
        whether to show the progress of the algorithm or not, default is True

    Returns
    -------
    solution: list of instances of the TT class
        numerical solution of the differential equation
    """

    # return current time
    start_time = utl.progress('Running explicit Euler method', 0, show=progress)

    # define solution
    solution = [initial_value]

    # begin explicit Euler method
    # ---------------------------

    for i in range(len(step_sizes)):
        # compute next time step
        tt_tmp = (tt.eye(operator.row_dims) + step_sizes[i] * operator).dot(solution[i])

        # truncate ranks of the solution
        tt_tmp = tt_tmp.ortho(threshold=threshold, max_rank=max_rank)

        # normalize solution
        if normalize > 0:
            tt_tmp = (1 / tt_tmp.norm(p=normalize)) * tt_tmp

        # append solution
        solution.append(tt_tmp.copy())

        # print progress
        utl.progress('Running explicit Euler method', 100 * (i + 1) / len(step_sizes), show=progress,
                     cpu_time=_time.time() - start_time)

    return solution
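A one-step sanity check (a sketch with the module's tt import assumed): a single explicit Euler step should coincide with (I + h*A) applied to the state, up to the rank truncation.

import scikit_tt.tensor_train as tt

h = 0.1
operator = -1 * tt.eye([2, 2])
state = tt.ones([2, 2], [1, 1])

# solution[0] is the initial value, solution[1] the result of the first step
stepped = explicit_euler(operator, state, [h], normalize=0, progress=False)[1]
direct = (tt.eye([2, 2]) + h * operator).dot(state)
print((stepped - direct).norm())  # close to zero; only the SVD truncation differs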
Example #4
def approximate(x, psi_x, thresholds: list, max_ranks: list):
    """Approximate psi_x using HOSVD and HOCUR, respectively.

    Parameters
    ----------
    x: array
        snapshot matrix
    psi_x: array
        transformed data matrix
    thresholds: list of floats
        thresholds for HOSVD
    max_ranks: list of ints
        maximum ranks for HOCUR

    Returns
    -------
    ranks_hosvd: list of lists of ints
        ranks of the approximations computed with HOSVD
    errors_hosvd: list of floats
        relative errors of the approximations computed with HOSVD
    ranks_hocur: list of lists of ints
        ranks of the approximations computed with HOCUR
    errors_hocur: list of floats
        relative errors of the approximations computed with HOCUR
    """

    # define returns
    ranks_hosvd = []
    errors_hosvd = []
    ranks_hocur = []
    errors_hocur = []

    start_time = utl.progress('Approximation in TT format', 0)

    # reshape psi_x into tensor
    psi_x_full = psi_x.reshape([number_of_boxes, number_of_boxes, 1, 1, 1, psi_x.shape[1]])

    # approximate psi_x using HOSVD
    for i in range(len(thresholds)):
        psi_approx = TT(psi_x_full, threshold=thresholds[i])
        ranks_hosvd.append(psi_approx.ranks)
        errors_hosvd.append(np.linalg.norm(psi_x_full - psi_approx.full()) / np.linalg.norm(psi_x_full))
        utl.progress('Approximation in TT format', 100 * (i + 1) / 6, cpu_time=_time.time() - start_time)

    # approximate psi_x using HOCUR
    for i in range(len(max_ranks)):
        psi_approx = tdt.hocur(x, basis_list, max_ranks[i], repeats=3, multiplier=100, progress=False)
        ranks_hocur.append(psi_approx.ranks)
        errors_hocur.append(np.linalg.norm(psi_x - psi_approx.transpose(cores=2).matricize()) / np.linalg.norm(psi_x))
        utl.progress('Approximation in TT format', 100 * (i + 4) / 6, cpu_time=_time.time() - start_time)

    return ranks_hosvd, errors_hosvd, ranks_hocur, errors_hocur
Example #5
def amuse(psi_x, psi_y, threshold):
    """AMUSE (matrix case).

    Parameters
    ----------
    psi_x: array
        transformed data matrix
    psi_y: array
        transformed data matrix
    threshold: float
        threshold for SVD

    Returns
    -------
    eigenvalues: array
        EDMD eigenvalues (the three eigenvalues closest to 1; real parts)
    eigenvectors: array
        EDMD eigenvectors
    """

    start_time = utl.progress('Apply AMUSE', 0)

    # construct reduced matrix
    # noinspection PyTupleAssignmentBalance
    u, s, v = splin.svd(psi_x,
                        full_matrices=False,
                        overwrite_a=True,
                        check_finite=False,
                        lapack_driver='gesvd')
    indices = np.where(s > threshold)[0]
    u = u[:, indices]
    s = s[indices]
    v = v[indices, :]
    s_inv = np.diag(np.reciprocal(s))
    reduced_matrix = v.dot(psi_y.T).dot(u).dot(s_inv)

    utl.progress('Apply AMUSE', 50, cpu_time=_time.time() - start_time)

    # solve eigenvalue problem and compute EDMD eigenvectors
    eigenvalues, eigenvectors = np.linalg.eig(reduced_matrix)
    idx = (np.abs(eigenvalues - 1)).argsort()[:3]
    eigenvalues = np.real(eigenvalues[idx])
    eigenvectors = u.dot(s_inv).dot(np.real(eigenvectors[:, idx]))

    utl.progress('Apply AMUSE', 100, cpu_time=_time.time() - start_time)

    return eigenvalues, eigenvectors
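An illustrative call with random matrices (the resulting eigenvalues are meaningless; this only shows the required arguments and output shapes, with the module-level imports numpy as np, scipy.linalg as splin and scikit_tt.utils as utl assumed).

import numpy as np

# 10 basis functions evaluated at 200 snapshots (placeholder data)
psi_x = np.random.rand(10, 200)
psi_y = np.random.rand(10, 200)

eigenvalues, eigenvectors = amuse(psi_x, psi_y, 1e-10)
print(eigenvalues.shape, eigenvectors.shape)  # (3,) and (10, 3)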
Example #6
def load_data(path, downsampling_rate, contact_indices, progress=True):
    """Load trajectory data.

    Parameters
    ----------
    path: string
        path where the files are stored
    downsampling_rate: int
        only consider snapshots 0, downsampling_rate, 2*downsampling_rate, ... of each trajectory
    contact_indices: ndarray of ints
        only extract a given subset of indices
    progress: bool, optional
        whether to show the progress of the loading or not, default is True

    Returns
    -------
    data: ndarray
        data matrix
    trajectory_lengths: list of ints
        number of snapshots in each trajectory
    """

    # define list of files
    files = [path + "Contact_Traj_%d.npz" % i for i in range(4)]

    # load data
    start_time = utl.progress('Load trajectory data', 0, show=progress)
    m = 0
    data = []
    trajectory_lengths = []
    for i in range(4):
        trajectory = np.load(
            files[i])["feature_traj"][contact_indices, ::downsampling_rate]
        data.append(trajectory)
        trajectory_lengths.append(trajectory.shape[1])
        m += trajectory.shape[1]
        utl.progress('Load trajectory data (m=' + str(m) + ')',
                     100 * (i + 1) / 4,
                     cpu_time=_time.time() - start_time,
                     show=progress)
    data = np.hstack(data)

    return data, trajectory_lengths
Example #7
    def test_progress(self):
        """test progress"""

        # suppress print output
        self._original_stdout = sys.stdout
        sys.stdout = open(os.devnull, 'w')
        utl.progress('test', 0)
        utl.progress('test', 50)
        utl.progress('test', 100)
        sys.stdout.close()
        sys.stdout = self._original_stdout
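An equivalent pattern (a sketch, not part of the test suite) that restores stdout even if a call raises, using the standard library's contextlib.redirect_stdout.

import contextlib
import os

import scikit_tt.utils as utl


def progress_silently():
    """run utl.progress with all output discarded"""
    with open(os.devnull, 'w') as devnull, contextlib.redirect_stdout(devnull):
        utl.progress('test', 0)
        utl.progress('test', 50)
        utl.progress('test', 100)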
Example #8
    def ortho_left(self, start_index=0, end_index=None, threshold=0, max_rank=np.infty, progress=False,
                   string='Left-orthonormalization'):
        """left-orthonormalization of tensor trains

        Parameters
        ----------
        start_index: int, optional
            start index for orthonormalization, default is 0
        end_index: int, optional
            end index for orthonormalization, default is the index of the penultimate core
        threshold: float, optional
            threshold for reduced SVD decompositions, default is 0
        max_rank: int, optional
            maximum rank of the left-orthonormalized tensor train, default is np.infty
        progress: boolean, optional
            whether to show progress bar, default is False
        string: string, optional
            title of the progress bar if progress is True

        Returns
        -------
        tt_ortho: instance of TT class
            left-orthonormalized representation of self

        Raises
        ------
        TypeError
            if start_index or end_index are not integers
        ValueError
            if threshold is less than 0
        ValueError
            if max_rank is not a positive integer
        """

        # show progress
        start_time = utl.progress(string, 0, show=progress)

        # set end_index to the index of the penultimate core if not otherwise defined
        if end_index is None:
            end_index = self.order - 2

        if isinstance(start_index, (int, np.integer)) and isinstance(end_index, (int, np.integer)):

            if isinstance(threshold, (int, np.integer, float)) and threshold >= 0:

                if (isinstance(max_rank, (int, np.integer)) and max_rank > 0) or max_rank == np.infty:

                    for i in range(start_index, end_index + 1):

                        # apply SVD to ith TT core
                        [u, s, v] = linalg.svd(
                            self.cores[i].reshape(self.ranks[i] * self.row_dims[i] * self.col_dims[i],
                                                  self.ranks[i + 1]),
                            full_matrices=False, overwrite_a=True, check_finite=False, lapack_driver='gesvd')

                        # rank reduction
                        if threshold != 0:
                            indices = np.where(s / s[0] > threshold)[0]
                            u = u[:, indices]
                            s = s[indices]
                            v = v[indices, :]
                        if max_rank != np.infty:
                            u = u[:, :np.minimum(u.shape[1], max_rank)]
                            s = s[:np.minimum(s.shape[0], max_rank)]
                            v = v[:np.minimum(v.shape[0], max_rank), :]

                        # define updated rank and core
                        self.ranks[i + 1] = u.shape[1]
                        self.cores[i] = u.reshape(self.ranks[i], self.row_dims[i], self.col_dims[i], self.ranks[i + 1])

                        # shift non-orthonormal part to next core
                        self.cores[i + 1] = np.tensordot(np.diag(s).dot(v), self.cores[i + 1], axes=(1, 0))

                        # show progress
                        utl.progress(string + '... r=' + str(self.ranks[i + 1]),
                                     100 * (i - start_index + 1) / (end_index - start_index + 1),
                                     cpu_time=_time.time() - start_time, show=progress)

                    return self

                else:
                    raise ValueError('Maximum rank must be a positive integer.')

            else:
                raise ValueError('Threshold must be greater than or equal to 0.')

        else:
            raise TypeError('Start and end indices must be integers.')
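A small sketch with random cores (assuming the TT class is importable from scikit_tt.tensor_train): left-orthonormalization of a two-core tensor train, where the rank shrinks because the 3 x 4 unfolding of the first core has rank at most 3.

import numpy as np
from scikit_tt.tensor_train import TT

# two random TT cores of shape (rank_left, row_dim, col_dim, rank_right)
cores = [np.random.rand(1, 3, 1, 4), np.random.rand(4, 3, 1, 1)]
t = TT(cores)
print(t.ranks)               # [1, 4, 1]

t = t.ortho_left(threshold=1e-14)
print(t.ranks)               # [1, 3, 1] after truncation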
Example #9
# dimension parameters
d_min = 3
d_max = 20

# define arrays for CPU times and relative errors
rel_errors = np.zeros([int((snapshots_max - snapshots_min) / snapshots_step) + 1, int(d_max - d_min) + 1])

# compare CPU times of tensor-based and matrix-based approaches
for i in range(d_min, d_max + 1):

    print('Number of oscillators: ' + str(i))
    print('-' * (23 + len(str(i))) + '\n')

    # construct exact solution in TT and matrix format
    utl.progress('Construct exact solution in TT format', 0, dots=3)
    xi_exact = mdl.fpu_coefficients(i)
    utl.progress('Construct exact solution in TT format', 100, dots=3)

    # generate data
    utl.progress('Generate test data', 0, dots=22)
    [x, y] = mdl.fermi_pasta_ulam(i, snapshots_max)
    utl.progress('Generate test data', 100, dots=22)

    for j in range(snapshots_min, snapshots_max + snapshots_step, snapshots_step):
        # storing indices
        ind_1 = rel_errors.shape[0] - 1 - int((j - snapshots_min) / snapshots_step)
        ind_2 = int((i - d_min))

        utl.progress('Running MANDy', 100 * (rel_errors.shape[0] - ind_1 - 1) / rel_errors.shape[0], dots=27)
Example #10
def amuset(z, eigenvalues_amuse, eigenvectors_amuse, thresholds: list, max_ranks: list):
    """AMUSEt (tensor case)

    Parameters
    ----------
    z: array
        snapshot matrix
    eigenvalues_amuse: array
        eigenvalues computed by AMUSE
    eigenvectors_amuse: array
        eigenvectors computed by AMUSE
    thresholds: list of floats
        thresholds for AMUSEt using HOSVD
    max_ranks: list of ints
        maximum ranks for AMUSEt using HOCUR

    Returns
    -------
    ev_errors_hosvd: list of arrays
        relative errors of approximate eigenvalues (HOSVD)
    et_errors_hosvd: list of arrays
        relative errors of approximate eigentensors (HOSVD)
    ev_errors_hocur: list of arrays
        relative errors of approximate eigenvalues (HOCUR)
    et_errors_hocur: list of arrays
        relative errors of approximate eigentensors (HOCUR)
    et_last_hocur: instance of TT class
        eigentensors of last HOCUR approach
    """

    # define returns
    ev_errors_hosvd = []
    et_errors_hosvd = []
    ev_errors_hocur = []
    et_errors_hocur = []

    start_time = utl.progress('Apply AMUSEt', 0)
    eigentensors = []

    for i in range(len(thresholds) - 1):

        # apply AMUSEt using HOSVD
        eigenvalues, eigentensors = tedmd.amuset_hosvd(z, np.arange(0, z.shape[1] - 1), np.arange(1, z.shape[1]),
                                                       basis_list, threshold=thresholds[i + 1])
        # matricize eigentensors
        eigentensors = eigentensors.transpose(cores=2).matricize()

        # compute relative errors of the eigenvalues
        error_list = []
        for j in range(2):
            error_list.append(np.abs(eigenvalues[j] - eigenvalues_amuse[j]) / np.abs(eigenvalues_amuse[j]))
        ev_errors_hosvd.append(np.array(error_list))

        # compute relative errors of the eigentensors
        error_list = []
        for j in range(2):
            error_list.append(np.minimum(
                np.linalg.norm(eigentensors[:, j] - eigenvectors_amuse[:, j]) / np.linalg.norm(
                    eigenvectors_amuse[:, j]),
                np.linalg.norm(eigentensors[:, j] + eigenvectors_amuse[:, j]) / np.linalg.norm(
                    eigenvectors_amuse[:, j])))
        et_errors_hosvd.append(np.array(error_list))

        utl.progress('Apply AMUSEt', 100 * (i + 1) / 5, cpu_time=_time.time() - start_time)

    for i in range(len(max_ranks)):

        # apply AMUSEt using HOCUR
        eigenvalues, eigentensors = tedmd.amuset_hocur(z, np.arange(0, z.shape[1] - 1), np.arange(1, z.shape[1]), basis_list, max_rank=max_ranks[i], multiplier=100)

        # matricize eigentensors
        eigentensors = eigentensors.transpose(cores=2).matricize()

        # compute relative errors of the eigenvalues
        error_list = []
        for j in range(2):
            error_list.append(np.abs(eigenvalues[j] - eigenvalues_amuse[j]) / np.abs(eigenvalues_amuse[j]))
        ev_errors_hocur.append(np.array(error_list))

        # compute relative errors of the eigentensors
        error_list = []
        for j in range(2):
            error_list.append(np.minimum(
                np.linalg.norm(eigentensors[:, j] - eigenvectors_amuse[:, j]) / np.linalg.norm(
                    eigenvectors_amuse[:, j]),
                np.linalg.norm(eigentensors[:, j] + eigenvectors_amuse[:, j]) / np.linalg.norm(
                    eigenvectors_amuse[:, j])))
        et_errors_hocur.append(np.array(error_list))

        utl.progress('Apply AMUSEt', 100 * (i + 3) / 5, cpu_time=_time.time() - start_time)

    # define et_last_hocur
    et_last_hocur = eigentensors

    return ev_errors_hosvd, et_errors_hosvd, ev_errors_hocur, et_errors_hocur, et_last_hocur
Example #11
# integration time and number of snapshots
time = 1020
m = 10201

# threshold
threshold = 0

# recovering of the dynamics
# --------------------------

print('Recovering of the dynamics')
print('-' * 50)

# construct exact solution in TT and matrix format
utl.progress('Construct exact solution in TT format', 0)
xi_exact = mdl.kuramoto_coefficients(d, w)
utl.progress('Construct exact solution in TT format', 100)

# generate data
utl.progress('Generate test data', 0, dots=22)
[x, y] = mdl.kuramoto(x_0, w, time, m)
utl.progress('Generate test data', 100, dots=22)

# apply MANDy (function-major)
utl.progress('Running MANDy (eps=' + str(threshold) + ')', 0, dots=20 - len(str(threshold)))
with utl.timer() as time:
    xi = mandy.mandy_fm(x, y, psi, threshold=threshold)
utl.progress('Running MANDy (eps=' + str(threshold) + ')', 100, dots=20 - len(str(threshold)))

# CPU time and relative error
Example #12
import matplotlib.pyplot as plt
import scipy.io as io

utl.header(title='Triple-well potential')

# parameters
# ----------

simulations = 500
n_states = 50
number_ev = 3

# load data obtained by applying Ulam's method
# --------------------------------------------

utl.progress('Load data', 0, dots=39)
transitions = io.loadmat("data/TripleWell2D_500.mat")["indices"]
utl.progress('Load data', 100, dots=39)

# construct TT operator
# ---------------------

utl.progress('Construct operator', 0, dots=30)
operator = pf.perron_frobenius_2d(transitions, [n_states, n_states],
                                  simulations)
utl.progress('Construct operator', 100, dots=30)

# approximate leading eigenfunctions of the Perron-Frobenius and Koopman operator
# -------------------------------------------------------------------------------

initial = tt.ones(operator.row_dims, [1] * operator.order, ranks=11)
Example #13
import numpy as np
import scipy.linalg as splin
import scikit_tt.data_driven.mandy as mandy
import scikit_tt.models as mdl
import scikit_tt.utils as utl
import matplotlib.pyplot as plt

utl.header(title='MANDy - Fermi-Pasta-Ulam problem', subtitle='Example 1')

# model parameters
number_of_oscillators = 10
psi = [lambda t: 1, lambda t: t, lambda t: t**2, lambda t: t**3]
p = len(psi)

# construct exact solution in TT and matrix format
utl.progress('Construct exact solution in TT format', 0, dots=7)
xi_exact = mdl.fpu_coefficients(number_of_oscillators)
utl.progress('Construct exact solution in TT format', 100, dots=7)
utl.progress('Construct exact solution in matrix format', 0)
xi_exact_mat = xi_exact.full().reshape(
    [p**number_of_oscillators, number_of_oscillators])
utl.progress('Construct exact solution in matrix format', 100)

# snapshot parameters
snapshots_min = 1000
snapshots_max = 6000
snapshots_step = 500

# maximum number of snapshots for matrix approach
snapshots_mat = 5000
Example #14
simulations = 100
n_states = 25
number_ev = 3

# load data obtained by applying Ulam's method
# --------------------------------------------

directory = os.path.dirname(os.path.realpath(__file__))
transitions = np.load(directory +
                      '/data/quadruple_well_transitions.npz')['transitions']

# construct TT operator
# ---------------------

start_time = utl.progress('Construct operator', 0)
operator = ulam.ulam_3d(transitions, [n_states] * 3, simulations)
utl.progress('Construct operator', 100, cpu_time=_time.time() - start_time)

# approximate leading eigenfunctions
# ----------------------------------

start_time = utl.progress('Approximate eigenfunctions in the TT format', 0)
initial = tt.uniform(operator.row_dims, ranks=[1, 20, 10, 1])
eigenvalues, eigenfunctions = evp.als(operator,
                                      initial,
                                      number_ev=number_ev,
                                      repeats=2)
utl.progress('Approximate eigenfunctions in the TT format',
             100,
             cpu_time=_time.time() - start_time)
Example #15
def adaptive_step_size(operator, initial_value, initial_guess, time_end, step_size_first=1e-10, repeats=1,
                       solver='solve',
                       error_tol=1e-1, closeness_tol=0.5, step_size_min=1e-14, step_size_max=10, closeness_min=1e-3,
                       factor_max=2, factor_safe=0.9, second_method='two_step_Euler', normalize=1, progress=True):
    """Adaptive step size method

    Parameters
    ----------
    operator: instance of TT class
        TT operator of the differential equation
    initial_value: instance of TT class
        initial value of the differential equation
    initial_guess: instance of TT class
        initial guess for the first step
    time_end: float
        time point to which the ODE should be integrated
    step_size_first: float, optional
        first time step, default is 1e-10
    repeats: int, optional
        number of repeats of the ALS in each iteration step, default is 1
    solver: string, optional
        algorithm for obtaining the solutions of the micro systems, can be 'solve' or 'lu', default is 'solve'
    error_tol: float, optional
        tolerance for relative local error, default is 1e-1
    closeness_tol: float, optional
        tolerance for relative change in the closeness to the stationary distribution, default is 0.5
    step_size_min: float, optional
        minimum step size, default is 1e-14
    step_size_max: float, optional
        maximum step size, default is 10
    closeness_min: float, optional
        minimum closeness value, default is 1e-3
    factor_max: float, optional
        maximum factor for step size adaption, default is 2
    factor_safe: float, optional
        safety factor for step size adaption, default is 0.9
    second_method: string, optional
        which higher-order method should be used, can be 'two_step_Euler' or 'trapezoidal_rule', default is
        'two_step_Euler'
    normalize: int (0, 1, or 2)
        no normalization if 0, otherwise the solution is normalized in terms of Manhattan or Euclidean norm in each step
    progress: bool, optional
        whether to show the progress of the algorithm or not, default is True

    Returns
    -------
    solution: list of instances of the TT class
        numerical solution of the differential equation
    """

    # return current time
    start_time = utl.progress('Running adaptive step size method', 0, show=progress)

    # define solution
    solution = [initial_value]

    # define variable for integration
    t_2 = []

    # set closeness variables
    closeness_pre = (operator.dot(initial_value)).norm()

    # define tensor train for solving the systems of linear equations
    t_tmp = initial_guess

    # set time and step size
    time = 0
    time_steps = [0]
    step_size = step_size_first

    # begin integration
    # -----------------

    while (time < time_end) and (closeness_pre > closeness_min) and (step_size > step_size_min):

        # first method
        t_1 = sle.als(tt.eye(operator.row_dims) - step_size * operator, t_tmp.copy(), solution[-1], solver=solver,
                      repeats=repeats)
        t_1 = (1 / t_1.norm(p=1)) * t_1

        # second method
        if second_method == 'two_step_Euler':
            t_2 = sle.als(tt.eye(operator.row_dims) - 0.5 * step_size * operator, t_tmp.copy(), solution[-1],
                          solver=solver,
                          repeats=repeats)
            t_2 = sle.als(tt.eye(operator.row_dims) - 0.5 * step_size * operator, t_2.copy(), solution[-1],
                          solver=solver,
                          repeats=repeats)
        if second_method == 'trapezoidal_rule':
            t_2 = sle.als(tt.eye(operator.row_dims) - 0.5 * step_size * operator, t_tmp.copy(),
                          (tt.eye(operator.row_dims) + 0.5 * step_size * operator).dot(solution[-1]), solver=solver,
                          repeats=repeats)
        # normalize solution
        if normalize > 0:
            t_2 = (1 / t_2.norm(p=normalize)) * t_2

        # compute closeness to stationary distribution
        closeness = (operator.dot(t_1)).norm()

        # compute relative local error and closeness change
        local_error = (t_1 - t_2).norm() / t_1.norm()
        closeness_difference = (closeness - closeness_pre) / closeness_pre

        # compute factors for step size adaption
        factor_local = error_tol / local_error
        factor_closeness = closeness_tol / np.abs(closeness_difference)

        # compute new step size
        step_size_new = np.amin([factor_max, factor_safe * factor_local, factor_safe * factor_closeness]) * step_size

        # accept or reject step
        if (factor_local > 1) and (factor_closeness > 1):
            time = np.min([time + step_size, time_end])
            step_size = np.amin([step_size_new, time_end - time, step_size_max])
            solution.append(t_1.copy())
            time_steps.append(time)
            t_tmp = t_1
            utl.progress('Running adaptive step size method', 100 * time / time_end, show=progress,
                         cpu_time=_time.time() - start_time)
            closeness_pre = closeness
        else:
            step_size = step_size_new

    return solution, time_steps
Example #16
def tedmd_hocur(dimensions, downsampling_rate, integer_lag_times, max_rank,
                directory):
    """tEDMD using AMUSEt with HOSVD

    Parameters
    ----------
    dimensions: list[int]
        numbers of contact indices
    downsampling_rate: int
        downsampling rate for trajectory data
    integer_lag_times: list[int]
        integer lag times for application of tEDMD
    max_rank: int
        maximum rank for HOCUR
    directory: string
        directory of the data to load
    """

    # progress
    start_time = utl.progress('Apply AMUSEt (HOCUR)', 0)

    for i in range(len(dimensions)):

        # parameters
        time_step = 2e-3
        lag_times = time_step * downsampling_rate * integer_lag_times

        # define basis list
        basis_list = [[
            tdt.ConstantFunction(),
            tdt.GaussFunction(k, 0.285, 0.001),
            tdt.GaussFunction(k, 0.62, 0.01)
        ] for k in range(dimensions[i])]

        # progress
        utl.progress('Apply AMUSEt (HOCUR, p=' + str(dimensions[i]) + ')',
                     100 * i / len(dimensions),
                     cpu_time=_time.time() - start_time)

        # load contact indices (sorted by relevance)
        contact_indices = np.load(
            directory + 'ntl9_contact_indices.npz')['indices'][:dimensions[i]]

        # load data
        data, trajectory_lengths = load_data(directory,
                                             downsampling_rate,
                                             contact_indices,
                                             progress=False)

        # select snapshot indices for x and y data
        x_indices = []
        y_indices = []
        for j in range(len(integer_lag_times)):
            x_ind, y_ind = xy_indices(trajectory_lengths, integer_lag_times[j])
            x_indices.append(x_ind)
            y_indices.append(y_ind)

        # apply AMUSEt
        with utl.timer() as timer:
            eigenvalues, _ = tedmd.amuset_hocur(data,
                                                x_indices,
                                                y_indices,
                                                basis_list,
                                                max_rank=max_rank)
        cpu_time = timer.elapsed

        for j in range(len(integer_lag_times)):
            eigenvalues[j] = [eigenvalues[j][1]]

        # Save results to file:
        dic = {}
        dic["lag_times"] = lag_times
        dic["eigenvalues"] = eigenvalues
        dic["cpu_time"] = cpu_time
        np.savez_compressed(
            directory + "Results_NTL9_HOCUR_d" + str(dimensions[i]) + ".npz",
            **dic)

        utl.progress('Apply AMUSEt (HOCUR)',
                     100 * (i + 1) / len(dimensions),
                     cpu_time=_time.time() - start_time)
Example #17
    def __init__(self, x, threshold=0, max_rank=np.infty, progress=False, string=None):
        """
        Parameters
        ----------
        x: list of ndarrays or ndarray
            either a list of TT cores or a full tensor
        threshold: float, optional
            threshold for reduced SVD decompositions, default is 0
        max_rank: int, optional
            maximum rank of the left-orthonormalized tensor train, default is np.infty
        progress: boolean, optional
            whether to show a progress bar during the decomposition, default is False
        string: string, optional
            title of the progress bar; if None (default), 'HOSVD' is used

        Raises
        ------
        TypeError
            if x is neither a list of ndarray nor a single ndarray
        ValueError
            if list elements of x are not 4-dimensional tensors or shapes do not match
        ValueError
            if number of dimensions of the ndarray x is not a multiple of 2
        """

        # initialize from list of cores
        if isinstance(x, list):

            # check if orders of list elements are correct
            if np.all([x[i].ndim == 4 for i in range(len(x))]):

                # check if ranks are correct
                if np.all([x[i].shape[3] == x[i + 1].shape[0] for i in range(len(x) - 1)]):

                    # define order, row dimensions, column dimensions, ranks, and cores
                    self.order = len(x)
                    self.row_dims = [x[i].shape[1] for i in range(self.order)]
                    self.col_dims = [x[i].shape[2] for i in range(self.order)]
                    self.ranks = [x[i].shape[0] for i in range(self.order)] + [x[-1].shape[3]]
                    self.cores = x

                    # rank reduction
                    if threshold != 0 or max_rank != np.infty:
                        self.ortho(threshold=threshold, max_rank=max_rank)

                else:
                    raise ValueError('Shapes of list elements do not match.')

            else:
                raise ValueError('List elements must be 4-dimensional arrays.')

        # initialize from full array   
        elif isinstance(x, np.ndarray):

            # check if order of ndarray is a multiple of 2
            if np.mod(x.ndim, 2) == 0:

                # show progress
                if string is None:
                    string = 'HOSVD'
                start_time = utl.progress(string, 0, show=progress)

                # define order, row dimensions, column dimensions, ranks, and cores
                order = len(x.shape) // 2
                row_dims = x.shape[:order]
                col_dims = x.shape[order:]
                ranks = [1] * (order + 1)
                cores = []

                # permute dimensions, e.g., for order = 4: p = [0, 4, 1, 5, 2, 6, 3, 7]
                p = [order * j + i for i in range(order) for j in range(2)]
                y = np.transpose(x, p).copy()

                # decompose the full tensor
                for i in range(order - 1):
                    # reshape residual tensor
                    m = ranks[i] * row_dims[i] * col_dims[i]
                    n = np.prod(row_dims[i + 1:]) * np.prod(col_dims[i + 1:])
                    y = np.reshape(y, [m, n])

                    # apply SVD in order to isolate modes
                    [u, s, v] = linalg.svd(y, full_matrices=False)

                    # rank reduction
                    if threshold != 0:
                        indices = np.where(s / s[0] > threshold)[0]
                        u = u[:, indices]
                        s = s[indices]
                        v = v[indices, :]
                    if max_rank != np.infty:
                        u = u[:, :np.minimum(u.shape[1], max_rank)]
                        s = s[:np.minimum(s.shape[0], max_rank)]
                        v = v[:np.minimum(v.shape[0], max_rank), :]

                    # define new TT core
                    ranks[i + 1] = u.shape[1]
                    cores.append(np.reshape(u, [ranks[i], row_dims[i], col_dims[i], ranks[i + 1]]))

                    # set new residual tensor
                    y = np.diag(s).dot(v)

                    # show progress
                    utl.progress(string, 100 * (i + 1) / order, cpu_time=_time.time() - start_time, show=progress)

                # define last TT core
                cores.append(np.reshape(y, [ranks[-2], row_dims[-1], col_dims[-1], 1]))

                # initialize tensor train
                self.__init__(cores)

                # show progress
                utl.progress(string, 100, cpu_time=_time.time() - start_time, show=progress)

            else:
                raise ValueError('Number of dimensions must be a multiple of 2.')

        else:
            raise TypeError('Parameter must be either a list of cores or an ndarray.')
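A brief sketch of the ndarray path (numpy as np and TT importable from scikit_tt.tensor_train are assumed): a full tensor with an even number of dimensions is decomposed by HOSVD and then reconstructed.

import numpy as np
from scikit_tt.tensor_train import TT

# order-3 TT operator: row dimensions (2, 3, 4), column dimensions (2, 3, 4)
x = np.random.rand(2, 3, 4, 2, 3, 4)
t = TT(x, threshold=1e-12)

print(t.order, t.row_dims, t.col_dims, t.ranks)
print(np.linalg.norm(t.full() - x) / np.linalg.norm(x))  # relative reconstruction error, close to machine precision here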
Example #18
def implicit_euler(operator,
                   initial_value,
                   initial_guess,
                   step_sizes,
                   repeats=1,
                   tt_solver='als',
                   threshold=1e-12,
                   max_rank=np.infty,
                   micro_solver='solve',
                   progress=True):
    """Implicit Euler method for linear differential equations in the TT format

    Parameters
    ----------
    operator: instance of TT class
        TT operator of the differential equation
    initial_value: instance of TT class
        initial value of the differential equation
    initial_guess: instance of TT class
        initial guess for the first step
    step_sizes: list of floats
        step sizes for the application of the implicit Euler method
    repeats: int, optional
        number of repeats of the (M)ALS in each iteration step, default is 1
    tt_solver: string, optional
        algorithm for solving the systems of linear equations in the TT format, default is 'als'
    threshold: float, optional
        threshold for reduced SVD decompositions, default is 1e-12
    max_rank: int, optional
        maximum rank of the solution, default is infinity
    micro_solver: string, optional
        algorithm for obtaining the solutions of the micro systems, can be 'solve' or 'lu', default is 'solve'
    progress: bool, optional
        whether to show the progress of the algorithm or not, default is True

    Returns
    -------
    solution: list of instances of the TT class
        numerical solution of the differential equation
    """

    # define solution
    solution = [initial_value]

    # define temporary tensor train
    tt_tmp = initial_guess

    # begin implicit Euler method
    # ---------------------------

    for i in range(len(step_sizes)):

        # solve system of linear equations for current time step
        if tt_solver == 'als':
            tt_tmp = sle.als(tt.eye(operator.row_dims) -
                             step_sizes[i] * operator,
                             tt_tmp,
                             solution[i],
                             solver=micro_solver,
                             repeats=repeats)
        if tt_solver == 'mals':
            tt_tmp = sle.mals(tt.eye(operator.row_dims) -
                              step_sizes[i] * operator,
                              tt_tmp,
                              solution[i],
                              solver=micro_solver,
                              threshold=threshold,
                              repeats=repeats,
                              max_rank=max_rank)

        # normalize solution
        tt_tmp = (1 / tt_tmp.norm(p=1)) * tt_tmp

        # append solution
        solution.append(tt_tmp.copy())

        # print progress
        if progress is True:
            utl.progress('Running implicit Euler method',
                         100 * (i + 1) / len(step_sizes))

    return solution
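A hedged usage sketch with a toy operator (module imports assumed); tt.uniform, used here as in the other scripts of this collection, only provides a random full-rank initial guess for the ALS.

import scikit_tt.tensor_train as tt

# toy decay operator and rank-one initial value on a 2 x 2 x 2 state space
operator = -1 * tt.eye([2, 2, 2])
initial_value = tt.ones([2, 2, 2], [1, 1, 1])
initial_guess = tt.uniform([2, 2, 2], ranks=[1, 2, 2, 1])
step_sizes = [0.1] * 50

solution = implicit_euler(operator, initial_value, initial_guess, step_sizes, progress=False)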
Example #19
d_max = 20

# define arrays for CPU times and relative errors
rel_errors = np.zeros([
    int((snapshots_max - snapshots_min) / snapshots_step) + 1,
    int(d_max - d_min) + 1
])

# compare CPU times of tensor-based and matrix-based approaches
for i in range(d_min, d_max + 1):

    print('Number of oscillators: ' + str(i))
    print('-' * (23 + len(str(i))) + '\n')

    # construct exact solution in TT and matrix format
    start_time = utl.progress('Construct exact solution in TT format', 0)
    xi_exact = mdl.fpu_coefficients(i)
    utl.progress('Construct exact solution in TT format',
                 100,
                 cpu_time=_time.time() - start_time)

    # generate data
    start_time = utl.progress('Generate test data', 0)
    [x, y] = fermi_pasta_ulam(i, snapshots_max)
    utl.progress('Generate test data', 100, cpu_time=_time.time() - start_time)

    start_time = utl.progress('Running MANDy', 0)
    for j in range(snapshots_min, snapshots_max + snapshots_step,
                   snapshots_step):
        # storing indices
        ind_1 = rel_errors.shape[0] - 1 - int(
Example #20

utl.header(title='TDMD - von Kármán vortex street')

# load data
path = os.path.dirname(os.path.realpath(__file__))
data = np.load(path + "/data/karman_snapshots.npz")['snapshots']
number_of_snapshots = data.shape[-1] - 1

# tensor-based approach
# ---------------------

# thresholds for orthonormalizations
thresholds = [0, 1e-7, 1e-5, 1e-3]

start_time = utl.progress('applying TDMD for different thresholds', 0)

# construct x and y tensors and convert to TT format
x = TT(data[:, :, 0:number_of_snapshots, None, None, None])
y = TT(data[:, :, 1:number_of_snapshots + 1, None, None, None])

# define lists
eigenvalues_tdmd = [None] * len(thresholds)
modes_tdmd = [None] * len(thresholds)

for i in range(len(thresholds)):
    # apply exact TDMD
    eigenvalues_tdmd[i], modes_tdmd[i] = tdmd.tdmd_exact(x, y, threshold=thresholds[i])

    # convert to full format for comparison and plotting
    modes_tdmd[i] = modes_tdmd[i].full()[:, :, :, 0, 0, 0]
Example #21
lag_times_phy = 1e-3 * downsampling_rate * lag_times_int
lag_times_msm = 1e-3 * np.array([100, 200, 500, 1000, 2000, 4000, 6000])
eps_list = [1e-6, 1e-5, 1e-4, 1e-3, 1e-2]
rank_list = [20, 50, 100, 200, 500]

# data directory (data not included)
directory = "/srv/public/data/ala10/"

# load data (data not included)
data, trajectory_lengths = load_data(directory, downsampling_rate)

# construct index lists
x_list, y_list = get_index_lists(trajectory_lengths, lag_times_int)

# apply tEDMD with HOSVD
start_time = utl.progress('Apply AMUSEt (HOSVD)', 0)
timescales_hosvd = np.zeros((len(eps_list), len(lag_times_int), 3))
for i in range(len(eps_list)):
    utl.progress('Apply AMUSEt (HOSVD, eps=' + str("%.0e" % eps_list[i]) + ')', 100 * i / len(eps_list), cpu_time=_time.time() - start_time)
    eigenvalues, _ = tedmd.amuset_hosvd(data, x_list, y_list, basis_list, threshold=eps_list[i])
    for j in range(len(lag_times_phy)):
        timescales_hosvd[i, j, :] = -lag_times_phy[j] / np.log(eigenvalues[j][1:4])
utl.progress('Apply AMUSEt (HOSVD)', 100, cpu_time=_time.time() - start_time)

# apply tEDMD with HOCUR
start_time = utl.progress('Apply AMUSEt (HOCUR)', 0)
timescales_hocur = np.zeros((len(rank_list), len(lag_times_int), 3))
for i in range(len(rank_list)):
    utl.progress('Apply AMUSEt (HOCUR, rank=' + str(rank_list[i]) + ')', 100 * i / len(rank_list), cpu_time=_time.time() - start_time)
    eigenvalues, _ = tedmd.amuset_hocur(data, x_list, y_list, basis_list, max_rank=rank_list[i], multiplier=2)
    for j in range(len(lag_times_phy)):
Example #22
def arr(x_data,
        y_data,
        basis_list,
        initial_guess,
        repeats=1,
        rcond=10**-2,
        string='ARR',
        progress=True):
    """Alternating ridge regression on transformed data tensors.

    Approximates the solution of a ridge regression in the TT format. For details, see [1]_.

    Parameters
    ----------
    x_data: ndarray
        snapshot matrix which is transformed
    y_data: ndarray
        snapshot matrix for the right-hand side
    basis_list: list of lists of lambda functions
        list of basis functions in every mode
    initial_guess: instance of TT class
        initial guess for the solution of operator @ x = right_hand_side
    repeats: int, optional
        number of repeats of the ALS, default is 1
    rcond: float, optional
        cut-off ratio for singular values of the subproblems, parameter for NumPy's lstsq, default is 1e-2
    string: string
        string to show above progress bar
    progress: boolean, optional
        whether to show progress bar, default is True

    Returns
    -------
    solution: instance of TT class
        approximated solution of the regression problem

    References
    ----------
    .. [1] S. Klus, P. Gelß, "Tensor-Based Algorithms for Image Classification", Algorithms, 2019
    """

    # progress bar
    start_time = utl.progress(string, 0, show=progress)

    # define order of the transformed data tensor
    order = len(basis_list)

    # number of core optimizations and counter
    number_of_optimizations = y_data.shape[0] * repeats * (2 * order - 1)
    counter = 0

    if isinstance(initial_guess, list):
        solution = initial_guess
    else:
        # define list of solutions
        solution = [initial_guess.copy() for _ in range(y_data.shape[0])]

    x_data_org = x_data
    y_data_org = y_data
    m = x_data_org.shape[1]

    for k in range(y_data.shape[0]):

        # define right-hand side and left stack
        rhs = y_data[k, :]
        stack_left = [None] * order
        stack_right = [None] * order

        # copy initial right stack and solution
        # construct right stacks for the left- and right-hand side
        for i in range(order - 1, -1, -1):
            __arr_construct_stack_right(i, stack_right, x_data, basis_list,
                                        solution[k])

        # define iteration number
        current_iteration = 1

        # begin ARR
        while current_iteration <= repeats:

            # first half sweep
            for i in range(order):

                # update left stack
                __arr_construct_stack_left(i, stack_left, x_data, basis_list,
                                           solution[k])

                if i < order - 1:
                    # construct micro system
                    micro_matrix = __arr_construct_micro_matrix(
                        i, stack_left, stack_right, x_data, basis_list,
                        solution[k])

                    # update solution
                    __arr_update_core(i, micro_matrix, rhs, solution[k], rcond,
                                      'forward')

                    # progress bar
                    counter += 1

            utl.progress(string,
                         100 * counter / number_of_optimizations,
                         cpu_time=_time.time() - start_time,
                         show=progress)

            # second half sweep
            for i in range(order - 1, -1, -1):
                # update right stack
                __arr_construct_stack_right(i, stack_right, x_data, basis_list,
                                            solution[k])

                # construct micro system
                micro_matrix = __arr_construct_micro_matrix(
                    i, stack_left, stack_right, x_data, basis_list,
                    solution[k])

                # update solution
                __arr_update_core(i, micro_matrix, rhs, solution[k], rcond,
                                  'backward')

                # progress bar
                counter += 1

            utl.progress(string,
                         100 * counter / number_of_optimizations,
                         cpu_time=_time.time() - start_time,
                         show=progress)

            # increase iteration number
            current_iteration += 1

    return solution
Example #23
def trapezoidal_rule(operator, initial_value, initial_guess, step_sizes, repeats=1, tt_solver='als', threshold=1e-12,
                     max_rank=np.infty, micro_solver='solve', normalize=1, progress=True):
    """Trapezoidal rule for linear differential equations in the TT format

    Parameters
    ----------
    operator: instance of TT class
        TT operator of the differential equation
    initial_value: instance of TT class
        initial value of the differential equation
    initial_guess: instance of TT class
        initial guess for the first step
    step_sizes: list of floats
        step sizes for the application of the trapezoidal rule
    repeats: int, optional
        number of repeats of the (M)ALS in each iteration step, default is 1
    tt_solver: string, optional
        algorithm for solving the systems of linear equations in the TT format, default is 'als'
    threshold: float, optional
        threshold for reduced SVD decompositions, default is 1e-12
    max_rank: int, optional
        maximum rank of the solution, default is infinity
    micro_solver: string, optional
        algorithm for obtaining the solutions of the micro systems, can be 'solve' or 'lu', default is 'solve'
    normalize: int (0, 1, or 2)
        no normalization if 0, otherwise the solution is normalized in terms of Manhattan or Euclidean norm in each step
    progress: bool, optional
        whether to show the progress of the algorithm or not, default is True

    Returns
    -------
    solution: list of instances of the TT class
        numerical solution of the differential equation
    """

    # return current time
    start_time = utl.progress('Running trapezoidal rule', 0, show=progress)

    # define solution
    solution = [initial_value]

    # define temporary tensor train
    tt_tmp = initial_guess

    # begin trapezoidal rule
    # ----------------------

    for i in range(len(step_sizes)):

        # solve system of linear equations for current time step
        if tt_solver == 'als':
            tt_tmp = sle.als(tt.eye(operator.row_dims) - 0.5 * step_sizes[i] * operator, tt_tmp,
                             (tt.eye(operator.row_dims) + 0.5 * step_sizes[i] * operator).dot(solution[i]),
                             solver=micro_solver, repeats=repeats)
        if tt_solver == 'mals':
            tt_tmp = sle.mals(tt.eye(operator.row_dims) - 0.5 * step_sizes[i] * operator, tt_tmp,
                              (tt.eye(operator.row_dims) + 0.5 * step_sizes[i] * operator).dot(solution[i]),
                              solver=micro_solver, repeats=repeats, threshold=threshold, max_rank=max_rank)

        # normalize solution
        if normalize > 0:
            tt_tmp = (1 / tt_tmp.norm(p=normalize)) * tt_tmp

        # append solution
        solution.append(tt_tmp.copy())

        # print progress
        utl.progress('Running trapezoidal rule', 100 * (i + 1) / len(step_sizes), show=progress,
                     cpu_time=_time.time() - start_time)

    return solution
Example #24
def symmetric_euler(operator,
                    initial_value,
                    step_sizes,
                    previous_value=None,
                    threshold=1e-12,
                    max_rank=50,
                    normalize=1,
                    progress=True):
    """
    Time-symmetrized explicit Euler ('second order differencing' in quantum mechanics) for linear differential
    equations in the TT format, see [1]_.

    Parameters
    ----------
    operator : TT
        TT operator of the differential equation
    initial_value : TT
        initial value of the differential equation
    step_sizes : list[float]
        step sizes
    previous_value: TT, optional, default is None
        previous step for symmetric Euler; if not given, one explicit Euler step is computed backwards in time
    threshold : float, optional
        threshold for reduced SVD decompositions, default is 1e-12
    max_rank : int, optional
        maximum rank of the solution, default is 50
    normalize : {0, 1, 2}, optional
        no normalization if 0, otherwise the solution is normalized in terms of Manhattan or Euclidean norm in each step
    progress : bool, optional
        whether to show the progress of the algorithm or not, default is True

    Returns
    -------
    list[TT]
        numerical solution of the differential equation

    References
    ----------
    .. [1] A. Askar, A. S. Cakmak, "Explicit integration method for the time-dependent Schrodinger equation for
           collision problems", J. Chem. Phys. 68, 2794, 1978
    """

    # return current time
    start_time = utl.progress('Running time-symmetrized explicit Euler method',
                              0,
                              show=progress)

    # initialize solution
    solution = [initial_value]

    # begin loop over time steps
    # --------------------------

    for i in range(len(step_sizes)):

        if i == 0:  # initialize: one expl. Euler backwards in time if previous step is not given

            if previous_value is None:
                solution_prev = (tt.eye(operator.row_dims) -
                                 step_sizes[0] * operator).dot(solution[0])
            else:
                solution_prev = previous_value

            # normalize
            if normalize > 0:
                solution_prev = (
                    1 / solution_prev.norm(p=normalize)) * solution_prev

        else:
            solution_prev = solution[i - 1].copy()

        # compute next time step from current and previous time step
        tt_tmp = solution_prev + 2 * step_sizes[i] * operator.dot(solution[i])

        # truncate ranks of the solution
        tt_tmp = tt_tmp.ortho(threshold=threshold, max_rank=max_rank)

        # normalize solution
        if normalize > 0:
            tt_tmp = (1 / tt_tmp.norm(p=normalize)) * tt_tmp

        # append solution
        solution.append(tt_tmp.copy())

        # print progress
        utl.progress('Running time-symmetrized explicit Euler method',
                     100 * (i + 1) / len(step_sizes),
                     show=progress,
                     cpu_time=_time.time() - start_time)

    return solution
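A sketch of the previous_value option (toy operator, module imports assumed): the backward step x(-h) is computed by hand and passed in, instead of letting the routine take its own explicit Euler step backwards in time.

import scikit_tt.tensor_train as tt

h = 0.01
operator = -1 * tt.eye([2, 2, 2])
initial_value = tt.ones([2, 2, 2], [1, 1, 1])

# x(-h) from one explicit Euler step backwards in time
previous_value = (tt.eye([2, 2, 2]) - h * operator).dot(initial_value)

solution = symmetric_euler(operator, initial_value, [h] * 100,
                           previous_value=previous_value, progress=False)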
Example #25
    Parameters
    ----------
    tensor: ndarray
        3-dimensional tensor representing the RGB image
    """

    ax.imshow(rgb_fractals[i])
    plt.axis('off')


utl.header(title='Tensor-generated fractals')

# multisponges
# ------------

utl.progress('Generating multisponges', 0, dots=6)
multisponge = []
for i in range(2, 4):
    for j in range(1, 4):
        multisponge.append(frac.multisponge(i, j))
        utl.progress('Generating multisponges',
                     100 * ((i - 2) * 3 + j) / 6,
                     dots=6)

# Cantor dusts
# ------------

utl.progress('Generating Cantor dusts', 0, dots=6)
cantor_dust = []
for i in range(1, 4):
    for j in range(1, 4):
Example #26
def hocur(x,
          basis_list,
          ranks,
          repeats=1,
          multiplier=10,
          progress=True,
          string=None):
    """Higher-order CUR decomposition of transformed data tensors.

    Given a snapshot matrix x and a list of basis functions in each mode, construct a TT decomposition of the
    transformed data tensor Psi(x) using a higher-order CUR decomposition and maximum-volume subtensors. See [1]_, [2]_
    and [3]_ for details.

    Parameters
    ----------
    x: np.ndarray
        data matrix
    basis_list: list[list[function]]
        list of basis functions in every mode
    ranks: list[int] or int
        maximum TT ranks of the resulting TT representation; if type is int, then the ranks are set to
        [1, ranks, ..., ranks, 1]; note that - depending on the number of linearly independent rows/columns that have
        been found - the TT ranks may be reduced during the decomposition
    repeats: int, optional
        number of repeats, default is 1
    multiplier: int, optional
        multiply the number of initially chosen column indices (given by ranks) in order to increase the probability of
        finding a 'full' set of linearly independent columns; default is 10
    progress: bool, optional
        whether to show the progress of the algorithm or not, default is True
    string: string, optional
        string to show above the progress bar; if None (default), 'HOCUR' is used

    Returns
    -------
    psi: instance of TT class
        TT representation of the transformed data tensor

    References
    ----------
    .. [1] P. Gelß, S. Klus, J. Eisert, C. Schütte, "Multidimensional Approximation of Nonlinear Dynamical Systems",
           Journal of Computational and Nonlinear Dynamics 14, 2019
    .. [2] I. Oseledets, E. Tyrtyshnikov, "TT-cross approximation for multidimensional arrays", Linear Algebra and its
           Applications 432, 2010
    .. [3] S. A. Goreinov, I. V. Oseledets, D. V. Savostyanov, E. E. Tyrtyshnikov, N. L. Zamarashkin, "How to find a
           good submatrix", Matrix Methods: Theory, Algorithms, Applications, 2010
    """

    # parameters
    # ----------

    # number of snapshots
    m = x.shape[1]

    # number of modes
    p = len(basis_list)

    # mode dimensions
    n = [len(basis_list[k]) for k in range(p)] + [m]

    # ranks
    if not isinstance(ranks, list):
        ranks = [1] + [ranks for _ in range(len(n) - 1)] + [1]

    # initial definitions
    # -------------------

    # define initial lists of column indices
    col_inds = __hocur_first_col_inds(n, ranks, multiplier)

    # define list of cores
    cores = [None] * (p + 1)

    # show progress
    # -------------

    if string is None:
        string = 'HOCUR'
    start_time = utl.progress(string, 0, show=progress)

    # start decomposition
    # -------------------

    for k in range(repeats):

        row_inds = [None]

        # first half sweep
        for i in range(p):

            # extract submatrix
            y = __hocur_extract_matrix(x, basis_list, row_inds[i], col_inds[i])

            if k == 0:
                # find linearly independent columns
                cols = __hocur_find_li_cols(y)
                cols = cols[:ranks[i + 1]]
                y = y[:, cols]

            # find optimal rows
            rows = __hocur_maxvolume(y)  # type: list

            # adapt ranks if necessary
            ranks[i + 1] = len(rows)

            if i == 0:

                # store row indices for first dimensions
                row_inds.append([[rows[j]] for j in range(ranks[i + 1])])

            else:

                # convert rows to multi indices
                multi_indices = np.array(
                    np.unravel_index(rows, (ranks[i], n[i])))

                # store row indices for dimensions m_1, n_1, ..., m_i, n_i
                row_inds.append([
                    row_inds[i][multi_indices[0, j]] + [multi_indices[1, j]]
                    for j in range(ranks[i + 1])
                ])

            # define core
            if len(rows) < y.shape[1]:
                y = y[:, :len(rows)]
            u_inv = np.linalg.inv(y[rows, :].copy())
            cores[i] = y.dot(u_inv).reshape([ranks[i], n[i], 1, ranks[i + 1]])

            # show progress
            utl.progress(string + ' ... r=' + str(ranks[i + 1]),
                         100 * (k * 2 * p + i + 1) / (repeats * 2 * p),
                         cpu_time=_time.time() - start_time,
                         show=progress)

        # second half sweep
        for i in range(p, 0, -1):

            # extract submatrix
            y = __hocur_extract_matrix(x, basis_list, row_inds[i],
                                       col_inds[i]).reshape(
                                           [ranks[i], n[i] * ranks[i + 1]])

            # find optimal columns
            cols = __hocur_maxvolume(y.T)  # type: list

            # adapt ranks if necessary
            ranks[i] = len(cols)

            if i == p:

                # store column indices for the last dimension
                col_inds[p - 1] = [[cols[j]] for j in range(ranks[i])]

            else:

                # convert cols to multi indices
                multi_indices = np.array(
                    np.unravel_index(cols, (n[i], ranks[i + 1])))

                # store col indices for dimensions m_i, n_i, ... , m_d, n_d
                col_inds[i - 1] = [[multi_indices[0, j]] +
                                   col_inds[i][multi_indices[1, j]]
                                   for j in range(ranks[i])]

            # define TT core
            if len(cols) < y.shape[0]:
                y = y[:len(cols), :]
            u_inv = np.linalg.inv(y[:, cols].copy())
            cores[i] = u_inv.dot(y).reshape([ranks[i], n[i], 1, ranks[i + 1]])

            # show progress
            utl.progress(string,
                         100 * ((k + 1) * 2 * p - i + 1) / (repeats * 2 * p),
                         cpu_time=_time.time() - start_time,
                         show=progress)

        # define first core
        y = __hocur_extract_matrix(x, basis_list, None, col_inds[0])
        cores[0] = y.reshape([1, n[0], 1, ranks[1]])

    # construct tensor train
    # ----------------------

    psi = TT(cores)

    return psi
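A small usage sketch on random data; it assumes that ConstantFunction and GaussFunction live in the same transform module, mirroring the basis choice of the tEDMD scripts above, with purely illustrative Gauss parameters.

import numpy as np

# two coordinates, 500 random snapshots
data = np.random.rand(2, 500)
basis_list = [[ConstantFunction(), GaussFunction(i, 0.0, 1.0)] for i in range(2)]

# TT ranks capped at 5; the decomposition may reduce them further
psi = hocur(data, basis_list, 5, repeats=2, multiplier=10, progress=False)
print(psi.ranks)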
Example #27
    Parameters
    ----------
    tensor: ndarray
        3-dimensional tensor representing the RGB image
    """

    ax.imshow(tensor)
    plt.axis('off')


utl.header(title='Tensor-generated fractals')

# multisponges
# ------------

start_time = utl.progress('Generating multisponges', 0)
multisponge = []
for i in range(2, 4):
    for j in range(1, 4):
        multisponge.append(mdl.multisponge(i, j))
        utl.progress('Generating multisponges', 100 * ((i - 2) * 3 + j) / 6, cpu_time=_time.time() - start_time)

# Cantor dusts
# ------------

start_time = utl.progress('Generating Cantor dusts', 0)
cantor_dust = []
for i in range(1, 4):
    for j in range(1, 4):
        cantor_dust.append(mdl.cantor_dust(i, j))
        utl.progress('Generating Cantor dusts', 100 * ((i - 1) * 3 + j) / 9, cpu_time=_time.time() - start_time)
Example #28
        derivatives[-1, j] = -2 * snapshots[-1, j] + snapshots[
            -2, j] + 0.7 * (-snapshots[-1, j]**3 -
                            (snapshots[-1, j] - snapshots[-2, j])**3)

    return snapshots, derivatives


utl.header(title='MANDy - Fermi-Pasta-Ulam problem', subtitle='Example 1')

# model parameters
number_of_oscillators = 10
psi = [lambda t: 1, lambda t: t, lambda t: t**2, lambda t: t**3]
p = len(psi)

# construct exact solution in TT and matrix format
start_time = utl.progress('Construct exact solution in TT format', 0)
xi_exact = mdl.fpu_coefficients(number_of_oscillators)
utl.progress('Construct exact solution in TT format',
             100,
             cpu_time=_time.time() - start_time)
start_time = utl.progress('Construct exact solution in matrix format', 0)
xi_exact_mat = xi_exact.full().reshape(
    [p**number_of_oscillators, number_of_oscillators])
utl.progress('Construct exact solution in matrix format',
             100,
             cpu_time=_time.time() - start_time)

# number of repeats
repeats = 1

# snapshot parameters