Example no. 1
def run(maxprocs, PLOTTING=False, loglev=logging.WARNING):

    logging.basicConfig(level=loglev)

    if PLOTTING:
        import matplotlib.pyplot as plt
        from matplotlib import cm
        from mpl_toolkits.mplot3d import Axes3D

    nsucc = 0
    nfail = 0

    ####
    # Test Tensor Wrapper
    ####
    def f(x, params=None):
        if x.ndim == 1:
            return np.sum(x)
        if x.ndim == 2:
            return np.sum(x, axis=1)

    dims = [11, 21, 31]
    X = [
        np.linspace(1., 10., dims[0]),
        np.linspace(1, 20., dims[1]),
        np.linspace(1, 30., dims[2])
    ]
    XX = np.array(list(itertools.product(*X)))
    F = f(XX).reshape(dims)

    tw = DT.TensorWrapper(f, X, None, dtype=float)

    if F[5,10,15] == tw[5,10,15] and \
            np.all(F[1,2,:] == tw[1,2,:]) and \
            np.all(F[3:5,2:3,20:24] == tw[3:5,2:3,20:24]):
        print_ok("TTcross: Tensor Wrapper")
        nsucc += 1
    else:
        print_fail("TTcross: TensorWrapper")
        nfail += 1
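
    # Hedged aside (numpy-only, illustrative): the dense reference tensor F can
    # equivalently be built from an 'ij'-indexed meshgrid; this is the full
    # grid that the lazy, caching TensorWrapper avoids materializing up front.
    G = np.array(np.meshgrid(*X, indexing='ij'))  # shape (3, 11, 21, 31)
    assert np.allclose(np.sum(G, axis=0), F)      # f(x) = sum(x) on the grid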

    ####
    # Test Maxvol
    ####
    maxvoleps = 1e-2
    pass_maxvol = True
    N = 100

    i = 0
    while pass_maxvol == True and i < N:
        i += 1
        A = npr.random(600).reshape((100, 6))
        (I, AsqInv, it) = DT.maxvol(A, delta=maxvoleps)
        if np.max(np.abs(np.dot(A, AsqInv))) > 1. + maxvoleps:
            pass_maxvol = False

    if pass_maxvol == True:
        print_ok('TTcross: Maxvol')
        nsucc += 1
    else:
        print_fail('TTcross: Maxvol at it=%d' % i)
        nfail += 1
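
    # Hedged sketch of the quasi-maxvol idea (illustrative only, not DT.maxvol
    # itself): greedily swap rows of the square submatrix A[I, :] to grow
    # |det(A[I, :])| until every entry of A @ inv(A[I, :]) is bounded by
    # 1 + delta in absolute value -- the property the loop above tests.
    def maxvol_sketch(A, delta=1e-2, max_it=100):
        n, r = A.shape
        I = np.arange(r)                      # start from the first r rows
        for _ in range(max_it):
            B = np.dot(A, npla.inv(A[I, :]))  # B[I, :] == eye(r)
            i, j = np.unravel_index(np.argmax(np.abs(B)), B.shape)
            if np.abs(B[i, j]) <= 1. + delta: # quasi-dominance reached
                break
            I[j] = i                          # swap multiplies |det| by |B[i, j]|
        return I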

    ####
    # Test Low Rank Approximation
    ####
    maxvoleps = 1e-5
    delta = 1e-5
    pass_lowrankapprox = True
    N = 10

    i = 0
    logging.info(
        "(rows,cols,rank) FroA, FroErr, FroErr/FroA, maxAAinv, maxAinvA")
    while pass_lowrankapprox == True and i < N:
        i += 1
        size = npr.random_integers(10, 100, 2)
        r = npr.random_integers(max(1, np.min(size) - 10), np.min(size))
        A = npr.random(np.prod(size)).reshape(size)
        (I, J, AsqInv, it) = DT.lowrankapprox(A,
                                              r,
                                              delta=delta,
                                              maxvoleps=maxvoleps)

        AAinv = np.max(np.abs(np.dot(A[:, J], AsqInv)))
        AinvA = np.max(np.abs(np.dot(AsqInv, A[I, :])))
        FroErr = npla.norm(np.dot(A[:, J], np.dot(AsqInv, A[I, :])) - A, 'fro')
        FroA = npla.norm(A, 'fro')
        logging.info(
            "(%d,%d,%d) %f, %f, %f %f %f" %
            (size[0], size[1], r, FroA, FroErr, FroErr / FroA, AAinv, AinvA))
        if AAinv > 1. + maxvoleps:
            pass_lowrankapprox = False

    if pass_lowrankapprox == True:
        print_ok('TTcross: Random Low Rank Approx')
        nsucc += 1
    else:
        print_fail('TTcross: Random Low Rank Approx at it=%d' % i)
        nfail += 1
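
    # Hedged aside (numpy-only, illustrative names): lowrankapprox realizes a
    # skeleton (cross/CUR) decomposition A ~= A[:, J] inv(A[I, J]) A[I, :],
    # with AsqInv playing the role of inv(A[np.ix_(I, J)]). For an exactly
    # rank-r matrix, any nonsingular r x r skeleton reconstructs it exactly:
    Lf = npr.random((50, 3))
    Rf = npr.random((3, 40))
    M = np.dot(Lf, Rf)                       # exactly rank 3
    Is, Js = [0, 1, 2], [0, 1, 2]            # a.s. nonsingular for random factors
    Mskel = np.dot(M[:, Js], np.dot(npla.inv(M[np.ix_(Is, Js)]), M[Is, :]))
    assert np.allclose(Mskel, M)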

    ####
    # Sin*Cos Low Rank Approximation
    ####
    maxvoleps = 1e-5
    delta = 1e-5

    size = (100, 100)
    r = 1
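    # f(x, y) = sin(x)cos(y) is separable, so the grid matrix has exact rank 1.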

    # Build up the 2d tensor wrapper
    def f(X, params):
        return np.sin(X[0]) * np.cos(X[1])

    X = [
        np.linspace(0, 2 * np.pi, size[0]),
        np.linspace(0, 2 * np.pi, size[1])
    ]
    TW = DT.TensorWrapper(f, X, None, dtype=float)

    # Compute low rank approx
    (I, J, AsqInv, it) = DT.lowrankapprox(TW,
                                          r,
                                          delta=delta,
                                          maxvoleps=maxvoleps)
    fill = TW.get_fill_level()

    Fapprox = np.dot(TW[:, J].reshape((TW.shape[0], len(J))),
                     np.dot(AsqInv, TW[I, :].reshape((len(I), TW.shape[1]))))
    FroErr = npla.norm(Fapprox - TW[:, :], 'fro')
    if FroErr < 1e-12:
        print_ok(
            'TTcross: sin(x)*cos(y) Low Rank Approx (FroErr=%e, Fill=%.2f%%)' %
            (FroErr, 100. * np.float(fill) / np.float(TW.get_size())))
        nsucc += 1
    else:
        print_fail(
            'TTcross: sin(x)*cos(y) Low Rank Approx (FroErr=%e, Fill=%.2f%%)' %
            (FroErr, 100. * np.float(fill) / np.float(TW.get_size())))
        nfail += 1
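
    # Hedged check (illustrative): separability implies a single nonzero
    # singular value of the dense grid matrix, up to round-off.
    s = npla.svd(TW[:, :], compute_uv=False)
    assert s[1] / s[0] < 1e-12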

    if PLOTTING:
        plt.figure(figsize=(12, 7))
        plt.subplot(1, 2, 1)
        plt.imshow(TW[:, :])
        plt.subplot(1, 2, 2)
        plt.imshow(Fapprox)

    ####
    # Sin(x+y) Low Rank Approximation
    ####
    maxvoleps = 1e-5
    delta = 1e-5

    size = (100, 100)
    r = 2
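    # sin(x + y) = sin(x)cos(y) + cos(x)sin(y): two separable terms, so the
    # exact rank is 2.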

    # Build up the 2d tensor wrapper
    def f(X, params):
        return np.sin(X[0] + X[1])

    X = [
        np.linspace(0, 2 * np.pi, size[0]),
        np.linspace(0, 2 * np.pi, size[1])
    ]
    TW = DT.TensorWrapper(f, X, None, dtype=float)

    # Compute low rank approx
    (I, J, AsqInv, it) = DT.lowrankapprox(TW,
                                          r,
                                          delta=delta,
                                          maxvoleps=maxvoleps)
    fill = TW.get_fill_level()

    Fapprox = np.dot(TW[:, J], np.dot(AsqInv, TW[I, :]))
    FroErr = npla.norm(Fapprox - TW[:, :], 'fro')
    if FroErr < 1e-12:
        print_ok('TTcross: sin(x+y) Low Rank Approx (FroErr=%e, Fill=%.2f%%)' %
                 (FroErr, 100. * np.float(fill) / np.float(TW.get_size())))
        nsucc += 1
    else:
        print_fail(
            'TTcross: sin(x+y) Low Rank Approx (FroErr=%e, Fill=%.2f%%)' %
            (FroErr, 100. * np.float(fill) / np.float(TW.get_size())))
        nfail += 1

    if PLOTTING:
        plt.figure(figsize=(12, 7))
        plt.subplot(1, 2, 1)
        plt.imshow(TW[:, :])
        plt.subplot(1, 2, 2)
        plt.imshow(Fapprox)

    ####
    # Sin(x)*cos(y)*Sin(z) TTcross Approximation
    ####
    maxvoleps = 1e-5
    delta = 1e-5
    eps = 1e-10

    size = (10, 10, 10)

    # Build up the 3d tensor wrapper
    def f(X, params):
        return np.sin(X[0]) * np.cos(X[1]) * np.sin(X[2])

    X = [
        np.linspace(0, 2 * np.pi, size[0]),
        np.linspace(0, 2 * np.pi, size[1]),
        np.linspace(0, 2 * np.pi, size[2])
    ]
    TW = DT.TensorWrapper(f, X, dtype=float)

    # Compute low rank approx
    TTapprox = DT.TTvec(TW)
    TTapprox.build(method='ttcross', eps=eps, mv_eps=maxvoleps, delta=delta)
    fill = TW.get_fill_level()
    crossRanks = TTapprox.ranks()
    PassedRanks = all(
        map(operator.gt, crossRanks[1:-1],
            TTapprox.rounding(eps=delta).ranks()[1:-1]))

    FroErr = mla.norm(
        TTapprox.to_tensor() -
        TW.copy()[tuple([slice(None, None, None) for i in range(len(size))])],
        'fro')
    if FroErr < eps:
        print_ok(
            'TTcross: sin(x)*cos(y)*sin(z) Low Rank Approx (FroErr=%e, Fill=%.2f%%)'
            % (FroErr, 100. * np.float(fill) / np.float(TW.get_size())))
        nsucc += 1
    else:
        print_fail(
            'TTcross: sin(x)*cos(y)*sin(z) Low Rank Approx (FroErr=%e, Fill=%.2f%%)'
            % (FroErr, 100. * np.float(fill) / np.float(TW.get_size())))
        nfail += 1

    if PLOTTING:
        # Get filled idxs
        fill_idxs = np.array(TW.get_fill_idxs())
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        ax.scatter(fill_idxs[:, 0], fill_idxs[:, 1], fill_idxs[:, 2])
        ax.set_xlabel('x')
        ax.set_ylabel('y')
        ax.set_zlabel('z')

        # Get last used idxs
        Is = TTapprox.Is
        Js = TTapprox.Js
        ndim = len(X)
        dims = [len(Xi) for Xi in X]
        idxs = []
        for k in range(len(Is) - 1, -1, -1):
            for i in range(len(Is[k])):
                for j in range(len(Js[k])):
                    for kk in range(dims[k]):
                        idxs.append(Is[k][i] + (kk, ) + Js[k][j])

        last_idxs = np.array(idxs)
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        ax.scatter(last_idxs[:, 0], last_idxs[:, 1], last_idxs[:, 2], c='r')

        plt.show(block=False)

    print_summary("TTcross", nsucc, nfail)

    return (nsucc, nfail)
Example no. 2
def run(maxprocs, PLOTTING=False, loglev=logging.WARNING):

    logging.basicConfig(level=loglev)

    import numpy as np
    import numpy.linalg as npla
    import itertools
    import time

    import TensorToolbox as DT
    import TensorToolbox.multilinalg as mla

    if PLOTTING:
        from matplotlib import pyplot as plt

    nsucc = 0
    nfail = 0

    ###################################################################
    # Test timings and comp. rate for compression of Laplace-like op.
    ###################################################################
    eps = 0.001
    Ns = 2**np.arange(4, 7, 1, dtype=int)
    ds = 2**np.arange(4, 6, dtype=int)
    timing = np.zeros((len(Ns), len(ds)))
    comp_rate = np.zeros((len(Ns), len(ds)))
    for i_N, N in enumerate(Ns):
        D = np.diag(-np.ones((N - 1)), -1) + np.diag(-np.ones(
            (N - 1)), 1) + np.diag(2 * np.ones((N)), 0)
        I = np.eye(N)
        D_flat = D.flatten()
        I_flat = I.flatten()
        for i_d, d in enumerate(ds):
            sys.stdout.write('N=%d   , d=%d      [STARTED]\r' % (N, d))
            sys.stdout.flush()
            # Canonical form of n-dimensional Laplace operator
            CPtmp = []  # U[i][alpha,k] = U_i(alpha,k)
            for i in range(d):
                CPi = np.empty((d, N**2))
                for alpha in range(d):
                    if i != alpha:
                        CPi[alpha, :] = I_flat
                    else:
                        CPi[alpha, :] = D_flat
                CPtmp.append(CPi)
            CP = DT.Candecomp(CPtmp)

            # Canonical to TT
            sys.stdout.write("\033[K")
            sys.stdout.write('N=%4d   , d=%3d      [CP->TT]\r' % (N, d))
            sys.stdout.flush()
            TT = DT.TTmat(CP, nrows=N, ncols=N)
            TT.build()
            TT_pre = TT.copy()
            pre_norm = mla.norm(TT_pre, 'fro')

            # Rounding TT
            sys.stdout.write("\033[K")
            sys.stdout.write('N=%4d   , d=%3d      [TT-round]\r' % (N, d))
            sys.stdout.flush()
            st = time.clock()
            TT.rounding(eps)
            end = time.clock()
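            # The Laplace kron-sum admits an exact TT representation with all
            # internal ranks equal to 2 (cores [D, I], [[I, 0], [D, I]], ...,
            # [I, D]^T), which is what the rank check below verifies.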

            if np.max(TT.ranks()) != 2:
                print_fail(
                    "\033[K" +
                    "1.1 Compression Timing N=%4d   , d=%3d      [RANK ERROR]   Time: %f"
                    % (N, d, end - st))
                nfail += 1
            elif mla.norm(TT_pre - TT, 'fro') > eps * pre_norm:
                print_fail(
                    "\033[K" +
                    "1.1 Compression Timing N=%4d   , d=%3d      [NORM ERROR]   Time: %f"
                    % (N, d, end - st))
                nfail += 1
            else:
                print_ok(
                    "\033[K" +
                    "1.1 Compression Timing N=%4d   , d=%3d      [ENDED]   Time: %f"
                    % (N, d, end - st))
                nsucc += 1

            comp_rate[i_N, i_d] = float(TT.size()) / N**(2. * d)
            timing[i_N, i_d] = end - st

    # Compute scalings with respect to N and d
    if PLOTTING:
        d_sc = np.polyfit(np.log2(ds), np.log2(timing[-1, :]), 1)[0]
        N_sc = np.polyfit(np.log2(Ns), np.log2(timing[:, -1]), 1)[0]
        sys.stdout.write("Scaling: N^%f, d^%f\n" % (N_sc, d_sc))
        sys.stdout.flush()

        plt.figure(figsize=(14, 7))
        plt.subplot(1, 2, 1)
        plt.loglog(Ns, comp_rate[:, -1], 'o-', basex=2, basey=2)
        plt.grid()
        plt.xlabel('N')
        plt.ylabel('Comp. Rate TT/FULL')
        plt.subplot(1, 2, 2)
        plt.loglog(Ns, timing[:, -1], 'o-', basex=2, basey=2)
        plt.grid()
        plt.xlabel('N')
        plt.ylabel('Round Time (s)')
        plt.show(block=False)

        plt.figure(figsize=(14, 7))
        plt.subplot(1, 2, 1)
        plt.loglog(ds, comp_rate[-1, :], 'o-', basex=2, basey=2)
        plt.grid()
        plt.xlabel('d')
        plt.ylabel('Comp. Rate TT/FULL')
        plt.subplot(1, 2, 2)
        plt.loglog(ds, timing[-1, :], 'o-', basex=2, basey=2)
        plt.grid()
        plt.xlabel('d')
        plt.ylabel('Round Time (s)')
        plt.show(block=False)

        from mpl_toolkits.mplot3d import Axes3D
        from matplotlib import cm
        (NN, dd) = np.meshgrid(np.log2(Ns), np.log2(ds))
        T = timing.copy().T
        T[T == 0.] = np.min(T[np.nonzero(T)])
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        ax.plot_surface(NN,
                        dd,
                        np.log2(T),
                        rstride=1,
                        cstride=1,
                        cmap=cm.coolwarm,
                        linewidth=0,
                        antialiased=False)
        plt.show(block=False)

    print_summary("TT Compression", nsucc, nfail)

    return (nsucc, nfail)
Example no. 3
def run(maxprocs, PLOTTING=False, loglev=logging.WARNING):

    logging.basicConfig(level=loglev)

    import numpy as np
    import numpy.linalg as npla
    import itertools
    import time

    import TensorToolbox as DT
    import TensorToolbox.multilinalg as mla

    if PLOTTING:
        from matplotlib import pyplot as plt

    nsucc = 0
    nfail = 0

    #####################################################################################
    # Test matrix 2-norm on random matrices
    #####################################################################################
    span = np.array([0., 1.])
    d = 3
    nrows = 16
    ncols = 16
    if isinstance(nrows, int): nrows = [nrows for i in range(d)]
    if isinstance(ncols, int): ncols = [ncols for i in range(d)]
    eps = 1e-6
    round_eps = 1e-12

    # sys.stdout.write("Matrix 2-norm: Random\n  nrows=[%s],\n  ncols=[%s],  d=%3d      [START] \n" % (','.join(map(str,nrows)),','.join(map(str,ncols)),d))
    # sys.stdout.flush()

    # Construction of TT random matrix
    TT_RAND = DT.randmat(d, nrows, ncols)

    # Construct FULL random tensor
    FULL_RAND = TT_RAND.to_tensor()
    rowcol = list(
        itertools.chain(*[[ri, ci] for (ri, ci) in zip(nrows, ncols)]))
    FULL_RAND = np.reshape(FULL_RAND, rowcol)
    idxswap = list(range(0, 2 * d, 2))
    idxswap.extend(range(1, 2 * d, 2))
    FULL_RAND = np.transpose(FULL_RAND, axes=idxswap)
    FULL_RAND = np.reshape(FULL_RAND, (np.prod(nrows), np.prod(ncols)))
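
    # Hedged sanity check (numpy-only, d = 2) of the interleaved-axis
    # permutation above: a tensor with axes ordered (r1, c1, r2, c2) maps to
    # the usual Kronecker-product matrix layout after transpose/reshape.
    A2 = np.random.rand(2, 3)
    B2 = np.random.rand(4, 5)
    T2 = np.einsum('ij,kl->ijkl', A2, B2)             # axes (r1, c1, r2, c2)
    M2 = T2.transpose(0, 2, 1, 3).reshape((2 * 4, 3 * 5))
    assert np.allclose(M2, np.kron(A2, B2))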

    # Check results
    tt_norm = mla.norm(TT_RAND, 2, round_eps=round_eps, eps=eps)
    full_norm = npla.norm(FULL_RAND, 2)
    if np.abs(tt_norm - full_norm) / npla.norm(FULL_RAND, 'fro') <= 0.02:
        print_ok(
            "3.1 Matrix 2-norm: Random  nrows=%s, ncols=%s , d=%3d  , TT-norm = %.5f , FULL-norm = %.5f"
            % (str(nrows), str(ncols), d, tt_norm, full_norm))
        nsucc += 1
    else:
        print_fail(
            "3.1 Matrix 2-norm: Random  nrows=%s, ncols=%s, d=%3d  , TT-norm = %.5f , FULL-norm = %.5f"
            % (str(nrows), str(ncols), d, tt_norm, full_norm), '')
        nfail += 1


# opt = 'a'
# while (opt != 'c' and opt != 's' and opt != 'q'):
#     print("Matrix-vector product test with Schrodinger operator:")
#     print("\t [c]: continue")
#     print("\t [s]: skip")
#     print("\t [q]: exit")
#     opt = sys.stdin.read(1)
#     if (opt ==  'q'):
#         exit(0)

# if opt == 'c':
#     #####################################################################################
#     # Test matrix-vector product by computing the smallest eigenvalue of the operator in
#     # "Tensor-Train decomposition" I.V.Oseledets
#     # "Algorithms in high dimensions" Beylkin and Mohlenkamp
#     #####################################################################################
#     span = np.array([0.,1.])
#     d = 2
#     N = 16
#     h = 1/float(N-1)
#     cv = 100.
#     cw = 5.
#     eps =1e-10

#     # Construction of TT Laplace operator
#     CPtmp = []
#     D = -1./h**2. * ( np.diag(np.ones((N-1)),-1) + np.diag(np.ones((N-1)),1) + np.diag(-2.*np.ones((N)),0) )
#     I = np.eye(N)
#     D_flat = D.flatten()
#     I_flat = I.flatten()
#     for i in range(d):
#         CPi = np.empty((d,N**2))
#         for alpha in range(d):
#             if i != alpha:
#                 CPi[alpha,:] = I_flat
#             else:
#                 CPi[alpha,:] = D_flat
#         CPtmp.append(CPi)

#     CP_lap = DT.Candecomp(CPtmp)
#     TT_lap = DT.TTmat(CP_lap,nrows=N,ncols=N)
#     TT_lap.rounding(eps)
#     CPtmp = None
#     CP_lap = None

#     # Construction of TT Potential operator
#     CPtmp = []
#     X = np.linspace(span[0],span[1],N)
#     # B = np.diag(np.cos(2.*np.pi*X),0)
#     B = np.diag(np.cos(X),0)
#     I = np.eye(N)
#     B_flat = B.flatten()
#     I_flat = I.flatten()
#     for i in range(d):
#         CPi = np.empty((d,N**2))
#         for alpha in range(d):
#             if i != alpha:
#                 CPi[alpha,:] = I_flat
#             else:
#                 CPi[alpha,:] = B_flat
#         CPtmp.append(CPi)

#     CP_pot = DT.Candecomp(CPtmp)
#     TT_pot = DT.TTmat(CP_pot,nrows=N,ncols=N)
#     TT_pot.rounding(eps)
#     CPtmp = None
#     CP_pot = None

#     # Construction of TT electron-electron interaction
#     CPtmp_cos = []
#     CPtmp_sin = []
#     X = np.linspace(span[0],span[1],N)
#     # Bcos = np.diag(np.cos(2.*np.pi*X),0)
#     # Bsin = np.diag(np.sin(2.*np.pi*X),0)
#     Bcos = np.diag(np.cos(X),0)
#     Bsin = np.diag(np.sin(X),0)
#     I = np.eye(N)
#     # D_flat = D.flatten()
#     Bcos_flat = Bcos.flatten()
#     Bsin_flat = Bsin.flatten()
#     I_flat = I.flatten()

#     for i in range(d):
#         CPi_cos = np.zeros((d*(d-1)/2,N**2))
#         CPi_sin = np.zeros((d*(d-1)/2,N**2))
#         k=0
#         for alpha in range(d):
#             for beta in range(alpha+1,d):
#                 if alpha == i or beta == i :
#                     CPi_cos[k,:] = Bcos_flat
#                     CPi_sin[k,:] = Bsin_flat
#                 else:
#                     CPi_cos[k,:] = I_flat
#                     CPi_sin[k,:] = I_flat
#                 k += 1
#         CPtmp_cos.append(CPi_cos)
#         CPtmp_sin.append(CPi_sin)

#     CP_int_cos = DT.Candecomp(CPtmp_cos)
#     CP_int_sin = DT.Candecomp(CPtmp_sin)
#     TT_int_cos = DT.TTmat(CP_int_cos,nrows=N,ncols=N)
#     TT_int_sin = DT.TTmat(CP_int_sin,nrows=N,ncols=N)
#     TT_int_cos.rounding(eps)
#     TT_int_sin.rounding(eps)
#     TT_int = (TT_int_cos + TT_int_sin).rounding(eps)
#     CPtmp_cos = None
#     CPtmp_sin = None
#     CP_int_cos = None
#     CP_int_sin = None

#     # # Construction of TT Scholes-tensor
#     # CPtmp = []
#     # X = np.linspace(span[0],span[1],N)
#     # D = 1./(2*h) * (np.diag(np.ones(N-1),1) - np.diag(np.ones(N-1),-1))
#     # D[0,0] = -1./h
#     # D[0,1] = 1./h
#     # D[-1,-1] = 1./h
#     # D[-1,-2] = -1./h
#     # I = np.eye(N)
#     # D_flat = D.flatten()
#     # I_flat = I.flatten()
#     # for i in range(d):
#     #     CPi = np.zeros((d*(d-1)/2,N**2))
#     #     k = 0
#     #     for alpha in range(d):
#     #         for beta in range(alpha+1,d):
#     #             if alpha == i:
#     #                 CPi[k,:] = D_flat
#     #             elif beta == i:
#     #                 CPi[k,:] = D_flat
#     #             else:
#     #                 CPi[k,:] = I_flat
#     #             k += 1
#     #     CPtmp.append(CPi)

#     # CP_sch = DT.Candecomp(CPtmp)
#     # TT_sch = DT.TTmat(CP_sch,nrows=N,ncols=N)
#     # TT_sch.rounding(eps)

#     H = (TT_lap + TT_pot + TT_int).rounding(eps)
#     Cd = mla.norm(H,2)

#     # Identity tensor
#     TT_id = DT.eye(d,N)

#     Hhat = (Cd * TT_id - H).rounding(eps)

    print_summary("TT Norms", nsucc, nfail)

    return (nsucc, nfail)
Example no. 4
        return np.exp(-np.sum((X - params['X0'])**2.) / (2 * params['l']**2.))

    X = [np.linspace(0, 1., size[0])] * d
    TW = TT.TensorWrapper(f, X, params)

    # Compute low rank approx
    TTapprox = TT.QTTvec(TW)
    TTapprox.build(method='ttdmrg', eps=eps, mv_eps=maxvoleps, kickrank=0)
    fill = TW.get_fill_level()
    crossRanks = TTapprox.ranks()
    PassedRanks = all(
        map(operator.eq, crossRanks[1:-1],
            TTapprox.ranks()[1:-1]))

    A = TW.copy()[tuple([slice(None, None, None) for i in range(len(size))])]
    FroErr = mla.norm(TTapprox.to_tensor() - A, 'fro')
    kappa = np.max(A) / np.min(
        A)  # This is slightly off with respect to the analysis
    r = np.max(TTapprox.ranks())
    epsTarget = (2. * r + kappa * r + 1.)**(np.log2(d)) * (r + 1.) * eps
    if FroErr < epsTarget:
        print('[SUCCESS] QTTdmrg: 1./(x+y+1) Low Rank Approx (FroErr=%e, Fill=%.2f%%)' % (
            FroErr, 100. * np.float(fill) / np.float(TW.get_size())))
    else:
        print('[FAIL] QTTdmrg: 1./(x+y+1) Low Rank Approx (FroErr=%e, Fill=%.2f%%)' % (
            FroErr, 100. * np.float(fill) / np.float(TW.get_size())))

    if IS_PLOTTING and d == 2:
        # Get filled idxs
        fill_idxs = TW.get_fill_idxs()
        last_idxs = TTapprox.get_ttdmrg_eval_idxs()
Example no. 5
def run(maxprocs, PLOTTING=False, loglev=logging.WARNING):

    logging.basicConfig(level=loglev)

    if PLOTTING:
        import matplotlib.pyplot as plt
        from matplotlib import cm
        from mpl_toolkits.mplot3d import Axes3D

    nsucc = 0
    nfail = 0

    ####
    # Test Tensor Wrapper
    ####
    def f(x, params=None):
        if x.ndim == 1:
            return np.sum(x)
        if x.ndim == 2:
            return np.sum(x, axis=1)

    dims = [11, 21, 31]
    X = [
        np.linspace(1., 10., dims[0]),
        np.linspace(1, 20., dims[1]),
        np.linspace(1, 30., dims[2])
    ]
    XX = np.array(list(itertools.product(*X)))
    F = f(XX).reshape(dims)

    tw = DT.TensorWrapper(f, X, None)

    if F[5,10,15] == tw[5,10,15] and \
            np.all(F[1,2,:] == tw[1,2,:]) and \
            np.all(F[3:5,2:3,20:24] == tw[3:5,2:3,20:24]):
        print_ok("TTdmrgcross: Tensor Wrapper")
        nsucc += 1
    else:
        print_fail("TTdmrgcross: TensorWrapper")
        nsucc += 1

    ####
    # 1./(x+y+1) Low Rank Approximation
    ####
    maxvoleps = 1e-5
    delta = 1e-5
    eps = 1e-10

    d = 2
    size = [100] * d

    # Build up the 2d tensor wrapper
    def f(X, params):
        return 1. / (X[0] + X[1] + 1.)

    X = [
        np.linspace(0, 2 * np.pi, size[0]),
        np.linspace(0, 2 * np.pi, size[1])
    ]
    TW = DT.TensorWrapper(f, X, None)

    # Compute low rank approx
    TTapprox = DT.TTvec(TW)
    TTapprox.build(method='ttdmrgcross', eps=eps, mv_eps=maxvoleps)
    fill = TW.get_fill_level()
    crossRanks = TTapprox.ranks()
    PassedRanks = all(
        map(operator.eq, crossRanks[1:-1],
            TTapprox.ranks()[1:-1]))

    A = TW.copy()[tuple([slice(None, None, None) for i in range(len(size))])]
    FroErr = mla.norm(TTapprox.to_tensor() - A, 'fro')
    kappa = np.max(A) / np.min(
        A)  # This is slightly off with respect to the analysis
    r = np.max(TTapprox.ranks())
    epsTarget = (2. * r + kappa * r + 1.)**(np.log2(d)) * (r + 1.) * eps
    if FroErr < epsTarget:
        print_ok(
            'TTdmrgcross: 1./(x+y+1) Low Rank Approx (FroErr=%e, Fill=%.2f%%)'
            % (FroErr, 100. * np.float(fill) / np.float(TW.get_size())))
        nsucc += 1
    else:
        print_fail(
            'TTdmrgcross: 1./(x+y+1) Low Rank Approx (FroErr=%e, Fill=%.2f%%)'
            % (FroErr, 100. * np.float(fill) / np.float(TW.get_size())))
        nfail += 1

    if PLOTTING:
        # Get filled idxs
        fill_idxs = np.array(TW.get_fill_idxs())
        plt.figure()
        plt.plot(fill_idxs[:, 0], fill_idxs[:, 1], 'o')

        plt.show(block=False)

    ####
    # sin(sum(x)) TTcross Approximation
    ####
    maxvoleps = 1e-5
    delta = 1e-5
    eps = 1e-10

    d = 3
    size = [20] * d

    # Build up the tensor wrapper
    def f(X, params):
        return np.sin(np.sum(X))

    X = [np.linspace(0, 2 * np.pi, size[0])] * d
    TW = DT.TensorWrapper(f, X)

    TTapprox = DT.TTvec(TW)
    TTapprox.build(method='ttdmrgcross', eps=eps, mv_eps=maxvoleps)
    fill = TW.get_fill_level()
    crossRanks = TTapprox.ranks()
    PassedRanks = all(
        map(operator.eq, crossRanks[1:-1],
            TTapprox.ranks()[1:-1]))

    A = TW.copy()[tuple([slice(None, None, None) for i in range(len(size))])]
    FroErr = mla.norm(TTapprox.to_tensor() - A, 'fro')
    kappa = np.max(A) / np.min(
        A)  # This is slightly off with respect to the analysis
    r = np.max(TTapprox.ranks())
    epsTarget = (2. * r + kappa * r + 1.)**(np.log2(d)) * (r + 1.) * eps
    if FroErr < epsTarget:
        print_ok(
            'TTdmrgcross: sin(sum(x)) - d=3 - Low Rank Approx (FroErr=%e, Fill=%.2f%%)'
            % (FroErr, 100. * np.float(fill) / np.float(TW.get_size())))
        nsucc += 1
    else:
        print_fail(
            'TTdmrgcross: sin(sum(x)) - d=3 - Low Rank Approx (FroErr=%e, Fill=%.2f%%)'
            % (FroErr, 100. * np.float(fill) / np.float(TW.get_size())))
        nfail += 1

    if PLOTTING and d == 3:
        # Get filled idxs
        fill_idxs = np.array(TW.get_fill_idxs())
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        ax.scatter(fill_idxs[:, 0], fill_idxs[:, 1], fill_idxs[:, 2])
        ax.set_xlabel('x')
        ax.set_ylabel('y')
        ax.set_zlabel('z')

        # Get last used idxs
        last_idxs = TTapprox.get_ttdmrg_eval_idxs()
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        ax.scatter(last_idxs[:, 0], last_idxs[:, 1], last_idxs[:, 2], c='r')

        plt.show(block=False)

    ####
    # 1/(sum(x)+1) TTdmrgcross Approximation
    ####
    maxvoleps = 1e-5
    delta = 1e-5
    eps = 1e-10

    d = 5
    size = [10] * d

    # Build up the tensor wrapper
    def f(X, params):
        return 1. / (np.sum(X) + 1.)

    X = [np.linspace(0, 1, size[i]) for i in range(len(size))]
    TW = DT.TensorWrapper(f, X)

    # Compute low rank approx
    TTapprox = DT.TTvec(TW)
    TTapprox.build(method='ttdmrgcross', eps=eps, mv_eps=maxvoleps)
    fill = TW.get_fill_level()
    crossRanks = TTapprox.ranks()
    PassedRanks = all(
        map(operator.eq, crossRanks[1:-1],
            TTapprox.ranks()[1:-1]))

    A = TW.copy()[tuple([slice(None, None, None) for i in range(len(size))])]
    FroErr = mla.norm(TTapprox.to_tensor() - A, 'fro')
    MaxErr = np.max(TTapprox.to_tensor() - A)
    kappa = np.max(A) / np.min(
        A)  # This is slightly off with respect to the analysis
    r = np.max(TTapprox.ranks())
    epsTarget = (2. * r + kappa * r + 1.)**(np.log2(d)) * (r + 1.) * eps
    if FroErr < epsTarget:
        print_ok(
            'TTdmrgcross: 1/(sum(x)+1), d=%d, Low Rank Approx (FroErr=%e, MaxErr=%e, Fill=%.2f%%)'
            % (d, FroErr, MaxErr,
               100. * np.float(fill) / np.float(TW.get_size())))
        nsucc += 1
    else:
        print_fail(
            'TTdmrgcross: 1/(sum(x)+1), d=%d, Low Rank Approx (FroErr=%e, MaxErr=%e, Fill=%.2f%%)'
            % (d, FroErr, MaxErr,
               100. * np.float(fill) / np.float(TW.get_size())))
        nfail += 1

    print_summary("TTdmrgcross", nsucc, nfail)

    return (nsucc, nfail)
Example no. 6

    # Compute Fourier coefficients TT
    Vs = [P.GradVandermonde1D(X[i],size1D,0,norm=True)] * d
    TT_four = TTapprox.project(Vs,W)

    if size1D**len(nzdim) < 1e6:
        four_tens = np.zeros( tuple( [size1D+1 for i in range(len(nzdim))] ) )
        ls_tmp = [ range(size1D+1) for i in range(len(nzdim)) ]
        idx_tmp = itertools.product( *ls_tmp )
        for ii  in idx_tmp:
            ii_tt = [0]*d
            for jj, tti in enumerate(nzdim): ii_tt[tti] = ii[jj]
            ii_tt = tuple(ii_tt)
            four_tens[ii] = TT_four[ii_tt]

    print "TTcross: ranks: %s" % str( TTapprox.ranks() )
    print "TTcross: Frobenius norm TT_four:  %e" % mla.norm(TT_four,'fro')
    print "TTcross: Frobenius norm sub_tens: %e" % npla.norm(four_tens.flatten())

    # Check approximation error using MC
    MCestVarLimit = 1e-1
    MCestMinIter = 100
    MCestMaxIter = 1e4
    MCstep = 10000
    var = 1.
    dist = stats.uniform()
    DIST = RS.MultiDimDistribution([dist] * d)
    intf = []
    values = []
    while (len(values) < MCestMinIter or var > mean**2. * MCestVarLimit) and len(values) < MCestMaxIter:
        # Monte Carlo
        xx = np.asarray( DIST.rvs(MCstep) )
Example no. 7
def run(maxprocs, PLOTTING=False, loglev=logging.WARNING):
    # Test Weighted TT. Weights are uniform.

    logging.basicConfig(level=loglev)

    import numpy as np
    import numpy.linalg as npla
    import itertools
    import time

    import TensorToolbox as DT
    import TensorToolbox.multilinalg as mla

    if PLOTTING:
        from matplotlib import pyplot as plt

    nsucc = 0
    nfail = 0

    N = 16
    d = 3
    nrows = [N for i in range(d)]
    ncols = [N for i in range(d)]
    D = np.diag(-np.ones((N-1)),-1) + np.diag(-np.ones((N-1)),1) + np.diag(2*np.ones((N)),0)
    I = np.eye(N)
    Dd = np.zeros((N**d,N**d))

    for i in range(d):
        tmp = np.array([1])
        for j in range(d):
            if i != j:
                tmp = np.kron(tmp,I)
            else:
                tmp = np.kron(tmp,D)
        Dd += tmp

    if PLOTTING:
        plt.figure()
        plt.spy(Dd)
        plt.show(block=False)

    idxs = [range(N) for i in range(d)]
    MI = list(itertools.product(*idxs)) # Multi indices

    # Canonical form of n-dimensional Laplace operator
    D_flat = D.flatten()
    I_flat = I.flatten()

    # CP = np.empty((d,d,N**2),dtype=np.float64) 
    CPtmp = [] # U[i][alpha,k] = U_i(alpha,k)
    for i in range(d):
        CPi = np.empty((d,N**2))
        for alpha in range(d):
            if i != alpha:
                CPi[alpha,:] = I_flat
            else:
                CPi[alpha,:] = D_flat
        CPtmp.append(CPi)

    CP = DT.Candecomp(CPtmp)

    # Let's compare Dd[i,j] with its Canonical counterpart
    T_idx = (10,9) # Index in the tensor product repr.
    idxs = np.vstack( (np.asarray(MI[T_idx[0]]), np.asarray(MI[T_idx[1]])) ) # row 1 contains row multi-idx, row 2 contains col multi-idx for Tensor
    # Now if we take the columns of idxs we get the multi-indices for the CP.
    # Since in CP we flattened the array, compute the corresponding indices for CP.
    CP_idxs = idxs[0,:]*N + idxs[1,:]
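    # Worked example: N = 16, d = 3, T_idx = (10, 9) gives MI[10] = (0, 0, 10)
    # and MI[9] = (0, 0, 9), so CP_idxs = (0*16+0, 0*16+0, 10*16+9) = (0, 0, 169).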

    TT = DT.TTmat(CP,nrows=N,ncols=N)
    TT.build()

    if np.abs(Dd[T_idx[0],T_idx[1]] - CP[CP_idxs]) < 100.*np.spacing(1) and np.abs(CP[CP_idxs] - TT[DT.idxfold(nrows,T_idx[0]),DT.idxfold(ncols,T_idx[1])]) < 100.*np.spacing(1):
        print_ok("0.1 Weighted Tensor Test: Entry comparison (pre-rounding) FULL, CP, TT")
        nsucc += 1
    else:
        print_fail("0.1 Weighted Tensor Test: Entry comparison FULL, CP, TT")
        nfail += 1
        # print("  T      CP     TT")
        # print("%.5f  %.5f  %.5f" % (Dd[T_idx[0],T_idx[1]],CP[CP_idxs],TT[DT.idxfold(nrows,T_idx[0]),DT.idxfold(ncols,T_idx[1])]))

    # print("Space Tensor: %d" % np.prod(Dd.shape))
    # print("Space CP: %d" % CP.size())
    # print("Space TT: %d" % TT.size())

    ########################################
    # Multi-Linear Algebra
    ########################################

    # Sum by scalar
    CPa = DT.Candecomp([5.*np.ones((1,5)),np.ones((1,6)),np.ones((1,7))])
    W = [ np.ones(5)/5., np.ones(6)/6., np.ones(7)/7.]
    TTa = DT.WTTvec(CPa,W)
    TTa.build(1e-13)
    TTb = TTa + 3.
    if np.abs(TTb[3,3,3] - 8.) < 1e-12:
        print_ok("0.2 Weighted Tensor Test: TT sum by scalar")
        nsucc += 1
    else:
        print_fail("0.2 Weighted Tensor Test: TT sum by scalar", "TT[idx] + b = %e, Expected = %e" % (TTb[3,3,3],8.))
        nfail += 1

    # Diff by scalar
    CPa = DT.Candecomp([5.*np.ones((1,5)),np.ones((1,6)),np.ones((1,7))])
    W = [ np.ones(5)/5., np.ones(6)/6., np.ones(7)/7.]
    TTa = DT.WTTvec(CPa,W)
    TTa.build(1e-13)
    TTb = TTa - 3.
    if np.abs(TTb[3,3,3] - 2.) < 1e-12:
        print_ok("0.2 Weighted Tensor Test: TT diff by scalar")
        nsucc += 1
    else:
        print_fail("0.2 Weighted Tensor Test: TT diff by scalar", "TT[idx] + b = %e, Expected = %e" % (TTb[3,3,3],2.))
        nfail += 1

    # Mul by scalar
    CPa = DT.Candecomp([5.*np.ones((1,5)),np.ones((1,6)),np.ones((1,7))])
    W = [ np.ones(5)/5., np.ones(6)/6., np.ones(7)/7.]
    TTa = DT.WTTvec(CPa,W)
    TTa.build(1e-13)
    TTb = TTa * 3.
    if np.abs(TTb[3,3,3] - 15.) < 1e-12:
        print_ok("0.2 Weighted Tensor Test: TT mul by scalar")
        nsucc += 1
    else:
        print_fail("0.2 Weighted Tensor Test: TT mul by scalar", "TT[idx] + b = %e, Expected = %e" % (TTb[3,3,3],15.))
        nfail += 1

    # Div by scalar
    CPa = DT.Candecomp([15.*np.ones((1,5)),np.ones((1,6)),np.ones((1,7))])
    W = [ np.ones(5)/5., np.ones(6)/6., np.ones(7)/7.]
    TTa = DT.WTTvec(CPa,W)
    TTa.build(1e-13)
    TTb = TTa / 3.
    if np.abs(TTb[3,3,3] - 5.) < 1e-12:
        print_ok("0.2 Weighted Tensor Test: TT div by scalar")
        nsucc += 1
    else:
        print_fail("0.2 Weighted Tensor Test: TT div by scalar", "TT[idx] + b = %e, Expected = %e" % (TTb[3,3,3],5.))
        nfail += 1

    # Sum
    C = TT + TT
    if np.abs(C[DT.idxfold(nrows,T_idx[0]),DT.idxfold(ncols,T_idx[1])] - 2. * Dd[T_idx[0],T_idx[1]]) <= 2e2 * np.spacing(1):
        print_ok("0.2 Weighted Tensor Test: TT sum")
        nsucc += 1
    else:
        print_fail("0.2 Weighted Tensor Test: TT sum", "TT[idx] + TT[idx] = %.5f" % C[DT.idxfold(nrows,T_idx[0]),DT.idxfold(ncols,T_idx[1])])
        nfail += 1

    C = TT * TT
    if np.abs(C[DT.idxfold(nrows,T_idx[0]),DT.idxfold(ncols,T_idx[1])] - Dd[T_idx[0],T_idx[1]]**2.) <= 10.*np.spacing(1):
        print_ok("0.3 Weighted Tensor Test: TT mul")
        nsucc += 1
    else:
        print_fail("0.3 Weighted Tensor Test: TT mul", "TT[idx] * TT[idx] = %.5f" % C[DT.idxfold(nrows,T_idx[0]),DT.idxfold(ncols,T_idx[1])])
        nfail += 1

    # C *= (C+TT)
    # if  C[DT.idxfold(nrows,T_idx[0]),DT.idxfold(ncols,T_idx[1])] == Dd[T_idx[0],T_idx[1]]**2. * (Dd[T_idx[0],T_idx[1]]**2.+Dd[T_idx[0],T_idx[1]]):
    #     print_ok("0.4 Weighted Tensor Test: TT operations")
    # else:
    #     print_fail("0.4 Weighted Tensor Test: TT operations", "(TT*TT)*(TT*TT+TT) = %.5f" % C[DT.idxfold(nrows,T_idx[0]),DT.idxfold(ncols,T_idx[1])])

    if np.abs(npla.norm(Dd,ord='fro')-mla.norm(TT,ord='fro')) < TT.size() * 100.*np.spacing(1):
        print_ok("0.5 Weighted Tensor Test: Frobenius norm (pre-rounding) FULL, TT")
        nsucc += 1
    else:
        print_fail("0.5 Weighted Tensor Test: Frobenius norm (pre-rounding) FULL, TT",
                   "                  T          TT\n"\
                       "Frobenius norm  %.5f         %.5f" % (npla.norm(Dd,ord='fro'), DT.norm(TT,ord='fro')))
        nfail += 1

    #######################################
    # Check TT-SVD
    #######################################

    # Construct tensor form of Dd
    Dd_flat = np.zeros((N**(2*d)))
    for i in range(d):
        tmp = np.array([1])
        for j in range(d):
            if i != j:
                tmp = np.kron(tmp,I_flat)
            else:
                tmp = np.kron(tmp,D_flat)
        Dd_flat += tmp

    Dd_tensor = Dd_flat.reshape([N**2 for j in range(d)])

    TT_tensor = TT.to_tensor()

    # From Dd_tensor obtain a TT representation with accuracy eps
    eps = 0.001
    TT_svd = DT.TTmat(Dd_tensor,nrows=N,ncols=N)
    TT_svd.build(eps=eps)

    if np.abs(Dd[T_idx[0],T_idx[1]] - TT_svd[DT.idxfold(nrows,T_idx[0]),DT.idxfold(ncols,T_idx[1])]) < d * 100.*np.spacing(1):
        print_ok("0.6 Weighted Tensor Test: Entry comparison FULL, TT-svd")
        nsucc += 1
    else:
        print_fail("0.6 Weighted Tensor Test: Entry comparison FULL, TT-svd","  T - TT-svd = %e" % np.abs(Dd[T_idx[0],T_idx[1]] - TT_svd[DT.idxfold(nrows,T_idx[0]),DT.idxfold(ncols,T_idx[1])]))
        nfail += 1

    Dd_norm = npla.norm(Dd,ord='fro')
    TT_svd_norm = mla.norm(TT_svd,ord='fro')
    if np.abs(Dd_norm - TT_svd_norm) < eps * Dd_norm:
        print_ok("0.6 Weighted Tensor Test: Frobenius norm FULL, TT-svd")
        nsucc += 1
    else:
        print_fail("0.6 Weighted Tensor Test: Frobenius norm FULL, TT-svd",
                   "                  T          TT_svd\n"\
                       "Frobenius norm  %.5f         %.5f" % (npla.norm(Dd,ord='fro'), mla.norm(TT_svd,ord='fro')))
        nfail += 1

    #######################################
    # Check TT-SVD with kron prod
    #######################################

    # Construct tensor form of Dd
    Dd = np.zeros((N**d,N**d))
    for i in range(d):
        tmp = np.array([1])
        for j in range(d):
            if i != j:
                tmp = np.kron(tmp,I)
            else:
                tmp = np.kron(tmp,D)
        Dd += tmp

    Dd_tensor = DT.matkron_to_mattensor(Dd,[N for i in range(d)],[N for i in range(d)])

    TT_tensor = TT.to_tensor()

    # From Dd_tensor obtain a TT representation with accuracy eps
    eps = 0.001
    TT_svd = DT.TTmat(Dd_tensor,nrows=N,ncols=N)
    TT_svd.build(eps=eps)

    if np.abs(Dd[T_idx[0],T_idx[1]] - TT_svd[DT.idxfold(nrows,T_idx[0]),DT.idxfold(ncols,T_idx[1])]) < d * 100.*np.spacing(1):
        print_ok("0.7 Weighted Tensor Test: Entry comparison FULL, TT-svd-kron")
        nsucc += 1
    else:
        print_fail("0.7 Weighted Tensor Test: Entry comparison FULL, TT-svd-kron",
                   "  T - TT-svd = %e" % np.abs(Dd[T_idx[0],T_idx[1]]-TT_svd[DT.idxfold(nrows,T_idx[0]),DT.idxfold(ncols,T_idx[1])]))
        nfail += 1

    Dd_norm = npla.norm(Dd,ord='fro')
    TT_svd_norm = mla.norm(TT_svd,ord='fro')
    if np.abs(Dd_norm - TT_svd_norm) < eps * Dd_norm:
        print_ok("0.7 Weighted Tensor Test: Frobenius norm FULL, TT-svd-kron")
        nsucc += 1
    else:
        print_fail("0.7 Weighted Tensor Test: Frobenius norm FULL, TT-svd-kron",
                   "                  T          TT_svd\n"\
                       "Frobenius norm  %.5f         %.5f" % (npla.norm(Dd,ord='fro'), mla.norm(TT_svd,ord='fro')))
        nfail += 1

    #######################################
    # Check TT-rounding
    #######################################
    TT_round = TT.copy()
    eps = 0.001
    TT_round.rounding(eps)

    if np.abs(TT_round[DT.idxfold(nrows,T_idx[0]),DT.idxfold(ncols,T_idx[1])] - TT_svd[DT.idxfold(nrows,T_idx[0]),DT.idxfold(ncols,T_idx[1])]) < d * 100.*np.spacing(1):
        print_ok("0.8 Weighted Tensor Test: Entry comparison (post-rounding) TT-svd, TT-round")
        nsucc += 1
    else:
        print_fail("0.8 Weighted Tensor Test: Entry comparison  (post-rounding) TT-svd, TT-round",
               "  T-svd - TT-round = %e" % np.abs(TT_svd[DT.idxfold(nrows,T_idx[0]),DT.idxfold(ncols,T_idx[1])] - TT_round[DT.idxfold(nrows,T_idx[0]),DT.idxfold(ncols,T_idx[1])]))
        nfail += 1

    Dd_norm = npla.norm(Dd,ord='fro')
    TT_svd_norm = mla.norm(TT_svd,ord='fro')
    TT_round_norm = mla.norm(TT_round,ord='fro')
    if np.abs(Dd_norm - TT_svd_norm) < eps * Dd_norm and np.abs(TT_svd_norm - TT_round_norm) < eps * Dd_norm:
        print_ok("0.8 Weighted Tensor Test: Frobenius norm (post-rounding) FULL, TT-svd, TT-round")
        nsucc += 1
    else:
        print_fail("0.8 Weighted Tensor Test: Frobenius norm (post-rounding) FULL, TT-svd, TT-round",
                   "                  T          TT_svd         TT_rounding\n"\
                       "Frobenius norm  %.5f         %.5f           %.5f" % (Dd_norm, TT_svd_norm, TT_round_norm))
        nfail += 1

    print_summary("WTT Algebra", nsucc, nfail)
    
    return (nsucc,nfail)
Example no. 8
def run(maxprocs, PLOTTING=False, loglev=logging.WARNING):

    import numpy.linalg as npla

    logging.basicConfig(level=loglev)

    if PLOTTING:
        from matplotlib import pyplot as plt

    nsucc = 0
    nfail = 0

    ################ TEST 1 ###########################
    # Test folding/unfolding index function
    sys.stdout.write("Test folding/unfolding index function\r")
    sys.stdout.flush()

    dlist = (4, 2, 8)
    base = 2
    dfold = [base for i in range(int(np.log(np.prod(dlist)) / np.log(base)))]
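    # prod(dlist) = 4 * 2 * 8 = 64 = 2**6, so dfold = [2, 2, 2, 2, 2, 2].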
    A = np.arange(64).reshape(dlist)
    Aflat = A.flatten()
    Arsh = A.reshape(dfold)
    test = True
    err = []
    for i in range(dlist[0]):
        for j in range(dlist[1]):
            for k in range(dlist[2]):
                idxs = (i, j, k)
                val = (A[idxs] == Aflat[DT.idxunfold(dlist, idxs)]
                       and A[idxs] == Arsh[DT.idxfold(
                           dfold, DT.idxunfold(dlist, idxs))])
                if not val:
                    err.append(idxs)
                    test = False

    if test:
        print_ok("Test folding/unfolding index function")
        nsucc += 1
    else:
        print_fail("Test folding/unfolding index function")
        nfail += 1
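
    # Hedged aside (illustrative): for C-ordered arrays, DT.idxunfold and
    # DT.idxfold appear to mirror numpy's ravel_multi_index / unravel_index:
    flat = np.ravel_multi_index((3, 1, 5), dlist)      # cf. DT.idxunfold(dlist, (3, 1, 5))
    assert np.unravel_index(flat, dlist) == (3, 1, 5)  # cf. DT.idxfold(dlist, flat)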

    ################ TEST 2.1 ###########################
    # Test exponential N-dimensional vector (2^6 points)
    sys.stdout.write("Test exponential N-dimensional vector (2^6 points)\r")
    sys.stdout.flush()

    z = 2.
    q = 2
    L = 6
    N = q**L
    X = z**np.arange(N)

    TT = DT.QTTvec(X)
    TT.build()
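    # With i = sum_k i_k * 2**(L-k), z**i factorizes digit-by-digit into
    # prod_k z**(i_k * 2**(L-k)), so every QTT rank of this vector is exactly 1.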

    if TT.ranks() == [1 for i in range(L + 1)]:
        print_ok("Test exponential N-dimensional vector (2^6 points)")
        nsucc += 1
    else:
        print_fail("Test exponential N-dimensional vector (2^6 points)")
        nfail += 1

    ################ TEST 2.1b ###########################
    # Test exponential N-dimensional vector (28 points)
    sys.stdout.write("Test exponential N-dimensional vector (28 points)\r")
    sys.stdout.flush()

    z = 2.
    N = 28
    X = z**np.arange(N)
    eps = 1e-6

    TT = DT.QTTvec(X)
    TT.build(eps)

    L2err = npla.norm(TT.to_tensor() - X)
    if L2err <= eps:
        print_ok("Test exponential N-dimensional vector (28 points)")
        nsucc += 1
    else:
        print_fail(
            "Test exponential N-dimensional vector (28 points): L2err=%e" %
            L2err)
        nfail += 1

    ################ TEST 2.2 ###########################
    # Test sum of exponential N-dimensional vector
    sys.stdout.write("Test sum of exponential N-dimensional vector\r")
    sys.stdout.flush()

    import numpy.random as npr
    R = 3
    z = npr.rand(R)
    c = npr.rand(R)
    q = 2
    L = 8
    N = q**L
    X = np.dot(c, np.tile(z, (N, 1)).T**np.tile(np.arange(N), (R, 1)))

    TT = DT.QTTvec(X)
    TT.build()
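    # X[n] = sum_r c_r * z_r**n is a sum of R rank-1 exponentials (cf. Test 2.1),
    # so all QTT ranks are expected to be <= R, which the check below verifies.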

    if np.max(TT.ranks()) <= R:
        print_ok("Test sum of exponential N-dimensional vector")
        nsucc += 1
    else:
        print_fail("Test sum of exponential N-dimensional vector")
        nfail += 1

    ################ TEST 2.3 ###########################
    # Test sum of trigonometric N-dimensional vector
    sys.stdout.write("Test sum of trigonometric N-dimensional vector\r")
    sys.stdout.flush()

    import numpy.random as npr
    R = 3
    a = npr.rand(R)
    c = npr.rand(R)
    q = 2
    L = 8
    N = q**L
    X = np.dot(c, np.sin(np.tile(a, (N, 1)).T * np.tile(np.arange(N), (R, 1))))

    TT = DT.QTTvec(X)
    TT.build()
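    # Each sine splits into two exponentials, sin(a_r*n) = (e^{i a_r n} - e^{-i a_r n})/(2i),
    # so a sum of R sines is expected to have QTT ranks <= 2R.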

    if np.max(TT.ranks()) <= 2 * R:
        print_ok("Test sum of trigonometric N-dimensional vector")
        nsucc += 1
    else:
        print_fail("Test sum of trigonometric N-dimensional vector")
        nfail += 1

    ################ TEST 2.4 ###########################
    # Test sum of exponential-trigonometric N-dimensional vector
    sys.stdout.write(
        "Test sum of exponential-trigonometric N-dimensional vector\r")
    sys.stdout.flush()

    import numpy.random as npr
    R = 3
    a = npr.rand(R)
    z = npr.rand(R)
    c = npr.rand(R)
    q = 2
    L = 8
    N = q**L
    X1 = np.tile(z, (N, 1)).T**np.tile(np.arange(N), (R, 1))
    X2 = np.sin(np.tile(z, (N, 1)).T * np.tile(np.arange(N), (R, 1)))
    X = np.dot(c, X1 * X2)

    TT = DT.QTTvec(X)
    TT.build()

    if np.max(TT.ranks()) <= 2 * R:
        print_ok("Test sum of exponential-trigonometric N-dimensional vector")
        nsucc += 1
    else:
        print_fail(
            "Test sum of exponential-trigonometric N-dimensional vector")
        nfail += 1

    ################ TEST 2.4b ###########################
    # Test Chebyshev polynomial vector
    sys.stdout.write("Test Chebyshev polynomial vector\r")
    sys.stdout.flush()

    from SpectralToolbox import Spectral1D as S1D
    P = S1D.Poly1D(S1D.JACOBI, [-0.5, -0.5])
    q = 2
    L = 8
    N = q**L
    (x, w) = P.GaussQuadrature(N - 1)
    X = P.GradEvaluate(x, N - 1, 0).flatten()

    TT = DT.QTTvec(X)
    TT.build()

    if np.max(TT.ranks()) <= 2:
        print_ok("Test Chebyshev polynomial vector")
        nsucc += 1
    else:
        print_fail("Test Chebyshev polynomial vector")
        nfail += 1

    ################ TEST 2.5 ###########################
    # Test generic polynomial equidistant vector
    sys.stdout.write("Test generic polynomial equidistant vector\r")
    sys.stdout.flush()

    from SpectralToolbox import Spectral1D as S1D
    import numpy.random as npr
    R = 100
    c = npr.rand(R + 1) - 0.5
    q = 2
    L = 16
    N = q**L
    x = np.linspace(-1, 1, N)

    X = np.dot(c, np.tile(x, (R + 1, 1))**np.tile(np.arange(R + 1), (N, 1)).T)

    TT = DT.QTTvec(X)
    TT.build(eps=1e-6)

    if np.max(TT.ranks()) <= R + 1:
        print_ok("Test generic polynomial (ord=%d) equidistant vector" % R)
        nsucc += 1
    else:
        print_fail("Test generic polynomial (ord=%d) equidistant vector" % R)
        nfail += 1

    ################ TEST 2.6 ###########################
    # Test 1/(1+25x^2) Chebyshev vector
    sys.stdout.write("Test 1/(1+25x^2) Cheb vector\r")
    sys.stdout.flush()

    TT_eps = 1e-6
    from SpectralToolbox import Spectral1D as S1D
    P = S1D.Poly1D(S1D.JACOBI, [-0.5, -0.5])
    q = 2
    L = 16
    N = q**L
    (x, w) = P.GaussQuadrature(N - 1)
    X = 1. / (1. + 25. * x**2.)

    TT = DT.QTTvec(X)
    TT.build(eps=1e-6)

    import numpy.linalg as npla
    V = P.GradVandermonde1D(x, 60, 0)
    (xhat, res, rnk, s) = npla.lstsq(V,
                                     X)  # Polynomial approximation is better

    print_ok(
        "Test 1/(1+25x^2) Cheb vector: Max-rank = %d, Size = %d, Poly-int res = %e"
        % (np.max(TT.ranks()), TT.size(), res))
    nsucc += 1

    # ################ TEST 2.7 ###########################
    # # Test discontinuos function N-dimensional vector
    # sys.stdout.write("Test discontinuous vector\r")
    # sys.stdout.flush()

    # TT_eps = 1e-6
    # from SpectralToolbox import Spectral1D as S1D
    # P = S1D.Poly1D(S1D.JACOBI,[-0.5,-0.5])
    # q = 2
    # L = 16
    # N = q**L
    # (x,w) = P.GaussQuadrature(N-1)
    # X = (x<-0.1).astype(float) - (x>0.1).astype(float)

    # TT = DT.QTTvec(X,q,eps=1e-6)

    # import numpy.linalg as npla
    # V = P.GradVandermonde1D(x,TT.size(),0)
    # (xhat,res,rnk,s) = npla.lstsq(V,X) # Polynomial approximation is better

    # print_ok("Test discontinuous vector: Max-rank = %d, Size = %d, Eps = %e, Poly-int res = %e" % (np.max(TT.ranks()),TT.size(),TT_eps,res))

    ################# TEST 3.1 ##########################
    # Test d-dimensional Laplace operator
    # Scaling of storage for d-dimensional Laplace operator:
    # 1) Full tensor product: N^(2d)
    # 2) Sparse tensor product: ~ (3N)^d
    # 3) QTT format: 1D -> max-rank = 3: ~ 3*4*3*log2(N)
    #                dD -> max-rank = 4: ~ d*4*4*4*log2(N)
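
    # Hedged back-of-envelope check of the counts quoted above, for the
    # N = 2**5, d = 4 used below (illustrative variable names):
    N_, d_ = 2**5, 4
    full_entries = N_**(2 * d_)                  # 1) full tensor product
    sparse_entries = (3. * N_)**d_               # 2) sparse tensor product
    qtt_entries = d_ * 4 * 4 * 4 * np.log2(N_)   # 3) max-rank-4 QTT cores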

    d = 4
    span = np.array([0., 1.])
    q = 2
    L = 5
    N = q**L
    h = 1 / float(N - 1)
    TT_round = 1e-13

    D = -1. / h**2. * (np.diag(np.ones(
        (N - 1)), -1) + np.diag(np.ones(
            (N - 1)), 1) + np.diag(-2. * np.ones((N)), 0))
    #D[0,0:2] = np.array([1.,0.])
    #D[-1,-2:] = np.array([0.,1.])

    D_tensor = DT.matkron_to_mattensor(D, nrows=N, ncols=N)
    TT_D = DT.QTTmat(D_tensor, base=q, nrows=N, ncols=N)
    TT_D.build(eps=TT_round)

    I = np.eye(N)
    I_tensor = DT.matkron_to_mattensor(I, nrows=N, ncols=N)
    TT_I = DT.QTTmat(I_tensor, base=q, nrows=N, ncols=N)
    TT_I.build(eps=TT_round)

    tt_list = []
    for i in range(d):
        if i == 0: tmp = TT_D.copy()
        else: tmp = TT_I.copy()
        for j in range(1, d):
            if i == j: tmp.kron(TT_D)
            else: tmp.kron(TT_I)
        tt_list.append(tmp)
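
    # The sum below assembles the d-term kron sum  sum_i I x ... x D x ... x I
    # (D in slot i); the rounding step then compresses the summed TT
    # representation back to the modest QTT ranks quoted above (max-rank ~4).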

    TT_Dxy = np.sum(tt_list).rounding(TT_round)

    if d == 2 and N <= 8:
        sys.stdout.write("Test 2-dimensional laplace from kron of 1D QTTmat\r")
        sys.stdout.flush()

        Dd = np.zeros((N**d, N**d))
        for i in range(d):
            tmp = np.array([1])
            for j in range(d):
                if i != j:
                    tmp = np.kron(tmp, I)
                else:
                    tmp = np.kron(tmp, D)
            Dd += tmp

        # Check equality with Dd
        nrows = [N for i in range(d)]
        ncols = [N for i in range(d)]
        err = []
        test = True
        for i in range(N**d):
            for j in range(N**d):
                sys.stdout.write("i = %d, j = %d \r" % (i, j))
                sys.stdout.flush()
                if np.abs(Dd[i, j] - TT_Dxy[DT.idxfold(nrows, i),
                                            DT.idxfold(ncols, j)]) > TT_round:
                    err.append((i, j))
                    test = False

        if test:
            print_ok("Test 2-dimensional laplace from kron of 1D QTTmat")
            nsucc += 1
        else:
            print_fail("Test 2-dimensional laplace from kron of 1D QTTmat")
            nfail += 1

    ################# TEST 3.2 ######################################
    # Test 2-dimensional Laplace operator from full tensor product

    if d == 2 and N <= 8:
        sys.stdout.write("Test 2-dimensional laplace from full kron product\r")
        sys.stdout.flush()

        Dd = np.zeros((N**d, N**d))
        for i in range(d):
            tmp = np.array([1])
            for j in range(d):
                if i != j:
                    tmp = np.kron(tmp, I)
                else:
                    tmp = np.kron(tmp, D)
            Dd += tmp

        Dd_tensor = DT.matkron_to_mattensor(Dd,
                                            nrows=[N for i in range(d)],
                                            ncols=[N for i in range(d)])
        TT_Dxykron = DT.QTTmat(Dd_tensor,
                               base=q,
                               nrows=[N for i in range(d)],
                               ncols=[N for i in range(d)])
        TT_Dxykron.build()

        # Check equality with Dd
        nrows = [N for i in range(d)]
        ncols = [N for i in range(d)]
        err = []
        test = True
        for i in range(N**d):
            for j in range(N**d):
                sys.stdout.write("i = %d, j = %d \r" % (i, j))
                sys.stdout.flush()
                if np.abs(Dd[i, j] -
                          TT_Dxykron[DT.idxfold(nrows, i),
                                     DT.idxfold(ncols, j)]) > TT_round:
                    err.append((i, j))
                    test = False

        if test:
            print_ok("Test 2-dimensional laplace from full kron product")
            nsucc += 1
        else:
            print_fail("Test 2-dimensional laplace from full kron product")
            nfail += 1

    ################# TEST 4.0 #########################################
    # Solve the d-dimensional Dirichlet-Poisson equation using full matrices
    # Use Conjugate-Gradient method

    d = 3
    span = np.array([0., 1.])
    q = 2
    L = 4
    N = q**L
    h = 1 / float(N - 1)
    X = np.linspace(span[0], span[1], N)
    eps_cg = 1e-13

    sys.stdout.write("%d-dim Dirichlet-Poisson problem FULL with CG\r" % d)
    sys.stdout.flush()

    try:
        # Construct d-D Laplace (with 2nd order finite diff)
        D = -1. / h**2. * (np.diag(np.ones(
            (N - 1)), -1) + np.diag(np.ones(
                (N - 1)), 1) + np.diag(-2. * np.ones((N)), 0))
        D[0, 0:2] = np.array([1., 0.])
        D[-1, -2:] = np.array([0., 1.])
        D_sp = sp.coo_matrix(D)
        I_sp = sp.identity(N)
        I = np.eye(N)
        FULL_LAP = sp.coo_matrix((N**d, N**d))
        for i in range(d):
            tmp = sp.identity((1))
            for j in range(d):
                if i != j: tmp = sp.kron(tmp, I_sp)
                else: tmp = sp.kron(tmp, D_sp)
            FULL_LAP = FULL_LAP + tmp
    except MemoryError:
        print("FULL CG: Memory Error")
        dofull = False

    # Construct Right hand-side (b=1, Dirichlet BC = 0)
    b1D = np.ones(N)
    b1D[0] = 0.
    b1D[-1] = 0.
    tmp = np.array([1.])
    for j in range(d):
        tmp = np.kron(tmp, b1D)
    FULL_b = tmp

    # Solve full system using spla.cg
    (FULL_RES, FULL_CONV) = spla.cg(FULL_LAP, FULL_b, tol=eps_cg)

    if PLOTTING and d == 2:
        from mpl_toolkits.mplot3d import Axes3D
        from matplotlib import cm
        X = np.linspace(span[0], span[1], N)
        (XX, YY) = np.meshgrid(X, X)
        fig = plt.figure(figsize=(14, 10))

    ################# TEST 4.1 #########################################
    # Solve the 2-dimensional Dirichlet-Poisson equation using QTTmat and QTTvec
    # Use Conjugate-Gradient method

    sys.stdout.write(
        "%d-dim Dirichlet-Poisson problem QTTmat,QTTvec with CG\r" % d)
    sys.stdout.flush()

    TT_round = 1e-8
    eps_cg = 1e-3

    # Laplace operator
    D = -1. / h**2. * (np.diag(np.ones(
        (N - 1)), -1) + np.diag(np.ones(
            (N - 1)), 1) + np.diag(-2. * np.ones((N)), 0))
    D[0, 0:2] = np.array([1., 0.])
    D[-1, -2:] = np.array([0., 1.])

    D_tensor = DT.matkron_to_mattensor(D, nrows=N, ncols=N)
    TT_D = DT.QTTmat(D_tensor, base=q, nrows=N, ncols=N)
    TT_D.build(eps=TT_round)

    I = np.eye(N)
    I_tensor = DT.matkron_to_mattensor(I, nrows=N, ncols=N)
    TT_I = DT.QTTmat(I_tensor, base=q, nrows=N, ncols=N)
    TT_I.build(eps=TT_round)

    tt_list = []
    for i in range(d):
        if i == 0: tmp = TT_D.copy()
        else: tmp = TT_I.copy()
        for j in range(1, d):
            if i == j: tmp.kron(TT_D)
            else: tmp.kron(TT_I)
        tt_list.append(tmp)

    TT_Dxy = np.sum(tt_list).rounding(TT_round)

    # Right hand side
    b1D = np.ones(N)
    b1D[0] = 0.
    b1D[-1] = 0.

    B = np.array([1.])
    for j in range(d):
        B = np.kron(B, b1D)
    B = np.reshape(B, [N for i in range(d)])

    TT_B = DT.QTTvec(B)
    TT_B.build(TT_round)

    # Solve QTT cg
    x0 = DT.QTTzerosvec(d=d, N=N, base=q)

    cg_start = time.clock()
    (TT_RES, TT_conv, TT_info) = mla.cg(TT_Dxy,
                                        TT_B,
                                        x0=x0,
                                        eps=eps_cg,
                                        ext_info=True,
                                        eps_round=TT_round)
    cg_stop = time.clock()

    L2err = mla.norm(
        TT_RES.to_tensor().reshape([N for i in range(d)]) -
        FULL_RES.reshape([N for i in range(d)]), 'fro')
    if L2err < eps_cg:
        print_ok(
            "%d-dim Dirichlet-Poisson problem QTTmat,QTTvec with CG      [PASSED] Time: %.10f\n"
            % (d, cg_stop - cg_start))
        nsucc += 1
    else:
        print_fail(
            "%d-dim Dirichlet-Poisson problem QTTmat,QTTvec with CG      [FAILED] L2err: %.e\n"
            % (d, L2err))
        nfail += 1

    if PLOTTING and d == 2:
        # Plot function
        ax = fig.add_subplot(321, projection='3d')
        ax.plot_surface(XX,
                        YY,
                        TT_RES.to_tensor().reshape((N, N)),
                        rstride=1,
                        cstride=1,
                        cmap=cm.coolwarm,
                        linewidth=0,
                        antialiased=False)
        ax = fig.add_subplot(322, projection='3d')
        ax.plot_surface(XX,
                        YY,
                        np.abs(TT_RES.to_tensor().reshape((N, N)) -
                               FULL_RES.reshape((N, N))),
                        rstride=1,
                        cstride=1,
                        cmap=cm.coolwarm,
                        linewidth=0,
                        antialiased=False)
        plt.show(block=False)

    ################# TEST 4.2 #########################################
    # Solve the 2-dimensional Dirichlet-Poisson equation using QTTmat and np.ndarray
    # Use Conjugate-Gradient method

    sys.stdout.write(
        "%d-dim Dirichlet-Poisson problem QTTmat,ndarray with CG\r" % d)
    sys.stdout.flush()

    TT_round = 1e-8
    eps_cg = 1e-3

    # Laplace operator
    D = -1. / h**2. * (np.diag(np.ones(
        (N - 1)), -1) + np.diag(np.ones(
            (N - 1)), 1) + np.diag(-2. * np.ones((N)), 0))
    D[0, 0:2] = np.array([1., 0.])
    D[-1, -2:] = np.array([0., 1.])

    D_tensor = DT.matkron_to_mattensor(D, nrows=N, ncols=N)
    TT_D = DT.QTTmat(D_tensor, base=q, nrows=N, ncols=N)
    TT_D.build(eps=TT_round)

    I = np.eye(N)
    I_tensor = DT.matkron_to_mattensor(I, nrows=N, ncols=N)
    TT_I = DT.QTTmat(I_tensor, base=q, nrows=N, ncols=N)
    TT_I.build(eps=TT_round)

    tt_list = []
    for i in range(d):
        if i == 0: tmp = TT_D.copy()
        else: tmp = TT_I.copy()
        for j in range(1, d):
            if i == j: tmp.kron(TT_D)
            else: tmp.kron(TT_I)
        tt_list.append(tmp)

    TT_Dxy = np.sum(tt_list).rounding(TT_round)

    # Right hand side
    b1D = np.ones(N)
    b1D[0] = 0.
    b1D[-1] = 0.

    B = np.array([1.])
    for j in range(d):
        B = np.kron(B, b1D)
    B = np.reshape(B, [N for i in range(d)])
    B = np.reshape(B, [q for i in range(d * L)])
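    # Quantics folding: with N = q**L, every index i in [0, N) is expanded
    # into its L base-q digits, so the d-dimensional N**d array becomes a
    # (d*L)-dimensional array with mode size q (a C-order reshape does
    # exactly this digit split). A minimal 1D sketch (illustration only):
    #
    #   v = np.arange(2**4, dtype=float)   # N = 16, q = 2, L = 4
    #   v_qtt = v.reshape([2] * 4)         # one binary mode per bit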

    # Solve QTT cg
    x0 = np.zeros([q for i in range(d * L)])

    cg_start = time.perf_counter()
    (ARR_RES, TT_conv, TT_info1) = mla.cg(TT_Dxy,
                                          B,
                                          x0=x0,
                                          eps=eps_cg,
                                          ext_info=True,
                                          eps_round=TT_round)
    cg_stop = time.perf_counter()

    L2err = mla.norm(
        ARR_RES.reshape([N for i in range(d)]) -
        FULL_RES.reshape([N for i in range(d)]), 'fro')
    if L2err < eps_cg:
        print_ok(
            "%d-dim Dirichlet-Poisson problem QTTmat,ndarray with CG     [PASSED] Time: %.10f"
            % (d, cg_stop - cg_start))
        nsucc += 1
    else:
        print_fail(
            "%d-dim Dirichlet-Poisson problem QTTmat,ndarray with CG     [FAILED] L2err: %.e"
            % (d, L2err))
        nfail += 1

    if PLOTTING and d == 2:
        # Plot function
        ax = fig.add_subplot(323, projection='3d')
        ax.plot_surface(XX,
                        YY,
                        ARR_RES.reshape((N, N)),
                        rstride=1,
                        cstride=1,
                        cmap=cm.coolwarm,
                        linewidth=0,
                        antialiased=False)
        ax = fig.add_subplot(324, projection='3d')
        ax.plot_surface(
            XX,
            YY,
            np.abs(ARR_RES.reshape((N, N)) - FULL_RES.reshape((N, N))),
            rstride=1,
            cstride=1,
            cmap=cm.coolwarm,
            linewidth=0,
            antialiased=False)
        plt.show(block=False)

    # ################# TEST 4.3 #########################################
    # # Solve the d-dimensional Dirichlet-Poisson equation using QTTmat and np.ndarray
    # # Use the Preconditioned Conjugate-Gradient method

    # sys.stdout.write("%d-dim Dirichlet-Poisson problem QTTmat,ndarray with Prec-CG\r" % d)
    # sys.stdout.flush()

    # TT_round = 1e-8
    # eps_cg = 1e-3

    # # Laplace operator
    # D = -1./h**2. * ( np.diag(np.ones((N-1)),-1) + np.diag(np.ones((N-1)),1) + np.diag(-2.*np.ones((N)),0) )
    # D[0,0:2] = np.array([1.,0.])
    # D[-1,-2:] = np.array([0.,1.])

    # D_tensor = DT.matkron_to_mattensor(D,nrows=N,ncols=N)
    # TT_D = DT.QTTmat(D_tensor, base=q,nrows=N,ncols=N,eps=TT_round)

    # I = np.eye(N)
    # I_tensor = DT.matkron_to_mattensor(I,nrows=N,ncols=N)
    # TT_I = DT.QTTmat(I_tensor,base=q,nrows=N,ncols=N,eps=TT_round)

    # tt_list = []
    # for i in range(d):
    #     if i == 0: tmp = TT_D.copy()
    #     else: tmp = TT_I.copy()
    #     for j in range(1,d):
    #         if i == j: tmp.kron(TT_D)
    #         else: tmp.kron(TT_I)
    #     tt_list.append(tmp)

    # TT_Dxy = np.sum(tt_list).rounding(TT_round)

    # # Construct Preconditioner using Newton-iterations
    # TT_II = TT_I.copy()
    # for j in range(1,d): TT_II.kron(TT_I)
    # alpha = 1e-6
    # TT_Pround = 1e-4
    # TT_P = alpha*TT_II
    # eps = mla.norm(TT_II-mla.dot(TT_Dxy,TT_P),'fro')/mla.norm(TT_II,'fro')
    # i = 0
    # while eps > 5.*1e-1:
    #     i += 1
    #     TT_P  = (2. * TT_P - mla.dot(TT_P,mla.dot(TT_Dxy,TT_P).rounding(TT_Pround)).rounding(TT_Pround)).rounding(TT_Pround)
    #     eps = mla.norm(TT_II-mla.dot(TT_Dxy,TT_P),'fro')/mla.norm(TT_II,'fro')
    #     sys.stdout.write("\033[K")
    #     sys.stdout.write("Prec: err=%e, iter=%d\r" % (eps,i))
    #     sys.stdout.flush()
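    # # The loop above is the Newton-Schulz (Hotelling) iteration for the
    # # matrix inverse: P_{k+1} = 2*P_k - P_k*A*P_k converges quadratically
    # # to A^{-1} provided ||I - A*P_0|| < 1, which the small starting scale
    # # alpha is meant to ensure. Each product is rounded back to TT_Pround
    # # to keep the preconditioner ranks bounded.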

    # # Right hand side
    # b1D = np.ones(N)
    # b1D[0] = 0.
    # b1D[-1] = 0.

    # B = np.array([1.])
    # for j in range(d):
    #     B = np.kron(B,b1D)
    # B = np.reshape(B,[N for i in range(d)])
    # B = np.reshape(B,[q for i in range(d*L)])

    # # Solve QTT cg
    # x0 = np.zeros([q for i in range(d*L)])

    # # Precondition
    # TT_DP = mla.dot(TT_P,TT_Dxy).rounding(TT_round)
    # BP = mla.dot(TT_P,B)

    # cg_start = time.perf_counter()
    # (ARR_RES,TT_conv,TT_info) = mla.cg(TT_DP,BP,x0=x0,eps=eps_cg,ext_info=True,eps_round=TT_round)
    # cg_stop = time.perf_counter()

    # L2err = mla.norm(ARR_RES.reshape([N for i in range(d)])-FULL_RES.reshape([N for i in range(d)]), 'fro')
    # if L2err  < eps_cg:
    #     print_ok("%d-dim Dirichlet-Poisson problem QTTmat,ndarray with Prec-CG [PASSED] Time: %.10f" % (d, cg_stop-cg_start))
    # else:
    #     print_fail("%d-dim Dirichlet-Poisson problem QTTmat,ndarray with Prec-CG [FAILED] L2err: %.e" % (d,L2err))

    # if PLOTTING and d == 2:
    #     # Plot function
    #     ax = fig.add_subplot(325,projection='3d')
    #     ax.plot_surface(XX,YY,ARR_RES.reshape((N,N)),rstride=1, cstride=1, cmap=cm.coolwarm,
    #                     linewidth=0, antialiased=False)
    #     ax = fig.add_subplot(326,projection='3d')
    #     ax.plot_surface(XX,YY,np.abs(ARR_RES.reshape((N,N))-FULL_RES.reshape((N,N))),rstride=1, cstride=1, cmap=cm.coolwarm,
    #                     linewidth=0, antialiased=False)
    #     plt.show(block=False)

    print_summary("QTT", nsucc, nfail)

    return (nsucc, nfail)
Example n. 9
def run(maxprocs, PLOTTING=False, loglev=logging.WARNING):

    logging.basicConfig(level=loglev)

    if PLOTTING:
        import matplotlib.pyplot as plt
        from matplotlib import cm
        from mpl_toolkits.mplot3d import Axes3D

    nsucc = 0
    nfail = 0

    ####
    # exp(-|X-X0|^2/(2*l^2)) Low Rank Approximation
    ####
    maxvoleps = 1e-5
    delta = 1e-5
    eps = 1e-10

    d = 2
    size = (32, 32)  # 1024 points

    # Build up the 2d tensor wrapper
    X0 = np.array([0.2, 0.2])
    l = 0.05
    params = {'X0': X0, 'l': l}

    def f(X, params):
        return np.exp(-np.sum((X - params['X0'])**2.) / (2 * params['l']**2.))

    X = [np.linspace(0, 1., size[0]), np.linspace(0, 1., size[1])]
    TW = DT.TensorWrapper(f, X, params)

    # Compute low rank approx
    TTapprox = DT.QTTvec(TW)
    TTapprox.build(method='ttdmrg', eps=eps, mv_eps=maxvoleps)
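    # build(method='ttdmrg') runs a DMRG-type cross approximation: it sweeps
    # back and forth along the train, optimizing two neighbouring cores at a
    # time and evaluating the wrapped function only at the cross index sets
    # selected by maxvol, so only a small fraction of the entries is ever
    # computed (the fill level reported below).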
    fill = TW.get_fill_level()
    crossRanks = TTapprox.ranks()
    PassedRanks = all(
        map(operator.eq, crossRanks[1:-1],
            TTapprox.ranks()[1:-1]))
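    # NOTE: as written this check is vacuous -- crossRanks is compared
    # against TTapprox.ranks() itself, so PassedRanks is always True (and
    # unused). The same pattern repeats verbatim in the tests below.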

    A = TW.copy()[tuple([slice(None, None, None) for i in range(len(size))])]
    FroErr = mla.norm(TTapprox.to_tensor() - A, 'fro')
    kappa = np.max(A) / np.min(
        A)  # This is slightly off with respect to the analysis
    r = np.max(TTapprox.ranks())
    epsTarget = (2. * r + kappa * r + 1.)**(np.log2(d)) * (r + 1.) * eps
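    # epsTarget is the heuristic accuracy bound for the cross/DMRG
    # approximation, (2r + kappa*r + 1)**log2(d) * (r + 1) * eps, with r the
    # maximal TT rank and kappa = max(A)/min(A); as the comment above notes,
    # it only approximates the published analysis.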
    if FroErr < epsTarget:
        print_ok(
            'QTTdmrg: exp(-|X-X0|^2/(2*l^2)) Low Rank Approx - (32x32) - (FroErr=%e, Fill=%.2f%%)'
            % (FroErr, 100. * float(fill) / float(TW.get_global_size())))
        nsucc += 1
    else:
        print_fail(
            'QTTdmrg: exp(-|X-X0|^2/(2*l^2)) Low Rank Approx - (32x32) - (FroErr=%e, Fill=%.2f%%)'
            % (FroErr, 100. * float(fill) / float(TW.get_global_size())))
        nfail += 1

    if PLOTTING:
        # Get filled idxs
        fill_idxs = np.array(TW.get_fill_idxs())
        last_idxs = TTapprox.get_ttdmrg_eval_idxs()
        plt.figure()
        plt.imshow(A.astype(float), origin='lower')
        plt.plot(fill_idxs[:, 0], fill_idxs[:, 1], 'wo')
        plt.plot(last_idxs[:, 0], last_idxs[:, 1], 'ro')
        plt.title("exp(- |X-X0|^2/2*l^2) - 32x32")

        plt.show(block=False)

    ####
    # exp(-|X-X0|^2/(2*l^2)) Low Rank Approximation (not a power of 2)
    ####
    maxvoleps = 1e-5
    delta = 1e-5
    eps = 1e-10

    d = 2
    size = (54, 54)
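    # 54 is not a power of 2, so the quantics folding cannot split the axes
    # exactly; QTTvec is expected to handle this internally (e.g. by
    # padding/extending each axis up to the next power of the base), which
    # is precisely what this variant exercises.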

    # Build up the 2d tensor wrapper
    X0 = np.array([0.2, 0.2])
    l = 0.05
    params = {'X0': X0, 'l': l}

    def f(X, params):
        return np.exp(-np.sum((X - params['X0'])**2.) / (2 * params['l']**2.))

    X = [np.linspace(0, 1., size[0]), np.linspace(0, 1., size[1])]
    TW = DT.TensorWrapper(f, X, params)

    # Compute low rank approx
    TTapprox = DT.QTTvec(TW)
    TTapprox.build(method='ttdmrg', eps=eps, mv_eps=maxvoleps)
    fill = TW.get_fill_level()
    crossRanks = TTapprox.ranks()
    PassedRanks = all(
        map(operator.eq, crossRanks[1:-1],
            TTapprox.ranks()[1:-1]))

    A = TW.copy()[tuple([slice(None, None, None) for i in range(len(size))])]
    FroErr = mla.norm(TTapprox.to_tensor() - A, 'fro')
    kappa = np.max(A) / np.min(
        A)  # This is slightly off with respect to the analysis
    r = np.max(TTapprox.ranks())
    epsTarget = (2. * r + kappa * r + 1.)**(np.log2(d)) * (r + 1.) * eps
    if FroErr < epsTarget:
        print_ok(
            'QTTdmrg: exp(-|X-X0|^2/(2*l^2)) Low Rank Approx - (54x54) - (FroErr=%e, Fill=%.2f%%)'
            % (FroErr, 100. * float(fill) / float(TW.get_global_size())))
        nsucc += 1
    else:
        print_fail(
            'QTTdmrg: exp(-|X-X0|^2/(2*l^2)) Low Rank Approx - (54x54) - (FroErr=%e, Fill=%.2f%%)'
            % (FroErr, 100. * float(fill) / float(TW.get_global_size())))
        nfail += 1

    if PLOTTING:
        # Get filled idxs
        fill_idxs = np.array(TW.get_fill_idxs())
        last_idxs = TTapprox.get_ttdmrg_eval_idxs()
        plt.figure()
        plt.imshow(A.astype(float), origin='lower')
        plt.plot(fill_idxs[:, 0], fill_idxs[:, 1], 'wo')
        plt.plot(last_idxs[:, 0], last_idxs[:, 1], 'ro')
        plt.title("exp(- |X-X0|^2/2*l^2) - 54x54")

        plt.show(block=False)

    ####
    # 1./(x+y+1) Low Rank Approximation
    ####
    maxvoleps = 1e-5
    delta = 1e-5
    eps = 1e-10

    d = 2
    size = (33, 33)  # 1089 points

    # Build up the 2d tensor wrapper
    def f(X, params):
        return 1. / (X[0] + X[1] + 1.)

    X = [
        np.linspace(0, 2 * np.pi, size[0]),
        np.linspace(0, 2 * np.pi, size[1])
    ]
    TW = DT.TensorWrapper(f, X, None)

    # Compute low rank approx
    TTapprox = DT.QTTvec(TW)
    TTapprox.build(method='ttdmrg', eps=eps, mv_eps=maxvoleps)
    fill = TW.get_fill_level()
    crossRanks = TTapprox.ranks()
    PassedRanks = all(
        map(operator.eq, crossRanks[1:-1],
            TTapprox.ranks()[1:-1]))

    A = TW.copy()[tuple([slice(None, None, None) for i in range(len(size))])]
    FroErr = mla.norm(TTapprox.to_tensor() - A, 'fro')
    kappa = np.max(A) / np.min(
        A)  # This is slightly off with respect to the analysis
    r = np.max(TTapprox.ranks())
    epsTarget = (2. * r + kappa * r + 1.)**(np.log2(d)) * (r + 1.) * eps
    if FroErr < epsTarget:
        print_ok(
            'QTTdmrg: 1./(x+y+1) Low Rank Approx (FroErr=%e, Fill=%.2f%%)' %
            (FroErr, 100. * float(fill) / float(TW.get_global_size())))
        nsucc += 1
    else:
        print_fail(
            'QTTdmrg: 1./(x+y+1) Low Rank Approx (FroErr=%e, Fill=%.2f%%)' %
            (FroErr, 100. * float(fill) / float(TW.get_global_size())))
        nfail += 1

    if PLOTTING:
        # Get filled idxs
        fill_idxs = np.array(TW.get_fill_idxs())
        plt.figure()
        plt.plot(fill_idxs[:, 0], fill_idxs[:, 1], 'o')
        plt.title("1./(x+y+1) - 33x33")

        plt.show(block=False)

    ####
    # Sin(sum(x)) QTTdmrg Approximation
    ####
    maxvoleps = 1e-5
    delta = 1e-5
    eps = 1e-10

    d = 3
    size = [33] * d

    # Build up the tensor wrapper
    # def f(X,params): return np.sin( X[0] ) * np.sin(X[1])
    def f(X, params):
        return np.sin(np.sum(X))

    # def f(X,params): return 1./( np.sum(X) + 1 )
    X = [np.linspace(0, 2 * np.pi, size[0])] * d
    TW = DT.TensorWrapper(f, X)

    # Compute low rank approx
    TTapprox = DT.QTTvec(TW)
    TTapprox.build(method='ttdmrg', eps=eps, mv_eps=maxvoleps)
    fill = TW.get_fill_level()
    crossRanks = TTapprox.ranks()
    PassedRanks = all(
        map(operator.eq, crossRanks[1:-1],
            TTapprox.ranks()[1:-1]))

    A = TW.copy()[tuple([slice(None, None, None) for i in range(len(size))])]
    FroErr = mla.norm(TTapprox.to_tensor() - A, 'fro')
    kappa = np.max(A) / np.min(
        A)  # This is slightly off with respect to the analysis
    r = np.max(TTapprox.ranks())
    epsTarget = (2. * r + kappa * r + 1.)**(np.log2(d)) * (r + 1.) * eps
    if FroErr < epsTarget:
        print_ok(
            'QTTdmrg: sin(sum(x)) Low Rank Approx (FroErr=%e, Fill=%.2f%%)' %
            (FroErr, 100. * float(fill) / float(TW.get_global_size())))
        nsucc += 1
    else:
        print_fail(
            'QTTdmrg: sin(sum(x)) Low Rank Approx (FroErr=%e, Fill=%.2f%%)' %
            (FroErr, 100. * float(fill) / float(TW.get_global_size())))
        nfail += 1

    if PLOTTING and d == 3:
        # Get filled idxs
        fill_idxs = np.array(TW.get_fill_idxs())
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        ax.scatter(fill_idxs[:, 0], fill_idxs[:, 1], fill_idxs[:, 2])
        ax.set_xlabel('x')
        ax.set_ylabel('y')
        ax.set_zlabel('z')
        plt.title("Sin(sum(x)) - %s" % str(size))

        # Get last used idxs
        last_idxs = TTapprox.get_ttdmrg_eval_idxs()
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        ax.scatter(last_idxs[:, 0], last_idxs[:, 1], last_idxs[:, 2], c='r')
        plt.title("Sin(sum(x)) - %s" % str(size))
        plt.show(block=False)

    ####
    # 1/(sum(x)+1) QTTdmrg Approximation
    ####
    maxvoleps = 1e-5
    delta = 1e-5
    eps = 1e-10

    d = 5
    size = [8] * d
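    # With size = [8]*d, each axis folds into log2(8) = 3 quantics modes
    # (assuming the default binary base q = 2), so the QTT train has
    # d*3 = 15 cores of mode size 2.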

    # Build up the d-dimensional tensor wrapper
    def f(X, params):
        return 1. / (np.sum(X) + 1.)

    X = [np.linspace(0, 1, size[i]) for i in range(len(size))]
    TW = DT.TensorWrapper(f, X)

    # Compute low rank approx
    TTapprox = DT.QTTvec(TW)
    TTapprox.build(method='ttdmrg', eps=eps, mv_eps=maxvoleps)
    fill = TW.get_fill_level()
    crossRanks = TTapprox.ranks()
    PassedRanks = all(
        map(operator.eq, crossRanks[1:-1],
            TTapprox.ranks()[1:-1]))

    A = TW.copy()[tuple([slice(None, None, None) for i in range(len(size))])]
    FroErr = mla.norm(TTapprox.to_tensor() - A, 'fro')
    MaxErr = np.max(np.abs(TTapprox.to_tensor() - A))
    kappa = np.max(A) / np.min(
        A)  # This is slightly off with respect to the analysis
    r = np.max(TTapprox.ranks())
    epsTarget = (2. * r + kappa * r + 1.)**(np.log2(d)) * (r + 1.) * eps
    if FroErr < epsTarget:
        print_ok(
            'QTTdmrg: 1/(sum(x)+1), d=%d, Low Rank Approx (FroErr=%e, MaxErr=%e, Fill=%.2f%%)'
            % (d, FroErr, MaxErr,
               100. * float(fill) / float(TW.get_global_size())))
        nsucc += 1
    else:
        print_fail(
            'QTTdmrg: 1/(sum(x)+1), d=%d, Low Rank Approx (FroErr=%e, MaxErr=%e, Fill=%.2f%%)'
            % (d, FroErr, MaxErr,
               100. * float(fill) / float(TW.get_global_size())))
        nfail += 1

    print_summary("QTTdmrg", nsucc, nfail)

    return (nsucc, nfail)