def run(maxprocs, PLOTTING=False, loglev=logging.WARNING):
    """Dispatch the selected WTT general sub-tests and return (nsucc, nfail).

    Parameters
    ----------
    maxprocs : forwarded to each sub-test (max number of processes).
    PLOTTING : bool, whether sub-tests should produce plots.
    loglev : logging level (default logging.WARNING).

    Returns
    -------
    (nsucc, nfail) : total success / failure counts over all sub-tests run.
    """
    logging.basicConfig(level=loglev)
    # Imports kept for parity with the other test drivers in this file.
    import numpy as np
    import numpy.linalg as npla
    import itertools
    import time
    import TensorToolbox as DT
    import TensorToolbox.multilinalg as mla

    # Ids of the sub-tests to be executed.
    RUNTESTS = [0, 2]

    if PLOTTING:
        from matplotlib import pyplot as plt

    nsucc = 0
    nfail = 0
    # Lambdas defer name resolution: a RunTestWTT* function that is not
    # defined only raises if its id is actually listed in RUNTESTS, exactly
    # like the original guarded if-chain.
    test_runners = (
        (0, lambda: RunTestWTT0(maxprocs, PLOTTING, loglev)),
        (2, lambda: RunTestWTT2(maxprocs, PLOTTING, loglev)),
        (4, lambda: RunTestWTT4(maxprocs, PLOTTING, loglev)),
        (5, lambda: RunTestWTT5(maxprocs, PLOTTING, loglev)),
        (6, lambda: RunTestWTT6(maxprocs, PLOTTING, loglev)),
        (7, lambda: RunTestWTT7(maxprocs, PLOTTING, loglev)),
    )
    for test_id, runner in test_runners:
        if test_id in RUNTESTS:
            (ns, nf) = runner()
            nsucc += ns
            nfail += nf

    print_summary("WTT General", nsucc, nfail)
    return (nsucc, nfail)
def run(maxprocs, PLOTTING=False, loglev=logging.WARNING):
    """Time TT-rounding of a Laplace-like operator and check its accuracy.

    For several sizes N and dimensions d, a d-dimensional Laplace-like
    operator is built in canonical (CP) format, converted to TT format and
    rounded; the test checks the rounded rank is 2 and the rounding error
    stays within eps relative to the Frobenius norm.

    Returns (nsucc, nfail).
    """
    logging.basicConfig(level=loglev)
    import numpy as np
    import numpy.linalg as npla
    import itertools
    import time
    import TensorToolbox as DT
    import TensorToolbox.multilinalg as mla

    if PLOTTING:
        from matplotlib import pyplot as plt

    nsucc = 0
    nfail = 0

    ###################################################################
    # Test timings and comp. rate for compression of Laplace-like op.
    ###################################################################
    eps = 0.001
    Ns = 2**np.arange(4, 7, 1, dtype=int)
    ds = 2**np.arange(4, 6, dtype=int)
    timing = np.zeros((len(Ns), len(ds)))
    comp_rate = np.zeros((len(Ns), len(ds)))
    for i_N, N in enumerate(Ns):
        # 1D second-order finite-difference stencil and identity, flattened
        # once per N and reused for every dimension d.
        D = np.diag(-np.ones((N - 1)), -1) + np.diag(-np.ones(
            (N - 1)), 1) + np.diag(2 * np.ones((N)), 0)
        I = np.eye(N)
        D_flat = D.flatten()
        I_flat = I.flatten()
        for i_d, d in enumerate(ds):
            sys.stdout.write('N=%d , d=%d [STARTED]\r' % (N, d))
            sys.stdout.flush()

            # Canonical form of n-dimensional Laplace operator:
            # sum over i of I x ... x D(i-th) x ... x I
            CPtmp = []
            # U[i][alpha,k] = U_i(alpha,k)
            for i in range(d):
                CPi = np.empty((d, N**2))
                for alpha in range(d):
                    if i != alpha:
                        CPi[alpha, :] = I_flat
                    else:
                        CPi[alpha, :] = D_flat
                CPtmp.append(CPi)
            CP = DT.Candecomp(CPtmp)

            # Canonical to TT
            sys.stdout.write("\033[K")
            sys.stdout.write('N=%4d , d=%3d [CP->TT]\r' % (N, d))
            sys.stdout.flush()
            TT = DT.TTmat(CP, nrows=N, ncols=N)
            TT.build()
            TT_pre = TT.copy()
            pre_norm = mla.norm(TT_pre, 'fro')

            # Rounding TT
            sys.stdout.write("\033[K")
            sys.stdout.write('N=%4d , d=%3d [TT-round]\r' % (N, d))
            sys.stdout.flush()
            # FIX: time.clock() was removed in Python 3.8; perf_counter()
            # is the recommended monotonic wall-clock timer.
            st = time.perf_counter()
            TT.rounding(eps)
            end = time.perf_counter()
            if np.max(TT.ranks()) != 2:
                print_fail(
                    "\033[K" +
                    "1.1 Compression Timing N=%4d , d=%3d [RANK ERROR] Time: %f"
                    % (N, d, end - st))
                nfail += 1
            elif mla.norm(TT_pre - TT, 'fro') > eps * pre_norm:
                print_fail(
                    "\033[K" +
                    "1.1 Compression Timing N=%4d , d=%3d [NORM ERROR] Time: %f"
                    % (N, d, end - st))
                nfail += 1
            else:
                print_ok(
                    "\033[K" +
                    "1.1 Compression Timing N=%4d , d=%3d [ENDED] Time: %f"
                    % (N, d, end - st))
                nsucc += 1
            comp_rate[i_N, i_d] = float(TT.size()) / N**(2. * d)
            timing[i_N, i_d] = end - st

    # Compute scalings with respect to N and d (display only)
    if PLOTTING:
        d_sc = np.polyfit(np.log2(ds), np.log2(timing[-1, :]), 1)[0]
        N_sc = np.polyfit(np.log2(Ns), np.log2(timing[:, -1]), 1)[0]
        sys.stdout.write("Scaling: N^%f, d^%f\n" % (N_sc, d_sc))
        sys.stdout.flush()

        plt.figure(figsize=(14, 7))
        plt.subplot(1, 2, 1)
        plt.loglog(Ns, comp_rate[:, -1], 'o-', basex=2, basey=2)
        plt.grid()
        plt.xlabel('N')
        plt.ylabel('Comp. Rate TT/FULL')
        plt.subplot(1, 2, 2)
        plt.loglog(Ns, timing[:, -1], 'o-', basex=2, basey=2)
        plt.grid()
        plt.xlabel('N')
        plt.ylabel('Round Time (s)')
        plt.show(block=False)

        plt.figure(figsize=(14, 7))
        plt.subplot(1, 2, 1)
        plt.loglog(ds, comp_rate[-1, :], 'o-', basex=2, basey=2)
        plt.grid()
        plt.xlabel('d')
        plt.ylabel('Comp. Rate TT/FULL')
        plt.subplot(1, 2, 2)
        plt.loglog(ds, timing[-1, :], 'o-', basex=2, basey=2)
        plt.grid()
        plt.xlabel('d')
        plt.ylabel('Round Time (s)')
        plt.show(block=False)

        from mpl_toolkits.mplot3d import Axes3D
        from matplotlib import cm
        (NN, dd) = np.meshgrid(np.log2(Ns), np.log2(ds))
        T = timing.copy().T
        # Zero timings break the log plot: clamp to the smallest nonzero.
        T[T == 0.] = np.min(T[np.nonzero(T)])
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        ax.plot_surface(NN, dd, np.log2(T),
                        rstride=1, cstride=1, cmap=cm.coolwarm,
                        linewidth=0, antialiased=False)
        plt.show(block=False)

    print_summary("TT Compression", nsucc, nfail)
    return (nsucc, nfail)
def run(maxprocs, PLOTTING=False, loglev=logging.WARNING):
    """Test weighted-TT GMRES on a multidimensional Laplace equation.

    Builds the d-dimensional Laplace operator (2nd-order finite differences,
    Dirichlet BC) both as a sparse full matrix and as a TT matrix, solves
    L u = b with scipy's GMRES (full) and the TT GMRES (mla.gmres), and
    compares the two solutions in the 2-norm.

    Returns (nsucc, nfail).
    """
    logging.basicConfig(level=loglev)
    import numpy as np
    import numpy.linalg as npla
    import itertools
    import time
    import TensorToolbox as DT
    import TensorToolbox.multilinalg as mla
    if PLOTTING:
        from matplotlib import pyplot as plt
    nsucc = 0
    nfail = 0

    ####################################################################################
    # Test GMRES method on simple multidim laplace equation
    ####################################################################################
    import scipy.sparse as sp
    import scipy.sparse.linalg as spla

    span = np.array([0., 1.])
    d = 2
    N = 64
    h = 1 / float(N - 1)
    eps_gmres = 1e-3
    eps_round = 1e-6

    # sys.stdout.write("GMRES: Laplace N=%4d , d=%3d [START] \r" % (N,d))
    # sys.stdout.flush()

    # Construct d-D Laplace (with 2nd order finite diff)
    D = -1. / h**2. * (np.diag(np.ones(
        (N - 1)), -1) + np.diag(np.ones(
            (N - 1)), 1) + np.diag(-2. * np.ones((N)), 0))
    # First/last rows enforce Dirichlet boundary conditions (identity rows).
    D[0, 0:2] = np.array([1., 0.])
    D[-1, -2:] = np.array([0., 1.])
    D_sp = sp.coo_matrix(D)
    I_sp = sp.identity(N)
    I = np.eye(N)

    # Full operator as a sum of Kronecker products: I x ... x D x ... x I
    FULL_LAP = sp.coo_matrix((N**d, N**d))
    for i in range(d):
        tmp = sp.identity((1))
        for j in range(d):
            if i != j:
                tmp = sp.kron(tmp, I_sp)
            else:
                tmp = sp.kron(tmp, D_sp)
        FULL_LAP = FULL_LAP + tmp

    # Construction of TT Laplace operator (same sum, in CP format first)
    CPtmp = []
    D_flat = D.flatten()
    I_flat = I.flatten()
    for i in range(d):
        CPi = np.empty((d, N**2))
        for alpha in range(d):
            if i != alpha:
                CPi[alpha, :] = I_flat
            else:
                CPi[alpha, :] = D_flat
        CPtmp.append(CPi)
    CP_lap = DT.Candecomp(CPtmp)
    TT_LAP = DT.TTmat(CP_lap, nrows=N, ncols=N, is_sparse=[True] * d)
    TT_LAP.build()
    TT_LAP.rounding(eps_round)
    # Release the intermediate CP representation.
    CPtmp = None
    CP_lap = None

    # Construct Right hand-side (b=1, Dirichlet BC = 0)
    X = np.linspace(span[0], span[1], N)
    b1D = np.ones(N)
    b1D[0] = 0.
    b1D[-1] = 0.

    # Construct the d-D right handside
    tmp = np.array([1.])
    for j in range(d):
        tmp = np.kron(tmp, b1D)
    FULL_b = tmp

    # Construct the TT right handside (rank-1 CP, then weighted TT vector)
    CPtmp = []
    for i in range(d):
        CPi = np.empty((1, N))
        CPi[0, :] = b1D
        CPtmp.append(CPi)
    CP_b = DT.Candecomp(CPtmp)
    # Uniform quadrature weights per dimension for the weighted TT vector.
    W = [np.ones(N, dtype=float) / float(N) for i in range(d)]
    TT_b = DT.WTTvec(CP_b, W)
    TT_b.build()
    TT_b.rounding(eps_round)

    # Solve full system using npla.solve
    (FULL_RES, FULL_info) = spla.gmres(FULL_LAP, FULL_b, tol=eps_gmres)

    if PLOTTING:
        from mpl_toolkits.mplot3d import Axes3D
        from matplotlib import cm
        (XX, YY) = np.meshgrid(X, X)
        fig = plt.figure(figsize=(18, 7))
        plt.suptitle("GMRES")
        if d == 2:
            # Plot function
            ax = fig.add_subplot(131, projection='3d')
            ax.plot_surface(XX,
                            YY,
                            FULL_RES.reshape((N, N)),
                            rstride=1,
                            cstride=1,
                            cmap=cm.coolwarm,
                            linewidth=0,
                            antialiased=False)
            plt.show(block=False)

    # Solve TT cg
    x0 = DT.zerosvec(d, N)
    (TT_RES, conv, TT_info) = mla.gmres(TT_LAP,
                                        TT_b,
                                        x0=x0,
                                        restart=10,
                                        eps=eps_gmres,
                                        ext_info=True)

    if PLOTTING and d == 2:
        # Plot function
        ax = fig.add_subplot(132, projection='3d')
        ax.plot_surface(XX,
                        YY,
                        TT_RES.to_tensor(),
                        rstride=1,
                        cstride=1,
                        cmap=cm.coolwarm,
                        linewidth=0,
                        antialiased=False)
        plt.show(block=False)

    # Error
    if PLOTTING and d == 2:
        # Plot function
        ax = fig.add_subplot(133, projection='3d')
        ax.plot_surface(XX,
                        YY,
                        np.abs(TT_RES.to_tensor() - FULL_RES.reshape((N, N))),
                        rstride=1,
                        cstride=1,
                        cmap=cm.coolwarm,
                        linewidth=0,
                        antialiased=False)
        plt.show(block=False)

    # 2-norm discrepancy between the TT and the full solve; the 1e-2
    # tolerance accounts for both GMRES tolerances being loose (1e-3).
    err2 = npla.norm(TT_RES.to_tensor().flatten() - FULL_RES, 2)
    if err2 < 1e-2:
        print_ok("7.1 Weighted GMRES: Laplace N=%4d , d=%3d , 2-err=%f" %
                 (N, d, err2))
        nsucc += 1
    else:
        print_fail("7.1 Weighted GMRES: Laplace N=%4d , d=%3d , 2-err=%f" %
                   (N, d, err2))
        nfail += 1

    print_summary("WTT GMRES", nsucc, nfail)
    return (nsucc, nfail)
def run(maxprocs, loglev=logging.WARNING):
    """Test the Spectral Quantics Tensor Train (SQTT) DMRG construction in 0D.

    For each approximation order, builds an SQTT surrogate of
    f(p) = 1/(sum(p)+1) in d=4 dimensions and compares point evaluations,
    the mean and the variance against analytical values.

    Returns (nsucc, nfail).
    """
    logging.basicConfig(level=loglev)
    store_file = '0d'  # Storage file (used for storage and restarting purposes)

    # Start from a clean slate: remove any stale storage files.
    if os.path.exists(store_file + ".pkl"):
        os.remove(store_file + '.pkl')
    if os.path.exists(store_file + ".pkl.old"):
        os.remove(store_file + ".pkl.old")
    if os.path.exists(store_file + ".h5"):
        os.remove(store_file + '.h5')
    if os.path.exists(store_file + ".h5.old"):
        os.remove(store_file + ".h5.old")

    nsucc = 0
    nfail = 0
    # FIX: renamed from `orders` — the per-test list below used to shadow it.
    test_orders = range(2, 15)
    for order in test_orders:
        # Function definition.
        # For MPI purposes, anything that cannot be imported from the system
        # path (like user defined functions used by f) MUST be defined inside
        # the following definition. Whatever can be imported from the system
        # path (like numpy) can be imported from inside it.
        def f(p, params):
            import numpy as np
            return 1. / (np.sum(p) + 1.)

        params = None  # Parameters passed to f. Must be pickable. Here None.
        eps = 1e-8
        kickrank = None  # Improves convergence for high rank functions
        store_freq = 10  # Seconds between every storage
        surr_type = TT.PROJECTION  # Alternatives are: TT.PROJECTION,
        # TT.LINEAR_INTERPOLATION and TT.LAGRANGE_INTERPOLATION
        d = 4  # Number of dimensions (The analytical integrals work for d=4)
        orders = [order] * d  # orders of the approximation

        X = []  # Approximation points
        for i in range(d):
            if surr_type == TT.PROJECTION:
                # For each dimension: polynomial type, type of points,
                # polynomial parameters, and the span for rescaling.
                X.append((S1D.JACOBI, S1D.GAUSSLOBATTO, (0., 0.), [0., 1.]))
            else:
                # For LINEAR/LAGRANGE interpolation any list of points in the
                # span will work (include the endpoints!).
                X.append(np.linspace(0., 1., 40))

        if os.path.isfile(store_file + '.pkl'):
            # If the file already exists, restart the construction from the
            # already computed values.
            STTapprox = TT.load(store_file, load_data=True)
            STTapprox.set_f(f)
            STTapprox.store_freq = store_freq
            STTapprox.build(maxprocs)
        else:
            # Otherwise start a new approximation.
            STTapprox = TT.SQTT(
                f,
                X,
                params,
                eps=eps,
                range_dim=0,
                orders=orders,
                method='ttdmrg',
                kickrank=kickrank,
                surrogateONOFF=True,
                surrogate_type=surr_type,
                store_location=store_file,
                store_overwrite=True,
                store_freq=store_freq)  # See documentation of STT
            STTapprox.build(maxprocs)

        logging.info("Fill: %.3f%% - N: %e" %
                     (float(STTapprox.TW.get_fill_level()) /
                      float(STTapprox.TW.get_global_size()) * 100.,
                      float(STTapprox.TW.get_fill_level())))

        def eval_point(STTapprox, params):
            # Evaluates random points with the STT approximation and the
            # analytical function; compares times and returns the RMSE.
            N = 1000
            xs = stats.uniform().rvs(N * d).reshape(N, d)
            # Evaluate points in TT format
            val = np.zeros(N)
            # FIX: time.clock() was removed in Python 3.8; use perf_counter().
            start_eval = systime.perf_counter()
            for i in range(N):
                val[i] = STTapprox(xs[i, :])  # Point evaluation in TT format
            end_eval = systime.perf_counter()
            logging.info("Evaluation time: " +
                         str((end_eval - start_eval) / N))
            exact = np.zeros(N)
            start_eval = systime.perf_counter()
            for i in range(N):
                exact[i] = f(xs[i, :], params)
            end_eval = systime.perf_counter()
            logging.info("Exact evaluation time: " +
                         str((end_eval - start_eval) / N))
            # FIX: the original np.sqrt(np.mean(val - exact)**2.) computed
            # |mean(err)|, not the RMSE — misplaced parenthesis.
            err = np.sqrt(np.mean((val - exact)**2.))
            return err

        # Point evaluation
        point_err = eval_point(STTapprox, params)

        # Computing the mean (analytical value valid for d=4)
        mean = STTapprox.integrate()
        exact_mean = -272. * np.log(2) / 3. + 27. * np.log(3) + \
            125. * np.log(5) / 6.
        mean_err = np.abs(mean - exact_mean)

        # Computing the variance. Mind that in STTapprox**2, 2 is an integer!
        var = (STTapprox**2).integrate() - mean**2.
        exact_var = np.log(
            4722366482869645213696. /
            (1861718135983154296875. * np.sqrt(5))) - exact_mean**2.
        var_err = np.abs(var - exact_var)

        print_ok(
            "Spectral Quantics Tensor Train DMRG - 0D - Ord: %d - Point err: %e - Mean err: %e - Var err: %e"
            % (order, point_err, mean_err, var_err))
        nsucc += 1

        # Clean up storage so the next order starts a fresh construction.
        if os.path.exists(store_file + '.pkl'):
            os.remove(store_file + '.pkl')
        if os.path.exists(store_file + ".pkl.old"):
            os.remove(store_file + ".pkl.old")
        if os.path.exists(store_file + '.h5'):
            os.remove(store_file + '.h5')
        if os.path.exists(store_file + ".h5.old"):
            os.remove(store_file + ".h5.old")

    print_summary("SQTTdmrg", nsucc, nfail)
    return (nsucc, nfail)
def run(maxprocs, PLOTTING=False, loglev=logging.WARNING):
    """Test the TTcross machinery: TensorWrapper indexing, maxvol,
    low-rank skeleton approximation and full TTcross approximation.

    Returns (nsucc, nfail).
    """
    logging.basicConfig(level=loglev)
    if PLOTTING:
        import matplotlib.pyplot as plt
        from matplotlib import cm
        from mpl_toolkits.mplot3d import Axes3D
    nsucc = 0
    nfail = 0

    ####
    # Test Tensor Wrapper
    ####
    def f(x, params=None):
        # Works both on a single point (1d array) and a batch (2d array).
        if x.ndim == 1:
            return np.sum(x)
        if x.ndim == 2:
            return np.sum(x, axis=1)

    dims = [11, 21, 31]
    X = [
        np.linspace(1., 10., dims[0]),
        np.linspace(1, 20., dims[1]),
        np.linspace(1, 30., dims[2])
    ]
    XX = np.array(list(itertools.product(*X)))
    F = f(XX).reshape(dims)
    tw = DT.TensorWrapper(f, X, None, dtype=float)
    # Compare single-entry, slice and multi-slice access with the dense array.
    if F[5,10,15] == tw[5,10,15] and \
       np.all(F[1,2,:] == tw[1,2,:]) and \
       np.all(F[3:5,2:3,20:24] == tw[3:5,2:3,20:24]):
        print_ok("TTcross: Tensor Wrapper")
        nsucc += 1
    else:
        print_fail("TTcross: TensorWrapper")
        # FIX: the failure branch used to increment nsucc.
        nfail += 1

    ####
    # Test Maxvol
    ####
    maxvoleps = 1e-2
    pass_maxvol = True
    N = 100
    i = 0
    while pass_maxvol == True and i < N:
        i += 1
        A = npr.random(600).reshape((100, 6))
        (I, AsqInv, it) = DT.maxvol(A, delta=maxvoleps)
        # All entries of A * AsqInv must be bounded by 1 + delta.
        if np.max(np.abs(np.dot(A, AsqInv))) > 1. + maxvoleps:
            pass_maxvol = False
    if pass_maxvol == True:
        print_ok('TTcross: Maxvol')
        nsucc += 1
    else:
        print_fail('TTcross: Maxvol at it=%d' % i)
        # FIX: the failure branch used to increment nsucc.
        nfail += 1

    ####
    # Test Low Rank Approximation
    ####
    maxvoleps = 1e-5
    delta = 1e-5
    pass_lowrankapprox = True
    N = 10
    i = 0
    logging.info(
        "(rows,cols,rank) FroA, FroErr, FroErr/FroA, maxAAinv, maxAinvA")
    while pass_lowrankapprox == True and i < N:
        i += 1
        # FIX: npr.random_integers was deprecated/removed; npr.randint has an
        # exclusive upper bound, hence the +1 adjustments.
        size = npr.randint(10, 101, 2)
        r = npr.randint(max(1, np.min(size) - 10), np.min(size) + 1)
        A = npr.random(np.prod(size)).reshape(size)
        (I, J, AsqInv,
         it) = DT.lowrankapprox(A, r, delta=delta, maxvoleps=maxvoleps)
        AAinv = np.max(np.abs(np.dot(A[:, J], AsqInv)))
        AinvA = np.max(np.abs(np.dot(AsqInv, A[I, :])))
        FroErr = npla.norm(np.dot(A[:, J], np.dot(AsqInv, A[I, :])) - A,
                           'fro')
        FroA = npla.norm(A, 'fro')
        logging.info("(%d,%d,%d) %f, %f, %f %f %f" %
                     (size[0], size[1], r, FroA, FroErr, FroErr / FroA,
                      AAinv, AinvA))
        if AAinv > 1. + maxvoleps:
            # FIX: used to set pass_maxvol, so the loop never stopped early
            # and the wrong flag was checked below.
            pass_lowrankapprox = False
    if pass_lowrankapprox == True:
        print_ok('TTcross: Random Low Rank Approx')
        nsucc += 1
    else:
        print_fail('TTcross: Random Low Rank Approx at it=%d' % i)
        # FIX: the failure branch used to increment nsucc.
        nfail += 1

    ####
    # Sin*Cos Low Rank Approximation
    ####
    maxvoleps = 1e-5
    delta = 1e-5
    size = (100, 100)
    r = 1  # sin(x)*cos(y) is exactly rank 1

    # Build up the 2d tensor wrapper
    def f(X, params):
        return np.sin(X[0]) * np.cos(X[1])

    X = [
        np.linspace(0, 2 * np.pi, size[0]),
        np.linspace(0, 2 * np.pi, size[1])
    ]
    TW = DT.TensorWrapper(f, X, None, dtype=float)

    # Compute low rank approx
    (I, J, AsqInv,
     it) = DT.lowrankapprox(TW, r, delta=delta, maxvoleps=maxvoleps)
    fill = TW.get_fill_level()
    Fapprox = np.dot(
        TW[:, J].reshape((TW.shape[0], len(J))),
        np.dot(AsqInv, TW[I, :].reshape((len(I), TW.shape[1]))))
    FroErr = npla.norm(Fapprox - TW[:, :], 'fro')
    if FroErr < 1e-12:
        print_ok(
            'TTcross: sin(x)*cos(y) Low Rank Approx (FroErr=%e, Fill=%.2f%%)'
            % (FroErr, 100. * float(fill) / float(TW.get_size())))
        nsucc += 1
    else:
        print_fail(
            'TTcross: sin(x)*cos(y) Low Rank Approx (FroErr=%e, Fill=%.2f%%)'
            % (FroErr, 100. * float(fill) / float(TW.get_size())))
        # FIX: the failure branch used to increment nsucc.
        nfail += 1
    if PLOTTING:
        plt.figure(figsize=(12, 7))
        plt.subplot(1, 2, 1)
        plt.imshow(TW[:, :])
        plt.subplot(1, 2, 2)
        plt.imshow(Fapprox)

    ####
    # Sin(x+y) Low Rank Approximation
    ####
    maxvoleps = 1e-5
    delta = 1e-5
    size = (100, 100)
    r = 2  # sin(x+y) = sin(x)cos(y) + cos(x)sin(y) is exactly rank 2

    # Build up the 2d tensor wrapper.
    # FIX: a dead duplicate definition of f (the sin*cos one) was removed.
    def f(X, params):
        return np.sin(X[0] + X[1])

    X = [
        np.linspace(0, 2 * np.pi, size[0]),
        np.linspace(0, 2 * np.pi, size[1])
    ]
    TW = DT.TensorWrapper(f, X, None, dtype=float)

    # Compute low rank approx
    (I, J, AsqInv,
     it) = DT.lowrankapprox(TW, r, delta=delta, maxvoleps=maxvoleps)
    fill = TW.get_fill_level()
    Fapprox = np.dot(TW[:, J], np.dot(AsqInv, TW[I, :]))
    FroErr = npla.norm(Fapprox - TW[:, :], 'fro')
    if FroErr < 1e-12:
        print_ok(
            'TTcross: sin(x+y) Low Rank Approx (FroErr=%e, Fill=%.2f%%)' %
            (FroErr, 100. * float(fill) / float(TW.get_size())))
        nsucc += 1
    else:
        print_fail(
            'TTcross: sin(x+y) Low Rank Approx (FroErr=%e, Fill=%.2f%%)' %
            (FroErr, 100. * float(fill) / float(TW.get_size())))
        # FIX: the failure branch used to increment nsucc.
        nfail += 1
    if PLOTTING:
        plt.figure(figsize=(12, 7))
        plt.subplot(1, 2, 1)
        plt.imshow(TW[:, :])
        plt.subplot(1, 2, 2)
        plt.imshow(Fapprox)

    ####
    # Sin(x)*cos(y)*Sin(z) TTcross Approximation
    ####
    maxvoleps = 1e-5
    delta = 1e-5
    eps = 1e-10
    size = (10, 10, 10)

    # Build up the 3d tensor wrapper
    def f(X, params):
        return np.sin(X[0]) * np.cos(X[1]) * np.sin(X[2])

    X = [
        np.linspace(0, 2 * np.pi, size[0]),
        np.linspace(0, 2 * np.pi, size[1]),
        np.linspace(0, 2 * np.pi, size[2])
    ]
    TW = DT.TensorWrapper(f, X, dtype=float)

    # Compute TTcross approx
    TTapprox = DT.TTvec(TW)
    TTapprox.build(method='ttcross', eps=eps, mv_eps=maxvoleps, delta=delta)
    fill = TW.get_fill_level()
    crossRanks = TTapprox.ranks()
    # NOTE: rounding() may modify TTapprox in place; keep this call order.
    PassedRanks = all(
        map(operator.gt, crossRanks[1:-1],
            TTapprox.rounding(eps=delta).ranks()[1:-1]))
    # Index a copy so the fill statistics of TW are not affected.
    A = TW.copy()[tuple([slice(None, None, None)
                         for i in range(len(size))])]
    FroErr = mla.norm(TTapprox.to_tensor() - A, 'fro')
    if FroErr < eps:
        print_ok(
            'TTcross: sin(x)*cos(y)*sin(z) Low Rank Approx (FroErr=%e, Fill=%.2f%%)'
            % (FroErr, 100. * float(fill) / float(TW.get_size())))
        nsucc += 1
    else:
        print_fail(
            'TTcross: sin(x)*cos(y)*sin(z) Low Rank Approx (FroErr=%e, Fill=%.2f%%)'
            % (FroErr, 100. * float(fill) / float(TW.get_size())))
        # FIX: the failure branch used to increment nsucc.
        nfail += 1

    if PLOTTING:
        # Get filled idxs
        fill_idxs = np.array(TW.get_fill_idxs())
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        ax.scatter(fill_idxs[:, 0], fill_idxs[:, 1], fill_idxs[:, 2])
        ax.set_xlabel('x')
        ax.set_ylabel('y')
        ax.set_zlabel('z')
        # Get last used idxs
        Is = TTapprox.Is
        Js = TTapprox.Js
        ndim = len(X)
        dims = [len(Xi) for Xi in X]
        idxs = []
        for k in range(len(Is) - 1, -1, -1):
            for i in range(len(Is[k])):
                for j in range(len(Js[k])):
                    for kk in range(dims[k]):
                        idxs.append(Is[k][i] + (kk, ) + Js[k][j])
        last_idxs = np.array(idxs)
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        ax.scatter(last_idxs[:, 0], last_idxs[:, 1], last_idxs[:, 2], c='r')
        plt.show(block=False)

    print_summary("TTcross", nsucc, nfail)
    return (nsucc, nfail)
def run(maxprocs, PLOTTING=False, loglev=logging.WARNING):
    """Test the TT matrix 2-norm on a random TT matrix.

    Builds a random TT matrix, expands it to a full matrix (interleaved
    row/column dimensions permuted so all row indices come first), and
    compares the TT 2-norm estimate with numpy's dense 2-norm.

    Returns (nsucc, nfail).
    """
    logging.basicConfig(level=loglev)
    import numpy as np
    import numpy.linalg as npla
    import itertools
    import time
    import TensorToolbox as DT
    import TensorToolbox.multilinalg as mla

    if PLOTTING:
        from matplotlib import pyplot as plt

    nsucc = 0
    nfail = 0

    #####################################################################################
    # Test matrix 2-norm on random matrices
    #####################################################################################
    d = 3
    nrows = 16
    ncols = 16
    # Allow scalar shorthand: replicate the size over all d dimensions.
    if isinstance(nrows, int):
        nrows = [nrows for i in range(d)]
    if isinstance(ncols, int):
        ncols = [ncols for i in range(d)]
    eps = 1e-6
    round_eps = 1e-12

    # Construction of TT random matrix
    TT_RAND = DT.randmat(d, nrows, ncols)

    # Construct FULL random tensor: the TT matrix unfolds with row/col
    # dimensions interleaved (r0,c0,r1,c1,...); permute all row indices to
    # the front, then flatten to a (prod(nrows), prod(ncols)) matrix.
    FULL_RAND = TT_RAND.to_tensor()
    rowcol = list(
        itertools.chain(*[[ri, ci] for (ri, ci) in zip(nrows, ncols)]))
    FULL_RAND = np.reshape(FULL_RAND, rowcol)
    idxswap = list(range(0, 2 * d, 2))
    idxswap.extend(range(1, 2 * d, 2))
    FULL_RAND = np.transpose(FULL_RAND, axes=idxswap)
    FULL_RAND = np.reshape(FULL_RAND, (np.prod(nrows), np.prod(ncols)))

    # Check results: the TT 2-norm is an estimate, so compare relative to
    # the Frobenius norm with a 2% tolerance.
    tt_norm = mla.norm(TT_RAND, 2, round_eps=round_eps, eps=eps)
    full_norm = npla.norm(FULL_RAND, 2)
    if np.abs(tt_norm - full_norm) / npla.norm(FULL_RAND, 'fro') <= 0.02:
        print_ok(
            "3.1 Matrix 2-norm: Random nrows=%s, ncols=%s , d=%3d , TT-norm = %.5f , FULL-norm = %.5f"
            % (str(nrows), str(ncols), d, tt_norm, full_norm))
        nsucc += 1
    else:
        print_fail(
            "3.1 Matrix 2-norm: Random nrows=%s, ncols=%s, d=%3d , TT-norm = %.5f , FULL-norm = %.5f"
            % (str(nrows), str(ncols), d, tt_norm, full_norm), '')
        nfail += 1

    # NOTE: a large commented-out experiment (smallest eigenvalue of a
    # Schroedinger-like operator via TT matrix-vector products, cf.
    # Oseledets' "Tensor-Train decomposition" and Beylkin & Mohlenkamp's
    # "Algorithms in high dimensions") used to live here; it was removed as
    # dead code and can be recovered from version control history.

    print_summary("TT Norms", nsucc, nfail)
    return (nsucc, nfail)
def run(maxprocs, PLOTTING=False, loglev=logging.WARNING):
    """Test the TT-DMRG-cross approximation (TensorWrapper indexing plus
    ttdmrgcross builds on several analytic functions in d=2, 3 and 5).

    Returns (nsucc, nfail).
    """
    logging.basicConfig(level=loglev)
    if PLOTTING:
        import matplotlib.pyplot as plt
        from matplotlib import cm
        from mpl_toolkits.mplot3d import Axes3D
    nsucc = 0
    nfail = 0

    ####
    # Test Tensor Wrapper
    ####
    def f(x, params=None):
        # Works both on a single point (1d array) and a batch (2d array).
        if x.ndim == 1:
            return np.sum(x)
        if x.ndim == 2:
            return np.sum(x, axis=1)

    dims = [11, 21, 31]
    X = [
        np.linspace(1., 10., dims[0]),
        np.linspace(1, 20., dims[1]),
        np.linspace(1, 30., dims[2])
    ]
    XX = np.array(list(itertools.product(*X)))
    F = f(XX).reshape(dims)
    tw = DT.TensorWrapper(f, X, None)
    if F[5,10,15] == tw[5,10,15] and \
       np.all(F[1,2,:] == tw[1,2,:]) and \
       np.all(F[3:5,2:3,20:24] == tw[3:5,2:3,20:24]):
        print_ok("TTdmrgcross: Tensor Wrapper")
        nsucc += 1
    else:
        print_fail("TTdmrgcross: TensorWrapper")
        # FIX: the failure branch used to increment nsucc.
        nfail += 1

    ####
    # 1./(x+y+1) Low Rank Approximation
    ####
    maxvoleps = 1e-5
    eps = 1e-10
    d = 2
    size = [100] * d

    # Build up the 2d tensor wrapper
    def f(X, params):
        return 1. / (X[0] + X[1] + 1.)

    X = [
        np.linspace(0, 2 * np.pi, size[0]),
        np.linspace(0, 2 * np.pi, size[1])
    ]
    TW = DT.TensorWrapper(f, X, None)

    # Compute low rank approx
    TTapprox = DT.TTvec(TW)
    TTapprox.build(method='ttdmrgcross', eps=eps, mv_eps=maxvoleps)
    fill = TW.get_fill_level()
    # Index a copy so TW's fill statistics are unaffected.
    A = TW.copy()[tuple([slice(None, None, None)
                         for i in range(len(size))])]
    FroErr = mla.norm(TTapprox.to_tensor() - A, 'fro')
    # Error bound from the DMRG-cross analysis (kappa is slightly off with
    # respect to the analysis).
    kappa = np.max(A) / np.min(A)
    r = np.max(TTapprox.ranks())
    epsTarget = (2. * r + kappa * r + 1.)**(np.log2(d)) * (r + 1.) * eps
    if FroErr < epsTarget:
        print_ok(
            'TTdmrgcross: 1./(x+y+1) Low Rank Approx (FroErr=%e, Fill=%.2f%%)'
            % (FroErr, 100. * float(fill) / float(TW.get_size())))
        nsucc += 1
    else:
        print_fail(
            'TTdmrgcross: 1./(x+y+1) Low Rank Approx (FroErr=%e, Fill=%.2f%%)'
            % (FroErr, 100. * float(fill) / float(TW.get_size())))
        # FIX: the failure branch used to increment nsucc.
        nfail += 1

    if PLOTTING:
        # Get filled idxs
        fill_idxs = np.array(TW.get_fill_idxs())
        plt.figure()
        plt.plot(fill_idxs[:, 0], fill_idxs[:, 1], 'o')
        plt.show(block=False)

    ####
    # sin(sum(x)) TTcross Approximation
    ####
    maxvoleps = 1e-5
    eps = 1e-10
    d = 3
    size = [20] * d

    # Build up the tensor wrapper
    def f(X, params):
        return np.sin(np.sum(X))

    X = [np.linspace(0, 2 * np.pi, size[0])] * d
    TW = DT.TensorWrapper(f, X)

    TTapprox = DT.TTvec(TW)
    TTapprox.build(method='ttdmrgcross', eps=eps, mv_eps=maxvoleps)
    fill = TW.get_fill_level()
    A = TW.copy()[tuple([slice(None, None, None)
                         for i in range(len(size))])]
    FroErr = mla.norm(TTapprox.to_tensor() - A, 'fro')
    kappa = np.max(A) / np.min(
        A)  # This is slightly off with respect to the analysis
    r = np.max(TTapprox.ranks())
    epsTarget = (2. * r + kappa * r + 1.)**(np.log2(d)) * (r + 1.) * eps
    if FroErr < epsTarget:
        print_ok(
            'TTdmrgcross: sin(sum(x)) - d=3 - Low Rank Approx (FroErr=%e, Fill=%.2f%%)'
            % (FroErr, 100. * float(fill) / float(TW.get_size())))
        nsucc += 1
    else:
        print_fail(
            'TTdmrgcross: sin(sum(x)) - d=3 - Low Rank Approx (FroErr=%e, Fill=%.2f%%)'
            % (FroErr, 100. * float(fill) / float(TW.get_size())))
        # FIX: the failure branch used to increment nsucc.
        nfail += 1

    if PLOTTING and d == 3:
        # Get filled idxs
        fill_idxs = np.array(TW.get_fill_idxs())
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        ax.scatter(fill_idxs[:, 0], fill_idxs[:, 1], fill_idxs[:, 2])
        ax.set_xlabel('x')
        ax.set_ylabel('y')
        ax.set_zlabel('z')
        # Get last used idxs
        last_idxs = TTapprox.get_ttdmrg_eval_idxs()
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        ax.scatter(last_idxs[:, 0], last_idxs[:, 1], last_idxs[:, 2], c='r')
        plt.show(block=False)

    ####
    # 1/(sum(x)+1) TTdmrgcross Approximation
    ####
    maxvoleps = 1e-5
    eps = 1e-10
    d = 5
    size = [10] * d

    # Build up the tensor wrapper
    def f(X, params):
        return 1. / (np.sum(X) + 1.)

    X = [np.linspace(0, 1, size[i]) for i in range(len(size))]
    TW = DT.TensorWrapper(f, X)

    # Compute low rank approx
    TTapprox = DT.TTvec(TW)
    TTapprox.build(method='ttdmrgcross', eps=eps, mv_eps=maxvoleps)
    fill = TW.get_fill_level()
    A = TW.copy()[tuple([slice(None, None, None)
                         for i in range(len(size))])]
    FroErr = mla.norm(TTapprox.to_tensor() - A, 'fro')
    MaxErr = np.max(TTapprox.to_tensor() - A)
    kappa = np.max(A) / np.min(
        A)  # This is slightly off with respect to the analysis
    r = np.max(TTapprox.ranks())
    epsTarget = (2. * r + kappa * r + 1.)**(np.log2(d)) * (r + 1.) * eps
    if FroErr < epsTarget:
        print_ok(
            'TTdmrgcross: 1/(sum(x)+1), d=%d, Low Rank Approx (FroErr=%e, MaxErr=%e, Fill=%.2f%%)'
            % (d, FroErr, MaxErr,
               100. * float(fill) / float(TW.get_size())))
        nsucc += 1
    else:
        # FIX: the second label used to read "FroErr" while printing MaxErr.
        print_fail(
            'TTdmrgcross: 1/(sum(x)+1), d=%d, Low Rank Approx (FroErr=%e, MaxErr=%e, Fill=%.2f%%)'
            % (d, FroErr, MaxErr,
               100. * float(fill) / float(TW.get_size())))
        nfail += 1

    print_summary("TTdmrgcross", nsucc, nfail)
    return (nsucc, nfail)
def run(maxprocs, PLOTTING=False, loglev=logging.WARNING):
    """Test the spectral TT-cross surrogate (STT) with a 2D output range.

    Builds an STT surrogate of a field-valued function of d=8 random
    parameters, checks point evaluation against the exact function, and
    estimates the mean/variance of the absolute and L2 errors by Monte Carlo.

    Returns (nsucc, nfail).
    """
    logging.basicConfig(level=loglev)
    store_file = '2d'  # Storage file (used for storage and restarting purposes)

    # Start from a clean slate: remove any stale storage files.
    if os.path.exists(store_file + ".pkl"):
        os.remove(store_file + '.pkl')
    if os.path.exists(store_file + ".pkl.old"):
        os.remove(store_file + ".pkl.old")
    if os.path.exists(store_file + ".h5"):
        os.remove(store_file + '.h5')
    if os.path.exists(store_file + ".h5.old"):
        os.remove(store_file + ".h5.old")

    nsucc = 0
    nfail = 0

    def f(p, params):
        # Field-valued function: returns a 2D array over the (XX,YY) grid
        # for each parameter point p.
        import numpy as np
        XX = params['XX']
        YY = params['YY']
        return np.sin(np.pi * (XX + YY)) * 1. / (
            (1. + np.sin(2 * np.pi * (XX + YY))) * np.sum(p) + 1.)

    x = np.linspace(0, 1, 11)
    y = np.linspace(0, 1, 11)
    XX, YY = np.meshgrid(x, y)
    params = {'XX': XX, 'YY': YY}

    d = 8  # number of random parameters
    store_freq = 20  # seconds between storages
    orders = [20] * d
    # First two entries describe the 2D output range; the rest are the
    # spectral discretizations of the d parameters.
    X = [x, y]
    for i in range(d):
        X.append((S1D.JACOBI, S1D.GAUSS, (0., 0.), [0., 1.]))

    if os.path.isfile(store_file + '.pkl'):
        # Restart from a previously stored construction.
        STTapprox = TT.load(store_file, load_data=True)
        STTapprox.set_f(f)
        STTapprox.store_freq = store_freq
        STTapprox.build(maxprocs)
    else:
        STTapprox = TT.STT(f,
                           X,
                           params,
                           range_dim=2,
                           orders=orders,
                           method='ttcross',
                           surrogateONOFF=True,
                           surrogate_type=TT.PROJECTION,
                           store_location=store_file,
                           store_overwrite=True,
                           store_freq=store_freq)
        STTapprox.build(maxprocs)

    def eval_point(STTapprox, x, params, plotting=False):
        # Evaluates one parameter point with the surrogate and the exact
        # function, logging timings and the pointwise L2 error.
        XX = params['XX']
        YY = params['YY']
        # FIX: time.clock() was removed in Python 3.8; use perf_counter().
        start_eval = systime.perf_counter()
        val = STTapprox(x)
        end_eval = systime.perf_counter()
        logging.info("TestSTTcross_2D: Evaluation time: " +
                     str(end_eval - start_eval))
        start_eval = systime.perf_counter()
        exact = f(x, params)
        end_eval = systime.perf_counter()
        logging.info("TestSTTcross_2D: Exact evaluation time: " +
                     str(end_eval - start_eval))
        logging.info("TestSTTcross_2D: Pointwise L2err: " +
                     str(npla.norm((val - exact).flatten(), 2)))
        if plotting:
            import matplotlib.pyplot as plt
            from mpl_toolkits.mplot3d import Axes3D
            from matplotlib import cm
            fig = plt.figure()
            ax = fig.add_subplot(111, projection='3d')
            surf = ax.plot_surface(XX, YY, val,
                                   rstride=1, cstride=1, cmap=cm.coolwarm, \
                                   linewidth=0, antialiased=False)
            plt.title("Surrogate")
            plt.show(block=False)
            fig = plt.figure()
            ax = fig.add_subplot(111, projection='3d')
            surf = ax.plot_surface(XX, YY, exact,
                                   rstride=1, cstride=1, cmap=cm.coolwarm, \
                                   linewidth=0, antialiased=False)
            plt.title("Exact")
            plt.show(block=False)
            fig = plt.figure()
            ax = fig.add_subplot(111, projection='3d')
            surf = ax.plot_surface(XX, YY, np.abs(exact - val),
                                   rstride=1, cstride=1, cmap=cm.coolwarm, \
                                   linewidth=0, antialiased=False)
            plt.title("Error")
            plt.show(block=False)

    eval_point(STTapprox, np.array([0.2] * d), params, plotting=PLOTTING)

    # Estimate mean error by Monte Carlo sampling of the parameter space.
    DIST = RS.MultiDimDistribution([stats.uniform()] * d)
    MCstep = 100
    # Sampling
    xx = np.asarray(DIST.rvs(MCstep))
    # STT evaluation (vectorized over the sample)
    STTvals = STTapprox(xx)
    # Exact evaluation
    exact = np.asarray([f(xx[i, :], params) for i in range(MCstep)])

    # Mean abs error over the output grid
    abs_err = np.abs(STTvals - exact)
    mean_abs_err = np.mean(abs_err, axis=0)
    var_abs_err = np.var(abs_err, axis=0)

    if PLOTTING:
        import matplotlib.pyplot as plt
        from mpl_toolkits.mplot3d import Axes3D
        from matplotlib import cm
        XX = params['XX']
        YY = params['YY']
        fig = plt.figure()
        ax = fig.add_subplot(121, projection='3d')
        surf = ax.plot_surface(XX, YY, mean_abs_err, rstride=1, cstride=1,
                               cmap=cm.coolwarm, \
                               linewidth=0, antialiased=False)
        plt.title("Mean abs error")
        ax = fig.add_subplot(122, projection='3d')
        surf = ax.plot_surface(XX, YY, var_abs_err, rstride=1, cstride=1,
                               cmap=cm.coolwarm, \
                               linewidth=0, antialiased=False)
        plt.title("Variance abs error")
        plt.show(block=False)

    # Mean L2 error over the output grid, per sample
    L2_err = npla.norm(STTvals - exact, ord=2, axis=(1, 2))
    mean_L2_err = np.mean(L2_err)
    var_L2_err = np.var(L2_err)
    logging.info("TestSTTcross_2D: Mean L2 Err = %e , Variance L2 Err = %e" %
                 (mean_L2_err, var_L2_err))

    # Clean up the storage produced by the build.
    os.remove(store_file + '.pkl')
    if os.path.exists(store_file + ".pkl.old"):
        os.remove(store_file + ".pkl.old")
    os.remove(store_file + '.h5')
    if os.path.exists(store_file + ".h5.old"):
        os.remove(store_file + ".h5.old")

    print_ok("STTcross 2D")
    nsucc += 1

    print_summary("STTcross 2D", nsucc, nfail)
    return (nsucc, nfail)
def run(maxprocs, loglev=logging.WARNING):
    """Unit tests for the weighted TensorWrapper.

    Exercises construction, single/sliced/list-based indexing, on-disk
    storage and reload, index fixing/releasing, reshaping, and the
    power-of-Q ghost extension of ``TensorToolbox.TensorWrapper``.
    Each check compares the wrapper's output against a reference numpy
    array and counts the underlying function evaluations.

    :param maxprocs: number of processors (unused here; kept for a
        uniform test-runner signature — TODO confirm against siblings).
    :param loglev: logging level passed to ``logging.basicConfig``.
    :return: tuple ``(nsucc, nfail)`` with the success/failure counts.

    NOTE(review): ``print_ok``/``print_fail``/``print_summary`` are
    assumed to be defined elsewhere in this module.
    """
    logging.basicConfig(level=loglev)
    import os
    import os.path
    import pickle as pkl
    import numpy as np
    import numpy.random as npr
    import math
    import random
    from functools import reduce
    import TensorToolbox as TT

    # Remove any leftover storage files from a previous (possibly
    # aborted) run, so the storage tests start from a clean slate.
    store_location = "tw"
    if os.path.isfile(store_location + ".pkl"):
        os.remove(store_location + ".pkl")
    if os.path.isfile(store_location + ".h5"):
        os.remove(store_location + ".h5")
    if os.path.isfile(store_location + ".pkl.old"):
        os.remove(store_location + ".pkl.old")
    if os.path.isfile(store_location + ".h5.old"):
        os.remove(store_location + ".h5.old")

    testn = 0  # running test counter, used for labeling output

    ###################################################################
    # 00: Construction of the multidimensional array and the corresponding tensor wrapper
    #
    # Module-level counters shared with the nested f/test closures.
    global feval
    global nsucc
    global nfail
    feval = 0
    nsucc = 0
    nfail = 0

    shape = [2, 3, 4, 5]
    d = len(shape)
    A = np.arange(np.prod(shape), dtype=float).reshape(shape)
    W = [npr.rand(s) for s in shape]
    Aglobal = A.copy()  # Used in f in order to test fix_indices
    # Reference array: A carries the weights, while f evaluates the
    # unweighted Aglobal — the wrapper is expected to apply W itself.
    A *= reduce(np.multiply, np.ix_(*W))

    def f(X, params):
        # Tensor entry evaluator; counts calls to verify caching.
        global feval
        feval += 1
        return Aglobal[tuple(X)]

    X = [np.arange(s, dtype=int) for s in shape]
    TW = TT.TensorWrapper(f, X, params=None, W=W, dtype=A.dtype,
                          marshal_f=False)
    TW.set_active_weights(True)

    testn += 1
    print_ok(testn, "Construction")
    nsucc += 1

    def test(testn, title, idx):
        """Fetch TW[idx] on an empty cache and compare against A[idx].

        Fails if shape/values differ, or if the number of function
        evaluations differs from the number of unique entries accessed.
        """
        global feval
        global nsucc
        global nfail
        feval = 0
        TW.data = {}  # clear the wrapper's cache to force re-evaluation
        out = TW[idx]
        if np.any(A[idx].shape != out.shape) or (not np.allclose(
                A[idx], out, rtol=1e-10, atol=1e-12)):
            print_fail(testn, title,
                       msg='Different output - idx: ' + str(idx))
            nfail += 1
        elif feval != np.prod(np.unique(A[idx]).shape):
            print_fail(testn, title,
                       msg='Wrong number of function evaluations - idx: '
                       + str(idx))
            nfail += 1
        else:
            print_ok(testn, title)
            nsucc += 1

    ###################################################################
    # 01: Single address access
    #
    idx = (1, 2, 3, 4)
    feval = 0
    out = TW[idx]
    testn += 1
    if not np.isclose(A[idx], out, rtol=1e-10, atol=1e-12):
        print_fail(testn, "Single address access", msg='Different output')
        nfail += 1
    elif feval != 1:
        print_fail(testn, "Single address access",
                   msg='Wrong number of function evaluations')
        nfail += 1
    else:
        print_ok(testn, "Single address access")
        nsucc += 1

    ###################################################################
    # Storage
    testn += 1
    TW.data = {}
    TW.store_location = store_location
    TW[:, :, :, 0]  # populate part of the cache before storing
    TW.store(force=True)
    print_ok(testn, "Storage")
    nsucc += 1

    # Reload
    testn += 1
    TW = TT.load(store_location)
    TW.set_f(f, False)  # re-attach the (non-marshaled) function
    TW.set_active_weights(True)
    idx = tuple([slice(None, None, None)] * d)
    test(testn, "Reload", idx)

    # Clean up the files written by the storage test.
    if os.path.isfile(store_location + ".pkl"):
        os.remove(store_location + ".pkl")
    if os.path.isfile(store_location + ".h5"):
        os.remove(store_location + ".h5")
    if os.path.isfile(store_location + ".pkl.old"):
        os.remove(store_location + ".pkl.old")
    if os.path.isfile(store_location + ".h5.old"):
        os.remove(store_location + ".h5.old")

    ###################################################################
    # Single slice
    #
    testn += 1
    idx = (1, slice(None, None, None), 3, 4)
    test(testn, "Single slice", idx)

    ###################################################################
    # Partial slice
    #
    testn += 1
    idx = (1, 2, slice(1, 3, 1), 4)
    test(testn, "Partial slice", idx)

    ###################################################################
    # Partial stepping slice
    #
    testn += 1
    idx = (1, 2, slice(0, 4, 2), 4)
    test(testn, "Partial stepping slice", idx)

    ###################################################################
    # Multiple slice
    #
    testn += 1
    idx = (1, slice(None, None, None), 3, slice(0, 4, 2))
    test(testn, "Multiple slice", idx)

    ###################################################################
    # Full slice
    #
    testn += 1
    idx = tuple([slice(None, None, None)] * len(shape))
    test(testn, "Full slice", idx)

    ###################################################################
    # List
    #
    testn += 1
    idx = ([0, 1], [1, 2], [1, 3], [0, 4])
    test(testn, "Lists", idx)

    ###################################################################
    # Single list
    #
    testn += 1
    idx = (0, 1, [1, 3], 3)
    test(testn, "Single list", idx)

    ###################################################################
    # Double list
    #
    testn += 1
    idx = (0, [0, 2], [1, 3], 3)
    test(testn, "Double list", idx)

    ###################################################################
    # Single list slice
    #
    testn += 1
    idx = (0, [0, 2], slice(None, None, None), 3)
    test(testn, "Single list slice", idx)

    testn += 1
    idx = (0, slice(None, None, None), [0, 2], 3)
    test(testn, "Single list slice", idx)

    testn += 1
    idx = (slice(None, None, None), 0, [0, 2, 3], 3)
    test(testn, "Single list slice", idx)

    ###################################################################
    # Double list slice
    #
    testn += 1
    idx = ([0, 1], slice(None, None, None), [0, 2], 3)
    test(testn, "Double list slice", idx)

    testn += 1
    idx = (slice(None, None, None), 0, slice(None, None, None), [0, 2, 3])
    test(testn, "Double slice list", idx)

    ###################################################################
    # Lists slice
    #
    testn += 1
    idx = ([0, 1], [0, 2], slice(None, None, None), [1, 3])
    test(testn, "Lists slice", idx)

    ###################################################################
    # Fix indices
    #
    testn += 1
    fix_idxs = [0, 2]
    fix_dims = [0, 2]
    Atmp = A.copy()
    # Mirror the fixing on the reference array: pick the fixed index on
    # fixed dimensions, full slice elsewhere.
    A = A[tuple([
        fix_idxs[fix_dims.index(i)] if (i in fix_dims)
        else slice(None, None, None) for i in range(d)
    ])]
    TW.fix_indices(fix_idxs, fix_dims)
    idx = [slice(None, None, None)] * TW.ndim
    test(testn, "Fix indices", idx)

    ###################################################################
    # Release indices
    #
    testn += 1
    A = Atmp.copy()
    TW.release_indices()
    idx = [slice(None, None, None)] * TW.ndim
    test(testn, "Release indices", idx)

    ###################################################################
    # Fix indices 2
    #
    testn += 1
    fix_idxs = [0]
    fix_dims = [0]
    Atmp = A.copy()
    A = A[tuple([
        fix_idxs[fix_dims.index(i)] if (i in fix_dims)
        else slice(None, None, None) for i in range(d)
    ])]
    TW.fix_indices(fix_idxs, fix_dims)
    idx = [slice(None, None, None)] + [(0, 1)] * (TW.ndim - 1)
    test(testn, "Fix indices - second test", idx)

    ###################################################################
    # Release indices
    #
    testn += 1
    A = Atmp.copy()
    TW.release_indices()
    idx = [slice(None, None, None)] * TW.ndim
    test(testn, "Release indices", idx)

    ######################################################################################
    ######################################################################################
    ## Reshape function
    ##
    Atmp_shape = A.copy()  # Storing original array
    A = np.reshape(A, [5, 4, 3, 2])
    TW.reshape([5, 4, 3, 2])

    ###################################################################
    # Reshaped - Single slice
    #
    testn += 1
    idx = (4, slice(None, None, None), 2, 1)
    test(testn, "Reshaped - Single slice", idx)

    ###################################################################
    # Reshaped - Partial slice
    #
    testn += 1
    idx = (4, slice(1, 3, 1), 2, 1)
    test(testn, "Reshaped - Partial slice", idx)

    ###################################################################
    # Reshaped - Partial stepping slice
    #
    testn += 1
    idx = (4, slice(0, 4, 2), 2, 1)
    test(testn, "Reshaped - Partial stepping slice", idx)

    ###################################################################
    # Reshaped - Multiple slice
    #
    testn += 1
    idx = (slice(0, 4, 2), 3, slice(None, None, None), 1)
    test(testn, "Reshaped - Multiple slice", idx)

    ###################################################################
    # Reshaped - Full slice
    #
    testn += 1
    idx = tuple([slice(None, None, None)] * len(A.shape))
    test(testn, "Reshaped - Full slice", idx)

    ###################################################################
    # Reshaped - List
    #
    testn += 1
    idx = ([0, 4], [1, 3], [1, 2], [0, 1])
    test(testn, "Reshaped - Lists", idx)

    ###################################################################
    # Reshaped - Single list
    #
    testn += 1
    idx = (3, [1, 3], 1, 0)
    test(testn, "Reshaped - Single list", idx)

    ###################################################################
    # Reshaped - Double list
    #
    testn += 1
    idx = (3, [1, 3], [0, 2], 0)
    test(testn, "Reshaped - Double list", idx)

    ###################################################################
    # Reshaped - Single list slice
    #
    testn += 1
    idx = (3, slice(None, None, None), [0, 2], 0)
    test(testn, "Reshaped - Single list slice", idx)

    testn += 1
    idx = (3, [0, 2], slice(None, None, None), 0)
    test(testn, "Reshaped - Single list slice", idx)

    testn += 1
    idx = (3, [0, 2, 3], 0, slice(None, None, None))
    test(testn, "Reshaped - Single list slice", idx)

    ###################################################################
    # Reshaped - Double list slice
    #
    testn += 1
    idx = (3, [0, 2], slice(None, None, None), [0, 1])
    test(testn, "Reshaped - Double list slice", idx)

    testn += 1
    idx = (slice(None, None, None), 0, slice(None, None, None), [0, 1])
    test(testn, "Reshaped - Double slice list", idx)

    ###################################################################
    # Reshaped - Lists slice
    #
    testn += 1
    idx = ([0, 1], [0, 2], slice(None, None, None), [1, 0])
    test(testn, "Reshaped - Lists slice", idx)

    ###################################################################
    # Reshaped - Fix indices
    #
    testn += 1
    fix_idxs = [0, 2]
    fix_dims = [0, 2]
    Atmp = A.copy()
    A = A[tuple([
        fix_idxs[fix_dims.index(i)] if (i in fix_dims)
        else slice(None, None, None) for i in range(d)
    ])]
    TW.fix_indices(fix_idxs, fix_dims)
    idx = [slice(None, None, None)] * TW.ndim
    test(testn, "Reshaped - Fix indices", idx)

    ###################################################################
    # Reshaped - Release indices
    #
    testn += 1
    A = Atmp.copy()
    TW.release_indices()
    idx = [slice(None, None, None)] * TW.ndim
    test(testn, "Reshaped - Release indices", idx)

    ###################################################################
    # Reshaped - Fix indices 2
    #
    testn += 1
    fix_idxs = [0]
    fix_dims = [0]
    Atmp = A.copy()
    A = A[tuple([
        fix_idxs[fix_dims.index(i)] if (i in fix_dims)
        else slice(None, None, None) for i in range(d)
    ])]
    TW.fix_indices(fix_idxs, fix_dims)
    idx = [slice(None, None, None)] + [(0, 1)] * (TW.ndim - 1)
    test(testn, "Reshaped - Fix indices - second test", idx)

    ###################################################################
    # Reshaped - Release indices
    #
    testn += 1
    A = Atmp.copy()
    TW.release_indices()
    idx = [slice(None, None, None)] * TW.ndim
    test(testn, "Reshaped - Release indices", idx)

    ###################################################################
    # Reshaped - Restore original shape
    #
    testn += 1
    A = Atmp_shape.copy()
    TW.reset_shape()
    idx = [slice(None, None, None)] * TW.ndim
    test(testn, "Reshaped - Restore original shape", idx)
    ##
    ## Shape restored
    #############################################################################
    #############################################################################

    #############################################################################
    #############################################################################
    ## Reshape function 2
    ##
    # Second reshape suite: power-of-two total size so the array can be
    # folded into an all-2 quantics-like shape.
    shape = [2, 4, 4, 4]
    d = len(shape)
    A = np.arange(np.prod(shape), dtype=float).reshape(shape)
    W = [npr.rand(s) for s in shape]
    Aglobal = A.copy()  # Used in f in order to test fix_indices
    A *= reduce(np.multiply, np.ix_(*W))

    def f(X, params):
        global feval
        feval += 1
        return Aglobal[tuple(X)]

    X = [np.arange(s, dtype=int) for s in shape]
    TW = TT.TensorWrapper(f, X, None, W=W, dtype=A.dtype, marshal_f=False)
    TW.set_active_weights(True)

    Atmp_shape = A.copy()  # Storing original array
    newshape = [2] * int(round(np.log2(np.prod(shape))))
    A = np.reshape(A, newshape)
    TW.reshape(newshape)

    ###################################################################
    # Reshaped 2 - Single slice
    #
    testn += 1
    idx = (0, slice(None, None, None), 1, 1, 0, 1, 0)
    test(testn, "Reshaped 2 - Single slice", idx)

    ###################################################################
    # Reshaped 2 - Partial slice
    #
    testn += 1
    idx = (0, slice(0, 1, 1), 1, 1, 1, 0, 1)
    test(testn, "Reshaped 2 - Partial slice", idx)

    ###################################################################
    # Reshaped 2 - Multiple slice
    #
    testn += 1
    idx = (slice(0, 1, 1), 0, slice(None, None, None), 1, 1, 0, 1)
    test(testn, "Reshaped 2 - Multiple slice", idx)

    ###################################################################
    # Reshaped 2 - Full slice
    #
    testn += 1
    idx = tuple([slice(None, None, None)] * len(A.shape))
    test(testn, "Reshaped 2 - Full slice", idx)

    ###################################################################
    # Reshaped 2 - List
    #
    testn += 1
    idx = ([1, 0], [0, 1], [1, 0], [0, 1], [0, 0], [1, 1], [0, 1])
    test(testn, "Reshaped 2 - Lists", idx)

    ###################################################################
    # Reshaped 2 - Single list
    #
    testn += 1
    idx = (1, [0, 0], 1, 0, 0, 1, 1)
    test(testn, "Reshaped 2 - Single list", idx)

    ###################################################################
    # Reshaped 2 - Double list
    #
    testn += 1
    idx = (1, [1, 0], [0, 0], 0, 1, 0, 1)
    test(testn, "Reshaped 2 - Double list", idx)

    ###################################################################
    # Reshaped 2 - Single list slice
    #
    testn += 1
    idx = (1, slice(None, None, None), [0, 1], 0, 0, 1, 1)
    test(testn, "Reshaped 2 - Single list slice", idx)

    testn += 1
    idx = (1, [0, 0], slice(None, None, None), 0, 1, 1, 1)
    test(testn, "Reshaped 2 - Single list slice", idx)

    testn += 1
    idx = (1, [0, 1, 1], 0, slice(None, None, None), 1, 0, 1)
    test(testn, "Reshaped 2 - Single list slice", idx)

    ###################################################################
    # Reshaped 2 - Double list slice
    #
    testn += 1
    idx = (1, [0, 1], slice(None, None, None), [0, 1], 0, 0, 1)
    test(testn, "Reshaped 2 - Double list slice", idx)

    testn += 1
    idx = (slice(None, None, None), 0, slice(None, None, None),
           [0, 1], 0, 1, 1)
    test(testn, "Reshaped 2 - Double slice list", idx)

    ###################################################################
    # Reshaped 2 - Lists slice
    #
    testn += 1
    idx = ([0, 1], [0, 1], slice(None, None, None), [1, 0], [0, 0],
           [1, 1], [1, 0])
    test(testn, "Reshaped 2 - Lists slice", idx)

    ###################################################################
    # Reshaped 2 - Fix indices
    #
    # NOTE(review): the comprehension still runs over range(d) with
    # d == 4, although A now has 7 dimensions; it works here because
    # all fix_dims happen to lie in the first 4 dimensions — confirm.
    testn += 1
    fix_idxs = [0, 0, 1]
    fix_dims = [0, 3, 2]
    Atmp = A.copy()
    A = A[tuple([
        fix_idxs[fix_dims.index(i)] if (i in fix_dims)
        else slice(None, None, None) for i in range(d)
    ])]
    TW.fix_indices(fix_idxs, fix_dims)
    idx = [slice(None, None, None)] * TW.ndim
    test(testn, "Reshaped 2 - Fix indices", idx)

    ###################################################################
    # Reshaped 2 - Release indices
    #
    testn += 1
    A = Atmp.copy()
    TW.release_indices()
    idx = [slice(None, None, None)] * TW.ndim
    test(testn, "Reshaped 2 - Release indices", idx)

    ###################################################################
    # Reshaped 2 - Fix indices 2
    #
    testn += 1
    fix_idxs = [0]
    fix_dims = [0]
    Atmp = A.copy()
    A = A[tuple([
        fix_idxs[fix_dims.index(i)] if (i in fix_dims)
        else slice(None, None, None) for i in range(d)
    ])]
    TW.fix_indices(fix_idxs, fix_dims)
    idx = [slice(None, None, None)] + [(0, 1)] * (TW.ndim - 1)
    test(testn, "Reshaped 2 - Fix indices - second test", idx)

    ###################################################################
    # Reshaped 2 - Release indices
    #
    testn += 1
    A = Atmp.copy()
    TW.release_indices()
    idx = [slice(None, None, None)] * TW.ndim
    test(testn, "Reshaped 2 - Release indices", idx)

    ###################################################################
    # Reshaped 2 - Restore original shape
    #
    testn += 1
    A = Atmp_shape.copy()
    TW.reset_shape()
    idx = [slice(None, None, None)] * TW.ndim
    test(testn, "Reshaped 2 - Restore original shape", idx)
    ##
    ## Shape restored
    #############################################################################
    #############################################################################

    #############################################################################
    #############################################################################
    ## PowQ: Check functionalities for power of Q extension
    ##
    # The wrapper extends each dimension to the next power of Q; the
    # reference array Atest replicates the last entry/weight to the
    # ghost positions accordingly.
    shape = [3, 5, 5, 5]
    Q = 2
    qshape = [Q**(int(math.log(s, Q)) + 1) for s in shape]
    d = len(shape)
    W = [npr.rand(s) for s in shape]
    Wtest = [
        np.hstack((W[i], np.ones(qshape[i] - shape[i]) * W[i][-1]))
        for i in range(d)
    ]
    # Create A
    vs = [npr.random(size=shape[i]) for i in range(d)]
    A = reduce(np.multiply, np.ix_(*vs))
    # Create Atest
    vs_test = [
        np.hstack((vs[i], np.ones(qshape[i] - shape[i]) * vs[i][-1]))
        for i in range(d)
    ]
    Atest = reduce(np.multiply, np.ix_(*vs_test))
    Atest *= reduce(np.multiply, np.ix_(*Wtest))
    Aglobal = A.copy()  # Used in f in order to test fix_indices
    A *= reduce(np.multiply, np.ix_(*W))

    def f(X, params):
        global feval
        feval += 1
        return Aglobal[tuple(X)]

    def test(testn, title, idx):
        """Same checker as above, but compares against the extended
        reference array Atest instead of A."""
        global feval
        global nsucc
        global nfail
        feval = 0
        TW.data = {}
        out = TW[idx]
        if np.any(Atest[idx].shape != out.shape) or (not np.allclose(
                Atest[idx], out, rtol=1e-10, atol=1e-12)):
            print_fail(testn, title,
                       msg='Different output - idx: ' + str(idx))
            nfail += 1
        elif feval != np.prod(np.unique(Atest[idx]).shape):
            print_fail(testn, title,
                       msg='Wrong number of function evaluations - idx: '
                       + str(idx))
            nfail += 1
        else:
            print_ok(testn, title)
            nsucc += 1

    X = [np.arange(s, dtype=int) for s in shape]
    TW = TT.TensorWrapper(f, X, None, W=W, dtype=A.dtype, marshal_f=False)
    TW.set_Q(Q)
    TW.set_active_weights(True)

    ###################################################################
    # 01: Single address access
    #
    idx = (1, 2, 3, 4)
    feval = 0
    out = TW[idx]
    testn += 1
    if not np.isclose(Atest[idx], out, rtol=1e-10, atol=1e-12):
        print_fail(testn, "PowQ - Single address access",
                   msg='Different output')
        nfail += 1
    elif feval != 1:
        print_fail(testn, "PowQ - Single address access",
                   msg='Wrong number of function evaluations')
        nfail += 1
    else:
        print_ok(testn, "PowQ - Single address access")
        nsucc += 1

    ###################################################################
    # Storage
    testn += 1
    TW.data = {}
    TW.store_location = store_location
    TW[:, :, :, 0]
    TW.store(force=True)
    print_ok(testn, "PowQ - Storage")
    nsucc += 1

    # Reload
    testn += 1
    TW = TT.load(store_location)
    TW.set_f(f, False)
    TW.set_active_weights(True)
    idx = tuple([slice(None, None, None)] * d)
    test(testn, "PowQ - Reload", idx)

    if os.path.isfile(store_location + ".pkl"):
        os.remove(store_location + ".pkl")
    if os.path.isfile(store_location + ".h5"):
        os.remove(store_location + ".h5")
    if os.path.isfile(store_location + ".pkl.old"):
        os.remove(store_location + ".pkl.old")
    if os.path.isfile(store_location + ".h5.old"):
        os.remove(store_location + ".h5.old")

    ###################################################################
    # Single slice
    #
    testn += 1
    idx = (1, slice(None, None, None), 3, 4)
    test(testn, "PowQ - Single slice", idx)

    ###################################################################
    # Partial slice
    #
    testn += 1
    idx = (1, 2, slice(1, 3, 1), 4)
    test(testn, "PowQ - Partial slice", idx)

    ###################################################################
    # Partial stepping slice
    #
    testn += 1
    idx = (1, 2, slice(0, 4, 2), 4)
    test(testn, "PowQ - Partial stepping slice", idx)

    ###################################################################
    # Multiple slice
    #
    testn += 1
    idx = (1, slice(None, None, None), 3, slice(0, 4, 2))
    test(testn, "PowQ - Multiple slice", idx)

    ###################################################################
    # Full slice
    #
    testn += 1
    idx = tuple([slice(None, None, None)] * len(shape))
    test(testn, "PowQ - Full slice", idx)

    ###################################################################
    # List
    #
    testn += 1
    idx = ([0, 1], [1, 2], [1, 3], [0, 4])
    test(testn, "PowQ - Lists", idx)

    ###################################################################
    # Single list
    #
    testn += 1
    idx = (0, 1, [1, 3], 3)
    test(testn, "PowQ - Single list", idx)

    ###################################################################
    # Double list
    #
    testn += 1
    idx = (0, [0, 2], [1, 3], 3)
    test(testn, "PowQ - Double list", idx)

    ###################################################################
    # Single list slice
    #
    testn += 1
    idx = (0, [0, 2], slice(None, None, None), 3)
    test(testn, "PowQ - Single list slice", idx)

    testn += 1
    idx = (0, slice(None, None, None), [0, 2], 3)
    test(testn, "PowQ - Single list slice", idx)

    testn += 1
    idx = (slice(None, None, None), 0, [0, 2, 3], 3)
    test(testn, "PowQ - Single list slice", idx)

    ###################################################################
    # Double list slice
    #
    testn += 1
    idx = ([0, 1], slice(None, None, None), [0, 2], 3)
    test(testn, "PowQ - Double list slice", idx)

    testn += 1
    idx = (slice(None, None, None), 0, slice(None, None, None), [0, 2, 3])
    test(testn, "PowQ - Double slice list", idx)

    ###################################################################
    # Lists slice
    #
    testn += 1
    idx = ([0, 1], [0, 2], slice(None, None, None), [1, 3])
    test(testn, "PowQ - Lists slice", idx)

    ###################################################################
    # Fix indices
    #
    testn += 1
    fix_idxs = [0, 2]
    fix_dims = [0, 2]
    Atmp = Atest.copy()
    Atest = Atest[tuple([
        fix_idxs[fix_dims.index(i)] if (i in fix_dims)
        else slice(None, None, None) for i in range(d)
    ])]
    TW.fix_indices(fix_idxs, fix_dims)
    idx = [slice(None, None, None)] * TW.ndim
    test(testn, "PowQ - Fix indices", idx)

    ###################################################################
    # Release indices
    #
    testn += 1
    Atest = Atmp.copy()
    TW.release_indices()
    idx = [slice(None, None, None)] * TW.ndim
    test(testn, "PowQ - Release indices", idx)

    ###################################################################
    # Fix indices 2
    #
    testn += 1
    fix_idxs = [0]
    fix_dims = [0]
    Atmp = Atest.copy()
    Atest = Atest[tuple([
        fix_idxs[fix_dims.index(i)] if (i in fix_dims)
        else slice(None, None, None) for i in range(d)
    ])]
    TW.fix_indices(fix_idxs, fix_dims)
    idx = [slice(None, None, None)] + [(0, 1)] * (TW.ndim - 1)
    test(testn, "PowQ - Fix indices - second test", idx)

    ###################################################################
    # Release indices
    #
    testn += 1
    Atest = Atmp.copy()
    TW.release_indices()
    idx = [slice(None, None, None)] * TW.ndim
    test(testn, "PowQ - Release indices", idx)

    #############################################################################
    #############################################################################
    ## Reshaped PowQ: Check functionalities for power of Q extension
    ##
    # Fresh setup (same construction as above), then reshape the
    # Q-extended tensor into an all-Q quantics shape.
    shape = [3, 5, 5, 5]
    Q = 2
    qshape = [Q**(int(math.log(s, Q)) + 1) for s in shape]
    d = len(shape)
    W = [npr.rand(s) for s in shape]
    Wtest = [
        np.hstack((W[i], np.ones(qshape[i] - shape[i]) * W[i][-1]))
        for i in range(d)
    ]
    # Create A
    vs = [npr.random(size=shape[i]) for i in range(d)]
    A = reduce(np.multiply, np.ix_(*vs))
    # Create Atest
    vs_test = [
        np.hstack((vs[i], np.ones(qshape[i] - shape[i]) * vs[i][-1]))
        for i in range(d)
    ]
    Atest = reduce(np.multiply, np.ix_(*vs_test))
    Atest *= reduce(np.multiply, np.ix_(*Wtest))
    Aglobal = A.copy()  # Used in f in order to test fix_indices
    A *= reduce(np.multiply, np.ix_(*W))

    def f(X, params):
        global feval
        feval += 1
        return Aglobal[tuple(X)]

    def test(testn, title, idx):
        global feval
        global nsucc
        global nfail
        feval = 0
        TW.data = {}
        out = TW[idx]
        if np.any(Atest[idx].shape != out.shape) or (not np.allclose(
                Atest[idx], out, rtol=1e-10, atol=1e-12)):
            print_fail(testn, title,
                       msg='Different output - idx: ' + str(idx))
            nfail += 1
        elif feval != np.prod(np.unique(Atest[idx]).shape):
            print_fail(testn, title,
                       msg='Wrong number of function evaluations - idx: '
                       + str(idx))
            nfail += 1
        else:
            print_ok(testn, title)
            nsucc += 1

    X = [np.arange(s, dtype=int) for s in shape]
    TW = TT.TensorWrapper(f, X, None, W=W, dtype=A.dtype, marshal_f=False)
    TW.set_Q(Q)
    TW.set_active_weights(True)

    Atest_shape = Atest.copy()  # Storing original array
    newshape = [Q] * int(math.log(np.prod(qshape), Q))
    Atest = np.reshape(Atest, newshape)
    TW.reshape(newshape)

    ###################################################################
    # Reshaped PowQ - Single slice
    #
    # Random indices fill the remaining dimensions of each test pattern.
    testn += 1
    idx = (0, slice(None, None, None)) + tuple(
        [random.randint(0, Q - 1) for i in range(len(newshape) - 2)])
    test(testn, "Reshaped PowQ - Single slice", idx)

    ###################################################################
    # Reshaped PowQ - Partial slice
    #
    testn += 1
    idx = (0, slice(0, 1, 1)) + tuple(
        [random.randint(0, Q - 1) for i in range(len(newshape) - 2)])
    test(testn, "Reshaped PowQ - Partial slice", idx)

    ###################################################################
    # Reshaped PowQ - Multiple slice
    #
    testn += 1
    idx = (slice(0, 1, 1), 0, slice(None, None, None)) + tuple(
        [random.randint(0, Q - 1) for i in range(len(newshape) - 3)])
    test(testn, "Reshaped PowQ - Multiple slice", idx)

    ###################################################################
    # Reshaped PowQ - Full slice
    #
    testn += 1
    idx = tuple([slice(None, None, None)] * len(Atest.shape))
    test(testn, "Reshaped PowQ - Full slice", idx)

    ###################################################################
    # Reshaped PowQ - List
    #
    testn += 1
    nn = 2
    idx = tuple([[random.randint(0, Q - 1) for j in range(nn)]
                 for i in range(len(newshape))])
    test(testn, "Reshaped PowQ - Lists", idx)

    ###################################################################
    # Reshaped PowQ - Single list
    #
    testn += 1
    nn = 3
    idx = (1, [random.randint(0, Q - 1) for j in range(nn)]) + tuple(
        [random.randint(0, Q - 1) for i in range(len(newshape) - 2)])
    test(testn, "Reshaped PowQ - Single list", idx)

    ###################################################################
    # Reshaped PowQ - Double list
    #
    testn += 1
    nn = 2
    idx = (1, [random.randint(0, Q - 1) for j in range(nn)], [
        random.randint(0, Q - 1) for j in range(nn)
    ]) + tuple([random.randint(0, Q - 1)
                for i in range(len(newshape) - 3)])
    test(testn, "Reshaped PowQ - Double list", idx)

    ###################################################################
    # Reshaped PowQ - Single list slice
    #
    testn += 1
    nn = 2
    idx = (1, slice(None, None, None), [
        random.randint(0, Q - 1) for j in range(nn)
    ]) + tuple([random.randint(0, Q - 1)
                for i in range(len(newshape) - 3)])
    test(testn, "Reshaped PowQ - Single list slice", idx)

    testn += 1
    nn = 3
    idx = (1, [random.randint(0, Q - 1) for j in range(nn)],
           slice(None, None, None)) + tuple([
               random.randint(0, Q - 1)
               for i in range(len(newshape) - 3)
           ])
    test(testn, "Reshaped PowQ - Single list slice", idx)

    testn += 1
    nn = 3
    idx = (1, [random.randint(0, Q - 1) for j in range(nn)], 0,
           slice(None, None, None)) + tuple([
               random.randint(0, Q - 1)
               for i in range(len(newshape) - 4)
           ])
    test(testn, "Reshaped PowQ - Single list slice", idx)

    ###################################################################
    # Reshaped PowQ - Double list slice
    #
    testn += 1
    nn = 3
    idx = (1, [random.randint(0, Q - 1) for j in range(nn)],
           slice(None, None, None),
           [random.randint(0, Q - 1) for j in range(nn)]) + tuple(
               [random.randint(0, Q - 1)
                for i in range(len(newshape) - 4)])
    test(testn, "Reshaped PowQ - Double list slice", idx)

    testn += 1
    nn = 3
    idx = (slice(None, None, None), 0, slice(None, None, None), [
        random.randint(0, Q - 1) for j in range(nn)
    ]) + tuple([random.randint(0, Q - 1)
                for i in range(len(newshape) - 4)])
    test(testn, "Reshaped PowQ - Double slice list", idx)

    ###################################################################
    # Reshaped PowQ - Lists slice
    #
    testn += 1
    nn = 3
    idx = ([random.randint(0, Q - 1) for j in range(nn)],
           [random.randint(0, Q - 1) for j in range(nn)],
           slice(None, None, None)) + tuple(
               [[random.randint(0, Q - 1) for j in range(nn)]
                for i in range(len(newshape) - 3)])
    test(testn, "Reshaped PowQ - Lists slice", idx)

    ###################################################################
    # Reshaped PowQ - Fix indices
    #
    # NOTE(review): as in "Reshaped 2", range(d) spans only the first
    # d == 4 dimensions of the reshaped array — confirm intent.
    testn += 1
    fix_idxs = [0, 0, 1]
    fix_dims = [0, 3, 2]
    Atmp = Atest.copy()
    Atest = Atest[tuple([
        fix_idxs[fix_dims.index(i)] if (i in fix_dims)
        else slice(None, None, None) for i in range(d)
    ])]
    TW.fix_indices(fix_idxs, fix_dims)
    idx = [slice(None, None, None)] * TW.ndim
    test(testn, "Reshaped PowQ - Fix indices", idx)

    ###################################################################
    # Reshaped PowQ - Release indices
    #
    testn += 1
    Atest = Atmp.copy()
    TW.release_indices()
    idx = [slice(None, None, None)] * TW.ndim
    test(testn, "Reshaped PowQ - Release indices", idx)

    ###################################################################
    # Reshaped PowQ - Fix indices 2
    #
    testn += 1
    fix_idxs = [0]
    fix_dims = [0]
    Atmp = Atest.copy()
    Atest = Atest[tuple([
        fix_idxs[fix_dims.index(i)] if (i in fix_dims)
        else slice(None, None, None) for i in range(d)
    ])]
    TW.fix_indices(fix_idxs, fix_dims)
    idx = [slice(None, None, None)] + [(0, 1)] * (TW.ndim - 1)
    test(testn, "Reshaped PowQ - Fix indices - second test", idx)

    ###################################################################
    # Reshaped PowQ - Release indices
    #
    testn += 1
    Atest = Atmp.copy()
    TW.release_indices()
    idx = [slice(None, None, None)] * TW.ndim
    test(testn, "Reshaped PowQ - Release indices", idx)

    ###################################################################
    # Reshaped PowQ - Restore original shape
    #
    testn += 1
    Atest = Atest_shape.copy()
    TW.reset_ghost_shape()
    idx = [slice(None, None, None)] * TW.ndim
    test(testn, "Reshaped PowQ - Restore original shape", idx)
    ##
    ## Shape restored
    #############################################################################
    #############################################################################

    print_summary("Weighted Tensor Wrapper", nsucc, nfail)
    return (nsucc, nfail)
def RunUnitTests(maxprocs=None, loglev=logging.WARNING):
    """Runs all the unit tests.

    :param int maxprocs: If MPI support is enabled, defines how many
        processors to use.
    """
    from TensorToolbox.unittests.auxiliary import print_summary

    # Ordered list of test runners; each is invoked with the same
    # signature and returns a (successes, failures) pair.
    runners = [
        RunTestTensorWrapper,
        RunTestWeightedTensorWrapper,
        RunTestTT,
        RunTestWTT,
        RunTestQTT,
        RunTestTTcross,
        RunTestTTdmrg,
        RunTestWTTdmrg,
        RunTestTTdmrgcross,
        RunTestQTTdmrg,
        RunTestSTTcross_0D,
        RunTestSTTcross_2D,
        RunTestSTTdmrg_0D,
        # RunTestSTTdmrg_2D,  # Need to fix restarting
        RunTestSTTdmrgcross_0D,
        RunTestSQTTdmrg_0D,
    ]

    nsucc = 0
    nfail = 0
    for runner in runners:
        (ns, nf) = runner(maxprocs, loglev=loglev)
        nsucc += ns
        nfail += nf

    print_summary("TT ALL", nsucc, nfail)
    # Signal failure to the calling process (e.g. CI) via exit status.
    if nfail > 0:
        sys.exit(1)
    return (nsucc, nfail)
def run(maxprocs, PLOTTING=False, loglev=logging.WARNING):
    """Run the WTT (weighted tensor-train) algebra unit tests.

    Builds the d-dimensional Laplace-like operator both as a full matrix and
    in CP/TT format, then checks: entry-wise agreement between FULL, CP and
    TT; scalar algebra (+, -, *, /) on a WTTvec; TT sum and Hadamard product;
    Frobenius norms; TT-SVD construction; and TT rounding.

    :param maxprocs: number of processors for MPI runs — appears unused in
      this function; TODO confirm against other suites.
    :param bool PLOTTING: if True, show diagnostic plots (requires matplotlib).
    :param loglev: logging level passed to ``logging.basicConfig``.
    :return: tuple ``(nsucc, nfail)`` with the number of passed/failed checks.
    """
    # Test Weighted TT. Weights are uniform.
    logging.basicConfig(level=loglev)
    import numpy as np
    import numpy.linalg as npla
    import itertools
    import time
    import TensorToolbox as DT
    import TensorToolbox.multilinalg as mla

    if PLOTTING:
        from matplotlib import pyplot as plt

    nsucc = 0
    nfail = 0

    # Problem size: d-dimensional operator on N points per dimension.
    N = 16
    d = 3
    nrows = [N for i in range(d)]
    ncols = [N for i in range(d)]
    # 1D second-difference (Laplace-like) stencil matrix and identity.
    D = np.diag(-np.ones((N-1)),-1) + np.diag(-np.ones((N-1)),1) + np.diag(2*np.ones((N)),0)
    I = np.eye(N)
    # Full d-dimensional operator: sum of Kronecker products with D in slot i.
    Dd = np.zeros((N**d,N**d))
    for i in range(d):
        tmp = np.array([1])
        for j in range(d):
            if i != j: tmp = np.kron(tmp,I)
            else: tmp = np.kron(tmp,D)
        Dd += tmp

    if PLOTTING:
        plt.figure()
        plt.spy(Dd)
        plt.show(block=False)

    idxs = [range(N) for i in range(d)]
    MI = list(itertools.product(*idxs)) # Multi indices

    # Canonical form of n-dimentional Laplace operator
    D_flat = D.flatten()
    I_flat = I.flatten()
    # CP = np.empty((d,d,N**2),dtype=np.float64)
    CPtmp = [] # U[i][alpha,k] = U_i(alpha,k)
    for i in range(d):
        CPi = np.empty((d,N**2))
        for alpha in range(d):
            if i != alpha: CPi[alpha,:] = I_flat
            else: CPi[alpha,:] = D_flat
        CPtmp.append(CPi)
    CP = DT.Candecomp(CPtmp)

    # Let's compare Dd[i,j] with its Canonical counterpart
    T_idx = (10,9) # Index in the tensor product repr.
    # row 1 contains row multi-idx, row 2 contains col multi-idx for Tensor
    idxs = np.vstack( (np.asarray(MI[T_idx[0]]), np.asarray(MI[T_idx[1]])) )
    # Now if we take the columns of idxs we get the multi-indices for the CP.
    # Since in CP we flattened the array, compute the corresponding indices for CP.
    CP_idxs = idxs[0,:]*N + idxs[1,:]

    TT = DT.TTmat(CP,nrows=N,ncols=N)
    TT.build()

    # Single-entry agreement FULL vs CP vs TT (tolerance ~100 eps_machine).
    if np.abs(Dd[T_idx[0],T_idx[1]] - CP[CP_idxs]) < 100.*np.spacing(1) and np.abs(CP[CP_idxs] - TT[DT.idxfold(nrows,T_idx[0]),DT.idxfold(ncols,T_idx[1])]) < 100.*np.spacing(1):
        print_ok("0.1 Weighted Tensor Test: Entry comparison (pre-rounding) FULL, CP, TT")
        nsucc += 1
    else:
        print_fail("0.1 Weighted Tensor Test: Entry comparison FULL, CP, TT")
        nfail += 1

    # print(" T CP TT")
    # print("%.5f %.5f %.5f" % (Dd[T_idx[0],T_idx[1]],CP[CP_idxs],TT[DT.idxfold(nrows,T_idx[0]),DT.idxfold(ncols,T_idx[1])]))
    # print("Space Tensor: %d" % np.prod(Dd.shape))
    # print("Space CP: %d" % CP.size())
    # print("Space TT: %d" % TT.size())

    ########################################
    # Multi-Linear Algebra
    ########################################

    # Sum by scalar: constant rank-1 tensor with value 5, plus 3 -> 8.
    CPa = DT.Candecomp([5.*np.ones((1,5)),np.ones((1,6)),np.ones((1,7))])
    W = [ np.ones(5)/5., np.ones(6)/6., np.ones(7)/7.]
    TTa = DT.WTTvec(CPa,W)
    TTa.build(1e-13)
    TTb = TTa + 3.
    if np.abs(TTb[3,3,3] - 8.) < 1e-12:
        print_ok("0.2 Weighted Tensor Test: TT sum by scalar")
        nsucc += 1
    else:
        print_fail("0.2 Weighted Tensor Test: TT sum by scalar", "TT[idx] + b = %e, Expected = %e" % (TTb[3,3,3],8.))
        nfail += 1

    # Diff by scalar: 5 - 3 -> 2.
    CPa = DT.Candecomp([5.*np.ones((1,5)),np.ones((1,6)),np.ones((1,7))])
    W = [ np.ones(5)/5., np.ones(6)/6., np.ones(7)/7.]
    TTa = DT.WTTvec(CPa,W)
    TTa.build(1e-13)
    TTb = TTa - 3.
    if np.abs(TTb[3,3,3] - 2.) < 1e-12:
        print_ok("0.2 Weighted Tensor Test: TT diff by scalar")
        nsucc += 1
    else:
        print_fail("0.2 Weighted Tensor Test: TT diff by scalar", "TT[idx] + b = %e, Expected = %e" % (TTb[3,3,3],2.))
        nfail += 1

    # Mul by scalar: 5 * 3 -> 15.
    CPa = DT.Candecomp([5.*np.ones((1,5)),np.ones((1,6)),np.ones((1,7))])
    W = [ np.ones(5)/5., np.ones(6)/6., np.ones(7)/7.]
    TTa = DT.WTTvec(CPa,W)
    TTa.build(1e-13)
    TTb = TTa * 3.
    if np.abs(TTb[3,3,3] - 15.) < 1e-12:
        print_ok("0.2 Weighted Tensor Test: TT mul by scalar")
        nsucc += 1
    else:
        print_fail("0.2 Weighted Tensor Test: TT mul by scalar", "TT[idx] + b = %e, Expected = %e" % (TTb[3,3,3],15.))
        nfail += 1

    # Div by scalar: 15 / 3 -> 5.
    CPa = DT.Candecomp([15.*np.ones((1,5)),np.ones((1,6)),np.ones((1,7))])
    W = [ np.ones(5)/5., np.ones(6)/6., np.ones(7)/7.]
    TTa = DT.WTTvec(CPa,W)
    TTa.build(1e-13)
    TTb = TTa / 3.
    if np.abs(TTb[3,3,3] - 5.) < 1e-12:
        print_ok("0.2 Weighted Tensor Test: TT div by scalar")
        nsucc += 1
    else:
        print_fail("0.2 Weighted Tensor Test: TT div by scalar", "TT[idx] + b = %e, Expected = %e" % (TTb[3,3,3],5.))
        nfail += 1

    # Sum
    # NOTE(review): the next two checks compare a *signed* difference against
    # the tolerance (no np.abs), so a large negative error would still pass --
    # confirm whether abs() was intended.
    C = TT + TT
    if C[DT.idxfold(nrows,T_idx[0]),DT.idxfold(ncols,T_idx[1])] - 2. * Dd[T_idx[0],T_idx[1]] <= 2e2 * np.spacing(1):
        print_ok("0.2 Weighted Tensor Test: TT sum")
        nsucc += 1
    else:
        print_fail("0.2 Weighted Tensor Test: TT sum", "TT[idx] + TT[idx] = %.5f" % C[DT.idxfold(nrows,T_idx[0]),DT.idxfold(ncols,T_idx[1])])
        nfail += 1

    # Hadamard (entry-wise) product.
    C = TT * TT
    if C[DT.idxfold(nrows,T_idx[0]),DT.idxfold(ncols,T_idx[1])] - Dd[T_idx[0],T_idx[1]]**2. <= 10.*np.spacing(1):
        print_ok("0.3 Weighted Tensor Test: TT mul")
        nsucc += 1
    else:
        print_fail("0.3 Weighted Tensor Test: TT mul", "TT[idx] * TT[idx] = %.5f" % C[DT.idxfold(nrows,T_idx[0]),DT.idxfold(ncols,T_idx[1])])
        nfail += 1

    # C *= (C+TT)
    # if C[DT.idxfold(nrows,T_idx[0]),DT.idxfold(ncols,T_idx[1])] == Dd[T_idx[0],T_idx[1]]**2. * (Dd[T_idx[0],T_idx[1]]**2.+Dd[T_idx[0],T_idx[1]]):
    #     print_ok("0.4 Weighted Tensor Test: TT operations")
    # else:
    #     print_fail("0.4 Weighted Tensor Test: TT operations", "(TT*TT)*(TT*TT+TT) = %.5f" % C[DT.idxfold(nrows,T_idx[0]),DT.idxfold(ncols,T_idx[1])])

    # Frobenius norm agreement FULL vs TT (pre-rounding).
    if np.abs(npla.norm(Dd,ord='fro')-mla.norm(TT,ord='fro')) < TT.size() * 100.*np.spacing(1):
        print_ok("0.5 Weighted Tensor Test: Frobenius norm (pre-rounding) FULL, TT")
        nsucc += 1
    else:
        # NOTE(review): the failure message calls DT.norm while the check uses
        # mla.norm -- presumably both exist; verify DT.norm is defined.
        print_fail("0.5 Weighted Tensor Test: Frobenius norm (pre-rounding) FULL, TT",
                   " T TT\n"\
                   "Frobenius norm %.5f %.5f" % (npla.norm(Dd,ord='fro'), DT.norm(TT,ord='fro')))
        nfail += 1

    #######################################
    # Check TT-SVD
    #######################################

    # Contruct tensor form of Dd
    Dd_flat = np.zeros((N**(2*d)))
    for i in range(d):
        tmp = np.array([1])
        for j in range(d):
            if i != j: tmp = np.kron(tmp,I_flat)
            else: tmp = np.kron(tmp,D_flat)
        Dd_flat += tmp
    Dd_tensor= Dd_flat.reshape([N**2 for j in range(d)])
    TT_tensor = TT.to_tensor()

    # From Dd_tensor obtain a TT representation with accuracy eps
    eps = 0.001
    TT_svd = DT.TTmat(Dd_tensor,nrows=N,ncols=N)
    TT_svd.build(eps=eps)

    if np.abs(Dd[T_idx[0],T_idx[1]] - TT_svd[DT.idxfold(nrows,T_idx[0]),DT.idxfold(ncols,T_idx[1])]) < d * 100.*np.spacing(1):
        print_ok("0.6 Weighted Tensor Test: Entry comparison FULL, TT-svd")
        nsucc += 1
    else:
        print_fail("0.6 Weighted Tensor Test: Entry comparison FULL, TT-svd"," T - TT-svd = %e" % np.abs(Dd[T_idx[0],T_idx[1]] - TT_svd[DT.idxfold(nrows,T_idx[0]),DT.idxfold(ncols,T_idx[1])]))
        nfail += 1

    Dd_norm = npla.norm(Dd,ord='fro')
    TT_svd_norm = mla.norm(TT_svd,ord='fro')
    if np.abs(Dd_norm - TT_svd_norm) < eps * Dd_norm:
        print_ok("0.6 Weighted Tensor Test: Frobenius norm FULL, TT-svd")
        nsucc += 1
    else:
        print_fail("0.6 Weighted Tensor Test: Frobenius norm FULL, TT-svd",
                   " T TT_svd\n"\
                   "Frobenius norm %.5f %.5f" % (npla.norm(Dd,ord='fro'), mla.norm(TT_svd,ord='fro')))
        nfail += 1

    #######################################
    # Check TT-SVD with kron prod
    #######################################
    # Contruct tensor form of Dd
    Dd = np.zeros((N**d,N**d))
    for i in range(d):
        tmp = np.array([1])
        for j in range(d):
            if i != j: tmp = np.kron(tmp,I)
            else: tmp = np.kron(tmp,D)
        Dd += tmp
    # Reshape the full Kronecker-product matrix into its TT-tensor layout.
    Dd_tensor = DT.matkron_to_mattensor(Dd,[N for i in range(d)],[N for i in range(d)])
    TT_tensor = TT.to_tensor()

    # From Dd_tensor obtain a TT representation with accuracy eps
    eps = 0.001
    TT_svd = DT.TTmat(Dd_tensor,nrows=N,ncols=N)
    TT_svd.build(eps=eps)

    if np.abs(Dd[T_idx[0],T_idx[1]] - TT_svd[DT.idxfold(nrows,T_idx[0]),DT.idxfold(ncols,T_idx[1])]) < d * 100.*np.spacing(1):
        print_ok("0.7 Weighted Tensor Test: Entry comparison FULL, TT-svd-kron")
        nsucc += 1
    else:
        print_fail("0.7 Weighted Tensor Test: Entry comparison FULL, TT-svd-kron", " T - TT-svd = %e" % np.abs(Dd[T_idx[0],T_idx[1]]-TT_svd[DT.idxfold(nrows,T_idx[0]),DT.idxfold(ncols,T_idx[1])]))
        nfail += 1

    Dd_norm = npla.norm(Dd,ord='fro')
    TT_svd_norm = mla.norm(TT_svd,ord='fro')
    if np.abs(Dd_norm - TT_svd_norm) < eps * Dd_norm:
        print_ok("0.7 Weighted Tensor Test: Frobenius norm FULL, TT-svd-kron")
        nsucc += 1
    else:
        print_fail("0.7 Weighted Tensor Test: Frobenius norm FULL, TT-svd-kron",
                   " T TT_svd\n"\
                   "Frobenius norm %.5f %.5f" % (npla.norm(Dd,ord='fro'), mla.norm(TT_svd,ord='fro')))
        nfail += 1

    #######################################
    # Check TT-rounding
    #######################################
    TT_round = TT.copy()
    eps = 0.001
    TT_round.rounding(eps)

    # Rounded TT should still agree with the TT-SVD representation entry-wise.
    if np.abs(TT_round[DT.idxfold(nrows,T_idx[0]),DT.idxfold(ncols,T_idx[1])] - TT_svd[DT.idxfold(nrows,T_idx[0]),DT.idxfold(ncols,T_idx[1])]) < d * 100.*np.spacing(1):
        print_ok("0.8 Weighted Tensor Test: Entry comparison (post-rounding) TT-svd, TT-round")
        nsucc += 1
    else:
        print_fail("0.8 Weighted Tensor Test: Entry comparison (post-rounding) TT-svd, TT-round", " T-svd - TT-round = %e" % np.abs(TT_svd[DT.idxfold(nrows,T_idx[0]),DT.idxfold(ncols,T_idx[1])] - TT_round[DT.idxfold(nrows,T_idx[0]),DT.idxfold(ncols,T_idx[1])]))
        nfail += 1

    Dd_norm = npla.norm(Dd,ord='fro')
    TT_svd_norm = mla.norm(TT_svd,ord='fro')
    TT_round_norm = mla.norm(TT_round,ord='fro')
    if np.abs(Dd_norm - TT_svd_norm) < eps * Dd_norm and np.abs(TT_svd_norm - TT_round_norm) < eps * Dd_norm:
        print_ok("0.8 Weighted Tensor Test: Frobenius norm (post-rounding) FULL, TT-svd, TT-round")
        nsucc += 1
    else:
        print_fail("0.8 Weighted Tensor Test: Frobenius norm (post-rounding) FULL, TT-svd, TT-round",
                   " T TT_svd TT_rounding\n"\
                   "Frobenius norm %.5f %.5f %.5f" % (Dd_norm, TT_svd_norm, TT_round_norm))
        nfail += 1

    print_summary("WTT Algebra", nsucc, nfail)
    return (nsucc,nfail)
def run(maxprocs, PLOTTING=False, loglev=logging.WARNING):
    """Run the TT matrix-vector product unit tests.

    Test 2.1 builds the 2-D finite-difference Laplace operator both as a full
    matrix and as a TTmat, applies each to a separable sin-based input vector
    and compares the results. Test 2.2 repeats the comparison with a random
    TT matrix/vector pair with non-uniform mode sizes.

    :param maxprocs: number of processors for MPI runs — appears unused in
      this function; TODO confirm against other suites.
    :param bool PLOTTING: if True, show 3-D surface plots of the fields.
    :param loglev: logging level passed to ``logging.basicConfig``.
    :return: tuple ``(nsucc, nfail)`` with the number of passed/failed checks.
    """
    logging.basicConfig(level=loglev)
    import numpy as np
    import numpy.linalg as npla
    import itertools
    import time
    import TensorToolbox as DT
    import TensorToolbox.multilinalg as mla

    if PLOTTING:
        from matplotlib import pyplot as plt

    nsucc = 0
    nfail = 0

    #####################################################################################
    # Test matrix-vector product by computing the matrix-vector product
    #####################################################################################
    span = np.array([0.,1.])
    d = 2
    N = 16
    h = 1/float(N-1)
    eps = 1e-10

    # sys.stdout.write("Matrix-vector: Laplace N=%4d , d=%3d [START] \n" % (N,d))
    # sys.stdout.flush()

    # Construct 2D Laplace (with 2nd order finite diff)
    D = -1./h**2. * ( np.diag(np.ones((N-1)),-1) + np.diag(np.ones((N-1)),1) + np.diag(-2.*np.ones((N)),0) )
    # One-sided stencils at the two boundary rows.
    D[0,0:3] = np.array([1./(3.*h**2.),-2./(3.*h**2.),1./(3.*h**2.)])
    D[-1,-3:] = -np.array([1./(3.*h**2.),-2./(3.*h**2.),1./(3.*h**2.)])
    I = np.eye(N)
    # Full operator: sum over i of kron products with D in the i-th slot.
    FULL_LAP = np.zeros((N**d,N**d))
    for i in range(d):
        tmp = np.array([[1.]])
        for j in range(d):
            if i != j: tmp = np.kron(tmp,I)
            else: tmp = np.kron(tmp,D)
        FULL_LAP += tmp

    # Construction of TT Laplace operator
    CPtmp = []
    # D = -1./h**2. * ( np.diag(np.ones((N-1)),-1) + np.diag(np.ones((N-1)),1) + np.diag(-2.*np.ones((N)),0) )
    # I = np.eye(N)
    D_flat = D.flatten()
    I_flat = I.flatten()
    for i in range(d):
        CPi = np.empty((d,N**2))
        for alpha in range(d):
            if i != alpha: CPi[alpha,:] = I_flat
            else: CPi[alpha,:] = D_flat
        CPtmp.append(CPi)

    CP_lap = DT.Candecomp(CPtmp)
    TT_LAP = DT.TTmat(CP_lap,nrows=N,ncols=N)
    TT_LAP.build(eps)
    TT_LAP.rounding(eps)
    # Drop references so the intermediate CP storage can be reclaimed.
    CPtmp = None
    CP_lap = None

    # Construct input vector
    X = np.linspace(span[0],span[1],N)
    SIN = np.sin(X)
    I = np.ones((N))
    # Separable input: sum over i of kron products with SIN in the i-th slot.
    FULL_SIN = np.zeros((N**d))
    for i in range(d):
        tmp = np.array([1.])
        for j in range(d):
            if i != j: tmp = np.kron(tmp,I)
            else: tmp = np.kron(tmp,SIN)
        FULL_SIN += tmp

    if PLOTTING:
        from mpl_toolkits.mplot3d import Axes3D
        from matplotlib import cm
        (XX,YY) = np.meshgrid(X,X)
        fig = plt.figure()
        if d == 2:
            # Plot function
            ax = fig.add_subplot(221,projection='3d')
            ax.plot_surface(XX,YY,FULL_SIN.reshape((N,N)),rstride=1, cstride=1, cmap=cm.coolwarm, linewidth=0, antialiased=False)
            plt.show(block=False)

    # Construct TT input vector
    CPtmp = []
    for i in range(d):
        CPi = np.empty((d,N))
        for alpha in range(d):
            if i != alpha: CPi[alpha,:] = I
            else: CPi[alpha,:] = SIN
        CPtmp.append(CPi)

    CP_SIN = DT.Candecomp(CPtmp)
    TT_SIN = DT.TTvec(CP_SIN)
    TT_SIN.build()
    TT_SIN.rounding(eps)

    if PLOTTING and d == 2:
        # Plot function
        ax = fig.add_subplot(222,projection='3d')
        ax.plot_surface(XX,YY,TT_SIN.to_tensor(),rstride=1, cstride=1, cmap=cm.coolwarm, linewidth=0, antialiased=False)
        plt.show(block=False)

    # Apply full laplacian
    FULL_RES = np.dot(FULL_LAP,FULL_SIN)
    if PLOTTING and d == 2:
        # Plot function
        ax = fig.add_subplot(223,projection='3d')
        ax.plot_surface(XX,YY,FULL_RES.reshape((N,N)),rstride=1, cstride=1, cmap=cm.coolwarm, linewidth=0, antialiased=False)
        plt.show(block=False)

    # Apply TT laplacian
    TT_RES = mla.dot(TT_LAP,TT_SIN)
    if PLOTTING and d == 2:
        # Plot function
        ax = fig.add_subplot(224,projection='3d')
        ax.plot_surface(XX,YY,TT_RES.to_tensor(),rstride=1, cstride=1, cmap=cm.coolwarm, linewidth=0, antialiased=False)
        plt.show(block=False)

    # Check results
    if not np.allclose(FULL_RES,TT_RES.to_tensor().flatten()):
        print_fail("2.1 Matrix-vector: Laplace N=%4d , d=%3d" % (N,d))
        nfail += 1
    else:
        print_ok("2.1 Matrix-vector: Laplace N=%4d , d=%3d" % (N,d))
        nsucc += 1

    #####################################################################################
    # Test matrix-vector product by computing the matrix-vector product of randomly generated input
    #####################################################################################
    span = np.array([0.,1.])
    d = 3
    # Non-square, non-uniform mode sizes to exercise general index handling.
    nrows = [16,20,24]
    ncols = [16,12,14]
    if isinstance(nrows,int): nrows = [nrows for i in range(d)]
    if isinstance(ncols,int): ncols = [ncols for i in range(d)]
    eps = 1e-10

    # sys.stdout.write("Matrix-vector: Random\n nrows=[%s],\n ncols=[%s], d=%3d [START] \n" % (','.join(map(str,nrows)),','.join(map(str,ncols)),d))
    # sys.stdout.flush()

    # Construction of TT random matrix
    TT_RAND = DT.randmat(d,nrows,ncols)

    # Construct FULL random tensor
    FULL_RAND = TT_RAND.to_tensor()
    import itertools
    # Interleave (row_i, col_i) sizes, then permute all row axes first and all
    # column axes second, so the final reshape yields the flat (rows x cols)
    # matrix matching the TT-matrix index ordering.
    rowcol = list(itertools.chain(*[[ri,ci] for (ri,ci) in zip(nrows,ncols)]))
    FULL_RAND = np.reshape(FULL_RAND,rowcol)
    idxswap = list(range(0,2*d,2))
    idxswap.extend(range(1,2*d,2))
    FULL_RAND = np.transpose(FULL_RAND,axes=idxswap)
    FULL_RAND = np.reshape(FULL_RAND,(np.prod(nrows),np.prod(ncols)))

    # Construct TT random vector
    TT_VEC = DT.randvec(d,ncols)

    # Construct FULL random vector
    FULL_VEC = TT_VEC.to_tensor().flatten()

    # Apply TT
    TT_RES = mla.dot(TT_RAND,TT_VEC)

    # Apply FULL
    FULL_RES = np.dot(FULL_RAND,FULL_VEC)

    # Check results
    # NOTE(review): the message reports N, which still holds the value 16 from
    # the previous test, not the sizes of this one -- confirm intent.
    if not np.allclose(FULL_RES,TT_RES.to_tensor().flatten()):
        print_fail("2.2 Matrix-vector: Random N=%4d , d=%3d" % (N,d),'')
        nfail += 1
    else:
        print_ok("2.2 Matrix-vector: Random N=%4d , d=%3d" % (N,d))
        nsucc += 1

    print_summary("TT Matrix-Vector", nsucc, nfail)
    return (nsucc,nfail)
def run(maxprocs, PLOTTING=False, loglev=logging.WARNING):
    """Run the QTT (quantics tensor-train) unit tests.

    Covers: index folding/unfolding helpers (TEST 1); QTT compression of
    exponential, trigonometric, polynomial and rational 1-D vectors
    (TESTS 2.x); QTT construction of d-dimensional Laplace operators
    (TESTS 3.x); and solution of the Dirichlet-Poisson problem with full
    matrices and with QTT operators via Conjugate-Gradient (TESTS 4.x).

    :param maxprocs: number of processors for MPI runs — appears unused in
      this function; TODO confirm against other suites.
    :param bool PLOTTING: if True, show diagnostic plots.
    :param loglev: logging level passed to ``logging.basicConfig``.
    :return: tuple ``(nsucc, nfail)`` with the number of passed/failed checks.
    """
    import numpy.linalg as npla

    logging.basicConfig(level=loglev)

    if PLOTTING:
        from matplotlib import pyplot as plt

    nsucc = 0
    nfail = 0

    ################ TEST 1 ###########################
    # Test folding/unfolding index function
    sys.stdout.write("Test folding/unfolding index function\r")
    sys.stdout.flush()
    dlist = (4, 2, 8)
    base = 2
    # Folded shape: log_base(prod(dlist)) axes of size `base`.
    dfold = [base for i in range(int(np.log(np.prod(dlist)) / np.log(base)))]
    A = np.arange(64).reshape(dlist)
    Aflat = A.flatten()
    Arsh = A.reshape(dfold)
    test = True
    err = []
    # Exhaustively check that unfold followed by fold is the identity mapping.
    for i in range(dlist[0]):
        for j in range(dlist[1]):
            for k in range(dlist[2]):
                idxs = (i, j, k)
                val = (A[idxs] == Aflat[DT.idxunfold(dlist, idxs)]
                       and A[idxs] == Arsh[DT.idxfold(dfold, DT.idxunfold(dlist, idxs))])
                if not val:
                    err.append(idxs)
                    test = False
    if test:
        print_ok("Test folding/unfolding index function")
        nsucc += 1
    else:
        print_fail("Test folding/unfolding index function")
        nfail += 1

    ################ TEST 2.1 ###########################
    # Test exponential N-dimensional vector (2^6 points)
    sys.stdout.write("Test exponential N-dimensional vector (2^6 points)\r")
    sys.stdout.flush()
    z = 2.
    q = 2
    L = 6
    N = q**L
    X = z**np.arange(N)
    TT = DT.QTTvec(X)
    TT.build()
    # A pure exponential has exact QTT rank 1.
    if TT.ranks() == [1 for i in range(L + 1)]:
        print_ok("Test exponential N-dimensional vector (2^6 points)")
        nsucc += 1
    else:
        print_fail("Test exponential N-dimensional vector (2^6 points)")
        nfail += 1

    ################ TEST 2.1b ###########################
    # Test exponential N-dimensional vector (28 points)
    sys.stdout.write("Test exponential N-dimensional vector (28 points)\r")
    sys.stdout.flush()
    z = 2.
    N = 28
    X = z**np.arange(N)
    eps = 1e-6
    TT = DT.QTTvec(X)
    TT.build(eps)
    L2err = npla.norm(TT.to_tensor() - X)
    if L2err <= eps:
        print_ok("Test exponential N-dimensional vector (28 points)")
        nsucc += 1
    else:
        print_fail(
            "Test exponential N-dimensional vector (28 points): L2err=%e" % L2err)
        nfail += 1

    ################ TEST 2.2 ###########################
    # Test sum of exponential N-dimensional vector
    sys.stdout.write("Test sum of exponential N-dimensional vector\r")
    sys.stdout.flush()
    import numpy.random as npr
    R = 3
    z = npr.rand(R)
    c = npr.rand(R)
    q = 2
    L = 8
    N = q**L
    # X[n] = sum_r c_r * z_r^n : rank at most R in QTT format.
    X = np.dot(c, np.tile(z, (N, 1)).T**np.tile(np.arange(N), (R, 1)))
    TT = DT.QTTvec(X)
    TT.build()
    if np.max(TT.ranks()) <= R:
        print_ok("Test sum of exponential N-dimensional vector")
        nsucc += 1
    else:
        print_fail("Test sum of exponential N-dimensional vector")
        nfail += 1

    ################ TEST 2.3 ###########################
    # Test sum of trigonometric N-dimensional vector
    sys.stdout.write("Test sum of trigonometric N-dimensional vector\r")
    sys.stdout.flush()
    import numpy.random as npr
    R = 3
    a = npr.rand(R)
    c = npr.rand(R)
    q = 2
    L = 8
    N = q**L
    # NOTE(review): `a` is drawn but never used; the formula below reuses `z`
    # from TEST 2.2 -- presumably `a` was the intended frequency vector.
    X = np.dot(c, np.sin(np.tile(z, (N, 1)).T * np.tile(np.arange(N), (R, 1))))
    TT = DT.QTTvec(X)
    TT.build()
    # Each sin term contributes QTT rank at most 2.
    if np.max(TT.ranks()) <= 2 * R:
        print_ok("Test sum of trigonometric N-dimensional vector")
        nsucc += 1
    else:
        print_fail("Test sum of trigonometric N-dimensional vector")
        nfail += 1

    ################ TEST 2.4 ###########################
    # Test sum of exponential-trigonometric N-dimensional vector
    sys.stdout.write(
        "Test sum of exponential-trigonometric N-dimensional vector\r")
    sys.stdout.flush()
    import numpy.random as npr
    R = 3
    a = npr.rand(R)
    z = npr.rand(R)
    c = npr.rand(R)
    q = 2
    L = 8
    N = q**L
    X1 = np.tile(z, (N, 1)).T**np.tile(np.arange(N), (R, 1))
    X2 = np.sin(np.tile(z, (N, 1)).T * np.tile(np.arange(N), (R, 1)))
    X = np.dot(c, X1 * X2)
    TT = DT.QTTvec(X)
    TT.build()
    if np.max(TT.ranks()) <= 2 * R:
        print_ok("Test sum of exponential-trigonometric N-dimensional vector")
        nsucc += 1
    else:
        print_fail(
            "Test sum of exponential-trigonometric N-dimensional vector")
        nfail += 1

    ################ TEST 2.4 ###########################
    # Test sum of exponential-trigonometric N-dimensional vector
    # NOTE(review): section number duplicates the previous test; this one
    # actually tests a Chebyshev polynomial vector.
    sys.stdout.write("Test Chebyshev polynomial vector\r")
    sys.stdout.flush()
    from SpectralToolbox import Spectral1D as S1D
    P = S1D.Poly1D(S1D.JACOBI, [-0.5, -0.5])
    q = 2
    L = 8
    N = q**L
    (x, w) = P.GaussQuadrature(N - 1)
    X = P.GradEvaluate(x, N - 1, 0).flatten()
    TT = DT.QTTvec(X)
    TT.build()
    if np.max(TT.ranks()) <= 2:
        print_ok("Test Chebyshev polynomial vector")
        nsucc += 1
    else:
        print_fail("Test Chebyshev polynomial vector")
        nfail += 1

    ################ TEST 2.5 ###########################
    # Test N-dimensional vector
    sys.stdout.write("Test generic polynomial equidistant vector\r")
    sys.stdout.flush()
    from SpectralToolbox import Spectral1D as S1D
    import numpy.random as npr
    R = 100
    c = npr.rand(R + 1) - 0.5
    q = 2
    L = 16
    N = q**L
    x = np.linspace(-1, 1, N)
    # Random degree-R polynomial sampled on an equidistant grid.
    X = np.dot(c, np.tile(x, (R + 1, 1))**np.tile(np.arange(R + 1), (N, 1)).T)
    TT = DT.QTTvec(X)
    TT.build(eps=1e-6)
    if np.max(TT.ranks()) <= R + 1:
        print_ok("Test generic polynomial (ord=%d) equidistant vector" % R)
        nsucc += 1
    else:
        print_fail("Test generic polynomial (ord=%d) equidistant vector" % R)
        nfail += 1

    ################ TEST 2.6 ###########################
    # Test N-dimensional vector
    sys.stdout.write("Test 1/(1+25x^2) Cheb vector\r")
    sys.stdout.flush()
    TT_eps = 1e-6
    from SpectralToolbox import Spectral1D as S1D
    P = S1D.Poly1D(S1D.JACOBI, [-0.5, -0.5])
    q = 2
    L = 16
    N = q**L
    (x, w) = P.GaussQuadrature(N - 1)
    # Runge function on Chebyshev-Gauss points.
    X = 1. / (1. + 25. * x**2.)
    TT = DT.QTTvec(X)
    TT.build(eps=1e-6)
    import numpy.linalg as npla
    V = P.GradVandermonde1D(x, 60, 0)
    # NOTE(review): npla.lstsq without an explicit rcond argument is
    # deprecated in modern NumPy -- confirm target NumPy version.
    (xhat, res, rnk, s) = npla.lstsq(V, X)  # Polynomial approximation is better
    # This test is informational only: it always reports OK with diagnostics.
    print_ok(
        "Test 1/(1+25x^2) Cheb vector: Max-rank = %d, Size = %d, Poly-int res = %e"
        % (np.max(TT.ranks()), TT.size(), res))
    nsucc += 1

    # ################ TEST 2.7 ###########################
    # # Test discontinuos function N-dimensional vector
    # sys.stdout.write("Test discontinuous vector\r")
    # sys.stdout.flush()
    # TT_eps = 1e-6
    # from SpectralToolbox import Spectral1D as S1D
    # P = S1D.Poly1D(S1D.JACOBI,[-0.5,-0.5])
    # q = 2
    # L = 16
    # N = q**L
    # (x,w) = P.GaussQuadrature(N-1)
    # X = (x<-0.1).astype(float) - (x>0.1).astype(float)
    # TT = DT.QTTvec(X,q,eps=1e-6)
    # import numpy.linalg as npla
    # V = P.GradVandermonde1D(x,TT.size(),0)
    # (xhat,res,rnk,s) = npla.lstsq(V,X) # Polynomial approximation is better
    # print_ok("Test discontinuous vector: Max-rank = %d, Size = %d, Eps = %e, Poly-int res = %e" % (np.max(TT.ranks()),TT.size(),TT_eps,res))

    ################# TEST 3.1 ##########################
    # Test d-dimensional Laplace operator
    # Scaling of storage for d-dimensional Laplace operator:
    # 1) Full tensor product: N^(2d)
    # 2) Sparse tensor product: ~ (3N)^d
    # 3) QTT format: 1D -> max-rank = 3: ~ 3*4*3*log2(N)
    #    dD -> max-rank = 4: ~ d*4*4*4*log2(N)
    d = 4
    span = np.array([0., 1.])
    q = 2
    L = 5
    N = q**L
    h = 1 / float(N - 1)
    TT_round = 1e-13
    D = -1. / h**2. * (np.diag(np.ones((N - 1)), -1) + np.diag(np.ones(
        (N - 1)), 1) + np.diag(-2. * np.ones((N)), 0))
    #D[0,0:2] = np.array([1.,0.])
    #D[-1,-2:] = np.array([0.,1.])
    D_tensor = DT.matkron_to_mattensor(D, nrows=N, ncols=N)
    TT_D = DT.QTTmat(D_tensor, base=q, nrows=N, ncols=N)
    TT_D.build(eps=TT_round)
    I = np.eye(N)
    I_tensor = DT.matkron_to_mattensor(I, nrows=N, ncols=N)
    TT_I = DT.QTTmat(I_tensor, base=q, nrows=N, ncols=N)
    TT_I.build(eps=TT_round)
    # Assemble sum_i (I x ... x D x ... x I) directly in QTT format.
    tt_list = []
    for i in range(d):
        if i == 0:
            tmp = TT_D.copy()
        else:
            tmp = TT_I.copy()
        for j in range(1, d):
            if i == j:
                tmp.kron(TT_D)
            else:
                tmp.kron(TT_I)
        tt_list.append(tmp)
    TT_Dxy = np.sum(tt_list).rounding(TT_round)

    # Exhaustive entry check is only feasible for small 2-D cases
    # (skipped with the current d = 4, N = 32 settings).
    if d == 2 and N <= 8:
        sys.stdout.write("Test 2-dimensional laplace from kron of 1D QTTmat\r")
        sys.stdout.flush()
        Dd = np.zeros((N**d, N**d))
        for i in range(d):
            tmp = np.array([1])
            for j in range(d):
                if i != j:
                    tmp = np.kron(tmp, I)
                else:
                    tmp = np.kron(tmp, D)
            Dd += tmp
        # Check equality with Dd
        nrows = [N for i in range(d)]
        ncols = [N for i in range(d)]
        err = []
        test = True
        for i in range(N**d):
            for j in range(N**d):
                sys.stdout.write("i = %d, j = %d \r" % (i, j))
                sys.stdout.flush()
                if np.abs(Dd[i, j] - TT_Dxy[DT.idxfold(nrows, i),
                                            DT.idxfold(ncols, j)]) > TT_round:
                    err.append((i, j))
                    test = False
        if test:
            print_ok("Test 2-dimensional laplace from kron of 1D QTTmat")
            nsucc += 1
        else:
            print_fail("Test 2-dimensional laplace from kron of 1D QTTmat")
            nfail += 1

    ################# TEST 3.2 ######################################
    # Test 2-dimensional Laplace operator from full tensor product
    if d == 2 and N <= 8:
        sys.stdout.write("Test 2-dimensional laplace from full kron product\r")
        sys.stdout.flush()
        Dd = np.zeros((N**d, N**d))
        for i in range(d):
            tmp = np.array([1])
            for j in range(d):
                if i != j:
                    tmp = np.kron(tmp, I)
                else:
                    tmp = np.kron(tmp, D)
            Dd += tmp
        Dd_tensor = DT.matkron_to_mattensor(Dd,
                                            nrows=[N for i in range(d)],
                                            ncols=[N for i in range(d)])
        TT_Dxykron = DT.QTTmat(Dd_tensor,
                               base=q,
                               nrows=[N for i in range(d)],
                               ncols=[N for i in range(d)])
        TT_Dxykron.build()
        # Check equality with Dd
        nrows = [N for i in range(d)]
        ncols = [N for i in range(d)]
        err = []
        test = True
        for i in range(N**d):
            for j in range(N**d):
                sys.stdout.write("i = %d, j = %d \r" % (i, j))
                sys.stdout.flush()
                if np.abs(Dd[i, j] - TT_Dxykron[DT.idxfold(nrows, i),
                                                DT.idxfold(ncols, j)]) > TT_round:
                    err.append((i, j))
                    test = False
        if test:
            print_ok("Test 2-dimensional laplace from full kron product")
            nsucc += 1
        else:
            print_fail("Test 2-dimensional laplace from full kron product")
            nfail += 1

    ################# TEST 4.0 #########################################
    # Solve the d-dimensional Dirichlet-Poisson equation using full matrices
    # Use Conjugate-Gradient method
    d = 3
    span = np.array([0., 1.])
    q = 2
    L = 4
    N = q**L
    h = 1 / float(N - 1)
    X = np.linspace(span[0], span[1], N)
    eps_cg = 1e-13
    sys.stdout.write("%d-dim Dirichlet-Poisson problem FULL with CG\r" % d)
    sys.stdout.flush()
    try:
        # Construct d-D Laplace (with 2nd order finite diff)
        D = -1. / h**2. * (np.diag(np.ones((N - 1)), -1) + np.diag(
            np.ones((N - 1)), 1) + np.diag(-2. * np.ones((N)), 0))
        # Dirichlet boundary rows.
        D[0, 0:2] = np.array([1., 0.])
        D[-1, -2:] = np.array([0., 1.])
        D_sp = sp.coo_matrix(D)
        I_sp = sp.identity(N)
        I = np.eye(N)
        FULL_LAP = sp.coo_matrix((N**d, N**d))
        for i in range(d):
            tmp = sp.identity((1))
            for j in range(d):
                if i != j:
                    tmp = sp.kron(tmp, I_sp)
                else:
                    tmp = sp.kron(tmp, D_sp)
            FULL_LAP = FULL_LAP + tmp
    except MemoryError:
        # NOTE(review): `dofull` is set here but never consulted afterwards,
        # so a MemoryError would still crash on the spla.cg call below --
        # confirm intended fallback behavior.
        print("FULL CG: Memory Error")
        dofull = False

    # Construct Right hand-side (b=1, Dirichlet BC = 0)
    b1D = np.ones(N)
    b1D[0] = 0.
    b1D[-1] = 0.
    tmp = np.array([1.])
    for j in range(d):
        tmp = np.kron(tmp, b1D)
    FULL_b = tmp

    # Solve full system using npla.solve
    (FULL_RES, FULL_CONV) = spla.cg(FULL_LAP, FULL_b, tol=eps_cg)

    if PLOTTING and d == 2:
        from mpl_toolkits.mplot3d import Axes3D
        from matplotlib import cm
        X = np.linspace(span[0], span[1], N)
        (XX, YY) = np.meshgrid(X, X)
        fig = plt.figure(figsize=(14, 10))

    ################# TEST 4.1 #########################################
    # Solve the 2-dimensional Dirichlet-Poisson equation using QTTmat and QTTvec
    # Use Conjugate-Gradient method
    sys.stdout.write(
        "%d-dim Dirichlet-Poisson problem QTTmat,QTTvec with CG\r" % d)
    sys.stdout.flush()
    TT_round = 1e-8
    eps_cg = 1e-3
    # Laplace operator
    D = -1. / h**2. * (np.diag(np.ones((N - 1)), -1) + np.diag(
        np.ones((N - 1)), 1) + np.diag(-2. * np.ones((N)), 0))
    D[0, 0:2] = np.array([1., 0.])
    D[-1, -2:] = np.array([0., 1.])
    D_tensor = DT.matkron_to_mattensor(D, nrows=N, ncols=N)
    TT_D = DT.QTTmat(D_tensor, base=q, nrows=N, ncols=N)
    TT_D.build(eps=TT_round)
    I = np.eye(N)
    I_tensor = DT.matkron_to_mattensor(I, nrows=N, ncols=N)
    TT_I = DT.QTTmat(I_tensor, base=q, nrows=N, ncols=N)
    TT_I.build(eps=TT_round)
    tt_list = []
    for i in range(d):
        if i == 0:
            tmp = TT_D.copy()
        else:
            tmp = TT_I.copy()
        for j in range(1, d):
            if i == j:
                tmp.kron(TT_D)
            else:
                tmp.kron(TT_I)
        tt_list.append(tmp)
    TT_Dxy = np.sum(tt_list).rounding(TT_round)

    # Right hand side
    b1D = np.ones(N)
    b1D[0] = 0.
    b1D[-1] = 0.
    B = np.array([1.])
    for j in range(d):
        B = np.kron(B, b1D)
    B = np.reshape(B, [N for i in range(d)])
    TT_B = DT.QTTvec(B)
    TT_B.build(TT_round)

    # Solve QTT cg
    x0 = DT.QTTzerosvec(d=d, N=N, base=q)
    # NOTE(review): time.clock() was removed in Python 3.8; this module
    # presumably targets Python 2 / early 3 -- time.perf_counter() is the
    # modern replacement.
    cg_start = time.clock()
    (TT_RES, TT_conv, TT_info) = mla.cg(TT_Dxy,
                                        TT_B,
                                        x0=x0,
                                        eps=eps_cg,
                                        ext_info=True,
                                        eps_round=TT_round)
    cg_stop = time.clock()
    L2err = mla.norm(
        TT_RES.to_tensor().reshape([N for i in range(d)]) -
        FULL_RES.reshape([N for i in range(d)]), 'fro')
    if L2err < eps_cg:
        print_ok(
            "%d-dim Dirichlet-Poisson problem QTTmat,QTTvec with CG [PASSED] Time: %.10f\n"
            % (d, cg_stop - cg_start))
        nsucc += 1
    else:
        print_fail(
            "%d-dim Dirichlet-Poisson problem QTTmat,QTTvec with CG [FAILED] L2err: %.e\n"
            % (d, L2err))
        nfail += 1

    if PLOTTING and d == 2:
        # Plot function
        ax = fig.add_subplot(321, projection='3d')
        ax.plot_surface(XX,
                        YY,
                        TT_RES.to_tensor().reshape((N, N)),
                        rstride=1,
                        cstride=1,
                        cmap=cm.coolwarm,
                        linewidth=0,
                        antialiased=False)
        ax = fig.add_subplot(322, projection='3d')
        ax.plot_surface(XX,
                        YY,
                        np.abs(TT_RES.to_tensor().reshape((N, N)) -
                               FULL_RES.reshape((N, N))),
                        rstride=1,
                        cstride=1,
                        cmap=cm.coolwarm,
                        linewidth=0,
                        antialiased=False)
        plt.show(block=False)

    ################# TEST 4.2 #########################################
    # Solve the 2-dimensional Dirichlet-Poisson equation using QTTmat and np.ndarray
    # Use Conjugate-Gradient method
    sys.stdout.write(
        "%d-dim Dirichlet-Poisson problem QTTmat,ndarray with CG\r" % d)
    sys.stdout.flush()
    TT_round = 1e-8
    eps_cg = 1e-3
    # Laplace operator
    D = -1. / h**2. * (np.diag(np.ones((N - 1)), -1) + np.diag(
        np.ones((N - 1)), 1) + np.diag(-2. * np.ones((N)), 0))
    D[0, 0:2] = np.array([1., 0.])
    D[-1, -2:] = np.array([0., 1.])
    D_tensor = DT.matkron_to_mattensor(D, nrows=N, ncols=N)
    TT_D = DT.QTTmat(D_tensor, base=q, nrows=N, ncols=N)
    TT_D.build(eps=TT_round)
    I = np.eye(N)
    I_tensor = DT.matkron_to_mattensor(I, nrows=N, ncols=N)
    TT_I = DT.QTTmat(I_tensor, base=q, nrows=N, ncols=N)
    TT_I.build(eps=TT_round)
    tt_list = []
    for i in range(d):
        if i == 0:
            tmp = TT_D.copy()
        else:
            tmp = TT_I.copy()
        for j in range(1, d):
            if i == j:
                tmp.kron(TT_D)
            else:
                tmp.kron(TT_I)
        tt_list.append(tmp)
    TT_Dxy = np.sum(tt_list).rounding(TT_round)

    # Right hand side
    b1D = np.ones(N)
    b1D[0] = 0.
    b1D[-1] = 0.
    B = np.array([1.])
    for j in range(d):
        B = np.kron(B, b1D)
    B = np.reshape(B, [N for i in range(d)])
    # Fold the RHS into the quantics shape expected by the QTT operator.
    B = np.reshape(B, [q for i in range(d * L)])

    # Solve QTT cg
    x0 = np.zeros([q for i in range(d * L)])
    cg_start = time.clock()
    (ARR_RES, TT_conv, TT_info1) = mla.cg(TT_Dxy,
                                          B,
                                          x0=x0,
                                          eps=eps_cg,
                                          ext_info=True,
                                          eps_round=TT_round)
    cg_stop = time.clock()
    L2err = mla.norm(
        ARR_RES.reshape([N for i in range(d)]) -
        FULL_RES.reshape([N for i in range(d)]), 'fro')
    if L2err < eps_cg:
        print_ok(
            "%d-dim Dirichlet-Poisson problem QTTmat,ndarray with CG [PASSED] Time: %.10f"
            % (d, cg_stop - cg_start))
        nsucc += 1
    else:
        print_fail(
            "%d-dim Dirichlet-Poisson problem QTTmat,ndarray with CG [FAILED] L2err: %.e"
            % (d, L2err))
        nfail += 1

    if PLOTTING and d == 2:
        # Plot function
        ax = fig.add_subplot(323, projection='3d')
        ax.plot_surface(XX,
                        YY,
                        ARR_RES.reshape((N, N)),
                        rstride=1,
                        cstride=1,
                        cmap=cm.coolwarm,
                        linewidth=0,
                        antialiased=False)
        ax = fig.add_subplot(324, projection='3d')
        ax.plot_surface(
            XX,
            YY,
            np.abs(ARR_RES.reshape((N, N)) - FULL_RES.reshape((N, N))),
            rstride=1,
            cstride=1,
            cmap=cm.coolwarm,
            linewidth=0,
            antialiased=False)
        plt.show(block=False)

    # ################# TEST 4.3 #########################################
    # # Solve the 2-dimensional Dirichlet-Poisson equation using QTTmat and np.ndarray
    # # Use Preconditioned Conjugate-Gradient method
    # sys.stdout.write("%d-dim Dirichlet-Poisson problem QTTmat,ndarray with Prec-CG\r" % d)
    # sys.stdout.flush()
    # TT_round = 1e-8
    # eps_cg = 1e-3
    # # Laplace operator
    # D = -1./h**2. * ( np.diag(np.ones((N-1)),-1) + np.diag(np.ones((N-1)),1) + np.diag(-2.*np.ones((N)),0) )
    # D[0,0:2] = np.array([1.,0.])
    # D[-1,-2:] = np.array([0.,1.])
    # D_tensor = DT.matkron_to_mattensor(D,nrows=N,ncols=N)
    # TT_D = DT.QTTmat(D_tensor, base=q,nrows=N,ncols=N,eps=TT_round)
    # I = np.eye(N)
    # I_tensor = DT.matkron_to_mattensor(I,nrows=N,ncols=N)
    # TT_I = DT.QTTmat(I_tensor,base=q,nrows=N,ncols=N,eps=TT_round)
    # tt_list = []
    # for i in range(d):
    #     if i == 0: tmp = TT_D.copy()
    #     else: tmp = TT_I.copy()
    #     for j in range(1,d):
    #         if i == j: tmp.kron(TT_D)
    #         else: tmp.kron(TT_I)
    #     tt_list.append(tmp)
    # TT_Dxy = np.sum(tt_list).rounding(TT_round)
    # # Construct Preconditioner using Newton-iterations
    # TT_II = TT_I.copy()
    # for j in range(1,d): TT_II.kron(TT_I)
    # alpha = 1e-6
    # TT_Pround = 1e-4
    # TT_P = alpha*TT_II
    # eps = mla.norm(TT_II-mla.dot(TT_Dxy,TT_P),'fro')/mla.norm(TT_II,'fro')
    # i = 0
    # while eps > 5.*1e-1:
    #     i += 1
    #     TT_P = (2. * TT_P - mla.dot(TT_P,mla.dot(TT_Dxy,TT_P).rounding(TT_Pround)).rounding(TT_Pround)).rounding(TT_Pround)
    #     eps = mla.norm(TT_II-mla.dot(TT_Dxy,TT_P),'fro')/mla.norm(TT_II,'fro')
    #     sys.stdout.write("\033[K")
    #     sys.stdout.write("Prec: err=%e, iter=%d\r" % (eps,i))
    #     sys.stdout.flush()
    # # Right hand side
    # b1D = np.ones(N)
    # b1D[0] = 0.
    # b1D[-1] = 0.
    # B = np.array([1.])
    # for j in range(d):
    #     B = np.kron(B,b1D)
    # B = np.reshape(B,[N for i in range(d)])
    # B = np.reshape(B,[q for i in range(d*L)])
    # # Solve QTT cg
    # x0 = np.zeros([q for i in range(d*L)])
    # # Precondition
    # TT_DP = mla.dot(TT_P,TT_Dxy).rounding(TT_round)
    # BP = mla.dot(TT_P,B)
    # cg_start = time.clock()
    # (ARR_RES,TT_conv,TT_info) = mla.cg(TT_DP,BP,x0=x0,eps=eps_cg,ext_info=True,eps_round=TT_round)
    # cg_stop = time.clock()
    # L2err = mla.norm(ARR_RES.reshape([N for i in range(d)])-FULL_RES.reshape([N for i in range(d)]), 'fro')
    # if L2err < eps_cg:
    #     print_ok("%d-dim Dirichlet-Poisson problem QTTmat,ndarray with Prec-CG [PASSED] Time: %.10f" % (d, cg_stop-cg_start))
    # else:
    #     print_fail("%d-dim Dirichlet-Poisson problem QTTmat,ndarray with Prec-CG [FAILED] L2err: %.e" % (d,L2err))
    # if PLOTTING and d == 2:
    #     # Plot function
    #     ax = fig.add_subplot(325,projection='3d')
    #     ax.plot_surface(XX,YY,ARR_RES.reshape((N,N)),rstride=1, cstride=1, cmap=cm.coolwarm,
    #                     linewidth=0, antialiased=False)
    #     ax = fig.add_subplot(326,projection='3d')
    #     ax.plot_surface(XX,YY,np.abs(ARR_RES.reshape((N,N))-FULL_RES.reshape((N,N))),rstride=1, cstride=1, cmap=cm.coolwarm,
    #                     linewidth=0, antialiased=False)
    #     plt.show(block=False)

    print_summary("QTT", nsucc, nfail)
    return (nsucc, nfail)
def run(maxprocs, PLOTTING=False, loglev=logging.WARNING):
    """Test battery for QTT low-rank approximation via ttdmrg.

    Each section builds a QTT cross approximation of an analytic function
    sampled on a tensor grid and checks the Frobenius-norm error against a
    rank/condition-number based target accuracy.

    Parameters
    ----------
    maxprocs :
        Unused in this battery; kept so every ``run`` entry point in the
        test suite shares the same signature.
    PLOTTING : bool
        If True, show diagnostic plots of the grid indices evaluated by
        the cross approximation.
    loglev :
        Logging level forwarded to ``logging.basicConfig``.

    Returns
    -------
    tuple
        ``(nsucc, nfail)`` -- number of passed / failed checks.
    """
    logging.basicConfig(level=loglev)
    if PLOTTING:
        import matplotlib.pyplot as plt
        from matplotlib import cm
        from mpl_toolkits.mplot3d import Axes3D

    nsucc = 0
    nfail = 0

    # Tolerances are identical in every section, so they are set once.
    maxvoleps = 1e-5   # maxvol tolerance used by the cross interpolation
    eps = 1e-10        # target accuracy for the ttdmrg build

    def _approximate(f, X, params=None):
        """Build the ttdmrg QTT approximation of ``f`` on grid ``X``.

        Returns ``(TW, TTapprox, A, FroErr, epsTarget, fill_pct)``:
        the tensor wrapper (kept for fill-index diagnostics), the QTT
        approximation, the full tensor of exact values, the Frobenius
        error, the theoretical accuracy bound, and the percentage of
        tensor entries actually evaluated.
        """
        d = len(X)
        # NOTE(review): the original sections called TensorWrapper both
        # with and without the params argument; passing None explicitly is
        # assumed equivalent to omitting it.
        TW = DT.TensorWrapper(f, X, params)
        TTapprox = DT.QTTvec(TW)
        TTapprox.build(method='ttdmrg', eps=eps, mv_eps=maxvoleps)
        fill = TW.get_fill_level()
        A = TW.copy()[tuple(slice(None) for _ in range(d))]
        FroErr = mla.norm(TTapprox.to_tensor() - A, 'fro')
        # This bound is slightly off with respect to the analysis
        # (comment preserved from the original implementation).
        kappa = np.max(A) / np.min(A)
        r = np.max(TTapprox.ranks())
        epsTarget = (2. * r + kappa * r + 1.)**(np.log2(d)) * (r + 1.) * eps
        # np.float was removed in NumPy 1.24 -- use the builtin float.
        fill_pct = 100. * float(fill) / float(TW.get_global_size())
        return TW, TTapprox, A, FroErr, epsTarget, fill_pct

    ####
    # exp(- |X-X0|^2/2*l^2) Low Rank Approximation
    # (power-of-two grid 32x32 and non-power-of-two grid 54x54)
    ####
    X0 = np.array([0.2, 0.2])
    l = 0.05
    params = {'X0': X0, 'l': l}

    def f(X, params):
        return np.exp(-np.sum((X - params['X0'])**2.) / (2 * params['l']**2.))

    for size in [(32, 32), (54, 54)]:
        X = [np.linspace(0, 1., size[0]), np.linspace(0, 1., size[1])]
        TW, TTapprox, A, FroErr, epsTarget, fill_pct = \
            _approximate(f, X, params)
        msg = ('QTTdmrg: exp(- |X-X0|^2/2*l^2) Low Rank Approx - (%dx%d) - '
               '(FroErr=%e, Fill=%.2f%%)'
               % (size[0], size[1], FroErr, fill_pct))
        if FroErr < epsTarget:
            print_ok(msg)
            nsucc += 1
        else:
            print_fail(msg)
            nfail += 1
        if PLOTTING:
            # Overlay all evaluated indices (white) and the indices used
            # by the last dmrg sweep (red) on the exact tensor.
            fill_idxs = np.array(TW.get_fill_idxs())
            last_idxs = TTapprox.get_ttdmrg_eval_idxs()
            plt.figure()
            plt.imshow(A.astype(float), origin='lower')
            plt.plot(fill_idxs[:, 0], fill_idxs[:, 1], 'wo')
            plt.plot(last_idxs[:, 0], last_idxs[:, 1], 'ro')
            plt.title("exp(- |X-X0|^2/2*l^2) - %dx%d" % (size[0], size[1]))
            plt.show(block=False)

    ####
    # 1./(x+y+1) Low Rank Approximation
    ####
    size = (33, 33)

    def f(X, params):
        return 1. / (X[0] + X[1] + 1.)

    X = [
        np.linspace(0, 2 * np.pi, size[0]),
        np.linspace(0, 2 * np.pi, size[1])
    ]
    TW, TTapprox, A, FroErr, epsTarget, fill_pct = _approximate(f, X, None)
    msg = ('QTTdmrg: 1./(x+y+1) Low Rank Approx (FroErr=%e, Fill=%.2f%%)'
           % (FroErr, fill_pct))
    if FroErr < epsTarget:
        print_ok(msg)
        nsucc += 1
    else:
        print_fail(msg)
        nfail += 1
    if PLOTTING:
        fill_idxs = np.array(TW.get_fill_idxs())
        plt.figure()
        plt.plot(fill_idxs[:, 0], fill_idxs[:, 1], 'o')
        plt.title("1./(x+y+1) - 33x33")
        plt.show(block=False)

    ####
    # sin(sum(x)) TTcross Approximation
    ####
    d = 3
    size = [33] * d

    def f(X, params):
        return np.sin(np.sum(X))

    X = [np.linspace(0, 2 * np.pi, size[0])] * d
    TW, TTapprox, A, FroErr, epsTarget, fill_pct = _approximate(f, X)
    msg = ('QTTdmrg: sin(sum(x)) Low Rank Approx (FroErr=%e, Fill=%.2f%%)'
           % (FroErr, fill_pct))
    if FroErr < epsTarget:
        print_ok(msg)
        nsucc += 1
    else:
        print_fail(msg)
        nfail += 1
    if PLOTTING and d == 3:
        # 3-D scatter of all evaluated indices, then of the last sweep.
        fill_idxs = np.array(TW.get_fill_idxs())
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        ax.scatter(fill_idxs[:, 0], fill_idxs[:, 1], fill_idxs[:, 2])
        ax.set_xlabel('x')
        ax.set_ylabel('y')
        ax.set_zlabel('z')
        plt.title("Sin(sum(x)) - %s" % str(size))
        last_idxs = TTapprox.get_ttdmrg_eval_idxs()
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        ax.scatter(last_idxs[:, 0], last_idxs[:, 1], last_idxs[:, 2], c='r')
        plt.title("Sin(sum(x)) - %s" % str(size))
        plt.show(block=False)

    ####
    # 1/(sum(x)+1) QTTdmrg Approximation, d=5
    ####
    d = 5
    size = [8] * d

    def f(X, params):
        return 1. / (np.sum(X) + 1.)

    X = [np.linspace(0, 1, size[i]) for i in range(len(size))]
    TW, TTapprox, A, FroErr, epsTarget, fill_pct = _approximate(f, X)
    MaxErr = np.max(TTapprox.to_tensor() - A)
    if FroErr < epsTarget:
        print_ok(
            'QTTdmrg: 1/(sum(x)+1), d=%d, Low Rank Approx '
            '(FroErr=%e, MaxErr=%e, Fill=%.2f%%)'
            % (d, FroErr, MaxErr, fill_pct))
        nsucc += 1
    else:
        # Fixed label: the second field is MaxErr (it was mislabelled
        # "FroErr" in the original failure message).
        print_fail(
            'QTTdmrg: 1/(sum(x)+1), d=%d, Low Rank Approx '
            '(FroErr=%e, MaxErr=%e, Fill=%.2f%%)'
            % (d, FroErr, MaxErr, fill_pct))
        nfail += 1

    print_summary("QTTdmrg", nsucc, nfail)
    return (nsucc, nfail)