Example No. 1
def main(args):
    name = os.path.basename(args.tensor)
    name = name.split('.')[0]
    with open(args.tensor, 'rb') as f:
        data = pickle.load(f).T
        filters, channels, cols, rows = data.shape
        channel_data = []
        col_data = []
        row_data = []
        for d in data:
            core, U = tucker.hooi(dtensor(d.T), [1, 1, 1], init='nvecs')
            core = np.squeeze(core)
            channel_data.append((core * U[2]).reshape(1, 1,
                                                      channels))  # channels
            col_data.append(U[1].reshape(1, cols))  # cols
            row_data.append(U[0].reshape(rows, 1))  # rows

        channel_params = np.stack(channel_data, axis=-1)
        print(channel_params.shape)
        col_params = np.expand_dims(np.stack(col_data, axis=-1), axis=-1)
        print(col_params.shape)
        row_params = np.expand_dims(np.stack(row_data, axis=-1), axis=-1)
        print(row_params.shape)

    path = os.path.join(args.dest_dir, name + '_d.params')
    with open(path, 'wb') as f:  # pickle needs a binary file handle
        pickle.dump(channel_params, f)

    path = os.path.join(args.dest_dir, name + '_h.params')
    with open(path, 'wb') as f:
        pickle.dump(col_params, f)

    path = os.path.join(args.dest_dir, name + '_v.params')
    with open(path, 'wb') as f:
        pickle.dump(row_params, f)
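The three .params files above hold a rank-1 Tucker approximation of each filter (the core scalar is folded into the channel factor). A minimal reconstruction sketch, not part of the original script and assuming the factor shapes produced by the reshapes above:

import numpy as np


def reconstruct_filter(row_vec, col_vec, chan_vec):
    # Illustrative helper: row_vec is (rows, 1), col_vec is (1, cols),
    # chan_vec is (1, 1, channels), matching the reshapes in the example.
    spatial = row_vec @ col_vec  # (rows, cols) outer product
    return spatial[:, :, None] * chan_vec.reshape(1, 1, -1)  # (rows, cols, channels)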
Example No. 2
def test_new():
    sz = (10, 23, 5)
    A = np.random.randn(*sz)
    T = dtensor(A)
    assert A.ndim == T.ndim
    assert A.shape == T.shape
    assert (A == T).all()
    assert (T == A).all()
Example No. 3
    def as_dtensor(self):
        sz = self.original_tensor_size
        order = np.concatenate((self.row_indices, self.column_indices))
        order = order.tolist()
        data = self.data.reshape(get_elements_at(sz, order))
        # transpose + argsort(order) equals ipermute
        data = data.transpose(np.argsort(order))
        return dtensor(data)
Example No. 4
def test_new():
    sz = (10, 23, 5)
    A = randn(*sz)
    T = dtensor(A)
    assert_equal(A.ndim, T.ndim)
    assert_equal(A.shape, T.shape)
    assert_true((A == T).all())
    assert_true((T == A).all())
Example No. 5
def test_new():
    sz = (10, 23, 5)
    A = randn(*sz)
    T = dtensor(A)
    assert_equal(A.ndim, T.ndim)
    assert_equal(A.shape, T.shape)
    assert_true((A == T).all())
    assert_true((T == A).all())
Example No. 6
def test_new():
    sz = (10, 23, 5)
    A = randn(*sz)
    T = dtensor(A)
    assert A.ndim == T.ndim
    assert A.shape == T.shape
    assert (A == T).all()
    assert (T == A).all()
Example No. 7
    def impute(self):
        time_s = time.time()
        est_data = self.miss_data.copy()
        SD = dtensor(est_data)
        core1, U1 = tucker.hooi(SD, self.ranks, init='nvecs')
        ttm_data = core1.ttm(U1[0], 0).ttm(U1[1], 1).ttm(U1[2], 2)
        self.est_data = self.W * est_data + (self.W == False) * ttm_data
        time_e = time.time()
        self.exec_time = time_e - time_s
Example No. 8
def obtainCompressedCSV(readingsNormalized):
    """ Compresses teh sensor data """
    T_in = dtensor(readingsNormalized.reshape(necessary, 3, 3))
    T_out, fit, itr, _ = als(T_in, 3)
    final_Compressed = "\n".join(
        [",".join([str(k) for k in n]) for n in T_out.U[0]] +
        [",".join([str(k) for k in n]) for n in T_out.U[1]] +
        [",".join([str(k) for k in n])
         for n in T_out.U[2]] + [",".join([str(n) for n in T_out.lmbda])])
    return final_Compressed
Example No. 9
    def totensor(self):
        """
        Converts a ktensor into a dense tensor

        Returns
        -------
        arr : dtensor
            Fully computed multidimensional array whose shape matches
            the original ktensor.
        """
        return dtensor(self.toarray())
Example No. 10
    def totensor(self):
        """
        Converts a ktensor into a dense tensor

        Returns
        -------
        arr : dtensor
            Fully computed multidimensional array whose shape matches
            the original ktensor.
        """
        return dtensor(self.toarray())
Example No. 11
def halrtc_cpt(sparse_data, lou, conv_thre, K, W, alpha=[0, 0, 1]):
    #ori_speeddata = scio.loadmat('../GZ_data/speed_tensor.mat')['tensor']
    #ori_speeddata, ori_W = tc.deal_orimiss(ori_speeddata, False)
    time_s = time.time()
    X = sparse_data.copy()
    Y = {}
    N = len(np.shape(X))
    W1 = (W == False)
    M = {}
    T_temp = X.copy()
    #alpha = [1/4,1/4,1/4,1/4] if N==4 else [0,0,1]
    for _ in range(N):
        M[_] = dtensor(np.zeros(np.shape(X)))
        Y[_] = dtensor(np.zeros(np.shape(X)))
    for iter in range(K):
        X_pre = X.copy()
        T_temp_pre = T_temp.copy()
        for i in range(N):
            SD = dtensor(X_pre)
            Matrix = SD.unfold(i) + 1 / lou * (Y[i].unfold(i))

            U, sigma, VT = np.linalg.svd(Matrix, 0)
            row_s = len(sigma)
            mat_sig = np.zeros((row_s, row_s))
            for ii in range(row_s):
                mat_sig[ii, ii] = max(sigma[ii] - alpha[i] / lou, 0)
            M[i] = (np.dot(np.dot(U, mat_sig), VT[:row_s, :])).fold()

        T_temp = (np.sum([M[j] - 1 / lou * Y[j]
                          for j in range(N)], axis=0)) / N
        X[W1] = T_temp[W1]
        X_Fnorm = np.sum((X - X_pre)**2)
        if X_Fnorm < conv_thre:
            break
        for i in range(N):
            Y[i] -= lou * (M[i] - X)
    time_e = time.time()
    #print('-'*8+'halrtc'+'-'*8)
    #print('exec_time:'+str(time_e-time_s)+'s')
    print('iter:', iter)
    return X
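The inner loop over ii implements singular-value soft-thresholding with threshold alpha[i] / lou. A vectorized numpy-only equivalent of that step, offered as a sketch rather than code from the original project:

import numpy as np


def svd_soft_threshold(matrix, tau):
    # Shrink every singular value by tau (clipping at zero), then rebuild the matrix.
    U, sigma, VT = np.linalg.svd(matrix, full_matrices=False)
    sigma_shrunk = np.maximum(sigma - tau, 0.0)
    return (U * sigma_shrunk) @ VT  # same as U @ diag(sigma_shrunk) @ VT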
Example No. 12
    def impute(self):
        time_s = time.time()
        alpha = self.alpha_vec
        lou = self.lou
        X = self.miss_data.copy()
        Y, M = {}, {}
        N = len(X.shape)
        W1 = (self.W == False)
        T_temp = X.copy()

        for _ in range(N):
            M[_] = dtensor(np.zeros_like(X))
            Y[_] = dtensor(np.zeros_like(X))

        for _ in range(self.max_iter):
            X_pre = X.copy()
            for i in range(N):
                SD = dtensor(X_pre)
                Matrix = SD.unfold(i) + 1 / lou * (Y[i].unfold(i))

                U, sigma, VT = np.linalg.svd(Matrix, 0)
                row_s = len(sigma)
                mat_sig = np.zeros((row_s, row_s))
                for ii in range(row_s):
                    mat_sig[ii, ii] = max(sigma[ii] - alpha[i] / lou, 0)
                M[i] = (np.dot(np.dot(U, mat_sig), VT[:row_s, :])).fold()

            T_temp = (np.sum([M[j] - 1 / lou * Y[j]
                              for j in range(N)], axis=0)) / N
            X[W1] = T_temp[W1]
            X_Fnorm = np.sum((X - X_pre)**2)
            if X_Fnorm < self.threshold:
                break
            for i in range(N):
                Y[i] -= lou * (M[i] - X)

        time_e = time.time()
        self.exec_time = time_e - time_s
        self.est_data = X
        return X
Example No. 13
def cp_cpt(sparse_data, rank, W):
    time_s = time.time()
    est_data = sparse_data.copy()
    dshape = np.shape(est_data)
    SD = dtensor(sparse_data.copy())
    U = []
    P, fit, itr, arr = cp.als(SD, rank)
    loc_data = P.totensor()
    est_data = W * est_data + (W == False) * loc_data
    time_e = time.time()
    print('-' * 8 + 'cp' + '-' * 8)
    print('exec_time:' + str(time_e - time_s) + 's')
    return est_data
Example No. 14
def multi_tucker(sparse_data, rates, W):
    SD = dtensor(sparse_data)
    est_dict = {}
    for rate in rates:
        rank_set = [0, 0, 0]
        for i in range(3):
            U, sigma, VT = scipy.linalg.svd(SD.unfold(i), 0)
            for r in range(len(sigma)):
                if sum(sigma[:r]) / sum(sigma) > rate:
                    rank_set[i] = r
                    break
        print(rank_set)
        est_dict[rate] = tucker_cpt(sparse_data, rank_set, W)
    return est_dict
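The r-search loop above picks the smallest rank whose leading singular values capture more than rate of the total spectral energy. Assuming 0 < rate < 1, a vectorized sketch (not from the original code) of the same rule:

import numpy as np


def rank_for_energy(sigma, rate):
    # Smallest r such that sum(sigma[:r]) / sum(sigma) > rate.
    ratios = np.cumsum(sigma) / np.sum(sigma)
    return int(np.argmax(ratios > rate)) + 1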
Example No. 15
def lrtc_cpt(sparse_data, alpha, beta, gama, conv_thre, K, W):
    time_s = time.time()
    Y = sparse_data.copy()
    N = len(np.shape(sparse_data))
    X = Y.copy()
    normY = np.sum(Y**2)**0.5
    M = {}
    MX, MY, M_fold = {}, {}, {}
    for iter in range(K):
        Y_pre = Y.copy()
        for n in range(N):

            MX[n] = dtensor(X).unfold(n)
            MY[n] = dtensor(Y).unfold(n)
            M_temp = (alpha[n] * MX[n] + beta[n] * MY[n]) / (alpha[n] +
                                                             beta[n])
            para_fi = gama[n] / (alpha[n] + beta[n])
            U, sigma, VT = np.linalg.svd(M_temp, full_matrices=0)
            row_s = len(sigma)
            mat_sig = np.zeros((row_s, row_s))
            max_rank = 0
            for ii in range(row_s):
                mat_sig[ii, ii] = max(sigma[ii] - para_fi, 0)
            M[n] = np.dot(np.dot(U[:, :row_s], mat_sig), VT[:row_s, :])
            M_fold[n] = M[n].fold()
        X = np.sum([alpha[i] * M_fold[i]
                    for i in range(N)], axis=0) / sum(alpha)
        Y_temp = np.sum([beta[i] * M_fold[i]
                         for i in range(N)], axis=0) / sum(beta)
        Y[W == False] = Y_temp[W == False]
        Y_Fnorm = np.sum((Y - Y_pre)**2)
        if Y_Fnorm < conv_thre:
            break
    time_e = time.time()
    print('-' * 8 + 'lrtc' + '-' * 8)
    print('exec_time:' + str(time_e - time_s) + 's')
    return Y
Example No. 16
def test_factorization():
    I, J, K, rank = 10, 20, 75, 5
    A = orthomax(randn(I, rank))
    B = orthomax(randn(J, rank))
    C = orthomax(randn(K, rank))

    core_real = dtensor(randn(rank, rank, rank))
    T = core_real.ttm([A, B, C])
    core, U = tucker_hooi.tucker_hooi(T, rank)

    assert_true(allclose(T, ttm(core, U)))
    assert_true(allclose(A, orthomax(U[0])))
    assert_true(allclose(B, orthomax(U[1])))
    assert_true(allclose(C, orthomax(U[2])))
    assert_true(allclose(core_real, core))
Example No. 17
def disabled_test_factorization():
    I, J, K, rank = 10, 20, 75, 5
    A = orthomax(np.random.randn(I, rank))
    B = orthomax(np.random.randn(J, rank))
    C = orthomax(np.random.randn(K, rank))

    core_real = dtensor(np.random.randn(rank, rank, rank))
    T = core_real.ttm([A, B, C])
    core, U = tucker.hooi(T, rank)

    assert np.allclose(T, ttm(core, U))
    assert np.allclose(A, orthomax(U[0]))
    assert np.allclose(B, orthomax(U[1]))
    assert np.allclose(C, orthomax(U[2]))
    assert np.allclose(core_real, core)
Example No. 18
def T_SVD(Atensor, p):
    SD = dtensor(Atensor.copy())
    N = len(np.shape(SD))
    U_list, r_list = [], []
    SG = []
    for i in range(N):
        B = SD.unfold(i)
        U, sigma, VT = scipy.linalg.svd(B, 0)
        row_s = len(sigma)
        mat_sig = np.zeros((row_s, row_s))
        for j in range(row_s):
            mat_sig[j, j] = sigma[j]
            if sum(sigma[:j]) / sum(sigma) > p:
                SG.append(sigma[j])
                break

        U_list.append(U[:, :j])
        r_list.append(j)
    return SG, j, U_list, r_list
Example No. 19
def STD_cpt(sparse_data, W, threshold=1e-4, alpha=2e-10, lm=0.01, p=0.7):
    ds = sparse_data.shape
    X_ori = sparse_data.copy()
    U_list, r_list = T_SVD(X_ori, p)[-2:]
    core, U_list = tucker.hooi(dtensor(X_ori), r_list, init='nvecs')
    [A, B, C] = U_list
    #core = dtensor(X_ori).ttm(A.T, 0).ttm(B.T, 1).ttm(C.T, 2)
    X = core.ttm(A, 0).ttm(B, 1).ttm(C, 2)
    #print(np.linalg.norm(X-X_ori))
    #return
    Upre_list = U_list
    F_diff = sys.maxsize
    iter = 0
    while F_diff > threshold and iter < 500:
        X_pre = X.copy()
        #print('Xpre_norm',np.linalg.norm(X_pre))
        # Upre_list = []
        # for u in U_list:
        #     Upre_list.append(u.copy())
        core_pre = core.copy()
        E = W * (X_ori - core_pre.ttm(Upre_list[0], 0).ttm(
            Upre_list[1], 1).ttm(Upre_list[2], 2))
        for i in range(X.ndim):
            mul1 = (W * E).unfold(i)
            if i == 0:
                mul2 = np.kron(Upre_list[2], Upre_list[1])
            elif i == 1:
                mul2 = np.kron(Upre_list[2], Upre_list[0])
            else:
                mul2 = np.kron(Upre_list[1], Upre_list[0])
            mul3 = core_pre.unfold(i).T
            Upre_list[i] = (1 - alpha * lm) * Upre_list[i] + alpha * np.dot(
                np.dot(mul1, mul2), mul3)
            #print(np.dot(mul1,mul2))
        Temp = E.ttm(Upre_list[0].T, 0).ttm(Upre_list[1].T,
                                            1).ttm(Upre_list[2].T, 2)
        core = (1 - alpha * lm) * core_pre + alpha * Temp
        X = core.ttm(Upre_list[0], 0).ttm(Upre_list[1], 1).ttm(Upre_list[2], 2)
        F_diff = np.linalg.norm(X - X_pre)
        #break
        iter += 1
    return X
Example No. 20
def tucker_cpt(sparse_data, rank_list, W):
    time_s = time.time()
    est_data = sparse_data.copy()
    dshape = np.shape(est_data)
    SD = dtensor(est_data)
    #U = tucker.hosvd(SD,rank_list)
    core1, U1 = tucker.hooi(SD, rank_list, init='nvecs')
    #print('mean,var',np.mean(core1.unfold(0)),core1.var)
    #print('U_mean',(U1[0]==0).max())
    left1 = SD.unfold(0)
    U_1, sigma, VT = np.linalg.svd(left1, 0)
    #print(np.sum(U1[0]-U_1))
    # ttm: tensor-times-matrix multiplication
    ttm_data = core1.ttm(U1[0], 0).ttm(U1[1], 1).ttm(U1[2], 2)
    print(np.linalg.norm(ttm_data - sparse_data))
    #print(np.mean(ttm_data))
    est_data = W * est_data + (W == False) * ttm_data
    time_e = time.time()
    print('-' * 8 + 'tucker' + '-' * 8)
    print('exec_time:' + str(time_e - time_s) + 's')
    return est_data
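A minimal usage sketch for tucker_cpt, assuming sktensor and the imports used in these examples are available; the tensor and observation mask below are synthetic:

import numpy as np

rng = np.random.default_rng(0)
full = rng.random((10, 12, 8))
W = rng.random(full.shape) > 0.2  # True where an entry is observed
sparse_data = full * W  # unobserved entries set to zero
est = tucker_cpt(sparse_data, [5, 5, 4], W)  # reconstructs the masked entries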
Example No. 21
    def impute(self):
        time_s = time.time()
        X_ori = self.miss_data.copy()
        core, U_list = tucker.hooi(dtensor(X_ori), self.ranks, init='nvecs')
        X = self.restruct(core, U_list)

        F_diff = sys.maxsize
        iter = 0
        while iter < self.max_iter:
            F_diff_pre = F_diff
            X_pre = X.copy()
            core_pre = core.copy()
            E = self.W * (X_ori - self.restruct(core_pre, U_list))
            for i in range(X.ndim):
                mul1 = (self.W * E).unfold(i)
                if i == 0:
                    mul2 = np.kron(U_list[2], U_list[1])
                elif i == 1:
                    mul2 = np.kron(U_list[2], U_list[0])
                else:
                    mul2 = np.kron(U_list[1], U_list[0])

                mul3 = core_pre.unfold(i).T
                U_list[i] = (1 - self.alpha * self.lam) * U_list[i] + \
                    self.alpha * np.dot(np.dot(mul1, mul2), mul3)

            core_temp = self.restruct(E, U_list, transpose=True)
            core = (1 - self.alpha * self.lam) * \
                core_pre + self.alpha * core_temp
            X = self.restruct(core, U_list)
            F_diff = np.linalg.norm(X - X_pre)
            # print('STD:', F_diff)
            if abs(F_diff - F_diff_pre) < self.threshold:
                break

            iter += 1
        time_e = time.time()
        self.exec_time = time_e - time_s
        self.est_data = X
        return X
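restruct is not shown in this snippet. A plausible minimal implementation, consistent with how it is called here and with the explicit ttm chains in the other examples (the transpose flag applying U.T along each mode), would be:

    def restruct(self, core, U_list, transpose=False):
        # Multiply the core (or a residual tensor) by each factor matrix along its mode.
        out = core
        for mode, U in enumerate(U_list):
            out = out.ttm(U.T if transpose else U, mode)
        return out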
Example No. 22
def test_dense_fold():
    X = dtensor(T)
    I, J, K = T.shape
    X1 = X[:, :, 0]
    X2 = X[:, :, 1]

    U = X.unfold(0)
    assert_equal((3, 8), U.shape)
    for j in range(J):
        assert_true((U[:, j] == X1[:, j]).all())
        assert_true((U[:, j + J] == X2[:, j]).all())

    U = X.unfold(1)
    assert_equal((4, 6), U.shape)
    for i in range(I):
        assert_true((U[:, i] == X1[i, :]).all())
        assert_true((U[:, i + I] == X2[i, :]).all())

    U = X.unfold(2)
    assert_equal((2, 12), U.shape)
    for k in range(U.shape[1]):
        assert_true((U[:, k] == array([X1.flatten('F')[k], X2.flatten('F')[k]])).all())
Example No. 23
def test_unfold():
    Td = dtensor(zeros(shape, dtype=np.float32))
    Td[subs] = vals

    for i in range(len(shape)):
        rdims = [i]
        cdims = setdiff1d(range(len(shape)), rdims)[::-1]
        Md = Td.unfold(i)

        T = sptensor(subs, vals, shape, accumfun=lambda l: l[-1])

        Ms = T.unfold(rdims, cdims)
        assert_equal(Md.shape, Ms.shape)
        assert_true((allclose(Md, Ms.toarray())))

        Ms = T.unfold(rdims)
        assert_equal(Md.shape, Ms.shape)
        assert_true((allclose(Md, Ms.toarray())))

        Md = Md.T
        Ms = T.unfold(rdims, cdims, transp=True)
        assert_equal(Md.shape, Ms.shape)
        assert_true((allclose(Md, Ms.toarray())))
Example No. 24
def test_unfold():
    Td = dtensor(zeros(shape, dtype=np.float32))
    Td[subs] = vals

    for i in range(len(shape)):
        rdims = [i]
        cdims = setdiff1d(range(len(shape)), rdims)[::-1]
        Md = Td.unfold(i)

        T = sptensor(subs, vals, shape, accumfun=lambda l: l[-1])

        Ms = T.unfold(rdims, cdims)
        assert_equal(Md.shape, Ms.shape)
        assert_true((allclose(Md, Ms.toarray())))

        Ms = T.unfold(rdims)
        assert_equal(Md.shape, Ms.shape)
        assert_true((allclose(Md, Ms.toarray())))

        Md = Md.T
        Ms = T.unfold(rdims, cdims, transp=True)
        assert_equal(Md.shape, Ms.shape)
        assert_true((allclose(Md, Ms.toarray())))
Example No. 25
def test_dense_fold(T):
    X = dtensor(T)
    I, J, K = T.shape
    X1 = X[:, :, 0]
    X2 = X[:, :, 1]

    U = X.unfold(0)
    assert (3, 8) == U.shape
    for j in range(J):
        assert (U[:, j] == X1[:, j]).all()
        assert (U[:, j + J] == X2[:, j]).all()

    U = X.unfold(1)
    assert (4, 6) == U.shape
    for i in range(I):
        assert (U[:, i] == X1[i, :]).all()
        assert (U[:, i + I] == X2[i, :]).all()

    U = X.unfold(2)
    assert (2, 12) == U.shape
    for k in range(U.shape[1]):
        assert (U[:, k] == np.array([X1.flatten('F')[k],
                                     X2.flatten('F')[k]])).all()
Example No. 26
def test_dense_fold():
    X = dtensor(T)
    I, J, K = T.shape
    X1 = X[:, :, 0]
    X2 = X[:, :, 1]

    U = X.unfold(0)
    assert_equal((3, 8), U.shape)
    for j in range(J):
        assert_true((U[:, j] == X1[:, j]).all())
        assert_true((U[:, j + J] == X2[:, j]).all())

    U = X.unfold(1)
    assert_equal((4, 6), U.shape)
    for i in range(I):
        assert_true((U[:, i] == X1[i, :]).all())
        assert_true((U[:, i + I] == X2[i, :]).all())

    U = X.unfold(2)
    assert_equal((2, 12), U.shape)
    for k in range(U.shape[1]):
        assert_true((U[:, k] == array([X1.flatten('F')[k],
                                       X2.flatten('F')[k]])).all())
Example No. 27
    def truncated_svd(self):
        SD = dtensor(self.data.copy())
        N = len(SD.shape)
        U_list = []  # left singular matrix list
        r_list = []  # rank list
        SG = []  # singular value list
        for i in range(N):
            B = SD.unfold(i)
            U, sigma, _ = scipy.linalg.svd(B, 0)
            row_s = len(sigma)
            mat_sig = np.zeros((row_s, row_s))
            for j in range(row_s):
                mat_sig[j, j] = sigma[j]
                if sum(sigma[:j]) / sum(sigma) > self.truncate_rate:
                    SG.append(sigma[j])
                    break

            U_list.append(U[:, :j])
            r_list.append(j)

        self.SV_list = SG
        self.LSM_list = U_list
        self.rank_list = r_list
        return SG, U_list, r_list
Example No. 28
def silrtc_cpt(sparse_data, alpha, beta, conv_thre, K, W):
    time_s = time.time()
    X = sparse_data.copy()
    M = {}
    N = len(np.shape(X))
    for iter in range(K):
        X_pre = X.copy()
        for i in range(N):
            para_fi = alpha[i] / beta[i]
            U, sigma, VT = np.linalg.svd(dtensor(X).unfold(i), full_matrices=0)
            row_s = len(sigma)
            mat_sig = np.zeros((row_s, row_s))
            for ii in range(row_s):
                mat_sig[ii, ii] = max(sigma[ii] - para_fi, 0)
            M[i] = (np.dot(np.dot(U, mat_sig), VT[:row_s, :])).fold()
        X_temp = np.sum([beta[j] * M[j] for j in range(N)], axis=0) / sum(beta)
        X[W == False] = X_temp[W == False]
        X_diffnorm = np.sum((X - X_pre)**2)
        if X_diffnorm < conv_thre:
            break
    time_e = time.time()
    print('-' * 8 + 'silrtc' + '-' * 8)
    print('exec_time:' + str(time_e - time_s) + 's')
    return X
Example No. 29
def test_dtensor_ttm(T, Y, U):
    X = dtensor(T)
    Y2 = X.ttm(U, 0)
    assert (2, 4, 2) == Y2.shape
    assert (Y == Y2).all()
Example No. 30
def test_dtensor_ttm():
    X = dtensor(T)
    Y2 = X.ttm(U, 0)
    assert_equal((2, 4, 2), Y2.shape)
    assert_true((Y == Y2).all())
Example No. 31
def test_dtensor_fold_unfold():
    sz = (10, 35, 3, 12)
    X = dtensor(randn(*sz))
    for i in range(4):
        U = X.unfold(i).fold()
        assert_true((X == U).all())
Example No. 32
def test_dtensor_ttm(T, Y, U):
    X = dtensor(T)
    Y2 = X.ttm(U, 0)
    assert (2, 4, 2) == Y2.shape
    assert (Y == Y2).all()
Example No. 33
print(SP)
A, B, C = miss_pos[0].tolist(), miss_pos[1].tolist(), miss_pos[2].tolist()
dim1_miss = set(A)
Ndata = np.zeros((SP[0] - len(dim1_miss), SP[1], SP[2]))
Zdata = np.zeros((len(dim1_miss), SP[1], SP[2]))
j, k = 0, 0
for i in range(SP[0]):
    if i not in dim1_miss:
        Ndata[j, :, :] = Vdata[i, :, :]
        j += 1
    else:
        Zdata[k, :, :] = Vdata[i, :, :]
        k += 1
time_s = time.time()
S = Vdata.copy()
SD = dtensor(Vdata)
T_ = SD.unfold(1)
print(time.time() - time_s, 's')
SVD_ = np.linalg.svd(T_)
print(time.time() - time_s, 's')
sys.exit()
Y = S
X = Y.copy()
M = {}
MX, MY, M_fold = {}, {}, {}
n = 1
if 1:
    MX[n] = dtensor(X).unfold(n)
    MY[n] = dtensor(Y).unfold(n)
    print(time.time() - time_s, 's')
    M_temp = (0.1 * MX[n] + 0.1 * MY[n]) / 0.2
Example No. 34
def test_dtensor_ttm():
    X = dtensor(T)
    Y2 = X.ttm(U, 0)
    assert_equal((2, 4, 2), Y2.shape)
    assert_true((Y == Y2).all())
Example No. 35
def test_dtensor_fold_unfold():
    sz = (10, 35, 3, 12)
    X = dtensor(randn(*sz))
    for i in range(4):
        U = X.unfold(i).fold()
        assert_true((X == U).all())