def test1():
    # Build a random 10 x 30 x 40 dense tensor from a random 2 x 3 x 4 Tucker
    # core and random factor matrices, then run DTA with target ranks [1, 2, 3].
    A = ttensor.ttensor(tensor.tenrands([2, 3, 4]), [
        numpy.random.random([10, 2]),
        numpy.random.random([30, 3]),
        numpy.random.random([40, 4])
    ]).totensor()
    [a, b] = dta(A, [1, 2, 3])
    print a
    print b
    # Run DTA again on a small, deterministic Tucker tensor.
    Core = numpy.arange(24).reshape([2, 3, 4])
    # Core = numpy.array([[1,3,5],[2,4,6]] , [[7,9,11],[8,10,12]])
    u1 = numpy.array([[1, 2], [3, 4]])
    u2 = numpy.array([[0, 1, 0], [1, 0, 1], [1, 1, 1]])
    u3 = numpy.array([[1, 1, 1, 1], [1, 2, 3, 4], [1, 1, 1, 1]])
    tt = ttensor.ttensor(tensor.tensor(Core), [u1, u2, u3])

    print tt
    [a, b] = dta(tt.totensor(), [1, 2, 3])
    print a
    print a.totensor()
    print b
def dta(new_tensor, rank, variance_matrix_list=None, alpha=None):
    """Dynamic Tensor Analysis"""
    # number of order of the input tensor.
    order = new_tensor.ndims()

    # If the covariance matrices are not given,
    # initialize all of them to zero.
    if variance_matrix_list is None:
        variance_matrix_list = []
        dv = new_tensor.shape
        for i in range(0, order):
            variance_matrix_list.append(
                sparse.coo_matrix(([], ([], [])), [dv[i], dv[i]]))

    # If the forgetting factor is not given, it is 1.
    if alpha is None:
        alpha = 1

    u = []
    new_variance_matrix_list = []
    for i in range(0, order):
        # Mode-i matricization of the input tensor.
        if new_tensor.__class__ == tensor.tensor:
            new_tensor_matricize = tenmat.tenmat(new_tensor, [i]).tondarray()
        elif new_tensor.__class__ == sptensor.sptensor:
            new_tensor_matricize = sptenmat.sptenmat(new_tensor,
                                                     [i]).tosparsemat()
        elif new_tensor.__class__ == ttensor.ttensor:
            raise TypeError("ttensor input is not supported yet.")
        else:
            raise TypeError(
                "1st argument must be tensor, sptensor, or ttensor")

        # Decay the previous mode-i covariance by alpha and add the outer
        # product of the mode-i matricization of the new tensor.
        new_variance_matrix_list.append(
            numpy.array(alpha * variance_matrix_list[i] + numpy.dot(
                new_tensor_matricize, new_tensor_matricize.transpose())))
        # print "new,", new_variance_matrix_list
        # Keep the top rank[i] eigenvectors as the mode-i projection matrix.
        (eigenvalue, eigenmatrix) = eigwrapper(new_variance_matrix_list[i],
                                               rank[i])
        u.append(numpy.array(eigenmatrix))

    # print new_tensor
    # Project the new tensor onto the per-mode projection matrices to form
    # the core, then wrap the core and projections as a Tucker (ttensor).
    core = new_tensor.ttm(u, None, 't')
    reconstruct_tensor = ttensor.ttensor(core, u)
    print "core:", sparsity(core.tondarray().tolist())
    return reconstruct_tensor, new_variance_matrix_list
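
# A minimal streaming-usage sketch (not part of the original tests): it
# assumes a sequence of same-shaped random snapshots and shows how the
# covariance matrices returned by dta() can be fed back in on the next
# call, with alpha < 1 acting as the forgetting factor. The name
# test_streaming and the concrete shapes, ranks, and alpha are illustrative
# only.
def test_streaming():
    variance_matrices = None
    for step in range(3):
        # Random stand-in for the data snapshot at this time step.
        snapshot = ttensor.ttensor(tensor.tenrands([2, 3, 4]), [
            numpy.random.random([10, 2]),
            numpy.random.random([30, 3]),
            numpy.random.random([40, 4])
        ]).totensor()
        # Pass the covariance matrices from the previous step back in and
        # down-weight them with a forgetting factor of 0.9.
        [approx, variance_matrices] = dta(snapshot, [1, 2, 3],
                                          variance_matrices, 0.9)
        print approx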