Example #1
def test_cp_vonneumann_entropy_mixed_state():
    """Test for cp_vonneumann_entropy on CP tensors. 
    This test checks that the VNE of mixed states is calculated correctly.
    """
    state1 = tl.tensor([[
        0.03004805, 0.42426117, 0.5483771, 0.4784077, 0.25792725, 0.34388784,
        0.99927586, 0.96605812
    ]])
    state1 = state1 / tl.norm(state1)
    state2 = tl.tensor([[
        0.84250089, 0.43429687, 0.26551928, 0.18262211, 0.55584835, 0.2565509,
        0.33197401, 0.97741178
    ]])
    state2 = state2 / tl.norm(state2)
    mat_mixed = tl.tensor((tl.dot(tl.transpose(state1), state1) +
                           tl.dot(tl.transpose(state2), state2)) / 2.)
    actual_vne = 0.5546
    mat = parafac(tl.tensor(mat_mixed), rank=2, normalize_factors=True)
    mat_unnorm = parafac(tl.tensor(mat_mixed), rank=2, normalize_factors=False)
    tl_vne = cp_vonneumann_entropy(mat)
    tl_vne_unnorm = cp_vonneumann_entropy(mat_unnorm)
    assert_array_almost_equal(tl_vne, actual_vne, decimal=3)
    assert_array_almost_equal(tl_vne_unnorm, actual_vne, decimal=3)
Example #2
def tr_to_tensor(factors):
    """Returns the full tensor whose TR decomposition is given by 'factors'

        Re-assembles 'factors', which represent a tensor in TR format,
        into the corresponding full tensor

    Parameters
    ----------
    factors : list of 3D-arrays
              TR factors (TR-cores)

    Returns
    -------
    output_tensor : ndarray
                   tensor whose TR decomposition was given by 'factors'
    """
    full_shape = [f.shape[1] for f in factors]
    full_tensor = tl.reshape(factors[0], (-1, factors[0].shape[2]))

    for factor in factors[1:-1]:
        rank_prev, _, rank_next = factor.shape
        factor = tl.reshape(factor, (rank_prev, -1))
        full_tensor = tl.dot(full_tensor, factor)
        full_tensor = tl.reshape(full_tensor, (-1, rank_next))

    full_tensor = tl.reshape(full_tensor,
                             (factors[-1].shape[2], -1, factors[-1].shape[0]))
    full_tensor = tl.moveaxis(full_tensor, 0, -1)
    full_tensor = tl.reshape(full_tensor,
                             (-1, factors[-1].shape[0] * factors[-1].shape[2]))
    factor = tl.moveaxis(factors[-1], -1, 1)
    factor = tl.reshape(factor, (-1, full_shape[-1]))
    full_tensor = tl.dot(full_tensor, factor)
    return tl.reshape(full_tensor, full_shape)
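
A minimal round-trip sketch for tr_to_tensor (an illustration, not part of the original example; the three cores are built by hand and tensorly is assumed imported as tl):

import numpy as np
import tensorly as tl

# Three TR cores of shape (rank_k, dim_k, rank_{k+1}); the boundary ranks
# match (2 ... 2), so the tensor ring closes.
cores = [tl.tensor(np.random.rand(2, 4, 3)),
         tl.tensor(np.random.rand(3, 5, 2)),
         tl.tensor(np.random.rand(2, 6, 2))]
full = tr_to_tensor(cores)
print(full.shape)  # (4, 5, 6)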
Example #3
    def __factor(self, tensor, factors, mode, pseudo_inverse):
        """Compute a factor optimization for TCA

		Parameters
		----------
		tensor : torch.Tensor
			The tensor of activity of N neurons, T timepoints and K trials of shape N, T, K
		factors : list
			List of tensors, each one containing a factor
		mode : int
			Index of the factor to optimize
		pseudo_inverse : torch.Tensor
			Pseudo inverse matrix of the current factor

		Returns
		-------
		torch.Tensor
			Optimized factor

		"""
        for i, factor in enumerate(factors):
            if i != mode:
                pseudo_inverse = pseudo_inverse * tl.dot(
                    tl.transpose(factor), factor)
        factor = tl.dot(tl.base.unfold(tensor, mode),
                        tl.tenalg.khatri_rao(factors, skip_matrix=mode))
        factor = tl.transpose(
            tl.solve(tl.transpose(pseudo_inverse), tl.transpose(factor)))

        return factor
Example #4
    def __factor_non_negative(self, tensor, factors, mode):
        """Compute a non-negative factor optimization for TCA

		Parameters
		----------
		tensor : torch.Tensor
			The tensor of activity of N neurons, T timepoints and K trials of shape N, T, K
		factors : list
			List of tensors, each one containing a factor
		mode : int
			Index of the factor to optimize

		Returns
		-------
		float
			Ratio by which to multiply the current factor in the update

		"""
        sub_indices = [i for i in range(self.dimension) if i != mode]
        for i, e in enumerate(sub_indices):
            if i:
                accum = accum * tl.dot(tl.transpose(factors[e]), factors[e])
            else:
                accum = tl.dot(tl.transpose(factors[e]), factors[e])

        numerator = tl.dot(tl.base.unfold(tensor, mode),
                           tl.tenalg.khatri_rao(factors, skip_matrix=mode))
        numerator = tl.clip(numerator, a_min=self.epsilon, a_max=None)
        denominator = tl.dot(factors[mode], accum)
        denominator = tl.clip(denominator, a_min=self.epsilon, a_max=None)

        return (numerator / denominator)
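
The returned ratio is meant to be applied by the caller as a multiplicative update; a hedged sketch of that call site (hypothetical, since this is a private method of a TCA class):

# Hedged sketch of the caller-side update (hypothetical call site):
# multiplying by the returned ratio keeps the factor entrywise non-negative
# as long as the current factor is non-negative.
# factors[mode] = factors[mode] * self.__factor_non_negative(tensor, factors, mode)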
Example #5
def Projectiononstiefelmanifold(Point, Parameter):
    """Project 'Parameter' using the base point 'Point' on the Stiefel
    manifold (relies on a Skewmatrix helper, sketched below)."""
    p = np.array(Parameter.shape, dtype=int)[0]
    I = tl.tensor(np.identity(p))
    Result = tl.dot(I + tl.dot(Point, Point.T), Parameter)
    Result = Result + tl.dot(Point, Skewmatrix(tl.dot(Point.T, Parameter)))
    return Result
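
Skewmatrix is not defined in this snippet; a minimal sketch under the common assumption that it returns the skew-symmetric part of a square matrix:

# Minimal sketch of the missing 'Skewmatrix' helper, assuming it returns the
# skew-symmetric part of a square matrix (an assumption, not the original code):
def Skewmatrix(A):
    return 0.5 * (A - A.T)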
Example #6
def test_vonneumann_entropy_mixed_state():
    """Test for vonneumann_entropy on 2-dimensional tensors.
    This test checks that the VNE of mixed states is calculated correctly.
    """
    state1 = tl.tensor([[0.03004805, 0.42426117, 0.5483771 , 0.4784077 , 0.25792725, 0.34388784, 0.99927586, 0.96605812]])
    state1 = state1/tl.norm(state1)
    state2 = tl.tensor([[0.84250089, 0.43429687, 0.26551928, 0.18262211, 0.55584835, 0.2565509 , 0.33197401, 0.97741178]])
    state2 = state2/tl.norm(state2)
    mat_mixed = tl.tensor((tl.dot(tl.transpose(state1), state1) + tl.dot(tl.transpose(state2), state2))/2.)
    actual_vne = 0.5546
    tl_vne = vonneumann_entropy(mat_mixed)
    assert_array_almost_equal(tl_vne, actual_vne, decimal=3)
Example #7
def dtd(factors_old, X_old, X_new, rank, n_iter=1, mu=1, verbose=False):
    """Online CP (PARAFAC) update: refine 'factors_old' when new slices
    'X_new' arrive along the temporal (first) mode, with 'mu' weighting
    the contribution of the old data."""
    weights = tl.ones(rank)
    if verbose:
        X = tl.tensor(np.concatenate((X_old, X_new)))
    n_dim = tl.ndim(X_old)
    U = factors_old.copy()

    for i in range(n_iter):
        # temporal mode for A1
        V = tl.tensor(np.ones((rank, rank)))
        for j, factor in enumerate(U):
            if j != 0:
                V = V * tl.dot(tl.transpose(factor), factor)
        mttkrp = unfolding_dot_khatri_rao(X_new, (None, U), 0)
        A1 = tl.transpose(tl.solve(tl.transpose(V), tl.transpose(mttkrp)))

        # non-temporal mode
        for mode in range(1, n_dim):
            U1 = U.copy()
            U1[0] = A1
            V = tl.tensor(np.ones((rank, rank)))
            W = tl.tensor(np.ones((rank, rank)))
            for j, factor in enumerate(U):
                factor_old = factors_old[j]
                if j != mode:
                    W = W * tl.dot(tl.transpose(factor_old), factor)
                    if j == 0:
                        V = V * (mu * tl.dot(tl.transpose(factor), factor) +
                                 tl.dot(tl.transpose(A1), A1))
                    else:
                        V = V * tl.dot(tl.transpose(factor), factor)
            mttkrp0 = mu * tl.dot(factors_old[mode], W)
            mttkrp1 = unfolding_dot_khatri_rao(X_new, (None, U1), mode)
            U[mode] = tl.transpose(
                tl.solve(tl.transpose(V), tl.transpose(mttkrp0 + mttkrp1)))

        # temporal mode for A0
        V = tl.tensor(np.ones((rank, rank)))
        W = tl.tensor(np.ones((rank, rank)))
        for j, factor in enumerate(U):
            factor_old = factors_old[j]
            if j != 0:
                V = V * tl.dot(tl.transpose(factor), factor)
                W = W * tl.dot(tl.transpose(factor_old), factor)
        mttkrp = tl.dot(factors_old[0], W)
        U[0] = tl.transpose(tl.solve(tl.transpose(V), tl.transpose(mttkrp)))
        if verbose:
            U1 = U.copy()
            U1[0] = np.concatenate((U[0], A1))
            X_est = construct_tensor(U1)
            compare_tensors(X, X_est)

    U[0] = np.concatenate((U[0].copy(), A1))
    return KruskalTensor((weights, U))
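
A hedged usage sketch for dtd (NumPy backend assumed; tensorly's stock parafac is used only to obtain an initial factorization, and the helpers dtd depends on are assumed to be in scope as in the original module):

import numpy as np
import tensorly as tl
from tensorly.decomposition import parafac

X_old = np.random.rand(50, 20, 30)
X_new = np.random.rand(5, 20, 30)   # new slices along the temporal mode
rank = 4
weights, factors_old = parafac(tl.tensor(X_old), rank=rank)
updated = dtd(factors_old, X_old, X_new, rank, n_iter=1)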
Example #8
def test_cp_to_tensor_with_weights():
    A = tl.reshape(tl.arange(1,5), (2,2))
    B = tl.reshape(tl.arange(5,9), (2,2))
    weights = tl.tensor([2,-1], **tl.context(A))

    out = cp_to_tensor((weights, [A,B]))
    expected = tl.tensor([[-2,-2], [6, 10]])  # computed by hand
    assert_array_equal(out, expected)

    (weights, factors) = random_cp((5, 5, 5), rank=5, normalise_factors=True, full=False)
    true_res = tl.dot(tl.dot(factors[0], tl.diag(weights)),
                      tl.transpose(tl.tenalg.khatri_rao(factors[1:])))
    true_res = tl.fold(true_res, 0, (5, 5, 5))
    res = cp_to_tensor((weights, factors))
    assert_array_almost_equal(true_res, res,
     err_msg='weights incorrectly incorporated in cp_to_tensor')
Example #9
def mps_to_tensor(factors):
    """Returns the full tensor whose MPS decomposition is given by 'factors'

        Re-assembles 'factors', which represent a tensor in MPS/TT format,
        into the corresponding full tensor

    Parameters
    ----------
    factors: list of 3D-arrays
              MPS factors (known as core in TT terminology)

    Returns
    -------
    output_tensor: ndarray
                   tensor whose MPS/TT decomposition was given by 'factors'
    """
    full_shape = [f.shape[1] for f in factors]
    full_tensor = tl.reshape(factors[0], (full_shape[0], -1))

    for factor in factors[1:]:
        rank_prev, _, rank_next = factor.shape
        factor = tl.reshape(factor, (rank_prev, -1))
        full_tensor = tl.dot(full_tensor, factor)
        full_tensor = tl.reshape(full_tensor, (-1, rank_next))

    return tl.reshape(full_tensor, full_shape)
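
A round-trip sketch: decompose, then re-assemble with mps_to_tensor (the decomposition is exposed as matrix_product_state in older tensorly releases, as used elsewhere in these examples, and as tensor_train in newer ones; adjust the import to your version):

import numpy as np
import tensorly as tl
from tensorly.decomposition import matrix_product_state

X = tl.tensor(np.random.rand(3, 4, 5))
factors = matrix_product_state(X, rank=[1, 3, 4, 1])  # full TT ranks here
X_rec = mps_to_tensor(factors)
print(float(tl.norm(X - X_rec) / tl.norm(X)))  # near zero at full rank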
Example #10
def test_svd():
    """Test for the SVD functions"""
    tol = 0.1
    tol_orthogonality = 0.01

    for name, svd_fun in T.SVD_FUNS.items():
        sizes = [(100, 100), (100, 5), (10, 10), (10, 4), (5, 100)]
        n_eigenvecs = [90, 4, 5, 4, 5]

        for s, n in zip(sizes, n_eigenvecs):
            matrix = np.random.random(s)
            matrix_backend = T.tensor(matrix)
            fU, fS, fV = svd_fun(matrix_backend, n_eigenvecs=n)
            U, S, V = svd(matrix)
            U, S, V = U[:, :n], S[:n], V[:n, :]

            assert_array_almost_equal(np.abs(S), T.abs(fS), decimal=3,
                err_msg='eigenvals not correct for "{}" svd fun VS svd and backend="{}", for {} eigenvecs, and size {}.'.format(
                        name, tl.get_backend(), n, s))

            # True reconstruction error (based on numpy SVD)
            true_rec_error = np.sum((matrix - np.dot(U, S.reshape((-1, 1))*V))**2)
            # Reconstruction error with the backend's SVD
            rec_error = T.sum((matrix_backend - T.dot(fU, T.reshape(fS, (-1, 1))*fV))**2)
            # Check that the two are similar
            assert_(true_rec_error - rec_error <= tol,
                msg='Reconstruction not correct for "{}" svd fun VS svd and backend="{}", for {} eigenvecs, and size {}.'.format(
                        name, tl.get_backend(), n, s))

            # Check for orthogonality when relevant
            if name != 'symeig_svd':
                left_orthogonality_error = T.norm(T.dot(T.transpose(fU), fU) - T.eye(n))
                assert_(left_orthogonality_error <= tol_orthogonality,
                    msg='Left eigenvecs not orthogonal for "{}" svd fun VS svd and backend="{}", for {} eigenvecs, and size {}.'.format(
                            name, tl.get_backend(), n, s))
                right_orthogonality_error = T.norm(T.dot(fV, T.transpose(fV)) - T.eye(n))
                assert_(right_orthogonality_error <= tol_orthogonality,
                    msg='Right eigenvecs not orthogonal for "{}" svd fun VS svd and backend="{}", for {} eigenvecs, and size {}.'.format(
                        name, tl.get_backend(), n, s))

        # Should fail on non-matrices
        with assert_raises(ValueError):
            tensor = T.tensor(np.random.random((3, 3, 3)))
            svd_fun(tensor)

        # Test for singular matrices (some eigenvals will be zero)
        # Rank at most 5
        matrix = T.tensor(np.dot(np.random.random((20, 5)), np.random.random((5, 20))))
        U, S, V = tl.partial_svd(matrix, n_eigenvecs=n)
        true_rec_error = tl.sum((matrix - tl.dot(U, tl.reshape(S, (-1, 1))*V))**2)
        assert_(true_rec_error <= tol)

        # Test if partial_svd returns the same result for the same setting
        matrix = T.tensor(np.random.random((20, 5)))
        random_state = np.random.RandomState(0)
        U1, S1, V1 = tl.partial_svd(matrix, n_eigenvecs=2, random_state=random_state)
        U2, S2, V2 = tl.partial_svd(matrix, n_eigenvecs=2, random_state=0)
        assert_array_equal(U1, U2)
        assert_array_equal(S1, S2)
        assert_array_equal(V1, V2)
Example #11
def tt_matrix_to_tensor(tt_matrix):
    """Returns the full tensor whose TT-Matrix decomposition is given by 'factors'

        Re-assembles 'factors', which represent a tensor in TT-Matrix format,
        into the corresponding full tensor

    Parameters
    ----------
    tt_matrix : list of 4D-arrays
              TT-Matrix factors (known as cores) of shape (rank_k, left_dim_k, right_dim_k, rank_{k+1})

    Returns
    -------
    output_tensor: ndarray
                   tensor whose TT-Matrix decomposition was given by 'factors'
    """
    # Each core is of shape (rank_left, size_in, size_out, rank_right)
    rank, in_shape, out_shape, rank_right = zip(*(tl.shape(f) for f in tt_matrix))
    rank += (rank_right[-1], )                           
    ndim = len(in_shape)
    
    # Intertwine the dims 
    # full_shape = in_shape[0], out_shape[0], in_shape[1], ...
    full_shape = sum(zip(*(in_shape, out_shape)), ())
    order = list(range(0, ndim*2, 2)) + list(range(1, ndim*2, 2))

    for i, factor in enumerate(tt_matrix):
        if not i:
            res = tl.reshape(factor, (factor.shape[1], -1))
        else:
            res = tl.dot(tl.reshape(res, (-1, rank[i])), tl.reshape(factor, (rank[i], -1)))
    res = tl.reshape(res, full_shape)
    
    return tl.transpose(res, order)
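
A small sketch assembling a TT-Matrix from two hand-made 4D cores of shape (rank_k, left_dim_k, right_dim_k, rank_{k+1}), with boundary ranks of 1 (illustrative shapes, not from the original example):

import numpy as np
import tensorly as tl

cores = [tl.tensor(np.random.rand(1, 2, 3, 4)),
         tl.tensor(np.random.rand(4, 5, 6, 1))]
full = tt_matrix_to_tensor(cores)
print(full.shape)  # (2, 5, 3, 6): left (input) dims first, then right (output) dims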
Example #12
def tt_to_tensor(factors):
    """Returns the full tensor whose TT decomposition is given by 'factors'

        Re-assembles 'factors', which represent a tensor in TT/Matrix-Product-State format,
        into the corresponding full tensor

    Parameters
    ----------
    factors : list of 3D-arrays
              TT factors (TT-cores)

    Returns
    -------
    output_tensor : ndarray
                   tensor whose TT/MPS decomposition was given by 'factors'
    """
    full_shape = [f.shape[1] for f in factors]
    full_tensor = tl.reshape(factors[0], (full_shape[0], -1))

    for factor in factors[1:]:
        rank_prev, _, rank_next = factor.shape
        factor = tl.reshape(factor, (rank_prev, -1))
        full_tensor = tl.dot(full_tensor, factor)
        full_tensor = tl.reshape(full_tensor, (-1, rank_next))

    return tl.reshape(full_tensor, full_shape)
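
A round-trip sketch with tensorly's random TT helper (random_tt, as used in Example #13 below; whether the returned object exposes its cores via .factors may vary slightly by version):

import tensorly as tl
from tensorly.random import random_tt

tt = random_tt((3, 4, 5), rank=[1, 2, 2, 1])
full = tt_to_tensor(tt.factors)  # list of 3D TT-cores
print(full.shape)  # (3, 4, 5)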
Example #13
def test_tt_factorized_linear():
    x = tlr.random_tt((2, 7), rank=[1, 7, 1])
    weights = tlr.random_tt_matrix((2, 7, 2, 7), rank=[1, 10, 1])
    out = tt_factorized_linear(x, weights).to_tensor()
    out = tl.reshape(out, (-1, 1))
    manual_out = tl.dot(weights.to_matrix(),
                        tl.reshape(x.to_tensor(), (-1, 1)))
    assert_array_almost_equal(out, manual_out, decimal=4)
Example #14
def test_vonneumann_entropy_pure_state():
    """Test for vonneumann_entropy on 2-dimensional tensors.
    This test checks that pure states have a VNE of zero.
    """
    state = tl.randn((8, 1))
    state = state / tl.norm(state)
    mat_pure = tl.dot(state, tl.transpose(state))
    tl_vne = vonneumann_entropy(mat_pure)
    assert_array_almost_equal(tl_vne, 0, decimal=3)
Example #15
def als(tensor, rank, factors=None, it_max=100, tol=1e-7, list_factors=False, error_fast=True, time_rec=False):
  """
    ALS methode of CP decomposition

    Parameters
    ----------
    tensor : tensor
    rank : int
    factors : list of matrices, optional
        an initial factor matrices. The default is None.
    it_max : int, optional
        maximal number of iteration. The default is 100.
    tol : float, optional
        error tolerance. The default is 1e-7.
    list_factors : boolean, optional
        If true, then return factor matrices of each iteration. The default is False.
    error_fast : boolean, optional
        If true, use err_fast to compute data fitting error, otherwise, use err. The default is True.
    time_rec : boolean, optional
        If true, return computation time of each iteration. The default is False.

    Returns
    -------
    the CP decomposition, number of iteration and termination criterion. 
    list_fac and list_time are optional.
  """
  N = tl.ndim(tensor)  # order of the tensor
  norm_tensor = tl.norm(tensor)  # norm of the tensor
  if time_rec:
    list_time = []
  if list_factors:
    list_fac = []  # factor matrices of each iteration

  if factors is None:
    factors = svd_init_fac(tensor, rank)

  weights = None
  it = 0
  if list_factors:
    list_fac.append(copy.deepcopy(factors))
  error = [err(tensor, weights, factors) / norm_tensor]
  while error[-1] > tol and it < it_max:
    if time_rec:
      tic = time.time()
    for n in range(N):
      V = np.ones((rank, rank))
      for i in range(len(factors)):
        if i != n:
          V = V * tl.dot(tl.transpose(factors[i]), factors[i])
      W = tl.cp_tensor.unfolding_dot_khatri_rao(tensor, (None, factors), n)
      factors[n] = tl.transpose(tl.solve(tl.transpose(V), tl.transpose(W)))
    if list_factors:
      list_fac.append(copy.deepcopy(factors))
    it = it + 1
    if not error_fast:
      error.append(err(tensor, weights, factors) / norm_tensor)
    else:
      error.append(err_fast(norm_tensor, factors[N - 1], V, W) / norm_tensor)
    if time_rec:
      toc = time.time()
      list_time.append(toc - tic)
  if list_factors and time_rec:
    return weights, factors, it, error, list_fac, list_time
  if time_rec:
    return weights, factors, it, error, list_time
  if list_factors:
    return weights, factors, it, error, list_fac
  return weights, factors, it, error
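
A hedged usage sketch for als (err, err_fast and svd_init_fac are project helpers assumed importable alongside als; NumPy backend assumed):

import numpy as np
import tensorly as tl

T_data = tl.tensor(np.random.rand(10, 11, 12))
weights, factors, n_it, error = als(T_data, rank=3)
print(n_it, error[-1])  # iterations used and final relative error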
Example #16
def test_cp_to_tensor():
    """Test for cp_to_tensor."""
    U1 = np.reshape(np.arange(1, 10), (3, 3))
    U2 = np.reshape(np.arange(10, 22), (4, 3))
    U3 = np.reshape(np.arange(22, 28), (2, 3))
    U4 = np.reshape(np.arange(28, 34), (2, 3))
    U = [tl.tensor(t) for t in [U1, U2, U3, U4]]
    true_res = tl.tensor([[[[  46754.,   51524.],
                            [  52748.,   58130.]],

                           [[  59084.,   65114.],
                            [  66662.,   73466.]],

                           [[  71414.,   78704.],
                            [  80576.,   88802.]],

                           [[  83744.,   92294.],
                            [  94490.,  104138.]]],


                          [[[ 113165.,  124784.],
                            [ 127790.,  140912.]],

                           [[ 143522.,  158264.],
                            [ 162080.,  178730.]],

                           [[ 173879.,  191744.],
                            [ 196370.,  216548.]],

                           [[ 204236.,  225224.],
                            [ 230660.,  254366.]]],


                          [[[ 179576.,  198044.],
                            [ 202832.,  223694.]],

                           [[ 227960.,  251414.],
                            [ 257498.,  283994.]],

                           [[ 276344.,  304784.],
                            [ 312164.,  344294.]],

                           [[ 324728.,  358154.],
                            [ 366830.,  404594.]]]])
    res = cp_to_tensor((tl.ones(3), U))
    assert_array_equal(res, true_res, err_msg='Khatri-rao incorrectly transformed into full tensor.')

    columns = 4
    rows = [3, 4, 2]
    matrices = [tl.tensor(np.arange(k * columns).reshape((k, columns))) for k in rows]
    tensor = cp_to_tensor((tl.ones(columns), matrices))
    for i in range(len(rows)):
        unfolded = unfold(tensor, mode=i)
        U_i = matrices.pop(i)
        reconstructed = tl.dot(U_i, tl.transpose(khatri_rao(matrices)))
        assert_array_almost_equal(reconstructed, unfolded)
        matrices.insert(i, U_i)
Example #17
def test_tt_vonneumann_entropy_pure_state():
    """Test for tt_vonneumann_entropy TT tensors.
    This test checks that pure states have a VNE of zero.
    """
    state = tl.randn((8, 1))
    state = state/tl.norm(state)
    mat_pure = tl.reshape(tl.dot(state, tl.transpose(state)), (2, 2, 2, 2, 2, 2))
    mat_pure = matrix_product_state(mat_pure, rank=(1, 3, 2, 1, 2, 3, 1))
    tl_vne = tt_vonneumann_entropy(mat_pure)
    assert_array_almost_equal(tl_vne, 0, decimal=3)
Example #18
def test_cp_vonneumann_entropy_pure_state():
    """Test for cp_vonneumann_entropy on 2-dimensional CP tensors.
    This test checks that pure states have a VNE of zero.
    """
    state = tl.randn((8, 1))
    state = state / tl.norm(state)
    mat_pure = tl.dot(state, tl.transpose(state))
    mat = parafac(mat_pure, rank=1, normalize_factors=True)
    tl_vne = cp_vonneumann_entropy(mat)
    assert_array_almost_equal(tl_vne, 0, decimal=3)
Example #19
def test_tr_to_tensor():
    # Create ground truth TR factors
    factors = [tl.randn((2, 4, 3)), tl.randn((3, 5, 2)), tl.randn((2, 6, 2))]

    # Create tensor
    tensor = tl.zeros((4, 5, 6))

    for i in range(4):
        for j in range(5):
            for k in range(6):
                product = tl.dot(
                    tl.dot(factors[0][:, i, :], factors[1][:, j, :]),
                    factors[2][:, k, :])
                # TODO: add trace to backend instead of this
                tensor = tl.index_update(
                    tensor, tl.index[i, j, k],
                    tl.sum(product * tl.eye(product.shape[0])))

    # Check that TR factors re-assemble to the original tensor
    assert_array_almost_equal(tensor, tr_to_tensor(factors))
Example #20
def contract(tensor1, modes1, tensor2, modes2):
    """Tensor contraction between two tensors on specified modes
    
    Parameters
    ----------
    tensor1 : tl.tensor
    modes1 : int list or int
        modes on which to contract tensor1
    tensor2 : tl.tensor
    modes2 : int list or int
        modes on which to contract tensor2

    Returns
    -------
    contraction : tensor1 contracted with tensor2 on the specified modes
    """
    if isinstance(modes1, int):
        modes1 = [modes1]
    if isinstance(modes2, int):
        modes2 = [modes2]
    modes1 = list(modes1)
    modes2 = list(modes2)

    if len(modes1) != len(modes2):
        raise ValueError(
            'Can only contract two tensors along the same number of modes '
            '(len(modes1) == len(modes2)). '
            'However, got {} modes for tensor 1 and {} modes for tensor 2 '
            '(modes1={}, and modes2={}).'.format(len(modes1), len(modes2),
                                                 modes1, modes2))

    contraction_dims = [tl.shape(tensor1)[i] for i in modes1]
    if contraction_dims != [tl.shape(tensor2)[i] for i in modes2]:
        raise ValueError(
            'Trying to contract tensors over modes of different sizes '
            '(contracting modes of sizes {} and {}).'.format(
                contraction_dims, [tl.shape(tensor2)[i] for i in modes2]))
    shared_dim = int(np.prod(contraction_dims))

    modes1_free = [i for i in range(tl.ndim(tensor1)) if i not in modes1]
    free_shape1 = [tl.shape(tensor1)[i] for i in modes1_free]

    tensor1 = tl.reshape(tl.transpose(tensor1, modes1_free + modes1),
                         (int(np.prod(free_shape1)), shared_dim))

    modes2_free = [i for i in range(tl.ndim(tensor2)) if i not in modes2]
    free_shape2 = [tl.shape(tensor2)[i] for i in modes2_free]

    tensor2 = tl.reshape(tl.transpose(tensor2, modes2 + modes2_free),
                         (shared_dim, int(np.prod(free_shape2))))

    res = tl.dot(tensor1, tensor2)
    return tl.reshape(res, tuple(free_shape1 + free_shape2))
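
contract mirrors np.tensordot on the specified modes; a quick check under the NumPy backend (an illustration, not part of the original example):

import numpy as np
import tensorly as tl

A = tl.tensor(np.random.rand(3, 4, 5))
B = tl.tensor(np.random.rand(4, 6))
out = contract(A, 1, B, 0)  # contract mode 1 of A with mode 0 of B -> (3, 5, 6)
ref = np.tensordot(tl.to_numpy(A), tl.to_numpy(B), axes=([1], [0]))
print(np.allclose(tl.to_numpy(out), ref))  # True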
Example #21
    def get_orthogonality_loss(self):
        """Penalize deviation of the factor matrices from orthogonality,
        summing ||F^T F - I||_F^2 over the factors of each order."""
        if self.rank_tucker == -1:
            return 0

        loss = 0

        for fact in self.order1_tens[1]:
            loss += torch.sum(
                (tl.dot(fact.T, fact) - torch.eye(fact.shape[1]).cuda())**2)

        if self.order >= 2:

            for fact in self.order2_tens[1]:
                loss += torch.sum((tl.dot(fact.T, fact) -
                                   torch.eye(fact.shape[1]).cuda())**2)

        if self.order == 3:

            for fact in self.order3_tens[1]:
                loss += torch.sum((tl.dot(fact.T, fact) -
                                   torch.eye(fact.shape[1]).cuda())**2)

        return loss
Example #22
def svd_thresholding(matrix, threshold):
    """Singular value thresholding operator

    Parameters
    ----------
    matrix : ndarray
    threshold : float

    Returns
    -------
    ndarray
        matrix on which the operator has been applied

    See also
    --------
    procrustes : procrustes operator
    """
    U, s, V = tl.partial_svd(matrix, n_eigenvecs=min(matrix.shape))
    return tl.dot(U, tl.reshape(soft_thresholding(s, threshold), (-1, 1)) * V)
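
A quick sanity check (an illustration, NumPy backend assumed): a zero threshold reproduces the matrix, while a threshold above the largest singular value shrinks it to zero.

import numpy as np
import tensorly as tl

M = tl.tensor(np.random.rand(5, 4))
print(float(tl.norm(svd_thresholding(M, 0.0) - M)))  # ~0 (numerical noise)
print(float(tl.norm(svd_thresholding(M, 100.0))))    # 0: all singular values clipped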
Example #23
def test_unfolding_dot_khatri_rao():
    """Test for unfolding_dot_khatri_rao
    
    Check against other version check sparse safe
    """
    shape = (10, 10, 10, 4)
    rank = 5
    tensor = tl.tensor(np.random.random(shape))
    weights, factors = random_cp(shape=shape, rank=rank, 
                                      full=False, normalise_factors=True)
    
    for mode in range(tl.ndim(tensor)):
        # Version forming explicitely the khatri-rao product
        unfolded = unfold(tensor, mode)
        kr_factors = khatri_rao(factors, weights=weights, skip_matrix=mode)
        true_res = tl.dot(unfolded, kr_factors)

        # Efficient sparse-safe version
        res = unfolding_dot_khatri_rao(tensor, (weights, factors), mode)
        assert_array_almost_equal(true_res, res, decimal=3)
Example #24
def procrustes(matrix):
    """Procrustes operator

    Parameters
    ----------
    matrix : ndarray

    Returns
    -------
    ndarray
        matrix on which the Procrustes operator has been applied,
        with the same shape as the input matrix


    See also
    --------
    svd_thresholding : SVD-thresholding operator
    """
    U, _, V = tl.partial_svd(matrix, n_eigenvecs=min(matrix.shape))
    return tl.dot(U, V)
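
Since procrustes returns U @ V from the SVD of the input, it yields the nearest orthogonal matrix in Frobenius norm; a sanity check (illustration, NumPy backend assumed):

import numpy as np
import tensorly as tl

M = tl.tensor(np.random.rand(4, 4))
Q = procrustes(M)
# Q^T Q should be the identity for a square full-rank input
print(np.allclose(tl.to_numpy(tl.dot(tl.transpose(Q), Q)), np.eye(4), atol=1e-8))  # True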
Example #25
def parafac(tensor,
            rank,
            n_iter_max=100,
            tol=1e-8,
            random_state=None,
            verbose=False,
            return_errors=False,
            mode_three_val=[[0.5, 0.5, 0.0], [0.0, 0.5, 0.5]]):
    """CANDECOMP/PARAFAC decomposition via alternating least squares (ALS)

    Computes a rank-`rank` decomposition of `tensor` [1]_ such that,

        ``tensor = [| factors[0], ..., factors[-1] |]``.

    Parameters
    ----------
    tensor : ndarray
    rank  : int
        Number of components.
    n_iter_max : int
        Maximum number of iteration
    tol : float, optional
        (Default: 1e-8) Relative reconstruction error tolerance. The
        algorithm is considered to have converged when the variation in
        reconstruction error is less than `tol`.
    random_state : {None, int, np.random.RandomState}
    verbose : int, optional
        Level of verbosity
    return_errors : bool, optional
        Activate return of iteration errors
    mode_three_val : list of two lists, optional
        Values to which the first two rows of the third-mode factor are reset
        before every factor update.


    Returns
    -------
    factors : ndarray list
        List of factors of the CP decomposition element `i` is of shape
        (tensor.shape[i], rank)
    errors : list
        A list of reconstruction errors at each iteration of the algorithms.

    References
    ----------
    .. [1] tl.G.Kolda and B.W.Bader, "Tensor Decompositions and Applications",
       SIAM REVIEW, vol. 51, n. 3, pp. 455-500, 2009.
    """

    factors = initialize_factors(tensor, rank, random_state=random_state)
    rec_errors = []
    norm_tensor = tl.norm(tensor, 2)

    # Mode-3 values that control the country factors are set using the
    # mode_three_val argument.

    fixed_ja = mode_three_val[0]
    fixed_ko = mode_three_val[1]

    for iteration in range(n_iter_max):
        for mode in range(tl.ndim(tensor)):
            pseudo_inverse = tl.tensor(np.ones((rank, rank)),
                                       **tl.context(tensor))

            factors[2][0] = fixed_ja  # set mode-3 values
            factors[2][1] = fixed_ko  # set mode-3 values

            for i, factor in enumerate(factors):
                if i != mode:
                    pseudo_inverse = pseudo_inverse * tl.dot(
                        tl.transpose(factor), factor)
            factor = tl.dot(unfold(tensor, mode),
                            khatri_rao(factors, skip_matrix=mode))
            factor = tl.transpose(
                tl.solve(tl.transpose(pseudo_inverse), tl.transpose(factor)))
            factors[mode] = factor

        if tol:
            rec_error = tl.norm(tensor - kruskal_to_tensor(factors),
                                2) / norm_tensor
            rec_errors.append(rec_error)

            if iteration > 1:
                if verbose:
                    print('reconstruction error={}, variation={}.'.format(
                        rec_errors[-1], rec_errors[-2] - rec_errors[-1]))

                if tol and abs(rec_errors[-2] - rec_errors[-1]) < tol:
                    if verbose:
                        print('converged in {} iterations.'.format(iteration))
                    break

    if return_errors:
        return factors, rec_errors
    else:
        return factors
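
A hedged usage sketch for this modified parafac: the first two rows of the mode-3 factor are re-pinned to mode_three_val before every factor solve, so the default values require rank=3 and a third mode of size at least 2. The helpers it relies on (initialize_factors, unfold, khatri_rao, kruskal_to_tensor) are assumed imported as in the original module.

import numpy as np

X = np.random.rand(6, 7, 2)
factors = parafac(X, rank=3, n_iter_max=50, tol=1e-8)
print([f.shape for f in factors])  # [(6, 3), (7, 3), (2, 3)]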
Example #26
def non_negative_parafac(tensor,
                         rank,
                         n_iter_max=100,
                         init='svd',
                         svd='numpy_svd',
                         tol=10e-7,
                         random_state=None,
                         verbose=0,
                         normalize_factors=False,
                         return_errors=False,
                         mask=None,
                         orthogonalise=False,
                         cvg_criterion='abs_rec_error',
                         fixed_modes=[]):
    """
    Non-negative CP decomposition
    Uses multiplicative updates, see [2]_
    This is the same as parafac(non_negative=True).
    Parameters
    ----------
    tensor : ndarray
    rank   : int
            number of components
    n_iter_max : int
                 maximum number of iteration
    init : {'svd', 'random'}, optional
    svd : str, default is 'numpy_svd'
        function to use to compute the SVD, acceptable values in tensorly.SVD_FUNS
    tol : float, optional
          tolerance: the algorithm stops when the variation in
          the reconstruction error is less than the tolerance
    random_state : {None, int, np.random.RandomState}
    verbose : int, optional
        level of verbosity
    fixed_modes : list, default is []
        A list of modes for which the initial value is not modified.
        The last mode cannot be fixed due to error computation.
    Returns
    -------
    factors : ndarray list
            list of positive factors of the CP decomposition
            element `i` is of shape ``(tensor.shape[i], rank)``
    References
    ----------
    .. [2] Amnon Shashua and Tamir Hazan,
       "Non-negative tensor factorization with applications to statistics and computer vision",
       In Proceedings of the International Conference on Machine Learning (ICML),
       pp 792-799, ICML, 2005
    """
    epsilon = 10e-12
    rank = validate_cp_rank(tl.shape(tensor), rank=rank)

    if mask is not None and init == "svd":
        message = "Masking occurs after initialization. Therefore, random initialization is recommended."
        warnings.warn(message, Warning)

    if orthogonalise and not isinstance(orthogonalise, int):
        orthogonalise = n_iter_max

    weights, factors = initialize_cp(tensor,
                                     rank,
                                     init=init,
                                     svd=svd,
                                     random_state=random_state,
                                     non_negative=True,
                                     normalize_factors=normalize_factors)
    rec_errors = []
    norm_tensor = tl.norm(tensor, 2)

    if tl.ndim(tensor) - 1 in fixed_modes:
        warnings.warn(
            'You asked for fixing the last mode, which is not supported while tol is fixed.\n The last mode will not be fixed. Consider using tl.moveaxis()'
        )
        fixed_modes.remove(tl.ndim(tensor) - 1)
    modes_list = [
        mode for mode in range(tl.ndim(tensor)) if mode not in fixed_modes
    ]

    for iteration in range(n_iter_max):
        if orthogonalise and iteration <= orthogonalise:
            for i, f in enumerate(factors):
                if min(tl.shape(f)) >= rank:
                    factors[i] = tl.abs(tl.qr(f)[0])

        if verbose > 1:
            print("Starting iteration", iteration + 1)
        for mode in modes_list:
            if verbose > 1:
                print("Mode", mode, "of", tl.ndim(tensor))

            accum = 1
            # khatri_rao(factors).tl.dot(khatri_rao(factors))
            # simplifies to multiplications
            sub_indices = [i for i in range(len(factors)) if i != mode]
            for i, e in enumerate(sub_indices):
                if i:
                    accum *= tl.dot(tl.transpose(factors[e]), factors[e])
                else:
                    accum = tl.dot(tl.transpose(factors[e]), factors[e])

            if mask is not None:
                tensor = tensor * mask + tl.cp_to_tensor(
                    (None, factors), mask=1 - mask)

            mttkrp = unfolding_dot_khatri_rao(tensor, (None, factors), mode)

            numerator = tl.clip(mttkrp, a_min=epsilon, a_max=None)
            denominator = tl.dot(factors[mode], accum)
            denominator = tl.clip(denominator, a_min=epsilon, a_max=None)
            factor = factors[mode] * numerator / denominator

            factors[mode] = factor

        if normalize_factors:
            weights, factors = cp_normalize((weights, factors))

        if tol:
            # ||tensor - rec||^2 = ||tensor||^2 + ||rec||^2 - 2*<tensor, rec>
            factors_norm = cp_norm((weights, factors))

            # mttkrp and factor for the last mode. This is equivalent to the
            # inner product <tensor, factorization>
            iprod = tl.sum(tl.sum(mttkrp * factor, axis=0) * weights)
            rec_error = tl.sqrt(
                tl.abs(norm_tensor**2 + factors_norm**2 -
                       2 * iprod)) / norm_tensor
            rec_errors.append(rec_error)
            if iteration >= 1:
                rec_error_decrease = rec_errors[-2] - rec_errors[-1]

                if verbose:
                    print(
                        "iteration {}, reconstraction error: {}, decrease = {}"
                        .format(iteration, rec_error, rec_error_decrease))

                if cvg_criterion == 'abs_rec_error':
                    stop_flag = abs(rec_error_decrease) < tol
                elif cvg_criterion == 'rec_error':
                    stop_flag = rec_error_decrease < tol
                else:
                    raise TypeError("Unknown convergence criterion")

                if stop_flag:
                    if verbose:
                        print("PARAFAC converged after {} iterations".format(
                            iteration))
                    break
            else:
                if verbose:
                    print('reconstruction error={}'.format(rec_errors[-1]))

    cp_tensor = CPTensor((weights, factors))

    if return_errors:
        return cp_tensor, rec_errors
    else:
        return cp_tensor
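
A usage sketch mirroring this interface (illustration, NumPy backend assumed; a CPTensor unpacks into weights and factors):

import numpy as np
import tensorly as tl

X = tl.tensor(np.random.rand(8, 9, 10))
weights, factors = non_negative_parafac(X, rank=3, n_iter_max=200)
# multiplicative updates keep every factor entrywise non-negative
print(all((tl.to_numpy(f) >= 0).all() for f in factors))  # True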
Example #27
# Parameters of the plot, deduced from the data
n_rows = len(patterns)
n_columns = len(ranks) + 1
# Plot the three images
fig = plt.figure()

for i, pattern in enumerate(patterns):

    print('fitting pattern n.{}'.format(i))

    # Generate the original image
    weight_img = gen_image(region=pattern, image_height=image_height, image_width=image_width)
    weight_img = tl.tensor(weight_img)

    # Generate the labels
    y = tl.dot(partial_tensor_to_vec(X, skip_begin=1), tensor_to_vec(weight_img))

    # Plot the original weights
    ax = fig.add_subplot(n_rows, n_columns, i*n_columns + 1)
    ax.imshow(tl.to_numpy(weight_img), cmap=plt.cm.OrRd, interpolation='nearest')
    ax.set_axis_off()
    if i == 0:
        ax.set_title('Original\nweights')

    for j, rank in enumerate(ranks):
        print('fitting for rank = {}'.format(rank))

        # Create a tensor Regressor estimator
        estimator = TuckerRegressor(weight_ranks=[rank, rank], tol=10e-7, n_iter_max=100, reg_W=1, verbose=0)

        # Fit the estimator to the data
        estimator.fit(X, y)
Example #28
def non_negative_tucker(tensor,
                        rank,
                        n_iter_max=10,
                        init='svd',
                        tol=10e-5,
                        random_state=None,
                        verbose=False,
                        return_errors=False,
                        normalize_factors=False):
    """Non-negative Tucker decomposition

        Iterative multiplicative update, see [2]_

    Parameters
    ----------
    tensor : ``ndarray``
    rank : None, int or int list
        size of the core tensor, ``(len(ranks) == tensor.ndim)``
        if int, the same rank is used for all modes
    n_iter_max : int
        maximum number of iteration
    init : {'svd', 'random'}
    random_state : {None, int, np.random.RandomState}
    verbose : int , optional
        level of verbosity
    normalize_factors : if True, aggregates the core which will contain the norms of the factors.

    Returns
    -------
    core : ndarray
            positive core of the Tucker decomposition
            has shape `ranks`
    factors : ndarray list
            list of factors of the Tucker decomposition
            element `i` is of shape ``(tensor.shape[i], rank)``

    References
    ----------
    .. [2] Yong-Deok Kim and Seungjin Choi,
       "Non-negative tucker decomposition",
       IEEE Conference on Computer Vision and Pattern Recognition (CVPR),
       pp 1-8, 2007
    """
    rank = validate_tucker_rank(tl.shape(tensor), rank=rank)

    epsilon = 10e-12

    # Initialisation
    if init == 'svd':
        core, factors = tucker(tensor, rank)
        nn_factors = [tl.abs(f) for f in factors]
        nn_core = tl.abs(core)
    else:
        rng = tl.check_random_state(random_state)
        core = tl.tensor(rng.random_sample(rank) + 0.01,
                         **tl.context(tensor))  # Check this
        factors = [
            tl.tensor(rng.random_sample(s), **tl.context(tensor))
            for s in zip(tl.shape(tensor), rank)
        ]
        nn_factors = [tl.abs(f) for f in factors]
        nn_core = tl.abs(core)

    norm_tensor = tl.norm(tensor, 2)
    rec_errors = []

    for iteration in range(n_iter_max):
        for mode in range(tl.ndim(tensor)):
            B = tucker_to_tensor((nn_core, nn_factors), skip_factor=mode)
            B = tl.transpose(unfold(B, mode))

            numerator = tl.dot(unfold(tensor, mode), B)
            numerator = tl.clip(numerator, a_min=epsilon, a_max=None)
            denominator = tl.dot(nn_factors[mode], tl.dot(tl.transpose(B), B))
            denominator = tl.clip(denominator, a_min=epsilon, a_max=None)
            nn_factors[mode] *= numerator / denominator

        numerator = tucker_to_tensor((tensor, nn_factors),
                                     transpose_factors=True)
        numerator = tl.clip(numerator, a_min=epsilon, a_max=None)
        for i, f in enumerate(nn_factors):
            if i:
                denominator = mode_dot(denominator, tl.dot(tl.transpose(f), f),
                                       i)
            else:
                denominator = mode_dot(nn_core, tl.dot(tl.transpose(f), f), i)
        denominator = tl.clip(denominator, a_min=epsilon, a_max=None)
        nn_core *= numerator / denominator

        rec_error = tl.norm(tensor - tucker_to_tensor(
            (nn_core, nn_factors)), 2) / norm_tensor
        rec_errors.append(rec_error)
        if iteration > 1 and verbose:
            print('reconstruction error={}, variation={}.'.format(
                rec_errors[-1], rec_errors[-2] - rec_errors[-1]))

        if iteration > 1 and abs(rec_errors[-2] - rec_errors[-1]) < tol:
            if verbose:
                print('converged in {} iterations.'.format(iteration))
            break
        if normalize_factors:
            nn_core, nn_factors = tucker_normalize((nn_core, nn_factors))
    tensor = TuckerTensor((nn_core, nn_factors))
    if return_errors:
        return tensor, rec_errors
    else:
        return tensor
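
A usage sketch (illustration; a TuckerTensor unpacks into core and factors, and tucker_to_tensor is already imported by this module):

import numpy as np
import tensorly as tl

X = tl.tensor(np.random.rand(6, 7, 8))
core, factors = non_negative_tucker(X, rank=[3, 3, 3], n_iter_max=50)
rel_err = tl.norm(X - tucker_to_tensor((core, factors)), 2) / tl.norm(X, 2)
print(float(rel_err))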
Example #29
def test_randomized_range_finder():
    size = (7, 5)
    A = T.randn(size)
    Q = T.randomized_range_finder(A, n_dims=min(size))
    assert_array_almost_equal(A, tl.dot(tl.dot(Q, tl.transpose(T.conj(Q))), A))
Example #30
def non_negative_tucker_hals(tensor,
                             rank,
                             n_iter_max=100,
                             init="svd",
                             svd='numpy_svd',
                             tol=1e-8,
                             sparsity_coefficients=None,
                             core_sparsity_coefficient=None,
                             fixed_modes=None,
                             random_state=None,
                             verbose=False,
                             normalize_factors=False,
                             return_errors=False,
                             exact=False,
                             algorithm='fista'):
    """
    Non-negative Tucker decomposition

    Uses HALS to update each factor columnwise and uses
    fista or active set algorithm to update the core, see [1]_ 
    
    Parameters
    ----------
    tensor : ndarray
    rank : None, int or int list
        size of the core tensor, ``(len(ranks) == tensor.ndim)``
        if int, the same rank is used for all modes
    n_iter_max : int
            maximum number of iteration
    init : {'svd', 'random'}, optional
    svd : str, default is 'numpy_svd'
        function to use to compute the SVD, acceptable values in tensorly.SVD_FUNS
    tol : float, optional
          tolerance: the algorithm stops when the variation in
          the reconstruction error is less than the tolerance
        Default: 1e-8
    sparsity_coefficients : array of float (one per mode)
        The sparsity coefficient applied to each factor
        If set to None, the algorithm is computed without sparsity
        Default: None
    core_sparsity_coefficient : array of float. This coefficient imposes sparsity on the core
        when it is updated with fista.
        Default: None
    fixed_modes : array of integers (between 0 and the number of modes)
        Indices of the modes whose factors are left unchanged during the updates
        Default: None
    verbose : boolean
        Indicates whether the algorithm prints the successive
        reconstruction errors or not
        Default: False
    normalize_factors : if True, aggregates the core which will contain the norms of the factors.
    return_errors : boolean
        Indicates whether the algorithm should return all reconstruction errors
        and computation time of each iteration or not
        Default: False
    exact : If True, the HALS NNLS subroutines give high-precision results, at a
        higher computational cost. If False, the algorithm gives an approximate solution.
        Default: False
    algorithm : {'fista', 'active_set'}
        Non negative least square solution to update the core. 
        Default: 'fista'
    Returns
    -------
    factors : ndarray list
            list of positive factors of the CP decomposition
            element `i` is of shape ``(tensor.shape[i], rank)``
    errors: list
        A list of reconstruction errors at each iteration of the algorithm.

    Notes
    -----
    Tucker decomposes a tensor into a core tensor and a list of factors:

    .. math::

        tensor = [| core; factors[0], \\ldots, factors[-1] |]

    We solve the following problem for each factor
    (:math:`X_{[i]}` denotes the mode-`i` unfolding of `X`):

    .. math::

        \\min_{factors[i] \\geq 0}
        \\|tensor_{[i]} - factors[i] \\times core_{[i]}
        \\times (\\prod_{j \\neq i} factors[j])^T\\|^2

    If we define the two variables

    .. math::

        U = core_{[i]} \\times \\prod_{j \\neq i}(factors[j] \\times factors[j]^T),
        \\qquad M = tensor_{[i]},

    the gradient of the problem becomes

    .. math::

        \\delta = -U^T M + factors[i] \\times U^T U.

    In order to compute :math:`U^T U` and :math:`U^T M`, we define

    .. math::

        core\\_cross = core_{[i]} \\times \\prod_{j \\neq i}(factors[j] \\times factors[j]^T),
        \\qquad tensor\\_cross = tensor_{[i]} \\times \\prod_{j \\neq i} factors[j],

    so that

    .. math::

        U^T U = core\\_cross_{[j]} \\times core_{[j]}^T,
        \\qquad U^T M = (tensor\\_cross_{[j]} \\times core_{[j]}^T)^T.
    References
    ----------
    .. [1] tl.G.Kolda and B.W.Bader, "Tensor Decompositions and Applications",
       SIAM REVIEW, vol. 51, n. 3, pp. 455-500, 2009.
    """
    rank = validate_tucker_rank(tl.shape(tensor), rank=rank)
    n_modes = tl.ndim(tensor)
    if sparsity_coefficients is None or not isinstance(sparsity_coefficients,
                                                       Iterable):
        sparsity_coefficients = [sparsity_coefficients] * n_modes

    if fixed_modes is None:
        fixed_modes = []

    # Avoiding errors
    for fixed_value in fixed_modes:
        sparsity_coefficients[fixed_value] = None
    # Generating the mode update sequence
    modes = [
        mode for mode in range(tl.ndim(tensor)) if mode not in fixed_modes
    ]

    nn_core, nn_factors = initialize_tucker(tensor,
                                            rank,
                                            modes,
                                            init=init,
                                            svd=svd,
                                            random_state=random_state,
                                            non_negative=True)
    # initialisation - declare local variables
    norm_tensor = tl.norm(tensor, 2)
    rec_errors = []

    # Iterate over one step of NTD
    for iteration in range(n_iter_max):
        # One pass of least squares on each updated mode
        for mode in modes:

            # Computing Hadamard of cross-products
            pseudo_inverse = nn_factors.copy()
            for i, factor in enumerate(nn_factors):
                if i != mode:
                    pseudo_inverse[i] = tl.dot(tl.conj(tl.transpose(factor)),
                                               factor)
            # UtU
            core_cross = multi_mode_dot(nn_core, pseudo_inverse, skip=mode)
            UtU = tl.dot(unfold(core_cross, mode),
                         tl.transpose(unfold(nn_core, mode)))

            # UtM
            tensor_cross = multi_mode_dot(tensor,
                                          nn_factors,
                                          skip=mode,
                                          transpose=True)
            MtU = tl.dot(unfold(tensor_cross, mode),
                         tl.transpose(unfold(nn_core, mode)))
            UtM = tl.transpose(MtU)

            # Call the hals resolution with nnls, optimizing the current mode
            nn_factor, _, _, _ = hals_nnls(
                UtM,
                UtU,
                tl.transpose(nn_factors[mode]),
                n_iter_max=100,
                sparsity_coefficient=sparsity_coefficients[mode],
                exact=exact)
            nn_factors[mode] = tl.transpose(nn_factor)
        # updating core
        if algorithm == 'fista':
            pseudo_inverse[-1] = tl.dot(tl.transpose(nn_factors[-1]),
                                        nn_factors[-1])
            core_estimation = multi_mode_dot(tensor,
                                             nn_factors,
                                             transpose=True)
            learning_rate = 1

            for MtM in pseudo_inverse:
                learning_rate *= 1 / (tl.partial_svd(MtM)[1][0])
            nn_core = fista(
                core_estimation,
                pseudo_inverse,
                x=nn_core,
                n_iter_max=n_iter_max,
                sparsity_coef=core_sparsity_coefficient,
                lr=learning_rate,
            )
        if algorithm == 'active_set':
            pseudo_inverse[-1] = tl.dot(tl.transpose(nn_factors[-1]),
                                        nn_factors[-1])
            core_estimation_vec = tl.base.tensor_to_vec(
                tl.tenalg.mode_dot(tensor_cross,
                                   tl.transpose(nn_factors[modes[-1]]),
                                   modes[-1]))
            pseudo_inverse_kr = tl.tenalg.kronecker(pseudo_inverse)
            vectorcore = active_set_nnls(core_estimation_vec,
                                         pseudo_inverse_kr,
                                         x=nn_core,
                                         n_iter_max=n_iter_max)
            nn_core = tl.reshape(vectorcore, tl.shape(nn_core))

        # Adding the l1 norm value to the reconstruction error
        sparsity_error = 0
        for index, sparse in enumerate(sparsity_coefficients):
            if sparse:
                sparsity_error += 2 * (sparse *
                                       tl.norm(nn_factors[index], order=1))
        # error computation
        rec_error = tl.norm(tensor - tucker_to_tensor(
            (nn_core, nn_factors)), 2) / norm_tensor
        rec_errors.append(rec_error)

        if iteration > 1:
            if verbose:
                print('reconstruction error={}, variation={}.'.format(
                    rec_errors[-1], rec_errors[-2] - rec_errors[-1]))

            if tol and abs(rec_errors[-2] - rec_errors[-1]) < tol:
                if verbose:
                    print('converged in {} iterations.'.format(iteration))
                break
        if normalize_factors:
            nn_core, nn_factors = tucker_normalize((nn_core, nn_factors))
    tensor = TuckerTensor((nn_core, nn_factors))
    if return_errors:
        return tensor, rec_errors
    else:
        return tensor
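
A usage sketch (same call pattern as non_negative_tucker above, with HALS column updates and a fista core update; illustration only):

import numpy as np
import tensorly as tl

X = tl.tensor(np.random.rand(6, 7, 8))
ntd, errors = non_negative_tucker_hals(X, rank=[3, 3, 3], return_errors=True)
print(errors[-1])  # final relative reconstruction error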