Example #1
def test_tensor_product():
    """Test tensor_dot"""
    rng = random.check_random_state(1234)

    X = tl.tensor(rng.random_sample((4, 5, 6)))
    Y = tl.tensor(rng.random_sample((3, 4, 7)))
    tdot = tl.tensor_to_vec(tensor_dot(X, Y))
    true_dot = tl.tensor_to_vec(
        tenalg.outer([tl.tensor_to_vec(X),
                      tl.tensor_to_vec(Y)]))
    testing.assert_array_almost_equal(tdot, true_dot)
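The assertion above relies on the identity that flattening an outer tensor product gives the outer product of the flattened operands. A backend-free sketch of that identity in plain NumPy, where np.tensordot with axes=0 stands in for tensor_dot (an assumption for illustration only):

import numpy as np

rng = np.random.RandomState(1234)
X = rng.random_sample((4, 5, 6))
Y = rng.random_sample((3, 4, 7))

# Outer tensor product: prod[i, j, k, l, m, n] = X[i, j, k] * Y[l, m, n]
prod = np.tensordot(X, Y, axes=0)

# Flattening it matches the outer product of the flattened operands
assert np.allclose(prod.ravel(), np.outer(X.ravel(), Y.ravel()).ravel())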
Example #2
def test_batched_tensor_product():
    """Test batched-tensor_dot

    Notes
    -----
    At the time of writing, MXNet doesn't support transpose 
    for tensors of order higher than 6
    """
    rng = random.check_random_state(1234)
    batch_size = 3

    X = tl.tensor(rng.random_sample((batch_size, 4, 5, 6)))
    Y = tl.tensor(rng.random_sample((batch_size, 3, 7)))
    tdot = tl.unfold(batched_tensor_dot(X, Y), 0)
    for i in range(batch_size):
        true_dot = tl.tensor_to_vec(
            tenalg.outer([tl.tensor_to_vec(X[i]),
                          tl.tensor_to_vec(Y[i])]))
        testing.assert_array_almost_equal(tdot[i], true_dot)
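The batched test checks the same identity sample by sample. A plain-NumPy sketch of that check, where np.einsum stands in for batched_tensor_dot (the subscripts match the shapes used above and are only illustrative):

import numpy as np

rng = np.random.RandomState(1234)
batch_size = 3
X = rng.random_sample((batch_size, 4, 5, 6))
Y = rng.random_sample((batch_size, 3, 7))

# One outer tensor product per sample, stacked along the batch axis
batched = np.einsum('bijk,blm->bijklm', X, Y)

for i in range(batch_size):
    expected = np.outer(X[i].ravel(), Y[i].ravel()).ravel()
    assert np.allclose(batched[i].ravel(), expected)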
Example #3
def power_iteration(tensor, n_repeat=10, n_iteration=10, verbose=False):
    """A single Robust Tensor Power Iteration

    Parameters
    ----------
    tensor : tl.tensor
        input tensor to decompose
    n_repeat : int, default is 10
        number of initializations to be tried
    n_iteration : int, default is 10
        number of power iterations
    verbose : bool
        level of verbosity

    Returns
    -------
    (eigenval, best_factors, deflated)

    eigenval : float
        the obtained eigenvalue
    best_factors : tl.tensor list
        the best estimated eigenvector, for each mode of the input tensor
    deflated : tl.tensor of same shape as `tensor`
        the deflated tensor (i.e. without the estimated component)
    """
    order = tl.ndim(tensor)

    # A list of candidates for each mode
    best_score = 0
    scores = []

    for _ in range(n_repeat):
        factors = [
            tl.tensor(np.random.random_sample(s), **tl.context(tensor))
            for s in tl.shape(tensor)
        ]

        for _ in range(n_iteration):
            for mode in range(order):
                factor = tl.tenalg.multi_mode_dot(tensor, factors, skip=mode)
                factor = factor / tl.norm(factor, 2)
                factors[mode] = factor

        score = tl.tenalg.multi_mode_dot(tensor, factors)
        scores.append(score)

        if score > best_score:
            best_score = score
            best_factors = factors

    if verbose:
        print(f'Best score of {n_repeat}: {best_score}')

    # Refine the init
    for _ in range(n_iteration):
        for mode in range(order):
            factor = tl.tenalg.multi_mode_dot(tensor, best_factors, skip=mode)
            factor = factor / tl.norm(factor, 2)
            best_factors[mode] = factor

    eigenval = tl.tenalg.multi_mode_dot(tensor, best_factors)
    deflated = tensor - outer(best_factors) * eigenval

    if verbose:
        explained = tl.norm(deflated) / tl.norm(tensor)
        print(f'Eigenvalue: {eigenval}, explained: {explained}')

    return eigenval, best_factors, deflated
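A minimal usage sketch for the function above. It assumes tensorly is installed and that np, tl, and tensorly.tenalg.outer (used inside the function body) are in scope; the shapes, the eigenvalue 2.0, and the rank-1 construction are illustrative only:

import numpy as np
import tensorly as tl
from tensorly.tenalg import outer

rng = np.random.RandomState(0)
# Build an exactly rank-1 tensor 2.0 * a o b o c with unit-norm factors
a, b, c = [rng.random_sample(s) for s in (4, 5, 6)]
a, b, c = [v / np.linalg.norm(v) for v in (a, b, c)]
T = 2.0 * outer([tl.tensor(a), tl.tensor(b), tl.tensor(c)])

eigenval, factors, deflated = power_iteration(T, n_repeat=5, n_iteration=20)
print(eigenval)           # close to 2.0 for this rank-1 input
print(tl.norm(deflated))  # close to 0: the single component accounts for the tensor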
Example #4
def symmetric_power_iteration(tensor,
                              n_repeat=10,
                              n_iteration=10,
                              verbose=False):
    """A single Robust Symmetric Tensor Power Iteration

    Parameters
    ----------
    tensor : tl.tensor
        input tensor to decompose, must be symmetric of shape (size, )*order
    n_repeat : int, default is 10
        number of initializations to be tried
    n_iteration : int, default is 10
        number of power iterations
    verbose : bool
        level of verbosity

    Returns
    -------
    (eigenval, best_factor, deflated)

    eigenval : float
        the obtained eigenvalue
    best_factor : tl.tensor
        the best estimated eigenvector
    deflated : tl.tensor of same shape as `tensor`
        the deflated tensor (i.e. without the estimated component)
    """
    order = tl.ndim(tensor)
    size = tl.shape(tensor)[0]

    if not tl.shape(tensor) == (size, ) * order:
        raise ValueError(
            'The input tensor does not have the same size along each mode.')

    # A list of candidates for each mode
    best_score = 0
    scores = []
    modes = list(range(1, order))

    for _ in range(n_repeat):
        factor = tl.tensor(np.random.random_sample(size), **tl.context(tensor))

        for _ in range(n_iteration):
            for _ in range(order):
                factor = tl.tenalg.multi_mode_dot(tensor,
                                                  [factor] * (order - 1),
                                                  modes=modes)
                factor = factor / tl.norm(factor, 2)

        score = tl.tenalg.multi_mode_dot(tensor, [factor] * order)
        scores.append(score)

        if score > best_score:
            best_score = score
            best_factor = factor

    if verbose:
        print(f'Best score of {n_repeat}: {best_score}')

    # Refine the init
    for _ in range(n_iteration):
        for _ in range(order):
            best_factor = tl.tenalg.multi_mode_dot(tensor,
                                                   [best_factor] * (order - 1),
                                                   modes=modes)
            best_factor = best_factor / tl.norm(best_factor, 2)

    eigenval = tl.tenalg.multi_mode_dot(tensor, [best_factor] * order)
    deflated = tensor - outer([best_factor] * order) * eigenval

    if verbose:
        explained = tl.norm(deflated) / tl.norm(tensor)
        print(f'Eigenvalue: {eigenval}, explained: {explained}')

    return eigenval, best_factor, deflated
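A matching usage sketch for the symmetric variant, under the same assumptions (tensorly installed; np, tl, and tensorly.tenalg.outer in scope). The size 5 and the eigenvalue 3.0 are illustrative:

import numpy as np
import tensorly as tl
from tensorly.tenalg import outer

rng = np.random.RandomState(0)
v = rng.random_sample(5)
v = v / np.linalg.norm(v)
# Symmetric rank-1 tensor 3.0 * v o v o v of shape (5, 5, 5)
T = 3.0 * outer([tl.tensor(v)] * 3)

eigenval, factor, deflated = symmetric_power_iteration(T, n_repeat=5, n_iteration=20)
print(eigenval)           # close to 3.0 for this rank-1 symmetric input
print(tl.norm(deflated))  # close to 0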