Example #1
def test_validate_kruskal_tensor():
    rng = check_random_state(12345)
    true_shape = (3, 4, 5)
    true_rank = 3
    kruskal_tensor = random_kruskal(true_shape, true_rank)
    (weights, factors) = kruskal_normalise(kruskal_tensor)

    # Check correct rank and shapes are returned
    shape, rank = _validate_kruskal_tensor((weights, factors))
    assert_equal(
        shape,
        true_shape,
        err_msg='Returned incorrect shape (got {}, expected {})'.format(
            shape, true_shape))
    assert_equal(
        rank,
        true_rank,
        err_msg='Returned incorrect rank (got {}, expected {})'.format(
            rank, true_rank))

    # One of the factors has the wrong rank
    factors[0], copy = tl.tensor(rng.random_sample((4, 4))), factors[0]
    with assert_raises(ValueError):
        _validate_kruskal_tensor((weights, factors))

    # Not the correct number of weights
    factors[0] = copy
    wrong_weights = weights[1:]
    with assert_raises(ValueError):
        _validate_kruskal_tensor((wrong_weights, factors))

    # Not enough factors
    with assert_raises(ValueError):
        _validate_kruskal_tensor((weights[:1], factors[:1]))
Example #2
def test_kruskal_norm():
    """Test for kruskal_norm
    """
    shape = (8, 5, 6, 4)
    rank = 25
    kruskal_tensor = random_kruskal(shape=shape,
                                    rank=rank,
                                    full=False,
                                    normalise_factors=True)
    tol = 10e-5
    rec = tl.kruskal_to_tensor(kruskal_tensor)
    true_res = tl.norm(rec, 2)
    res = kruskal_norm(kruskal_tensor)
    assert_(tl.to_numpy(tl.abs(true_res - res)) <= tol)
Example #3
def test_kruskal_to_tensor_with_weights():
    A = tl.reshape(tl.arange(1, 5), (2, 2))
    B = tl.reshape(tl.arange(5, 9), (2, 2))
    weights = tl.tensor([2, -1])

    out = kruskal_to_tensor((weights, [A, B]))
    expected = tl.tensor([[-2, -2], [6, 10]])  # computed by hand
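    # The expected entries follow from out[i, j] = sum_r weights[r] * A[i, r] * B[j, r],
    # e.g. out[0, 0] = 2*1*5 + (-1)*2*6 = -2 and out[1, 1] = 2*3*7 + (-1)*4*8 = 10.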
    assert_array_equal(out, expected)

    (weights, factors) = random_kruskal((5, 5, 5),
                                        rank=5,
                                        normalise_factors=True,
                                        full=False)
    true_res = tl.dot(tl.dot(factors[0], tl.diag(weights)),
                      tl.transpose(tl.tenalg.khatri_rao(factors[1:])))
    true_res = tl.fold(true_res, 0, (5, 5, 5))
    res = kruskal_to_tensor((weights, factors))
    assert_array_almost_equal(true_res,
                              res,
                              err_msg='weights incorrectly incorporated in '
                              'kruskal_to_tensor')
Example #4
def test_unfolding_dot_khatri_rao():
    """Test for unfolding_dot_khatri_rao
    
    Check against other version check sparse safe
    """
    shape = (10, 10, 10, 4)
    rank = 6
    tensor = tl.tensor(np.random.random(shape))
    weights, factors = random_kruskal(shape=shape,
                                      rank=rank,
                                      full=False,
                                      normalise_factors=True)

    for mode in range(tl.ndim(tensor)):
        # Version forming explicitly the khatri-rao product
        unfolded = unfold(tensor, mode)
        kr_factors = khatri_rao(factors, weights=weights, skip_matrix=mode)
        true_res = tl.dot(unfolded, kr_factors)

        # Efficient sparse-safe version
        res = unfolding_dot_khatri_rao(tensor, (weights, factors), mode)
        assert_array_almost_equal(true_res, res, decimal=3)
Example #5
def test_kruskal_mode_dot():
    """Test for kruskal_mode_dot
    
        We will compare kruskal_mode_dot 
        (which operates directly on decomposed tensors)
        with mode_dot (which operates on full tensors)
        and check that the results are the same.
    """
    rng = check_random_state(12345)
    shape = (5, 4, 6)
    rank = 3
    kruskal_ten = random_kruskal(shape, rank=rank, orthogonal=True, full=False)
    full_tensor = tl.kruskal_to_tensor(kruskal_ten)
    # matrix for mode 1
    matrix = tl.tensor(rng.random_sample((7, shape[1])))
    # vec for mode 2
    vec = tl.tensor(rng.random_sample(shape[2]))

    # Test kruskal_mode_dot with matrix
    res = kruskal_mode_dot(kruskal_ten, matrix, mode=1, copy=True)
    # Note that if copy=True is not respected, the factors will be changed
    # and the next test will fail
    res = tl.kruskal_to_tensor(res)
    true_res = mode_dot(full_tensor, matrix, mode=1)
    assert_array_almost_equal(true_res, res)

    # Check that the data was indeed copied
    rec = tl.kruskal_to_tensor(kruskal_ten)
    assert_array_almost_equal(full_tensor, rec)

    # Test kruskal_mode_dot with vec
    res = kruskal_mode_dot(kruskal_ten, vec, mode=2, copy=True)
    res = tl.kruskal_to_tensor(res)
    true_res = mode_dot(full_tensor, vec, mode=2)
    assert_equal(res.shape, true_res.shape)
    assert_array_almost_equal(true_res, res)
Example #6
"""
Example on how to use :func:`tensorly.decomposition.parafac` with line search
to accelerate convergence.
"""

from time import time
import numpy as np
import tensorly as tl
from tensorly.random import random_kruskal
from tensorly.decomposition import parafac
import matplotlib.pyplot as plt

tol = np.logspace(-1, -9)
err = np.empty_like(tol)
err_ls = np.empty_like(tol)
tt = np.empty_like(tol)
tt_ls = np.empty_like(tol)
tensor = random_kruskal((10, 10, 10), 3, random_state=1234, full=True)

# Get a high-accuracy decomposition for comparison
fac = parafac(tensor, rank=3, n_iter_max=2000000, tol=1.0e-15, linesearch=True)
err_min = tl.norm(tl.kruskal_to_tensor(fac) - tensor)

for ii, toll in enumerate(tol):
    # Run PARAFAC decomposition without line search and time
    start = time()
    fac = parafac(tensor, rank=3, n_iter_max=2000000, tol=toll)
    tt[ii] = time() - start
    # Run PARAFAC decomposition with line search and time
    start = time()
    fac_ls = parafac(tensor,
                     rank=3,
                     n_iter_max=2000000,
                     tol=toll,
                     linesearch=True)
    tt_ls[ii] = time() - start

    # Record the reconstruction error of both runs
    err[ii] = tl.norm(tl.kruskal_to_tensor(fac) - tensor)
    err_ls[ii] = tl.norm(tl.kruskal_to_tensor(fac_ls) - tensor)
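
# A minimal sketch, not part of the scraped snippet: one way to visualise the
# comparison, plotting excess error over the reference solution against runtime
# for the runs with and without line search.
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.loglog(tt, err - err_min, '.', label="No line search")
ax.loglog(tt_ls, err_ls - err_min, '.r', label="Line search")
ax.set_xlabel("Time (s)")
ax.set_ylabel("Error (excess over reference)")
ax.legend()
plt.show()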
Example #7
import matplotlib.pyplot as plt
import tensorly.kruskal_tensor as tl_kruskal
import tensorly.random as tl_rand
import numpy as np
from timeit import default_timer as timer

from BLOCK_SPG_CPD import bras_CPD
from BLOCK_SPG_CPD import ada_CPD

# Set up
for i in range(10):
    # Each entry of the factor matrices is uniformly sampled from (0, 1), and
    # kruskal_to_tensor is used to form X
    rank = 100
    F = tl_rand.random_kruskal((300, 300, 300),
                               rank,
                               full=False,
                               random_state=np.random.RandomState(seed=i))
    X = tl_kruskal.kruskal_to_tensor(F)

    # Heterogeneous noise

    # Homogeneous noise
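    # Sketch only, not part of the original snippet: one way the empty noise
    # steps above could be filled in, assuming the default NumPy backend (so X
    # is an ndarray); the 0.01 scale is a hypothetical choice.
    rng_noise = np.random.RandomState(seed=i)
    hetero_noise = 0.01 * np.abs(X) * rng_noise.randn(*X.shape)  # variance scales with each entry
    homo_noise = 0.01 * rng_noise.randn(*X.shape)                # same variance for every entry
    X = X + homo_noise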

    # Parameters
    B = 18
    eta = 1
    b = 10**-6
    eps = 0
    num_iterations = 0
    max_time = 600
Example #8

import numpy as np
import tensorly.kruskal_tensor as tl_kruskal
import tensorly.random as tl_rand
from timeit import default_timer as timer
# generate_collinear_factors and CPD_MWU are project-local helpers imported
# elsewhere in the original script

# Set up
congruence = 0.9
lamb = 0.001
shape = (300,300,300)
nu = 2
rank = 20
num_iterations = 100000
eps = 1/num_iterations

# Sketching rates
sketching_rates = list(np.linspace(10**(-3), 10**(-1), 4)) + [1]

# Generate random latent factors
F = np.array(tl_rand.random_kruskal(shape=shape, rank=rank, full=False, random_state=np.random.RandomState(seed=0)))
X = tl_kruskal.kruskal_to_tensor(F)

# Generate ill conditioned factors
F_ill = generate_collinear_factors(shape, rank, congruence)
X_ill = tl_kruskal.kruskal_to_tensor(F_ill)

# Run experiment for sketching with weight update
sketching_rates = list(np.linspace(10**(-3), 10**(-1), 4)) + [1]
start = timer()
A,B,C, error, res_time = CPD_MWU(X, F, sketching_rates, lamb, eps, nu, rank, num_iterations)
end = timer()
# Print out total time
print("Total time", end-start)
print("CPD time", end-start-res_time)
print("Residual error time", res_time)