def test_gcp_continuous_loss_functions():
    cont_losses = ['normal', 'gaussian']
    opts = ['lbfgsb', 'sgd']
    rng = tl.check_random_state(1234)
    shp = (4, 5, 6)
    rank = 4
    #tensor = generate_test_tensor('normal', shp)
    size = 1
    for i in shp:
        size *= i
    data1 = rng.random(size)
    tensor = tl.tensor(data1.reshape(shp, order='F'), dtype=tl.float64)
    ## CHECK CONTINUOUS DATA-CENTRIC LOSS
    print("\n***************************************************")
    print("\t Testing continuous data")
    for loss in cont_losses:
        print("***************************************************\n")
        print("Loss function type: {}".format(loss))
        for opt in opts:
            mTen = gcp(tensor, rank, type=loss, opt=opt, maxiters=1000, epciters=100)
            assert (mTen is not None), "gcp({}) returned null".format(opt)
            mTen = tl.cp_to_tensor(mTen)
            assert (tensor.size == mTen.size), \
                "Unequal number of tensor elements. Tensor: {} CPTensor: {}".format(tensor.size, mTen.size)
            score = 1 - (tl.norm(tensor - mTen) / tl.norm(tensor))
            print("Score: {0:0.4f}\n".format(score))
Example n. 2
def test_parafac_power_iteration(monkeypatch):
    """Test for symmetric Parafac optimized with robust tensor power iterations"""
    rng = tl.check_random_state(1234)
    tol_norm_2 = 10e-1
    tol_max_abs = 10e-1

    shape = (5, 3, 4)
    rank = 4
    tensor = random_cp(shape, rank=rank, full=True, random_state=rng)
    ktensor = parafac_power_iteration(tensor,
                                      rank=10,
                                      n_repeat=10,
                                      n_iteration=10)

    rec = tl.cp_to_tensor(ktensor)
    error = tl.norm(rec - tensor, 2) / tl.norm(tensor, 2)
    assert_(
        error < tol_norm_2,
        f'Norm 2 of reconstruction error={error} higher than tol={tol_norm_2}.'
    )
    error = tl.max(tl.abs(rec - tensor))
    assert_(
        error < tol_max_abs,
        f'Absolute norm of reconstruction error={error} higher than tol={tol_max_abs}.'
    )

    assert_class_wrapper_correctly_passes_arguments(monkeypatch,
                                                    parafac_power_iteration,
                                                    CPPower,
                                                    ignore_args={},
                                                    rank=3)
Example n. 3
def test_sum():
    rng = tl.check_random_state(0)
    tensor = tl.tensor(rng.random_sample((5, 6, 7)))
    all_kwargs = [
        {},
        {'axis': 1},
        {'axis': 1, 'keepdims': True},
        {'axis': 1, 'keepdims': False},
        {'keepdims': True},
        {'keepdims': False},
        {'axis': None, 'keepdims': True},
        {'axis': (0, 2), 'keepdims': True},
        {'axis': (0, 2), 'keepdims': False},
        {'axis': (0, 2)},
    ]
    for kwargs in all_kwargs:
        np.testing.assert_allclose(
            tl.to_numpy(tl.sum(tensor, **kwargs)),
            np.sum(tl.to_numpy(tensor), **kwargs),
            rtol=1e-5,  # Single precision
            err_msg=f"Sum not same as numpy with kwargs: {kwargs}")
Example n. 4
def test_parafac_linesearch():
    """ Test that we more rapidly converge to a solution with line search. """
    rng = tl.check_random_state(1234)
    eps = 10e-2
    tensor = T.tensor(rng.random_sample((5, 5, 5)))
    kt = parafac(tensor,
                 rank=5,
                 init='random',
                 random_state=1234,
                 n_iter_max=10,
                 tol=10e-9)
    rec = tl.cp_to_tensor(kt)
    kt_ls = parafac(tensor,
                    rank=5,
                    init='random',
                    random_state=1234,
                    n_iter_max=10,
                    tol=10e-9,
                    linesearch=True)
    rec_ls = tl.cp_to_tensor(kt_ls)

    rec_error = T.norm(tensor - rec) / T.norm(tensor)
    rec_error_ls = T.norm(tensor - rec_ls) / T.norm(tensor)
    assert_(
        rec_error_ls - rec_error < eps,
        f'Relative reconstruction error with line-search={rec_error_ls} VS {rec_error} without.'
        'CP with line-search seems to have converged more slowly.')
Example n. 5
def test_validate_tr_tensor():
    rng = tl.check_random_state(12345)
    true_shape = (6, 4, 5)
    true_rank = (3, 2, 2, 3)
    factors = random_tr(true_shape, rank=true_rank).factors

    # Check that the correct shape/rank are returned
    shape, rank = _validate_tr_tensor(factors)
    assert_equal(
        shape,
        true_shape,
        err_msg='Returned incorrect shape (got {}, expected {})'.format(
            shape, true_shape))
    assert_equal(
        rank,
        true_rank,
        err_msg='Returned incorrect rank (got {}, expected {})'.format(
            rank, true_rank))

    # One of the factors has the wrong ndim
    factors[0] = tl.tensor(rng.random_sample((4, 4)))
    with assert_raises(ValueError):
        _validate_tr_tensor(factors)

    # Consecutive factors ranks don't match
    factors[0] = tl.tensor(rng.random_sample((3, 6, 4)))
    with assert_raises(ValueError):
        _validate_tr_tensor(factors)

    # Boundary conditions not respected
    factors[0] = tl.tensor(rng.random_sample((2, 6, 2)))
    with assert_raises(ValueError):
        _validate_tr_tensor(factors)
Example n. 6
def test_randomised_parafac():
    """ Test for randomised_parafac
    """
    rng = tl.check_random_state(1234)
    t_shape = (10, 10, 10)
    n_samples = 8
    tensor = T.tensor(rng.random_sample(t_shape))
    rank = 4
    _, factors_svd = randomised_parafac(tensor,
                                        rank,
                                        n_samples,
                                        n_iter_max=1000,
                                        init='svd',
                                        tol=10e-5,
                                        verbose=True)
    for i, f in enumerate(factors_svd):
        assert_(
            T.shape(f) == (t_shape[i], rank), 'Factors are of incorrect size')

    # test tensor reconstructed properly
    tolerance = 0.05
    tensor = random_cp(shape=(10, 10, 10), rank=4, full=True)
    cp_tensor = randomised_parafac(tensor,
                                   rank=5,
                                   n_samples=100,
                                   max_stagnation=20,
                                   n_iter_max=100,
                                   tol=0,
                                   verbose=0)
    reconstruction = cp_to_tensor(cp_tensor)
    error = float(T.norm(reconstruction - tensor, 2) / T.norm(tensor, 2))
    assert_(error < tolerance,
            msg='reconstruction error of {} is higher than tolerance of {}'.format(
                error, tolerance))
Example n. 7
def test_parafac2_normalize_factors():
    rng = tl.check_random_state(1234)
    rank = 2  # Rank 2 so we only need to test rank of minimum and maximum

    random_parafac2_tensor = random_parafac2(
        shapes=[(15 + rng.randint(5), 30) for _ in range(25)],
        rank=rank,
        random_state=rng,
    )
    random_parafac2_tensor.factors[0] = random_parafac2_tensor.factors[0] + 0.1
    norms = tl.ones(rank)
    for factor in random_parafac2_tensor.factors:
        norms = norms * tl.norm(factor, axis=0)

    slices = parafac2_to_tensor(random_parafac2_tensor)

    unnormalized_rec = parafac2(slices,
                                rank,
                                random_state=rng,
                                normalize_factors=False,
                                n_iter_max=100)
    assert unnormalized_rec.weights[0] == 1

    normalized_rec = parafac2(slices,
                              rank,
                              random_state=rng,
                              normalize_factors=True,
                              n_iter_max=1000)
    assert tl.max(tl.abs(T.norm(normalized_rec.factors[0], axis=0) - 1)) < 1e-5
    assert abs(tl.max(norms) -
               tl.max(normalized_rec.weights)) / tl.max(norms) < 1e-2
    assert abs(tl.min(norms) -
               tl.min(normalized_rec.weights)) / tl.min(norms) < 1e-2
Example n. 8
def test_partial_tucker():
    """Test for the Partial Tucker decomposition"""
    rng = tl.check_random_state(1234)
    tol_norm_2 = 10e-3
    tol_max_abs = 10e-1
    tensor = tl.tensor(rng.random_sample((3, 4, 3)))
    modes = [1, 2]
    core, factors = partial_tucker(tensor, modes, rank=None, n_iter_max=200, verbose=True)
    reconstructed_tensor = multi_mode_dot(core, factors, modes=modes)
    norm_rec = tl.norm(reconstructed_tensor, 2)
    norm_tensor = tl.norm(tensor, 2)
    assert_((norm_rec - norm_tensor)/norm_rec < tol_norm_2)

    # Test the max abs difference between the norms of the reconstruction and the tensor
    assert_(tl.max(tl.abs(norm_rec - norm_tensor)) < tol_max_abs)

    # Test the shape of the core and factors
    ranks = [3, 1]
    core, factors = partial_tucker(tensor, modes=modes, rank=ranks, n_iter_max=100, verbose=1)
    for i, rank in enumerate(ranks):
        assert_equal(factors[i].shape, (tensor.shape[i+1], ranks[i]),
                     err_msg="factors[{}].shape={}, expected {}".format(
                         i, factors[i].shape, (tensor.shape[i+1], ranks[i])))
    assert_equal(core.shape, [tensor.shape[0]]+ranks, err_msg="Core.shape={}, "
                     "expected {}".format(core.shape, [tensor.shape[0]]+ranks))

    # Test random_state fixes the core and the factor matrices
    core1, factors1 = partial_tucker(tensor, modes=modes, rank=ranks, random_state=0)
    core2, factors2 = partial_tucker(tensor, modes=modes, rank=ranks, random_state=0)
    assert_array_equal(core1, core2)
    for factor1, factor2 in zip(factors1, factors2):
        assert_array_equal(factor1, factor2)
Example n. 9
def test_clip():
    # Test that clip can work with single arguments
    X = T.tensor([0.0, -1.0, 1.0])
    X_low = T.tensor([0.0, 0.0, 1.0])
    X_high = T.tensor([0.0, -1.0, 0.0])
    assert_array_equal(tl.clip(X, a_min=0.0), X_low)
    assert_array_equal(tl.clip(X, a_max=0.0), X_high)

    # More extensive test with a larger random tensor
    rng = tl.check_random_state(0)
    tensor = tl.tensor(rng.random_sample((10, 10, 10)).astype('float32'))

    val1 = np.float32(rng.random_sample())
    val2 = np.float32(rng.random_sample())
    limits = [(min(val1, val2), max(val1, val2)), (-1, 2),
              (tl.max(tensor) + 1, None), (None, tl.min(tensor) - 1),
              (tl.max(tensor), None), (tl.min(tensor), None),
              (None, tl.max(tensor)), (None, tl.min(tensor))]

    for min_val, max_val in limits:
        message = f"Tensor clipped incorrectly with min_val={min_val} and max_val={max_val}. Tensor bounds are ({tl.to_numpy(tl.min(tensor))}, {tl.to_numpy(tl.max(tensor))}"
        if min_val is not None:
            assert tl.all(tl.clip(tensor, min_val, None) >= min_val), message
            assert tl.all(
                tl.clip(tensor, min_val, max_val) >= min_val), message
        if max_val is not None:
            assert tl.all(tl.clip(tensor, None, max_val) <= max_val), message
            assert tl.all(
                tl.clip(tensor, min_val, max_val) <= max_val), message
Example n. 10
def test_sample_khatri_rao():
    """ Test for sample_khatri_rao
    """

    rng = tl.check_random_state(1234)
    t_shape = (8, 9, 10)
    rank = 3
    tensor = T.tensor(rng.random_sample(t_shape) + 1)
    weights, factors = parafac(tensor, rank=rank, n_iter_max=120)
    num_samples = 4
    skip_matrix = 1
    sampled_kr, sampled_indices, sampled_rows = sample_khatri_rao(
        factors,
        num_samples,
        skip_matrix=skip_matrix,
        return_sampled_rows=True)
    assert_(
        T.shape(sampled_kr) == (num_samples, rank),
        'Sampled shape of khatri-rao product is inconsistent')
    assert_(
        np.max(sampled_rows) < (t_shape[0] * t_shape[2]),
        'Largest sampled row index is bigger than number of columns of '
        'the unfolded matrix')
    assert_(
        np.min(sampled_rows) >= 0,
        'Smallest sampled row index is smaller than 0')
    true_kr = khatri_rao(factors, skip_matrix=skip_matrix)
    for ix, j in enumerate(sampled_rows):
        assert_array_equal(
            true_kr[j],
            sampled_kr[int(ix)],
            err_msg="Sampled khatri_rao product doesn't correspond to the true product")
Example n. 11
def test_symmetric_parafac_power_iteration(monkeypatch):
    """Test for symmetric Parafac optimized with robust tensor power iterations"""
    rng = tl.check_random_state(1234)
    tol_norm_2 = 10e-1
    tol_max_abs = 10e-1

    size = 5
    rank = 4
    true_factor = tl.tensor(rng.random_sample((size, rank)))
    true_weights = tl.ones(rank)
    tensor = tl.cp_to_tensor((true_weights, [true_factor] * 3))
    weights, factor = symmetric_parafac_power_iteration(tensor,
                                                        rank=10,
                                                        n_repeat=10,
                                                        n_iteration=10)

    rec = tl.cp_to_tensor((weights, [factor] * 3))
    error = tl.norm(rec - tensor, 2)
    error /= tl.norm(tensor, 2)
    assert_(error < tol_norm_2, 'norm 2 of reconstruction higher than tol')
    # Test the max abs difference between the reconstruction and the tensor
    assert_(
        tl.max(tl.abs(rec - tensor)) < tol_max_abs,
        'abs norm of reconstruction error higher than tol')
    assert_class_wrapper_correctly_passes_arguments(
        monkeypatch,
        symmetric_parafac_power_iteration,
        SymmetricCP,
        ignore_args={},
        rank=3)
Example n. 12
def test_validate_cp_tensor():
    rng = tl.check_random_state(12345)
    true_shape = (3, 4, 5)
    true_rank = 3
    cp_tensor = random_cp(true_shape, true_rank)
    (weights, factors) = cp_normalize(cp_tensor)

    # Check correct rank and shapes are returned
    shape, rank = _validate_cp_tensor((weights, factors))
    assert_equal(
        shape,
        true_shape,
        err_msg='Returned incorrect shape (got {}, expected {})'.format(
            shape, true_shape))
    assert_equal(
        rank,
        true_rank,
        err_msg='Returned incorrect rank (got {}, expected {})'.format(
            rank, true_rank))

    # One of the factors has the wrong rank
    factors[0], copy = tl.tensor(rng.random_sample((4, 4))), factors[0]
    with assert_raises(ValueError):
        _validate_cp_tensor((weights, factors))

    # Not the correct amount of weights
    factors[0] = copy
    wrong_weights = weights[1:]
    with assert_raises(ValueError):
        _validate_cp_tensor((wrong_weights, factors))

    # Not enough factors
    with assert_raises(ValueError):
        _validate_cp_tensor((weights[:1], factors[:1]))
Example n. 13
def test_parafac2_to_tensor():
    rng = tl.check_random_state(1234)
    rank = 3

    I = 25
    J = 15
    K = 30

    weights, factors, projections = random_parafac2(shapes=[(J, K)] * I,
                                                    rank=rank,
                                                    random_state=rng)

    constructed_tensor = parafac2_to_tensor((weights, factors, projections))
    tensor_manual = T.zeros((I, J, K), **T.context(weights))

    for i in range(I):
        Bi = T.dot(projections[i], factors[1])
        for j in range(J):
            for k in range(K):
                for r in range(rank):
                    tensor_manual = tl.index_update(
                        tensor_manual,
                        tl.index[i, j, k],
                        tensor_manual[i, j, k] +
                        factors[0][i][r] * Bi[j][r] * factors[2][k][r])

    assert_(tl.max(tl.abs(constructed_tensor - tensor_manual)) < 1e-6)
Example n. 14
def test_parafac2_init_error():
    rng = tl.check_random_state(1234)
    rank = 3

    random_parafac2_tensor = random_parafac2(shapes=[(15, 30)] * 25,
                                             rank=rank,
                                             random_state=rng)
    tensor = parafac2_to_tensor(random_parafac2_tensor)

    with np.testing.assert_raises(ValueError):
        _ = initialize_decomposition(tensor, rank, init='bogus init type')

    with np.testing.assert_raises(ValueError):
        _ = initialize_decomposition(tensor,
                                     rank,
                                     init=('another', 'bogus', 'init', 'type'))

    rank = 4
    random_parafac2_tensor = random_parafac2(shapes=[(15, 3)] * 25,
                                             rank=rank,
                                             random_state=rng)
    tensor = parafac2_to_tensor(random_parafac2_tensor)

    with pytest.raises(Exception):
        _ = initialize_decomposition(tensor, rank, init='svd')
def test_gcp_1():
    """ Test for generalized CP"""

    ## Test 1 - shapes and dimensions

    # Create tensor with random elements
    rng = tl.check_random_state(1234)
    d = 3
    shape = (40, 50, 60)
    tensor = tl.tensor(rng.random(shape), dtype=tl.float32)

    tensor_shape = tensor.shape

    # Find gcp decomposition of the tensor
    rank = 20
    mTen = gcp(tensor, rank, type='normal', state=rng, maxiters=1e5)
    print(mTen)
    assert(mTen is not None), "gcp returned null"
    assert(len(mTen[1]) == d), "Number of factors should be 3, currently has " + str(len(mTen[1]))

    # Check that each factor matrix has the correct number of columns
    for k in range(d):
        rows, columns = tl.shape(mTen[1][k])
        assert(columns == rank), "Factor matrix {} needs {} columns, but only has {}".format(k+1, rank, columns)

    # Check CPTensor has same number of elements as tensor
    mTen = tl.cp_to_tensor(mTen)
    assert(tensor.size == mTen.size), "Unequal number of tensor elements. Tensor: {} CPTensor: {}".format(tensor.size, mTen.size)
    score = 1 - (tl.norm(tensor - mTen)/tl.norm(tensor))
    print("Score: {}".format(score))
Example n. 16
def test_masked_tucker():
    """Test for the masked Tucker decomposition.
    This checks that a mask of 1's is identical to the unmasked case.
    """
    rng = tl.check_random_state(1234)
    tensor = tl.tensor(rng.random_sample((3, 3, 3)))
    mask = tl.tensor(np.ones((3, 3, 3)))

    mask_fact = tucker(tensor, rank=(2, 2, 2), mask=mask)
    fact = tucker(tensor, rank=(2, 2, 2))
    diff = tucker_to_tensor(mask_fact) - tucker_to_tensor(fact)
    assert_(tl.norm(diff) < 0.001, 'norm 2 of reconstruction higher than 0.001')

    # Mask an outlier value, and check that the decomposition ignores it
    tensor = random_tucker((5, 5, 5), (1, 1, 1), full=True, random_state=1234)
    mask = tl.tensor(np.ones((5, 5, 5)))

    mask_tensor = tl.tensor(tensor)
    mask_tensor = tl.index_update(mask_tensor, tl.index[0, 0, 0], 1.0)
    mask = tl.index_update(mask, tl.index[0, 0, 0], 0)

    # We won't use the SVD decomposition, but check that it at least runs successfully
    mask_fact = tucker(mask_tensor, rank=(1, 1, 1), mask=mask, init="svd")
    mask_fact = tucker(mask_tensor, rank=(1, 1, 1), mask=mask, init="random", random_state=1234)
    mask_err = tl.norm(tucker_to_tensor(mask_fact) - tensor)

    assert_(mask_err < 0.001, 'norm 2 of reconstruction higher than 0.001')
Esempio n. 17
0
def initialize_tucker(tensor, rank, modes, random_state, init='svd', svd='numpy_svd', non_negative= False):
    """
    Initialize core and factors used in `tucker`.
    The type of initialization is set using `init`. If `init == 'random'` then
    initialize factor matrices using `random_state`. If `init == 'svd'` then
    initialize the `m`th factor matrix using the `rank` left singular vectors
    of the `m`th unfolding of the input tensor.
    
    Parameters
    ----------
    tensor : ndarray
    rank : int
           number of components
    modes : int list
    random_state : {None, int, np.random.RandomState}
    init : {'svd', 'random', cptensor}, optional
    svd : str, default is 'numpy_svd'
          function to use to compute the SVD, acceptable values in tensorly.SVD_FUNS
    non_negative : bool, default is False
        if True, non-negative factors are returned
    
    Returns
    -------
    core    : ndarray
              initialized core tensor 
    factors : list of factors
    """
    try:
        svd_fun = tl.SVD_FUNS[svd]
    except KeyError:
        message = 'Got svd={}. However, for the current backend ({}), the possible choices are {}'.format(
                svd, tl.get_backend(), tl.SVD_FUNS)
        raise ValueError(message)
    # Initialisation
    if init == 'svd':
        factors = []
        for index, mode in enumerate(modes):
            U, S, V = svd_fun(unfold(tensor, mode), n_eigenvecs=rank[index], random_state=random_state)   
            
            if non_negative is True: 
                U = make_svd_non_negative(tensor, U, S, V, nntype="nndsvd")
            
            factors.append(U[:, :rank[index]])        
        # The initial core approximation is needed here for the masking step
        core = multi_mode_dot(tensor, factors, modes=modes, transpose=True)        
        if non_negative is True:
            core = tl.abs(core) 
            
    elif init == 'random':
        rng = tl.check_random_state(random_state)
        core = tl.tensor(rng.random_sample(rank) + 0.01, **tl.context(tensor))  # offset keeps entries strictly positive
        factors = [tl.tensor(rng.random_sample(s), **tl.context(tensor)) for s in zip(tl.shape(tensor), rank)]
        if non_negative is True:
            factors = [tl.abs(f) for f in factors]
            core = tl.abs(core) 
    else:
        (core, factors) = init
 
    return core, factors
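
# A minimal usage sketch for the initialize_tucker helper above. It is an
# illustration added to this listing, not part of the original suite, and
# assumes the same tensorly imports used throughout these examples
# (tl, unfold, multi_mode_dot) are in scope.
def example_initialize_tucker_usage():
    rng = tl.check_random_state(0)
    tensor = tl.tensor(rng.random_sample((4, 5, 6)))
    rank = [2, 3, 2]
    modes = [0, 1, 2]
    core, factors = initialize_tucker(tensor, rank, modes,
                                      random_state=0, init='svd')
    # The core is rank-shaped; each factor maps one mode of the tensor
    # to the corresponding rank.
    assert tl.shape(core) == (2, 3, 2)
    for mode, factor in zip(modes, factors):
        assert tl.shape(factor) == (tl.shape(tensor)[mode], rank[mode])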
def test_tensor_train():
    """ Test for tensor_train """
    rng = tl.check_random_state(1234)

    ## Test 1

    # Create tensor with random elements
    tensor = tl.tensor(rng.random_sample([3, 4, 5, 6, 2, 10]))
    tensor_shape = tensor.shape

    # Find TT decomposition of the tensor
    rank = [1, 3, 3, 4, 2, 2, 1]
    factors = tensor_train(tensor, rank)

    assert (
        len(factors) == 6
    ), "Number of factors should be 6, currently has " + str(len(factors))

    # Check that the ranks are correct and that the second mode of each factor
    # has the correct number of elements
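    # (In the TT format, factor k has shape (r_{k-1}, n_k, r_k), with
    #  boundary ranks r_0 = r_d = 1.)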
    r_prev_iteration = 1
    for k in range(6):
        (r_prev_k, n_k, r_k) = factors[k].shape
        assert (tensor_shape[k] == n_k
                ), "Mode 1 of factor " + str(k) + " needs " + str(
                    tensor_shape[k]) + " dimensions, currently has " + str(n_k)
        assert (r_prev_k == r_prev_iteration), "Incorrect ranks of factors"
        r_prev_iteration = r_k

    ## Test 2
    # Create tensor with random elements
    tensor = tl.tensor(rng.random_sample([3, 4, 5, 6, 2, 10]))
    tensor_shape = tensor.shape

    # Find TT decomposition of the tensor
    rank = [1, 5, 4, 3, 8, 10, 1]
    factors = tensor_train(tensor, rank)

    for k in range(6):
        (r_prev, n_k, r_k) = factors[k].shape

        first_error_message = "TT rank " + str(
            k) + " is greater than the maximum allowed "
        first_error_message += str(r_prev) + " > " + str(rank[k])
        assert (r_prev <= rank[k]), first_error_message

        first_error_message = "TT rank " + str(
            k + 1) + " is greater than the maximum allowed "
        first_error_message += str(r_k) + " > " + str(rank[k + 1])
        assert (r_k <= rank[k + 1]), first_error_message

    ## Test 3
    tol = 10e-5
    tensor = tl.tensor(rng.random_sample([3, 3, 3]))
    factors = tensor_train(tensor, (1, 3, 3, 1))
    reconstructed_tensor = tl.tt_to_tensor(factors)
    error = tl.norm(reconstructed_tensor - tensor, 2)
    error /= tl.norm(tensor, 2)
    assert_(error < tol, 'norm 2 of reconstruction higher than tol')
Example n. 19
def test_congruence_coefficient(I, R, absolute_value):
    rng = tl.check_random_state(1234)
    A = tl.tensor(rng.standard_normal((I, R)))
    B = tl.tensor(rng.standard_normal((I, R)))
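    # Tucker's congruence coefficient between two vectors x and y is
    # <x, y> / (||x|| * ||y||); the fast implementation and the slow
    # reference should agree on both the coefficient and the optimal
    # column matching.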

    fast_congruence, fast_permutation = congruence_coefficient(A, B, absolute_value=absolute_value)
    slow_congruence, slow_permutation = _congruence_coefficient_slow(A, B, absolute_value=absolute_value)
    assert fast_congruence == pytest.approx(slow_congruence)
    if I != 1:
        assert fast_permutation == list(slow_permutation)
Example n. 20
def test_outer_product():
    """Test outer_dot"""
    rng = tl.check_random_state(1234)

    X = tl.tensor(rng.random_sample((4, 5, 6)))
    Y = tl.tensor(rng.random_sample((3, 4)))
    Z = tl.tensor(rng.random_sample((2)))
    tdot = outer([X, Y, Z])
    true_dot = tenalg.tensordot(X, Y, ())
    true_dot = tenalg.tensordot(true_dot, Z, ())
    testing.assert_array_almost_equal(tdot, true_dot)
def test_tensor_product():
    """Test tensor_dot"""
    rng = tl.check_random_state(1234)

    X = tl.tensor(rng.random_sample((4, 5, 6)))
    Y = tl.tensor(rng.random_sample((3, 4, 7)))
    tdot = tl.tensor_to_vec(tensor_dot(X, Y))
    true_dot = tl.tensor_to_vec(
        tenalg.outer([tl.tensor_to_vec(X),
                      tl.tensor_to_vec(Y)]))
    testing.assert_array_almost_equal(tdot, true_dot)
Example n. 22
def test_tensor_train_cross_3():
    """ Test for tensor-train """
    rng = tl.check_random_state(1234)

    ## Test 3
    tol = 10e-5
    tensor = tl.tensor(rng.random_sample([3, 3, 3]))
    factors = tensor_train_cross(tensor, (1, 3, 3, 1))
    reconstructed_tensor = tt_to_tensor(factors)
    error = tl.norm(reconstructed_tensor - tensor, 2)
    error /= tl.norm(tensor, 2)
    assert_(error < tol, 'norm 2 of reconstruction higher than tol')
Example n. 23
def test_FactorizedLinear(factorization):
    random_state = 12345
    rng = tl.check_random_state(random_state)
    batch_size = 2
    in_features = 9
    in_shape = (3, 3)
    out_features = 16
    out_shape = (4, 4)
    data = tl.tensor(rng.random_sample((batch_size, in_features)))

    # Create from a tensor factorization
    tensor = TensorizedMatrix.new(out_shape,
                                  in_shape,
                                  rank='same',
                                  factorization=factorization)
    tensor.normal_()
    fc = nn.Linear(in_features, out_features, bias=True)
    fc.weight.data = tensor.to_matrix()
    tfc = FactorizedLinear(in_shape,
                           out_shape,
                           rank='same',
                           factorization=tensor,
                           bias=True)
    tfc.bias.data = fc.bias
    res_fc = fc(data)
    res_tfc = tfc(data)
    testing.assert_array_almost_equal(res_fc, res_tfc, decimal=2)

    # Decompose an existing layer
    fc = nn.Linear(in_features, out_features, bias=True)
    tfc = FactorizedLinear.from_linear(fc, (3, 3), (4, 4), rank=34, bias=True)
    res_fc = fc(data)
    res_tfc = tfc(data)
    testing.assert_array_almost_equal(res_fc, res_tfc, decimal=2)

    # Multi-layer factorization
    fc1 = nn.Linear(in_features, out_features, bias=True)
    fc2 = nn.Linear(in_features, out_features, bias=True)
    tfc = FactorizedLinear.from_linear_list([fc1, fc2],
                                            in_shape,
                                            out_shape,
                                            rank=38,
                                            bias=True)
    ## Test first parametrized linear layer
    res_fc = fc1(data)
    res_tfc = tfc[0](data)
    testing.assert_array_almost_equal(res_fc, res_tfc, decimal=2)
    ## Test second parametrized linear layer
    res_fc = fc2(data)
    res_tfc = tfc[1](data)
    testing.assert_array_almost_equal(res_fc, res_tfc, decimal=2)
Example n. 24
def test_parafac2(normalize_factors, init):
    rng = tl.check_random_state(1234)
    tol_norm_2 = 10e-2
    rank = 3

    random_parafac2_tensor = random_parafac2(shapes=[(15 + rng.randint(5), 30)
                                                     for _ in range(25)],
                                             rank=rank,
                                             random_state=rng)
    # It is difficult to correctly identify B[i, :, r] if A[i, r] is small.
    # This is sensible, since then B[i, :, r] contributes little to the total value of X.
    # To test the PARAFAC2 decomposition in the presence of roundoff errors, we therefore add
    # 0.01 to the A factor matrix.
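    # (In the PARAFAC2 model each slice is X_i = B_i diag(a_i) C^T with
    #  B_i = P_i B, so the entries of B_i tied to a small a_ir are scaled
    #  down and therefore poorly determined.)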
    random_parafac2_tensor.factors[
        0] = random_parafac2_tensor.factors[0] + 0.01

    tensor = parafac2_to_tensor(random_parafac2_tensor)
    slices = parafac2_to_slices(random_parafac2_tensor)

    rec = parafac2(
        slices,
        rank,
        random_state=rng,
        init=init,
        n_iter_parafac=2,  # Otherwise, the SVD init will converge too quickly
        normalize_factors=normalize_factors)
    rec_tensor = parafac2_to_tensor(rec)

    error = T.norm(rec_tensor - tensor, 2)
    error /= T.norm(tensor, 2)
    assert_(error < tol_norm_2, 'norm 2 of reconstruction higher than tol')

    # Test factor correlation
    A_sign = T.sign(random_parafac2_tensor.factors[0])
    rec_A_sign = T.sign(rec.factors[0])
    A_corr = best_correlation(A_sign * random_parafac2_tensor.factors[0],
                              rec_A_sign * rec.factors[0])
    assert_(T.prod(A_corr) > 0.98**rank)

    C_corr = best_correlation(random_parafac2_tensor.factors[2],
                              rec.factors[2])
    assert_(T.prod(C_corr) > 0.98**rank)

    for i, (true_proj, rec_proj) in enumerate(
            zip(random_parafac2_tensor.projections, rec.projections)):
        true_Bi = T.dot(true_proj,
                        random_parafac2_tensor.factors[1]) * A_sign[i]
        rec_Bi = T.dot(rec_proj, rec.factors[1]) * rec_A_sign[i]
        Bi_corr = best_correlation(true_Bi, rec_Bi)
        assert_(T.prod(Bi_corr) > 0.98**rank)
Example n. 25
def test_non_negative_tucker(monkeypatch):
    """Test for non-negative Tucker"""
    rng = tl.check_random_state(1234)

    tol_norm_2 = 10e-1
    tol_max_abs = 10e-1
    tensor = tl.tensor(rng.random_sample((3, 4, 3)) + 1)
    core, factors = tucker(tensor, rank=[3, 4, 3], n_iter_max=200, verbose=1)
    nn_core, nn_factors = non_negative_tucker(tensor, rank=[3, 4, 3], n_iter_max=100)

    # Make sure all components are positive
    for factor in nn_factors:
        assert_(tl.all(factor >= 0))
    assert_(tl.all(nn_core >= 0))

    reconstructed_tensor = tucker_to_tensor((core, factors))
    nn_reconstructed_tensor = tucker_to_tensor((nn_core, nn_factors))
    error = tl.norm(reconstructed_tensor - nn_reconstructed_tensor, 2)
    error /= tl.norm(reconstructed_tensor, 2)
    assert_(error < tol_norm_2,
            'norm 2 of reconstruction error higher than tol')

    # Test the max abs difference between the reconstruction and the tensor
    assert_(tl.norm(reconstructed_tensor - nn_reconstructed_tensor, 'inf') < tol_max_abs,
              'abs norm of reconstruction error higher than tol')

    core_svd, factors_svd = non_negative_tucker(tensor, rank=[3, 4, 3], n_iter_max=500, init='svd', verbose=1)
    core_random, factors_random = non_negative_tucker(tensor, rank=[3, 4, 3], n_iter_max=200, init='random', random_state=1234)
    rec_svd = tucker_to_tensor((core_svd, factors_svd))
    rec_random = tucker_to_tensor((core_random, factors_random))
    error = tl.norm(rec_svd - rec_random, 2)
    error /= tl.norm(rec_svd, 2)
    assert_(error < tol_norm_2,
            'norm 2 of difference between svd and random init too high')
    assert_(tl.norm(rec_svd - rec_random, 'inf') < tol_max_abs,
            'abs norm of difference between svd and random init too high')

    # Test for a single rank passed
    # (should be used for all modes)
    rank = 3
    target_shape = (rank, )*tl.ndim(tensor)
    core, factors = non_negative_tucker(tensor, rank=rank)
    assert_(tl.shape(core) == target_shape, 'core has the wrong shape, got {}, but expected {}.'.format(tl.shape(core), target_shape))
    for i, f in enumerate(factors):
        expected_shape = (tl.shape(tensor)[i], rank)
        assert_(tl.shape(f) == expected_shape, '{}-th factor has the wrong shape, got {}, but expected {}.'.format(
                i, tl.shape(f), expected_shape))

    assert_class_wrapper_correctly_passes_arguments(monkeypatch, non_negative_tucker, Tucker_NN, ignore_args={'return_errors'}, rank=3)
Example n. 26
def test_parafac2_init_valid():
    rng = tl.check_random_state(1234)
    rank = 3

    random_parafac2_tensor = random_parafac2(shapes=[(15, 30)] * 25,
                                             rank=rank,
                                             random_state=rng)
    tensor = parafac2_to_tensor(random_parafac2_tensor)
    weights, (A, B, C), projections = random_parafac2_tensor
    B = T.dot(projections[0], B)

    for init_method in [
            'random', 'svd', random_parafac2_tensor, (weights, (A, B, C))
    ]:
        init = initialize_decomposition(tensor, rank, init=init_method)
        assert init.shape == random_parafac2_tensor.shape
Example n. 27
def test_tcl():
    random_state = 12345
    rng = tl.check_random_state(random_state)
    batch_size = 2
    in_shape = (4, 5, 6)
    out_shape = (2, 3, 5)
    data = tl.tensor(rng.random_sample((batch_size, ) + in_shape))

    expected_shape = (batch_size, ) + out_shape
    tcl = TCL(input_shape=in_shape, rank=out_shape, bias=False)
    res = tcl(data)
    testing.assert_(
        res.shape == expected_shape,
        msg=
        f'Wrong output size of TCL, expected {expected_shape} but got {res.shape}'
    )
Example n. 28
def test_tucker(monkeypatch):
    """Test for the Tucker decomposition"""
    rng = tl.check_random_state(1234)

    tol_norm_2 = 10e-3
    tol_max_abs = 10e-1
    tensor = tl.tensor(rng.random_sample((3, 4, 3)))
    core, factors = tucker(tensor, rank=None, n_iter_max=200, verbose=True)
    reconstructed_tensor = tucker_to_tensor((core, factors))
    norm_rec = tl.norm(reconstructed_tensor, 2)
    norm_tensor = tl.norm(tensor, 2)
    assert((norm_rec - norm_tensor)/norm_rec < tol_norm_2)

    # Test the max abs difference between the reconstruction and the tensor
    assert(tl.max(tl.abs(reconstructed_tensor - tensor)) < tol_max_abs)

    # Test the shape of the core and factors
    ranks = [2, 3, 1]
    core, factors = tucker(tensor, rank=ranks, n_iter_max=100, verbose=1)
    for i, rank in enumerate(ranks):
        assert_equal(factors[i].shape, (tensor.shape[i], ranks[i]),
                     err_msg="factors[{}].shape={}, expected {}".format(
                         i, factors[i].shape, (tensor.shape[i], ranks[i])))
        assert_equal(tl.shape(core)[i], rank, err_msg="Core.shape[{}]={}, "
                     "expected {}".format(i, core.shape[i], rank))

    # try fixing one of the factors
    factors_init = [tl.copy(f) for f in factors]
    _, factors = tucker(tensor, rank=ranks, init=(core, factors), fixed_factors=[1], n_iter_max=100, verbose=1)
    assert_array_equal(factors[1], factors_init[1])

    # Random and SVD init should converge to a similar solution
    tol_norm_2 = 10e-1
    tol_max_abs = 10e-1

    core_svd, factors_svd = tucker(tensor, rank=[3, 4, 3], n_iter_max=200, init='svd', verbose=1)
    core_random, factors_random = tucker(tensor, rank=[3, 4, 3], n_iter_max=200, init='random', random_state=1234)
    rec_svd = tucker_to_tensor((core_svd, factors_svd))
    rec_random = tucker_to_tensor((core_random, factors_random))
    error = tl.norm(rec_svd - rec_random, 2)
    error /= tl.norm(rec_svd, 2)
    assert_(error < tol_norm_2,
            'norm 2 of difference between svd and random init too high')
    assert_(tl.max(tl.abs(rec_svd - rec_random)) < tol_max_abs,
            'abs norm of difference between svd and random init too high')
    assert_class_wrapper_correctly_passes_arguments(monkeypatch, tucker, Tucker, ignore_args={}, rank=3)
def test_gcp_nnContinuous_loss_functions():
    cont_nn_losses = ['rayleigh', 'gamma']
    opts = ['lbfgsb', 'sgd']
    rng = tl.check_random_state(1234)
    shp = (4, 5, 6)
    rank = 4
    tensor = generate_test_tensor('rayleigh', shp)
    for loss in cont_nn_losses:
        print("***************************************************\n")
        print("Loss function type: {}".format(loss))
        for opt in opts:
            mTen = gcp(tensor, rank, type=loss, opt=opt, maxiters=1000, epciters=100)
            assert (mTen is not None), "gcp({}) returned null".format(opt)
            mTen = tl.cp_to_tensor(mTen)
            assert (tensor.size == mTen.size), \
                "Unequal number of tensor elements. Tensor: {} CPTensor: {}".format(tensor.size, mTen.size)
            score = 1 - (tl.norm(tensor - mTen) / tl.norm(tensor))
            print("Score: {0:0.4f}\n".format(score))
Example n. 30
def test_pad_by_zeros():
    """Test that if we pad a tensor by zeros, then it doesn't change.

    This failed for TensorFlow at some point.
    """
    rng = tl.check_random_state(1234)
    rank = 3

    I = 25
    J = 15
    K = 30

    weights, factors, projections = random_parafac2(shapes=[(J, K)] * I,
                                                    rank=rank,
                                                    random_state=rng)
    constructed_tensor = parafac2_to_tensor((weights, factors, projections))
    padded_tensor = _pad_by_zeros(constructed_tensor)
    assert_(tl.max(tl.abs(constructed_tensor - padded_tensor)) < 1e-10)