Example #1
import tensorly as tl
from tensorly.random import check_random_state


def initialize_factors(tensor, rank, random_state=None, non_negative=False):
    """Initialize factors used in `parafac`.

    Factor matrices are initialized using `random_state`.

    Parameters
    ----------
    tensor : ndarray
    rank : int
    random_state : int
        set to ensure reproducibility
    non_negative : bool, default is False
        if True, non-negative factors are returned

    Returns
    -------
    factors : ndarray list
        List of initialized factors of the CP decomposition where element `i`
        is of shape (tensor.shape[i], rank)

    """
    rng = check_random_state(random_state)

    factors = [
        tl.tensor(rng.random_sample((tensor.shape[i], rank)),
                  **tl.context(tensor)) for i in range(tl.ndim(tensor))
    ]
    if non_negative:
        return [tl.abs(f) for f in factors]
    return factors
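A minimal usage sketch for the helper above (assuming a NumPy backend; the shapes and seed are illustrative):

import numpy as np
import tensorly as tl

tensor = tl.tensor(np.random.random_sample((4, 5, 6)))
factors = initialize_factors(tensor, rank=3, random_state=0, non_negative=True)
# One factor matrix per mode, each of shape (tensor.shape[i], rank)
print([tl.shape(f) for f in factors])  # [(4, 3), (5, 3), (6, 3)]
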
Example #2
import tensorly as tl
from tensorly.cp_tensor import cp_normalize, _validate_cp_tensor
from tensorly.random import check_random_state, random_cp
from tensorly.testing import assert_equal, assert_raises


def test_validate_cp_tensor():
    rng = check_random_state(12345)
    true_shape = (3, 4, 5)
    true_rank = 3
    cp_tensor = random_cp(true_shape, true_rank)
    (weights, factors) = cp_normalize(cp_tensor)
    
    # Check correct rank and shapes are returned
    shape, rank = _validate_cp_tensor((weights, factors))
    assert_equal(shape, true_shape,
                    err_msg='Returned incorrect shape (got {}, expected {})'.format(
                        shape, true_shape))
    assert_equal(rank, true_rank,
                    err_msg='Returned incorrect rank (got {}, expected {})'.format(
                        rank, true_rank))
    
    # One of the factors has the wrong rank
    factors[0], copy = tl.tensor(rng.random_sample((4, 4))), factors[0]
    with assert_raises(ValueError):
        _validate_cp_tensor((weights, factors))
    
    # Not the correct amount of weights
    factors[0] = copy
    wrong_weights = weights[1:]
    with assert_raises(ValueError):
        _validate_cp_tensor((wrong_weights, factors))

    # Not enough factors
    with assert_raises(ValueError):
        _validate_cp_tensor((weights[:1], factors[:1]))
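For context, a brief sketch of the normalization step the test relies on: `cp_normalize` rescales each factor column to unit norm and moves the scale into `weights` (a minimal sketch, assuming a NumPy backend; shapes are illustrative):

import tensorly as tl
from tensorly.cp_tensor import cp_normalize
from tensorly.random import random_cp

cp_tensor = random_cp((3, 4, 5), rank=3)
weights, factors = cp_normalize(cp_tensor)
print(tl.shape(weights))  # (3,) -- one weight per rank-one component
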
Example #3
import tensorly as tl
from tensorly import random, tenalg, testing
from tensorly.tenalg import tensor_dot  # assumed import for this snippet


def test_tensor_product():
    """Test tensor_dot"""
    rng = random.check_random_state(1234)

    X = tl.tensor(rng.random_sample((4, 5, 6)))
    Y = tl.tensor(rng.random_sample((3, 4, 7)))
    tdot = tl.tensor_to_vec(tensor_dot(X, Y))
    true_dot = tl.tensor_to_vec(
        tenalg.outer([tl.tensor_to_vec(X),
                      tl.tensor_to_vec(Y)]))
    testing.assert_array_almost_equal(tdot, true_dot)
Example #4
import tensorly as tl
from tensorly import random, tenalg, testing
from tensorly.tenalg import batched_tensor_dot  # assumed import for this snippet


def test_batched_tensor_product():
    """Test batched-tensor_dot

    Notes
    -----
    At the time of writing, MXNet doesn't support transpose 
    for tensors of order higher than 6
    """
    rng = random.check_random_state(1234)
    batch_size = 3

    X = tl.tensor(rng.random_sample((batch_size, 4, 5, 6)))
    Y = tl.tensor(rng.random_sample((batch_size, 3, 7)))
    tdot = tl.unfold(batched_tensor_dot(X, Y), 0)
    for i in range(batch_size):
        true_dot = tl.tensor_to_vec(
            tenalg.outer([tl.tensor_to_vec(X[i]),
                          tl.tensor_to_vec(Y[i])]))
        testing.assert_array_almost_equal(tdot[i], true_dot)
Example #5
import tensorly as tl
from tensorly.cp_tensor import cp_mode_dot
from tensorly.random import check_random_state, random_cp
from tensorly.tenalg import mode_dot
from tensorly.testing import assert_array_almost_equal, assert_equal


def test_cp_mode_dot():
    """Test for cp_mode_dot
    
        We will compare cp_mode_dot 
        (which operates directly on decomposed tensors)
        with mode_dot (which operates on full tensors)
        and check that the results are the same.
    """
    rng = check_random_state(12345)
    shape = (5, 4, 6)
    rank = 3
    cp_ten = random_cp(shape, rank=rank, orthogonal=True, full=False)
    full_tensor = tl.cp_to_tensor(cp_ten)
    # matrix for mode 1
    matrix = tl.tensor(rng.random_sample((7, shape[1])))
    # vec for mode 2
    vec = tl.tensor(rng.random_sample(shape[2]))

    # Test cp_mode_dot with matrix
    res = cp_mode_dot(cp_ten, matrix, mode=1, copy=True)
    # Note that if copy=True is not respected, the factors will be changed
    # and the next test will fail
    res = tl.cp_to_tensor(res)
    true_res = mode_dot(full_tensor, matrix, mode=1)
    assert_array_almost_equal(true_res, res)
    
    # Check that the data was indeed copied
    rec = tl.cp_to_tensor(cp_ten)
    assert_array_almost_equal(full_tensor, rec)
    
    # Test cp_mode_dot with vec
    res = cp_mode_dot(cp_ten, vec, mode=2, copy=True)
    res = tl.cp_to_tensor(res)
    true_res = mode_dot(full_tensor, vec, mode=2)
    assert_equal(res.shape, true_res.shape)
    assert_array_almost_equal(true_res, res)
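As a hedged illustration of the shape bookkeeping in this test: a mode-1 product with a (7, 4) matrix replaces the size-4 mode, while a mode-2 product with a length-6 vector contracts that mode away (a minimal sketch; shapes are illustrative):

import numpy as np
import tensorly as tl
from tensorly.tenalg import mode_dot

full = tl.tensor(np.random.random_sample((5, 4, 6)))
matrix = tl.tensor(np.random.random_sample((7, 4)))
vec = tl.tensor(np.random.random_sample(6))
print(tl.shape(mode_dot(full, matrix, mode=1)))  # (5, 7, 6)
print(tl.shape(mode_dot(full, vec, mode=2)))     # (5, 4)
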
Example #6
import tensorly as tl
from tensorly.base import unfold
from tensorly.cp_tensor import CPTensor, cp_normalize
from tensorly.random import check_random_state, random_cp


def initialize_cp(tensor,
                  rank,
                  init='svd',
                  svd='numpy_svd',
                  random_state=None,
                  non_negative=False,
                  normalize_factors=False):
    r"""Initialize factors used in `parafac`.
    The type of initialization is set using `init`. If `init == 'random'` then
    initialize factor matrices using `random_state`. If `init == 'svd'` then
    initialize the `m`th factor matrix using the `rank` left singular vectors
    of the `m`th unfolding of the input tensor.
    Parameters
    ----------
    tensor : ndarray
    rank : int
    init : {'svd', 'random'}, optional
    svd : str, default is 'numpy_svd'
        function to use to compute the SVD, acceptable values in tensorly.SVD_FUNS
    non_negative : bool, default is False
        if True, non-negative factors are returned
    Returns
    -------
    factors : CPTensor
        An initial cp tensor.
    """
    rng = check_random_state(random_state)

    if init == 'random':
        return random_cp(tl.shape(tensor),
                         rank,
                         normalise_factors=False,
                         random_state=rng,
                         **tl.context(tensor))

    elif init == 'svd':
        try:
            svd_fun = tl.SVD_FUNS[svd]
        except KeyError:
            message = 'Got svd={}. However, for the current backend ({}), the possible choices are {}'.format(
                svd, tl.get_backend(), tl.SVD_FUNS)
            raise ValueError(message)

        factors = []
        for mode in range(tl.ndim(tensor)):
            U, S, _ = svd_fun(unfold(tensor, mode), n_eigenvecs=rank)

            # Put SVD initialization on the same scaling as the tensor in case normalize_factors=False
            if mode == 0:
                idx = min(rank, tl.shape(S)[0])
                U = tl.index_update(U, tl.index[:, :idx], U[:, :idx] * S[:idx])

            if tensor.shape[mode] < rank:
                # TODO: this is a hack but it seems to do the job for now
                # Pad the factor with random columns when the mode is smaller
                # than the requested rank
                random_part = tl.tensor(
                    rng.random_sample(
                        (U.shape[0], rank - tl.shape(tensor)[mode])),
                    **tl.context(tensor))
                U = tl.concatenate([U, random_part], axis=1)

            factors.append(U[:, :rank])

        kt = CPTensor((None, factors))

    elif isinstance(init, (tuple, list, CPTensor)):
        # TODO: Test this
        try:
            kt = CPTensor(init)
        except ValueError:
            raise ValueError(
                'If the initialization is a tuple, list or CPTensor, then it '
                'must be possible to convert it to a CPTensor instance')
    else:
        raise ValueError(
            'Initialization method "{}" not recognized'.format(init))

    if non_negative:
        kt.factors = [tl.abs(f) for f in kt[1]]

    if normalize_factors:
        kt = cp_normalize(kt)

    return kt
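A minimal usage sketch for `initialize_cp` (assuming a NumPy backend; shapes are illustrative):

import numpy as np
import tensorly as tl

tensor = tl.tensor(np.random.random_sample((4, 5, 6)))
kt = initialize_cp(tensor, rank=3, init='svd', normalize_factors=True)
weights, factors = kt  # a CPTensor unpacks into (weights, factors)
print([tl.shape(f) for f in factors])  # [(4, 3), (5, 3), (6, 3)]
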
Example #7
import matplotlib.pyplot as plt

import tensorly as tl
from tensorly.base import tensor_to_vec, partial_tensor_to_vec
from tensorly.datasets.synthetic import gen_image
from tensorly.random import check_random_state
from tensorly.regression.tucker_regression import TuckerRegressor

# Parameters of the experiment
image_height = 25
image_width = 25
# shape of the images
patterns = ['rectangle', 'swiss', 'circle']
# ranks to test
ranks = [1, 2, 3, 4, 5]

# Generate random samples
rng = check_random_state(1)
X = tl.tensor(rng.normal(size=(1000, image_height, image_width), loc=0, scale=1))

# Parameters of the plot, deduced from the data
n_rows = len(patterns)
n_columns = len(ranks) + 1
# Plot the three images
fig = plt.figure()

for i, pattern in enumerate(patterns):

    print('fitting pattern n.{}'.format(i))

    # Generate the original image
    weight_img = gen_image(region=pattern, image_height=image_height, image_width=image_width)
    weight_img = tl.tensor(weight_img)
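The snippet is truncated here; in the full example, each pattern is then regressed at every rank. A hedged sketch of that continuation (the labels and estimator settings below are assumptions, not part of the snippet; `X` and `weight_img` are the variables defined above):

# Generate labels from the weight image, then fit a Tucker regression
y = tl.dot(partial_tensor_to_vec(X, skip_begin=1), tensor_to_vec(weight_img))
estimator = TuckerRegressor(weight_ranks=[2, 2], tol=1e-7,
                            n_iter_max=100, reg_W=1, verbose=0)
estimator.fit(X, y)
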
Example #8
import matplotlib.pyplot as plt
import numpy as np

import tensorly as T
from tensorly.decomposition import parafac, tucker
from tensorly.kruskal_tensor import kruskal_to_tensor
from tensorly.random import check_random_state
from tensorly.tucker_tensor import tucker_to_tensor

# `tensor_distance` is assumed to be defined elsewhere in the original module.


def decomp_plot(edge_len=25,
                iterations=[1, 2, 3, 4],
                ranks=[1, 5, 25, 50, 125, 130, 150, 200],
                decomp='CP'):
    #Params
    print(ranks)

    #Generate random samples
    rng = check_random_state(7)
    X = T.tensor(rng.normal(size=(1000, edge_len, edge_len), loc=0, scale=1))

    #For plotting
    n_rows = len(iterations)
    n_columns = len(ranks) + 1

    fig = plt.figure()

    for i, _ in enumerate(iterations):
        #Generate tensor
        weight_img = X[i * edge_len:(i + 1) * edge_len, :, :]

        ax = fig.add_subplot(n_rows, n_columns, i * n_columns + 1)

        #Plot image corresponding to 3-D Tensor
        ax.imshow(T.to_numpy(np.sum(weight_img, axis=0)),
                  cmap=plt.cm.OrRd,
                  interpolation='nearest')
        ax.set_axis_off()
        if i == 0:
            ax.set_title('Original')

        for j, rank in enumerate(ranks):
            #Tensor decomposition, image_edge x rank (25x1, 25x5, 25x25 ...)

            if decomp == 'CP':
                #CP decomposition
                components = parafac(weight_img, rank=rank)

                ax = fig.add_subplot(n_rows, n_columns, i * n_columns + j + 2)
                # Aggregate the factors for visualization
                simg = sum(components[k] for k in range(len(components)))
                ax.imshow(T.to_numpy(simg),
                          cmap=plt.cm.OrRd,
                          interpolation='nearest')
                ax.text(.5,
                        2.0,
                        '{:.2f}'.format(
                            tensor_distance(kruskal_to_tensor(components),
                                            weight_img)),
                        color='r')
                # ax.set_autoscaley_on(False)
                ax.set_axis_off()
            else:
                #Tucker decomposition
                components, f = tucker(weight_img, ranks=[3, 25, rank])
                #print(components.shape)

                ax = fig.add_subplot(n_rows, n_columns, i * n_columns + j + 2)
                # Aggregate the factors for visualization
                simg = sum(components[k] for k in range(len(components)))
                ax.imshow(T.to_numpy(simg),
                          cmap=plt.cm.OrRd,
                          interpolation='nearest')
                ax.text(.5,
                        2.0,
                        '{:.2f}'.format(
                            tensor_distance(tucker_to_tensor(components, f),
                                            weight_img)),
                        color='r')
                # ax.set_autoscaley_on(False)
                ax.set_axis_off()

            if i == 0:
                ax.set_title('\n{}'.format(rank))

    plt.suptitle('Tensor Decompositions')
    plt.show()
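An example invocation (assumes an interactive matplotlib backend; the argument values are illustrative):

decomp_plot(edge_len=25, iterations=[1, 2, 3, 4], ranks=[1, 5, 25], decomp='CP')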