def test_tensor_train_cross_2():
    """ Test for tensor-train """
    rng = tl.check_random_state(1234)

    ## Test 2
    # Create tensor with random elements
    tensor = tl.tensor(rng.random_sample([3, 4, 5, 6, 2, 10]))

    # Find TT decomposition of the tensor
    rank = [1, 2, 2, 3, 2, 2, 1]
    factors = tensor_train_cross(tensor, rank, random_state=rng)

    for k in range(6):
        (r_prev, n_k, r_k) = factors[k].shape

        first_error_message = "TT rank " + str(k) + " is greater than the maximum allowed "
        first_error_message += str(r_prev) + " > " + str(rank[k])
        assert(r_prev<=rank[k]), first_error_message

        first_error_message = "TT rank " + str(k+1) + " is greater than the maximum allowed "
        first_error_message += str(r_k) + " > " + str(rank[k+1])
        assert(r_k<=rank[k+1]), first_error_message
Example 2
def test_lstsq():
    m, n, k = 4, 3, 2

    # test dimensions
    a = T.randn((m, n))
    b = T.randn((m, k))
    x, res = T.lstsq(a, b)
    assert_equal(x.shape, (n, k))

    # test residuals
    assert_array_almost_equal(T.norm(T.dot(a, x) - b, axis=0)**2, res)
    rank = 2
    a = T.dot(T.randn((m, rank)), T.randn((rank, n)))
    _, res = T.lstsq(a, b)
    assert_array_almost_equal(tl.tensor([]), res)

    # test least squares solution
    a = T.randn((m, n))
    x = T.randn((n, ))
    b = T.dot(a, x)
    x_lstsq, res = T.lstsq(a, b)
    assert_array_almost_equal(T.dot(a, x_lstsq), b, decimal=5)
Example 3
    def __custom_parafac(self, tensor, neg_fac=0, tol=1e-7):
        """Customized PARAFAC algorithm
			
		Parameters
		----------
		tensor : torch.Tensor
			The tensor of activity of N neurons, T timepoints and K trials of shape N, T, K
		neg_fac : int, optional
			Index of the factor which is allowed to be negative (default is 0)
		tol : float, optional
			Threshold for convergence (default is 1e-7)

		Returns
		-------
		list
			List of optimized factors
		"""
        factors = self.__initialize_factors(tensor,
                                            self.rank,
                                            self.init,
                                            custom=neg_fac)
        pseudo_inverse = tl.tensor(np.ones((self.rank, self.rank)),
                                   **tl.context(tensor))
        for iteration in range(self.max_iteration):
            for mode in range(self.dimension):
                if mode == neg_fac:
                    factors[mode] = self.__factor(tensor, factors, mode,
                                                  pseudo_inverse)
                else:
                    factors[mode] = factors[mode] * self.__factor_non_negative(
                        tensor, factors, mode)

            if (iteration % 25 == 0 or iteration % 25 == 1) and iteration > 1:
                if self.__get_error(iteration, tensor, factors, tol,
                                    self.verbose):
                    break

        return factors
Example 4
def valid_3ord():
    print('\nCorrectness benchmark for third-order tensor CP decomposition.\n')
    
    shape = (2, 3, 4)
    max_iter = 30
    print('----------TensorNP----------')
    norm_errors = 0
    for _ in range(max_iter):
        tensor = tnp.randn(2, 3, 4)
        factors, lamda = tnp.cp(tensor, r=3, stop_iter=500, tol=1e-5, normalize_factor=True)
        rec_tensor = tnp.reconstruct_cp(factors, lamda, shape)
        norm_error = tnp.linalg.norm(rec_tensor - tensor) / tnp.linalg.norm(tensor)
        norm_errors += norm_error
    print(f'error ({norm_errors/max_iter})')

    print('----------scikit-tensor----------')
    norm_errors = 0
    for _ in range(max_iter):
        tensor = tnp.randn(2, 3, 4)
        skt_tensor = skt.dtensor(tensor)
        P, _, _ = skt.cp_als(skt_tensor, rank=3, init='random')
        rec_tensor = P.toarray()
        norm_error = tnp.linalg.norm(rec_tensor - tensor) / tnp.linalg.norm(tensor)
        norm_errors += norm_error
    print(f'error ({norm_errors/max_iter})')


    print('----------Tensorly----------')
    norm_errors = 0
    for _ in range(max_iter):
        tensor = tnp.randn(2, 3, 4)
        tl_tensor = tl.tensor(tensor)
        cp_tensor = tensorly.decomposition.parafac(
            tl_tensor, rank=3, n_iter_max=500, tol=1e-6, normalize_factors=True, init='random')
        rec_tensor = tnp.reconstruct_cp(cp_tensor.factors, cp_tensor.weights, shape)
        norm_error = tnp.linalg.norm(rec_tensor - tensor) / tnp.linalg.norm(tensor)
        norm_errors += norm_error
    print(f'error ({norm_errors/max_iter})')
Example 5
def test_set_backend():
    print('Testing set_backend for backend = {}'.format(tl._BACKEND))
    tensor = T.tensor(np.arange(12).reshape((4, 3)))
    tensor2 = tl.tensor(np.arange(12).reshape((4, 3)))
    if tl._BACKEND == 'pytorch':
        import torch
        assert torch.is_tensor(tensor) and torch.is_tensor(tensor2)
        # assert type(tensor) == type(tensor2) == torch.FloatTensor
    elif tl._BACKEND == 'numpy':
        assert type(tensor) == type(tensor2) == np.ndarray
    elif tl._BACKEND == 'mxnet':
        import mxnet as mx
        assert type(tensor) == type(tensor2) == mx.nd.NDArray
    elif tl._BACKEND == 'tensorflow':
        import tensorflow as tf
        assert isinstance(tensor, tf.Tensor) and isinstance(tensor2, tf.Tensor)
    elif tl._BACKEND == 'cupy':
        import cupy as cp
        assert isinstance(tensor, cp.ndarray) and isinstance(
            tensor2, cp.ndarray)
    else:
        raise ValueError('_BACKEND not recognised (got {})'.format(
            tl._BACKEND))
Example 6
    def setUp(self):
        """
        setUp function, not to redefine the objects in each test function.
        """
        self.tensor = tl.tensor(
            np.random.rand(random.randint(20, 100), random.randint(20, 100),
                           random.randint(20, 100)))

        self.random_ranks = (random.randint(20, 40), random.randint(20, 40),
                             random.randint(20, 40))
        self.random_shape_tens = (random.randint(50, 100),
                                  random.randint(50, 100),
                                  random.randint(50, 100))
        self.factors_0 = random.randint(1, 10) * np.random.rand(
            self.random_shape_tens[0], self.random_ranks[0])
        self.factors_1 = random.randint(1, 10) * np.random.rand(
            self.random_shape_tens[1], self.random_ranks[1])
        self.factors_2 = random.randint(1, 10) * np.random.rand(
            self.random_shape_tens[2], self.random_ranks[2])
        self.core = np.random.rand(self.random_ranks[0], self.random_ranks[1],
                                   self.random_ranks[2])
        self.init_by_product_tensor = tl.tenalg.multi_mode_dot(
            self.core, [self.factors_0, self.factors_1, self.factors_2])
def test_matrix_product_state_cross_2():
    """ Test for matrix_product_state """
    rng = check_random_state(1234)

    ## Test 2
    # Create tensor with random elements
    tensor = tl.tensor(rng.random_sample([3, 4, 5, 6, 2, 10]))
    tensor_shape = tensor.shape

    # Find MPS decomposition of the tensor
    rank = [1, 3, 3, 4, 2, 2, 1]
    factors = matrix_product_state_cross(tensor, rank)

    for k in range(6):
        (r_prev, n_k, r_k) = factors[k].shape

        first_error_message = "MPS rank " + str(k) + " is greater than the maximum allowed "
        first_error_message += str(r_prev) + " > " + str(rank[k])
        assert(r_prev<=rank[k]), first_error_message

        first_error_message = "MPS rank " + str(k+1) + " is greater than the maximum allowed "
        first_error_message += str(r_k) + " > " + str(rank[k+1])
        assert(r_k<=rank[k+1]), first_error_message
Example 8
def homo_encryption(original_tensor, key_Matrix, n):
    # initialize the tensor
    # tensor_1 = tl.tensor(np.arange(27).reshape(3, 3, 3).astype('float32'))
    # print(tensor_1)
    # encryption key matrix
    # key_matrix = tl.tensor(np.arange(1,10).reshape(3, 3).astype('float32'))
    # print(key_matrix)

    encrypted_tensor = []
    for k in range(original_tensor.shape[0]):
        encrypted_factors = []
        for i in range(original_tensor.shape[2]):
            # X = key_matrix[i,0]*tensor_1[0,0,:] + key_matrix[i,1]*tensor_1[0,1,:] + key_matrix[i,2] * tensor_1[0,2,:]
            X = tl.tensor(np.zeros(n))
            for j in range(original_tensor.shape[1]):
                X = X + key_Matrix[i, j] * original_tensor[k, j, :]
            encrypted_factors.append(X)
        encrypted_factors = tl.fold(encrypted_factors, mode=0, shape=(n, n))
        # print(encrypted_factors)
        encrypted_tensor.append(encrypted_factors)
    encrypted_tensor = tl.fold(encrypted_tensor, mode=0, shape=(n, n, n))
    # print(encrypted_tensor)
    return encrypted_tensor
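
# A hypothetical usage sketch for homo_encryption (not part of the original
# snippet): it mirrors the commented-out initialisation inside the function and
# assumes a NumPy-backed tensorly. A practical key matrix should additionally be
# invertible so the encryption can be undone.
import numpy as np
import tensorly as tl

n = 3
tensor_1 = tl.tensor(np.arange(27).reshape(3, 3, 3).astype('float32'))
key_matrix = tl.tensor(np.arange(1, 10).reshape(3, 3).astype('float32'))
encrypted = homo_encryption(tensor_1, key_matrix, n)
print(encrypted.shape)  # (3, 3, 3)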
Example 9
def hard_thresholding(tensor, number_of_non_zero):
    """
    Proximal operator of the l0 ``norm''
    Keeps greater "number_of_non_zero" elements untouched and sets other elements to zero.

    Parameters
    ----------
    tensor : ndarray
    number_of_non_zero : int

    Returns
    -------
    ndarray
          Thresholded tensor on which the operator has been applied
    """
    tensor_vec = tl.copy(tl.tensor_to_vec(tensor))
    sorted_indices = tl.argsort(tl.argsort(tl.abs(tensor_vec),
                                           axis=0,
                                           descending=True),
                                axis=0)
    return tl.reshape(
        tl.where(sorted_indices < number_of_non_zero, tensor_vec,
                 tl.tensor(0, **tl.context(tensor_vec))), tensor.shape)
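
# A hypothetical sanity-check sketch for hard_thresholding (not part of the
# original snippet; NumPy-backed tensorly assumed): keeping the two
# largest-magnitude entries zeroes out the rest.
import numpy as np
import tensorly as tl

x = tl.tensor(np.array([0.1, -3.0, 0.5, 2.0]))
print(hard_thresholding(x, 2))  # expected: [ 0. -3.  0.  2.]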
def vectToTens(vect):
    """Convert a rectangular, 3-level nested dict of numbers into a tensor."""
    dimA = len(vect)
    # Peek at the first entries to get the inner dimensions
    # (assumes every nested dict has the same length).
    for v in vect:
        dimB = len(vect[v])
        for t in vect[v]:
            dimC = len(vect[v][t])
            break
        break
    #print(dimA,dimB,dimC)
    arr = np.zeros((dimA, dimB, dimC))
    i = 0
    for a in vect:
        j = 0
        for b in vect[a]:
            k = 0
            for c in vect[a][b]:
                arr[i][j][k] = vect[a][b][c]
                k += 1
            j += 1
        i += 1
    tens = tl.tensor(arr)
    #print(len(tens[0][0]))
    return tens
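
# A hypothetical usage sketch for vectToTens (not part of the original snippet):
# it assumes the rectangular, 3-level nested dict the function expects.
nested = {
    'a': {'x': {'p': 1.0, 'q': 2.0}, 'y': {'p': 3.0, 'q': 4.0}},
    'b': {'x': {'p': 5.0, 'q': 6.0}, 'y': {'p': 7.0, 'q': 8.0}},
}
tens = vectToTens(nested)
print(tens.shape)  # (2, 2, 2)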
Example 11
def test_unfolding_dot_khatri_rao():
    """Test for unfolding_dot_khatri_rao
    
    Check against other version check sparse safe
    """
    shape = (10, 10, 10, 4)
    rank = 5
    tensor = tl.tensor(np.random.random(shape))
    weights, factors = random_kruskal(shape=shape,
                                      rank=rank,
                                      full=False,
                                      normalise_factors=True)

    for mode in range(tl.ndim(tensor)):
        # Version forming the khatri-rao product explicitly
        unfolded = unfold(tensor, mode)
        kr_factors = khatri_rao(factors, weights=weights, skip_matrix=mode)
        true_res = tl.dot(unfolded, kr_factors)

        # Efficient sparse-safe version
        res = unfolding_dot_khatri_rao(tensor, (weights, factors), mode)

        assert_array_almost_equal(true_res, res, decimal=4)
Example 12
def test_masked_parafac(linesearch):
    """Test for the masked CANDECOMP-PARAFAC decomposition.
    This checks that a mask of 1's is identical to the unmasked case.
    """
    tensor = random_cp((4, 4, 4), rank=1, full=True)
    mask = np.ones((4, 4, 4))
    mask[1, :, 3] = 0
    mask[:, 2, 3] = 0
    mask = tl.tensor(mask)
    tensor_mask = tensor * mask - 10000.0 * (1 - mask)

    fac = parafac(tensor_mask,
                  svd_mask_repeats=0,
                  mask=mask,
                  n_iter_max=0,
                  rank=1,
                  init="svd")
    fac_resvd = parafac(tensor_mask,
                        svd_mask_repeats=10,
                        mask=mask,
                        n_iter_max=0,
                        rank=1,
                        init="svd")
    err = tl.norm(tl.cp_to_tensor(fac) - tensor, 2)
    err_resvd = tl.norm(tl.cp_to_tensor(fac_resvd) - tensor, 2)
    assert_(err_resvd < err, 'restarting SVD did not help')

    # Check that we get roughly the same answer with the full tensor and masking
    mask_fact = parafac(tensor,
                        rank=1,
                        mask=mask,
                        init='random',
                        random_state=1234,
                        linesearch=linesearch)
    fact = parafac(tensor, rank=1)
    diff = cp_to_tensor(mask_fact) - cp_to_tensor(fact)
    assert_(T.norm(diff) < 0.001, 'norm 2 of reconstruction higher than 0.001')
Example 13
def test_tt_to_tensor_random():
    """ Test for tt_to_tensor

        Uses random tensor as input
    """

    # Create tensor with random elements
    tensor = tl.tensor(np.random.rand(3, 4, 5, 6, 2, 10))
    tensor_shape = tensor.shape

    # Find TT decomposition of the tensor
    rank = 10
    factors = tensor_train(tensor, rank)

    # Reconstruct the original tensor
    reconstructed_tensor = tl.tt_to_tensor(factors)
    assert_(tl.shape(reconstructed_tensor) == tensor_shape)

    # Check that no TT rank exceeds the requested maximum of 10
    D = len(factors)
    for k in range(D):
        (r_prev, _, r_k) = factors[k].shape
        assert r_prev <= rank, "TT rank with index " + str(k) + " exceeds the maximum rank"
        assert r_k <= rank, "TT rank with index " + str(k + 1) + " exceeds the maximum rank"
Example 14
 def niftyList(self, smoothImg):
     # check that the input contains NIfTI images (they expose get_data())
     if hasattr(smoothImg[0], 'get_data'):
         # check that the image data is 4d
         shape_img = smoothImg[0].get_data().shape
         if len(shape_img) == 4:
             # instantiate a tensor of the same shape as the smoothed image
             # tensor = tf.get_variable("tensor_fMRI", smoothImg.get_data().shape)
             # number of elements in the tensor
             dim = shape_img[0] * shape_img[1] * shape_img[2] * shape_img[3]
             X = tl.tensor(
                 np.zeros(dim).reshape(shape_img[0], shape_img[1],
                                       shape_img[2], shape_img[3]))
             # this list will be used to check whether any wrangling needs to be done
             listTemporalDim = []
             for i in range(len(smoothImg)):
                 # extract the image data (may not come back as a plain numpy object)
                 X = smoothImg[i].get_data()
                 # append the "numpy-fied" NIfTI data
                 # X = tl.to_numpy(X)
                 self.niftyList.append(X)
                 # append the 4th value in the shape tuple to listTemporalDim
                 listTemporalDim.append(X.shape[3])
             # check if the temporal dimensions are equal over the subjects
             # if not so, then they need to be altered somehow. How that is
             # done will have to depend on changeDimension parameter.
             if (not self.checkEqual(listTemporalDim)
                 ) and self._changeDimension:
                 # TODO helper function that determines the smallest value
                 # in listTemporalDim and then removes the extra slices from
                 # the other subjects
                 self.reshapeSubjectTensor(listTemporalDim)
         else:
             raise ValueError("Shape of NIfTI data must be 4d")
     else:
         raise TypeError("Input must contain NIfTI-type objects")
Example 15
def test_symmetric_parafac_power_iteration():
    """Test for symmetric Parafac optimized with robust tensor power iterations"""
    rng = check_random_state(1234)
    tol_norm_2 = 10e-1
    tol_max_abs = 10e-1

    size = 5
    rank = 4
    true_factor = tl.tensor(rng.random_sample((size, rank)))
    true_weights = tl.ones(rank)
    tensor = tl.cp_to_tensor((true_weights, [true_factor] * 3))
    weights, factor = symmetric_parafac_power_iteration(tensor,
                                                        rank=10,
                                                        n_repeat=10,
                                                        n_iteration=10)

    rec = tl.cp_to_tensor((weights, [factor] * 3))
    error = tl.norm(rec - tensor, 2)
    error /= tl.norm(tensor, 2)
    assert_(error < tol_norm_2, 'norm 2 of reconstruction higher than tol')
    # Test the max abs difference between the reconstruction and the tensor
    assert_(
        tl.max(tl.abs(rec - tensor)) < tol_max_abs,
        'abs norm of reconstruction error higher than tol')
Example 16
Example on how to use :func:`tensorly.decomposition.parafac` and :func:`tensorly.decomposition.tucker` on images.
"""

import matplotlib.pyplot as plt
import tensorly as tl
import numpy as np
from scipy.misc import face
from scipy.ndimage import zoom
from tensorly.decomposition import parafac
from tensorly.decomposition import tucker
from math import ceil

random_state = 12345

image = face()
image = tl.tensor(zoom(face(), (0.3, 0.3, 1)), dtype='float64')


def to_image(tensor):
    """A convenience function to convert from a float dtype back to uint8"""
    im = tl.to_numpy(tensor)
    im -= im.min()
    im /= im.max()
    im *= 255
    return im.astype(np.uint8)


# Rank of the CP decomposition
cp_rank = 25
# Rank of the Tucker decomposition
tucker_rank = [100, 100, 2]
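
# Hedged continuation sketch: the snippet stops after defining the ranks, so the
# lines below are an assumption mirroring the TensorLy gallery example this code
# appears to come from (recent API names assumed; older releases used
# kruskal_to_tensor and a `ranks=` keyword for tucker).
weights, factors = parafac(image, rank=cp_rank, init='random', tol=10e-6,
                           random_state=random_state)
cp_reconstruction = tl.cp_to_tensor((weights, factors))

core, tucker_factors = tucker(image, rank=tucker_rank, init='random', tol=10e-5,
                              random_state=random_state)
tucker_reconstruction = tl.tucker_to_tensor((core, tucker_factors))

# Plot the original image next to the two reconstructions
fig, axes = plt.subplots(1, 3)
for ax, (img, title) in zip(axes, [(image, 'original'),
                                   (cp_reconstruction, 'CP'),
                                   (tucker_reconstruction, 'Tucker')]):
    ax.imshow(to_image(img))
    ax.set_title(title)
    ax.set_axis_off()
plt.tight_layout()
plt.show()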
import matplotlib.pyplot as plt
from tensorly.datasets.synthetic import gen_image
from tensorly.random import check_random_state
from tensorly.regression.tucker_regression import TuckerRegressor
import tensorly as tl

# Parameter of the experiment
image_height = 25
image_width = 25
# shape of the images
patterns = ['rectangle', 'swiss', 'circle']
# ranks to test
ranks = [1, 2, 3, 4, 5]

# Generate random samples
rng = check_random_state(1)
X = tl.tensor(rng.normal(size=(1000, image_height, image_width), loc=0, scale=1))

# Parameters of the plot, deduced from the data
n_rows = len(patterns)
n_columns = len(ranks) + 1
# Plot the three images
fig = plt.figure()

for i, pattern in enumerate(patterns):

    print('fitting pattern n.{}'.format(i))

    # Generate the original image
    weight_img = gen_image(region=pattern, image_height=image_height, image_width=image_width)
    weight_img = tl.tensor(weight_img)
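
    # Hedged continuation sketch: the snippet is cut off here, so the lines
    # below are an assumption mirroring the TensorLy Tucker-regression gallery
    # example this code appears to come from.
    from tensorly.base import tensor_to_vec, partial_tensor_to_vec

    # Labels: inner product between each sample and the pattern image
    y = tl.dot(partial_tensor_to_vec(X, skip_begin=1), tensor_to_vec(weight_img))

    # Plot the original pattern, then the weights learned at each rank
    ax = fig.add_subplot(n_rows, n_columns, i * n_columns + 1)
    ax.imshow(tl.to_numpy(weight_img), cmap=plt.cm.OrRd)
    ax.set_axis_off()

    for j, rank in enumerate(ranks):
        estimator = TuckerRegressor(weight_ranks=[rank, rank], tol=10e-7,
                                    n_iter_max=100, reg_W=1, verbose=0)
        estimator.fit(X, y)
        ax = fig.add_subplot(n_rows, n_columns, i * n_columns + j + 2)
        ax.imshow(tl.to_numpy(estimator.weight_tensor_), cmap=plt.cm.OrRd)
        ax.set_axis_off()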
Example 18
def test_tensor_creation():
    tensor = T.tensor(np.arange(12).reshape((4, 3)))
    tensor2 = tl.tensor(np.arange(12).reshape((4, 3)))

    assert T.is_tensor(tensor)
    assert T.is_tensor(tensor2)
Example 19
import time

import matplotlib.image as mpimg
import numpy as np
import tensorly as tl
from tensorly.decomposition import tucker

from utils_np import *
from parafac_np import parafac

from utils.core_parafac_analysis import *

### DON'T RUN THIS ON THE LAPTOP, IT'S RIP

# img=mpimg.imread('stinkbug.png')
img = mpimg.imread('image\\lost_in_trans.jpg')

# plt.imshow(img)
# plt.show()
img = np.array(img, dtype=np.float64)

# Get tucker core
img_tl = tl.tensor(img)
tucker_rank = [30, 80, 3]
core, tucker_factors = tucker(img_tl,
                              ranks=tucker_rank,
                              init='random',
                              tol=10e-5,
                              random_state=1234,
                              n_iter_max=100,
                              verbose=True)

core_np = tl.to_numpy(core)

# train on original data and core
max_r = 40
t0 = time.time()
original_data_error = error_parafac(tensor=img,
d = create_tensor(imageids4)
e = create_tensor(imageids5)
f = create_tensor(imageids6)
g = create_tensor(imageids7)
h = create_tensor(imageids8)

# Decompose tensor using CP-ALS


def write_to_file(i, list):
    path = "test" + str(i) + ".npy"
    numpy.save(path, list)


k = 3
tensor1 = t.tensor(a)
write_to_file(1, tensor1)
del tensor1
tensor2 = t.tensor(b)
write_to_file(2, tensor2)
del tensor2
tensor3 = t.tensor(c)
write_to_file(3, tensor3)
del tensor3
tensor4 = t.tensor(d)
write_to_file(4, tensor4)
del tensor4
tensor5 = t.tensor(e)
write_to_file(5, tensor5)
del tensor5
tensor6 = t.tensor(f)
Example 21
Example on how to use :func:`tensorly.decomposition.parafac` and :func:`tensorly.decomposition.tucker` on images.
"""
import math
import matplotlib.pyplot as plt
import tensorly as tl
import numpy as np
from scipy.misc import face, imresize
from tensorly.decomposition import parafac
from tensorly.decomposition import tucker
from math import ceil

from PIL import Image

random_state = 12345

image = tl.tensor(imresize(face(), 0.3), dtype='float64')

target = "1.png"
imgobj = Image.open(target)
imgarray = np.array(imgobj)
imgarray_21 = imgarray
for i in range(6):
    imgarray_21 = np.append(imgarray_21, imgarray, axis=2)

image = tl.tensor(imgarray_21)

print("##image shape##")
print(image.shape)


def to_image(tensor):
    """A convenience function to convert from a float dtype back to uint8"""
    im = tl.to_numpy(tensor)
    im -= im.min()
    im /= im.max()
    im *= 255
    return im.astype(np.uint8)
Example 22
def tensorize_barwise(spectrogram,
                      bars,
                      hop_length_seconds,
                      subdivision,
                      midi=False):
    """
    Returns a tensor-spectrogram from a spectrogram and bars starts and ends.
    Each bar of the tensor_spectrogram will contain the same number of frames, define by the "subdivision" parameter.
    These frames are selected from an over-sampled spectrogram, to adapt to the specific size of each bar.

    Parameters
    ----------
    spectrogram : list of list of floats or numpy array
        The spectrogram to return as a tensor-spectrogram.
    bars : list of tuples
        List of the bars (start, end), in seconds, to cut the spectrogram at bar delimitation.
    hop_length_seconds : float
        The hop_length, in seconds.
    subdivision : integer
        The number of subdivision of the bar to be contained in each slice of the tensor.
    midi : boolean, optional
        A boolean to know if the spectrogram is in midi.
        If it is, adds a correction to deletes void bars.
        The default is False.

    Returns
    -------
    tensorly tensor
        The tensor-spectrogram as a tensorly tensor.

    """
    freq_len = spectrogram.shape[0]
    hop = int(hop_length_seconds * 44100)
    if hop != 32 and hop != 64:
        print("hop_length at 44100 Hz = " + str(hop) + ", is that expected?")
    bars_idx = dm.segments_from_time_to_frame_idx(bars[1:], hop_length_seconds)
    #if hop == 512:
    #raise NotImplementedError("Probably wrong hop here, to debug")
    samples_init = [
        int(
            round(bars_idx[0][0] + k *
                  (bars_idx[0][1] - bars_idx[0][0]) / subdivision))
        for k in range(subdivision)
    ]

    if midi:
        raise err.OutdatedBehaviorException("Shouldn't be used, still buggy")

    tens = np.array(spectrogram[:, samples_init]).reshape(freq_len, subdivision, 1)
    #tens = padded_tens_with_zeros(tens_init, longest_bar)

    for bar in bars_idx[1:]:
        t_0 = bar[0]
        t_1 = bar[1]
        samples = [
            int(round(t_0 + k * (t_1 - t_0) / subdivision))
            for k in range(subdivision)
        ]
        if samples[-1] < spectrogram.shape[1]:
            current_bar_tensor_spectrogram = spectrogram[:, samples].reshape(
                freq_len, subdivision, 1)
            tens = np.append(tens, current_bar_tensor_spectrogram, axis=2)
        else:
            break

    return tl.tensor(tens, dtype=tl.float32)
Example 23
def initialize_cp(tensor,
                  rank,
                  init='svd',
                  svd='numpy_svd',
                  random_state=None,
                  non_negative=False,
                  normalize_factors=False):
    r"""Initialize factors used in `parafac`.
    The type of initialization is set using `init`. If `init == 'random'` then
    initialize factor matrices using `random_state`. If `init == 'svd'` then
    initialize the `m`th factor matrix using the `rank` left singular vectors
    of the `m`th unfolding of the input tensor.
    Parameters
    ----------
    tensor : ndarray
    rank : int
    init : {'svd', 'random'}, optional
    svd : str, default is 'numpy_svd'
        function to use to compute the SVD, acceptable values in tensorly.SVD_FUNS
    non_negative : bool, default is False
        if True, non-negative factors are returned
    Returns
    -------
    factors : CPTensor
        An initial cp tensor.
    """
    rng = check_random_state(random_state)

    if init == 'random':
        # factors = [tl.tensor(rng.random_sample((tensor.shape[i], rank)), **tl.context(tensor)) for i in range(tl.ndim(tensor))]
        # kt = CPTensor((None, factors))
        return random_cp(tl.shape(tensor),
                         rank,
                         normalise_factors=False,
                         random_state=rng,
                         **tl.context(tensor))

    elif init == 'svd':
        try:
            svd_fun = tl.SVD_FUNS[svd]
        except KeyError:
            message = 'Got svd={}. However, for the current backend ({}), the possible choices are {}'.format(
                svd, tl.get_backend(), tl.SVD_FUNS)
            raise ValueError(message)

        factors = []
        for mode in range(tl.ndim(tensor)):
            U, S, _ = svd_fun(unfold(tensor, mode), n_eigenvecs=rank)

            # Put SVD initialization on the same scaling as the tensor in case normalize_factors=False
            if mode == 0:
                idx = min(rank, tl.shape(S)[0])
                U = tl.index_update(U, tl.index[:, :idx], U[:, :idx] * S[:idx])

            if tensor.shape[mode] < rank:
                # TODO: this is a hack but it seems to do the job for now
                # factor = tl.tensor(np.zeros((U.shape[0], rank)), **tl.context(tensor))
                # factor[:, tensor.shape[mode]:] = tl.tensor(rng.random_sample((U.shape[0], rank - tl.shape(tensor)[mode])), **tl.context(tensor))
                # factor[:, :tensor.shape[mode]] = U
                random_part = tl.tensor(
                    rng.random_sample(
                        (U.shape[0], rank - tl.shape(tensor)[mode])),
                    **tl.context(tensor))
                U = tl.concatenate([U, random_part], axis=1)

            factors.append(U[:, :rank])

        kt = CPTensor((None, factors))

    elif isinstance(init, (tuple, list, CPTensor)):
        # TODO: Test this
        try:
            kt = CPTensor(init)
        except ValueError:
            raise ValueError(
                'If initialization method is a mapping, then it must '
                'be possible to convert it to a CPTensor instance')
    else:
        raise ValueError(
            'Initialization method "{}" not recognized'.format(init))

    if non_negative:
        kt.factors = [tl.abs(f) for f in kt[1]]

    if normalize_factors:
        kt = cp_normalize(kt)

    return kt
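
# A hypothetical usage sketch for initialize_cp (not part of the original
# snippet). It assumes the imports the function itself relies on (tensorly as
# tl, numpy as np, plus tensorly's check_random_state, random_cp, CPTensor,
# unfold and cp_normalize); the dict-style SVD_FUNS lookup implies an older
# TensorLy release.
import numpy as np
import tensorly as tl

tensor = tl.tensor(np.random.random((8, 7, 6)))
kt = initialize_cp(tensor, rank=3, init='svd')
weights, factors = kt
print([tl.shape(f) for f in factors])  # [(8, 3), (7, 3), (6, 3)]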
def Proximal_operator(X, step):  # the parameter X is a tensor
    # soft-thresholding: the proximal operator of the l1 norm with threshold `step`
    Res = np.copy(mxnet_backend.to_numpy(X))
    Res = np.sign(Res) * np.maximum(np.abs(Res) - step, 0)
    return tl.tensor(Res)
def FittingErrorComputation(X_set, G_set, listoffactors):
    L = len(X_set)
    Fitting_error = []
    for t in range(L):
        Fitting_error.append(
            Error(tl.tensor(X_set[t]), tl.tensor(G_set[t]), listoffactors,
                  "Single"))
    return Fitting_error
def RobustsubspaceLearning_Single(X, Pre_existingprojectionmatrices,
                                  Pre_existingenergymatrices, Pre_existingmean,
                                  beta, alpha, p):  #All parameters are arrays
    Tensor = tl.tensor(X)
    listoffactors = list(Pre_existingprojectionmatrices)
    listoffactors = Operations_listmatrices(listoffactors, "Tensorize")
    Energymatrices = list(Pre_existingenergymatrices)
    Mean = np.copy(Pre_existingmean)
    N = len(list(Tensor.shape))
    R = Tensor - Tensor_matrixproduct(
        X, Operations_listmatrices(listoffactors, "Transposetimes"))
    Weightmatriceslist = []
    for n in range(N):
        Eigenvalue = np.linalg.eig(Energymatrices[n])[0]
        U = listoffactors[n]
        [I, J] = np.array(U.shape, dtype=int)
        Xn = unfold(Tensor, mode=n)
        [In, Jn] = np.array(Xn.shape, dtype=int)
        Weightmatrix = np.zeros((In, Jn))
        Sigma = np.zeros((In, Jn))
        for i in range(In):
            for j in range(Jn):
                Sigma[i, j] = np.max(
                    np.multiply(np.sqrt(np.abs(Eigenvalue[1:p])),
                                mxnet_backend.to_numpy(U[1:p, i])))
        k = beta * Sigma
        if (n == 1):
            R = R.T
        for i in range(In):
            for j in range(Jn):

                #Weightmatrix[i,j]=1/(1+np.power(mxnet_backend.to_numpy(R[i,j])/np.maximum(k[i,j],0.001),2)):#This was the initial line
                Weightmatrix[i, j] = 1 / (
                    1 + np.power(R[i, j] / np.maximum(k[i, j], 0.001), 2))
        Weightmatriceslist.append(Weightmatrix)
    W = np.minimum(Weightmatriceslist[0], Weightmatriceslist[1].T)

    WeightTensor = tl.tensor(
        np.multiply(np.sqrt(mxnet_backend.to_numpy(W)),
                    mxnet_backend.to_numpy(Tensor)))
    Mean = alpha * Mean + (1 - alpha) * mxnet_backend.to_numpy(WeightTensor)
    Projectionmatricesresult = []
    Energymatricesresult = []
    for n in range(N):
        Xn = unfold(WeightTensor, mode=n)
        Covariancematrix = np.dot(
            np.dot(
                mxnet_backend.to_numpy(listoffactors[n]).T, Energymatrices[n]),
            mxnet_backend.to_numpy(listoffactors[n]))
        Covariancematrix = alpha * Covariancematrix + (1 - alpha) * np.dot(
            mxnet_backend.to_numpy(Xn),
            mxnet_backend.to_numpy(Xn).T)
        [Un, diagn, V] = np.linalg.svd(Covariancematrix)

        diagn = diagn / np.power(tl.norm(Xn, 2), 2)
        indices = np.argsort(diagn)
        indices = np.flip(indices, axis=0)

        [J, I] = np.array(listoffactors[n].shape, dtype=int)
        Unew = np.zeros((J, I))
        for j in range(J):
            Unew[j, :] = Un[indices[j], :]
        Sn = np.diag(diagn)
        Projectionmatricesresult.append(Unew)
        Energymatricesresult.append(Sn)
    return Projectionmatricesresult, Energymatricesresult, Mean, WeightTensor
Example 27
def parafac(tensor,
            rank,
            n_iter_max=100,
            tol=1e-8,
            random_state=None,
            verbose=False,
            return_errors=False,
            mode_three_val=[[0.5, 0.5, 0.0], [0.0, 0.5, 0.5]]):
    """CANDECOMP/PARAFAC decomposition via alternating least squares (ALS)

    Computes a rank-`rank` decomposition of `tensor` [1]_ such that,

        ``tensor = [| factors[0], ..., factors[-1] |]``.

    Parameters
    ----------
    tensor : ndarray
    rank  : int
        Number of components.
    n_iter_max : int
        Maximum number of iteration
    tol : float, optional
        (Default: 1e-8) Relative reconstruction error tolerance. The
        algorithm is considered to have found the global minimum when the
        reconstruction error is less than `tol`.
    random_state : {None, int, np.random.RandomState}
    verbose : int, optional
        Level of verbosity
    return_errors : bool, optional
        Activate return of iteration errors
    mode_three_val : list of two lists, optional
        Values to which the first two rows of the third (mode-3) factor are
        pinned before each factor update.


    Returns
    -------
    factors : ndarray list
        List of factors of the CP decomposition element `i` is of shape
        (tensor.shape[i], rank)
    errors : list
        A list of reconstruction errors at each iteration of the algorithms.

    References
    ----------
    .. [1] T.G. Kolda and B.W. Bader, "Tensor Decompositions and Applications",
       SIAM REVIEW, vol. 51, n. 3, pp. 455-500, 2009.
    """

    factors = initialize_factors(tensor, rank, random_state=random_state)
    rec_errors = []
    norm_tensor = tl.norm(tensor, 2)

    # Mode-3 values that control the country factors are set using the
    # mode_three_val argument.

    fixed_ja = mode_three_val[0]
    fixed_ko = mode_three_val[1]

    for iteration in range(n_iter_max):
        for mode in range(tl.ndim(tensor)):
            pseudo_inverse = tl.tensor(np.ones((rank, rank)),
                                       **tl.context(tensor))

            factors[2][0] = fixed_ja  # set mode-3 values
            factors[2][1] = fixed_ko  # set mode-3 values

            for i, factor in enumerate(factors):
                if i != mode:
                    pseudo_inverse = pseudo_inverse * tl.dot(
                        tl.transpose(factor), factor)
            factor = tl.dot(unfold(tensor, mode),
                            khatri_rao(factors, skip_matrix=mode))
            factor = tl.transpose(
                tl.solve(tl.transpose(pseudo_inverse), tl.transpose(factor)))
            factors[mode] = factor

        if tol:
            rec_error = tl.norm(tensor - kruskal_to_tensor(factors),
                                2) / norm_tensor
            rec_errors.append(rec_error)

            if iteration > 1:
                if verbose:
                    print('reconstruction error={}, variation={}.'.format(
                        rec_errors[-1], rec_errors[-2] - rec_errors[-1]))

                if tol and abs(rec_errors[-2] - rec_errors[-1]) < tol:
                    if verbose:
                        print('converged in {} iterations.'.format(iteration))
                    break

    if return_errors:
        return factors, rec_errors
    else:
        return factors
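
# A hypothetical usage sketch for the fixed-mode-3 parafac above (not part of
# the original snippet). It assumes the older TensorLy helpers the function
# calls (initialize_factors, unfold, khatri_rao, kruskal_to_tensor). Each row of
# mode_three_val must have `rank` entries; the rows are pinned while the other
# modes are updated.
import numpy as np
import tensorly as tl

tensor = tl.tensor(np.random.random((6, 5, 2)))
factors = parafac(tensor, rank=3,
                  mode_three_val=[[0.5, 0.5, 0.0], [0.0, 0.5, 0.5]])
print(factors[2].shape)  # (2, 3): one row per mode-3 slice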
#!/usr/bin/env python
# coding: utf-8

import numpy as np
import tensorly as tl

# Build a 6x4x2 tensor of consecutive integers
tensor = tl.tensor(np.arange(48).reshape((6, 4, 2)), dtype=tl.float32)

print(tensor.shape)

print(tensor)

# Frontal slices of the tensor
print(tensor[:, :, 0])

print(tensor[:, :, 1])

print(type(tensor))
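
# Follow-up sketch (not among the original cells): mode-0 unfolding flattens
# the 6x4x2 tensor into a 6x8 matrix.
print(tl.unfold(tensor, mode=0).shape)  # (6, 8)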
Example 29
def extract_Hankel_and_encoder_from_NN(model):
    Han = [tl.tensor(model.H[i].detach().numpy()) for i in range(len(model.H))]
    return Han
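
# A hypothetical usage sketch with a stand-in model (the real model class is
# not shown in this snippet); it only needs an attribute H holding torch blocks.
import torch

class _DummyModel:
    def __init__(self):
        self.H = [torch.randn(4, 4) for _ in range(3)]

han = extract_Hankel_and_encoder_from_NN(_DummyModel())
print(len(han), han[0].shape)  # 3 (4, 4)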
def ExperimentToyGeneral(etavalues, Nonnegative, Numberofexamples, Minibatchsize, max_iter, step, alpha, theta, nbepochs, randomarray, trainratio, period, pool):
    L = len(etavalues)
    Nbmean = len(randomarray)

    RMSEbatch = np.zeros(L)
    StdbatchRMSE = np.zeros(L)
    Fittingbatch = np.zeros(L)
    MREbatch = np.zeros(L)
    Stdbatchfitting = np.zeros(L)
    Stdbatchmre = np.zeros(L)

    for l in range(L):
        eta = etavalues[l]

        Stdbatchrmselist = []
        Stdbatchfittlist = []
        Stdbatchmrelist = []

        for k in range(len(randomarray)):
            m = randomarray[k]
            print("The noise number is")
            print(k + 1)

            X_set = GenerateTensorsGeneral(Numberofexamples, eta, m)

            Xtrain_set, Xtest_set = Split_into_two_subsets(X_set, trainratio)
            Xtrain = np.zeros((len(Xtrain_set), 30, 40, 50))
            Xtest = np.zeros((len(Xtest_set), 30, 40, 50))
            for t in range(len(Xtrain_set)):
                Xtrain[t, :, :, :] = Xtrain_set[t]
            for t in range(len(Xtest_set)):
                Xtest[t, :, :, :] = Xtest_set[t]

            Xtest_set = Operations_listmatrices(Xtest_set, "Tensorize")

            # sigma = 1/2
            # Noise = np.random.normal(loc=0, scale=sigma, size=(len(Xtrain_set), 30, 40, 50))
            # Noise = np.random.rand(len(Xtrain_set), 30, 40, 50)
            listoffactorsinit = [np.identity(len(Xtrain_set)),
                                 np.random.normal(loc=0, scale=1/100, size=(30, eta)),
                                 np.random.normal(loc=0, scale=1/100, size=(40, eta)),
                                 np.random.normal(loc=0, scale=1/100, size=(50, eta))]
            epsilon = np.power(10, -15, dtype=float)

            # The first dimensions must be equal for mathematical coherence purposes
            Coretensorsize = np.array([len(Xtrain_set), eta, eta, eta])
            Ginittrain = np.random.normal(loc=0, scale=1/10, size=(len(Xtrain_set), eta, eta, eta))
            Ginittest = np.random.normal(loc=0, scale=1/10, size=(len(Xtest_set), eta, eta, eta))
            Reprojectornot = False

            Pre_existingG_settrain = []
            for n in range(len(Xtrain_set)):
                Pre_existingG_settrain.append(Ginittrain[n, :, :, :])

            Pre_existingG_settest = []
            for n in range(len(Xtest_set)):
                Pre_existingG_settest.append(Ginittest[n, :, :, :])

            Ltest = len(Xtest_set)
            listoffactorsresult1, errorlist, nbiter = TuckerBatch(Xtrain, Coretensorsize, max_iter, listoffactorsinit, Ginittrain, Nonnegative, Reprojectornot, alpha, theta, step, epsilon)

            # The first dimensions must be equal for mathematical coherence purposes
            Coretensorsize = np.array([len(Xtest_set), eta, eta, eta])

            listoffactorsresult1[0] = np.identity(len(Xtest_set))

            Gtest1 = Sparse_coding(tl.tensor(Xtest), tl.tensor(Ginittest), Operations_listmatrices(listoffactorsresult1, "Tensorize"), Nonnegative, "Single", step, max_iter, alpha, theta, epsilon, pool)

            error1 = Error(tl.tensor(Xtest), Gtest1, Operations_listmatrices(listoffactorsresult1, "Tensorize"), "Single", pool)
            fittingerror1 = error1 / Ltest
            rmse1 = np.sqrt(error1 / Ltest)
            mrebatch = Mean_relative_error(tl.tensor(Xtest), Gtest1, Operations_listmatrices(listoffactorsresult1, "Tensorize"), "Single", pool)

            RMSEbatch[l] = RMSEbatch[l] + rmse1
            Fittingbatch[l] = Fittingbatch[l] + fittingerror1
            MREbatch[l] = MREbatch[l] + mrebatch
            Stdbatchrmselist.append(rmse1)
            Stdbatchfittlist.append(fittingerror1)
            Stdbatchmrelist.append(mrebatch)

        print("The value of eta is")
        print(eta)

        RMSEbatch[l] = RMSEbatch[l] / Nbmean
        print("The root mean square errors RMSEs are")
        print(RMSEbatch[l])

        StdbatchRMSE[l] = np.std(np.array(Stdbatchrmselist))
        print("The standard deviations associated to the RMSEs are")
        print(StdbatchRMSE[l])

        Fittingbatch[l] = Fittingbatch[l] / Nbmean
        print("The fitting errors FEs are")
        print(Fittingbatch[l])

        Stdbatchfitting[l] = np.std(np.array(Stdbatchfittlist))
        print("The standard deviations associated to the FEs are")
        print(Stdbatchfitting[l])

        MREbatch[l] = MREbatch[l] / Nbmean
        print("The mean relative errors MRE are")
        print(MREbatch[l])

        Stdbatchmre[l] = np.std(np.array(Stdbatchmrelist))
        print("The standard deviations associated to the MREs are")
        print(Stdbatchmre[l])

        # First test launched: I=5 in OnlineTensorDictionaryLearning

    return RMSEbatch, StdbatchRMSE, Fittingbatch, Stdbatchfitting, MREbatch, Stdbatchmre