Example #1
    def principal_components(self, parameter=None):
        '''
        Compute the principal components of an order-2 tensor.

        Parameters
        ----------
        parameter : float or int, optional
            A parameter controlling the number of principal components.
            - If it is an integer, the number of principal components is the
            minimum between parameter and self.shape[0].
            - If it is a float smaller than 1, the number of principal
            components is determined such that ||x - VV'x||_F < t ||x||_F,
            with x the tensor, V the matrix of principal components, t the
            parameter, V' the transpose of the matrix V and ||.||_F the
            Frobenius norm.
            The default is self.shape[0].

        Returns
        -------
        principal_components : numpy.ndarray
            The principal components of the tensor.
        singular_values : numpy.ndarray
            The diagonal matrix of the associated singular values.

        '''
        assert self.order == 2, 'The order of the tensor must be 2.'
        if parameter is None or parameter > self.shape[0]:
            parameter = self.shape[0]

        if parameter < 1:
            truncator = tensap.Truncator(tolerance=parameter, max_rank=np.inf)
        else:
            truncator = tensap.Truncator(tolerance=0, max_rank=parameter)
        tensor = truncator.truncate(self)
        principal_components = tensor.space[0]
        singular_values = np.diag(tensor.core.data)
        return principal_components, singular_values
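A minimal usage sketch for this method (not part of the original listing), assuming the surrounding class is tensap.FullTensor and that FullTensor infers order and shape from the data array; the sizes and the 0.5 tolerance are illustrative:

import numpy as np
import tensap

X = tensap.FullTensor(np.random.randn(50, 20))  # order-2 tensor (a matrix)
V, S = X.principal_components(5)        # at most 5 principal components
V_t, S_t = X.principal_components(0.5)  # tolerance-driven truncation
print(V.shape, np.diag(S))              # components and singular values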
Example #2
TENSORIZED_FUN.fun.evaluation_at_multiple_points = True

# Interpolation of the function in the tensor product feature space
DEGREE = 3
BASES = T.tensorized_function_functional_bases(DEGREE)
H = tensap.FullTensorProductFunctionalBasis(BASES)
FUN_INTERP, _ = H.tensor_product_interpolation(TENSORIZED_FUN)
TENSORIZED_FUN_INTERP = tensap.TensorizedFunction(FUN_INTERP, T)
X_TEST = T.X.random(100)
F_X_TEST = TENSORIZED_FUN_INTERP(X_TEST)
Y_TEST = FUN(X_TEST)
ERR_L2 = np.linalg.norm(Y_TEST - F_X_TEST) / np.linalg.norm(Y_TEST)
print('Relative L2 error for the interpolation = %2.5e' % ERR_L2)

# Truncation in tensor train format
from copy import deepcopy

TR = tensap.Truncator()
tens = TENSORIZED_FUN_INTERP.fun.tensor
for k in range(1, 9):
    TR.tolerance = 10**(-k)
    print('Tolerance = %s' % TR.tolerance)
    # Truncate a copy so the interpolation itself is not overwritten
    TENSORIZED_FUN_TT = deepcopy(TENSORIZED_FUN_INTERP)
    TENSORIZED_FUN_TT.fun.tensor = TR.ttsvd(tens)
    print('Representation ranks = %s' %
          TENSORIZED_FUN_TT.fun.tensor.representation_rank)
    print('Complexity = %s' % TENSORIZED_FUN_TT.fun.tensor.storage())
    X_TEST = T.X.random(1000)
    F_X_TEST = TENSORIZED_FUN_TT(X_TEST)
    Y_TEST = FUN(X_TEST)
    ERR_L2 = np.linalg.norm(Y_TEST - F_X_TEST) / np.linalg.norm(Y_TEST)
    print('Relative L2 error = %2.5e' % ERR_L2)
Example #3
TREE = SOLVER.tree
IS_ACTIVE_NODE = SOLVER.is_active_node

# %% Random shuffling of the dimensions associated to the leaves
RANDOMIZE = False
if RANDOMIZE:
    SOLVER.tree.dim2ind = np.random.permutation(SOLVER.tree.dim2ind)
    SOLVER.tree = SOLVER.tree.update_dims_from_leaves()

# %% Initial guess: known entries in a rank-1 tree-based tensor
guess = np.zeros(np.prod(sz))
guess[loc_TRAIN] = Y_TRAIN
guess = guess.reshape(sz, order='F')
guess = tensap.FullTensor(guess, order=ORDER, shape=sz)
tr = tensap.Truncator(tolerance=0, max_rank=1)
guess = tr.hsvd(guess, SOLVER.tree, SOLVER.is_active_node)
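As a quick sanity check (not in the original), the truncated initial guess should have all ranks equal to one; the ranks attribute of tensap tree-based tensors is assumed here:

print(guess.ranks)  # assumed attribute; expected: all ones after max_rank=1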

# %% Learning in tree-based tensor format
SOLVER.bases_eval = FEATURES_TRAIN
SOLVER.training_data = [None, Y_TRAIN]

SOLVER.tolerance['on_stagnation'] = 1e-8
SOLVER.tolerance['on_error'] = 1e-8

SOLVER.initialization_type = 'canonical'

SOLVER.linear_model_learning.regularization = False
SOLVER.linear_model_learning.basis_adaptation = True
SOLVER.linear_model_learning.error_estimation = True
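The excerpt stops at the solver configuration. A minimal continuation sketch, assuming SOLVER exposes the same solve() interface used by s_ini.solve() in Example #5 (returning the approximation and an output dictionary; the 'error' key follows that example):

F, OUTPUT = SOLVER.solve()
if isinstance(OUTPUT, dict) and 'error' in OUTPUT:
    print('Final error = %2.5e' % OUTPUT['error'])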
Example #4
# %% Random dimension tree
ORDER = 10
ARITY_INTERVAL = [2, 3]
TREE = tensap.DimensionTree.random(ORDER, ARITY_INTERVAL)
TREE.plot(title='Node indices')
TREE.plot_dims(title='Node dimensions')

# %% TreeBasedTensor: random generation
TENSOR = tensap.TreeBasedTensor.rand(TREE)
TENSOR.plot(title='Active nodes')
TENSOR.plot([x.storage() for x in TENSOR.tensors],
            title='Tensors\' storage complexity')
TENSOR.plot(TENSOR.representation_rank, title='Representation ranks')

# %% Truncation of a TreeBasedTensor with prescribed rank
TRUNCATOR = tensap.Truncator(tolerance=0, max_rank=np.random.randint(1, 5))
TENSOR = tensap.TreeBasedTensor.rand(TREE)
TENSOR_TRUNCATED = TRUNCATOR.hsvd(TENSOR)
print('Prescribed rank, error = %2.5e\n' %
      ((TENSOR - TENSOR_TRUNCATED).norm() / TENSOR.norm()))

# %% Truncation of a TreeBasedTensor with prescribed relative precision
ORDER = 10
TREE = tensap.DimensionTree.random(ORDER, 3)
TENSOR = tensap.TreeBasedTensor.rand(TREE)

TRUNCATOR = tensap.Truncator(tolerance=1e-8)
TRUNCATOR._hsvd_type = 1  # Root to leaves truncation
TENSOR_TRUNCATED_1 = TRUNCATOR.hsvd(TENSOR)
ERR_1 = (TENSOR - TENSOR_TRUNCATED_1).norm() / TENSOR.norm()
print('Root to leaves: prescribed tolerance = %2.5e, error = %2.5e\n' %
      (TRUNCATOR.tolerance, ERR_1))
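The listing is cut off here; the natural counterpart is a leaves-to-root truncation. A minimal sketch, assuming _hsvd_type = 2 selects that variant (the value 2 is an assumption inferred from the type-1 comment above):

TRUNCATOR._hsvd_type = 2  # assumed value for a leaves-to-root truncation
TENSOR_TRUNCATED_2 = TRUNCATOR.hsvd(TENSOR)
ERR_2 = (TENSOR - TENSOR_TRUNCATED_2).norm() / TENSOR.norm()
print('Leaves to root: prescribed tolerance = %2.5e, error = %2.5e\n' %
      (TRUNCATOR.tolerance, ERR_2))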
Example #5
    def initialize(self):
        if self.tree_adaptation:
            print('tree_adaptation not defined for CanonicalTensorLearning.')
            self.tree_adaptation = False

        # Update the factors one by one only if requested and the rank is not 1
        if self.alternating_minimization_parameters.get(
                'one_by_one_factor', False) and self.rank != 1:
            self._exploration_strategy = np.arange(
                1, self.alternating_minimization_parameters['inner_loops'] *
                self.rank * self.order + 2)
            self._number_of_parameters = self._exploration_strategy.size
        else:
            self.alternating_minimization_parameters['one_by_one_factor'] = \
                False
            self._exploration_strategy = np.arange(1, self.order + 2)
            self._number_of_parameters = self.order + 1

        shape = [x.shape[1] for x in self.bases_eval]
        if self.initialization_type == 'random':
            f = tensap.CanonicalTensor.randn(self.rank, shape)
        elif self.initialization_type == 'ones':
            f = tensap.CanonicalTensor.ones(self.rank, shape)
        elif self.initialization_type == 'initial_guess':
            f = self.initial_guess
        elif self.initialization_type in ('mean', 'mean_randomized'):
            if not isinstance(self.training_data, list) or \
                    len(self.training_data) == 1:
                raise NotImplementedError('Initialization type not ' +
                                          'implemented in unsupervised ' +
                                          'learning.')
            if isinstance(self.bases, tensap.FunctionalBases):
                means = self.bases.mean()
            else:
                means = [np.mean(x, 0) for x in self.bases_eval]
            if self.initialization_type == 'mean_randomized':
                means = [x + 0.01 * np.random.randn(*x.shape) for x in means]
            means = [np.reshape(x, [-1, 1]) for x in means]

            f = tensap.CanonicalTensor(
                means, np.atleast_1d(np.mean(self.training_data[1])))
        elif self.initialization_type == 'greedy':
            s_ini = deepcopy(self)
            s_ini.rank_adaptation = False
            s_ini.algorithm = 'greedy'
            s_ini.initialization_type = 'mean'
            s_ini.alternating_minimization_parameters['display'] = False
            s_ini.linear_model_learning.error_estimation = False
            s_ini.test_error = False
            s_ini.display = False
            f, output_ini = s_ini.solve()

            if self.display and 'error' in output_ini:
                print('Greedy initialization: rank = %i, error = %2.5e' %
                      (len(f.tensor.core.data), output_ini['error']))
        else:
            raise ValueError('Wrong initialization type.')

        if isinstance(f, tensap.FunctionalTensor):
            f = f.tensor

        if self.rank > len(f.core.data):
            fx = f.tensor_matrix_product(self.bases_eval).eval_diag().data

            s_ini = deepcopy(self)
            s_ini.rank_adaptation = False
            s_ini.algorithm = 'standard'
            s_ini.initialization_type = 'greedy'
            s_ini.rank = self.rank - len(f.core.data)
            s_ini.alternating_minimization_parameters['display'] = False
            s_ini.linear_model_learning.error_estimation = False
            s_ini.test_error = False
            s_ini.display = False
            if isinstance(s_ini.training_data, list) and \
                    len(s_ini.training_data) == 2:
                s_ini.training_data[1] -= fx
            elif isinstance(s_ini.loss_function, tensap.DensityL2LossFunction):
                s_ini.training_data = [s_ini.training_data, fx]

            f_add = s_ini.solve()[0]
            if isinstance(f_add, tensap.FunctionalTensor):
                f += f_add.tensor
            else:
                f += f_add

        if self.truncate_initialization:
            if self.order == 2:
                tr = tensap.Truncator()
                f = tr.truncate(f)
                f = tensap.CanonicalTensor(f.space, f.core.data)
                self.rank = len(f.core.data)
        return self, f
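For context (not part of the original listing), initialize() is called internally when solving. A hypothetical usage sketch follows; the CanonicalTensorLearning constructor arguments are assumptions, and FEATURES_TRAIN / Y_TRAIN are placeholders borrowed from Example #3:

SOLVER = tensap.CanonicalTensorLearning(3, tensap.SquareLossFunction())
SOLVER.rank = 4
SOLVER.initialization_type = 'mean'     # or 'random', 'ones', 'greedy', ...
SOLVER.bases_eval = FEATURES_TRAIN      # list of basis evaluations
SOLVER.training_data = [None, Y_TRAIN]
F, OUTPUT = SOLVER.solve()              # initialize() runs inside solve()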