Example #1
    def hosvd(self, tensor):
        '''
        Compute the truncated HOSVD of tensor.

        Parameters
        ----------
        tensor : np.ndarray or tensap.FullTensor or tensap.TreeBasedTensor
            The tensor to truncate.

        Raises
        ------
        ValueError
            If the input tensor is of the wrong type.

        Returns
        -------
        out : tensap.CanonicalTensor or tensap.TreeBasedTensor
            The truncated tensor.

        '''
        if isinstance(tensor, np.ndarray):
            tensor = tensap.FullTensor(tensor)

        order = tensor.order
        if order == 2:
            out = self.svd(tensor)
        else:
            max_rank = np.atleast_1d(self.max_rank)
            if max_rank.size == 1:
                max_rank = np.repeat(max_rank, order)
            local_tol = self.tolerance / np.sqrt(order)
            if isinstance(tensor, tensap.FullTensor):
                vec = np.empty(order, dtype=object)
                for dim in range(order):
                    self.max_rank = max_rank[dim]
                    vec[dim] = self.trunc_svd(tensor.matricize(dim).numpy(),
                                              local_tol)
                    vec[dim] = vec[dim].space[0]
                core = tensor.tensor_matrix_product(np.transpose(vec[0]), 0)
                for dim in np.arange(1, order):
                    core = core.tensor_matrix_product(
                        np.transpose(vec[dim]), dim)
                tensors = [core] + [tensap.FullTensor(x) for x in vec]
                tree = tensap.DimensionTree.trivial(order)
                out = tensap.TreeBasedTensor(tensors, tree)
            else:
                raise ValueError('Wrong type.')
        return out
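
A minimal usage sketch for the method above, assuming only the Truncator API visible in these examples (a constructor accepting a tolerance keyword, and hosvd accepting a numpy.ndarray or tensap.FullTensor):

import numpy as np
import tensap

U = tensap.FullTensor(np.random.rand(5, 6, 7))
TR = tensap.Truncator(tolerance=1e-6)
UR = TR.hosvd(U)  # tensap.TreeBasedTensor with a trivial (Tucker) tree
print('Error = %2.5e' % ((UR.full() - U).norm() / U.norm()))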
Example #2
    def _project(self, t):
        '''
        Compute the projection of a tensor on the functional bases.

        The method takes an AlgebraicTensor t whose entries are the values of
        the function on a product grid, and returns a FunctionalTensor obtained
        by applying the projections returned by the method
        _projection_operators.

        Parameters
        ----------
        t : tensap.Tensor
            The tensor used for the projection.

        Returns
        -------
        tensap.FunctionalTensor
            The obtained projection.

        '''
        tensor = deepcopy(t)
        P = self._projection_operators()
        for nu in range(tensor.order):
            alpha = tensor.tree.dim2ind[nu]
            if tensor.is_active_node[alpha - 1]:
                data = np.matmul(P[nu], tensor.tensors[alpha - 1].data)
                tensor.tensors[alpha - 1] = tensap.FullTensor(
                    data, 2, data.shape)
            else:
                pa = tensor.tree.parent(alpha)
                ch = tensor.tree.child_number(alpha)
                tensor.tensors[pa-1] = \
                    tensor.tensors[pa-1].tensor_matrix_product(P[nu], ch-1)
        return tensap.FunctionalTensor(tensor, self.bases)
Example #3
    def tree_based_tensor(self, tree=None, is_active_node=None):
        '''
        Convert the tensap.DiagonalTensor into a tensap.TreeBasedTensor.

        Parameters
        ----------
        tree : tensap.DimensionTree, optional
            The tree associated with the tree-based tensor representation. The
            default is a linear tree.
        is_active_node : list or numpy.ndarray, optional
            List or array of booleans indicating if each node of the tree is
            active. The default is True for all nodes except the leaves.

        Raises
        ------
        ValueError
            If the internal nodes are not all active.

        Returns
        -------
        tensap.TreeBasedTensor
            A tree-based tensor representation of the diagonal tensor.

        '''
        if tree is None:
            tree = tensap.DimensionTree.linear(self.order)

        if is_active_node is None:
            is_active_node = np.full(tree.nb_nodes, True)
            is_active_node[tree.is_leaf] = False

        tensors = np.empty(tree.nb_nodes, dtype=object)
        tensors[np.logical_not(is_active_node)] = tensap.FullTensor([])
        r = self.shape[0]
        for nod in np.arange(1, tree.nb_nodes + 1):
            ch = tree.children(nod)
            if tree.parent(nod) == 0:
                tensors[nod - 1] = tensap.FullTensor.diag(self.data, ch.size)
            elif tree.is_leaf[nod - 1] and is_active_node[nod - 1]:
                tensors[nod - 1] = tensap.FullTensor(np.eye(r), 2, [r, r])
            elif is_active_node[nod - 1]:
                tensors[nod - 1] = tensap.FullTensor.diag(
                    np.ones(r), ch.size + 1)
            elif not tree.is_leaf[nod - 1] and not is_active_node[nod - 1]:
                raise ValueError('The internal nodes should be active.')
        return tensap.TreeBasedTensor(tensors, tree)
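
A hypothetical usage sketch; the constructor call is an assumption (the method above only relies on the attributes self.data, self.order and self.shape of the tensap.DiagonalTensor):

import numpy as np
import tensap

# Hypothetical constructor: diagonal entries (1, 2, 3), order 3.
D = tensap.DiagonalTensor(np.array([1., 2., 3.]), 3)
T = D.tree_based_tensor()  # default: linear tree, inactive leaves
print(T.tree.nb_nodes)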
Example #4
    def parameter_gradient_eval_diag(self, mu, matrices=None):
        '''
        Compute the diagonal of the gradient of the tensor with respect to a
        given parameter.

        Parameters
        ----------
        mu : int
            Index of the parameter.
        matrices : list or numpy.array, optional
            Matrices with which to compute outer_product_eval_diag if alpha is
            associated with some dimensions. Useful for evaluating the gradient
            of a tensap.FunctionalTensor. The default is None, indicating
            identity matrices.

        Returns
        -------
        out : tensap.FullTensor
            The diagonal of the gradient of the tensor with respect to
            the parameter with index mu.

        '''
        rank = len(self.core.data)
        if mu == self.order + 1:
            N = self.space[0].shape[0]
            out = np.ones((N, rank))
            for nu in range(self.order):
                out *= self.space[nu]
            out = tensap.FullTensor(out)
        else:
            no_mu = np.setdiff1d(np.arange(1, self.order + 1), mu)
            N = self.space[no_mu[0] - 1].shape[0]
            f_mu = np.ones((N, rank))
            for nu in no_mu:
                f_mu *= self.space[nu - 1]
            if matrices is not None:
                out = tensap.FullTensor(f_mu).outer_product_eval_diag(
                    tensap.FullTensor(matrices[mu - 1]), 0, 0)
            else:
                out = tensap.FullTensor(f_mu).outer_product_eval_diag(
                    tensap.FullTensor(np.eye(self.shape[mu - 1])), [], [],
                    True)
        return out
Example #5
    def tensor_product_interpolation(self, fun, grid=None):
        '''
        Return the interpolation of function fun on a product grid.

        Parameters
        ----------
        fun : function or tensap.Function or tensap.Tensor
            The function to interpolate, or a tensor of order d whose entries
            are the evaluations of the function on a product grid.
        grid : list, optional
            The grid of points used for the interpolation. If one grid has more
            points than the dimension of the corresponding basis, use
            magicPoints for the selection of a subset of points adapted to the
            basis. The default is None, indicating to use the method
            self.bases.interpolation_points().

        Raises
        ------
        ValueError
            If the argument fun is neither a tensap.Function, a function nor
            a tensap.Tensor.

        Returns
        -------
        tensap.FunctionalTensor
            The interpolation of the function.
        output : dict
            A dictionary of outputs of the method.

        '''
        if grid is None:
            grid = self.bases.interpolation_points()
        else:
            grid = self.bases.interpolation_points(grid)
        grid = tensap.FullTensorGrid(grid)

        output = {}
        if hasattr(fun, '__call__') or isinstance(fun, tensap.Function):
            x_grid = grid.array()
            y = fun(x_grid)
            y = tensap.FullTensor(y, grid.dim, grid.shape)
            output['number_of_evaluations'] = y.storage()
        # TODO Create an empty class Tensor for when using isinstance?
        elif isinstance(fun, (tensap.FullTensor, tensap.CanonicalTensor,
                              tensap.TreeBasedTensor, tensap.DiagonalTensor)):
            y = fun
        else:
            raise ValueError('The argument fun should be a Function, ' +
                             'function, or a Tensor.')
        output['grid'] = grid

        B = self.bases.eval(grid.grids)
        B = [np.linalg.inv(x) for x in B]
        y = y.tensor_matrix_product(B)
        return tensap.FunctionalTensor(y, self.bases), output
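
The last three lines of the method reduce, dimension by dimension, to solving B c = y, where B is the basis evaluated at the interpolation points. A one-dimensional NumPy sketch of that step, independent of tensap (the monomial basis is only an illustration):

import numpy as np

pts = np.linspace(0, 1, 4)              # interpolation points
B = np.vander(pts, 4, increasing=True)  # basis evaluated on the grid
y = np.cos(pts)                         # function values on the grid
c = np.linalg.solve(B, y)               # interpolation coefficients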
Example #6
    def full(self):
        '''
        Convert the SparseTensor to a tensap.FullTensor.

        Returns
        -------
        y : tensap.FullTensor
            The SparseTensor as a tensap.FullTensor.

        '''
        y = tensap.FullTensor(np.zeros(self.shape))
        ind = tuple(self.indices.to_list())
        y.data[ind] = self.data
        return y
Example #7
    def eval_diag(self, dims=None):
        '''
        Extract the diagonal of the tensor.

        The tensor must be such that self.shape[mu] = n for all mu (in dims if
        provided).

        Parameters
        ----------
        dims : list or numpy.ndarray, optional
            The dimensions associated with the indices of the diagonal. The
            default is None, indicating that the indices refer to all the
            dimensions.

        Returns
        -------
        out : tensap.CanonicalTensor or tensap.FullTensor
            The evaluations of the diagonal of the tensor.

        '''
        if dims is None:
            is_none = True
            dims = np.arange(self.order)
        else:
            is_none = False
            dims = np.atleast_1d(dims)
            if dims.size == 1:
                print('Only one dimension: degenerate case for eval_diag, ' +
                      'returning the tensor itself.')
                return deepcopy(self)
            dims = np.sort(dims)
            new_dims = np.setdiff1d(range(self.order), dims[1:])

        out = self.space[dims[0]]
        for k in dims[1:]:
            out *= self.space[k]

        if is_none or dims.size == self.order:
            out = tensap.FullTensor(np.matmul(out, self.core.data))
        else:
            space = list(self.space)
            core = deepcopy(self.core)

            space[dims[0]] = out
            space = [space[i] for i in new_dims]
            core.shape = core.shape[new_dims]
            core.order = new_dims.size
            out = CanonicalTensor(space, core)
        return out
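
In NumPy terms, the full-diagonal branch above computes
diag[i] = sum_k core[k] * prod_mu space[mu][i, k]; a standalone sketch of that identity, independent of tensap:

import numpy as np

n, r, d = 6, 3, 4
space = [np.random.rand(n, r) for _ in range(d)]  # canonical factors
core = np.random.rand(r)                          # canonical core weights
out = space[0].copy()
for factor in space[1:]:
    out *= factor      # entrywise product of the factors
diag = out @ core      # diag[i] = sum_k core[k] * prod_mu space[mu][i, k]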
Example #8
    def eval_on_tensor_grid(self, x):
        '''
        Evaluate the Function on a grid x.

        Parameters
        ----------
        x : tensap.TensorGrid
            The tensap.TensorGrid used for the evaluation.

        Raises
        ------
        NotImplementedError
            If the method is not implemented.
        ValueError
            If x is not a tensap.TensorGrid object.

        Returns
        -------
        fx : numpy.ndarray
            The evaluation of the Function on the grid.

        '''
        x_a = x.array()
        fx = self.eval(x_a)

        if isinstance(x, tensap.SparseTensorGrid):
            if np.all(self.output_shape == 1):
                fx = tensap.SparseTensor(fx, x.indices, x.shape)
            else:
                raise NotImplementedError('Method not implemented.')
        elif isinstance(x, tensap.FullTensorGrid):
            if np.all(self.output_shape == 1):
                shape = x.shape
                if self.dim > 1:
                    fx = np.reshape(fx, shape, order='F')
            else:
                shape = np.concatenate((np.atleast_1d(x.shape),
                                        np.atleast_1d(self.output_shape)))
                fx = np.reshape(fx, shape, order='F')
            fx = tensap.FullTensor(fx, np.size(shape), shape)
        else:
            raise ValueError('A TensorGrid object must be provided.')

        return fx
Example #9
    def truncate(self, tensor):
        '''
        Compute the truncation of the tensor with relative precision
        self.tolerance and maximal rank self.max_rank.

        Parameters
        ----------
        tensor : numpy.ndarray or tensap.FullTensor or tensorflow.Tensor or
        tensap.TreeBasedTensor
            The tensor to truncate.

        Raises
        ------
        NotImplementedError
            If the decomposition is not implemented for the tensor format.
        ValueError
            If the tensor is of order 1.

        Returns
        -------
        out : tensap.CanonicalTensor or tensap.TreeBasedTensor
            The truncated tensor.

        '''

        if isinstance(tensor, (tf_Tensor, np.ndarray)):
            tensor = tensap.FullTensor(tensor)

        if tensor.order == 2:
            out = self.svd(tensor)
        elif tensor.order > 2:
            if isinstance(tensor, tensap.FullTensor):
                out = self.hosvd(tensor)
            elif isinstance(tensor, tensap.TreeBasedTensor):
                out = self.hsvd(tensor)
            else:
                raise NotImplementedError(
                    'Not implemented with this tensor format.')
        else:
            raise ValueError('Wrong tensor order.')
        return out
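
A short usage sketch of the dispatch above, using only API visible in these examples; an order-2 array is routed to the truncated SVD:

import numpy as np
import tensap

M = np.random.rand(50, 40)
TR = tensap.Truncator(tolerance=1e-4)
OUT = TR.truncate(M)  # order 2: handled by self.svd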
Example #10
    def tensor_diagonal_matrix_product(self, matrices, dims=None):
        '''
        Contract a SparseTensor with matrices built from their diagonals.

        The second dimension of the matrix matrices[k] is contracted with the
        k-th dimension of self, with the indices k given in dims (if provided).

        FIXME: not optimal, does not exploit sparsity.

        Parameters
        ----------
        matrices : numpy.ndarray or list of numpy.ndarray
            The diagonals of the matrices to use in the product.
        dims : list or numpy.ndarray, optional
            Indices of the contractions. The default is None, indicating all
            the dimensions.

        Returns
        -------
        SparseTensor
            The tensor after the contractions with the matrices.

        '''
        if not isinstance(matrices, list):
            matrices = [matrices[:, i] for i in range(np.shape(matrices)[1])]

        if dims is None:
            assert len(matrices) == self.order, \
                'len(matrices) must be self.order.'
            dims = range(self.order)
        else:
            dims = np.array(dims)
            assert len(matrices) == dims.size, \
                'len(matrices) must be equal to dims.size.'

        matrices = [
            tensap.FullTensor(np.diag(np.reshape(x, [-1]))) for x in matrices
        ]
        return self.tensor_matrix_product(matrices, dims)
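
For intuition, contracting dimension k with diag(v) rescales the mode-k fibers entrywise; a dense NumPy sketch of the same operation, independent of tensap:

import numpy as np

T = np.random.rand(3, 4, 5)
v = np.random.rand(4)         # diagonal of the matrix for dimension 1
out = T * v[None, :, None]    # same as the mode-1 product with np.diag(v)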
    def _tensor_product_b_alpha(Bs):
        '''
        Function used in the method tree_based_approximation.

        Parameters
        ----------
        Bs : list
            List containing s matrices B1, ..., Bs, where Bi is an
            n[i]-by-r[i] array.

        Returns
        -------
        numpy.ndarray
            An array of shape prod(n)-by-prod(r) whose entries are
            B[I, J] = B1[i_1, j_1] ... Bs[i_s, j_s],
            with I = (i_1, ..., i_s) and J = (j_1, ..., j_s).

        '''
        Bs = [tensap.FullTensor(x, 2, x.shape) for x in Bs]
        B = Bs[0]
        for k in np.arange(1, len(Bs)):
            B = B.tensordot(Bs[k], 0)
        return B.matricize(np.arange(0, B.order - 1, 2)).data
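
Entrywise, B is a Kronecker product of the Bi; a standalone NumPy sketch of the same matrix, up to the index-ordering convention used by matricize (an assumption not checked here):

from functools import reduce
import numpy as np

B1, B2 = np.random.rand(3, 2), np.random.rand(4, 5)
B = reduce(np.kron, [B1, B2])  # shape (12, 10)
# B[I, J] = B1[i_1, j_1] * B2[i_2, j_2] with I = (i_1, i_2), J = (j_1, j_2)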
    def alpha_principal_components(self, fun, shape, alpha, tol, B_alpha,
                                   I_alpha):
        '''
        Evaluate the alpha-principal components of a tensor f.

        For alpha in {0,...,d-1}, it evaluates the alpha-principal components
        of a tensor f, meaning the principal components of the matricisations
        f_alpha(i_alpha, i_notalpha), where i_alpha and i_notalpha are groups of
        indices.

        It evaluates f_alpha on the product of a set of indices in dimension
        alpha (of size Nalpha) and a set of random indices (N samples) in the
        complementary dimensions.
        Then, it computes approximations of the alpha-principal components
        in a given basis phi_1(i_alpha) ... phi_Nalpha(i_alpha).

        If tol is an integer, tol is the rank (number of principal components).
        If tol < 1, the rank (number of principal components) is determined such
        that the relative error after truncation is tol.

        Parameters
        ----------
        fun : fun or tensap.Function
            Function of d variables i_1, ..., i_d which returns the entries of
            the tensor.
        shape : list or numpy.ndarray
            The shape of the tensor.
        alpha : int or numpy.ndarray
            A dimension or group of dimensions in {0, ..., d-1}.
        tol : int or float
            The number of principal components or a positive number smaller
            than 1 (tolerance).
        B_alpha : numpy.ndarray
            Array of shape (N_alpha, N_alpha) whose i-th column is the
            evaluation of phi_i at the set of indices i_alpha in I_alpha.
        I_alpha : numpy.ndarray
            Array of shape (N_alpha, #alpha) containing N_alpha tuples i_alpha.

        Returns
        -------
        pc : numpy.ndarray
            The principal components of the tensor.
        output : dict
            A dictionary of outputs, containing the singular values
            corresponding to the principal components, as well as the set of
            indices at which the tensor has been evaluated.

        '''
        alpha = np.atleast_1d(alpha)
        B_alpha = np.atleast_2d(B_alpha)
        I_alpha = np.array(I_alpha)

        X = tensap.random_multi_indices(shape)
        d = len(shape)
        not_alpha = np.setdiff1d(range(d), alpha)

        if tol < 1:
            N = self.pca_sampling_factor * B_alpha.shape[1]
        else:
            N = self.pca_sampling_factor * tol
        N = int(np.ceil(N))

        X_not_alpha = tensap.RandomVector(X.random_variables[not_alpha])
        I_not_alpha = X_not_alpha.random(N)

        alpha_not_alpha = np.concatenate((alpha, not_alpha))
        ind = [np.nonzero(alpha_not_alpha == x)[0][0] for x in range(d)]

        output = {}
        if tol < 1 and self.pca_adaptive_sampling:
            A = tensap.FullTensor(np.zeros((I_alpha.shape[0], 0)), 2,
                                  [I_alpha.shape[0], 0])
            for k in range(N):
                product_grid = tensap.FullTensorGrid(
                    [I_alpha, I_not_alpha[k, :]]).array()
                A_k = np.linalg.solve(B_alpha, fun(product_grid[:, ind]))
                A.data = np.column_stack((A.data, A_k))
                pc, sin_val = A.principal_components(tol)
                if sin_val[-1, -1] < 1e-15 or pc.shape[1] < \
                        np.ceil((k+1) / self.pca_sampling_factor):
                    break
            output['number_of_evaluations'] = I_alpha.shape[0] * (k + 1)
        else:
            product_grid = tensap.FullTensorGrid([I_alpha,
                                                  I_not_alpha]).array()
            A = fun(product_grid[:, ind])
            A = np.reshape(A, [B_alpha.shape[0], N], 'F')
            A = np.linalg.solve(B_alpha, A)
            A = tensap.FullTensor(A, 2, [B_alpha.shape[0], N])
            pc, sin_val = A.principal_components(tol)
            output['number_of_evaluations'] = I_alpha.shape[0] * N

        output['singular_values'] = sin_val
        output['samples'] = product_grid
        return pc, output
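
When the tensor is available in full, the alpha-principal components are simply the leading left singular vectors of the alpha-matricisation; a minimal NumPy sketch of that definition, without the sampling machinery above:

import numpy as np

T = np.random.rand(4, 5, 6)
A = T.reshape(T.shape[0], -1)   # matricisation with alpha = {0}
U, sin_val, _ = np.linalg.svd(A, full_matrices=False)
pc = U[:, :2]                   # first two alpha-principal components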
    def tree_based_approximation(self, fun, shape, tree, is_active_node=None):
        '''
        Approximation of a tensor of order d in tree-based tensor format based
        on a Principal Component Analysis.

        For a prescribed precision, set TPCA.max_rank = np.inf and TPCA.tol to
        the desired precision (possibly an array of length d-1).

        For a prescribed rank, set TPCA.tol = np.inf and TPCA.max_rank to the
        desired rank (possibly an array of length d-1).

        See also the documentation of the class
        TensorPrincipalComponentAnalysis.

        Parameters
        ----------
        fun : fun or tensap.Function
            Function of d variables i_1, ..., i_d which returns the entries of
            the tensor.
        shape : list or numpy.ndarray
            The shape of the tensor.
        tree : tensap.DimensionTree
            The required dimension tree.
        is_active_node : list or numpy.ndarray, optional
            An array of booleans indicating which nodes of the tree are active.
            The default is None, setting all the nodes to active.

        Raises
        ------
        ValueError
            If the provided tolerance and max ranks are not correct.

        Returns
        -------
        tensap.TreeBasedTensor
            A tensor in tree based format.
        dict
            Dictionary containing the outputs of the method.

        '''
        solver = deepcopy(self)
        d = len(shape)

        if is_active_node is None:
            is_active_node = np.full(tree.nb_nodes, True)

        if (np.ndim(self.tol) == 0 or len(self.tol) == 1) and self.tol < 1:
            solver.tol /= np.sqrt(np.count_nonzero(is_active_node) - 1)

        if np.ndim(self.tol) == 0 or len(self.tol) == 1:
            solver.tol = np.full(tree.nb_nodes, solver.tol)
        elif len(self.tol) != tree.nb_nodes:
            raise ValueError('tol should be a scalar or an array of length ' +
                             'tree.nb_nodes.')

        if np.ndim(self.max_rank) == 0 or len(self.max_rank) == 1:
            solver.max_rank = np.full(tree.nb_nodes, self.max_rank)
        elif len(self.max_rank) != tree.nb_nodes:
            raise ValueError('max_rank should be a scalar or an array of ' +
                             'length tree.nb_nodes.')

        grids = [np.reshape(np.arange(x), (-1, 1)) for x in shape]
        alpha_basis = np.empty(tree.nb_nodes, dtype=object)
        alpha_grids = np.empty(tree.nb_nodes, dtype=object)
        outputs = np.empty(tree.nb_nodes, dtype=object)
        samples = np.empty(tree.nb_nodes, dtype=object)
        tensors = [[]] * tree.nb_nodes
        number_of_evaluations = 0
        for nu in range(d):
            alpha = tree.dim2ind[nu]
            B_alpha = np.eye(shape[nu])
            if is_active_node[alpha - 1]:
                tol_alpha = np.min(
                    (solver.tol[alpha - 1], solver.max_rank[alpha - 1]))
                pc_alpha, outputs[alpha-1] = \
                    solver.alpha_principal_components(fun, shape, nu,
                                                      tol_alpha, B_alpha,
                                                      grids[nu])
                samples[alpha - 1] = outputs[alpha - 1]['samples']
                shape_alpha = [shape[nu], pc_alpha.shape[1]]
                tensors[alpha - 1] = tensap.FullTensor(pc_alpha, 2,
                                                       shape_alpha)

                B_alpha = np.matmul(B_alpha, pc_alpha)
                I_alpha = tensap.magic_indices(B_alpha)[0]
                alpha_grids[alpha - 1] = grids[nu][I_alpha, :]
                alpha_basis[alpha - 1] = B_alpha[I_alpha, :]

                number_of_evaluations += \
                    outputs[alpha - 1]['number_of_evaluations']
                if solver.display:
                    print('alpha = %i : rank = %i, nb_eval = %i' %
                          (alpha, shape_alpha[-1],
                           outputs[alpha - 1]['number_of_evaluations']))
            else:
                alpha_grids[alpha - 1] = grids[nu]
                alpha_basis[alpha - 1] = B_alpha

        for level in np.arange(np.max(tree.level), 0, -1):
            for alpha in np.intersect1d(tree.nodes_with_level(level),
                                        tree.internal_nodes):
                S_alpha = tree.children(alpha)
                B_alpha = TensorPrincipalComponentAnalysis.\
                    _tensor_product_b_alpha(alpha_basis[S_alpha-1])
                alpha_grids[alpha-1] = \
                    tensap.FullTensorGrid(alpha_grids[S_alpha-1]).array()

                tol_alpha = np.min(
                    (solver.tol[alpha - 1], solver.max_rank[alpha - 1]))
                pc_alpha, outputs[alpha-1] = \
                    solver.alpha_principal_components(fun, shape,
                                                      tree.dims[alpha-1],
                                                      tol_alpha,
                                                      B_alpha,
                                                      alpha_grids[alpha-1])
                samples[alpha - 1] = outputs[alpha - 1]['samples']
                shape_alpha = np.concatenate(
                    ([x.shape[1]
                      for x in alpha_basis[S_alpha - 1]], [pc_alpha.shape[1]]))
                tensors[alpha - 1] = tensap.FullTensor(pc_alpha,
                                                       len(S_alpha) + 1,
                                                       shape_alpha)

                B_alpha = np.matmul(B_alpha, pc_alpha)
                I_alpha = tensap.magic_indices(B_alpha)[0]
                alpha_grids[alpha - 1] = alpha_grids[alpha - 1][I_alpha, :]
                alpha_basis[alpha - 1] = B_alpha[I_alpha, :]
                number_of_evaluations += \
                    outputs[alpha - 1]['number_of_evaluations']
                if solver.display:
                    print('alpha = %i : rank = %i, nb_eval = %i' %
                          (alpha, shape_alpha[-1],
                           outputs[alpha - 1]['number_of_evaluations']))

        alpha = tree.root
        S_alpha = tree.children(alpha)
        B_alpha = TensorPrincipalComponentAnalysis.\
            _tensor_product_b_alpha(alpha_basis[S_alpha-1])
        I_alpha = tensap.FullTensorGrid(alpha_grids[S_alpha - 1]).array()
        shape_alpha = [x.shape[1] for x in alpha_basis[S_alpha - 1]]
        ind = [np.nonzero(tree.dims[alpha - 1] == x)[0][0] for x in range(d)]
        tensors[alpha - 1] = tensap.FullTensor(
            np.linalg.solve(B_alpha, fun(I_alpha[:, ind])), len(S_alpha),
            shape_alpha)
        alpha_grids[alpha - 1] = I_alpha
        number_of_evaluations += I_alpha.shape[0]
        samples[alpha - 1] = I_alpha
        if solver.display:
            print('Interpolation - nb_eval = %i' % I_alpha.shape[0])

        f = tensap.TreeBasedTensor(tensors, tree)

        output = {
            'number_of_evaluations': number_of_evaluations,
            'samples': samples,
            'alpha_basis': alpha_basis,
            'alpha_grids': alpha_grids,
            'outputs': outputs
        }

        return f, output
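
A hedged usage sketch; the default constructor is an assumption, and the attributes set below (tol, max_rank) are those referenced in the method above:

import numpy as np
import tensap

SHAPE = [4, 4, 4, 4]

def fun(i):
    # Rows of i are multi-indices (i_1, ..., i_d); return the tensor entries.
    return np.cos(i[:, 0] + i[:, 1]) + np.sin(i[:, 2] + i[:, 3])

SOLVER = tensap.TensorPrincipalComponentAnalysis()  # assumed default constructor
SOLVER.tol = 1e-10        # prescribed precision ...
SOLVER.max_rank = np.inf  # ... with no rank constraint
TREE = tensap.DimensionTree.linear(len(SHAPE))
F, OUTPUT = SOLVER.tree_based_approximation(fun, SHAPE, TREE)
print('Evaluations: %i' % OUTPUT['number_of_evaluations'])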
Example #14
    raise NotImplementedError('Not implemented.')

TREE = SOLVER.tree
IS_ACTIVE_NODE = SOLVER.is_active_node

# %% Random shuffling of the dimensions associated with the leaves
RANDOMIZE = False
if RANDOMIZE:
    SOLVER.tree.dim2ind = np.random.permutation(SOLVER.tree.dim2ind)
    SOLVER.tree = SOLVER.tree.update_dims_from_leaves()

# %% Initial guess: known entries in a rank-1 tree-based tensor
guess = np.zeros(np.prod(sz))
guess[loc_TRAIN] = Y_TRAIN
guess = guess.reshape(sz, order='F')
guess = tensap.FullTensor(guess, order=ORDER, shape=sz)
tr = tensap.Truncator(tolerance=0, max_rank=1)
guess = tr.hsvd(guess, SOLVER.tree, SOLVER.is_active_node)

# %% Learning in tree-based tensor format
SOLVER.bases_eval = FEATURES_TRAIN
SOLVER.training_data = [None, Y_TRAIN]

SOLVER.tolerance['on_stagnation'] = 1e-8
SOLVER.tolerance['on_error'] = 1e-8

SOLVER.initialization_type = 'canonical'

SOLVER.linear_model_learning.regularization = False
SOLVER.linear_model_learning.basis_adaptation = True
SOLVER.linear_model_learning.error_estimation = True
Example #15
    Returns
    -------
    numpy.ndarray
        The evaluations of the function at the input points.

    '''
    return 1/(3+inputs[:, 0]+inputs[:, 2]) + inputs[:, 1] + \
        np.cos(inputs[:, 3] + inputs[:, 4])


GRID = tensap.FullTensorGrid(np.linspace(0, 1, NK), ORDER)
ARRAY = GRID.array()

U = fun(ARRAY)
U = tensap.FullTensor(U, ORDER, np.full(ORDER, NK))

# %% Higher Order SVD - truncation in Tucker format
TR = tensap.Truncator()
TR.tolerance = 1e-8
UR = TR.hosvd(U)
print('Error = %2.5e' % ((UR.full() - U).norm() / U.norm()))
print('Storage = %d' % UR.storage())
print('Dimension of spaces = %s\n' % UR.tensors[0].shape)

# %% Tree-based format
ARITY_INTERVAL = [2, 3]
TREE = tensap.DimensionTree.random(ORDER, ARITY_INTERVAL)
TR = tensap.Truncator()
TR.tolerance = 1e-8
UR = TR.hsvd(U, TREE)
Example #16
    def hsvd(self, tensor, tree=None, is_active_node=None):
        '''
        Compute the truncated SVD of tensor in tree-based tensor format.

        Parameters
        ----------
        tensor : tensap.FullTensor or tensap.TreeBasedTensor
            The tensor to truncate.
        tree : tensap.DimensionTree, optional
            The tree of the output tree-based tensor. The default is None;
            if tensor is a tensap.TreeBasedTensor, tensor.tree is used.
        is_active_node : numpy.ndarray, optional
            Logical array indicating whether the nodes are active. The default
            is None; if tensor is a tensap.TreeBasedTensor,
            tensor.is_active_node is used.

        Raises
        ------
        ValueError
            If a wrong value of the attribute _hsvd_type is provided.
        NotImplementedError
            If the method is not implemented for the format.

        Returns
        -------
        out : tensap.TreeBasedTensor
            The truncated tensor in tree-based tensor format.

        '''
        if isinstance(tensor, tensap.TreeBasedTensor):
            if tree is not None or is_active_node is not None:
                warnings.warn('The provided tree and/or is_active_node '
                              'are not taken into account when tensor is a '
                              'tensap.TreeBasedTensor.')
            is_active_node = tensor.is_active_node
            tree = tensor.tree
        elif is_active_node is None:
            is_active_node = np.full(tree.nb_nodes, True)

        max_rank = np.atleast_1d(self.max_rank)
        if max_rank.size == 1:
            max_rank = np.repeat(max_rank, tree.nb_nodes)
            max_rank[tree.root-1] = 1

        local_tol = self.tolerance / np.sqrt(
            np.count_nonzero(is_active_node)-1)

        if isinstance(tensor, tensap.FullTensor):
            root_rank_greater_than_one = tensor.order == len(tree.dim2ind)+1

            tensors = np.empty(tree.nb_nodes, dtype=object)
            shape = np.array(tensor.shape)
            nodes_x = tree.dim2ind
            ranks = np.ones(tree.nb_nodes, dtype=int)

            for level in np.arange(np.max(tree.level), 0, -1):
                for nod in tree.nodes_with_level(level):
                    if is_active_node[nod-1]:
                        if tree.is_leaf[nod-1]:
                            rep = np.nonzero(nod == nodes_x)[0][0]
                        else:
                            children = tree.children(nod)
                            rep = [np.nonzero(np.isin(nodes_x, x))[0][0]
                                   for x in children]
                        rep_c = tensap.fast_setdiff(np.arange(nodes_x.size),
                                                    rep)

                        if root_rank_greater_than_one:
                            rep_c = np.concatenate((rep_c, [tensor.order-1]))

                        self.max_rank = max_rank[nod-1]
                        tmp = self.trunc_svd(tensor.matricize(rep).numpy(),
                                             local_tol)
                        tensors[nod-1] = tmp.space[0]
                        ranks[nod-1] = tensors[nod-1].shape[1]
                        shape_loc = np.hstack((shape[rep], ranks[nod-1]))
                        tensors[nod-1] = tensap.FullTensor(tensors[nod-1],
                                                           shape=shape_loc)
                        tmp = np.matmul(tmp.space[1],
                                        np.diag(tmp.core.data))
                        shape = np.hstack((shape[rep_c], ranks[nod-1]))
                        tensor = tensap.FullTensor(tmp, shape=shape)

                        if root_rank_greater_than_one:
                            perm = np.concatenate((np.arange(tensor.order-2),
                                                   [tensor.order-1],
                                                   [tensor.order-2]))
                            tensor = tensor.transpose(perm)
                            shape = shape[perm]
                            rep_c = rep_c[:-1]

                        nodes_x = np.hstack((nodes_x[rep_c], nod))
                    else:
                        tensors[nod-1] = []

            root_ch = tree.children(tree.root)
            rep = [np.nonzero(np.isin(nodes_x, x))[0][0] for x in root_ch]
            if root_rank_greater_than_one:
                rep = np.concatenate((rep, [tensor.order-1]))
            tensors[tree.root-1] = tensor.transpose(rep)
            out = tensap.TreeBasedTensor(tensors, tree)
        elif isinstance(tensor, tensap.TreeBasedTensor):
            if self._hsvd_type == 1:
                out = tensor.orth()
                gram = out.gramians()[0]
                mat = np.empty(gram.shape, dtype=object)
                shape = np.zeros(gram.shape)
                for nod in range(gram.size):
                    # Truncation of the Gramian in trace norm for a control
                    # of Frobenius norm of the tensor
                    if gram[nod] is not None:
                        self.max_rank = max_rank[nod]
                        tmp = self.trunc_svd(gram[nod], local_tol ** 2)
                        shape[nod] = tmp.core.shape[0]
                        mat[nod] = np.transpose(tmp.space[0])

                # Interior nodes without the root
                for level in np.arange(1, np.max(tree.level)):
                    nod_level = tensap.fast_setdiff(
                        tree.nodes_with_level(level),
                        np.nonzero(tree.is_leaf)[0]+1)
                    for nod in tree.nodes_indices[nod_level-1]:
                        order = out.tensors[nod-1].order
                        out.tensors[nod-1] = \
                            out.tensors[nod-1].tensor_matrix_product(
                                mat[nod-1], order-1)
                        parent = tree.parent(nod)
                        ch_nb = tree.child_number(nod)
                        out.tensors[parent-1] = \
                            out.tensors[parent-1].tensor_matrix_product(
                                mat[nod-1], ch_nb-1)

                # Leaves
                for nod in tree.dim2ind:
                    if out.is_active_node[nod-1]:
                        order = out.tensors[nod-1].order
                        out.tensors[nod-1] = \
                            out.tensors[nod-1].tensor_matrix_product(
                                mat[nod-1], order-1)
                        parent = tree.parent(nod)
                        ch_nb = tree.child_number(nod)
                        out.tensors[parent-1] = \
                            out.tensors[parent-1].tensor_matrix_product(
                                mat[nod-1], ch_nb-1)
                # Update the shape
                out = out.update_attributes()
                out.is_orth = False

            elif self._hsvd_type == 2:
                out = tensor.orth()
                gram = out.gramians()[0]
                for level in np.arange(np.max(tree.level), 0, -1):
                    for nod in tensap.fast_intersect(
                            tree.nodes_with_level(level), out.active_nodes):
                        # Truncation of the Gramian in trace norm for a control
                        # of Frobenius norm of the tensor
                        self.max_rank = max_rank[nod-1]
                        tmp = self.trunc_svd(gram[nod-1], local_tol ** 2)
                        tmp = np.transpose(tmp.space[0])
                        order = out.tensors[nod-1].order
                        out.tensors[nod-1] = \
                            out.tensors[nod-1].tensor_matrix_product(tmp,
                                                                     order-1)
                        parent = tree.parent(nod)
                        ch_nb = tree.child_number(nod)
                        out.tensors[parent-1] = out.tensors[parent-1].\
                            tensor_matrix_product(tmp, ch_nb-1)
                out = out.update_attributes()
                out.is_orth = True
                out.orth_node = tree.root
            else:
                raise ValueError('Wrong value of _hsvd_type.')
        else:
            raise NotImplementedError('Method not implemented.')
        return out