Example #1
    def tensordot_matrix_product_except_dim(self, tensor2, matrices, dim):
        '''
        Particular type of contraction.

        Compute a special contraction of two tensors self and tensor2, a list
        of matrices, and a particular dimension dim. Note that dim must be a
        scalar, and matrices must be a list with self.order elements.

        Parameters
        ----------
        tensor2 : FullTensor
            The second tensor of the contraction.
        matrices : list
            The list of matrices of the contraction.
        dim : int
            The excluded dimension.

        Returns
        -------
        FullTensor
            The result of the contraction.

        '''
        assert isinstance(matrices, list), 'matrices should be a list.'
        assert len(matrices) == self.order, \
            'len(matrices) must be self.order.'

        dims = tensap.fast_setdiff(np.arange(self.order), dim)
        matrices = [matrices[i] for i in dims]
        tmp = tensor2.tensor_matrix_product(matrices, dims)
        tmp = self.tensordot(tmp, dims, dims)
        return tmp
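
A minimal usage sketch (assuming tensap is installed and FullTensor wraps a numpy array); the entry of matrices at position dim is never read, so any placeholder works there:

    import numpy as np
    import tensap

    # Hedged sketch: contract two order-3 tensors through matrices in every
    # dimension except dim = 1; matrices[1] is ignored.
    x = tensap.FullTensor(np.random.rand(3, 4, 5))
    y = tensap.FullTensor(np.random.rand(6, 4, 7))
    matrices = [np.random.rand(3, 6), None, np.random.rand(5, 7)]
    res = x.tensordot_matrix_product_except_dim(y, matrices, 1)
    print(res.shape)  # expected: (4, 4), the excluded dimension of each tensor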
Example #2
    def matricize(self, dims1, dims2=None):
        '''
        Return the matricization of the tensor.

        Parameters
        ----------
        dims1 : list or numpy.ndarray
            The dimensions of the tensor corresponding to the first dimension
            of the matricization.
        dims2 : list or numpy.ndarray, optional
            The dimensions of the tensor corresponding to the second
            dimension of the matricization. The default is None, for which
            they are deduced from dims1.

        Returns
        -------
        FullTensor
            The matricization of the tensor.

        '''
        dims1 = np.atleast_1d(dims1)
        if dims1.size == 1 and dims1 == -1:
            dims1 = np.array([self.order - 1])
        if dims2 is None:
            dims2 = tensap.fast_setdiff(np.arange(self.order), dims1)
        else:
            dims2 = np.atleast_1d(dims2)
        shape1 = [self.shape[i] for i in dims1]
        shape2 = [self.shape[i] for i in dims2]

        tensor = FullTensor(self)
        tensor = tensor.transpose(np.concatenate((dims1, dims2)))
        tensor = tensor.reshape([np.prod(shape1), np.prod(shape2)])
        return FullTensor(tensor)
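
A minimal usage sketch of the unfolding:

    import numpy as np
    import tensap

    # Hedged sketch: matricize an order-3 tensor with dims (0, 2) as row
    # dimensions; the column dimension (1) is deduced automatically.
    x = tensap.FullTensor(np.arange(24).reshape(2, 3, 4))
    m = x.matricize([0, 2])
    print(m.shape)  # expected: (8, 3), i.e. 2*4 rows and 3 columns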
Example #3
    def eval(self, x):
        '''
        Evaluate the CompositionalModelFunction at points x.

        Parameters
        ----------
        x : list or numpy.ndarray
            The points at which the function is to be evaluated.

        Returns
        -------
        numpy.ndarray
            The evaluations of the function at points x.

        '''
        x = np.atleast_2d(x)
        tree = self.tree
        z = np.empty(tree.nb_nodes, dtype=object)
        for nu in range(self.dim):
            z[tree.dim2ind[nu] - 1] = x[:, nu]

        for level in np.arange(np.max(tree.level), -1, -1):
            nodes = tree.nodes_with_level(level)
            for nod in tensap.fast_setdiff(nodes, tree.dim2ind):
                ch = tree.children(nod)
                z[nod - 1] = self.fun[nod - 1](*z[ch - 1])

        return z[tree.root - 1]
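
The traversal is bottom-up: leaves receive the input columns, and each internal node applies its function to the outputs of its children. A self-contained illustration of this pattern (plain Python, with a made-up tree, independent of tensap's tree API):

    import numpy as np

    # Hedged sketch of the bottom-up evaluation pattern; the node numbering,
    # tree and functions are hypothetical.
    children = {1: [2, 3], 3: [4, 5]}   # node 1 is the root
    leaf_dim = {2: 0, 4: 1, 5: 2}       # leaf node -> input dimension
    fun = {1: lambda a, b: a + b, 3: lambda a, b: a * b}

    x = np.random.rand(10, 3)           # 10 points, 3 input dimensions
    z = {leaf: x[:, d] for leaf, d in leaf_dim.items()}
    for node in [3, 1]:                 # deepest level first, root last
        z[node] = fun[node](*[z[c] for c in children[node]])
    print(z[1].shape)                   # (10,): one evaluation per point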
Example #4
    def cat(self, tensor2, dims=None):
        '''
        Concatenate the tensors.

        Concatenate self and tensor2 into a tensor z such that:
        z(i_1, ..., i_d) = x(i_1, ..., i_d) if i_k <= sz[k]-1 for k in dims,
        z(i_1, ..., i_d) = y(i_1-sz[0], ..., i_d-sz[d-1]) if i_k >= sz[k]
        for k in dims,
        z(i_1, ..., i_d) = 0 otherwise, with x = self, y = tensor2,
        sz = self.shape, and dims = range(self.order) if not provided.

        Parameters
        ----------
        tensor2 : FullTensor
            The second tensor to be concatenated.
        dims : list or numpy.ndarray, optional
            The dimensions of the concatenation. The default is None,
            indicating all the dimensions.

        Returns
        -------
        data : FullTensor
            The concatenated tensors.

        '''
        assert self.order == tensor2.order, \
            'The orders of the tensors must be equal.'

        tensor1 = FullTensor(self)
        order = self.order
        shape1 = np.atleast_1d(self.shape)
        shape2 = np.atleast_1d(tensor2.shape)

        if dims is None:
            dims = range(self.order)

        dims = np.atleast_1d(dims)
        dims_not = tensap.fast_setdiff(np.arange(order), dims)
        assert np.all([a == b for a, b in zip(shape1[dims_not],
                                              shape2[dims_not])]), \
            'The dimensions of the tensors are not compatible.'

        if dims.size == 1:
            data = np.concatenate([tensor1.data, tensor2.data], dims[0])
        else:
            shape_out = np.array(shape1)
            shape_out[dims] = shape1[dims] + shape2[dims]

            padding = np.transpose([[0] * order, shape_out - shape1])
            data = np.pad(tensor1.data, padding)
            padding = np.transpose([shape_out - shape2, [0] * order])
            data += np.pad(tensor2.data, padding)

        return FullTensor(data)
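
A minimal usage sketch: concatenating in all dimensions places self in the "upper-left" block and tensor2 in the "lower-right" block, with zeros elsewhere.

    import numpy as np
    import tensap

    # Hedged sketch: concatenate two matrices along both dimensions.
    x = tensap.FullTensor(np.ones((2, 2)))
    y = tensap.FullTensor(2 * np.ones((3, 3)))
    z = x.cat(y)
    print(z.shape)  # expected: (5, 5), with x in z[:2, :2] and y in z[2:, 2:]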
Example #5
    def tensor_matrix_product(self, matrices, dims=None):
        '''
        Contract a tensor with matrices.

        The second dimension of the matrix matrices[k] is contracted with the
        dims[k]-th dimension of self (with dims = range(self.order) if not
        provided).

        Parameters
        ----------
        matrices : numpy.ndarray or list of numpy.ndarray
            The matrices to use in the product.
        dims : list or numpy.ndarray, optional
            Indices of the contractions. The default is None, indicating all
            the dimensions.

        Returns
        -------
        FullTensor
            The tensor after the contractions with the matrices.

        '''
        if dims is None:
            assert isinstance(matrices, (list, np.ndarray)), \
                'matrices should be a list or a numpy.ndarray.'
            assert len(matrices) == self.order, \
                'len(matrices) must be self.order.'
            dims = range(self.order)
        else:
            dims = np.atleast_1d(dims)
            if not isinstance(matrices, list):
                matrices = [matrices]
            assert len(matrices) == dims.size, \
                'len(matrices) must be equal to dims.size.'

        # Numpy implementation
        # matrices = [np.array(x) for x in matrices]
        # data = self.numpy()
        # if self.order == 1:
        #     data = np.matmul(matrices[0], data)
        # else:
        #     k = 0
        #     for dim in np.nditer(dims):
        #         perm_dims = np.concatenate(([dim], np.arange(dim),
        #                                     np.arange(dim+1, self.order)))
        #         data = np.transpose(data, perm_dims)
        #         shape0 = np.array(data.shape)
        #         data = np.reshape(data, [shape0[0], np.prod(shape0[1:])])
        #         data = np.matmul(matrices[k], data)
        #         shape0[0] = matrices[k].shape[0]
        #         data = np.reshape(data, shape0)
        #         data = np.transpose(data, np.argsort(perm_dims))
        #         k += 1
        # return FullTensor(data)

        tensor = FullTensor(self)
        matrices = [FullTensor(x) for x in matrices]
        for i, dim in enumerate(dims):
            index = np.concatenate(
                (tensap.fast_setdiff(np.arange(tensor.order), dim), [dim]))
            tensor = tensor.tensordot(matrices[i], dim, 1).itranspose(index)
        return tensor
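
A minimal usage sketch with a single matrix:

    import numpy as np
    import tensap

    # Hedged sketch: the second dimension (size 3) of the matrix is
    # contracted with dimension 1 of the tensor, which is replaced by the
    # matrix's first dimension (size 5).
    x = tensap.FullTensor(np.random.rand(2, 3, 4))
    a = np.random.rand(5, 3)
    y = x.tensor_matrix_product(a, dims=1)
    print(y.shape)  # expected: (2, 5, 4)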
Example #6
    def eval_at_indices(self, indices, dims=None):
        '''
        Evaluate the tensor at indices.

        If dims is None, return
        s(k) = x(indices(k, 1), indices(k, 2), ..., indices(k, d)),
        1 <= k <= indices.shape[0].

        If dims is not None, return a partial evaluation: up to a permutation
        (placing the dimensions dims on the left), return
        s(k, i_1, ..., i_d') = x(indices(k, 1), indices(k, 2), ...,
        indices(k, M), i_1, ..., i_d'),
        1 <= k <= indices.shape[0], with M = dims.size and
        d' = self.order - M.

        Parameters
        ----------
        indices : list or numpy.ndarray
            The indices of the tensor.
        dims : list or numpy.ndarray, optional
            The dimensions associated with the indices. The default is None,
            indicating that indices refers to all the dimensions.

        Returns
        -------
        evaluations : numpy.ndarray or FullTensor
            The evaluations of the tensor.

        '''
        indices = np.atleast_2d(indices)
        if dims is None:
            dims = np.arange(self.order)
        else:
            dims = np.atleast_1d(dims)
            if indices.shape[1] != dims.size:
                indices = np.transpose(indices)
            assert dims.size == indices.shape[1], \
                'Wrong size of multi-indices.'
            sort_ind = np.argsort(dims)
            dims = dims[sort_ind]
            indices = indices[:, sort_ind]
        assert dims.size == indices.shape[1], 'Wrong size of multi-indices.'

        if dims.size == self.order:
            data = self.data
            evaluations = np.array([data[tuple(i)] for i in indices.tolist()])
        elif dims.size == 1:
            ind = [':'] * self.order
            ind[dims[0]] = np.ravel(indices).tolist()
            evaluations = self.sub_tensor(*ind)
        else:
            no_dims = tensap.fast_setdiff(np.arange(self.order), dims)
            indices = np.ravel_multi_index(np.transpose(indices),
                                           [self.shape[i] for i in dims])
            evaluations = self.matricize(dims).sub_tensor(indices, ':')
            evaluations = evaluations.reshape([indices.size] +
                                              [self.shape[i] for i in no_dims])
            left_dims = np.arange(dims[0])
            evaluations = evaluations.transpose(
                np.concatenate((np.arange(1, left_dims.size + 1), [0],
                                np.arange(left_dims.size + 1,
                                          self.order - dims.size + 1))))
        return evaluations
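
A minimal usage sketch of full and partial evaluation:

    import numpy as np
    import tensap

    # Hedged sketch on an order-3 tensor.
    x = tensap.FullTensor(np.random.rand(2, 3, 4))

    # Full evaluation: one scalar per row of multi-indices.
    print(x.eval_at_indices(np.array([[0, 1, 2], [1, 2, 3]])))

    # Partial evaluation in dimensions (0, 2): k runs over the two index
    # rows, dimension 1 is kept, giving a tensor of shape (2, 3).
    print(x.eval_at_indices(np.array([[0, 1], [1, 3]]), dims=[0, 2]).shape)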
Example #7
    def hsvd(self, tensor, tree=None, is_active_node=None):
        '''
        Compute the truncated svd in tree-based tensor format of tensor.

        Parameters
        ----------
        tensor : tensap.FullTensor or tensap.TreeBasedTensor
            The tensor to truncate.
        tree : tensap.DimensionTree, optional
            The tree of the output tree-based tensor. The default is None,
            in which case tensor.tree is used if tensor is a
            tensap.TreeBasedTensor.
        is_active_node : numpy.ndarray, optional
            Logical array indicating whether the nodes are active. The
            default is None, in which case tensor.is_active_node is used if
            tensor is a tensap.TreeBasedTensor.

        Raises
        ------
        ValueError
            If a wrong value of the attribute _hsvd_type is provided.
        NotImplementedError
            If the method is not implemented for the format.

        Returns
        -------
        out : tensap.TreeBasedTensor
            The truncated tensor in tree-based tensor format.

        '''
        if isinstance(tensor, tensap.TreeBasedTensor):
            if tree is not None or is_active_node is not None:
                warnings.warn('The provided tree and/or is_active_node '
                              'are not taken into account when x is a '
                              'tensap.TreeBasedTensor.')
            is_active_node = tensor.is_active_node
            tree = tensor.tree
        elif is_active_node is None:
            is_active_node = np.full(tree.nb_nodes, True)

        max_rank = np.atleast_1d(self.max_rank)
        if max_rank.size == 1:
            max_rank = np.repeat(max_rank, tree.nb_nodes)
            max_rank[tree.root-1] = 1

        local_tol = self.tolerance / np.sqrt(
            np.count_nonzero(is_active_node)-1)

        if isinstance(tensor, tensap.FullTensor):
            root_rank_greater_than_one = tensor.order == len(tree.dim2ind)+1

            tensors = np.empty(tree.nb_nodes, dtype=object)
            shape = np.array(tensor.shape)
            nodes_x = tree.dim2ind
            ranks = np.ones(tree.nb_nodes, dtype=int)

            for level in np.arange(np.max(tree.level), 0, -1):
                for nod in tree.nodes_with_level(level):
                    if is_active_node[nod-1]:
                        if tree.is_leaf[nod-1]:
                            rep = np.nonzero(nod == nodes_x)[0][0]
                        else:
                            children = tree.children(nod)
                            rep = [np.nonzero(np.isin(nodes_x, x))[0][0]
                                   for x in children]
                        rep_c = tensap.fast_setdiff(np.arange(nodes_x.size),
                                                    rep)

                        if root_rank_greater_than_one:
                            rep_c = np.concatenate((rep_c, [tensor.order-1]))

                        self.max_rank = max_rank[nod-1]
                        tmp = self.trunc_svd(tensor.matricize(rep).numpy(),
                                             local_tol)
                        tensors[nod-1] = tmp.space[0]
                        ranks[nod-1] = tensors[nod-1].shape[1]
                        shape_loc = np.hstack((shape[rep], ranks[nod-1]))
                        tensors[nod-1] = tensap.FullTensor(tensors[nod-1],
                                                           shape=shape_loc)
                        tmp = np.matmul(tmp.space[1],
                                        np.diag(tmp.core.data))
                        shape = np.hstack((shape[rep_c], ranks[nod-1]))
                        tensor = tensap.FullTensor(tmp, shape=shape)

                        if root_rank_greater_than_one:
                            perm = np.concatenate((np.arange(tensor.order-2),
                                                   [tensor.order-1],
                                                   [tensor.order-2]))
                            tensor = tensor.transpose(perm)
                            shape = shape[perm]
                            rep_c = rep_c[:-1]

                        nodes_x = np.hstack((nodes_x[rep_c], nod))
                    else:
                        tensors[nod-1] = []

            root_ch = tree.children(tree.root)
            rep = [np.nonzero(np.isin(nodes_x, x))[0][0] for x in root_ch]
            if root_rank_greater_than_one:
                rep = np.concatenate((rep, [tensor.order-1]))
            tensors[tree.root-1] = tensor.transpose(rep)
            out = tensap.TreeBasedTensor(tensors, tree)
        elif isinstance(tensor, tensap.TreeBasedTensor):
            if self._hsvd_type == 1:
                out = tensor.orth()
                gram = out.gramians()[0]
                mat = np.empty(gram.shape, dtype=object)
                shape = np.zeros(gram.shape)
                for nod in range(gram.size):
                    # Truncation of the Gramian in trace norm for a control
                    # of Frobenius norm of the tensor
                    if gram[nod] is not None:
                        self.max_rank = max_rank[nod]
                        tmp = self.trunc_svd(gram[nod], local_tol ** 2)
                        shape[nod] = tmp.core.shape[0]
                        mat[nod] = np.transpose(tmp.space[0])

                # Interior nodes without the root
                for level in np.arange(1, np.max(tree.level)):
                    nod_level = tensap.fast_setdiff(
                        tree.nodes_with_level(level),
                        np.nonzero(tree.is_leaf)[0]+1)
                    for nod in tree.nodes_indices[nod_level-1]:
                        order = out.tensors[nod-1].order
                        out.tensors[nod-1] = \
                            out.tensors[nod-1].tensor_matrix_product(
                                mat[nod-1], order-1)
                        parent = tree.parent(nod)
                        ch_nb = tree.child_number(nod)
                        out.tensors[parent-1] = \
                            out.tensors[parent-1].tensor_matrix_product(
                                mat[nod-1], ch_nb-1)

                # Leaves
                for nod in tree.dim2ind:
                    if out.is_active_node[nod-1]:
                        order = out.tensors[nod-1].order
                        out.tensors[nod-1] = \
                            out.tensors[nod-1].tensor_matrix_product(
                                mat[nod-1], order-1)
                        parent = tree.parent(nod)
                        ch_nb = tree.child_number(nod)
                        out.tensors[parent-1] = \
                            out.tensors[parent-1].tensor_matrix_product(
                                mat[nod-1], ch_nb-1)
                # Update the shape
                out = out.update_attributes()
                out.is_orth = False

            elif self._hsvd_type == 2:
                out = tensor.orth()
                gram = out.gramians()[0]
                for level in np.arange(np.max(tree.level), 0, -1):
                    for nod in tensap.fast_intersect(
                            tree.nodes_with_level(level), out.active_nodes):
                        # Truncation of the Gramian in trace norm for a control
                        # of Frobenius norm of the tensor
                        self.max_rank = max_rank[nod-1]
                        tmp = self.trunc_svd(gram[nod-1], local_tol ** 2)
                        tmp = np.transpose(tmp.space[0])
                        order = out.tensors[nod-1].order
                        out.tensors[nod-1] = \
                            out.tensors[nod-1].tensor_matrix_product(tmp,
                                                                     order-1)
                        parent = tree.parent(nod)
                        ch_nb = tree.child_number(nod)
                        out.tensors[parent-1] = out.tensors[parent-1].\
                            tensor_matrix_product(tmp, ch_nb-1)
                out = out.update_attributes()
                out.is_orth = True
                out.orth_node = tree.root
            else:
                raise ValueError('Wrong value of _hsvd_type.')
        else:
            raise NotImplementedError('Method not implemented.')
        return out
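
A minimal usage sketch, assuming this method belongs to a truncator object exposing tolerance and max_rank (as the attributes read above suggest), for instance tensap.Truncator:

    import numpy as np
    import tensap

    # Hedged sketch: truncate a full tensor into tree-based format over a
    # balanced dimension tree.
    x = tensap.FullTensor(np.random.rand(4, 5, 6, 7))
    tree = tensap.DimensionTree.balanced(4)
    truncator = tensap.Truncator(tolerance=1e-6)
    y = truncator.hsvd(x, tree)
    print(y.ranks)  # ranks of the tree-based representation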
Example #8
    def parameter_gradient_eval_dmrg(self,
                                     alpha,
                                     x=None,
                                     dmrg_type='dmrg',
                                     *args):
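        '''
        Compute the gradient of the function with respect to its alpha-th
        parameter for a DMRG-type algorithm, evaluated at some points.

        Parameters
        ----------
        alpha : int
            The number of the parameter with respect to which the gradient
            of self is computed.
        x : list or numpy.ndarray, optional
            The points at which the gradient is to be evaluated. The default
            is None, indicating to use self.bases if self.evaluated_bases is
            True.
        dmrg_type : str, optional
            The type of DMRG algorithm, 'dmrg' or 'dmrg_low_rank'. The
            default is 'dmrg'.

        Returns
        -------
        grad : Tensor or list of Tensor
            The evaluated gradient; a list [g_alpha, g_gamma] if dmrg_type
            is 'dmrg_low_rank'.

        '''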
        if self.evaluated_bases:
            bases_eval = self.bases
        elif x is not None:
            bases_eval = self.bases.eval(x)
        else:
            raise ValueError('Must provide the evaluation points or the ' +
                             'bases evaluations.')

        dims = np.arange(self.tensor.order)
        if isinstance(self.tensor, tensap.TreeBasedTensor):
            # Compute fH, the TimesMatrixEvalDiag of f with bases_eval in all
            # the dimensions except the ones associated with alpha (if alpha
            # is a leaf node) or with the inactive children of alpha (if
            # alpha is an internal node). The tensor fH is used to compute
            # the gradient of f with respect to f.tensor.tensors[alpha-1].
            tree = self.tensor.tree
            if tree.is_leaf[alpha - 1]:
                dims = dims[tree.dim2ind != alpha]
            else:
                children = tree.children(alpha)
                ind = tensap.fast_intersect(
                    tree.dim2ind, children[np.logical_not(
                        self.tensor.is_active_node[children - 1])])
                dims = dims[np.logical_not(np.isin(tree.dim2ind, ind))]

            fH = self.tensor.tensor_matrix_product(
                [bases_eval[x] for x in dims], dims)
        else:
            if alpha <= self.tensor.order:
                dims = np.delete(dims, alpha - 1)
            fH = self.tensor.tensor_matrix_product(
                [bases_eval[x] for x in dims], dims)

        grad, g_alpha, g_gamma = \
            fH.parameter_gradient_eval_diag_dmrg(alpha, bases_eval)

        if isinstance(self.tensor, tensap.TreeBasedTensor):
            # If the order of the children has been modified in grad, compute
            # the inverse permutation.
            ch = tree.children(alpha)

            if ch.size == 0:
                perm_1 = np.array([0])
            else:
                perm_1 = np.argsort(
                    np.concatenate(
                        (np.atleast_1d(ch[fH.is_active_node[ch - 1]]),
                         np.atleast_1d(ch[np.logical_not(
                             fH.is_active_node[ch - 1])]))))
            gamma = tree.parent(alpha)
            ch = tensap.fast_setdiff(tree.children(gamma), alpha)
            perm_1b = np.argsort(
                np.concatenate((np.atleast_1d(ch[fH.is_active_node[ch - 1]]),
                                np.atleast_1d(ch[np.logical_not(
                                    fH.is_active_node[ch - 1])]))))

            if dmrg_type == 'dmrg':
                perm_1 = np.concatenate((perm_1, perm_1.size + perm_1b))
                perm_2 = []
                if alpha != tree.root and gamma != tree.root:
                    perm_2 = [
                        fH.tensors[alpha - 1].order +
                        fH.tensors[gamma - 1].order - 2
                    ]
                perm_3 = []
                if alpha != tree.root and self.tensor.ranks[tree.root - 1] > 1:
                    perm_3 = [grad.order - 1]
                grad = grad.transpose(
                    np.concatenate(
                        ([0], perm_1 + 1, perm_2, perm_3)).astype(int))
            elif dmrg_type == 'dmrg_low_rank':
                g_alpha = g_alpha.transpose(np.concatenate(([0], perm_1 + 1)))
                perm_2 = []
                if gamma != tree.root:
                    # TODO Checks
                    perm_2 = [fH.tensors[gamma - 1].order - 1]
                perm_3 = []
                if alpha != tree.root and self.tensor.ranks[tree.root - 1] > 1:
                    perm_3 = [grad.order - 1]
                g_gamma = g_gamma.transpose(
                    np.concatenate(
                        ([0], perm_1b + 1, perm_2, perm_3)).astype(int))

                grad = [g_alpha, g_gamma]
            else:
                raise ValueError('Wrong DMRG type.')
        return grad
Example #9
    def parameter_gradient_eval(self, alpha, x=None, *args):
        '''
        Compute the gradient of the function with respect to its alpha-th
        parameter, evaluated at some points.

        Parameters
        ----------
        alpha : int
            The number of the parameter with respect to which the gradient
            of self is computed.
        x : list or numpy.ndarray, optional
            The points at which the gradient is to be evaluated. The default is
            None, indicating to use self.bases if self.evaluated_bases is True.

        Raises
        ------
        ValueError
            If no input points are provided.

        Returns
        -------
        grad : Tensor
            The gradient of the function with respect to its alpha-th
            parameter, evaluated at some points.

        '''
        if self.evaluated_bases:
            bases_eval = self.bases
        elif x is not None:
            bases_eval = self.bases.eval(x)
        else:
            raise ValueError('Must provide the evaluation points or the ' +
                             'bases evaluations.')

        dims = np.arange(self.tensor.order)
        if isinstance(self.tensor, tensap.TreeBasedTensor):
            # Compute fH, the TimesMatrixEvalDiag of f with bases_eval in all
            # the dimensions except the ones associated with alpha (if alpha
            # is a leaf node) or with the inactive children of alpha (if
            # alpha is an internal node). The tensor fH is used to compute
            # the gradient of f with respect to f.tensor.tensors[alpha-1].
            tree = self.tensor.tree
            if tree.is_leaf[alpha - 1]:
                dims = dims[tree.dim2ind != alpha]
            else:
                children = tree.children(alpha)
                ind = tensap.fast_intersect(
                    tree.dim2ind, children[np.logical_not(
                        self.tensor.is_active_node[children - 1])])
                dims = dims[np.logical_not(np.isin(tree.dim2ind, ind))]

            if np.all(self.tensor.is_active_node):
                fH = self.tensor.tensor_matrix_product(
                    [bases_eval[x] for x in dims], dims)
            else:
                remaining_dims = np.arange(self.tensor.order)
                tensors = np.array(self.tensor.tensors)
                dim2ind = np.array(tree.dim2ind)

                for leaf in tensap.fast_intersect(tree.dim2ind[dims],
                                                  self.tensor.active_nodes):
                    dims = tensap.fast_setdiff(
                        dims,
                        np.nonzero(tree.dim2ind == leaf)[0][0])
                    tensors[leaf-1] = self.tensor.tensors[leaf-1].\
                        tensor_matrix_product(bases_eval[
                            np.nonzero(tree.dim2ind == leaf)[0][0]], 0)

                for pa in np.unique(
                        tree.parent(
                            tensap.fast_setdiff(tree.dim2ind[dims],
                                                self.tensor.active_nodes))):
                    ind = tensap.fast_intersect(tree.dim2ind[dims],
                                                tree.children(pa))
                    ind = tensap.fast_setdiff(ind, self.tensor.active_nodes)
                    dims_loc = np.array(
                        [np.nonzero(x == tree.dim2ind)[0][0] for x in ind])
                    if len(ind) > 1:
                        tensors[pa-1] = self.tensor.tensors[pa-1].\
                            tensor_matrix_product_eval_diag([bases_eval[x] for
                                                             x in dims_loc],
                                                            tree.child_number(
                                                                ind)-1)
                        remaining_dims = tensap.fast_setdiff(
                            remaining_dims, dims_loc[1:])
                        if np.all(
                                np.logical_not(self.tensor.is_active_node[
                                    tree.children(pa) - 1])):
                            dim2ind[dims_loc[0]] = tree.parent(
                                tree.dim2ind[dims_loc[0]])
                        else:
                            dims = tensap.fast_setdiff(dims, dims_loc[0])
                        dim2ind[dims_loc[1:]] = 0
                        perm = np.concatenate(
                            ([tree.child_number(ind[0]) - 1],
                             tensap.fast_setdiff(
                                 np.arange(tensors[pa - 1].order),
                                 tree.child_number(ind[0]) - 1)))
                        tensors[pa - 1] = tensors[pa - 1].itranspose(perm)
                    elif len(ind) == 1:
                        dims = dims[dims != dims_loc]
                        tensors[pa-1] = self.tensor.tensors[pa-1].\
                            tensor_matrix_product([bases_eval[x] for
                                                   x in dims_loc],
                                                  tree.child_number(ind)-1)
                        dim2ind[dims_loc] = tree.dim2ind[dims_loc]

                keep_ind = tensap.fast_setdiff(np.arange(tree.nb_nodes),
                                               tree.dim2ind[dims] - 1)
                adj_mat = tree.adjacency_matrix[np.ix_(keep_ind, keep_ind)]
                dim2ind = dim2ind[dim2ind != 0]

                ind = np.zeros(tree.nb_nodes)
                ind[tensap.fast_setdiff(np.arange(tree.nb_nodes),
                                        keep_ind)] = 1
                ind = np.cumsum(ind).astype(int)
                dim2ind -= ind[dim2ind - 1]
                alpha = alpha - ind[alpha - 1]

                tree = tensap.DimensionTree(dim2ind, adj_mat)
                fH = tensap.TreeBasedTensor(tensors[keep_ind], tree)
                fH = fH.remove_unique_children()
                bases_eval = [bases_eval[x] for x in remaining_dims]
        else:
            if alpha <= self.tensor.order:
                dims = np.delete(dims, alpha - 1)
            fH = self.tensor.tensor_matrix_product(
                [bases_eval[x] for x in dims], dims)

        grad = fH.parameter_gradient_eval_diag(alpha, bases_eval)
        if isinstance(self.tensor, tensap.TreeBasedTensor) and \
                not tree.is_leaf[alpha-1]:
            # If the order of the children has been modified in grad, compute
            # the inverse permutation.
            ch = tree.children(alpha)
            perm_1 = np.argsort(
                np.concatenate((np.atleast_1d(ch[fH.is_active_node[ch - 1]]),
                                np.atleast_1d(ch[np.logical_not(
                                    fH.is_active_node[ch - 1])]))))

            if alpha == tree.root:
                perm_2 = []
            else:
                perm_2 = [fH.tensors[alpha - 1].order]

            if alpha != tree.root and self.tensor.ranks[tree.root - 1] > 1:
                perm_3 = [grad.order - 1]
            else:
                perm_3 = []

            grad = grad.transpose(
                np.concatenate(([0], perm_1 + 1, perm_2, perm_3)).astype(int))

        return grad