Example #1
    def _project(self, t):
        '''
        Compute the projection of a tensor on the functional bases.

        The method takes an AlgebraicTensor t whose entries are the values of
        the function on a product grid, and returns a FunctionalTensor
        obtained by applying the projection operators returned by the method
        _projection_operators.

        Parameters
        ----------
        t : tensap.Tensor
            The tensor used for the projection.

        Returns
        -------
        tensap.FunctionalTensor
            The obtained projection.

        '''
        tensor = deepcopy(t)
        P = self._projection_operators()
        for nu in range(tensor.order):
            alpha = tensor.tree.dim2ind[nu]
            if tensor.is_active_node[alpha - 1]:
                data = np.matmul(P[nu], tensor.tensors[alpha - 1].data)
                tensor.tensors[alpha - 1] = tensap.FullTensor(
                    data, 2, data.shape)
            else:
                pa = tensor.tree.parent(alpha)
                ch = tensor.tree.child_number(alpha)
                tensor.tensors[pa-1] = \
                    tensor.tensors[pa-1].tensor_matrix_product(P[nu], ch-1)
        return tensap.FunctionalTensor(tensor, self.bases)
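
A minimal NumPy sketch of the mode-by-mode projection that _project performs
(the grid tensor y and the per-dimension operators P are hypothetical
stand-ins for tensap's tensor classes and _projection_operators):

    import numpy as np

    # Values of a function on a 3 x 4 x 5 product grid.
    y = np.random.rand(3, 4, 5)

    # One projection operator per dimension, mapping values on the grid in
    # that dimension to coefficients on the corresponding basis.
    P = [np.eye(3), np.random.rand(2, 4), np.eye(5)]

    # Apply P[nu] along mode nu, as tensor_matrix_product does.
    coef = y
    for nu, p in enumerate(P):
        coef = np.moveaxis(np.tensordot(p, coef, axes=(1, nu)), 0, nu)

    assert coef.shape == (3, 2, 5)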
Example #2
    def tensor_product_interpolation(self, fun, grid=None):
        '''
        Return the interpolation of function fun on a product grid.

        Parameters
        ----------
        fun : function or tensap.Function or tensap.Tensor
            The function to interpolate, or a tensor of order d whose entries
            are the evaluations of the function on a product grid.
        grid : list, optional
            The grid of points used for the interpolation. If a grid has more
            points than the dimension of the corresponding basis, a subset of
            points adapted to the basis is selected using magic points. The
            default is None, indicating to use the method
            self.bases.interpolation_points().

        Raises
        ------
        ValueError
            If the argument fun is neither a tensap.Function, a function, nor
            a tensap.Tensor.

        Returns
        -------
        tensap.FunctionalTensor
            The interpolation of the function.
        output : dict
            A dictionary of outputs of the method.

        '''
        if grid is None:
            grid = self.bases.interpolation_points()
        else:
            grid = self.bases.interpolation_points(grid)
        grid = tensap.FullTensorGrid(grid)

        output = {}
        if hasattr(fun, '__call__') or isinstance(fun, tensap.Function):
            x_grid = grid.array()
            y = fun(x_grid)
            y = tensap.FullTensor(y, grid.dim, grid.shape)
            output['number_of_evaluations'] = y.storage()
        # TODO Create an empty class Tensor for use with isinstance?
        elif isinstance(fun, (tensap.FullTensor, tensap.CanonicalTensor,
                              tensap.TreeBasedTensor, tensap.DiagonalTensor)):
            y = fun
        else:
            raise ValueError('The argument fun should be a Function, ' +
                             'function, or a Tensor.')
        output['grid'] = grid

        # Evaluate each basis on its grid; the collocation matrices are
        # square by construction, so their inverses map grid values to
        # interpolation coefficients.
        B = self.bases.eval(grid.grids)
        B = [np.linalg.inv(x) for x in B]
        y = y.tensor_matrix_product(B)
        return tensap.FunctionalTensor(y, self.bases), output
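
A hedged usage sketch of tensor_product_interpolation; in tensap this method
belongs to FullTensorProductFunctionalBasis, and the bases and function below
are illustrative assumptions, not taken from the code above:

    import tensap

    # Hypothetical setup: polynomials of degree at most 4 in each of the
    # 3 variables, orthonormal with respect to the uniform measure.
    X = tensap.UniformRandomVariable(-1, 1)
    P = tensap.PolynomialFunctionalBasis(X.orthonormal_polynomials(), range(5))
    H = tensap.FullTensorProductFunctionalBasis(
        tensap.FunctionalBases.duplicate(P, 3))

    fun = tensap.UserDefinedFunction('1/(1 + x0**2 + x1**2 + x2**2)', 3)
    f, output = H.tensor_product_interpolation(fun)
    print(output['number_of_evaluations'])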
Example #3
    def projection(self, fun, I):
        '''
        Compute the projection of the function fun on the basis functions of
        self.

        Parameters
        ----------
        fun : tensap.Function
            The function to project.
        I : tensap.IntegrationRule
            The integration rule used to compute the projection.

        Raises
        ------
        NotImplementedError
            If the provided integration rule is not a
            tensap.FullTensorProductIntegrationRule.

        Returns
        -------
        tensap.FunctionalTensor
            The projection of the function fun on the basis functions of self.
        output : dict
            Dictionary containing the number of evaluations of the function in
            the key 'number_of_evaluations'.

        '''
        if not isinstance(I, tensap.FullTensorProductIntegrationRule):
            raise NotImplementedError(
                'Projection is only implemented for a ' +
                'FullTensorProductIntegrationRule.')

        u = fun.eval_on_tensor_grid(I.points)
        output = {'number_of_evaluations': u.storage()}
        # Evaluations of the bases at the integration points.
        Hx = self.bases.eval(np.hstack(I.points.grids))
        # Gram matrices of the bases with respect to the discrete measure
        # defined by the integration rule.
        Mx = [
            np.matmul(np.transpose(x), np.matmul(np.diag(y), x))
            for x, y in zip(Hx, I.weights)
        ]
        # Per-dimension projection operators inv(M) H' W, mapping values at
        # the integration points to coefficients on the bases.
        Hx_w = [
            np.linalg.solve(m, np.matmul(np.transpose(x), np.diag(y)))
            for m, x, y in zip(Mx, Hx, I.weights)
        ]
        f_dims = np.arange(len(Hx_w))
        u_coeff = u.tensor_matrix_product(Hx_w, f_dims)
        return tensap.FunctionalTensor(u_coeff, self.bases, f_dims), output
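
In one dimension, each operator in Hx_w above implements the discrete
weighted least-squares projection (H'WH)^{-1} H'W; a self-contained NumPy
sketch with hypothetical H, w, and f:

    import numpy as np

    n, m = 20, 5
    H = np.random.rand(n, m)   # basis evaluated at n integration points
    w = np.random.rand(n)      # positive quadrature weights
    f = np.random.rand(n)      # function values at the integration points

    # Coefficients c minimizing sum_i w_i * (f_i - (H @ c)_i)**2.
    M = H.T @ (w[:, None] * H)
    c = np.linalg.solve(M, H.T @ (w * f))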
Example #4
    def _solve_adaptation(self):
        '''
        Solver for the learning problem with tensor formats using the adaptive
        algorithm.

        Returns
        -------
        f : tensap.FunctionalTensor
            The learned approximation.
        output : dict
            The outputs of the solver.

        '''

        flag = 0
        output = {
            'enriched_nodes_iterations':
            np.empty(self.rank_adaptation_options['max_iterations'],
                     dtype=object)
        }
        tree_adapt = False

        f = None
        errors = np.zeros(self.rank_adaptation_options['max_iterations'])
        test_errors = np.zeros(self.rank_adaptation_options['max_iterations'])
        iterates = np.empty(self.rank_adaptation_options['max_iterations'],
                            dtype=object)

        new_rank = self.rank
        s_local = self.local_solver()
        s_local.model_selection = False

        enriched_nodes = np.array([])
        for iteration in range(self.rank_adaptation_options['max_iterations']):
            s_local.bases = self.bases
            s_local.bases_eval = self.bases_eval
            s_local.bases_eval_test = self.bases_eval_test
            s_local.training_data = self.training_data
            s_local.test_data = self.test_data
            s_local.rank = new_rank

            f_old = deepcopy(f)
            f, output_local = s_local.solve()
            s_local = self.local_solver()

            if 'error' in output_local:
                errors[iteration] = output_local['error']
                if np.isinf(errors[iteration]):
                    print('Infinite error, returning to the previous iterate.')
                    f = f_old
                    # Step back so that the output slices below exclude the
                    # discarded iterate.
                    iteration -= 1
                    flag = -2
                    break

            if self.test_error:
                f_eval_test = tensap.FunctionalTensor(f, self.bases_eval_test)
                test_errors[iteration] = self.loss_function.test_error(
                    f_eval_test, self.test_data)

            if self.store_iterates:
                if isinstance(self.bases, tensap.FunctionalBases):
                    iterates[iteration] = tensap.FunctionalTensor(
                        f.tensor, self.bases)
                else:
                    iterates[iteration] = f

            if self.display:
                if self.alternating_minimization_parameters['display']:
                    print('')
                print('\nRank adaptation, iteration %i:' % (iteration))
                self.adaptation_display(f, enriched_nodes)
                print('\tStorage complexity = %i' % f.tensor.storage(),
                      flush=True)

                if errors[iteration] != 0:
                    print('\tError      = %2.5e' % errors[iteration])
                if test_errors[iteration] != 0:
                    print('\tTest error = %2.5e' % test_errors[iteration])
                if self.alternating_minimization_parameters['display']:
                    print('')

            if iteration == self.rank_adaptation_options['max_iterations'] - 1:
                break

            if (self.test_error and test_errors[iteration] <
                    self.tolerance['on_error']) or \
                ('error' in output_local and errors[iteration] <
                 self.tolerance['on_error']):
                flag = 1
                break

            fac = self.rank_adaptation_options['early_stopping_factor']
            # Stop early if the (test) error is NaN or exceeds fac times the
            # best error over the previous iterations.
            cond = iteration > 0 and (
                (self.test_error and
                 (np.isnan(test_errors[iteration]) or
                  fac * np.min(test_errors[:iteration]) <
                  test_errors[iteration])) or
                ('error' in output_local and
                 (np.isnan(errors[iteration]) or
                  fac * np.min(errors[:iteration]) < errors[iteration])))
            if self.rank_adaptation_options['early_stopping'] and cond:
                print('Early stopping', end='')
                if 'error' in output_local:
                    print(', error = %2.5e' % errors[iteration], end='')
                if self.test_error:
                    print(', test error = %2.5e' % test_errors[iteration],
                          end='')
                print('\n')
                iteration -= 1
                f = f_old
                flag = -1
                break

            adapted_tree = False
            if s_local.tree_adaptation and iteration > 0 and \
                    (not self.tree_adaptation_options['force_rank_adaptation']
                     or not tree_adapt):
                C_old = f.tensor.storage()
                self, f, output = self.adapt_tree(f, errors[iteration], None,
                                                  output, iteration)
                adapted_tree = output['adapted_tree']
                if adapted_tree:
                    if self.display:
                        print('\t\tStorage complexity before permutation ' +
                              '= %i' % C_old)
                        print('\t\tStorage complexity after permutation ' +
                              '= %i' % f.tensor.storage())
                    if self.test_error:
                        f_eval_test = tensap.FunctionalTensor(
                            f, self.bases_eval_test)
                        test_errors[iteration] = self.loss_function.test_error(
                            f_eval_test, self.test_data)
                        if self.display:
                            print('\t\tTest error after permutation ' +
                                  '= %2.5e' % test_errors[iteration])

                    if self.alternating_minimization_parameters['display']:
                        print('')

            if not self.tree_adaptation or not adapted_tree:
                if iteration > 0 and not tree_adapt:
                    stagnation = self.stagnation_criterion(
                        tensap.FunctionalTensor(f.tensor, self.bases_eval),
                        tensap.FunctionalTensor(f_old.tensor, self.bases_eval))
                    if stagnation < self.tolerance['on_stagnation'] or \
                            np.isnan(stagnation):
                        break
                tree_adapt = False
                new_rank, enriched_nodes, tensor_for_initialization = \
                    self.new_rank_selection(f)
                output['enriched_nodes_iterations'][iteration] = enriched_nodes
                s_local = self.initial_guess_new_rank(
                    s_local, tensor_for_initialization, new_rank)
            else:
                tree_adapt = True
                enriched_nodes = []
                new_rank = f.tensor.ranks
                s_local.initialization_type = 'initial_guess'
                s_local.initial_guess = f.tensor

        if isinstance(self.bases, tensap.FunctionalBases):
            f = tensap.FunctionalTensor(f.tensor, self.bases)

        if self.store_iterates:
            output['iterates'] = iterates[:iteration + 1]

        output['flag'] = flag
        output['enriched_nodes_iterations'] = \
            output['enriched_nodes_iterations'][:iteration+1]
        if 'error' in output_local:
            output['error_iterations'] = errors[:iteration + 1]
            output['error'] = errors[iteration]

        if self.test_error:
            output['test_error_iterations'] = test_errors[:iteration + 1]
            output['test_error'] = test_errors[iteration]

        if 'adapted_tree' in output:
            del output['adapted_tree']

        return f, output
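
A hedged sketch of how this adaptive solver is typically driven; the
tensor_train constructor, the rank_adaptation flag, and the data below follow
tensap's tutorials and are assumptions, not taken from the code above:

    import tensap

    solver = tensap.TreeBasedTensorLearning.tensor_train(
        3, tensap.SquareLossFunction())
    solver.bases = bases                     # a tensap.FunctionalBases
    solver.bases_eval = bases.eval(x_train)  # evaluations on the sample
    solver.training_data = [x_train, y_train]
    solver.rank_adaptation = True            # dispatches to _solve_adaptation
    solver.tolerance['on_error'] = 1e-8
    f, output = solver.solve()
    print(output['flag'], output['error'])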
Example #5
    def _solve_standard(self):
        '''
        Solver for the learning problem with tensor formats using the standard
        algorithm (without adaptation).

        Raises
        ------
        ValueError
            If the number of LinearModelLearning objects is not equal to
            self._number_of_parameters.

        Returns
        -------
        f : tensap.FunctionalTensor
            The learned approximation.
        output : dict
            The outputs of the solver.

        '''
        output = {'flag': 0}

        self, f = self.initialize()
        f = tensap.FunctionalTensor(f, self.bases_eval)

        # Replication of the LinearModelLearning objects
        if self.linear_model_learning_parameters[
                'identical_for_all_parameters'] and \
                not isinstance(self.linear_model_learning, (list, np.ndarray)):
            self.linear_model_learning = list(
                map(deepcopy,
                    [self.linear_model_learning] * self._number_of_parameters))
        elif isinstance(self.linear_model_learning, (list, np.ndarray)) and \
                len(self.linear_model_learning) != self._number_of_parameters:
            raise ValueError('Must provide self._number_of_parameters ' +
                             'LinearModelLearning objects.')

        if self.error_estimation:
            for x in self.linear_model_learning:
                setattr(x, 'error_estimation', True)

        # Working set paths
        if isinstance(self.linear_model_learning, (list, np.ndarray)) and \
            np.any([x.basis_adaptation for
                    x in self.linear_model_learning]) and \
                self.bases_adaptation_path is None:
            self.bases_adaptation_path = self.bases.adaptation_path()

        if self.alternating_minimization_parameters['max_iterations'] == 0:
            return f, output

        output['stagnation_criterion'] = []
        output['iterates'] = []
        output['error_iterations'] = []
        output['test_error_iterations'] = []

        # Alternating minimization loop
        for iteration in range(
                self.alternating_minimization_parameters['max_iterations']):
            self, f = self.pre_processing(f)
            f0 = deepcopy(f)

            if self.alternating_minimization_parameters['random']:
                # Randomize the exploration strategy
                alpha_list = self.randomize_exploration_strategy()
            else:
                alpha_list = self._exploration_strategy

            for alpha in alpha_list:
                self, A, b, f = \
                    self.prepare_alternating_minimization_system(f, alpha)
                self.linear_model_learning[alpha - 1].training_data = [None, b]
                self.linear_model_learning[alpha - 1].basis = None
                self.linear_model_learning[alpha - 1].basis_eval = A

                coef, output_tmp = \
                    self.linear_model_learning[alpha - 1].solve()
                if coef is None or np.count_nonzero(coef) == 0 or \
                        not np.all(np.isfinite(coef)):
                    print('Empty, zero or NaN solution, returning to the ' +
                          'previous iteration.')
                    output['flag'] = -2
                    output['error'] = np.inf
                    break

                f = self.set_parameter(f, alpha, coef)

            stagnation = self.stagnation_criterion(f, f0)
            output['stagnation_criterion'].append(stagnation)

            if self.store_iterates:
                if isinstance(self.bases, tensap.FunctionalBases):
                    output['iterates'].append(
                        tensap.FunctionalTensor(f.tensor, self.bases))
                else:
                    output['iterates'].append(f)

            if 'error' in output_tmp:
                output['error'] = output_tmp['error']
                output['error_iterations'].append(output['error'])

            if self.test_error:
                f_eval_test = tensap.FunctionalTensor(f, self.bases_eval_test)
                output['test_error'] = self.loss_function.test_error(
                    f_eval_test, self.test_data)
                output['test_error_iterations'].append(output['test_error'])

            if self.alternating_minimization_parameters['display']:
                print('\tAlt. min. iteration %s: stagnation = %2.5e' %
                      (str(iteration).zfill(
                          len(
                              str(self.alternating_minimization_parameters[
                                  'max_iterations'] - 1))), stagnation),
                      end='')
                if 'error' in output:
                    print(', error = %2.5e' % output['error'], end='')
                if self.test_error:
                    if not np.isscalar(output['test_error']):
                        output['test_error'] = output['test_error'].numpy()
                    print(', test error = %2.5e' % output['test_error'],
                          end='')
                print('')

            if iteration > 0 and stagnation < \
                    self.alternating_minimization_parameters['stagnation']:
                output['flag'] = 1
                break

            if self.test_error and \
                    output['test_error'] < self.tolerance['on_error']:
                output['flag'] = 1
                break

        if isinstance(self.bases, tensap.FunctionalBases):
            f = tensap.FunctionalTensor(f.tensor, self.bases)
        output['iter'] = iteration

        if self.display and not self.model_selection:
            if self.alternating_minimization_parameters['display']:
                print('')
            self.final_display(f)
            if 'error' in output:
                print(', error = %2.5e' % output['error'], end='')
            if 'test_error' in output:
                print(', test error = %2.5e' % output['test_error'], end='')
            print('')

        return f, output
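
The alternating minimization in _solve_standard reduces, for each parameter,
to a linear least-squares problem. A minimal self-contained NumPy sketch of
the idea for a rank-one model f(x, y) = (phi(x)'a) * (psi(y)'b), with all
names hypothetical:

    import numpy as np

    rng = np.random.default_rng(0)
    Phi, Psi = rng.random((100, 5)), rng.random((100, 5))  # basis evaluations
    y = rng.random(100)                                    # training outputs

    a, b = rng.random(5), rng.random(5)
    for _ in range(10):
        # Fix b and solve for a: the model is linear in a, with design
        # matrix Phi scaled row-wise by Psi @ b.
        A = Phi * (Psi @ b)[:, None]
        a = np.linalg.lstsq(A, y, rcond=None)[0]
        # Fix a and solve for b.
        B = Psi * (Phi @ a)[:, None]
        b = np.linalg.lstsq(B, y, rcond=None)[0]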
Example #6
    def _solve_greedy(self):
        '''
        Greedy solver in canonical tensor format.

        Raises
        ------
        ValueError
            If the number of LinearModelLearning objects is not equal to
            self._number_of_parameters.

        Returns
        -------
        f : tensap.FunctionalTensor
            The learned approximation.
        output : dict
            The outputs of the solver.

        '''
        assert isinstance(self.loss_function, tensap.SquareLossFunction), \
            'Method not implemented for this loss function.'

        bases_eval = self.bases_eval
        output = {}
        output['sequence'] = np.empty(self.rank, dtype=object)

        # Replication of the LinearModelLearning objects
        if self.linear_model_learning_parameters[
                'identical_for_all_parameters'] and \
                not isinstance(self.linear_model_learning, (list, np.ndarray)):
            self.linear_model_learning = list(
                map(deepcopy,
                    [self.linear_model_learning] * self._number_of_parameters))
        elif isinstance(self.linear_model_learning, (list, np.ndarray)) and \
                len(self.linear_model_learning) != self._number_of_parameters:
            raise ValueError('Must provide self._number_of_parameters ' +
                             'LinearModelLearning objects.')

        # Working set paths
        if isinstance(self.linear_model_learning, (list, np.ndarray)) and \
            np.any([x.basis_adaptation for
                    x in self.linear_model_learning]) and \
                self.bases_adaptation_path is None:
            self.bases_adaptation_path = self.bases.adaptation_path()

        y = self.training_data[1]

        s_local = deepcopy(self)
        s_local.algorithm = 'standard'
        s_local.rank = 1
        s_local.display = False
        s_local.test_error = False
        s_local.model_selection = False

        f = tensap.CanonicalTensor.zeros(0, [x.shape[1] for x in bases_eval])
        f_0 = deepcopy(f)
        stagnation = np.zeros(self.rank)

        ls_local = deepcopy(
            s_local.linear_model_learning[self._number_of_parameters - 1])

        is_error = False
        error = np.ones(self.rank)
        # When pure_greedy is False, error estimation in the rank-one
        # corrections is disabled; the error, if estimated, comes from the
        # re-fit of the core coefficients (ls_local) below.
        pure_greedy = False
        if not pure_greedy:
            for linear_solver in self.linear_model_learning:
                setattr(linear_solver, 'error_estimation', False)

        # Greedy loop: at step k, fit a rank-one correction to the current
        # residual, then optionally re-fit the core coefficients.
        for k in np.arange(1, self.rank + 1):
            s_local.training_data[1] = \
                y - f.tensor_matrix_product(bases_eval).eval_diag().data
            f_add, output_greedy = s_local.solve()
            if isinstance(f_add, tensap.FunctionalTensor):
                f += f_add.tensor
            else:
                f += f_add

            if not pure_greedy:
                f_H = f.tensor_matrix_product(bases_eval)
                A = np.ones((bases_eval[0].shape[0], len(f_H.core.data)))
                for space in f_H.space:
                    A *= space
                ls_local.basis_adaptation = False
                ls_local.basis = None
                ls_local.basis_eval = A
                ls_local.training_data = [None, y]
                coef, output_greedy = ls_local.solve()
                f.core.data = np.ravel(coef)

            stagnation[k - 1] = 2 * (f - f_0).norm() / (f.norm() + f_0.norm())
            current_rank = len(f.core.data)
            output['sequence'][k - 1] = f

            if 'error' in output_greedy:
                error[k - 1] = output_greedy['error']
                is_error = True
                if self.alternating_minimization_parameters['display']:
                    print(('Alternating minimization (greedy): rank = %i, ' +
                           'error = %2.5e, stagnation = %2.5e') %
                          (current_rank, error[k - 1], stagnation[k - 1]))
            else:
                if self.alternating_minimization_parameters['display']:
                    print(('Alternating minimization (greedy): rank = %i, ' +
                           'stagnation = %2.5e') %
                          (current_rank, stagnation[k - 1]))

            if error[k-1] < self.tolerance['on_error'] or \
                stagnation[k-1] < self.tolerance['on_stagnation'] or \
                    (k > 2 and error[k-1] > error[k-2] and
                     error[k-2] > error[k-3]):
                break

            f_0 = deepcopy(f)
            if self.test_error:
                f_eval_test = tensap.FunctionalTensor(f, self.bases_eval_test)
                output['test_error'] = self.loss_function.test_error(
                    f_eval_test, self.test_data)
                output.setdefault('test_error_iterations', []).append(
                    output['test_error'])
                if self.display:
                    print('Greedy: iteration %i, test error = %2.5e' %
                          (k, output['test_error']))

        output['stagnation'] = stagnation[:k]
        if is_error:
            output['errors'] = error[:k]
            K = np.argmin(output['errors'])
            f = output['sequence'][K]
            output['selected_iterate'] = K
            output['error'] = output['errors'][K]

        if isinstance(self.bases, tensap.FunctionalBases):
            f = tensap.FunctionalTensor(f, self.bases)

        return f, output
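
The structure of the greedy loop, a rank-one correction of the residual
followed by a joint re-fit of the term coefficients, in a minimal NumPy
analogue on matrices (all data hypothetical):

    import numpy as np

    rng = np.random.default_rng(0)
    Y = rng.random((30, 30))  # data to approximate by a sum of outer products
    terms, max_rank = [], 5

    for k in range(max_rank):
        # Rank-one correction of the residual; two ALS half-steps stand in
        # for the call to s_local.solve().
        R = Y - sum(np.outer(a, b) for a, b in terms)
        b = rng.random(R.shape[1])
        a = R @ b / (b @ b)
        b = R.T @ a / (a @ a)
        terms.append((a, b))

        # Joint re-fit of the coefficients of all terms, as the non-pure
        # greedy update does for the canonical core f.core.data.
        A = np.stack([np.outer(a, b).ravel() for a, b in terms], axis=1)
        c = np.linalg.lstsq(A, Y.ravel(), rcond=None)[0]
        terms = [(ci * a, b) for ci, (a, b) in zip(c, terms)]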