Example #1
def __init__(self, truth_problem, **kwargs):
    # Call the parent initialization
    DifferentialProblemReductionMethod_DerivedClass.__init__(self, truth_problem, **kwargs)

    # Declare a GS object
    self.GS = None  # GramSchmidt (for problems with one component) or dict of GramSchmidt (for problems with several components)
    # I/O
    self.folder["snapshots"] = os.path.join(self.folder_prefix, "snapshots")
    self.folder["post_processing"] = os.path.join(self.folder_prefix, "post_processing")
    self.greedy_selected_parameters = GreedySelectedParametersList()
    self.greedy_error_estimators = GreedyErrorEstimatorsList()
    self.label = "RB"
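This constructor, like the ones below, registers its I/O locations in self.folder, a dict-like container mapping a purpose ("snapshots", "post_processing", ...) to a path rooted at folder_prefix; the folders are then created during the offline phase (see _init_offline in Example #5). A minimal, self-contained sketch of that convention, where FolderRegistry is a hypothetical stand-in rather than the actual RBniCS container:

import os

class FolderRegistry(dict):
    # Hypothetical stand-in for an RBniCS-style folder container:
    # a dict from purpose ("snapshots", "post_processing", ...) to a path.
    def __init__(self, folder_prefix):
        super().__init__()
        self.folder_prefix = folder_prefix

    def create(self):
        # Create every registered folder, reporting whether any was missing.
        created = False
        for path in self.values():
            if not os.path.isdir(path):
                os.makedirs(path)
                created = True
        return created

folders = FolderRegistry("elliptic_coercive")
folders["snapshots"] = os.path.join(folders.folder_prefix, "snapshots")
folders["post_processing"] = os.path.join(folders.folder_prefix, "post_processing")
print(folders.create())  # True on the first run, False once the folders exist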
Example #2
def __init__(self, truth_problem, folder_prefix, **kwargs):
    # Call the parent initialization
    ParametrizedProblem.__init__(self, folder_prefix)
    # Store the parametrized problem object and the bc list
    self.truth_problem = truth_problem

    # Define additional storage for SCM
    self.B_min = BoundingBoxSideList()  # minimum values of the bounding box mathcal{B}. Vector of size Q
    self.B_max = BoundingBoxSideList()  # maximum values of the bounding box mathcal{B}. Vector of size Q
    self.training_set = None  # SCM algorithm needs the training set also in the online stage
    self.greedy_selected_parameters = GreedySelectedParametersList()  # list storing the parameters selected during the training phase
    self.greedy_selected_parameters_complement = dict()  # dict, over N, of lists storing the complement of the parameters selected during the training phase
    self.UB_vectors = UpperBoundsList()  # list of Q-dimensional vectors storing the infimizing elements at the greedily selected parameters
    self.N = 0
    self.M_e = kwargs["M_e"]  # integer denoting the number of constraints based on the exact eigenvalues, or None
    self.M_p = kwargs["M_p"]  # integer denoting the number of constraints based on the previous lower bounds, or None

    # I/O
    self.folder["cache"] = os.path.join(self.folder_prefix, "reduced_cache")
    self.cache_config = config.get("SCM", "cache")
    self.folder["reduced_operators"] = os.path.join(self.folder_prefix, "reduced_operators")

    # Coercivity constant eigen problem
    self.exact_coercivity_constant_calculator = ParametrizedCoercivityConstantEigenProblem(
        truth_problem, "a", True, "smallest",
        kwargs["coercivity_eigensolver_parameters"], self.folder_prefix)

    # Store here input parameters provided by the user that are needed by the reduction method
    self._input_storage_for_SCM_reduction = dict()
    self._input_storage_for_SCM_reduction["bounding_box_minimum_eigensolver_parameters"] = \
        kwargs["bounding_box_minimum_eigensolver_parameters"]
    self._input_storage_for_SCM_reduction["bounding_box_maximum_eigensolver_parameters"] = \
        kwargs["bounding_box_maximum_eigensolver_parameters"]

    # Avoid useless linear programming solves
    self._alpha_LB = 0.
    self._alpha_LB_cache = dict()
    self._alpha_UB = 0.
    self._alpha_UB_cache = dict()
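The _alpha_LB_cache and _alpha_UB_cache dictionaries exist to honor the comment above: each parameter value triggers at most one linear programming solve, after which the bound is read back from the cache. A minimal sketch of that memoization pattern, with solve_linear_program as a placeholder for the actual SCM bound computation:

def make_cached_bound(solve_linear_program):
    # Memoize a mu -> bound computation; mu must be hashable (e.g. a tuple).
    cache = dict()

    def bound(mu):
        if mu not in cache:
            cache[mu] = solve_linear_program(mu)  # expensive solve, at most once per mu
        return cache[mu]

    return bound

# Toy usage, with a trivial placeholder standing in for the linear program
alpha_LB = make_cached_bound(lambda mu: min(mu))
print(alpha_LB((0.5, 2.0)))  # 0.5, computed
print(alpha_LB((0.5, 2.0)))  # 0.5, read back from the cache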
Example #3
def __init__(self, truth_problem, **kwargs):
    # Call to parent
    EllipticCoerciveReducedProblem_DerivedClass.__init__(self, truth_problem, **kwargs)

    # Copy of greedy snapshots
    self.snapshots_mu = GreedySelectedParametersList()  # unlike greedy_selected_parameters in the reduction method, this list also stores the initial parameter
    self.snapshots = SnapshotsMatrix(truth_problem.V)

    # Extend the allowed keyword arguments in solve
    self._online_solve_default_kwargs["online_rectification"] = True
    self.OnlineSolveKwargs = OnlineSolveKwargsGenerator(**self._online_solve_default_kwargs)

    # Generate all combinations of allowed keyword arguments in solve
    online_solve_kwargs_with_rectification = list()
    online_solve_kwargs_without_rectification = list()
    for other_args in cartesian_product((True, False), repeat=len(self._online_solve_default_kwargs) - 1):
        args_with_rectification = self.OnlineSolveKwargs(*(other_args + (True, )))
        args_without_rectification = self.OnlineSolveKwargs(*(other_args + (False, )))
        online_solve_kwargs_with_rectification.append(args_with_rectification)
        online_solve_kwargs_without_rectification.append(args_without_rectification)
    self.online_solve_kwargs_with_rectification = online_solve_kwargs_with_rectification
    self.online_solve_kwargs_without_rectification = online_solve_kwargs_without_rectification

    # Flag to disable error estimation after rectification has been set up
    self._disable_error_estimation = False
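The loop above walks every boolean combination of the remaining solve keyword arguments, pairing each combination once with online_rectification enabled and once with it disabled. A self-contained sketch of the same enumeration, assuming cartesian_product behaves like itertools.product and using hypothetical kwarg names in place of _online_solve_default_kwargs:

from itertools import product as cartesian_product

# Hypothetical kwarg names; in the class above they come from _online_solve_default_kwargs
other_kwarg_names = ("online_stabilization", "online_vanishing_viscosity")

with_rectification = list()
without_rectification = list()
for other_args in cartesian_product((True, False), repeat=len(other_kwarg_names)):
    common = dict(zip(other_kwarg_names, other_args))
    with_rectification.append({**common, "online_rectification": True})
    without_rectification.append({**common, "online_rectification": False})

print(len(with_rectification))  # 2 ** len(other_kwarg_names) == 4 combinations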
Example #4
def __init__(self, EIM_approximation):
    # Call the parent initialization
    ReductionMethod.__init__(self, EIM_approximation.folder_prefix)

    # $$ OFFLINE DATA STRUCTURES $$ #
    # High fidelity problem
    self.EIM_approximation = EIM_approximation
    # Declare a new container to store the snapshots
    self.snapshots_container = self.EIM_approximation.parametrized_expression.create_snapshots_container()
    self._training_set_parameters_to_snapshots_container_index = dict()
    # I/O
    self.folder["snapshots"] = os.path.join(self.folder_prefix, "snapshots")
    self.folder["post_processing"] = os.path.join(self.folder_prefix, "post_processing")
    self.greedy_selected_parameters = GreedySelectedParametersList()
    self.greedy_errors = GreedyErrorEstimatorsList()
    # By default, set a tolerance slightly larger than zero in order to
    # stop greedy iterations in trivial cases
    self.tol = 1e-15
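The tolerance is set to 1e-15 rather than exactly zero so that the greedy stopping test relative_error_max >= tol (see the offline loop in Example #5) eventually fails even when the interpolation error drops to exact zero, e.g. for a parameter-independent expression. A toy illustration of that stopping logic:

Nmax, tol = 10, 1e-15
errors = [1.0, 1e-3, 0.0] + [0.0] * 7  # mock greedy errors, exactly zero from the third iteration on

N, relative_error_max = 0, 2.0 * tol  # start above the tolerance, as in the offline loop
while N < Nmax and relative_error_max >= tol:
    relative_error_max = errors[N]
    N += 1
print(N)  # 3 with tol = 1e-15; with tol = 0.0 the loop would run all the way to Nmax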
Example #5
class EIMApproximationReductionMethod(ReductionMethod):
    
    # Default initialization of members
    def __init__(self, EIM_approximation):
        # Call the parent initialization
        ReductionMethod.__init__(self, EIM_approximation.folder_prefix)
        
        # $$ OFFLINE DATA STRUCTURES $$ #
        # High fidelity problem
        self.EIM_approximation = EIM_approximation
        # Declare a new container to store the snapshots
        self.snapshots_container = self.EIM_approximation.parametrized_expression.create_snapshots_container()
        self._training_set_parameters_to_snapshots_container_index = dict()
        # I/O
        self.folder["snapshots"] = os.path.join(self.folder_prefix, "snapshots")
        self.folder["post_processing"] = os.path.join(self.folder_prefix, "post_processing")
        self.greedy_selected_parameters = GreedySelectedParametersList()
        self.greedy_errors = GreedyErrorEstimatorsList()
        # By default, set a tolerance slightly larger than zero in order to
        # stop greedy iterations in trivial cases
        self.tol = 1e-15
    
    def initialize_training_set(self, ntrain, enable_import=True, sampling=None, **kwargs):
        import_successful = ReductionMethod.initialize_training_set(self, self.EIM_approximation.mu_range, ntrain, enable_import, sampling, **kwargs)
        # Since exact evaluation is required, we cannot use a distributed training set
        self.training_set.distributed_max = False
        # Also initialize the map from parameter values to snapshots container index
        self._training_set_parameters_to_snapshots_container_index = dict((mu, mu_index) for (mu_index, mu) in enumerate(self.training_set))
        return import_successful
        
    def initialize_testing_set(self, ntest, enable_import=False, sampling=None, **kwargs):
        return ReductionMethod.initialize_testing_set(self, self.EIM_approximation.mu_range, ntest, enable_import, sampling, **kwargs)
    
    # Perform the offline phase of EIM
    def offline(self):
        need_to_do_offline_stage = self._init_offline()
        if need_to_do_offline_stage:
            self._offline()
        self._finalize_offline()
        return self.EIM_approximation
        
    # Initialize data structures required for the offline phase
    def _init_offline(self):
        # Prepare folders and init EIM approximation
        all_folders = Folders()
        all_folders.update(self.folder)
        all_folders.update(self.EIM_approximation.folder)
        all_folders.pop("testing_set") # this is required only in the error/speedup analysis
        all_folders.pop("error_analysis") # this is required only in the error analysis
        all_folders.pop("speedup_analysis") # this is required only in the speedup analysis
        at_least_one_folder_created = all_folders.create()
        if not at_least_one_folder_created:
            return False # offline construction should be skipped, since data are already available
        else:
            self.EIM_approximation.init("offline")
            return True # offline construction should be carried out
        
    def _offline(self):
        interpolation_method_name = self.EIM_approximation.parametrized_expression.interpolation_method_name()
        description = self.EIM_approximation.parametrized_expression.description()
        
        # Evaluate the parametrized expression for all parameters in the training set
        print(TextBox(interpolation_method_name + " preprocessing phase begins for" + "\n" + "\n".join(description), fill="="))
        print("")
        
        for (mu_index, mu) in enumerate(self.training_set):
            print(TextLine(interpolation_method_name + " " + str(mu_index), fill=":"))
            
            self.EIM_approximation.set_mu(mu)
            
            print("evaluate parametrized expression at mu =", mu)
            self.EIM_approximation.evaluate_parametrized_expression()
            self.EIM_approximation.export_solution(self.folder["snapshots"], "truth_" + str(mu_index))
            
            print("add to snapshots")
            self.add_to_snapshots(self.EIM_approximation.snapshot)

            print("")
            
        # If basis generation is POD, compute the first POD modes of the snapshots
        if self.EIM_approximation.basis_generation == "POD":
            print("compute basis")
            N_POD = self.compute_basis_POD()
            print("")
        
        print(TextBox(interpolation_method_name + " preprocessing phase ends for" + "\n" + "\n".join(description), fill="="))
        print("")
        
        print(TextBox(interpolation_method_name + " offline phase begins for" + "\n" + "\n".join(description), fill="="))
        print("")
        
        if self.EIM_approximation.basis_generation == "Greedy":
            # Arbitrarily start from the first parameter in the training set
            self.EIM_approximation.set_mu(self.training_set[0])
            
            # Carry out greedy selection
            relative_error_max = 2.*self.tol
            while self.EIM_approximation.N < self.Nmax and relative_error_max >= self.tol:
                print(TextLine(interpolation_method_name + " N = " + str(self.EIM_approximation.N), fill=":"))
                
                self._print_greedy_interpolation_solve_message()
                self.EIM_approximation.solve()
                
                print("compute and locate maximum interpolation error")
                self.EIM_approximation.snapshot = self.load_snapshot()
                (error, maximum_error, maximum_location) = self.EIM_approximation.compute_maximum_interpolation_error()
                
                print("update locations with", maximum_location)
                self.update_interpolation_locations(maximum_location)
                
                print("update basis")
                self.update_basis_greedy(error, maximum_error)
                
                print("update interpolation matrix")
                self.update_interpolation_matrix()
                
                (error_max, relative_error_max) = self.greedy()
                print("maximum interpolation error =", error_max)
                print("maximum interpolation relative error =", relative_error_max)
                
                print("")
        else:
            while self.EIM_approximation.N < N_POD:
                print(TextLine(interpolation_method_name + " N = " + str(self.EIM_approximation.N), fill=":"))
            
                print("solve interpolation for basis number", self.EIM_approximation.N)
                self.EIM_approximation._solve(self.EIM_approximation.basis_functions[self.EIM_approximation.N])
                
                print("compute and locate maximum interpolation error")
                self.EIM_approximation.snapshot = self.EIM_approximation.basis_functions[self.EIM_approximation.N]
                (error, maximum_error, maximum_location) = self.EIM_approximation.compute_maximum_interpolation_error()
                
                print("update locations with", maximum_location)
                self.update_interpolation_locations(maximum_location)
                
                self.EIM_approximation.N += 1
                
                print("update interpolation matrix")
                self.update_interpolation_matrix()
                
                print("")
                
        print(TextBox(interpolation_method_name + " offline phase ends for" + "\n" + "\n".join(description), fill="="))
        print("")
        
    # Finalize data structures required after the offline phase
    def _finalize_offline(self):
        self.EIM_approximation.init("online")
        
    def _print_greedy_interpolation_solve_message(self):
        print("solve interpolation for mu =", self.EIM_approximation.mu)
        
    # Update the snapshots container
    def add_to_snapshots(self, snapshot):
        self.snapshots_container.enrich(snapshot)
        
    # Update basis (greedy version)
    def update_basis_greedy(self, error, maximum_error):
        if abs(maximum_error) > 0.:
            self.EIM_approximation.basis_functions.enrich(error/maximum_error)
        else:
            # Trivial case, greedy will stop at the first iteration
            assert self.EIM_approximation.N == 0
            self.EIM_approximation.basis_functions.enrich(error) # error is actually zero
        self.EIM_approximation.basis_functions.save(self.EIM_approximation.folder["basis"], "basis")
        self.EIM_approximation.N += 1

    # Update basis (POD version)
    def compute_basis_POD(self):
        POD = self.EIM_approximation.parametrized_expression.create_POD_container()
        POD.store_snapshot(self.snapshots_container)
        (_, _, basis_functions, N) = POD.apply(self.Nmax, self.tol)
        self.EIM_approximation.basis_functions.enrich(basis_functions)
        self.EIM_approximation.basis_functions.save(self.EIM_approximation.folder["basis"], "basis")
        # do not increment self.EIM_approximation.N
        POD.print_eigenvalues(N)
        POD.save_eigenvalues_file(self.folder["post_processing"], "eigs")
        POD.save_retained_energy_file(self.folder["post_processing"], "retained_energy")
        return N
        
    def update_interpolation_locations(self, maximum_location):
        self.EIM_approximation.interpolation_locations.append(maximum_location)
        self.EIM_approximation.interpolation_locations.save(self.EIM_approximation.folder["reduced_operators"], "interpolation_locations")
    
    # Assemble the interpolation matrix
    def update_interpolation_matrix(self):
        self.EIM_approximation.interpolation_matrix[0] = evaluate(self.EIM_approximation.basis_functions[:self.EIM_approximation.N], self.EIM_approximation.interpolation_locations)
        self.EIM_approximation.interpolation_matrix.save(self.EIM_approximation.folder["reduced_operators"], "interpolation_matrix")
            
    # Load the precomputed snapshot
    def load_snapshot(self):
        assert self.EIM_approximation.basis_generation == "Greedy"
        mu = self.EIM_approximation.mu
        mu_index = self._training_set_parameters_to_snapshots_container_index[mu]
        assert mu == self.training_set[mu_index]
        return self.snapshots_container[mu_index]
        
    # Choose the next parameter in the offline stage in a greedy fashion
    def greedy(self):
        assert self.EIM_approximation.basis_generation == "Greedy"
        
        # Print some additional information on the consistency of the reduced basis
        self.EIM_approximation.solve()
        self.EIM_approximation.snapshot = self.load_snapshot()
        error = self.EIM_approximation.snapshot - self.EIM_approximation.basis_functions*self.EIM_approximation._interpolation_coefficients
        error_on_interpolation_locations = evaluate(error, self.EIM_approximation.interpolation_locations)
        (maximum_error, _) = max(abs(error))
        (maximum_error_on_interpolation_locations, _) = max(abs(error_on_interpolation_locations)) # for consistency check, should be zero
        print("interpolation error for current mu =", abs(maximum_error))
        print("interpolation error on interpolation locations for current mu =", abs(maximum_error_on_interpolation_locations))
        
        # Carry out the actual greedy search
        def solve_and_compute_error(mu):
            self.EIM_approximation.set_mu(mu)
            
            self.EIM_approximation.solve()
            self.EIM_approximation.snapshot = self.load_snapshot()
            (_, maximum_error, _) = self.EIM_approximation.compute_maximum_interpolation_error()
            return abs(maximum_error)
            
        print("find next mu")
        (error_max, error_argmax) = self.training_set.max(solve_and_compute_error)
        self.EIM_approximation.set_mu(self.training_set[error_argmax])
        self.greedy_selected_parameters.append(self.training_set[error_argmax])
        self.greedy_selected_parameters.save(self.folder["post_processing"], "mu_greedy")
        self.greedy_errors.append(error_max)
        self.greedy_errors.save(self.folder["post_processing"], "error_max")
        if abs(self.greedy_errors[0]) > 0.:
            return (abs(error_max), abs(error_max/self.greedy_errors[0]))
        else:
            # Trivial case, greedy will stop at the first iteration
            assert len(self.greedy_errors) == 1
            assert self.EIM_approximation.N == 1
            return (0., 0.)
    
    # Compute the error of the empirical interpolation approximation with respect to the
    # exact function over the testing set
    def error_analysis(self, N_generator=None, filename=None, **kwargs):
        assert len(kwargs) == 0 # not used in this method
            
        self._init_error_analysis(**kwargs)
        self._error_analysis(N_generator, filename, **kwargs)
        self._finalize_error_analysis(**kwargs)
        
    def _error_analysis(self, N_generator=None, filename=None, **kwargs):
        if N_generator is None:
            def N_generator(n):
                return n
                
        N = self.EIM_approximation.N
        interpolation_method_name = self.EIM_approximation.parametrized_expression.interpolation_method_name()
        description = self.EIM_approximation.parametrized_expression.description()
        
        print(TextBox(interpolation_method_name + " error analysis begins for" + "\n" + "\n".join(description), fill="="))
        print("")
        
        error_analysis_table = ErrorAnalysisTable(self.testing_set)
        error_analysis_table.set_Nmax(N)
        error_analysis_table.add_column("error", group_name="eim", operations=("mean", "max"))
        error_analysis_table.add_column("relative_error", group_name="eim", operations=("mean", "max"))
        
        for (mu_index, mu) in enumerate(self.testing_set):
            print(TextLine(interpolation_method_name + " " + str(mu_index), fill=":"))
            
            self.EIM_approximation.set_mu(mu)
            
            # Evaluate the exact function on the truth grid
            self.EIM_approximation.evaluate_parametrized_expression()
            
            for n in range(1, N + 1): # n = 1, ... N
                n_arg = N_generator(n)
                
                if n_arg is not None:
                    self.EIM_approximation.solve(n_arg)
                    (_, error, _) = self.EIM_approximation.compute_maximum_interpolation_error(n)
                    (_, relative_error, _) = self.EIM_approximation.compute_maximum_interpolation_relative_error(n)
                    error_analysis_table["error", n, mu_index] = abs(error)
                    error_analysis_table["relative_error", n, mu_index] = abs(relative_error)
                else:
                    error_analysis_table["error", n, mu_index] = NotImplemented
                    error_analysis_table["relative_error", n, mu_index] = NotImplemented
        
        # Print
        print("")
        print(error_analysis_table)
        
        print("")
        print(TextBox(interpolation_method_name + " error analysis ends for" + "\n" + "\n".join(description), fill="="))
        print("")
        
        # Export error analysis table
        error_analysis_table.save(self.folder["error_analysis"], "error_analysis" if filename is None else filename)
        
    # Compute the speedup of the empirical interpolation approximation with respect to the
    # exact function over the testing set
    def speedup_analysis(self, N_generator=None, filename=None, **kwargs):
        assert len(kwargs) == 0 # not used in this method
            
        self._init_speedup_analysis(**kwargs)
        self._speedup_analysis(N_generator, filename, **kwargs)
        self._finalize_speedup_analysis(**kwargs)
        
    # Initialize data structures required for the speedup analysis phase
    def _init_speedup_analysis(self, **kwargs):
        # Make sure to clean up snapshot cache to ensure that parametrized
        # expression evaluation is actually carried out
        self.EIM_approximation.snapshot_cache.clear()
        # ... and also disable the capability of importing/exporting truth solutions
        self.disable_import_solution = PatchInstanceMethod(self.EIM_approximation, "import_solution", lambda self_, folder, filename, solution=None: False)
        self.disable_export_solution = PatchInstanceMethod(self.EIM_approximation, "export_solution", lambda self_, folder, filename, solution=None: None)
        self.disable_import_solution.patch()
        self.disable_export_solution.patch()
        
    def _speedup_analysis(self, N_generator=None, filename=None, **kwargs):
        if N_generator is None:
            def N_generator(n):
                return n
                
        N = self.EIM_approximation.N
        interpolation_method_name = self.EIM_approximation.parametrized_expression.interpolation_method_name()
        description = self.EIM_approximation.parametrized_expression.description()
        
        print(TextBox(interpolation_method_name + " speedup analysis begins for" + "\n" + "\n".join(description), fill="="))
        print("")
        
        speedup_analysis_table = SpeedupAnalysisTable(self.testing_set)
        speedup_analysis_table.set_Nmax(N)
        speedup_analysis_table.add_column("speedup", group_name="speedup", operations=("min", "mean", "max"))
        
        evaluate_timer = Timer("parallel")
        EIM_timer = Timer("serial")
        
        for (mu_index, mu) in enumerate(self.testing_set):
            print(TextLine(interpolation_method_name + " " + str(mu_index), fill=":"))
            
            self.EIM_approximation.set_mu(mu)
            
            # Evaluate the exact function on the truth grid
            evaluate_timer.start()
            self.EIM_approximation.evaluate_parametrized_expression()
            elapsed_evaluate = evaluate_timer.stop()
            
            for n in range(1, N + 1): # n = 1, ... N
                n_arg = N_generator(n)
                
                if n_arg is not None:
                    EIM_timer.start()
                    self.EIM_approximation.solve(n_arg)
                    elapsed_EIM = EIM_timer.stop()
                    speedup_analysis_table["speedup", n, mu_index] = elapsed_evaluate/elapsed_EIM
                else:
                    speedup_analysis_table["speedup", n, mu_index] = NotImplemented
        
        # Print
        print("")
        print(speedup_analysis_table)
        
        print("")
        print(TextBox(interpolation_method_name + " speedup analysis ends for" + "\n" + "\n".join(description), fill="="))
        print("")
        
        # Export speedup analysis table
        speedup_analysis_table.save(self.folder["speedup_analysis"], "speedup_analysis" if filename is None else filename)
        
    # Finalize data structures required after the speedup analysis phase
    def _finalize_speedup_analysis(self, **kwargs):
        # Restore the capability to import/export truth solutions
        self.disable_import_solution.unpatch()
        self.disable_export_solution.unpatch()
        del self.disable_import_solution
        del self.disable_export_solution
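The greedy step above delegates to training_set.max(...), which evaluates an error functional at every training parameter and returns the maximum value together with its index (RBniCS additionally distributes this loop across processes). A serial sketch of that primitive, assuming the training set is a plain list of parameter tuples:

def training_set_max(training_set, error_functional):
    # Return (max error, argmax index) of error_functional over training_set.
    errors = [error_functional(mu) for mu in training_set]  # one solve per parameter
    argmax = max(range(len(errors)), key=errors.__getitem__)
    return (errors[argmax], argmax)

# Toy usage: pick the parameter where a mock interpolation error peaks
training_set = [(0.1,), (0.5,), (0.9,)]
(error_max, error_argmax) = training_set_max(training_set, lambda mu: abs(mu[0] - 0.6))
print(error_max, training_set[error_argmax])  # 0.5 (0.1,)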
Example #6
    class RBReduction_Class(DifferentialProblemReductionMethod_DerivedClass):
        """
        It initializes the folders used to store the snapshots and the post-processing data,
        the parameters selected by the greedy algorithm and the error estimator evaluations.

        :param truth_problem: class of the truth problem to be solved.
        :return: reduced RB class.
        """
        def __init__(self, truth_problem, **kwargs):
            # Call the parent initialization
            DifferentialProblemReductionMethod_DerivedClass.__init__(self, truth_problem, **kwargs)

            # Declare a GS object
            self.GS = None  # GramSchmidt (for problems with one component) or dict of GramSchmidt (for problems with several components)
            # I/O
            self.folder["snapshots"] = os.path.join(self.folder_prefix, "snapshots")
            self.folder["post_processing"] = os.path.join(self.folder_prefix, "post_processing")
            self.greedy_selected_parameters = GreedySelectedParametersList()
            self.greedy_error_estimators = GreedyErrorEstimatorsList()
            self.label = "RB"

        def _init_offline(self):
            # Call parent to initialize inner product and reduced problem
            output = DifferentialProblemReductionMethod_DerivedClass._init_offline(self)

            # Declare a new GS for each basis component
            if len(self.truth_problem.components) > 1:
                self.GS = dict()
                for component in self.truth_problem.components:
                    assert len(self.truth_problem.inner_product[component]) == 1
                    inner_product = self.truth_problem.inner_product[component][0]
                    self.GS[component] = GramSchmidt(self.truth_problem.V, inner_product)
            else:
                assert len(self.truth_problem.inner_product) == 1
                inner_product = self.truth_problem.inner_product[0]
                self.GS = GramSchmidt(self.truth_problem.V, inner_product)

            # Return
            return output

        def offline(self):
            """
            It performs the offline phase of the reduced order model.
            
            :return: reduced_problem where all offline data are stored.
            """
            need_to_do_offline_stage = self._init_offline()
            if need_to_do_offline_stage:
                self._offline()
            self._finalize_offline()
            return self.reduced_problem

        @snapshot_links_to_cache
        def _offline(self):
            print(
                TextBox(self.truth_problem.name() + " " + self.label +
                        " offline phase begins",
                        fill="="))
            print("")

            # Initialize first parameter to be used
            self.reduced_problem.build_reduced_operators()
            self.reduced_problem.build_error_estimation_operators()
            (absolute_error_estimator_max,
             relative_error_estimator_max) = self.greedy()
            print(
                "initial maximum absolute error estimator over training set =",
                absolute_error_estimator_max)
            print(
                "initial maximum relative error estimator over training set =",
                relative_error_estimator_max)

            print("")

            iteration = 0
            while self.reduced_problem.N < self.Nmax and relative_error_estimator_max >= self.tol:
                print(TextLine("N = " + str(self.reduced_problem.N), fill="#"))

                print("truth solve for mu =", self.truth_problem.mu)
                snapshot = self.truth_problem.solve()
                self.truth_problem.export_solution(self.folder["snapshots"],
                                                   "truth_" + str(iteration),
                                                   snapshot)
                snapshot = self.postprocess_snapshot(snapshot, iteration)

                print("update basis matrix")
                self.update_basis_matrix(snapshot)
                iteration += 1

                print("build reduced operators")
                self.reduced_problem.build_reduced_operators()

                print("reduced order solve")
                self.reduced_problem.solve()

                print("build operators for error estimation")
                self.reduced_problem.build_error_estimation_operators()

                (absolute_error_estimator_max,
                 relative_error_estimator_max) = self.greedy()
                print("maximum absolute error estimator over training set =",
                      absolute_error_estimator_max)
                print("maximum relative error estimator over training set =",
                      relative_error_estimator_max)

                print("")

            print(
                TextBox(self.truth_problem.name() + " " + self.label +
                        " offline phase ends",
                        fill="="))
            print("")

        def update_basis_matrix(self, snapshot):
            """
            It updates the basis matrix.

            :param snapshot: last offline solution calculated.
            """
            if len(self.truth_problem.components) > 1:
                for component in self.truth_problem.components:
                    new_basis_function = self.GS[component].apply(
                        snapshot,
                        self.reduced_problem.basis_functions[component][self.reduced_problem.N_bc[component]:],
                        component=component)
                    self.reduced_problem.basis_functions.enrich(new_basis_function, component=component)
                    self.reduced_problem.N[component] += 1
                self.reduced_problem.basis_functions.save(self.reduced_problem.folder["basis"], "basis")
            else:
                new_basis_function = self.GS.apply(
                    snapshot, self.reduced_problem.basis_functions[self.reduced_problem.N_bc:])
                self.reduced_problem.basis_functions.enrich(new_basis_function)
                self.reduced_problem.N += 1
                self.reduced_problem.basis_functions.save(self.reduced_problem.folder["basis"], "basis")
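In update_basis_matrix, GramSchmidt.apply orthonormalizes the new snapshot against the current basis functions in the problem's inner product before it is appended. A minimal NumPy sketch of that step, using the Euclidean inner product instead of the finite element one, so it illustrates the idea rather than reproducing the RBniCS implementation:

import numpy as np

def gram_schmidt_apply(snapshot, basis):
    # Orthonormalize snapshot against the columns of basis (modified Gram-Schmidt).
    v = snapshot.astype(float)
    for b in basis.T:
        v = v - np.dot(b, v) * b  # remove the component along each existing basis function
    norm = np.linalg.norm(v)
    assert norm > 0.0, "snapshot is linearly dependent on the current basis"
    return v / norm

# Toy usage: enrich an (initially empty) basis with two snapshots
basis = np.empty((3, 0))
for snapshot in (np.array([1.0, 1.0, 0.0]), np.array([1.0, 0.0, 0.0])):
    new_basis_function = gram_schmidt_apply(snapshot, basis)
    basis = np.column_stack([basis, new_basis_function])
print(np.round(basis.T @ basis, 12))  # identity matrix: the enriched basis is orthonormal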

        def greedy(self):
            """
            It chooses the next parameter in the offline stage in a greedy fashion: this is a wrapper
            that post-processes the result (in particular, it sets the greedily selected parameter
            and saves it to file).

            :return: the maximum error estimator and its ratio to the first one computed.
            """
            (error_estimator_max, error_estimator_argmax) = self._greedy()
            self.truth_problem.set_mu(
                self.training_set[error_estimator_argmax])
            self.greedy_selected_parameters.append(
                self.training_set[error_estimator_argmax])
            self.greedy_selected_parameters.save(
                self.folder["post_processing"], "mu_greedy")
            self.greedy_error_estimators.append(error_estimator_max)
            self.greedy_error_estimators.save(self.folder["post_processing"],
                                              "error_estimator_max")
            return (error_estimator_max,
                    error_estimator_max / self.greedy_error_estimators[0])

        def _greedy(self):
            """
            It chooses the next parameter in the offline stage in a greedy fashion. Internal method.
            
            :return: max error estimator and the respective parameter.
            """

            if self.reduced_problem.N > 0:  # skip during initialization
                # Print some additional information on the consistency of the reduced basis
                print("absolute error for current mu =",
                      self.reduced_problem.compute_error())
                print("absolute error estimator for current mu =",
                      self.reduced_problem.estimate_error())

            # Carry out the actual greedy search
            def solve_and_estimate_error(mu):
                self.reduced_problem.set_mu(mu)
                self.reduced_problem.solve()
                error_estimator = self.reduced_problem.estimate_error()
                logger.log(
                    DEBUG, "Error estimator for mu = " + str(mu) + " is " +
                    str(error_estimator))
                return error_estimator

            if self.reduced_problem.N == 0:
                print("find initial mu")
            else:
                print("find next mu")

            return self.training_set.max(solve_and_estimate_error)

        def error_analysis(self, N_generator=None, filename=None, **kwargs):
            """
            It computes the error of the reduced order approximation with respect to the full order one over the testing set.
            
            :param N_generator: generator of the dimensions of the reduced problem.
            """
            self._init_error_analysis(**kwargs)
            self._error_analysis(N_generator, filename, **kwargs)
            self._finalize_error_analysis(**kwargs)

        def _error_analysis(self, N_generator=None, filename=None, **kwargs):
            if N_generator is None:

                def N_generator():
                    N = self.reduced_problem.N
                    if isinstance(N, dict):
                        N = min(N.values())
                    for n in range(1, N + 1):  # n = 1, ... N
                        yield n

            if "components" in kwargs:
                components = kwargs["components"]
            else:
                components = self.truth_problem.components

            def N_generator_items():
                for n in N_generator():
                    assert isinstance(n, (dict, int))
                    if isinstance(n, int):
                        yield (n, n)
                    elif isinstance(n, dict):
                        assert len(n) == 1
                        (n_int, n_online_size_dict) = n.popitem()
                        assert isinstance(n_int, int)
                        assert isinstance(n_online_size_dict, OnlineSizeDict)
                        yield (n_int, n_online_size_dict)
                    else:
                        raise TypeError(
                            "Invalid item generated by N_generator")

            def N_generator_max():
                *_, Nmax = N_generator_items()
                assert isinstance(Nmax, tuple)
                assert len(Nmax) == 2
                assert isinstance(Nmax[0], int)
                return Nmax[0]

            print(
                TextBox(self.truth_problem.name() + " " + self.label +
                        " error analysis begins",
                        fill="="))
            print("")

            error_analysis_table = ErrorAnalysisTable(self.testing_set)
            error_analysis_table.set_Nmax(N_generator_max())
            if len(components) > 1:
                all_components_string = "".join(components)
                for component in components:
                    error_analysis_table.add_column("error_" + component,
                                                    group_name="solution_" +
                                                    component + "_error",
                                                    operations=("mean", "max"))
                    error_analysis_table.add_column(
                        "relative_error_" + component,
                        group_name="solution_" + component + "_relative_error",
                        operations=("mean", "max"))
                error_analysis_table.add_column(
                    "error_" + all_components_string,
                    group_name="solution_" + all_components_string + "_error",
                    operations=("mean", "max"))
                error_analysis_table.add_column(
                    "error_estimator_" + all_components_string,
                    group_name="solution_" + all_components_string + "_error",
                    operations=("mean", "max"))
                error_analysis_table.add_column(
                    "effectivity_" + all_components_string,
                    group_name="solution_" + all_components_string + "_error",
                    operations=("min", "mean", "max"))
                error_analysis_table.add_column(
                    "relative_error_" + all_components_string,
                    group_name="solution_" + all_components_string +
                    "_relative_error",
                    operations=("mean", "max"))
                error_analysis_table.add_column(
                    "relative_error_estimator_" + all_components_string,
                    group_name="solution_" + all_components_string +
                    "_relative_error",
                    operations=("mean", "max"))
                error_analysis_table.add_column(
                    "relative_effectivity_" + all_components_string,
                    group_name="solution_" + all_components_string +
                    "_relative_error",
                    operations=("min", "mean", "max"))
            else:
                component = components[0]
                error_analysis_table.add_column("error_" + component,
                                                group_name="solution_" +
                                                component + "_error",
                                                operations=("mean", "max"))
                error_analysis_table.add_column("error_estimator_" + component,
                                                group_name="solution_" +
                                                component + "_error",
                                                operations=("mean", "max"))
                error_analysis_table.add_column(
                    "effectivity_" + component,
                    group_name="solution_" + component + "_error",
                    operations=("min", "mean", "max"))
                error_analysis_table.add_column("relative_error_" + component,
                                                group_name="solution_" +
                                                component + "_relative_error",
                                                operations=("mean", "max"))
                error_analysis_table.add_column(
                    "relative_error_estimator_" + component,
                    group_name="solution_" + component + "_relative_error",
                    operations=("mean", "max"))
                error_analysis_table.add_column(
                    "relative_effectivity_" + component,
                    group_name="solution_" + component + "_relative_error",
                    operations=("min", "mean", "max"))
            error_analysis_table.add_column("error_output",
                                            group_name="output_error",
                                            operations=("mean", "max"))
            error_analysis_table.add_column("error_estimator_output",
                                            group_name="output_error",
                                            operations=("mean", "max"))
            error_analysis_table.add_column("effectivity_output",
                                            group_name="output_error",
                                            operations=("min", "mean", "max"))
            error_analysis_table.add_column("relative_error_output",
                                            group_name="output_relative_error",
                                            operations=("mean", "max"))
            error_analysis_table.add_column("relative_error_estimator_output",
                                            group_name="output_relative_error",
                                            operations=("mean", "max"))
            error_analysis_table.add_column("relative_effectivity_output",
                                            group_name="output_relative_error",
                                            operations=("min", "mean", "max"))

            for (mu_index, mu) in enumerate(self.testing_set):
                print(TextLine(str(mu_index), fill="#"))

                self.reduced_problem.set_mu(mu)

                for (n_int, n_arg) in N_generator_items():
                    self.reduced_problem.solve(n_arg, **kwargs)
                    error = self.reduced_problem.compute_error(**kwargs)
                    if len(components) > 1:
                        error[all_components_string] = sqrt(
                            sum([
                                error[component]**2 for component in components
                            ]))
                    error_estimator = self.reduced_problem.estimate_error()
                    relative_error = self.reduced_problem.compute_relative_error(
                        **kwargs)
                    if len(components) > 1:
                        relative_error[all_components_string] = sqrt(
                            sum([
                                relative_error[component]**2
                                for component in components
                            ]))
                    relative_error_estimator = self.reduced_problem.estimate_relative_error(
                    )

                    self.reduced_problem.compute_output()
                    error_output = self.reduced_problem.compute_error_output(
                        **kwargs)
                    error_output_estimator = self.reduced_problem.estimate_error_output(
                    )
                    relative_error_output = self.reduced_problem.compute_relative_error_output(
                        **kwargs)
                    relative_error_output_estimator = self.reduced_problem.estimate_relative_error_output(
                    )

                    if len(components) > 1:
                        for component in components:
                            error_analysis_table["error_" + component, n_int,
                                                 mu_index] = error[component]
                            error_analysis_table[
                                "relative_error_" + component, n_int,
                                mu_index] = relative_error[component]
                        error_analysis_table[
                            "error_" + all_components_string, n_int,
                            mu_index] = error[all_components_string]
                        error_analysis_table["error_estimator_" +
                                             all_components_string, n_int,
                                             mu_index] = error_estimator
                        error_analysis_table[
                            "effectivity_" + all_components_string, n_int,
                            mu_index] = error_analysis_table[
                                "error_estimator_" + all_components_string,
                                n_int, mu_index] / error_analysis_table[
                                    "error_" + all_components_string, n_int,
                                    mu_index]
                        error_analysis_table[
                            "relative_error_" + all_components_string, n_int,
                            mu_index] = relative_error[all_components_string]
                        error_analysis_table[
                            "relative_error_estimator_" +
                            all_components_string, n_int,
                            mu_index] = relative_error_estimator
                        error_analysis_table[
                            "relative_effectivity_" + all_components_string,
                            n_int, mu_index] = error_analysis_table[
                                "relative_error_estimator_" +
                                all_components_string, n_int,
                                mu_index] / error_analysis_table[
                                    "relative_error_" + all_components_string,
                                    n_int, mu_index]
                    else:
                        component = components[0]
                        error_analysis_table["error_" + component, n_int,
                                             mu_index] = error
                        error_analysis_table["error_estimator_" + component,
                                             n_int, mu_index] = error_estimator
                        error_analysis_table[
                            "effectivity_" + component, n_int,
                            mu_index] = error_analysis_table[
                                "error_estimator_" + component, n_int,
                                mu_index] / error_analysis_table[
                                    "error_" + component, n_int, mu_index]
                        error_analysis_table["relative_error_" + component,
                                             n_int, mu_index] = relative_error
                        error_analysis_table[
                            "relative_error_estimator_" + component, n_int,
                            mu_index] = relative_error_estimator
                        error_analysis_table[
                            "relative_effectivity_" + component, n_int,
                            mu_index] = error_analysis_table[
                                "relative_error_estimator_" + component, n_int,
                                mu_index] / error_analysis_table[
                                    "relative_error_" + component, n_int,
                                    mu_index]

                    error_analysis_table["error_output", n_int,
                                         mu_index] = error_output
                    error_analysis_table["error_estimator_output", n_int,
                                         mu_index] = error_output_estimator
                    error_analysis_table[
                        "effectivity_output", n_int,
                        mu_index] = error_analysis_table[
                            "error_estimator_output", n_int,
                            mu_index] / error_analysis_table["error_output",
                                                             n_int, mu_index]
                    error_analysis_table["relative_error_output", n_int,
                                         mu_index] = relative_error_output
                    error_analysis_table[
                        "relative_error_estimator_output", n_int,
                        mu_index] = relative_error_output_estimator
                    error_analysis_table[
                        "relative_effectivity_output", n_int,
                        mu_index] = error_analysis_table[
                            "relative_error_estimator_output", n_int,
                            mu_index] / error_analysis_table[
                                "relative_error_output", n_int, mu_index]

            # Print
            print("")
            print(error_analysis_table)

            print("")
            print(
                TextBox(self.truth_problem.name() + " " + self.label +
                        " error analysis ends",
                        fill="="))
            print("")

            # Export error analysis table
            error_analysis_table.save(
                self.folder["error_analysis"],
                "error_analysis" if filename is None else filename)

        def speedup_analysis(self, N_generator=None, filename=None, **kwargs):
            """
            It computes the speedup of the reduced order approximation with respect to the full order one over the testing set.
            
            :param N_generator: generator of the dimensions of the reduced problem.
            """
            self._init_speedup_analysis(**kwargs)
            self._speedup_analysis(N_generator, filename, **kwargs)
            self._finalize_speedup_analysis(**kwargs)

        def _speedup_analysis(self, N_generator=None, filename=None, **kwargs):
            if N_generator is None:

                def N_generator():
                    N = self.reduced_problem.N
                    if isinstance(N, dict):
                        N = min(N.values())
                    for n in range(1, N + 1):  # n = 1, ... N
                        yield n

            def N_generator_items():
                for n in N_generator():
                    assert isinstance(n, (dict, int))
                    if isinstance(n, int):
                        yield (n, n)
                    elif isinstance(n, dict):
                        assert len(n) == 1
                        (n_int, n_online_size_dict) = n.popitem()
                        assert isinstance(n_int, int)
                        assert isinstance(n_online_size_dict, OnlineSizeDict)
                        yield (n_int, n_online_size_dict)
                    else:
                        raise TypeError(
                            "Invalid item generated by N_generator")

            def N_generator_max():
                *_, Nmax = N_generator_items()
                assert isinstance(Nmax, tuple)
                assert len(Nmax) == 2
                assert isinstance(Nmax[0], int)
                return Nmax[0]

            print(
                TextBox(self.truth_problem.name() + " " + self.label +
                        " speedup analysis begins",
                        fill="="))
            print("")

            speedup_analysis_table = SpeedupAnalysisTable(self.testing_set)
            speedup_analysis_table.set_Nmax(N_generator_max())
            speedup_analysis_table.add_column("speedup_solve",
                                              group_name="speedup_solve",
                                              operations=("min", "mean",
                                                          "max"))
            speedup_analysis_table.add_column(
                "speedup_solve_and_estimate_error",
                group_name="speedup_solve_and_estimate_error",
                operations=("min", "mean", "max"))
            speedup_analysis_table.add_column(
                "speedup_solve_and_estimate_relative_error",
                group_name="speedup_solve_and_estimate_relative_error",
                operations=("min", "mean", "max"))
            speedup_analysis_table.add_column("speedup_output",
                                              group_name="speedup_output",
                                              operations=("min", "mean",
                                                          "max"))
            speedup_analysis_table.add_column(
                "speedup_output_and_estimate_error_output",
                group_name="speedup_output_and_estimate_error_output",
                operations=("min", "mean", "max"))
            speedup_analysis_table.add_column(
                "speedup_output_and_estimate_relative_error_output",
                group_name="speedup_output_and_estimate_relative_error_output",
                operations=("min", "mean", "max"))

            truth_timer = Timer("parallel")
            reduced_timer = Timer("serial")

            for (mu_index, mu) in enumerate(self.testing_set):
                print(TextLine(str(mu_index), fill="#"))

                self.reduced_problem.set_mu(mu)

                truth_timer.start()
                self.truth_problem.solve(**kwargs)
                elapsed_truth_solve = truth_timer.stop()

                truth_timer.start()
                self.truth_problem.compute_output()
                elapsed_truth_output = truth_timer.stop()

                for (n_int, n_arg) in N_generator_items():
                    reduced_timer.start()
                    solution = self.reduced_problem.solve(n_arg, **kwargs)
                    elapsed_reduced_solve = reduced_timer.stop()

                    truth_timer.start()
                    self.reduced_problem.compute_error(**kwargs)
                    elapsed_error = truth_timer.stop()

                    reduced_timer.start()
                    error_estimator = self.reduced_problem.estimate_error()
                    elapsed_error_estimator = reduced_timer.stop()

                    truth_timer.start()
                    self.reduced_problem.compute_relative_error(**kwargs)
                    elapsed_relative_error = truth_timer.stop()

                    reduced_timer.start()
                    relative_error_estimator = self.reduced_problem.estimate_relative_error(
                    )
                    elapsed_relative_error_estimator = reduced_timer.stop()

                    reduced_timer.start()
                    output = self.reduced_problem.compute_output()
                    elapsed_reduced_output = reduced_timer.stop()

                    truth_timer.start()
                    self.reduced_problem.compute_error_output(**kwargs)
                    elapsed_error_output = truth_timer.stop()

                    reduced_timer.start()
                    error_estimator_output = self.reduced_problem.estimate_error_output(
                    )
                    elapsed_error_estimator_output = reduced_timer.stop()

                    truth_timer.start()
                    self.reduced_problem.compute_relative_error_output(
                        **kwargs)
                    elapsed_relative_error_output = truth_timer.stop()

                    reduced_timer.start()
                    relative_error_estimator_output = self.reduced_problem.estimate_relative_error_output(
                    )
                    elapsed_relative_error_estimator_output = reduced_timer.stop(
                    )

                    if solution is not NotImplemented:
                        speedup_analysis_table[
                            "speedup_solve", n_int,
                            mu_index] = elapsed_truth_solve / elapsed_reduced_solve
                    else:
                        speedup_analysis_table["speedup_solve", n_int,
                                               mu_index] = NotImplemented
                    if error_estimator is not NotImplemented:
                        speedup_analysis_table[
                            "speedup_solve_and_estimate_error", n_int,
                            mu_index] = (elapsed_truth_solve + elapsed_error
                                         ) / (elapsed_reduced_solve +
                                              elapsed_error_estimator)
                    else:
                        speedup_analysis_table[
                            "speedup_solve_and_estimate_error", n_int,
                            mu_index] = NotImplemented
                    if relative_error_estimator is not NotImplemented:
                        speedup_analysis_table[
                            "speedup_solve_and_estimate_relative_error", n_int,
                            mu_index] = (elapsed_truth_solve +
                                         elapsed_relative_error) / (
                                             elapsed_reduced_solve +
                                             elapsed_relative_error_estimator)
                    else:
                        speedup_analysis_table[
                            "speedup_solve_and_estimate_relative_error", n_int,
                            mu_index] = NotImplemented
                    if output is not NotImplemented:
                        speedup_analysis_table[
                            "speedup_output", n_int,
                            mu_index] = (elapsed_truth_solve +
                                         elapsed_truth_output) / (
                                             elapsed_reduced_solve +
                                             elapsed_reduced_output)
                    else:
                        speedup_analysis_table["speedup_output", n_int,
                                               mu_index] = NotImplemented
                    if error_estimator_output is not NotImplemented:
                        assert output is not NotImplemented
                        speedup_analysis_table[
                            "speedup_output_and_estimate_error_output", n_int,
                            mu_index] = (elapsed_truth_solve +
                                         elapsed_truth_output +
                                         elapsed_error_output) / (
                                             elapsed_reduced_solve +
                                             elapsed_reduced_output +
                                             elapsed_error_estimator_output)
                    else:
                        speedup_analysis_table[
                            "speedup_output_and_estimate_error_output", n_int,
                            mu_index] = NotImplemented
                    if relative_error_estimator_output is not NotImplemented:
                        assert output is not NotImplemented
                        speedup_analysis_table[
                            "speedup_output_and_estimate_relative_error_output",
                            n_int, mu_index] = (
                                elapsed_truth_solve + elapsed_truth_output +
                                elapsed_relative_error_output) / (
                                    elapsed_reduced_solve +
                                    elapsed_reduced_output +
                                    elapsed_relative_error_estimator_output)
                    else:
                        speedup_analysis_table[
                            "speedup_output_and_estimate_relative_error_output",
                            n_int, mu_index] = NotImplemented

            # Print
            print("")
            print(speedup_analysis_table)

            print("")
            print(
                TextBox(self.truth_problem.name() + " " + self.label +
                        " speedup analysis ends",
                        fill="="))
            print("")

            # Export speedup analysis table
            speedup_analysis_table.save(
                self.folder["speedup_analysis"],
                "speedup_analysis" if filename is None else filename)
Example No. 7
    def __init__(self, truth_problem, folder_prefix):
        # Call the parent initialization
        ParametrizedProblem.__init__(self, folder_prefix)
        # Store the parametrized problem object and the bc list
        self.truth_problem = truth_problem

        # Define additional storage for SCM
        # Minimum and maximum values of the bounding box; vectors of size Q
        self.bounding_box_min = BoundingBoxSideList()
        self.bounding_box_max = BoundingBoxSideList()
        self.training_set = None  # SCM algorithm needs the training set also in the online stage
        # greedy_selected_parameters: list storing the parameters selected during the training phase
        self.greedy_selected_parameters = GreedySelectedParametersList()
        # greedy_selected_parameters_complement: dict, over N, of list storing the complement of parameters
        # selected during the training phase
        self.greedy_selected_parameters_complement = dict()
        # upper_bound_vectors: list of Q-dimensional vectors storing the infimizing elements at the greedily
        # selected parameters
        self.upper_bound_vectors = UpperBoundsList()
        self.N = 0

        # Storage for online computations
        self._stability_factor_lower_bound = 0.
        self._stability_factor_upper_bound = 0.

        # I/O
        self.folder["cache"] = os.path.join(self.folder_prefix,
                                            "reduced_cache")
        self.folder["reduced_operators"] = os.path.join(
            self.folder_prefix, "reduced_operators")

        def _stability_factor_cache_key_generator(*args, **kwargs):
            assert len(args) == 2
            assert args[0] == self.mu
            assert len(kwargs) == 0
            return self._cache_key(args[1])

        def _stability_factor_cache_filename_generator(*args, **kwargs):
            assert len(args) == 2
            assert args[0] == self.mu
            assert len(kwargs) == 0
            return self._cache_file(args[1])

        def _stability_factor_lower_bound_cache_import(filename):
            self.import_stability_factor_lower_bound(self.folder["cache"],
                                                     filename)
            return self._stability_factor_lower_bound

        def _stability_factor_lower_bound_cache_export(filename):
            self.export_stability_factor_lower_bound(self.folder["cache"],
                                                     filename)

        self._stability_factor_lower_bound_cache = Cache(
            "SCM",
            key_generator=_stability_factor_cache_key_generator,
            import_=_stability_factor_lower_bound_cache_import,
            export=_stability_factor_lower_bound_cache_export,
            filename_generator=_stability_factor_cache_filename_generator)
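        # The Cache object presumably combines an in-memory lookup, keyed by the
        # (mu, N) pair returned by the key generator, with disk persistence through
        # the import_/export callbacks and the hashed filename generator.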

        def _stability_factor_upper_bound_cache_import(filename):
            self.import_stability_factor_upper_bound(self.folder["cache"],
                                                     filename)
            return self._stability_factor_upper_bound

        def _stability_factor_upper_bound_cache_export(filename):
            self.export_stability_factor_upper_bound(self.folder["cache"],
                                                     filename)

        self._stability_factor_upper_bound_cache = Cache(
            "SCM",
            key_generator=_stability_factor_cache_key_generator,
            import_=_stability_factor_upper_bound_cache_import,
            export=_stability_factor_upper_bound_cache_export,
            filename_generator=_stability_factor_cache_filename_generator)

        # Stability factor eigen problem
        self.stability_factor_calculator = ParametrizedStabilityFactorEigenProblem(
            self.truth_problem, "smallest",
            self.truth_problem._eigen_solver_parameters["stability_factor"],
            self.folder_prefix)
Example No. 8
class SCMApproximation(ParametrizedProblem):

    # Default initialization of members
    @sync_setters("truth_problem", "set_mu", "mu")
    @sync_setters("truth_problem", "set_mu_range", "mu_range")
    def __init__(self, truth_problem, folder_prefix):
        # Call the parent initialization
        ParametrizedProblem.__init__(self, folder_prefix)
        # Store the parametrized problem object and the bc list
        self.truth_problem = truth_problem

        # Define additional storage for SCM
        # Minimum and maximum values of the bounding box; vectors of size Q
        self.bounding_box_min = BoundingBoxSideList()
        self.bounding_box_max = BoundingBoxSideList()
        self.training_set = None  # SCM algorithm needs the training set also in the online stage
        # greedy_selected_parameters: list storing the parameters selected during the training phase
        self.greedy_selected_parameters = GreedySelectedParametersList()
        # greedy_selected_parameters_complement: dict, over N, of list storing the complement of parameters
        # selected during the training phase
        self.greedy_selected_parameters_complement = dict()
        # upper_bound_vectors: list of Q-dimensional vectors storing the infimizing elements at the greedily
        # selected parameters
        self.upper_bound_vectors = UpperBoundsList()
        self.N = 0

        # Storage for online computations
        self._stability_factor_lower_bound = 0.
        self._stability_factor_upper_bound = 0.

        # I/O
        self.folder["cache"] = os.path.join(self.folder_prefix,
                                            "reduced_cache")
        self.folder["reduced_operators"] = os.path.join(
            self.folder_prefix, "reduced_operators")

        def _stability_factor_cache_key_generator(*args, **kwargs):
            assert len(args) == 2
            assert args[0] == self.mu
            assert len(kwargs) == 0
            return self._cache_key(args[1])

        def _stability_factor_cache_filename_generator(*args, **kwargs):
            assert len(args) == 2
            assert args[0] == self.mu
            assert len(kwargs) == 0
            return self._cache_file(args[1])

        def _stability_factor_lower_bound_cache_import(filename):
            self.import_stability_factor_lower_bound(self.folder["cache"],
                                                     filename)
            return self._stability_factor_lower_bound

        def _stability_factor_lower_bound_cache_export(filename):
            self.export_stability_factor_lower_bound(self.folder["cache"],
                                                     filename)

        self._stability_factor_lower_bound_cache = Cache(
            "SCM",
            key_generator=_stability_factor_cache_key_generator,
            import_=_stability_factor_lower_bound_cache_import,
            export=_stability_factor_lower_bound_cache_export,
            filename_generator=_stability_factor_cache_filename_generator)

        def _stability_factor_upper_bound_cache_import(filename):
            self.import_stability_factor_upper_bound(self.folder["cache"],
                                                     filename)
            return self._stability_factor_upper_bound

        def _stability_factor_upper_bound_cache_export(filename):
            self.export_stability_factor_upper_bound(self.folder["cache"],
                                                     filename)

        self._stability_factor_upper_bound_cache = Cache(
            "SCM",
            key_generator=_stability_factor_cache_key_generator,
            import_=_stability_factor_upper_bound_cache_import,
            export=_stability_factor_upper_bound_cache_export,
            filename_generator=_stability_factor_cache_filename_generator)

        # Stability factor eigen problem
        self.stability_factor_calculator = ParametrizedStabilityFactorEigenProblem(
            self.truth_problem, "smallest",
            self.truth_problem._eigen_solver_parameters["stability_factor"],
            self.folder_prefix)

    # Initialize data structures required for the online phase
    def init(self, current_stage="online"):
        assert current_stage in ("online", "offline")
        # Init truth problem, to set up stability_factor_{left,right}_hand_matrix operators
        self.truth_problem.init()
        # Init exact stability factor computations
        self.stability_factor_calculator.init()
        # Read/Initialize reduced order data structures
        if current_stage == "online":
            self.bounding_box_min.load(self.folder["reduced_operators"],
                                       "bounding_box_min")
            self.bounding_box_max.load(self.folder["reduced_operators"],
                                       "bounding_box_max")
            self.training_set.load(self.folder["reduced_operators"],
                                   "training_set")
            self.greedy_selected_parameters.load(
                self.folder["reduced_operators"], "greedy_selected_parameters")
            self.upper_bound_vectors.load(self.folder["reduced_operators"],
                                          "upper_bound_vectors")
            # Set the value of N
            self.N = len(self.greedy_selected_parameters)
        elif current_stage == "offline":
            # Properly resize structures related to operator
            Q = self.truth_problem.Q["stability_factor_left_hand_matrix"]
            self.bounding_box_min = BoundingBoxSideList(Q)
            self.bounding_box_max = BoundingBoxSideList(Q)
            # Save the training set, which was passed by the reduction method,
            # in order to use it online
            assert self.training_set is not None
            self.training_set.save(self.folder["reduced_operators"],
                                   "training_set")
            # Properly initialize structures related to greedy selected parameters
            assert len(self.greedy_selected_parameters) == 0
        else:
            raise ValueError("Invalid stage in init().")

    def evaluate_stability_factor(self):
        return self.stability_factor_calculator.solve()

    # Get a lower bound for the stability factor
    def get_stability_factor_lower_bound(self, N=None):
        if N is None:
            N = self.N
        try:
            self._stability_factor_lower_bound = self._stability_factor_lower_bound_cache[
                self.mu, N]
        except KeyError:
            self._get_stability_factor_lower_bound(N)
            self._stability_factor_lower_bound_cache[
                self.mu, N] = self._stability_factor_lower_bound
        return self._stability_factor_lower_bound

    def _get_stability_factor_lower_bound(self, N):
        assert N <= len(self.greedy_selected_parameters)
        Q = self.truth_problem.Q["stability_factor_left_hand_matrix"]
        M_e = N
        M_p = min(
            N,
            len(self.training_set) - len(self.greedy_selected_parameters))

        # 1. Constrain the Q variables to be in the bounding box
        bounds = list()  # of Q pairs
        for q in range(Q):
            assert self.bounding_box_min[q] <= self.bounding_box_max[q]
            bounds.append((self.bounding_box_min[q], self.bounding_box_max[q]))

        # 2. Add three different sets of constraints.
        #    Our constraints are of the form
        #       a^T * x >= b
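        #    Writing theta_q(mu) for the affine expansion coefficients of the
        #    stability factor left-hand matrix, the lower bound is the value of
        #    the linear program
        #       min   sum_q theta_q(mu) * x_q
        #       s.t.  x in the bounding box                              (step 1)
        #             sum_q theta_q(omega_j) * x_q >= alpha(omega_j)     (step 2a)
        #             sum_q theta_q(nu_j) * x_q >= alpha_LB(nu_j; N - 1) (step 2b)
        #             sum_q theta_q(mu) * x_q >= 0                       (step 2c)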
        constraints_matrix = Matrix(M_e + M_p + 1, Q)
        constraints_vector = Vector(M_e + M_p + 1)

        # 2a. Add constraints: one constraint is added for each of the samples closest to mu among the selected parameters
        mu_bak = self.mu
        closest_selected_parameters = self._closest_selected_parameters(
            M_e, N, self.mu)
        for (j, omega) in enumerate(closest_selected_parameters):
            # Overwrite parameter values
            self.set_mu(omega)

            # Compute theta
            current_theta = self.truth_problem.compute_theta(
                "stability_factor_left_hand_matrix")

            # Assemble the LHS of the constraint
            for q in range(Q):
                constraints_matrix[j, q] = current_theta[q]

            # Assemble the RHS of the constraint: note that computations for this call may be already cached
            (constraints_vector[j], _) = self.evaluate_stability_factor()
        self.set_mu(mu_bak)

        # 2b. Add constraints: also constrain the closest points in the complement of the selected
        #                      parameters, with RHS depending on previously computed lower bounds
        mu_bak = self.mu
        closest_selected_parameters_complement = self._closest_unselected_parameters(
            M_p, N, self.mu)
        for (j, nu) in enumerate(closest_selected_parameters_complement):
            # Overwrite parameter values
            self.set_mu(nu)

            # Compute theta
            current_theta = self.truth_problem.compute_theta(
                "stability_factor_left_hand_matrix")

            # Assemble the LHS of the constraint
            for q in range(Q):
                constraints_matrix[M_e + j, q] = current_theta[q]

            # Assemble the RHS of the constraint: note that computations for this call may be already cached
            if N > 1:
                constraints_vector[
                    M_e + j] = self.get_stability_factor_lower_bound(N - 1)
            else:
                constraints_vector[M_e + j] = 0.
        self.set_mu(mu_bak)

        # 2c. Add constraints: also constrain the stability factor at mu to be nonnegative
        # Compute theta
        current_theta = self.truth_problem.compute_theta(
            "stability_factor_left_hand_matrix")

        # Assemble the LHS of the constraint
        for q in range(Q):
            constraints_matrix[M_e + M_p, q] = current_theta[q]

        # Assemble the RHS of the constraint
        constraints_vector[M_e + M_p] = 0.

        # 3. Add cost function coefficients
        cost = Vector(Q)
        for q in range(Q):
            cost[q] = current_theta[q]

        # 4. Solve the linear programming problem
        linear_program = LinearProgramSolver(cost, constraints_matrix,
                                             constraints_vector, bounds)
        try:
            stability_factor_lower_bound = linear_program.solve()
        except LinearProgramSolverError:
            print("SCM warning at mu = " + str(self.mu) +
                  ": error occured while solving linear program.")
            print(
                "Please consider switching to a different solver. A truth eigensolve will be performed."
            )

            (stability_factor_lower_bound,
             _) = self.evaluate_stability_factor()

        self._stability_factor_lower_bound = stability_factor_lower_bound

    # Get an upper bound for the stability factor
    def get_stability_factor_upper_bound(self, N=None):
        if N is None:
            N = self.N
        try:
            self._stability_factor_upper_bound = self._stability_factor_upper_bound_cache[
                self.mu, N]
        except KeyError:
            self._get_stability_factor_upper_bound(N)
            self._stability_factor_upper_bound_cache[
                self.mu, N] = self._stability_factor_upper_bound
        return self._stability_factor_upper_bound

    def _get_stability_factor_upper_bound(self, N):
        Q = self.truth_problem.Q["stability_factor_left_hand_matrix"]
        upper_bound_vectors = self.upper_bound_vectors

        stability_factor_upper_bound = None
        current_theta = self.truth_problem.compute_theta(
            "stability_factor_left_hand_matrix")

        for j in range(N):
            upper_bound_vector = upper_bound_vectors[j]

            # Compute the cost function for fixed omega
            obj = 0.
            for q in range(Q):
                obj += upper_bound_vector[q] * current_theta[q]

            if stability_factor_upper_bound is None or obj < stability_factor_upper_bound:
                stability_factor_upper_bound = obj

        assert stability_factor_upper_bound is not None
        self._stability_factor_upper_bound = stability_factor_upper_bound

    def _cache_key(self, N):
        return (self.mu, N)

    def _cache_file(self, N):
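        # Hash the cache key to obtain a fixed-length, filesystem-safe filename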
        return hashlib.sha1(str(
            self._cache_key(N)).encode("utf-8")).hexdigest()

    def _closest_selected_parameters(self, M, N, mu):
        return self.greedy_selected_parameters[:N].closest(M, mu)

    def _closest_unselected_parameters(self, M, N, mu):
        if N not in self.greedy_selected_parameters_complement:
            self.greedy_selected_parameters_complement[
                N] = self.training_set.diff(
                    self.greedy_selected_parameters[:N])
        return self.greedy_selected_parameters_complement[N].closest(M, mu)

    def export_stability_factor_lower_bound(self, folder=None, filename=None):
        if folder is None:
            folder = self.folder_prefix
        if filename is None:
            filename = "stability_factor"
        export([self._stability_factor_lower_bound], folder,
               filename + "_lower_bound")

    def export_stability_factor_upper_bound(self, folder=None, filename=None):
        if folder is None:
            folder = self.folder_prefix
        if filename is None:
            filename = "stability_factor"
        export([self._stability_factor_upper_bound], folder,
               filename + "_upper_bound")

    def import_stability_factor_lower_bound(self, folder=None, filename=None):
        if folder is None:
            folder = self.folder_prefix
        if filename is None:
            filename = "stability_factor"
        stability_factor_lower_bound_storage = [0.]
        import_(stability_factor_lower_bound_storage, folder,
                filename + "_lower_bound")
        assert len(stability_factor_lower_bound_storage) == 1
        self._stability_factor_lower_bound = stability_factor_lower_bound_storage[
            0]

    def import_stability_factor_upper_bound(self, folder=None, filename=None):
        if folder is None:
            folder = self.folder_prefix
        if filename is None:
            filename = "stability_factor"
        stability_factor_upper_bound_storage = [0.]
        import_(stability_factor_upper_bound_storage, folder,
                filename + "_upper_bound")
        assert len(stability_factor_upper_bound_storage) == 1
        self._stability_factor_upper_bound = stability_factor_upper_bound_storage[
            0]
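
A minimal, hypothetical sketch of the online stage for the class above; the construction and offline training of the SCMApproximation instance (here named scm) are assumed to have happened elsewhere, and the parameter value is illustrative:

scm.init("online")
scm.set_mu((0.5, 1.0))
alpha_LB = scm.get_stability_factor_lower_bound()
alpha_UB = scm.get_stability_factor_upper_bound()
# By construction the two bounds bracket the exact stability factor:
#   alpha_LB <= alpha(mu) <= alpha_UB
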
Example No. 9
class SCMApproximation(ParametrizedProblem):

    # Default initialization of members
    @sync_setters("truth_problem", "set_mu", "mu")
    @sync_setters("truth_problem", "set_mu_range", "mu_range")
    def __init__(self, truth_problem, folder_prefix, **kwargs):
        # Call the parent initialization
        ParametrizedProblem.__init__(self, folder_prefix)
        # Store the parametrized problem object and the bc list
        self.truth_problem = truth_problem

        # Define additional storage for SCM
        # Minimum and maximum values of the bounding box \mathcal{B}; vectors of size Q
        self.B_min = BoundingBoxSideList()
        self.B_max = BoundingBoxSideList()
        self.training_set = None  # SCM algorithm needs the training set also in the online stage
        # List storing the parameters selected during the training phase
        self.greedy_selected_parameters = GreedySelectedParametersList()
        # Dict, over N, of lists storing the complement of the parameters selected during the training phase
        self.greedy_selected_parameters_complement = dict()
        # List of Q-dimensional vectors storing the infimizing elements at the greedily selected parameters
        self.UB_vectors = UpperBoundsList()
        self.N = 0
        # Integer denoting the number of constraints based on the exact eigenvalues, or None
        self.M_e = kwargs["M_e"]
        # Integer denoting the number of constraints based on the previous lower bounds, or None
        self.M_p = kwargs["M_p"]

        # I/O
        self.folder["cache"] = os.path.join(self.folder_prefix,
                                            "reduced_cache")
        self.cache_config = config.get("SCM", "cache")
        self.folder["reduced_operators"] = os.path.join(
            self.folder_prefix, "reduced_operators")

        # Coercivity constant eigen problem
        self.exact_coercivity_constant_calculator = ParametrizedCoercivityConstantEigenProblem(
            truth_problem, "a", True, "smallest",
            kwargs["coercivity_eigensolver_parameters"], self.folder_prefix)

        # Store here input parameters provided by the user that are needed by the reduction method
        self._input_storage_for_SCM_reduction = dict()
        self._input_storage_for_SCM_reduction[
            "bounding_box_minimum_eigensolver_parameters"] = kwargs[
                "bounding_box_minimum_eigensolver_parameters"]
        self._input_storage_for_SCM_reduction[
            "bounding_box_maximum_eigensolver_parameters"] = kwargs[
                "bounding_box_maximum_eigensolver_parameters"]

        # Avoid useless linear programming solves
        self._alpha_LB = 0.
        self._alpha_LB_cache = dict()
        self._alpha_UB = 0.
        self._alpha_UB_cache = dict()

    # Initialize data structures required for the online phase
    def init(self, current_stage="online"):
        assert current_stage in ("online", "offline")
        # Read/Initialize reduced order data structures
        if current_stage == "online":
            self.B_min.load(self.folder["reduced_operators"], "B_min")
            self.B_max.load(self.folder["reduced_operators"], "B_max")
            self.training_set.load(self.folder["reduced_operators"],
                                   "training_set")
            self.greedy_selected_parameters.load(
                self.folder["reduced_operators"], "greedy_selected_parameters")
            self.UB_vectors.load(self.folder["reduced_operators"],
                                 "UB_vectors")
            # Set the value of N
            self.N = len(self.greedy_selected_parameters)
        elif current_stage == "offline":
            self.truth_problem.init()
            # Properly resize structures related to operator
            Q = self.truth_problem.Q["a"]
            self.B_min = BoundingBoxSideList(Q)
            self.B_max = BoundingBoxSideList(Q)
            # Save the training set, which was passed by the reduction method,
            # in order to use it online
            assert self.training_set is not None
            self.training_set.save(self.folder["reduced_operators"],
                                   "training_set")
            # Properly initialize structures related to greedy selected parameters
            assert len(self.greedy_selected_parameters) == 0
            # Init exact coercivity constant computations
            self.exact_coercivity_constant_calculator.init()
        else:
            raise ValueError("Invalid stage in init().")

    def evaluate_stability_factor(self):
        return self.exact_coercivity_constant_calculator.solve()

    # Get a lower bound for alpha
    def get_stability_factor_lower_bound(self, N=None):
        if N is None:
            N = self.N
        assert N <= len(self.greedy_selected_parameters)
        (cache_key, cache_file) = self._cache_key_and_file(N)
        if "RAM" in self.cache_config and cache_key in self._alpha_LB_cache:
            log(PROGRESS, "Loading stability factor lower bound from cache")
            self._alpha_LB = self._alpha_LB_cache[cache_key]
        elif "Disk" in self.cache_config and self.import_stability_factor_lower_bound(
                self.folder["cache"], cache_file):
            log(PROGRESS, "Loading stability factor lower bound from file")
            if "RAM" in self.cache_config:
                self._alpha_LB_cache[cache_key] = self._alpha_LB
        else:
            log(PROGRESS,
                "Solving stability factor lower bound reduced problem")
            Q = self.truth_problem.Q["a"]
            M_e = min(self.M_e if self.M_e is not None else N, N,
                      len(self.greedy_selected_parameters))
            M_p = min(
                self.M_p if self.M_p is not None else N, N,
                len(self.training_set) - len(self.greedy_selected_parameters))
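            # When not provided by the user, M_e and M_p default to N; in any case
            # they are capped by the number of available selected (respectively,
            # unselected) training set parameters.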

            # 1. Constrain the Q variables to be in the bounding box
            bounds = list()  # of Q pairs
            for q in range(Q):
                assert self.B_min[q] <= self.B_max[q]
                bounds.append((self.B_min[q], self.B_max[q]))

            # 2. Add three different sets of constraints.
            #    Our constraints are of the form
            #       a^T * x >= b
            constraints_matrix = Matrix(M_e + M_p + 1, Q)
            constraints_vector = Vector(M_e + M_p + 1)

            # 2a. Add constraints: one constraint is added for each of the samples closest to mu among the selected parameters
            mu_bak = self.mu
            closest_selected_parameters = self._closest_selected_parameters(
                M_e, N, self.mu)
            for (j, omega) in enumerate(closest_selected_parameters):
                # Overwrite parameter values
                self.set_mu(omega)

                # Compute theta
                current_theta_a = self.truth_problem.compute_theta("a")

                # Assemble the LHS of the constraint
                for q in range(Q):
                    constraints_matrix[j, q] = current_theta_a[q]

                # Assemble the RHS of the constraint
                # Note that computations for this call may be already cached
                (constraints_vector[j], _) = self.evaluate_stability_factor()
            self.set_mu(mu_bak)

            # 2b. Add constraints: also constrain the closest points in the complement of the selected
            #                      parameters, with RHS depending on previously computed lower bounds
            mu_bak = self.mu
            closest_selected_parameters_complement = self._closest_unselected_parameters(
                M_p, N, self.mu)
            for (j, nu) in enumerate(closest_selected_parameters_complement):
                # Overwrite parameter values
                self.set_mu(nu)

                # Compute theta
                current_theta_a = self.truth_problem.compute_theta("a")

                # Assemble the LHS of the constraint
                for q in range(Q):
                    constraints_matrix[M_e + j, q] = current_theta_a[q]

                # Assemble the RHS of the constraint
                if N > 1:
                    # Note that computations for this call may be already cached
                    constraints_vector[M_e + j] = self.get_stability_factor_lower_bound(N - 1)
                else:
                    constraints_vector[M_e + j] = 0.
            self.set_mu(mu_bak)

            # 2c. Add constraints: also constrain the coercivity constant at mu to be nonnegative
            # Compute theta
            current_theta_a = self.truth_problem.compute_theta("a")

            # Assemble the LHS of the constraint
            for q in range(Q):
                constraints_matrix[M_e + M_p, q] = current_theta_a[q]

            # Assemble the RHS of the constraint
            constraints_vector[M_e + M_p] = 0.

            # 3. Add cost function coefficients
            cost = Vector(Q)
            for q in range(Q):
                cost[q] = current_theta_a[q]

            # 4. Solve the linear programming problem
            linear_program = LinearProgramSolver(cost, constraints_matrix,
                                                 constraints_vector, bounds)
            try:
                alpha_LB = linear_program.solve()
            except LinearProgramSolverError:
                print("SCM warning at mu = " + str(self.mu) +
                      ": error occured while solving linear program.")
                print(
                    "Please consider switching to a different solver. A truth eigensolve will be performed."
                )

                (alpha_LB, _) = self.evaluate_stability_factor()

            self._alpha_LB = alpha_LB
            if "RAM" in self.cache_config:
                self._alpha_LB_cache[cache_key] = alpha_LB
            # Note that we export to file regardless of config options,
            # because they may change across different runs
            self.export_stability_factor_lower_bound(self.folder["cache"], cache_file)
        return self._alpha_LB

    # Get an upper bound for alpha
    def get_stability_factor_upper_bound(self, N=None):
        if N is None:
            N = self.N
        (cache_key, cache_file) = self._cache_key_and_file(N)
        if "RAM" in self.cache_config and cache_key in self._alpha_UB_cache:
            log(PROGRESS, "Loading stability factor upper bound from cache")
            self._alpha_UB = self._alpha_UB_cache[cache_key]
        elif "Disk" in self.cache_config and self.import_stability_factor_upper_bound(
                self.folder["cache"], cache_file):
            log(PROGRESS, "Loading stability factor upper bound from file")
            if "RAM" in self.cache_config:
                self._alpha_UB_cache[cache_key] = self._alpha_UB
        else:
            log(PROGRESS,
                "Solving stability factor upper bound reduced problem")
            Q = self.truth_problem.Q["a"]
            UB_vectors = self.UB_vectors

            alpha_UB = None
            current_theta_a = self.truth_problem.compute_theta("a")

            for j in range(N):
                UB_vector = UB_vectors[j]

                # Compute the cost function for fixed omega
                obj = 0.
                for q in range(Q):
                    obj += UB_vector[q] * current_theta_a[q]

                if alpha_UB is None or obj < alpha_UB:
                    alpha_UB = obj

            assert alpha_UB is not None
            self._alpha_UB = alpha_UB
            if "RAM" in self.cache_config:
                self._alpha_UB_cache[cache_key] = alpha_UB
            # Note that we export to file regardless of config options,
            # because they may change across different runs
            self.export_stability_factor_upper_bound(self.folder["cache"], cache_file)
        return self._alpha_UB

    def _cache_key_and_file(self, N):
        cache_key = (self.mu, N)
        cache_file = hashlib.sha1(str(cache_key).encode("utf-8")).hexdigest()
        return (cache_key, cache_file)

    def _closest_selected_parameters(self, M, N, mu):
        return self.greedy_selected_parameters[:N].closest(M, mu)

    def _closest_unselected_parameters(self, M, N, mu):
        if N not in self.greedy_selected_parameters_complement:
            self.greedy_selected_parameters_complement[
                N] = self.training_set.diff(
                    self.greedy_selected_parameters[:N])
        return self.greedy_selected_parameters_complement[N].closest(M, mu)

    def export_stability_factor_lower_bound(self, folder, filename):
        export([self._alpha_LB], folder, filename + "_LB")

    def export_stability_factor_upper_bound(self, folder, filename):
        export([self._alpha_UB], folder, filename + "_UB")

    def import_stability_factor_lower_bound(self, folder, filename):
        eigenvalue_storage = [0.]
        import_successful = import_(eigenvalue_storage, folder,
                                    filename + "_LB")
        if import_successful:
            assert len(eigenvalue_storage) == 1
            self._alpha_LB = eigenvalue_storage[0]
        return import_successful

    def import_stability_factor_upper_bound(self, folder, filename):
        eigenvalue_storage = [0.]
        import_successful = import_(eigenvalue_storage, folder,
                                    filename + "_UB")
        if import_successful:
            assert len(eigenvalue_storage) == 1
            self._alpha_UB = eigenvalue_storage[0]
        return import_successful
Example No. 10
    class RBReduction_Class(DifferentialProblemReductionMethod_DerivedClass):
        """
        The folders used to store the snapshots and for the post processing data, the parameters for the greedy algorithm and the error estimator evaluations are initialized.
        
        :param truth_problem: class of the truth problem to be solved.
        :return: reduced RB class.
       
        """
        def __init__(self, truth_problem, **kwargs):
            # Call the parent initialization
            DifferentialProblemReductionMethod_DerivedClass.__init__(
                self, truth_problem, **kwargs)

            # Declare a GS object
            self.GS = None  # GramSchmidt (for problems with one component) or dict of GramSchmidt (for problems with several components)
            # I/O
            self.folder["snapshots"] = os.path.join(self.folder_prefix,
                                                    "snapshots")
            self.folder["post_processing"] = os.path.join(
                self.folder_prefix, "post_processing")
            self.greedy_selected_parameters = GreedySelectedParametersList()
            self.greedy_error_estimators = GreedyErrorEstimatorsList()
            self.label = "RB"

        def _init_offline(self):
            # Call parent to initialize inner product and reduced problem
            output = DifferentialProblemReductionMethod_DerivedClass._init_offline(
                self)

            # Declare a new GS for each basis component
            if len(self.truth_problem.components) > 1:
                self.GS = dict()
                for component in self.truth_problem.components:
                    assert len(
                        self.truth_problem.inner_product[component]) == 1
                    inner_product = self.truth_problem.inner_product[
                        component][0]
                    self.GS[component] = GramSchmidt(inner_product)
            else:
                assert len(self.truth_problem.inner_product) == 1
                inner_product = self.truth_problem.inner_product[0]
                self.GS = GramSchmidt(inner_product)

            # The current value of mu may have been already used when computing lifting functions.
            # If so, we do not want to use that value again at the first greedy iteration, because
            # for steady linear problems with only one parametrized BC the resulting first snapshot
            # would have been already stored in the basis, being exactly equal to the lifting.
            # To this end, we arbitrarily change the current value of mu to the first parameter
            # in the training set.
            if output:  # do not bother changing the current mu if the offline stage has already been completed
                need_to_change_mu = False
                if len(self.truth_problem.components) > 1:
                    for component in self.truth_problem.components:
                        if self.reduced_problem.dirichlet_bc[
                                component] and not self.reduced_problem.dirichlet_bc_are_homogeneous[
                                    component]:
                            need_to_change_mu = True
                            break
                else:
                    if self.reduced_problem.dirichlet_bc and not self.reduced_problem.dirichlet_bc_are_homogeneous:
                        need_to_change_mu = True
                # There is not much we can change in the trivial case without any parameter!
                if need_to_change_mu and len(self.truth_problem.mu) > 0:
                    new_mu = self.training_set[0]
                    assert self.truth_problem.mu != new_mu
                    self.truth_problem.set_mu(new_mu)

            # Return
            return output

        def offline(self):
            """
            It performs the offline phase of the reduced order model.
            
            :return: reduced_problem where all offline data are stored.
            """
            need_to_do_offline_stage = self._init_offline()
            if need_to_do_offline_stage:
                self._offline()
            self._finalize_offline()
            return self.reduced_problem

        def _offline(self):
            print(
                TextBox(self.truth_problem.name() + " " + self.label +
                        " offline phase begins",
                        fill="="))
            print("")

            iteration = 0
            relative_error_estimator_max = 2. * self.tol
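            # Initialize above the tolerance, so that the greedy loop below is
            # entered at least once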
            while self.reduced_problem.N < self.Nmax and relative_error_estimator_max >= self.tol:
                print(TextLine("N = " + str(self.reduced_problem.N), fill="#"))

                print("truth solve for mu =", self.truth_problem.mu)
                snapshot = self.truth_problem.solve()
                self.truth_problem.export_solution(self.folder["snapshots"],
                                                   "truth_" + str(iteration),
                                                   snapshot)
                snapshot = self.postprocess_snapshot(snapshot, iteration)

                print("update basis matrix")
                self.update_basis_matrix(snapshot)
                iteration += 1

                print("build reduced operators")
                self.reduced_problem.build_reduced_operators()

                print("reduced order solve")
                self.reduced_problem.solve()

                print("build operators for error estimation")
                self.reduced_problem.build_error_estimation_operators()

                (absolute_error_estimator_max,
                 relative_error_estimator_max) = self.greedy()
                print("maximum absolute error estimator over training set =",
                      absolute_error_estimator_max)
                print("maximum relative error estimator over training set =",
                      relative_error_estimator_max)

                print("")

            print(
                TextBox(self.truth_problem.name() + " " + self.label +
                        " offline phase ends",
                        fill="="))
            print("")

        def update_basis_matrix(self, snapshot):
            """
            It updates the basis matrix.

            :param snapshot: the snapshot computed at the latest offline iteration.
            """
            if len(self.truth_problem.components) > 1:
                for component in self.truth_problem.components:
                    self.reduced_problem.basis_functions.enrich(
                        snapshot, component=component)
                    self.GS[component].apply(
                        self.reduced_problem.basis_functions[component],
                        self.reduced_problem.N_bc[component])
                    self.reduced_problem.N[component] += 1
                self.reduced_problem.basis_functions.save(
                    self.reduced_problem.folder["basis"], "basis")
            else:
                self.reduced_problem.basis_functions.enrich(snapshot)
                self.GS.apply(self.reduced_problem.basis_functions,
                              self.reduced_problem.N_bc)
                self.reduced_problem.N += 1
                self.reduced_problem.basis_functions.save(
                    self.reduced_problem.folder["basis"], "basis")

        def greedy(self):
            """
            It chooses the next parameter in the offline stage in a greedy fashion: this is a
            wrapper around _greedy() that post-processes the result (in particular, it stores the
            greedily selected parameter and saves it to file).

            :return: the maximum error estimator and its value relative to the first one computed.
            """
            (error_estimator_max, error_estimator_argmax) = self._greedy()
            self.truth_problem.set_mu(
                self.training_set[error_estimator_argmax])
            self.greedy_selected_parameters.append(
                self.training_set[error_estimator_argmax])
            self.greedy_selected_parameters.save(
                self.folder["post_processing"], "mu_greedy")
            self.greedy_error_estimators.append(error_estimator_max)
            self.greedy_error_estimators.save(self.folder["post_processing"],
                                              "error_estimator_max")
            return (error_estimator_max,
                    error_estimator_max / self.greedy_error_estimators[0])

        def _greedy(self):
            """
            It chooses the next parameter in the offline stage in a greedy fashion. Internal method.
            
            :return: the maximum error estimator and the index of the corresponding parameter.
            """

            # Print some additional information on the consistency of the reduced basis
            print("absolute error for current mu =",
                  self.reduced_problem.compute_error())
            print("absolute error estimator for current mu =",
                  self.reduced_problem.estimate_error())

            # Carry out the actual greedy search
            def solve_and_estimate_error(mu):
                self.reduced_problem.set_mu(mu)
                self.reduced_problem.solve()
                error_estimator = self.reduced_problem.estimate_error()
                log(
                    DEBUG, "Error estimator for mu = " + str(mu) + " is " +
                    str(error_estimator))
                return error_estimator

            print("find next mu")
            return self.training_set.max(solve_and_estimate_error)

        def error_analysis(self, N_generator=None, filename=None, **kwargs):
            """
            It computes the error of the reduced order approximation with respect to the full
            order one over the testing set.

            :param N_generator: optional generator of the reduced basis dimensions to be tested.
            :param filename: optional name of the file to which the error analysis table is saved.
            """
            self._init_error_analysis(**kwargs)
            self._error_analysis(N_generator, filename, **kwargs)
            self._finalize_error_analysis(**kwargs)

        def _error_analysis(self, N_generator=None, filename=None, **kwargs):
            if N_generator is None:
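                # Default: the identity generator, so that the reduced solve is
                # tested for every basis size n = 1, ..., N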

                def N_generator(n):
                    return n

            if "components" in kwargs:
                components = kwargs["components"]
            else:
                components = self.truth_problem.components

            N = self.reduced_problem.N
            if isinstance(N, dict):
                N = min(N.values())

            print(
                TextBox(self.truth_problem.name() + " " + self.label +
                        " error analysis begins",
                        fill="="))
            print("")

            error_analysis_table = ErrorAnalysisTable(self.testing_set)
            error_analysis_table.set_Nmax(N)
            if len(components) > 1:
                all_components_string = "".join(components)
                for component in components:
                    error_analysis_table.add_column("error_" + component,
                                                    group_name="solution_" +
                                                    component + "_error",
                                                    operations=("mean", "max"))
                    error_analysis_table.add_column(
                        "relative_error_" + component,
                        group_name="solution_" + component + "_relative_error",
                        operations=("mean", "max"))
                error_analysis_table.add_column(
                    "error_" + all_components_string,
                    group_name="solution_" + all_components_string + "_error",
                    operations=("mean", "max"))
                error_analysis_table.add_column(
                    "error_estimator_" + all_components_string,
                    group_name="solution_" + all_components_string + "_error",
                    operations=("mean", "max"))
                error_analysis_table.add_column(
                    "effectivity_" + all_components_string,
                    group_name="solution_" + all_components_string + "_error",
                    operations=("min", "mean", "max"))
                error_analysis_table.add_column(
                    "relative_error_" + all_components_string,
                    group_name="solution_" + all_components_string +
                    "_relative_error",
                    operations=("mean", "max"))
                error_analysis_table.add_column(
                    "relative_error_estimator_" + all_components_string,
                    group_name="solution_" + all_components_string +
                    "_relative_error",
                    operations=("mean", "max"))
                error_analysis_table.add_column(
                    "relative_effectivity_" + all_components_string,
                    group_name="solution_" + all_components_string +
                    "_relative_error",
                    operations=("min", "mean", "max"))
            else:
                component = components[0]
                error_analysis_table.add_column("error_" + component,
                                                group_name="solution_" +
                                                component + "_error",
                                                operations=("mean", "max"))
                error_analysis_table.add_column("error_estimator_" + component,
                                                group_name="solution_" +
                                                component + "_error",
                                                operations=("mean", "max"))
                error_analysis_table.add_column(
                    "effectivity_" + component,
                    group_name="solution_" + component + "_error",
                    operations=("min", "mean", "max"))
                error_analysis_table.add_column("relative_error_" + component,
                                                group_name="solution_" +
                                                component + "_relative_error",
                                                operations=("mean", "max"))
                error_analysis_table.add_column(
                    "relative_error_estimator_" + component,
                    group_name="solution_" + component + "_relative_error",
                    operations=("mean", "max"))
                error_analysis_table.add_column(
                    "relative_effectivity_" + component,
                    group_name="solution_" + component + "_relative_error",
                    operations=("min", "mean", "max"))
            error_analysis_table.add_column("error_output",
                                            group_name="output_error",
                                            operations=("mean", "max"))
            error_analysis_table.add_column("error_estimator_output",
                                            group_name="output_error",
                                            operations=("mean", "max"))
            error_analysis_table.add_column("effectivity_output",
                                            group_name="output_error",
                                            operations=("min", "mean", "max"))
            error_analysis_table.add_column("relative_error_output",
                                            group_name="output_relative_error",
                                            operations=("mean", "max"))
            error_analysis_table.add_column("relative_error_estimator_output",
                                            group_name="output_relative_error",
                                            operations=("mean", "max"))
            error_analysis_table.add_column("relative_effectivity_output",
                                            group_name="output_relative_error",
                                            operations=("min", "mean", "max"))

            for (mu_index, mu) in enumerate(self.testing_set):
                print(TextLine(str(mu_index), fill="#"))

                self.reduced_problem.set_mu(mu)

                for n in range(1, N + 1):  # n = 1, ... N
                    n_arg = N_generator(n)

                    if n_arg is not None:
                        self.reduced_problem.solve(n_arg, **kwargs)
                        error = self.reduced_problem.compute_error(**kwargs)
                        if len(components) > 1:
                            error[all_components_string] = sqrt(
                                sum([
                                    error[component]**2
                                    for component in components
                                ]))
                        error_estimator = self.reduced_problem.estimate_error()
                        relative_error = self.reduced_problem.compute_relative_error(
                            **kwargs)
                        if len(components) > 1:
                            relative_error[all_components_string] = sqrt(
                                sum([
                                    relative_error[component]**2
                                    for component in components
                                ]))
                        relative_error_estimator = self.reduced_problem.estimate_relative_error()

                        self.reduced_problem.compute_output()
                        error_output = self.reduced_problem.compute_error_output(**kwargs)
                        error_output_estimator = self.reduced_problem.estimate_error_output()
                        relative_error_output = self.reduced_problem.compute_relative_error_output(**kwargs)
                        relative_error_output_estimator = self.reduced_problem.estimate_relative_error_output()
                    else:
                        if len(components) > 1:
                            error = {
                                component: NotImplemented
                                for component in components
                            }
                            error[all_components_string] = NotImplemented
                        else:
                            error = NotImplemented
                        error_estimator = NotImplemented
                        if len(components) > 1:
                            relative_error = {
                                component: NotImplemented
                                for component in components
                            }
                            relative_error[
                                all_components_string] = NotImplemented
                        else:
                            relative_error = NotImplemented
                        relative_error_estimator = NotImplemented

                        error_output = NotImplemented
                        error_output_estimator = NotImplemented
                        relative_error_output = NotImplemented
                        relative_error_output_estimator = NotImplemented

                    if len(components) > 1:
                        for component in components:
                            error_analysis_table["error_" + component, n, mu_index] = error[component]
                            error_analysis_table["relative_error_" + component, n, mu_index] = relative_error[component]
                        error_analysis_table["error_" + all_components_string, n, mu_index] = error[all_components_string]
                        error_analysis_table["error_estimator_" + all_components_string, n, mu_index] = error_estimator
                        error_analysis_table["effectivity_" + all_components_string, n, mu_index] = (
                            error_analysis_table["error_estimator_" + all_components_string, n, mu_index]
                            / error_analysis_table["error_" + all_components_string, n, mu_index])
                        error_analysis_table["relative_error_" + all_components_string, n, mu_index] = relative_error[all_components_string]
                        error_analysis_table["relative_error_estimator_" + all_components_string, n, mu_index] = relative_error_estimator
                        error_analysis_table["relative_effectivity_" + all_components_string, n, mu_index] = (
                            error_analysis_table["relative_error_estimator_" + all_components_string, n, mu_index]
                            / error_analysis_table["relative_error_" + all_components_string, n, mu_index])
                    else:
                        component = components[0]
                        error_analysis_table["error_" + component, n, mu_index] = error
                        error_analysis_table["error_estimator_" + component, n, mu_index] = error_estimator
                        error_analysis_table["effectivity_" + component, n, mu_index] = (
                            error_analysis_table["error_estimator_" + component, n, mu_index]
                            / error_analysis_table["error_" + component, n, mu_index])
                        error_analysis_table["relative_error_" + component, n, mu_index] = relative_error
                        error_analysis_table["relative_error_estimator_" + component, n, mu_index] = relative_error_estimator
                        error_analysis_table["relative_effectivity_" + component, n, mu_index] = (
                            error_analysis_table["relative_error_estimator_" + component, n, mu_index]
                            / error_analysis_table["relative_error_" + component, n, mu_index])

                    error_analysis_table["error_output", n, mu_index] = error_output
                    error_analysis_table["error_estimator_output", n, mu_index] = error_output_estimator
                    error_analysis_table["effectivity_output", n, mu_index] = (
                        error_analysis_table["error_estimator_output", n, mu_index]
                        / error_analysis_table["error_output", n, mu_index])
                    error_analysis_table["relative_error_output", n, mu_index] = relative_error_output
                    error_analysis_table["relative_error_estimator_output", n, mu_index] = relative_error_output_estimator
                    error_analysis_table["relative_effectivity_output", n, mu_index] = (
                        error_analysis_table["relative_error_estimator_output", n, mu_index]
                        / error_analysis_table["relative_error_output", n, mu_index])

            # Print
            print("")
            print(error_analysis_table)

            print("")
            print(
                TextBox(self.truth_problem.name() + " " + self.label +
                        " error analysis ends",
                        fill="="))
            print("")

            # Export error analysis table
            error_analysis_table.save(
                self.folder["error_analysis"],
                "error_analysis" if filename is None else filename)

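        # A minimal usage sketch (hypothetical driver code, assuming a reduction
        # method instance "reduction_method" that has already completed its
        # offline phase; the testing set size 100 is arbitrary):
        #
        #     reduction_method.initialize_testing_set(100)
        #     reduction_method.error_analysis(filename="error_analysis")
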
        def speedup_analysis(self, N_generator=None, filename=None, **kwargs):
            """
            It computes the speedup of the reduced order approximation with respect to the full order one over the testing set.
            
            :param N: dimension of the reduced problem.
            """
            self._init_speedup_analysis(**kwargs)
            self._speedup_analysis(N_generator, filename, **kwargs)
            self._finalize_speedup_analysis(**kwargs)

        def _speedup_analysis(self, N_generator=None, filename=None, **kwargs):
            if N_generator is None:

                def N_generator(n):
                    return n

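            # The default generator tests every reduced dimension n = 1, ..., N;
            # a user-supplied N_generator may instead return None for some n, in
            # which case that dimension is skipped and the corresponding table
            # entries are stored as NotImplemented. For problems with several
            # components N is a dict over the components, so the smallest value
            # is used to ensure that every tested dimension is available.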
            N = self.reduced_problem.N
            if isinstance(N, dict):
                N = min(N.values())

            print(
                TextBox(self.truth_problem.name() + " " + self.label +
                        " speedup analysis begins",
                        fill="="))
            print("")

            speedup_analysis_table = SpeedupAnalysisTable(self.testing_set)
            speedup_analysis_table.set_Nmax(N)
            for group in ("speedup_solve",
                          "speedup_solve_and_estimate_error",
                          "speedup_solve_and_estimate_relative_error",
                          "speedup_output",
                          "speedup_output_and_estimate_error_output",
                          "speedup_output_and_estimate_relative_error_output"):
                speedup_analysis_table.add_column(
                    group, group_name=group, operations=("min", "mean", "max"))

            truth_timer = Timer("parallel")
            reduced_timer = Timer("serial")
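            # The truth solve may be distributed over several processes, while the
            # reduced solve is inherently serial; the two timers are (presumably)
            # configured accordingly, so that the speedup compares comparable
            # wall-clock times.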

            for (mu_index, mu) in enumerate(self.testing_set):
                print(TextLine(str(mu_index), fill="#"))

                self.reduced_problem.set_mu(mu)

                truth_timer.start()
                self.truth_problem.solve(**kwargs)
                elapsed_truth_solve = truth_timer.stop()

                truth_timer.start()
                self.truth_problem.compute_output()
                elapsed_truth_output = truth_timer.stop()

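                # The truth solve and truth output above are computed once per
                # parameter and reused for every reduced dimension n tested below.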
                for n in range(1, N + 1):  # n = 1, ... N
                    n_arg = N_generator(n)

                    if n_arg is not None:
                        reduced_timer.start()
                        solution = self.reduced_problem.solve(n_arg, **kwargs)
                        elapsed_reduced_solve = reduced_timer.stop()

                        truth_timer.start()
                        self.reduced_problem.compute_error(**kwargs)
                        elapsed_error = truth_timer.stop()

                        reduced_timer.start()
                        error_estimator = self.reduced_problem.estimate_error()
                        elapsed_error_estimator = reduced_timer.stop()

                        truth_timer.start()
                        self.reduced_problem.compute_relative_error(**kwargs)
                        elapsed_relative_error = truth_timer.stop()

                        reduced_timer.start()
                        relative_error_estimator = self.reduced_problem.estimate_relative_error()
                        elapsed_relative_error_estimator = reduced_timer.stop()

                        reduced_timer.start()
                        output = self.reduced_problem.compute_output()
                        elapsed_reduced_output = reduced_timer.stop()

                        truth_timer.start()
                        self.reduced_problem.compute_error_output(**kwargs)
                        elapsed_error_output = truth_timer.stop()

                        reduced_timer.start()
                        error_estimator_output = self.reduced_problem.estimate_error_output()
                        elapsed_error_estimator_output = reduced_timer.stop()

                        truth_timer.start()
                        self.reduced_problem.compute_relative_error_output(**kwargs)
                        elapsed_relative_error_output = truth_timer.stop()

                        reduced_timer.start()
                        relative_error_estimator_output = self.reduced_problem.estimate_relative_error_output()
                        elapsed_relative_error_estimator_output = reduced_timer.stop()
                    else:
                        solution = NotImplemented
                        error_estimator = NotImplemented
                        relative_error_estimator = NotImplemented

                        output = NotImplemented
                        error_estimator_output = NotImplemented
                        relative_error_estimator_output = NotImplemented

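                    # NotImplemented entries mark dimensions skipped by N_generator,
                    # so that the corresponding speedups can be recognized as missing.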
                    if solution is not NotImplemented:
                        speedup_analysis_table["speedup_solve", n, mu_index] = (
                            elapsed_truth_solve / elapsed_reduced_solve)
                    else:
                        speedup_analysis_table["speedup_solve", n, mu_index] = NotImplemented
                    if error_estimator is not NotImplemented:
                        speedup_analysis_table["speedup_solve_and_estimate_error", n, mu_index] = (
                            (elapsed_truth_solve + elapsed_error)
                            / (elapsed_reduced_solve + elapsed_error_estimator))
                    else:
                        speedup_analysis_table["speedup_solve_and_estimate_error", n, mu_index] = NotImplemented
                    if relative_error_estimator is not NotImplemented:
                        speedup_analysis_table["speedup_solve_and_estimate_relative_error", n, mu_index] = (
                            (elapsed_truth_solve + elapsed_relative_error)
                            / (elapsed_reduced_solve + elapsed_relative_error_estimator))
                    else:
                        speedup_analysis_table["speedup_solve_and_estimate_relative_error", n, mu_index] = NotImplemented
                    if output is not NotImplemented:
                        speedup_analysis_table["speedup_output", n, mu_index] = (
                            (elapsed_truth_solve + elapsed_truth_output)
                            / (elapsed_reduced_solve + elapsed_reduced_output))
                    else:
                        speedup_analysis_table["speedup_output", n, mu_index] = NotImplemented
                    if error_estimator_output is not NotImplemented:
                        assert output is not NotImplemented
                        speedup_analysis_table["speedup_output_and_estimate_error_output", n, mu_index] = (
                            (elapsed_truth_solve + elapsed_truth_output + elapsed_error_output)
                            / (elapsed_reduced_solve + elapsed_reduced_output + elapsed_error_estimator_output))
                    else:
                        speedup_analysis_table["speedup_output_and_estimate_error_output", n, mu_index] = NotImplemented
                    if relative_error_estimator_output is not NotImplemented:
                        assert output is not NotImplemented
                        speedup_analysis_table["speedup_output_and_estimate_relative_error_output", n, mu_index] = (
                            (elapsed_truth_solve + elapsed_truth_output + elapsed_relative_error_output)
                            / (elapsed_reduced_solve + elapsed_reduced_output + elapsed_relative_error_estimator_output))
                    else:
                        speedup_analysis_table["speedup_output_and_estimate_relative_error_output", n, mu_index] = NotImplemented

            # Print
            print("")
            print(speedup_analysis_table)

            print("")
            print(
                TextBox(self.truth_problem.name() + " " + self.label +
                        " speedup analysis ends",
                        fill="="))
            print("")

            # Export speedup analysis table
            speedup_analysis_table.save(
                self.folder["speedup_analysis"],
                "speedup_analysis" if filename is None else filename)