class EIMApproximationReductionMethod(ReductionMethod):

    # Default initialization of members
    def __init__(self, EIM_approximation):
        # Call the parent initialization
        ReductionMethod.__init__(self, EIM_approximation.folder_prefix)

        # $$ OFFLINE DATA STRUCTURES $$ #
        # High fidelity problem
        self.EIM_approximation = EIM_approximation
        # Declare a new container to store the snapshots
        self.snapshots_container = self.EIM_approximation.parametrized_expression.create_snapshots_container()
        self._training_set_parameters_to_snapshots_container_index = dict()
        # I/O
        self.folder["snapshots"] = os.path.join(self.folder_prefix, "snapshots")
        self.folder["post_processing"] = os.path.join(self.folder_prefix, "post_processing")
        self.greedy_selected_parameters = GreedySelectedParametersList()
        self.greedy_errors = GreedyErrorEstimatorsList()
        # By default set a tolerance slightly larger than zero, in order to
        # stop greedy iterations in trivial cases by default
        self.tol = 1e-15

    def initialize_training_set(self, ntrain, enable_import=True, sampling=None, **kwargs):
        import_successful = ReductionMethod.initialize_training_set(
            self, self.EIM_approximation.mu_range, ntrain, enable_import, sampling, **kwargs)
        # Since exact evaluation is required, we cannot use a distributed training set
        self.training_set.distributed_max = False
        # Also initialize the map from parameter values to snapshots container index
        self._training_set_parameters_to_snapshots_container_index = dict(
            (mu, mu_index) for (mu_index, mu) in enumerate(self.training_set))
        return import_successful

    def initialize_testing_set(self, ntest, enable_import=False, sampling=None, **kwargs):
        return ReductionMethod.initialize_testing_set(
            self, self.EIM_approximation.mu_range, ntest, enable_import, sampling, **kwargs)

    # Perform the offline phase of EIM
    def offline(self):
        need_to_do_offline_stage = self._init_offline()
        if need_to_do_offline_stage:
            self._offline()
        self._finalize_offline()
        return self.EIM_approximation

    # Initialize data structures required for the offline phase
    def _init_offline(self):
        # Prepare folders and init EIM approximation
        all_folders = Folders()
        all_folders.update(self.folder)
        all_folders.update(self.EIM_approximation.folder)
        all_folders.pop("testing_set")  # this is required only in the error/speedup analysis
        all_folders.pop("error_analysis")  # this is required only in the error analysis
        all_folders.pop("speedup_analysis")  # this is required only in the speedup analysis
        at_least_one_folder_created = all_folders.create()
        if not at_least_one_folder_created:
            return False  # offline construction should be skipped, since data are already available
        else:
            self.EIM_approximation.init("offline")
            return True  # offline construction should be carried out

    def _offline(self):
        interpolation_method_name = self.EIM_approximation.parametrized_expression.interpolation_method_name()
        description = self.EIM_approximation.parametrized_expression.description()

        # Evaluate the parametrized expression for all parameters in the training set
        print(TextBox(interpolation_method_name + " preprocessing phase begins for" + "\n"
                      + "\n".join(description), fill="="))
        print("")

        for (mu_index, mu) in enumerate(self.training_set):
            print(TextLine(interpolation_method_name + " " + str(mu_index), fill=":"))
            self.EIM_approximation.set_mu(mu)
            print("evaluate parametrized expression at mu =", mu)
            self.EIM_approximation.evaluate_parametrized_expression()
            self.EIM_approximation.export_solution(self.folder["snapshots"], "truth_" + str(mu_index))
            print("add to snapshots")
            self.add_to_snapshots(self.EIM_approximation.snapshot)
            print("")

        # If basis generation is POD, compute the first POD modes of the snapshots
        if self.EIM_approximation.basis_generation == "POD":
            print("compute basis")
            N_POD = self.compute_basis_POD()
            print("")

        print(TextBox(interpolation_method_name + " preprocessing phase ends for" + "\n"
                      + "\n".join(description), fill="="))
        print("")

        print(TextBox(interpolation_method_name + " offline phase begins for" + "\n"
                      + "\n".join(description), fill="="))
        print("")

        if self.EIM_approximation.basis_generation == "Greedy":
            # Arbitrarily start from the first parameter in the training set
            self.EIM_approximation.set_mu(self.training_set[0])

            # Carry out greedy selection
            relative_error_max = 2. * self.tol
            while self.EIM_approximation.N < self.Nmax and relative_error_max >= self.tol:
                print(TextLine(interpolation_method_name + " N = " + str(self.EIM_approximation.N), fill=":"))
                self._print_greedy_interpolation_solve_message()
                self.EIM_approximation.solve()

                print("compute and locate maximum interpolation error")
                self.EIM_approximation.snapshot = self.load_snapshot()
                (error, maximum_error, maximum_location) = self.EIM_approximation.compute_maximum_interpolation_error()

                print("update locations with", maximum_location)
                self.update_interpolation_locations(maximum_location)

                print("update basis")
                self.update_basis_greedy(error, maximum_error)

                print("update interpolation matrix")
                self.update_interpolation_matrix()

                (error_max, relative_error_max) = self.greedy()
                print("maximum interpolation error =", error_max)
                print("maximum interpolation relative error =", relative_error_max)

                print("")
        else:
            while self.EIM_approximation.N < N_POD:
                print(TextLine(interpolation_method_name + " N = " + str(self.EIM_approximation.N), fill=":"))

                print("solve interpolation for basis number", self.EIM_approximation.N)
                self.EIM_approximation._solve(self.EIM_approximation.basis_functions[self.EIM_approximation.N])

                print("compute and locate maximum interpolation error")
                self.EIM_approximation.snapshot = self.EIM_approximation.basis_functions[self.EIM_approximation.N]
                (error, maximum_error, maximum_location) = self.EIM_approximation.compute_maximum_interpolation_error()

                print("update locations with", maximum_location)
                self.update_interpolation_locations(maximum_location)
                self.EIM_approximation.N += 1

                print("update interpolation matrix")
                self.update_interpolation_matrix()

                print("")

        print(TextBox(interpolation_method_name + " offline phase ends for" + "\n"
                      + "\n".join(description), fill="="))
        print("")

    # Finalize data structures required after the offline phase
    def _finalize_offline(self):
        self.EIM_approximation.init("online")

    def _print_greedy_interpolation_solve_message(self):
        print("solve interpolation for mu =", self.EIM_approximation.mu)

    # Update the snapshots container
    def add_to_snapshots(self, snapshot):
        self.snapshots_container.enrich(snapshot)

    # Update basis (greedy version)
    def update_basis_greedy(self, error, maximum_error):
        if abs(maximum_error) > 0.:
            self.EIM_approximation.basis_functions.enrich(error / maximum_error)
        else:
            # Trivial case, greedy will stop at the first iteration
            assert self.EIM_approximation.N == 0
            self.EIM_approximation.basis_functions.enrich(error)  # error is actually zero
        self.EIM_approximation.basis_functions.save(self.EIM_approximation.folder["basis"], "basis")
        self.EIM_approximation.N += 1

    # Update basis (POD version)
    def compute_basis_POD(self):
        POD = self.EIM_approximation.parametrized_expression.create_POD_container()
        POD.store_snapshot(self.snapshots_container)
        (_, _, basis_functions, N) = POD.apply(self.Nmax, self.tol)
        self.EIM_approximation.basis_functions.enrich(basis_functions)
        self.EIM_approximation.basis_functions.save(self.EIM_approximation.folder["basis"], "basis")
        # do not increment self.EIM_approximation.N
        POD.print_eigenvalues(N)
        POD.save_eigenvalues_file(self.folder["post_processing"], "eigs")
        POD.save_retained_energy_file(self.folder["post_processing"], "retained_energy")
        return N

    def update_interpolation_locations(self, maximum_location):
        self.EIM_approximation.interpolation_locations.append(maximum_location)
        self.EIM_approximation.interpolation_locations.save(
            self.EIM_approximation.folder["reduced_operators"], "interpolation_locations")

    # Assemble the interpolation matrix
    def update_interpolation_matrix(self):
        self.EIM_approximation.interpolation_matrix[0] = evaluate(
            self.EIM_approximation.basis_functions[:self.EIM_approximation.N],
            self.EIM_approximation.interpolation_locations)
        self.EIM_approximation.interpolation_matrix.save(
            self.EIM_approximation.folder["reduced_operators"], "interpolation_matrix")

    # Load the precomputed snapshot
    def load_snapshot(self):
        assert self.EIM_approximation.basis_generation == "Greedy"
        mu = self.EIM_approximation.mu
        mu_index = self._training_set_parameters_to_snapshots_container_index[mu]
        assert mu == self.training_set[mu_index]
        return self.snapshots_container[mu_index]

    # Choose the next parameter in the offline stage in a greedy fashion
    def greedy(self):
        assert self.EIM_approximation.basis_generation == "Greedy"

        # Print some additional information on the consistency of the reduced basis
        self.EIM_approximation.solve()
        self.EIM_approximation.snapshot = self.load_snapshot()
        error = (self.EIM_approximation.snapshot
                 - self.EIM_approximation.basis_functions * self.EIM_approximation._interpolation_coefficients)
        error_on_interpolation_locations = evaluate(error, self.EIM_approximation.interpolation_locations)
        (maximum_error, _) = max(abs(error))
        # for consistency check, the following should be zero
        (maximum_error_on_interpolation_locations, _) = max(abs(error_on_interpolation_locations))
        print("interpolation error for current mu =", abs(maximum_error))
        print("interpolation error on interpolation locations for current mu =",
              abs(maximum_error_on_interpolation_locations))

        # Carry out the actual greedy search
        def solve_and_compute_error(mu):
            self.EIM_approximation.set_mu(mu)

            self.EIM_approximation.solve()
            self.EIM_approximation.snapshot = self.load_snapshot()
            (_, maximum_error, _) = self.EIM_approximation.compute_maximum_interpolation_error()
            return abs(maximum_error)

        print("find next mu")
        (error_max, error_argmax) = self.training_set.max(solve_and_compute_error)
        self.EIM_approximation.set_mu(self.training_set[error_argmax])
        self.greedy_selected_parameters.append(self.training_set[error_argmax])
        self.greedy_selected_parameters.save(self.folder["post_processing"], "mu_greedy")
        self.greedy_errors.append(error_max)
        self.greedy_errors.save(self.folder["post_processing"], "error_max")
        if abs(self.greedy_errors[0]) > 0.:
            return (abs(error_max), abs(error_max / self.greedy_errors[0]))
        else:
            # Trivial case, greedy will stop at the first iteration
            assert len(self.greedy_errors) == 1
            assert self.EIM_approximation.N == 1
            return (0., 0.)
    # Compute the error of the empirical interpolation approximation with respect to the
    # exact function over the testing set
    def error_analysis(self, N_generator=None, filename=None, **kwargs):
        assert len(kwargs) == 0  # not used in this method

        self._init_error_analysis(**kwargs)
        self._error_analysis(N_generator, filename, **kwargs)
        self._finalize_error_analysis(**kwargs)

    def _error_analysis(self, N_generator=None, filename=None, **kwargs):
        if N_generator is None:
            def N_generator(n):
                return n

        N = self.EIM_approximation.N
        interpolation_method_name = self.EIM_approximation.parametrized_expression.interpolation_method_name()
        description = self.EIM_approximation.parametrized_expression.description()

        print(TextBox(interpolation_method_name + " error analysis begins for" + "\n"
                      + "\n".join(description), fill="="))
        print("")

        error_analysis_table = ErrorAnalysisTable(self.testing_set)
        error_analysis_table.set_Nmax(N)
        error_analysis_table.add_column("error", group_name="eim", operations=("mean", "max"))
        error_analysis_table.add_column("relative_error", group_name="eim", operations=("mean", "max"))

        for (mu_index, mu) in enumerate(self.testing_set):
            print(TextLine(interpolation_method_name + " " + str(mu_index), fill=":"))
            self.EIM_approximation.set_mu(mu)

            # Evaluate the exact function on the truth grid
            self.EIM_approximation.evaluate_parametrized_expression()

            for n in range(1, N + 1):  # n = 1, ... N
                n_arg = N_generator(n)
                if n_arg is not None:
                    self.EIM_approximation.solve(n_arg)
                    (_, error, _) = self.EIM_approximation.compute_maximum_interpolation_error(n)
                    (_, relative_error, _) = self.EIM_approximation.compute_maximum_interpolation_relative_error(n)
                    error_analysis_table["error", n, mu_index] = abs(error)
                    error_analysis_table["relative_error", n, mu_index] = abs(relative_error)
                else:
                    error_analysis_table["error", n, mu_index] = NotImplemented
                    error_analysis_table["relative_error", n, mu_index] = NotImplemented

        # Print
        print("")
        print(error_analysis_table)

        print("")
        print(TextBox(interpolation_method_name + " error analysis ends for" + "\n"
                      + "\n".join(description), fill="="))
        print("")

        # Export error analysis table
        error_analysis_table.save(self.folder["error_analysis"],
                                  "error_analysis" if filename is None else filename)

    # Compute the speedup of the empirical interpolation approximation with respect to the
    # exact function over the testing set
    def speedup_analysis(self, N_generator=None, filename=None, **kwargs):
        assert len(kwargs) == 0  # not used in this method

        self._init_speedup_analysis(**kwargs)
        self._speedup_analysis(N_generator, filename, **kwargs)
        self._finalize_speedup_analysis(**kwargs)

    # Initialize data structures required for the speedup analysis phase
    def _init_speedup_analysis(self, **kwargs):
        # Make sure to clean up snapshot cache to ensure that parametrized
        # expression evaluation is actually carried out
        self.EIM_approximation.snapshot_cache.clear()
        # ... and also disable the capability of importing/exporting truth solutions
        self.disable_import_solution = PatchInstanceMethod(
            self.EIM_approximation, "import_solution",
            lambda self_, folder, filename, solution=None: False)
        self.disable_export_solution = PatchInstanceMethod(
            self.EIM_approximation, "export_solution",
            lambda self_, folder, filename, solution=None: None)
        self.disable_import_solution.patch()
        self.disable_export_solution.patch()

    def _speedup_analysis(self, N_generator=None, filename=None, **kwargs):
        if N_generator is None:
            def N_generator(n):
                return n

        N = self.EIM_approximation.N
        interpolation_method_name = self.EIM_approximation.parametrized_expression.interpolation_method_name()
        description = self.EIM_approximation.parametrized_expression.description()

        print(TextBox(interpolation_method_name + " speedup analysis begins for" + "\n"
                      + "\n".join(description), fill="="))
        print("")

        speedup_analysis_table = SpeedupAnalysisTable(self.testing_set)
        speedup_analysis_table.set_Nmax(N)
        speedup_analysis_table.add_column("speedup", group_name="speedup", operations=("min", "mean", "max"))

        evaluate_timer = Timer("parallel")
        EIM_timer = Timer("serial")

        for (mu_index, mu) in enumerate(self.testing_set):
            print(TextLine(interpolation_method_name + " " + str(mu_index), fill=":"))
            self.EIM_approximation.set_mu(mu)

            # Evaluate the exact function on the truth grid
            evaluate_timer.start()
            self.EIM_approximation.evaluate_parametrized_expression()
            elapsed_evaluate = evaluate_timer.stop()

            for n in range(1, N + 1):  # n = 1, ... N
                n_arg = N_generator(n)
                if n_arg is not None:
                    EIM_timer.start()
                    self.EIM_approximation.solve(n_arg)
                    elapsed_EIM = EIM_timer.stop()
                    speedup_analysis_table["speedup", n, mu_index] = elapsed_evaluate / elapsed_EIM
                else:
                    speedup_analysis_table["speedup", n, mu_index] = NotImplemented

        # Print
        print("")
        print(speedup_analysis_table)

        print("")
        print(TextBox(interpolation_method_name + " speedup analysis ends for" + "\n"
                      + "\n".join(description), fill="="))
        print("")

        # Export speedup analysis table
        speedup_analysis_table.save(self.folder["speedup_analysis"],
                                    "speedup_analysis" if filename is None else filename)

    # Finalize data structures required after the speedup analysis phase
    def _finalize_speedup_analysis(self, **kwargs):
        # Restore the capability to import/export truth solutions
        self.disable_import_solution.unpatch()
        self.disable_export_solution.unpatch()
        del self.disable_import_solution
        del self.disable_export_solution
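
# A minimal driver sketch, not part of the class above: it shows the order in
# which the public methods of EIMApproximationReductionMethod are typically
# called. The `eim_approximation` argument is assumed to be an EIM approximation
# object constructed elsewhere; `Nmax` and `tol` are written directly to the
# attributes read by the greedy/POD loops, and all numeric values are
# illustrative assumptions.
def _example_eim_workflow(eim_approximation, Nmax=20, tol=1e-10, ntrain=100, ntest=50):
    reduction_method = EIMApproximationReductionMethod(eim_approximation)
    reduction_method.Nmax = Nmax  # maximum number of basis functions
    reduction_method.tol = tol  # stopping tolerance for the greedy iterations
    reduction_method.initialize_training_set(ntrain)
    reduced_eim = reduction_method.offline()  # greedy or POD construction, see _offline()
    reduction_method.initialize_testing_set(ntest)
    reduction_method.error_analysis()  # fills and exports an ErrorAnalysisTable
    reduction_method.speedup_analysis()  # fills and exports a SpeedupAnalysisTable
    return reduced_eim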
class SCMApproximationReductionMethod(ReductionMethod):

    # Default initialization of members
    def __init__(self, SCM_approximation, folder_prefix):
        # Call the parent initialization
        ReductionMethod.__init__(self, folder_prefix)

        # $$ OFFLINE DATA STRUCTURES $$ #
        # High fidelity problem
        self.SCM_approximation = SCM_approximation
        # I/O
        self.folder["post_processing"] = os.path.join(self.folder_prefix, "post_processing")
        self.greedy_selected_parameters = SCM_approximation.greedy_selected_parameters
        self.greedy_error_estimators = GreedyErrorEstimatorsList()

    # OFFLINE: set the elements in the training set.
    def initialize_training_set(self, ntrain, enable_import=True, sampling=None, **kwargs):
        assert enable_import
        import_successful = ReductionMethod.initialize_training_set(
            self, self.SCM_approximation.mu_range, ntrain, enable_import, sampling, **kwargs)
        self.SCM_approximation.training_set = self.training_set
        return import_successful

    def initialize_testing_set(self, ntest, enable_import=False, sampling=None, **kwargs):
        return ReductionMethod.initialize_testing_set(
            self, self.SCM_approximation.mu_range, ntest, enable_import, sampling, **kwargs)

    # Perform the offline phase of SCM
    def offline(self):
        need_to_do_offline_stage = self._init_offline()
        if need_to_do_offline_stage:
            self._offline()
        self._finalize_offline()
        return self.SCM_approximation

    # Initialize data structures required for the offline phase
    def _init_offline(self):
        # Prepare folders and init SCM approximation
        required_folders = Folders()
        required_folders.update(self.folder)
        required_folders.update(self.SCM_approximation.folder)
        optional_folders = Folders()
        # cache does not affect the availability of offline data
        optional_folders["cache"] = required_folders.pop("cache")
        # testing set is required only in the error/speedup analysis
        optional_folders["testing_set"] = required_folders.pop("testing_set")
        # error analysis folder is required only in the error analysis
        optional_folders["error_analysis"] = required_folders.pop("error_analysis")
        # speedup analysis folder is required only in the speedup analysis
        optional_folders["speedup_analysis"] = required_folders.pop("speedup_analysis")
        at_least_one_required_folder_created = required_folders.create()
        at_least_one_optional_folder_created = optional_folders.create()  # noqa: F841
        if not at_least_one_required_folder_created:
            return False  # offline construction should be skipped, since data are already available
        else:
            self.SCM_approximation.init("offline")
            return True  # offline construction should be carried out

    def _offline(self):
        print(TextBox("SCM offline phase begins", fill="="))
        print("")

        # Compute the bounding box \mathcal{B}
        self.compute_bounding_box()
        print("")

        # Arbitrarily start from the first parameter in the training set
        self.SCM_approximation.set_mu(self.training_set[0])
        relative_error_estimator_max = 2. * self.tol

        while self.SCM_approximation.N < self.Nmax and relative_error_estimator_max >= self.tol:
            print(TextLine("SCM N = " + str(self.SCM_approximation.N), fill="~"))

            # Store the greedy parameter
            self.store_greedy_selected_parameters()

            # Evaluate the stability factor
            print("evaluate the stability factor for mu =", self.SCM_approximation.mu)
            (stability_factor, eigenvector) = self.SCM_approximation.evaluate_stability_factor()
            print("stability factor =", stability_factor)

            # Update data structures related to upper bound vectors
            upper_bound_vector = self.compute_upper_bound_vector(eigenvector)
            self.update_upper_bound_vectors(upper_bound_vector)

            # Prepare for next iteration
            print("find next mu")
            (error_estimator_max, relative_error_estimator_max) = self.greedy()
            print("maximum SCM error estimator =", error_estimator_max)
            print("maximum SCM relative error estimator =", relative_error_estimator_max)

            print("")

        print(TextBox("SCM offline phase ends", fill="="))
        print("")

    # Finalize data structures required after the offline phase
    def _finalize_offline(self):
        self.SCM_approximation.init("online")

    # Compute the bounding box \mathcal{B}
    def compute_bounding_box(self):
        # Resize the bounding box storage
        Q = self.SCM_approximation.truth_problem.Q["stability_factor_left_hand_matrix"]
        for q in range(Q):
            # Compute the minimum eigenvalue
            minimum_eigenvalue_calculator = ParametrizedStabilityFactorEigenProblem(
                self.SCM_approximation.truth_problem, "smallest",
                self.SCM_approximation.truth_problem._eigen_solver_parameters["bounding_box_minimum"],
                self.folder_prefix, expansion_index=q)
            minimum_eigenvalue_calculator.init()
            (self.SCM_approximation.bounding_box_min[q], _) = minimum_eigenvalue_calculator.solve()
            print("bounding_box_min[" + str(q) + "] = " + str(self.SCM_approximation.bounding_box_min[q]))

            # Compute the maximum eigenvalue
            maximum_eigenvalue_calculator = ParametrizedStabilityFactorEigenProblem(
                self.SCM_approximation.truth_problem, "largest",
                self.SCM_approximation.truth_problem._eigen_solver_parameters["bounding_box_maximum"],
                self.folder_prefix, expansion_index=q)
            maximum_eigenvalue_calculator.init()
            (self.SCM_approximation.bounding_box_max[q], _) = maximum_eigenvalue_calculator.solve()
            print("bounding_box_max[" + str(q) + "] = " + str(self.SCM_approximation.bounding_box_max[q]))

        # Save to file
        self.SCM_approximation.bounding_box_min.save(
            self.SCM_approximation.folder["reduced_operators"], "bounding_box_min")
        self.SCM_approximation.bounding_box_max.save(
            self.SCM_approximation.folder["reduced_operators"], "bounding_box_max")

    # Store the greedy parameter
    def store_greedy_selected_parameters(self):
        mu = self.SCM_approximation.mu
        self.SCM_approximation.greedy_selected_parameters.append(mu)
        self.SCM_approximation.N = len(self.SCM_approximation.greedy_selected_parameters)
        # Save to file
        self.SCM_approximation.greedy_selected_parameters.save(
            self.SCM_approximation.folder["reduced_operators"], "greedy_selected_parameters")

    def compute_upper_bound_vector(self, u):
        Q = self.SCM_approximation.truth_problem.Q["stability_factor_left_hand_matrix"]
        A = self.SCM_approximation.truth_problem.operator["stability_factor_left_hand_matrix"]
        B = self.SCM_approximation.truth_problem.operator["stability_factor_right_hand_matrix"]
        assert len(B) == 1
        normalization = transpose(u) * B[0] * u
        upper_bound_vector = OnlineVector(Q)
        for q in range(Q):
            upper_bound_vector[q] = (transpose(u) * A[q] * u) / normalization
        return upper_bound_vector

    def update_upper_bound_vectors(self, upper_bound_vector):
        self.SCM_approximation.upper_bound_vectors.append(upper_bound_vector)
        self.SCM_approximation.upper_bound_vectors.save(
            self.SCM_approximation.folder["reduced_operators"], "upper_bound_vectors")

    # Choose the next parameter in the offline stage in a greedy fashion
    def greedy(self):
        def solve_and_estimate_error(mu):
            self.SCM_approximation.set_mu(mu)

            stability_factor_lower_bound = self.SCM_approximation.get_stability_factor_lower_bound()
            stability_factor_upper_bound = self.SCM_approximation.get_stability_factor_upper_bound()
            ratio = stability_factor_lower_bound / stability_factor_upper_bound
            if ratio < 0. and not isclose(ratio, 0.):  # if ratio << 0
                print("SCM warning at mu = " + str(mu) + ": stability factor lower bound = "
                      + str(stability_factor_lower_bound) + " < 0")
            if ratio > 1. and not isclose(ratio, 1.):  # if ratio >> 1
                print("SCM warning at mu = " + str(mu) + ": stability factor lower bound = "
                      + str(stability_factor_lower_bound) + " > stability factor upper bound = "
                      + str(stability_factor_upper_bound))

            error_estimator = 1. - ratio
            return error_estimator

        (error_estimator_max, error_estimator_argmax) = self.training_set.max(solve_and_estimate_error)
        self.SCM_approximation.set_mu(self.training_set[error_estimator_argmax])
        self.greedy_error_estimators.append(error_estimator_max)
        self.greedy_error_estimators.save(self.folder["post_processing"], "error_estimator_max")
        return (error_estimator_max, error_estimator_max / self.greedy_error_estimators[0])

    # Initialize data structures required for the error analysis phase
    def _init_error_analysis(self, **kwargs):
        # Initialize reduced order data structures in the SCM online problem
        self.SCM_approximation.init("online")

    # Compute the error of the SCM approximation with respect to the
    # exact stability factor over the testing set
    def error_analysis(self, N_generator=None, filename=None, **kwargs):
        assert len(kwargs) == 0  # not used in this method

        self._init_error_analysis(**kwargs)
        self._error_analysis(N_generator, filename, **kwargs)
        self._finalize_error_analysis(**kwargs)

    def _error_analysis(self, N_generator=None, filename=None, **kwargs):
        if N_generator is None:
            def N_generator():
                N = self.SCM_approximation.N
                for n in range(1, N + 1):  # n = 1, ... N
                    yield n

        def N_generator_max():
            *_, Nmax = N_generator()
            return Nmax

        print(TextBox("SCM error analysis begins", fill="="))
        print("")

        error_analysis_table = ErrorAnalysisTable(self.testing_set)
        error_analysis_table.set_Nmax(N_generator_max())
        error_analysis_table.add_column("normalized_error", group_name="scm", operations=("min", "mean", "max"))

        for (mu_index, mu) in enumerate(self.testing_set):
            print(TextLine("SCM " + str(mu_index), fill="~"))
            self.SCM_approximation.set_mu(mu)

            (exact_stability_factor, _) = self.SCM_approximation.evaluate_stability_factor()
            for n in N_generator():
                stability_factor_lower_bound = self.SCM_approximation.get_stability_factor_lower_bound(n)
                stability_factor_upper_bound = self.SCM_approximation.get_stability_factor_upper_bound(n)
                ratio_lower_bound_to_upper_bound = stability_factor_lower_bound / stability_factor_upper_bound
                ratio_lower_bound_to_exact = stability_factor_lower_bound / exact_stability_factor
                # if ratio_lower_bound_to_upper_bound << 0
                if ratio_lower_bound_to_upper_bound < 0. and not isclose(ratio_lower_bound_to_upper_bound, 0.):
                    print("SCM warning at mu = " + str(mu) + ": stability factor lower bound = "
                          + str(stability_factor_lower_bound) + " < 0")
                # if ratio_lower_bound_to_upper_bound >> 1
                if ratio_lower_bound_to_upper_bound > 1. and not isclose(ratio_lower_bound_to_upper_bound, 1.):
                    print("SCM warning at mu = " + str(mu) + ": stability factor lower bound = "
                          + str(stability_factor_lower_bound) + " > stability factor upper bound = "
                          + str(stability_factor_upper_bound))
                # if ratio_lower_bound_to_exact >> 1
                if ratio_lower_bound_to_exact > 1. and not isclose(ratio_lower_bound_to_exact, 1.):
                    print("SCM warning at mu = " + str(mu) + ": stability factor lower bound = "
                          + str(stability_factor_lower_bound) + " > exact stability factor = "
                          + str(exact_stability_factor))
                error_analysis_table["normalized_error", n, mu_index] = (
                    exact_stability_factor - stability_factor_lower_bound) / stability_factor_upper_bound

        # Print
        print("")
        print(error_analysis_table)

        print("")
        print(TextBox("SCM error analysis ends", fill="="))
        print("")

        # Export error analysis table
        error_analysis_table.save(self.folder["error_analysis"],
                                  "error_analysis" if filename is None else filename)

    # Compute the speedup of the SCM approximation with respect to the
    # exact stability factor over the testing set
    def speedup_analysis(self, N_generator=None, filename=None, **kwargs):
        assert len(kwargs) == 0  # not used in this method

        self._init_speedup_analysis(**kwargs)
        self._speedup_analysis(N_generator, filename, **kwargs)
        self._finalize_speedup_analysis(**kwargs)

    # Initialize data structures required for the speedup analysis phase
    def _init_speedup_analysis(self, **kwargs):
        # Make sure to clean up the stability factor bound caches to ensure
        # that the eigenvalue computations are actually carried out
        self.SCM_approximation._stability_factor_lower_bound_cache.clear()
        self.SCM_approximation._stability_factor_upper_bound_cache.clear()
        self.SCM_approximation.stability_factor_calculator._eigenvalue_cache.clear()
        self.SCM_approximation.stability_factor_calculator._eigenvector_cache.clear()

    def _speedup_analysis(self, N_generator=None, filename=None, **kwargs):
        if N_generator is None:
            def N_generator():
                N = self.SCM_approximation.N
                for n in range(1, N + 1):  # n = 1, ... N
                    yield n

        def N_generator_max():
            *_, Nmax = N_generator()
            return Nmax

        print(TextBox("SCM speedup analysis begins", fill="="))
        print("")

        speedup_analysis_table = SpeedupAnalysisTable(self.testing_set)
        speedup_analysis_table.set_Nmax(N_generator_max())
        speedup_analysis_table.add_column("speedup", group_name="speedup", operations=("min", "mean", "max"))

        exact_timer = Timer("parallel")
        SCM_timer = Timer("serial")

        for (mu_index, mu) in enumerate(self.testing_set):
            print(TextLine("SCM " + str(mu_index), fill="~"))
            self.SCM_approximation.set_mu(mu)

            exact_timer.start()
            self.SCM_approximation.evaluate_stability_factor()
            elapsed_exact = exact_timer.stop()

            for n in N_generator():
                SCM_timer.start()
                self.SCM_approximation.get_stability_factor_lower_bound(n)
                self.SCM_approximation.get_stability_factor_upper_bound(n)
                elapsed_SCM = SCM_timer.stop()
                speedup_analysis_table["speedup", n, mu_index] = elapsed_exact / elapsed_SCM

        # Print
        print("")
        print(speedup_analysis_table)

        print("")
        print(TextBox("SCM speedup analysis ends", fill="="))
        print("")

        # Export speedup analysis table
        speedup_analysis_table.save(self.folder["speedup_analysis"],
                                    "speedup_analysis" if filename is None else filename)
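
# A minimal driver sketch, not part of the class above: typical use of the SCM
# offline phase. The `scm_approximation` argument is assumed to be an SCM
# approximation object built elsewhere; `Nmax`/`tol` are written directly to
# the attributes read by the greedy loop, and the numeric values are
# illustrative assumptions. The greedy loop stops once the error estimator
# 1 - LB/UB, relative to its initial value, falls below `tol`, or once `Nmax`
# parameters have been selected.
def _example_scm_workflow(scm_approximation, folder_prefix="scm", Nmax=30, tol=1e-4, ntrain=200):
    reduction_method = SCMApproximationReductionMethod(scm_approximation, folder_prefix)
    reduction_method.Nmax = Nmax  # maximum number of greedily selected parameters
    reduction_method.tol = tol  # stopping tolerance on the relative error estimator
    reduction_method.initialize_training_set(ntrain)
    return reduction_method.offline()  # returns the trained SCM approximation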
class SCMApproximationReductionMethod(ReductionMethod):

    # Default initialization of members
    def __init__(self, SCM_approximation, folder_prefix):
        # Call the parent initialization
        ReductionMethod.__init__(self, folder_prefix)

        # $$ OFFLINE DATA STRUCTURES $$ #
        # High fidelity problem
        self.SCM_approximation = SCM_approximation
        # I/O
        self.folder["post_processing"] = os.path.join(self.folder_prefix, "post_processing")
        self.greedy_selected_parameters = SCM_approximation.greedy_selected_parameters
        self.greedy_error_estimators = GreedyErrorEstimatorsList()
        # Get data that were temporarily stored in the SCM_approximation
        self.bounding_box_minimum_eigensolver_parameters = \
            self.SCM_approximation._input_storage_for_SCM_reduction["bounding_box_minimum_eigensolver_parameters"]
        self.bounding_box_maximum_eigensolver_parameters = \
            self.SCM_approximation._input_storage_for_SCM_reduction["bounding_box_maximum_eigensolver_parameters"]
        del self.SCM_approximation._input_storage_for_SCM_reduction

    # OFFLINE: set the elements in the training set.
    def initialize_training_set(self, ntrain, enable_import=True, sampling=None, **kwargs):
        assert enable_import
        import_successful = ReductionMethod.initialize_training_set(
            self, self.SCM_approximation.mu_range, ntrain, enable_import, sampling, **kwargs)
        self.SCM_approximation.training_set = self.training_set
        return import_successful

    def initialize_testing_set(self, ntest, enable_import=False, sampling=None, **kwargs):
        return ReductionMethod.initialize_testing_set(
            self, self.SCM_approximation.mu_range, ntest, enable_import, sampling, **kwargs)

    # Perform the offline phase of SCM
    def offline(self):
        need_to_do_offline_stage = self._init_offline()
        if need_to_do_offline_stage:
            self._offline()
        self._finalize_offline()
        return self.SCM_approximation

    # Initialize data structures required for the offline phase
    def _init_offline(self):
        # Prepare folders and init SCM approximation
        all_folders = Folders()
        all_folders.update(self.folder)
        all_folders.update(self.SCM_approximation.folder)
        all_folders.pop("testing_set")  # this is required only in the error/speedup analysis
        all_folders.pop("error_analysis")  # this is required only in the error analysis
        all_folders.pop("speedup_analysis")  # this is required only in the speedup analysis
        at_least_one_folder_created = all_folders.create()
        if not at_least_one_folder_created:
            return False  # offline construction should be skipped, since data are already available
        else:
            self.SCM_approximation.init("offline")
            return True  # offline construction should be carried out

    def _offline(self):
        print(TextBox("SCM offline phase begins", fill="="))
        print("")

        # Compute the bounding box \mathcal{B}
        self.compute_bounding_box()
        print("")

        # Arbitrarily start from the first parameter in the training set
        self.SCM_approximation.set_mu(self.training_set[0])
        relative_error_estimator_max = 2. * self.tol

        while self.SCM_approximation.N < self.Nmax and relative_error_estimator_max >= self.tol:
            print(TextLine("SCM N = " + str(self.SCM_approximation.N), fill="~"))

            # Store the greedy parameter
            self.store_greedy_selected_parameters()

            # Evaluate the coercivity constant
            print("evaluate the stability factor for mu =", self.SCM_approximation.mu)
            (alpha, eigenvector) = self.SCM_approximation.evaluate_stability_factor()
            print("stability factor =", alpha)

            # Update data structures related to upper bound vectors
            UB_vector = self.compute_UB_vector(eigenvector)
            self.update_UB_vectors(UB_vector)

            # Prepare for next iteration
            print("find next mu")
            (error_estimator_max, relative_error_estimator_max) = self.greedy()
            print("maximum SCM error estimator =", error_estimator_max)
            print("maximum SCM relative error estimator =", relative_error_estimator_max)

            print("")

        print(TextBox("SCM offline phase ends", fill="="))
        print("")

    # Finalize data structures required after the offline phase
    def _finalize_offline(self):
        self.SCM_approximation.init("online")

    # Compute the bounding box \mathcal{B}
    def compute_bounding_box(self):
        # Resize the bounding box storage
        Q = self.SCM_approximation.truth_problem.Q["a"]
        for q in range(Q):
            # Compute the minimum eigenvalue
            minimum_eigenvalue_calculator = ParametrizedCoercivityConstantEigenProblem(
                self.SCM_approximation.truth_problem, ("a", q), False, "smallest",
                self.bounding_box_minimum_eigensolver_parameters, self.folder_prefix)
            minimum_eigenvalue_calculator.init()
            (self.SCM_approximation.B_min[q], _) = minimum_eigenvalue_calculator.solve()
            print("B_min[" + str(q) + "] = " + str(self.SCM_approximation.B_min[q]))

            # Compute the maximum eigenvalue
            maximum_eigenvalue_calculator = ParametrizedCoercivityConstantEigenProblem(
                self.SCM_approximation.truth_problem, ("a", q), False, "largest",
                self.bounding_box_maximum_eigensolver_parameters, self.folder_prefix)
            maximum_eigenvalue_calculator.init()
            (self.SCM_approximation.B_max[q], _) = maximum_eigenvalue_calculator.solve()
            print("B_max[" + str(q) + "] = " + str(self.SCM_approximation.B_max[q]))

        # Save to file
        self.SCM_approximation.B_min.save(self.SCM_approximation.folder["reduced_operators"], "B_min")
        self.SCM_approximation.B_max.save(self.SCM_approximation.folder["reduced_operators"], "B_max")

    # Store the greedy parameter
    def store_greedy_selected_parameters(self):
        mu = self.SCM_approximation.mu
        self.SCM_approximation.greedy_selected_parameters.append(mu)
        self.SCM_approximation.N = len(self.SCM_approximation.greedy_selected_parameters)
        # Save to file
        self.SCM_approximation.greedy_selected_parameters.save(
            self.SCM_approximation.folder["reduced_operators"], "greedy_selected_parameters")

    # Compute the ratio between a_q(u, u) and s(u, u), for all q in vec
    def compute_UB_vector(self, u):
        Q = self.SCM_approximation.truth_problem.Q["a"]
        inner_product = self.SCM_approximation.truth_problem.inner_product[0]
        UB_vector = OnlineVector(Q)
        norm_S_squared = transpose(u) * inner_product * u
        for q in range(Q):
            A_q = self.SCM_approximation.truth_problem.operator["a"][q]
            UB_vector[q] = (transpose(u) * A_q * u) / norm_S_squared
        return UB_vector

    def update_UB_vectors(self, UB_vector):
        self.SCM_approximation.UB_vectors.append(UB_vector)
        self.SCM_approximation.UB_vectors.save(self.SCM_approximation.folder["reduced_operators"], "UB_vectors")

    # Choose the next parameter in the offline stage in a greedy fashion
    def greedy(self):
        def solve_and_estimate_error(mu):
            self.SCM_approximation.set_mu(mu)

            LB = self.SCM_approximation.get_stability_factor_lower_bound()
            UB = self.SCM_approximation.get_stability_factor_upper_bound()
            error_estimator = (UB - LB) / UB
            if LB / UB < 0 and not isclose(LB / UB, 0.):  # if LB/UB << 0
                print("SCM warning at mu = " + str(mu) + ": LB = " + str(LB) + " < 0")
            if LB / UB > 1 and not isclose(LB / UB, 1.):  # if LB/UB >> 1
                print("SCM warning at mu = " + str(mu) + ": LB = " + str(LB) + " > UB = " + str(UB))
            return error_estimator

        (error_estimator_max, error_estimator_argmax) = self.training_set.max(solve_and_estimate_error)
        self.SCM_approximation.set_mu(self.training_set[error_estimator_argmax])
        self.greedy_error_estimators.append(error_estimator_max)
        self.greedy_error_estimators.save(self.folder["post_processing"], "error_estimator_max")
        return (error_estimator_max, error_estimator_max / self.greedy_error_estimators[0])

    # Initialize data structures required for the error analysis phase
    def _init_error_analysis(self, **kwargs):
        # Initialize the exact coercivity constant object
        self.SCM_approximation.exact_coercivity_constant_calculator.init()

        # Initialize reduced order data structures in the SCM online problem
        self.SCM_approximation.init("online")

    # Compute the error of the SCM approximation with respect to the
    # exact coercivity constant over the testing set
    def error_analysis(self, N_generator=None, filename=None, **kwargs):
        assert len(kwargs) == 0  # not used in this method

        self._init_error_analysis(**kwargs)
        self._error_analysis(N_generator, filename, **kwargs)
        self._finalize_error_analysis(**kwargs)

    def _error_analysis(self, N_generator=None, filename=None, **kwargs):
        if N_generator is None:
            def N_generator(n):
                return n

        N = self.SCM_approximation.N

        print(TextBox("SCM error analysis begins", fill="="))
        print("")

        error_analysis_table = ErrorAnalysisTable(self.testing_set)
        error_analysis_table.set_Nmax(N)
        error_analysis_table.add_column("normalized_error", group_name="scm", operations=("min", "mean", "max"))

        for (mu_index, mu) in enumerate(self.testing_set):
            print(TextLine("SCM " + str(mu_index), fill="~"))
            self.SCM_approximation.set_mu(mu)

            (exact, _) = self.SCM_approximation.evaluate_stability_factor()
            for n in range(1, N + 1):  # n = 1, ... N
                n_arg = N_generator(n)
                if n_arg is not None:
                    LB = self.SCM_approximation.get_stability_factor_lower_bound(n_arg)
                    UB = self.SCM_approximation.get_stability_factor_upper_bound(n_arg)
                    if LB / UB < 0 and not isclose(LB / UB, 0.):  # if LB/UB << 0
                        print("SCM warning at mu = " + str(mu) + ": LB = " + str(LB) + " < 0")
                    if LB / UB > 1 and not isclose(LB / UB, 1.):  # if LB/UB >> 1
                        print("SCM warning at mu = " + str(mu) + ": LB = " + str(LB) + " > UB = " + str(UB))
                    if LB / exact > 1 and not isclose(LB / exact, 1.):  # if LB/exact >> 1
                        print("SCM warning at mu = " + str(mu) + ": LB = " + str(LB) + " > exact = " + str(exact))
                    error_analysis_table["normalized_error", n, mu_index] = (exact - LB) / UB
                else:
                    error_analysis_table["normalized_error", n, mu_index] = NotImplemented

        # Print
        print("")
        print(error_analysis_table)

        print("")
        print(TextBox("SCM error analysis ends", fill="="))
        print("")

        # Export error analysis table
        error_analysis_table.save(self.folder["error_analysis"],
                                  "error_analysis" if filename is None else filename)

    # Compute the speedup of the SCM approximation with respect to the
    # exact coercivity constant over the testing set
    def speedup_analysis(self, N_generator=None, filename=None, **kwargs):
        assert len(kwargs) == 0  # not used in this method

        self._init_speedup_analysis(**kwargs)
        self._speedup_analysis(N_generator, filename, **kwargs)
        self._finalize_speedup_analysis(**kwargs)

    # Initialize data structures required for the speedup analysis phase
    def _init_speedup_analysis(self, **kwargs):
        # Make sure to clean up the coercivity constant caches to ensure that
        # the eigenvalue computations are actually carried out
        self.SCM_approximation._alpha_LB_cache.clear()
        self.SCM_approximation._alpha_UB_cache.clear()
        self.SCM_approximation.exact_coercivity_constant_calculator._eigenvalue_cache.clear()
        self.SCM_approximation.exact_coercivity_constant_calculator._eigenvector_cache.clear()

    def _speedup_analysis(self, N_generator=None, filename=None, **kwargs):
        if N_generator is None:
            def N_generator(n):
                return n

        N = self.SCM_approximation.N

        print(TextBox("SCM speedup analysis begins", fill="="))
        print("")

        speedup_analysis_table = SpeedupAnalysisTable(self.testing_set)
        speedup_analysis_table.set_Nmax(N)
        speedup_analysis_table.add_column("speedup", group_name="speedup", operations=("min", "mean", "max"))

        exact_timer = Timer("parallel")
        SCM_timer = Timer("serial")

        for (mu_index, mu) in enumerate(self.testing_set):
            print(TextLine("SCM " + str(mu_index), fill="~"))
            self.SCM_approximation.set_mu(mu)

            exact_timer.start()
            self.SCM_approximation.evaluate_stability_factor()
            elapsed_exact = exact_timer.stop()

            for n in range(1, N + 1):  # n = 1, ... N
                n_arg = N_generator(n)
                if n_arg is not None:
                    SCM_timer.start()
                    self.SCM_approximation.get_stability_factor_lower_bound(n_arg)
                    self.SCM_approximation.get_stability_factor_upper_bound(n_arg)
                    elapsed_SCM = SCM_timer.stop()
                    speedup_analysis_table["speedup", n, mu_index] = elapsed_exact / elapsed_SCM
                else:
                    speedup_analysis_table["speedup", n, mu_index] = NotImplemented

        # Print
        print("")
        print(speedup_analysis_table)

        print("")
        print(TextBox("SCM speedup analysis ends", fill="="))
        print("")

        # Export speedup analysis table
        speedup_analysis_table.save(self.folder["speedup_analysis"],
                                    "speedup_analysis" if filename is None else filename)
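
# Note, not part of the library: the two SCMApproximationReductionMethod
# variants above score each training parameter with syntactically different
# but algebraically identical greedy error estimators, 1 - LB/UB versus
# (UB - LB)/UB. The small self-check below (names are illustrative) makes the
# equivalence explicit, up to floating point round-off.
def _check_scm_estimator_equivalence(LB, UB):
    estimator_new = 1. - LB / UB  # form used by the first variant
    estimator_old = (UB - LB) / UB  # form used by the second variant
    assert abs(estimator_new - estimator_old) <= 1e-12 * max(1., abs(estimator_old))
    return estimator_old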
class RBReduction_Class(DifferentialProblemReductionMethod_DerivedClass):
    """
    The folders used to store the snapshots and the post processing data, the parameters
    for the greedy algorithm and the error estimator evaluations are initialized.

    :param truth_problem: class of the truth problem to be solved.
    :return: reduced RB class.
    """

    def __init__(self, truth_problem, **kwargs):
        # Call the parent initialization
        DifferentialProblemReductionMethod_DerivedClass.__init__(self, truth_problem, **kwargs)

        # Declare a GS object
        self.GS = None  # GramSchmidt (for problems with one component) or dict of GramSchmidt (for problems with several components)
        # I/O
        self.folder["snapshots"] = os.path.join(self.folder_prefix, "snapshots")
        self.folder["post_processing"] = os.path.join(self.folder_prefix, "post_processing")
        self.greedy_selected_parameters = GreedySelectedParametersList()
        self.greedy_error_estimators = GreedyErrorEstimatorsList()
        self.label = "RB"

    def _init_offline(self):
        # Call parent to initialize inner product and reduced problem
        output = DifferentialProblemReductionMethod_DerivedClass._init_offline(self)

        # Declare a new GS for each basis component
        if len(self.truth_problem.components) > 1:
            self.GS = dict()
            for component in self.truth_problem.components:
                assert len(self.truth_problem.inner_product[component]) == 1
                inner_product = self.truth_problem.inner_product[component][0]
                self.GS[component] = GramSchmidt(self.truth_problem.V, inner_product)
        else:
            assert len(self.truth_problem.inner_product) == 1
            inner_product = self.truth_problem.inner_product[0]
            self.GS = GramSchmidt(self.truth_problem.V, inner_product)

        # Return
        return output

    def offline(self):
        """
        It performs the offline phase of the reduced order model.

        :return: reduced_problem where all offline data are stored.
        """
        need_to_do_offline_stage = self._init_offline()
        if need_to_do_offline_stage:
            self._offline()
        self._finalize_offline()
        return self.reduced_problem

    @snapshot_links_to_cache
    def _offline(self):
        print(TextBox(self.truth_problem.name() + " " + self.label + " offline phase begins", fill="="))
        print("")

        # Initialize first parameter to be used
        self.reduced_problem.build_reduced_operators()
        self.reduced_problem.build_error_estimation_operators()
        (absolute_error_estimator_max, relative_error_estimator_max) = self.greedy()
        print("initial maximum absolute error estimator over training set =", absolute_error_estimator_max)
        print("initial maximum relative error estimator over training set =", relative_error_estimator_max)

        print("")

        iteration = 0
        while self.reduced_problem.N < self.Nmax and relative_error_estimator_max >= self.tol:
            print(TextLine("N = " + str(self.reduced_problem.N), fill="#"))

            print("truth solve for mu =", self.truth_problem.mu)
            snapshot = self.truth_problem.solve()
            self.truth_problem.export_solution(self.folder["snapshots"], "truth_" + str(iteration), snapshot)
            snapshot = self.postprocess_snapshot(snapshot, iteration)

            print("update basis matrix")
            self.update_basis_matrix(snapshot)
            iteration += 1

            print("build reduced operators")
            self.reduced_problem.build_reduced_operators()

            print("reduced order solve")
            self.reduced_problem.solve()

            print("build operators for error estimation")
            self.reduced_problem.build_error_estimation_operators()

            (absolute_error_estimator_max, relative_error_estimator_max) = self.greedy()
            print("maximum absolute error estimator over training set =", absolute_error_estimator_max)
            print("maximum relative error estimator over training set =", relative_error_estimator_max)

            print("")

        print(TextBox(self.truth_problem.name() + " " + self.label + " offline phase ends", fill="="))
        print("")

    def update_basis_matrix(self, snapshot):
        """
        It updates the basis matrix.

        :param snapshot: last offline solution calculated.
        """
        if len(self.truth_problem.components) > 1:
            for component in self.truth_problem.components:
                new_basis_function = self.GS[component].apply(
                    snapshot,
                    self.reduced_problem.basis_functions[component][self.reduced_problem.N_bc[component]:],
                    component=component)
                self.reduced_problem.basis_functions.enrich(new_basis_function, component=component)
                self.reduced_problem.N[component] += 1
            self.reduced_problem.basis_functions.save(self.reduced_problem.folder["basis"], "basis")
        else:
            new_basis_function = self.GS.apply(
                snapshot, self.reduced_problem.basis_functions[self.reduced_problem.N_bc:])
            self.reduced_problem.basis_functions.enrich(new_basis_function)
            self.reduced_problem.N += 1
            self.reduced_problem.basis_functions.save(self.reduced_problem.folder["basis"], "basis")

    def greedy(self):
        """
        It chooses the next parameter in the offline stage in a greedy fashion: wrapper with
        post processing of the result (in particular, it sets the greedily selected parameter
        and saves it to file).

        :return: max error estimator and the comparison with the first one calculated.
        """
        (error_estimator_max, error_estimator_argmax) = self._greedy()
        self.truth_problem.set_mu(self.training_set[error_estimator_argmax])
        self.greedy_selected_parameters.append(self.training_set[error_estimator_argmax])
        self.greedy_selected_parameters.save(self.folder["post_processing"], "mu_greedy")
        self.greedy_error_estimators.append(error_estimator_max)
        self.greedy_error_estimators.save(self.folder["post_processing"], "error_estimator_max")
        return (error_estimator_max, error_estimator_max / self.greedy_error_estimators[0])

    def _greedy(self):
        """
        It chooses the next parameter in the offline stage in a greedy fashion. Internal method.

        :return: max error estimator and the respective parameter.
        """
        if self.reduced_problem.N > 0:  # skip during initialization
            # Print some additional information on the consistency of the reduced basis
            print("absolute error for current mu =", self.reduced_problem.compute_error())
            print("absolute error estimator for current mu =", self.reduced_problem.estimate_error())

        # Carry out the actual greedy search
        def solve_and_estimate_error(mu):
            self.reduced_problem.set_mu(mu)
            self.reduced_problem.solve()
            error_estimator = self.reduced_problem.estimate_error()
            logger.log(DEBUG, "Error estimator for mu = " + str(mu) + " is " + str(error_estimator))
            return error_estimator

        if self.reduced_problem.N == 0:
            print("find initial mu")
        else:
            print("find next mu")
        return self.training_set.max(solve_and_estimate_error)

    def error_analysis(self, N_generator=None, filename=None, **kwargs):
        """
        It computes the error of the reduced order approximation with respect to the full order
        one over the testing set.

        :param N_generator: generator of dimension of reduced problem.
        """
        self._init_error_analysis(**kwargs)
        self._error_analysis(N_generator, filename, **kwargs)
        self._finalize_error_analysis(**kwargs)

    def _error_analysis(self, N_generator=None, filename=None, **kwargs):
        if N_generator is None:
            def N_generator():
                N = self.reduced_problem.N
                if isinstance(N, dict):
                    N = min(N.values())
                for n in range(1, N + 1):  # n = 1, ... N
                    yield n

        if "components" in kwargs:
            components = kwargs["components"]
        else:
            components = self.truth_problem.components

        def N_generator_items():
            for n in N_generator():
                assert isinstance(n, (dict, int))
                if isinstance(n, int):
                    yield (n, n)
                elif isinstance(n, dict):
                    assert len(n) == 1
                    (n_int, n_online_size_dict) = n.popitem()
                    assert isinstance(n_int, int)
                    assert isinstance(n_online_size_dict, OnlineSizeDict)
                    yield (n_int, n_online_size_dict)
                else:
                    raise TypeError("Invalid item generated by N_generator")

        def N_generator_max():
            *_, Nmax = N_generator_items()
            assert isinstance(Nmax, tuple)
            assert len(Nmax) == 2
            assert isinstance(Nmax[0], int)
            return Nmax[0]

        print(TextBox(self.truth_problem.name() + " " + self.label + " error analysis begins", fill="="))
        print("")

        error_analysis_table = ErrorAnalysisTable(self.testing_set)
        error_analysis_table.set_Nmax(N_generator_max())
        if len(components) > 1:
            all_components_string = "".join(components)
            for component in components:
                error_analysis_table.add_column(
                    "error_" + component,
                    group_name="solution_" + component + "_error", operations=("mean", "max"))
                error_analysis_table.add_column(
                    "relative_error_" + component,
                    group_name="solution_" + component + "_relative_error", operations=("mean", "max"))
            error_analysis_table.add_column(
                "error_" + all_components_string,
                group_name="solution_" + all_components_string + "_error", operations=("mean", "max"))
            error_analysis_table.add_column(
                "error_estimator_" + all_components_string,
                group_name="solution_" + all_components_string + "_error", operations=("mean", "max"))
            error_analysis_table.add_column(
                "effectivity_" + all_components_string,
                group_name="solution_" + all_components_string + "_error", operations=("min", "mean", "max"))
            error_analysis_table.add_column(
                "relative_error_" + all_components_string,
                group_name="solution_" + all_components_string + "_relative_error", operations=("mean", "max"))
            error_analysis_table.add_column(
                "relative_error_estimator_" + all_components_string,
                group_name="solution_" + all_components_string + "_relative_error", operations=("mean", "max"))
            error_analysis_table.add_column(
                "relative_effectivity_" + all_components_string,
                group_name="solution_" + all_components_string + "_relative_error",
                operations=("min", "mean", "max"))
        else:
            component = components[0]
            error_analysis_table.add_column(
                "error_" + component,
                group_name="solution_" + component + "_error", operations=("mean", "max"))
            error_analysis_table.add_column(
                "error_estimator_" + component,
                group_name="solution_" + component + "_error", operations=("mean", "max"))
            error_analysis_table.add_column(
                "effectivity_" + component,
                group_name="solution_" + component + "_error", operations=("min", "mean", "max"))
            error_analysis_table.add_column(
                "relative_error_" + component,
                group_name="solution_" + component + "_relative_error", operations=("mean", "max"))
            error_analysis_table.add_column(
                "relative_error_estimator_" + component,
                group_name="solution_" + component + "_relative_error", operations=("mean", "max"))
            error_analysis_table.add_column(
                "relative_effectivity_" + component,
                group_name="solution_" + component + "_relative_error", operations=("min", "mean", "max"))
        error_analysis_table.add_column(
            "error_output", group_name="output_error", operations=("mean", "max"))
        error_analysis_table.add_column(
            "error_estimator_output", group_name="output_error", operations=("mean", "max"))
        error_analysis_table.add_column(
            "effectivity_output", group_name="output_error", operations=("min", "mean", "max"))
error_analysis_table.add_column("relative_error_output", group_name="output_relative_error", operations=("mean", "max")) error_analysis_table.add_column("relative_error_estimator_output", group_name="output_relative_error", operations=("mean", "max")) error_analysis_table.add_column("relative_effectivity_output", group_name="output_relative_error", operations=("min", "mean", "max")) for (mu_index, mu) in enumerate(self.testing_set): print(TextLine(str(mu_index), fill="#")) self.reduced_problem.set_mu(mu) for (n_int, n_arg) in N_generator_items(): self.reduced_problem.solve(n_arg, **kwargs) error = self.reduced_problem.compute_error(**kwargs) if len(components) > 1: error[all_components_string] = sqrt( sum([ error[component]**2 for component in components ])) error_estimator = self.reduced_problem.estimate_error() relative_error = self.reduced_problem.compute_relative_error( **kwargs) if len(components) > 1: relative_error[all_components_string] = sqrt( sum([ relative_error[component]**2 for component in components ])) relative_error_estimator = self.reduced_problem.estimate_relative_error( ) self.reduced_problem.compute_output() error_output = self.reduced_problem.compute_error_output( **kwargs) error_output_estimator = self.reduced_problem.estimate_error_output( ) relative_error_output = self.reduced_problem.compute_relative_error_output( **kwargs) relative_error_output_estimator = self.reduced_problem.estimate_relative_error_output( ) if len(components) > 1: for component in components: error_analysis_table["error_" + component, n_int, mu_index] = error[component] error_analysis_table[ "relative_error_" + component, n_int, mu_index] = relative_error[component] error_analysis_table[ "error_" + all_components_string, n_int, mu_index] = error[all_components_string] error_analysis_table["error_estimator_" + all_components_string, n_int, mu_index] = error_estimator error_analysis_table[ "effectivity_" + all_components_string, n_int, mu_index] = error_analysis_table[ "error_estimator_" + all_components_string, n_int, mu_index] / error_analysis_table[ "error_" + all_components_string, n_int, mu_index] error_analysis_table[ "relative_error_" + all_components_string, n_int, mu_index] = relative_error[all_components_string] error_analysis_table[ "relative_error_estimator_" + all_components_string, n_int, mu_index] = relative_error_estimator error_analysis_table[ "relative_effectivity_" + all_components_string, n_int, mu_index] = error_analysis_table[ "relative_error_estimator_" + all_components_string, n_int, mu_index] / error_analysis_table[ "relative_error_" + all_components_string, n_int, mu_index] else: component = components[0] error_analysis_table["error_" + component, n_int, mu_index] = error error_analysis_table["error_estimator_" + component, n_int, mu_index] = error_estimator error_analysis_table[ "effectivity_" + component, n_int, mu_index] = error_analysis_table[ "error_estimator_" + component, n_int, mu_index] / error_analysis_table[ "error_" + component, n_int, mu_index] error_analysis_table["relative_error_" + component, n_int, mu_index] = relative_error error_analysis_table[ "relative_error_estimator_" + component, n_int, mu_index] = relative_error_estimator error_analysis_table[ "relative_effectivity_" + component, n_int, mu_index] = error_analysis_table[ "relative_error_estimator_" + component, n_int, mu_index] / error_analysis_table[ "relative_error_" + component, n_int, mu_index] error_analysis_table["error_output", n_int, mu_index] = error_output 
error_analysis_table["error_estimator_output", n_int, mu_index] = error_output_estimator error_analysis_table[ "effectivity_output", n_int, mu_index] = error_analysis_table[ "error_estimator_output", n_int, mu_index] / error_analysis_table["error_output", n_int, mu_index] error_analysis_table["relative_error_output", n_int, mu_index] = relative_error_output error_analysis_table[ "relative_error_estimator_output", n_int, mu_index] = relative_error_output_estimator error_analysis_table[ "relative_effectivity_output", n_int, mu_index] = error_analysis_table[ "relative_error_estimator_output", n_int, mu_index] / error_analysis_table[ "relative_error_output", n_int, mu_index] # Print print("") print(error_analysis_table) print("") print( TextBox(self.truth_problem.name() + " " + self.label + " error analysis ends", fill="=")) print("") # Export error analysis table error_analysis_table.save( self.folder["error_analysis"], "error_analysis" if filename is None else filename) def speedup_analysis(self, N_generator=None, filename=None, **kwargs): """ It computes the speedup of the reduced order approximation with respect to the full order one over the testing set. :param N_generator: generator of dimension of the reduced problem. """ self._init_speedup_analysis(**kwargs) self._speedup_analysis(N_generator, filename, **kwargs) self._finalize_speedup_analysis(**kwargs) def _speedup_analysis(self, N_generator=None, filename=None, **kwargs): if N_generator is None: def N_generator(): N = self.reduced_problem.N if isinstance(N, dict): N = min(N.values()) for n in range(1, N + 1): # n = 1, ... N yield n def N_generator_items(): for n in N_generator(): assert isinstance(n, (dict, int)) if isinstance(n, int): yield (n, n) elif isinstance(n, dict): assert len(n) == 1 (n_int, n_online_size_dict) = n.popitem() assert isinstance(n_int, int) assert isinstance(n_online_size_dict, OnlineSizeDict) yield (n_int, n_online_size_dict) else: raise TypeError( "Invalid item generated by N_generator") def N_generator_max(): *_, Nmax = N_generator_items() assert isinstance(Nmax, tuple) assert len(Nmax) == 2 assert isinstance(Nmax[0], int) return Nmax[0] print( TextBox(self.truth_problem.name() + " " + self.label + " speedup analysis begins", fill="=")) print("") speedup_analysis_table = SpeedupAnalysisTable(self.testing_set) speedup_analysis_table.set_Nmax(N_generator_max()) speedup_analysis_table.add_column("speedup_solve", group_name="speedup_solve", operations=("min", "mean", "max")) speedup_analysis_table.add_column( "speedup_solve_and_estimate_error", group_name="speedup_solve_and_estimate_error", operations=("min", "mean", "max")) speedup_analysis_table.add_column( "speedup_solve_and_estimate_relative_error", group_name="speedup_solve_and_estimate_relative_error", operations=("min", "mean", "max")) speedup_analysis_table.add_column("speedup_output", group_name="speedup_output", operations=("min", "mean", "max")) speedup_analysis_table.add_column( "speedup_output_and_estimate_error_output", group_name="speedup_output_and_estimate_error_output", operations=("min", "mean", "max")) speedup_analysis_table.add_column( "speedup_output_and_estimate_relative_error_output", group_name="speedup_output_and_estimate_relative_error_output", operations=("min", "mean", "max")) truth_timer = Timer("parallel") reduced_timer = Timer("serial") for (mu_index, mu) in enumerate(self.testing_set): print(TextLine(str(mu_index), fill="#")) self.reduced_problem.set_mu(mu) truth_timer.start() self.truth_problem.solve(**kwargs) 
    def _speedup_analysis(self, N_generator=None, filename=None, **kwargs):
        if N_generator is None:
            def N_generator():
                N = self.reduced_problem.N
                if isinstance(N, dict):
                    N = min(N.values())
                for n in range(1, N + 1):  # n = 1, ..., N
                    yield n

        def N_generator_items():
            for n in N_generator():
                assert isinstance(n, (dict, int))
                if isinstance(n, int):
                    yield (n, n)
                elif isinstance(n, dict):
                    assert len(n) == 1
                    (n_int, n_online_size_dict) = n.popitem()
                    assert isinstance(n_int, int)
                    assert isinstance(n_online_size_dict, OnlineSizeDict)
                    yield (n_int, n_online_size_dict)
                else:
                    raise TypeError("Invalid item generated by N_generator")

        def N_generator_max():
            *_, Nmax = N_generator_items()
            assert isinstance(Nmax, tuple)
            assert len(Nmax) == 2
            assert isinstance(Nmax[0], int)
            return Nmax[0]

        print(TextBox(self.truth_problem.name() + " " + self.label + " speedup analysis begins", fill="="))
        print("")

        speedup_analysis_table = SpeedupAnalysisTable(self.testing_set)
        speedup_analysis_table.set_Nmax(N_generator_max())
        speedup_analysis_table.add_column("speedup_solve", group_name="speedup_solve",
                                          operations=("min", "mean", "max"))
        speedup_analysis_table.add_column("speedup_solve_and_estimate_error",
                                          group_name="speedup_solve_and_estimate_error",
                                          operations=("min", "mean", "max"))
        speedup_analysis_table.add_column("speedup_solve_and_estimate_relative_error",
                                          group_name="speedup_solve_and_estimate_relative_error",
                                          operations=("min", "mean", "max"))
        speedup_analysis_table.add_column("speedup_output", group_name="speedup_output",
                                          operations=("min", "mean", "max"))
        speedup_analysis_table.add_column("speedup_output_and_estimate_error_output",
                                          group_name="speedup_output_and_estimate_error_output",
                                          operations=("min", "mean", "max"))
        speedup_analysis_table.add_column("speedup_output_and_estimate_relative_error_output",
                                          group_name="speedup_output_and_estimate_relative_error_output",
                                          operations=("min", "mean", "max"))

        truth_timer = Timer("parallel")
        reduced_timer = Timer("serial")

        for (mu_index, mu) in enumerate(self.testing_set):
            print(TextLine(str(mu_index), fill="#"))
            self.reduced_problem.set_mu(mu)

            # Time the truth solve and the truth output evaluation once per parameter
            truth_timer.start()
            self.truth_problem.solve(**kwargs)
            elapsed_truth_solve = truth_timer.stop()

            truth_timer.start()
            self.truth_problem.compute_output()
            elapsed_truth_output = truth_timer.stop()

            for (n_int, n_arg) in N_generator_items():
                reduced_timer.start()
                solution = self.reduced_problem.solve(n_arg, **kwargs)
                elapsed_reduced_solve = reduced_timer.stop()

                truth_timer.start()
                self.reduced_problem.compute_error(**kwargs)
                elapsed_error = truth_timer.stop()

                reduced_timer.start()
                error_estimator = self.reduced_problem.estimate_error()
                elapsed_error_estimator = reduced_timer.stop()

                truth_timer.start()
                self.reduced_problem.compute_relative_error(**kwargs)
                elapsed_relative_error = truth_timer.stop()

                reduced_timer.start()
                relative_error_estimator = self.reduced_problem.estimate_relative_error()
                elapsed_relative_error_estimator = reduced_timer.stop()

                reduced_timer.start()
                output = self.reduced_problem.compute_output()
                elapsed_reduced_output = reduced_timer.stop()

                truth_timer.start()
                self.reduced_problem.compute_error_output(**kwargs)
                elapsed_error_output = truth_timer.stop()

                reduced_timer.start()
                error_estimator_output = self.reduced_problem.estimate_error_output()
                elapsed_error_estimator_output = reduced_timer.stop()

                truth_timer.start()
                self.reduced_problem.compute_relative_error_output(**kwargs)
                elapsed_relative_error_output = truth_timer.stop()

                reduced_timer.start()
                relative_error_estimator_output = self.reduced_problem.estimate_relative_error_output()
                elapsed_relative_error_estimator_output = reduced_timer.stop()

                if solution is not NotImplemented:
                    speedup_analysis_table["speedup_solve", n_int, mu_index] = (
                        elapsed_truth_solve / elapsed_reduced_solve)
                else:
                    speedup_analysis_table["speedup_solve", n_int, mu_index] = NotImplemented
                if error_estimator is not NotImplemented:
                    speedup_analysis_table["speedup_solve_and_estimate_error", n_int, mu_index] = (
                        (elapsed_truth_solve + elapsed_error)
                        / (elapsed_reduced_solve + elapsed_error_estimator))
                else:
                    speedup_analysis_table["speedup_solve_and_estimate_error",
                                           n_int, mu_index] = NotImplemented
                if relative_error_estimator is not NotImplemented:
                    speedup_analysis_table["speedup_solve_and_estimate_relative_error", n_int, mu_index] = (
                        (elapsed_truth_solve + elapsed_relative_error)
                        / (elapsed_reduced_solve + elapsed_relative_error_estimator))
                else:
                    speedup_analysis_table["speedup_solve_and_estimate_relative_error",
                                           n_int, mu_index] = NotImplemented
                if output is not NotImplemented:
                    speedup_analysis_table["speedup_output", n_int, mu_index] = (
                        (elapsed_truth_solve + elapsed_truth_output)
                        / (elapsed_reduced_solve + elapsed_reduced_output))
                else:
                    speedup_analysis_table["speedup_output", n_int, mu_index] = NotImplemented
                if error_estimator_output is not NotImplemented:
                    assert output is not NotImplemented
                    speedup_analysis_table["speedup_output_and_estimate_error_output", n_int, mu_index] = (
                        (elapsed_truth_solve + elapsed_truth_output + elapsed_error_output)
                        / (elapsed_reduced_solve + elapsed_reduced_output + elapsed_error_estimator_output))
                else:
                    speedup_analysis_table["speedup_output_and_estimate_error_output",
                                           n_int, mu_index] = NotImplemented
                if relative_error_estimator_output is not NotImplemented:
                    assert output is not NotImplemented
                    speedup_analysis_table["speedup_output_and_estimate_relative_error_output",
                                           n_int, mu_index] = (
                        (elapsed_truth_solve + elapsed_truth_output + elapsed_relative_error_output)
                        / (elapsed_reduced_solve + elapsed_reduced_output
                           + elapsed_relative_error_estimator_output))
                else:
                    speedup_analysis_table["speedup_output_and_estimate_relative_error_output",
                                           n_int, mu_index] = NotImplemented

        # Print
        print("")
        print(speedup_analysis_table)
        print("")
        print(TextBox(self.truth_problem.name() + " " + self.label + " speedup analysis ends", fill="="))
        print("")

        # Export speedup analysis table
        speedup_analysis_table.save(self.folder["speedup_analysis"],
                                    "speedup_analysis" if filename is None else filename)
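# A minimal usage sketch of the analysis stage (the names "reduction_method" and
# UniformDistribution are assumptions here, not defined in this module). After the
# offline phase has been completed, the two analyses are typically driven as follows:
#
#     reduction_method.initialize_testing_set(100, sampling=UniformDistribution())
#     reduction_method.error_analysis(filename="error_analysis")
#     reduction_method.speedup_analysis(filename="speedup_analysis")
#
# Both analyses loop over the same testing set, so running them back to back reuses
# the testing-set initialization.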
class RBReduction_Class(DifferentialProblemReductionMethod_DerivedClass):
    """
    The folders used to store the snapshots and the post processing data, the parameters for the
    greedy algorithm and the error estimator evaluations are initialized.

    :param truth_problem: class of the truth problem to be solved.
    :return: reduced RB class.
    """

    def __init__(self, truth_problem, **kwargs):
        # Call the parent initialization
        DifferentialProblemReductionMethod_DerivedClass.__init__(self, truth_problem, **kwargs)

        # Declare a GS object
        self.GS = None  # GramSchmidt (for problems with one component) or dict of GramSchmidt (for problems with several components)

        # I/O
        self.folder["snapshots"] = os.path.join(self.folder_prefix, "snapshots")
        self.folder["post_processing"] = os.path.join(self.folder_prefix, "post_processing")
        self.greedy_selected_parameters = GreedySelectedParametersList()
        self.greedy_error_estimators = GreedyErrorEstimatorsList()
        self.label = "RB"

    def _init_offline(self):
        # Call parent to initialize inner product and reduced problem
        output = DifferentialProblemReductionMethod_DerivedClass._init_offline(self)

        # Declare a new GS for each basis component
        if len(self.truth_problem.components) > 1:
            self.GS = dict()
            for component in self.truth_problem.components:
                assert len(self.truth_problem.inner_product[component]) == 1
                inner_product = self.truth_problem.inner_product[component][0]
                self.GS[component] = GramSchmidt(inner_product)
        else:
            assert len(self.truth_problem.inner_product) == 1
            inner_product = self.truth_problem.inner_product[0]
            self.GS = GramSchmidt(inner_product)

        # The current value of mu may have been already used when computing lifting functions.
        # If so, we do not want to use that value again at the first greedy iteration, because
        # for steady linear problems with only one parametrized BC the resulting first snapshot
        # would have been already stored in the basis, being exactly equal to the lifting.
        # To this end, we arbitrarily change the current value of mu to the first parameter
        # in the training set.
        if output:  # do not bother changing the current mu if the offline stage has already been completed
            need_to_change_mu = False
            if len(self.truth_problem.components) > 1:
                for component in self.truth_problem.components:
                    if (self.reduced_problem.dirichlet_bc[component]
                            and not self.reduced_problem.dirichlet_bc_are_homogeneous[component]):
                        need_to_change_mu = True
                        break
            else:
                if (self.reduced_problem.dirichlet_bc
                        and not self.reduced_problem.dirichlet_bc_are_homogeneous):
                    need_to_change_mu = True
            if (need_to_change_mu
                    and len(self.truth_problem.mu) > 0):  # there is not much we can change in the trivial case without any parameter!
                new_mu = self.training_set[0]
                assert self.truth_problem.mu != new_mu
                self.truth_problem.set_mu(new_mu)

        # Return
        return output

    def offline(self):
        """
        It performs the offline phase of the reduced order model.

        :return: reduced_problem where all offline data are stored.
        """
        need_to_do_offline_stage = self._init_offline()
        if need_to_do_offline_stage:
            self._offline()
        self._finalize_offline()
        return self.reduced_problem
    def _offline(self):
        print(TextBox(self.truth_problem.name() + " " + self.label + " offline phase begins", fill="="))
        print("")

        iteration = 0
        relative_error_estimator_max = 2. * self.tol
        while self.reduced_problem.N < self.Nmax and relative_error_estimator_max >= self.tol:
            print(TextLine("N = " + str(self.reduced_problem.N), fill="#"))

            print("truth solve for mu =", self.truth_problem.mu)
            snapshot = self.truth_problem.solve()
            self.truth_problem.export_solution(self.folder["snapshots"], "truth_" + str(iteration), snapshot)
            snapshot = self.postprocess_snapshot(snapshot, iteration)

            print("update basis matrix")
            self.update_basis_matrix(snapshot)
            iteration += 1

            print("build reduced operators")
            self.reduced_problem.build_reduced_operators()

            print("reduced order solve")
            self.reduced_problem.solve()

            print("build operators for error estimation")
            self.reduced_problem.build_error_estimation_operators()

            (absolute_error_estimator_max, relative_error_estimator_max) = self.greedy()
            print("maximum absolute error estimator over training set =", absolute_error_estimator_max)
            print("maximum relative error estimator over training set =", relative_error_estimator_max)

            print("")

        print(TextBox(self.truth_problem.name() + " " + self.label + " offline phase ends", fill="="))
        print("")

    def update_basis_matrix(self, snapshot):
        """
        It updates the basis matrix.

        :param snapshot: last offline solution calculated.
        """
        if len(self.truth_problem.components) > 1:
            for component in self.truth_problem.components:
                self.reduced_problem.basis_functions.enrich(snapshot, component=component)
                self.GS[component].apply(self.reduced_problem.basis_functions[component],
                                         self.reduced_problem.N_bc[component])
                self.reduced_problem.N[component] += 1
            self.reduced_problem.basis_functions.save(self.reduced_problem.folder["basis"], "basis")
        else:
            self.reduced_problem.basis_functions.enrich(snapshot)
            self.GS.apply(self.reduced_problem.basis_functions, self.reduced_problem.N_bc)
            self.reduced_problem.N += 1
            self.reduced_problem.basis_functions.save(self.reduced_problem.folder["basis"], "basis")

    def greedy(self):
        """
        It chooses the next parameter in the offline stage in a greedy fashion: this is a wrapper
        that post-processes the result (in particular, it stores the greedily selected parameter
        and saves it to file).

        :return: the maximum error estimator and its value relative to the first greedy iteration.
        """
        (error_estimator_max, error_estimator_argmax) = self._greedy()
        self.truth_problem.set_mu(self.training_set[error_estimator_argmax])
        self.greedy_selected_parameters.append(self.training_set[error_estimator_argmax])
        self.greedy_selected_parameters.save(self.folder["post_processing"], "mu_greedy")
        self.greedy_error_estimators.append(error_estimator_max)
        self.greedy_error_estimators.save(self.folder["post_processing"], "error_estimator_max")
        return (error_estimator_max, error_estimator_max / self.greedy_error_estimators[0])
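    # For illustration only: training_set.max is assumed to evaluate the supplied
    # callable at every parameter in the training set and return the pair
    # (maximum value, index of the maximizer). A serial sketch of that contract:
    #
    #     def serial_max(training_set, evaluate):
    #         values = [evaluate(mu) for mu in training_set]
    #         argmax = max(range(len(values)), key=lambda i: values[i])
    #         return (values[argmax], argmax)
    #
    # The library implementation may distribute the evaluations in parallel, but
    # greedy() above only relies on the returned (max, argmax) pair.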
""" # Print some additional information on the consistency of the reduced basis print("absolute error for current mu =", self.reduced_problem.compute_error()) print("absolute error estimator for current mu =", self.reduced_problem.estimate_error()) # Carry out the actual greedy search def solve_and_estimate_error(mu): self.reduced_problem.set_mu(mu) self.reduced_problem.solve() error_estimator = self.reduced_problem.estimate_error() log( DEBUG, "Error estimator for mu = " + str(mu) + " is " + str(error_estimator)) return error_estimator print("find next mu") return self.training_set.max(solve_and_estimate_error) def error_analysis(self, N_generator=None, filename=None, **kwargs): """ It computes the error of the reduced order approximation with respect to the full order one over the testing set. :param N: dimension of reduced problem. """ self._init_error_analysis(**kwargs) self._error_analysis(N_generator, filename, **kwargs) self._finalize_error_analysis(**kwargs) def _error_analysis(self, N_generator=None, filename=None, **kwargs): if N_generator is None: def N_generator(n): return n if "components" in kwargs: components = kwargs["components"] else: components = self.truth_problem.components N = self.reduced_problem.N if isinstance(N, dict): N = min(N.values()) print( TextBox(self.truth_problem.name() + " " + self.label + " error analysis begins", fill="=")) print("") error_analysis_table = ErrorAnalysisTable(self.testing_set) error_analysis_table.set_Nmax(N) if len(components) > 1: all_components_string = "".join(components) for component in components: error_analysis_table.add_column("error_" + component, group_name="solution_" + component + "_error", operations=("mean", "max")) error_analysis_table.add_column( "relative_error_" + component, group_name="solution_" + component + "_relative_error", operations=("mean", "max")) error_analysis_table.add_column( "error_" + all_components_string, group_name="solution_" + all_components_string + "_error", operations=("mean", "max")) error_analysis_table.add_column( "error_estimator_" + all_components_string, group_name="solution_" + all_components_string + "_error", operations=("mean", "max")) error_analysis_table.add_column( "effectivity_" + all_components_string, group_name="solution_" + all_components_string + "_error", operations=("min", "mean", "max")) error_analysis_table.add_column( "relative_error_" + all_components_string, group_name="solution_" + all_components_string + "_relative_error", operations=("mean", "max")) error_analysis_table.add_column( "relative_error_estimator_" + all_components_string, group_name="solution_" + all_components_string + "_relative_error", operations=("mean", "max")) error_analysis_table.add_column( "relative_effectivity_" + all_components_string, group_name="solution_" + all_components_string + "_relative_error", operations=("min", "mean", "max")) else: component = components[0] error_analysis_table.add_column("error_" + component, group_name="solution_" + component + "_error", operations=("mean", "max")) error_analysis_table.add_column("error_estimator_" + component, group_name="solution_" + component + "_error", operations=("mean", "max")) error_analysis_table.add_column( "effectivity_" + component, group_name="solution_" + component + "_error", operations=("min", "mean", "max")) error_analysis_table.add_column("relative_error_" + component, group_name="solution_" + component + "_relative_error", operations=("mean", "max")) error_analysis_table.add_column( "relative_error_estimator_" + component, 
group_name="solution_" + component + "_relative_error", operations=("mean", "max")) error_analysis_table.add_column( "relative_effectivity_" + component, group_name="solution_" + component + "_relative_error", operations=("min", "mean", "max")) error_analysis_table.add_column("error_output", group_name="output_error", operations=("mean", "max")) error_analysis_table.add_column("error_estimator_output", group_name="output_error", operations=("mean", "max")) error_analysis_table.add_column("effectivity_output", group_name="output_error", operations=("min", "mean", "max")) error_analysis_table.add_column("relative_error_output", group_name="output_relative_error", operations=("mean", "max")) error_analysis_table.add_column("relative_error_estimator_output", group_name="output_relative_error", operations=("mean", "max")) error_analysis_table.add_column("relative_effectivity_output", group_name="output_relative_error", operations=("min", "mean", "max")) for (mu_index, mu) in enumerate(self.testing_set): print(TextLine(str(mu_index), fill="#")) self.reduced_problem.set_mu(mu) for n in range(1, N + 1): # n = 1, ... N n_arg = N_generator(n) if n_arg is not None: self.reduced_problem.solve(n_arg, **kwargs) error = self.reduced_problem.compute_error(**kwargs) if len(components) > 1: error[all_components_string] = sqrt( sum([ error[component]**2 for component in components ])) error_estimator = self.reduced_problem.estimate_error() relative_error = self.reduced_problem.compute_relative_error( **kwargs) if len(components) > 1: relative_error[all_components_string] = sqrt( sum([ relative_error[component]**2 for component in components ])) relative_error_estimator = self.reduced_problem.estimate_relative_error( ) self.reduced_problem.compute_output() error_output = self.reduced_problem.compute_error_output( **kwargs) error_output_estimator = self.reduced_problem.estimate_error_output( ) relative_error_output = self.reduced_problem.compute_relative_error_output( **kwargs) relative_error_output_estimator = self.reduced_problem.estimate_relative_error_output( ) else: if len(components) > 1: error = { component: NotImplemented for component in components } error[all_components_string] = NotImplemented else: error = NotImplemented error_estimator = NotImplemented if len(components) > 1: relative_error = { component: NotImplemented for component in components } relative_error[ all_components_string] = NotImplemented else: relative_error = NotImplemented relative_error_estimator = NotImplemented error_output = NotImplemented error_output_estimator = NotImplemented relative_error_output = NotImplemented relative_error_output_estimator = NotImplemented if len(components) > 1: for component in components: error_analysis_table["error_" + component, n, mu_index] = error[component] error_analysis_table[ "relative_error_" + component, n, mu_index] = relative_error[component] error_analysis_table[ "error_" + all_components_string, n, mu_index] = error[all_components_string] error_analysis_table["error_estimator_" + all_components_string, n, mu_index] = error_estimator error_analysis_table[ "effectivity_" + all_components_string, n, mu_index] = error_analysis_table[ "error_estimator_" + all_components_string, n, mu_index] / error_analysis_table[ "error_" + all_components_string, n, mu_index] error_analysis_table[ "relative_error_" + all_components_string, n, mu_index] = relative_error[all_components_string] error_analysis_table[ "relative_error_estimator_" + all_components_string, n, mu_index] = 
                    error_analysis_table["relative_effectivity_" + all_components_string, n, mu_index] = (
                        error_analysis_table["relative_error_estimator_" + all_components_string, n, mu_index]
                        / error_analysis_table["relative_error_" + all_components_string, n, mu_index])
                else:
                    component = components[0]
                    error_analysis_table["error_" + component, n, mu_index] = error
                    error_analysis_table["error_estimator_" + component, n, mu_index] = error_estimator
                    error_analysis_table["effectivity_" + component, n, mu_index] = (
                        error_analysis_table["error_estimator_" + component, n, mu_index]
                        / error_analysis_table["error_" + component, n, mu_index])
                    error_analysis_table["relative_error_" + component, n, mu_index] = relative_error
                    error_analysis_table["relative_error_estimator_" + component, n, mu_index] = (
                        relative_error_estimator)
                    error_analysis_table["relative_effectivity_" + component, n, mu_index] = (
                        error_analysis_table["relative_error_estimator_" + component, n, mu_index]
                        / error_analysis_table["relative_error_" + component, n, mu_index])

                error_analysis_table["error_output", n, mu_index] = error_output
                error_analysis_table["error_estimator_output", n, mu_index] = error_output_estimator
                error_analysis_table["effectivity_output", n, mu_index] = (
                    error_analysis_table["error_estimator_output", n, mu_index]
                    / error_analysis_table["error_output", n, mu_index])
                error_analysis_table["relative_error_output", n, mu_index] = relative_error_output
                error_analysis_table["relative_error_estimator_output", n, mu_index] = (
                    relative_error_output_estimator)
                error_analysis_table["relative_effectivity_output", n, mu_index] = (
                    error_analysis_table["relative_error_estimator_output", n, mu_index]
                    / error_analysis_table["relative_error_output", n, mu_index])

        # Print
        print("")
        print(error_analysis_table)
        print("")
        print(TextBox(self.truth_problem.name() + " " + self.label + " error analysis ends", fill="="))
        print("")

        # Export error analysis table
        error_analysis_table.save(self.folder["error_analysis"],
                                  "error_analysis" if filename is None else filename)
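    # In this class N_generator is a plain function mapping the current dimension n to
    # the argument of the reduced solve, or to None to skip that dimension. A minimal
    # sketch (the skipping rule below is an arbitrary example, not a library default):
    #
    #     def even_N_only(n):
    #         return n if n % 2 == 0 else None
    #
    #     reduction_method.error_analysis(N_generator=even_N_only)
    #
    # Skipped dimensions are stored as NotImplemented in the corresponding table rows,
    # matching the else branch of the loop in _error_analysis above.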
""" self._init_speedup_analysis(**kwargs) self._speedup_analysis(N_generator, filename, **kwargs) self._finalize_speedup_analysis(**kwargs) def _speedup_analysis(self, N_generator=None, filename=None, **kwargs): if N_generator is None: def N_generator(n): return n N = self.reduced_problem.N if isinstance(N, dict): N = min(N.values()) print( TextBox(self.truth_problem.name() + " " + self.label + " speedup analysis begins", fill="=")) print("") speedup_analysis_table = SpeedupAnalysisTable(self.testing_set) speedup_analysis_table.set_Nmax(N) speedup_analysis_table.add_column("speedup_solve", group_name="speedup_solve", operations=("min", "mean", "max")) speedup_analysis_table.add_column( "speedup_solve_and_estimate_error", group_name="speedup_solve_and_estimate_error", operations=("min", "mean", "max")) speedup_analysis_table.add_column( "speedup_solve_and_estimate_relative_error", group_name="speedup_solve_and_estimate_relative_error", operations=("min", "mean", "max")) speedup_analysis_table.add_column("speedup_output", group_name="speedup_output", operations=("min", "mean", "max")) speedup_analysis_table.add_column( "speedup_output_and_estimate_error_output", group_name="speedup_output_and_estimate_error_output", operations=("min", "mean", "max")) speedup_analysis_table.add_column( "speedup_output_and_estimate_relative_error_output", group_name="speedup_output_and_estimate_relative_error_output", operations=("min", "mean", "max")) truth_timer = Timer("parallel") reduced_timer = Timer("serial") for (mu_index, mu) in enumerate(self.testing_set): print(TextLine(str(mu_index), fill="#")) self.reduced_problem.set_mu(mu) truth_timer.start() self.truth_problem.solve(**kwargs) elapsed_truth_solve = truth_timer.stop() truth_timer.start() self.truth_problem.compute_output() elapsed_truth_output = truth_timer.stop() for n in range(1, N + 1): # n = 1, ... 
N n_arg = N_generator(n) if n_arg is not None: reduced_timer.start() solution = self.reduced_problem.solve(n_arg, **kwargs) elapsed_reduced_solve = reduced_timer.stop() truth_timer.start() self.reduced_problem.compute_error(**kwargs) elapsed_error = truth_timer.stop() reduced_timer.start() error_estimator = self.reduced_problem.estimate_error() elapsed_error_estimator = reduced_timer.stop() truth_timer.start() self.reduced_problem.compute_relative_error(**kwargs) elapsed_relative_error = truth_timer.stop() reduced_timer.start() relative_error_estimator = self.reduced_problem.estimate_relative_error( ) elapsed_relative_error_estimator = reduced_timer.stop() reduced_timer.start() output = self.reduced_problem.compute_output() elapsed_reduced_output = reduced_timer.stop() truth_timer.start() self.reduced_problem.compute_error_output(**kwargs) elapsed_error_output = truth_timer.stop() reduced_timer.start() error_estimator_output = self.reduced_problem.estimate_error_output( ) elapsed_error_estimator_output = reduced_timer.stop() truth_timer.start() self.reduced_problem.compute_relative_error_output( **kwargs) elapsed_relative_error_output = truth_timer.stop() reduced_timer.start() relative_error_estimator_output = self.reduced_problem.estimate_relative_error_output( ) elapsed_relative_error_estimator_output = reduced_timer.stop( ) else: solution = NotImplemented error_estimator = NotImplemented relative_error_estimator = NotImplemented output = NotImplemented error_estimator_output = NotImplemented relative_error_estimator_output = NotImplemented if solution is not NotImplemented: speedup_analysis_table[ "speedup_solve", n, mu_index] = elapsed_truth_solve / elapsed_reduced_solve else: speedup_analysis_table["speedup_solve", n, mu_index] = NotImplemented if error_estimator is not NotImplemented: speedup_analysis_table[ "speedup_solve_and_estimate_error", n, mu_index] = (elapsed_truth_solve + elapsed_error ) / (elapsed_reduced_solve + elapsed_error_estimator) else: speedup_analysis_table[ "speedup_solve_and_estimate_error", n, mu_index] = NotImplemented if relative_error_estimator is not NotImplemented: speedup_analysis_table[ "speedup_solve_and_estimate_relative_error", n, mu_index] = (elapsed_truth_solve + elapsed_relative_error) / ( elapsed_reduced_solve + elapsed_relative_error_estimator) else: speedup_analysis_table[ "speedup_solve_and_estimate_relative_error", n, mu_index] = NotImplemented if output is not NotImplemented: speedup_analysis_table[ "speedup_output", n, mu_index] = (elapsed_truth_solve + elapsed_truth_output) / ( elapsed_reduced_solve + elapsed_reduced_output) else: speedup_analysis_table["speedup_output", n, mu_index] = NotImplemented if error_estimator_output is not NotImplemented: assert output is not NotImplemented speedup_analysis_table[ "speedup_output_and_estimate_error_output", n, mu_index] = (elapsed_truth_solve + elapsed_truth_output + elapsed_error_output) / ( elapsed_reduced_solve + elapsed_reduced_output + elapsed_error_estimator_output) else: speedup_analysis_table[ "speedup_output_and_estimate_error_output", n, mu_index] = NotImplemented if relative_error_estimator_output is not NotImplemented: assert output is not NotImplemented speedup_analysis_table[ "speedup_output_and_estimate_relative_error_output", n, mu_index] = ( elapsed_truth_solve + elapsed_truth_output + elapsed_relative_error_output) / ( elapsed_reduced_solve + elapsed_reduced_output + elapsed_relative_error_estimator_output) else: speedup_analysis_table[ 
"speedup_output_and_estimate_relative_error_output", n, mu_index] = NotImplemented # Print print("") print(speedup_analysis_table) print("") print( TextBox(self.truth_problem.name() + " " + self.label + " speedup analysis ends", fill="=")) print("") # Export speedup analysis table speedup_analysis_table.save( self.folder["speedup_analysis"], "speedup_analysis" if filename is None else filename)