def _init_lhs(self, lhs: Matrix.Type(), bcs: (list_of(DirichletBC), ProductOutputDirichletBC, dict_of(str, list_of(DirichletBC)), dict_of(str, ProductOutputDirichletBC))):
    """Store a duplicate of the left-hand side matrix.

    Working on a copy guarantees that applying the boundary conditions
    later on does not alter the matrix referenced by the caller.
    """
    self.lhs = lhs.copy()
def test_supercedes_2():
    """Exercise supercedes() on plain types, tuples of admissible types,
    iterable_of/list_of compound types and dict_of compound types."""
    class A(object):
        pass

    class B(A):
        pass

    class C(object):
        pass

    # Plain types and tuples of admissible alternatives
    assert supercedes((B, ), (A, ))
    assert supercedes((B, A), (A, A))
    assert not supercedes((B, A), (A, B))
    assert not supercedes((A, ), (B, ))
    assert supercedes((B, ), ((A, C), ))
    assert supercedes((C, ), ((A, C), ))
    assert not supercedes((A, ), ((B, C), ))
    assert not supercedes(((A, B), ), (B, ))
    assert supercedes(((A, B), ), (A, ))
    assert supercedes((A, ), ((A, B), ))
    assert supercedes((B, ), ((A, B), ))
    # iterable_of compound types
    assert supercedes((iterable_of(B), ), (iterable_of(A), ))
    assert supercedes((iterable_of((B, A)), ), (iterable_of(A), ))
    assert supercedes((iterable_of((B, A)), ), (iterable_of((A, A)), ))
    assert supercedes((iterable_of((B, A)), ), (iterable_of((A, B)), ))
    assert supercedes((iterable_of((B, B)), ), (iterable_of((B)), ))
    assert supercedes((iterable_of((B, B)), ), (iterable_of((A, B)), ))
    assert supercedes((iterable_of(B), B), (iterable_of(A), A))
    assert not supercedes((iterable_of(A), ), (iterable_of(B), ))
    # list_of compound types
    assert supercedes((list_of(B), ), (list_of(A), ))
    assert supercedes((list_of((B, A)), ), (list_of(A), ))
    assert supercedes((list_of((B, A)), ), (list_of((A, A)), ))
    assert supercedes((list_of((B, A)), ), (list_of((A, B)), ))
    assert supercedes((list_of((B, B)), ), (list_of((B)), ))
    assert supercedes((list_of((B, B)), ), (list_of((A, B)), ))
    assert supercedes((list_of(B), B), (list_of(A), A))
    assert not supercedes((list_of(A), ), (list_of(B), ))
    # Mixed list_of/iterable_of: per these checks a list_of may supercede an
    # iterable_of, but an iterable_of never supercedes a list_of
    assert supercedes((list_of(B), ), (iterable_of(A), ))
    assert not supercedes((iterable_of(B), ), (list_of(A), ))
    assert supercedes((list_of((B, A)), ), (iterable_of(A), ))
    assert not supercedes((iterable_of((B, A)), ), (list_of(A), ))
    assert supercedes((list_of((B, A)), ), (iterable_of((A, A)), ))
    assert not supercedes((iterable_of((B, A)), ), (list_of((A, A)), ))
    assert supercedes((list_of((B, A)), ), (iterable_of((A, B)), ))
    assert not supercedes((iterable_of((B, A)), ), (list_of((A, B)), ))
    assert supercedes((list_of((B, B)), ), (iterable_of((B)), ))
    assert not supercedes((iterable_of((B, B)), ), (list_of((B)), ))
    assert supercedes((list_of((B, B)), ), (iterable_of((A, B)), ))
    assert not supercedes((iterable_of((B, B)), ), (list_of((A, B)), ))
    assert supercedes((list_of(B), B), (iterable_of(A), A))
    assert not supercedes((iterable_of(B), B), (list_of(A), A))
    assert not supercedes((list_of(A), ), (iterable_of(B), ))
    assert not supercedes((iterable_of(A), ), (list_of(B), ))
    # dict_of compound types
    assert supercedes((dict_of(B, C), ), (dict_of(A, C), ))
    assert supercedes((dict_of(B, C), ), (dict_of((A, C), C), ))
    assert supercedes((dict_of(C, C), ), (dict_of((A, C), C), ))
    assert not supercedes((dict_of((B, C), C), ), (dict_of(A, C), ))
    assert supercedes((dict_of(list_of(B), C), ), (dict_of(list_of(A), C), ))
def _init_basis_functions(self, current_stage="online"):
    """Initialize basis functions as in the parent class, then patch the
    basis functions matrix so that the map from each basis function to its
    (component, index) pair is refreshed after every enrich() call."""
    # Initialize basis functions as in Parent class
    ExactParametrizedFunctionsDecoratedReducedProblem_DerivedClass._init_basis_functions(
        self, current_stage)
    # Patch BasisFunctionsMatrix._update_component_name_to_basis_component_length so that it also updates
    # the map from each basis function to component and index after BasisFunctionsMatrix.enrich()
    # has been called. The marker attribute below ensures the patch is applied only once.
    if not hasattr(
            self.basis_functions,
            "_update_component_name_to_basis_component_length_patched"):
        # Case 1: no component provided; there must be exactly one component.
        @overload(AbstractBasisFunctionsMatrix, None)
        def patched_update_component_name_to_basis_component_length(self_, component):
            assert len(self_._components) == 1
            assert len(self_._components_name) == 1
            component_0 = self_._components_name[0]
            _add_new_basis_functions_to_map_from_basis_function_to_component_and_index(
                self_, component_0)

        # Case 2: component provided by name.
        @overload(AbstractBasisFunctionsMatrix, str)
        def patched_update_component_name_to_basis_component_length(self_, component):
            _add_new_basis_functions_to_map_from_basis_function_to_component_and_index(
                self_, component)

        # Case 3: component provided as a single-entry {from: to} mapping;
        # the destination component is the one to update.
        @overload(AbstractBasisFunctionsMatrix, dict_of(str, str))
        def patched_update_component_name_to_basis_component_length(self_, component):
            assert len(component) == 1
            for (_, component_to) in component.items():
                break
            assert component_to in self_._components
            _add_new_basis_functions_to_map_from_basis_function_to_component_and_index(
                self_, component_to)

        def _add_new_basis_functions_to_map_from_basis_function_to_component_and_index(
                self_, component):
            # Refresh the stored component length, then register only the
            # newly appended basis functions (old length up to new length).
            old_component_length = self_._component_name_to_basis_component_length[
                component]
            self_._component_name_to_basis_component_length[
                component] = len(self_._components[component])
            new_component_length = self_._component_name_to_basis_component_length[
                component]
            for index in range(old_component_length, new_component_length):
                add_to_map_from_basis_function_to_component_and_index(
                    self_._components[component][index], component, index)

        # Apply patch
        PatchInstanceMethod(
            self.basis_functions,
            "_update_component_name_to_basis_component_length",
            patched_update_component_name_to_basis_component_length
        ).patch()
        self.basis_functions._update_component_name_to_basis_component_length_patched = True
class _SnapshotsMatrix(FunctionsList):
    """A FunctionsList specialization that can be enriched from another FunctionsList."""

    @overload(FunctionsList, (None, str, dict_of(str, str)), (None, list_of(Number)), bool)
    def _enrich(self, functions, component, weights, copy):
        # Append every provided snapshot, pairing each one with its weight
        # when a list of weights is supplied.
        if weights is None:
            for snapshot in functions:
                self._add_to_list(snapshot, component, None, copy)
        else:
            assert len(weights) == len(functions)
            for (snapshot, weight) in zip(functions, weights):
                self._add_to_list(snapshot, component, weight, copy)
def test_competing_solutions_for_dict_of_keys():
    """The most specific dict_of key type must win when several signatures match."""
    class A(object):
        pass

    class C(A):
        pass

    class D(C):
        pass

    @dispatch(dict_of(A, int))
    def h(x):
        return 1

    @dispatch(dict_of(C, int))
    def h(x):
        return 2

    for (argument, expected) in (({A(): 1}, 1), ({C(): 2}, 2), ({D(): 3}, 2)):
        assert h(argument) == expected
    # A float value does not match the int value type of any registered signature.
    assert raises(UnavailableSignatureError, lambda: h({A(): 4.}))
def test_competing_solutions_for_dict_of_values():
    """The most specific dict_of value type must win when several signatures match."""
    class A(object):
        pass

    class C(A):
        pass

    class D(C):
        pass

    @dispatch(dict_of(int, A))
    def h(x):
        return 1

    @dispatch(dict_of(int, C))
    def h(x):
        return 2

    for (argument, expected) in (({1: A()}, 1), ({2: C()}, 2), ({3: D()}, 2)):
        assert h(argument) == expected
    # A float key does not match the int key type of any registered signature.
    assert raises(UnavailableSignatureError, lambda: h({4.: A()}))
def test_inheritance_for_dict_of_keys():
    """dict_of key dispatch must follow the key type's inheritance chain."""
    class A(object):
        pass

    class B(object):
        pass

    class C(A):
        pass

    @dispatch(dict_of(A, int))
    def f(x):
        return 'a'

    @dispatch(dict_of(B, int))
    def f(x):
        return 'b'

    for (argument, expected) in (({A(): 1}, 'a'), ({B(): 2}, 'b'), ({C(): 3}, 'a')):
        assert f(argument) == expected
    # A float value does not match the int value type of any registered signature.
    assert raises(UnavailableSignatureError, lambda: f({B(): 4.}))
def test_inheritance_for_dict_of_values():
    """dict_of value dispatch must follow the value type's inheritance chain."""
    class A(object):
        pass

    class B(object):
        pass

    class C(A):
        pass

    @dispatch(dict_of(int, A))
    def f(x):
        return 'a'

    @dispatch(dict_of(int, B))
    def f(x):
        return 'b'

    for (argument, expected) in (({1: A()}, 'a'), ({2: B()}, 'b'), ({3: C()}, 'a')):
        assert f(argument) == expected
    # A float key does not match the int key type of any registered signature.
    assert raises(UnavailableSignatureError, lambda: f({4.: B()}))
def test_inheritance_for_dict_of_keys_tuple_of():
    """dict_of dispatch with tuple_of keys must follow inheritance element-wise."""
    class A(object):
        pass

    class B(object):
        pass

    class C(A):
        pass

    @dispatch(dict_of(tuple_of(A), int))
    def f(x):
        return 'a'

    @dispatch(dict_of(tuple_of(B), int))
    def f(x):
        return 'b'

    for (argument, expected) in (
            ({(A(), A()): 1}, 'a'),
            ({(B(), B()): 2}, 'b'),
            ({(C(), C()): 3}, 'a'),
            ({(C(), A()): 4}, 'a')):
        assert f(argument) == expected
    # A float value does not match the int value type of any registered signature.
    assert raises(UnavailableSignatureError, lambda: f({(B(), B()): 5.}))
def test_competing_solutions_for_dict_of_keys_and_values():
    """When both key and value types compete, the more specific pair must win
    only if both key and value match it."""
    class A(object):
        pass

    class C(A):
        pass

    class D(C):
        pass

    @dispatch(dict_of(A, A))
    def h(x):
        return 1

    @dispatch(dict_of(C, C))
    def h(x):
        return 2

    for (argument, expected) in (
            ({A(): A()}, 1),
            ({C(): C()}, 2),
            ({D(): D()}, 2),
            ({A(): D()}, 1),
            ({D(): A()}, 1)):
        assert h(argument) == expected
    # A plain object value matches neither registered value type.
    assert raises(UnavailableSignatureError, lambda: h({A(): object()}))
class FunctionsList(FunctionsList_Base):
    """FunctionsList specialization that additionally accepts UFL Operator inputs,
    converting them to plain functions before delegating to the base class."""

    def __init__(self, V, component=None):
        FunctionsList_Base.__init__(self, V, component)

    @overload(Operator, (None, str, dict_of(str, str)), (None, list_of(Number)), bool)
    def _enrich(self, function, component, weight, copy):
        # Convert the UFL operator expression to a function first.
        converted_function = function_from_ufl_operators(function)
        FunctionsList_Base._enrich(self, converted_function, component, weight, copy)

    @overload(int, Operator)
    def __setitem__(self, key, item):
        converted_item = function_from_ufl_operators(item)
        FunctionsList_Base.__setitem__(self, key, converted_item)
def test_inheritance_for_dict_of_keys_values():
    """dict_of dispatch on both key and value types must follow inheritance,
    requiring key and value to match the same signature."""
    class A(object):
        pass

    class B(object):
        pass

    class C(A):
        pass

    @dispatch(dict_of(A, A))
    def f(x):
        return "a"

    @dispatch(dict_of(B, B))
    def f(x):
        return "b"

    for (argument, expected) in (
            ({A(): A()}, "a"),
            ({B(): B()}, "b"),
            ({C(): C()}, "a"),
            ({A(): C()}, "a"),
            ({C(): A()}, "a")):
        assert f(argument) == expected
    # Mixing key from one signature with value from the other matches neither.
    assert raises(UnavailableSignatureError, lambda: f({A(): B()}))
def test_consistent_2():
    """Exercise consistent() on plain types, iterable_of/list_of compound types
    and dict_of compound types."""
    class A(object):
        pass

    class B(A):
        pass

    class C(object):
        pass

    # Plain types
    assert consistent((A, ), (A, ))
    assert consistent((B, ), (B, ))
    assert not consistent((A, ), (C, ))
    assert consistent((A, B), (A, B))
    assert consistent((B, A), (A, B))
    assert not consistent((B, A), (B, ))
    assert not consistent((B, A), (B, C))
    # iterable_of compound types
    assert consistent((iterable_of(A), ), (iterable_of(A), ))
    assert consistent((iterable_of(B), ), (iterable_of(B), ))
    assert not consistent((iterable_of(A), ), (iterable_of(C), ))
    assert consistent((iterable_of((A, B)), ), (iterable_of((A, B)), ))
    assert consistent((iterable_of((B, A)), ), (iterable_of((A, B)), ))
    assert consistent((iterable_of((B, A)), ), (iterable_of(B), ))
    assert consistent((iterable_of((B, A)), ), (iterable_of(A), ))
    assert not consistent((iterable_of((B, A)), ), (iterable_of((B, C)), ))
    # list_of compound types
    assert consistent((list_of(A), ), (list_of(A), ))
    assert consistent((list_of(B), ), (list_of(B), ))
    assert not consistent((list_of(A), ), (list_of(C), ))
    assert consistent((list_of((A, B)), ), (list_of((A, B)), ))
    assert consistent((list_of((B, A)), ), (list_of((A, B)), ))
    assert consistent((list_of((B, A)), ), (list_of(B), ))
    assert consistent((list_of((B, A)), ), (list_of(A), ))
    assert not consistent((list_of((B, A)), ), (list_of((B, C)), ))
    # Mixed list_of/iterable_of: per these checks a list_of signature is never
    # consistent with an iterable_of one, in either direction
    assert not consistent((list_of(A), ), (iterable_of(A), ))
    assert not consistent((iterable_of(A), ), (list_of(A), ))
    assert not consistent((list_of(B), ), (iterable_of(B), ))
    assert not consistent((iterable_of(B), ), (list_of(B), ))
    assert not consistent((list_of(A), ), (iterable_of(C), ))
    assert not consistent((iterable_of(A), ), (list_of(C), ))
    assert not consistent((list_of((A, B)), ), (iterable_of((A, B)), ))
    assert not consistent((iterable_of((A, B)), ), (list_of((A, B)), ))
    assert not consistent((list_of((B, A)), ), (iterable_of((A, B)), ))
    assert not consistent((iterable_of((B, A)), ), (list_of((A, B)), ))
    assert not consistent((list_of((B, A)), ), (iterable_of(B), ))
    assert not consistent((iterable_of((B, A)), ), (list_of(B), ))
    assert not consistent((list_of((B, A)), ), (iterable_of(A), ))
    assert not consistent((iterable_of((B, A)), ), (list_of(A), ))
    assert not consistent((list_of((B, A)), ), (iterable_of((B, C)), ))
    assert not consistent((iterable_of((B, A)), ), (list_of((B, C)), ))
    # dict_of compound types
    assert consistent((dict_of(A, C), ), (dict_of(A, C), ))
    assert consistent((dict_of(B, C), ), (dict_of(B, C), ))
    assert not consistent((dict_of(A, C), ), (dict_of(C, C), ))
def _diff_content(reference_items: dict_of(object, object), current_items: dict_of(object, object), tab: str):
    """Recursively compare two dictionaries, returning a list of diff strings.

    Each returned string has a "@@ header @@" line followed by the reference
    value (prefixed by "-") and the current value (prefixed by "+"); nested
    differences are indented by one extra tab per level.
    """
    def format_diff(header, reference_value, current_value):
        # Render one diff entry: header line, then "-" and "+" value lines.
        return "\n".join([
            tab + "@@ " + header + " @@",
            tab + "- " + str(reference_value),
            tab + "+ " + str(current_value)
        ]) + "\n"

    if len(reference_items) != len(current_items):
        return [format_diff("different lengths", len(reference_items), len(current_items))]
    if reference_items.keys() != current_items.keys():
        return [format_diff("different keys", reference_items.keys(), current_items.keys())]
    diff_items = list()
    for item_key in reference_items:
        nested_diff = _diff_content(reference_items[item_key], current_items[item_key], tab + "\t")
        for d in nested_diff:
            diff_items.append(tab + "@@ " + str(item_key) + " @@" + "\n" + d)
    return diff_items
class _GramSchmidt(AbstractGramSchmidt):
    """Gram-Schmidt orthonormalization with respect to a provided inner product."""

    def __init__(self, space, inner_product, component=None):
        # Restrict to a subspace when a component is requested.
        self.space = space if component is None else wrapping.get_function_subspace(space, component)
        self.inner_product = inner_product

    def apply(self, new_basis_function, basis_functions, component=None):
        """Orthonormalize new_basis_function against the existing basis_functions."""
        inner_product = self.inner_product
        transpose = backend.transpose
        new_basis_function = self._extend_or_restrict_if_needed(new_basis_function, component)
        # Remove the projection onto each existing basis function in turn.
        for basis_function in basis_functions:
            new_basis_function = wrapping.gram_schmidt_projection_step(
                new_basis_function, inner_product, basis_function, transpose)
        # Normalize, unless the remainder is exactly zero.
        norm = sqrt(transpose(new_basis_function) * inner_product * new_basis_function)
        if norm != 0.:
            new_basis_function /= norm
        return new_basis_function

    @overload(backend.Function.Type(), (None, str))
    def _extend_or_restrict_if_needed(self, function, component):
        return wrapping.function_extend_or_restrict(
            function, component, self.space, component, weight=None, copy=True)

    @overload(backend.Function.Type(), dict_of(str, str))
    def _extend_or_restrict_if_needed(self, function, component):
        # A single-entry {from: to} mapping selects source and destination components.
        assert len(component) == 1
        (component_from, component_to) = next(iter(component.items()))
        return wrapping.function_extend_or_restrict(
            function, component_from, self.space, component_to, weight=None, copy=True)
def _init_rhs(self, rhs: Form, bcs: (list_of(DirichletBC), ProductOutputDirichletBC, dict_of(str, list_of(DirichletBC)), dict_of(str, ProductOutputDirichletBC), None)):
    """Assemble the right-hand side form into a vector."""
    self.rhs = assemble(rhs)
class GramSchmidt(GramSchmidt_Base):
    """GramSchmidt specialization that additionally accepts UFL Operator inputs."""

    @overload(Operator, (None, str, dict_of(str, str)))
    def _extend_or_restrict_if_needed(self, function, component):
        # Convert the UFL operator expression to a function before delegating.
        converted_function = function_from_ufl_operators(function)
        return GramSchmidt_Base._extend_or_restrict_if_needed(self, converted_function, component)
def _jacobian_bcs_apply(self, bcs: (dict_of(str, list_of(DirichletBC)), dict_of(str, ProductOutputDirichletBC))):
    """Apply every boundary condition, grouped by component, to the jacobian matrix."""
    for bcs_of_component in bcs.values():
        for bc in bcs_of_component:
            bc.apply(self.jacobian_matrix)
def _residual_bcs_apply(self, bcs: (dict_of(str, list_of(DirichletBC)), dict_of(str, ProductOutputDirichletBC))):
    """Apply every boundary condition, grouped by component, to the residual vector."""
    for bcs_of_component in bcs.values():
        for bc in bcs_of_component:
            bc.apply(self.residual_vector, self.solution.vector())
def _init_lhs(self, lhs: Form, bcs: (list_of(DirichletBC), ProductOutputDirichletBC, dict_of(str, list_of(DirichletBC)), dict_of(str, ProductOutputDirichletBC), None)):
    """Assemble the left-hand side form into a matrix.

    keep_diagonal=True is passed to assemble, presumably so that diagonal
    entries exist when boundary conditions are applied later — confirm
    against the callers that apply bcs to self.lhs.
    """
    self.lhs = assemble(lhs, keep_diagonal=True)
class _BasisFunctionsMatrix(AbstractBasisFunctionsMatrix):
    """Matrix of basis functions, stored as one FunctionsList per component.

    Maintains caches of precomputed slices and sub-component extractions,
    which are invalidated whenever a component is enriched.
    """

    def __init__(self, space, component=None):
        if component is not None:
            self.space = wrapping.get_function_subspace(space, component)
        else:
            self.space = space
        self.mpi_comm = wrapping.get_mpi_comm(space)
        self._components = dict()  # of FunctionsList
        self._precomputed_sub_components = Cache()  # from tuple to FunctionsList
        self._precomputed_slices = Cache()  # from tuple to FunctionsList
        self._components_name = list()  # filled in by init
        self._component_name_to_basis_component_index = ComponentNameToBasisComponentIndexDict()  # filled in by init
        self._component_name_to_basis_component_length = OnlineSizeDict()

    def init(self, components_name):
        """Allocate one FunctionsList per component and patch their enrich()
        so that internal bookkeeping stays up to date."""
        if self._components_name != components_name:  # do nothing if already initialized with the same components
            # Store components name
            self._components_name = components_name
            # Initialize components FunctionsList
            self._components.clear()
            for component_name in components_name:
                self._components[component_name] = backend.FunctionsList(self.space)
            # Prepare len components
            self._component_name_to_basis_component_length.clear()
            for component_name in components_name:
                self._component_name_to_basis_component_length[component_name] = 0
            # Initialize the component_name_to_basis_component_index dict
            self._component_name_to_basis_component_index.clear()
            for (basis_component_index, component_name) in enumerate(components_name):
                self._component_name_to_basis_component_index[component_name] = basis_component_index
            # Reset precomputed sub components
            self._precomputed_sub_components.clear()
            # Reset precomputed slices
            self._precomputed_slices.clear()

            # Patch FunctionsList.enrich() to update internal attributes.
            # Note: self_ is the FunctionsList being patched, while self
            # (captured by closure) is this _BasisFunctionsMatrix.
            def patch_functions_list_enrich(component_name, functions_list):
                original_functions_list_enrich = functions_list.enrich

                def patched_functions_list_enrich(self_, functions, component=None, weights=None, copy=True):
                    # Append to storage
                    original_functions_list_enrich(functions, component, weights, copy)
                    # When a component is provided, it must refer to the
                    # component this FunctionsList was created for
                    if component is not None:
                        if isinstance(component, dict):
                            assert len(component) == 1
                            for (_, component_to) in component.items():
                                break
                            assert component_name == component_to
                        else:
                            assert component_name == component
                    # Update component name to basis component length
                    self._update_component_name_to_basis_component_length(component_name)
                    # Reset precomputed sub components
                    self._precomputed_sub_components.clear()
                    # Prepare trivial precomputed sub components
                    self._prepare_trivial_precomputed_sub_components()
                    # Reset precomputed slices
                    self._precomputed_slices.clear()
                    # Prepare trivial precomputed slice
                    self._prepare_trivial_precomputed_slice()

                functions_list.enrich_patch = PatchInstanceMethod(
                    functions_list, "enrich", patched_functions_list_enrich)
                functions_list.enrich_patch.patch()

            for component_name in components_name:
                patch_functions_list_enrich(component_name, self._components[component_name])

    def enrich(self, functions, component=None, weights=None, copy=True):
        assert copy is True
        # Append to storage
        self._enrich(functions, component, weights, copy)

    # the first argument is object in order to handle FunctionsList's AdditionalFunctionType
    @overload(object, None, (None, list_of(Number)), bool)
    def _enrich(self, functions, component, weights, copy):
        # No component provided: only valid for the single-component case.
        assert len(self._components) == 1
        assert len(self._components_name) == 1
        component_0 = self._components_name[0]
        self._components[component_0].enrich(functions, None, weights, copy)

    # the first argument is object in order to handle FunctionsList's AdditionalFunctionType
    @overload(object, str, (None, list_of(Number)), bool)
    def _enrich(self, functions, component, weights, copy):
        assert component in self._components
        self._components[component].enrich(functions, component, weights, copy)

    # the first argument is object in order to handle FunctionsList's AdditionalFunctionType
    @overload(object, dict_of(str, str), (None, list_of(Number)), bool)
    def _enrich(self, functions, component, weights, copy):
        # Single-entry {from: to} mapping: enrich the destination component.
        assert len(component) == 1
        for (_, component_to) in component.items():
            break
        assert component_to in self._components
        # Forward copy as well, consistently with the other overloads
        # (previously omitted; harmless since enrich() asserts copy is True).
        self._components[component_to].enrich(functions, component, weights, copy)

    @overload(None)
    def _update_component_name_to_basis_component_length(self, component):
        assert len(self._components) == 1
        assert len(self._components_name) == 1
        component_0 = self._components_name[0]
        self._component_name_to_basis_component_length[component_0] = len(
            self._components[component_0])

    @overload(str)
    def _update_component_name_to_basis_component_length(self, component):
        self._component_name_to_basis_component_length[component] = len(
            self._components[component])

    @overload(dict_of(str, str))
    def _update_component_name_to_basis_component_length(self, component):
        assert len(component) == 1
        for (_, component_to) in component.items():
            break
        assert component_to in self._components
        self._component_name_to_basis_component_length[component_to] = len(
            self._components[component_to])

    def _prepare_trivial_precomputed_sub_components(self):
        # The full set of components trivially maps to this very object.
        self._precomputed_sub_components[tuple(self._components_name)] = self

    def _prepare_trivial_precomputed_slice(self):
        # The full slice (from 0 to the current length) trivially maps to this very object.
        if len(self._components) == 1:
            assert len(self._components_name) == 1
            component_0 = self._components_name[0]
            precomputed_slice_key_start = 0
            precomputed_slice_key_stop = self._component_name_to_basis_component_length[component_0]
        else:
            precomputed_slice_key_start = list()
            precomputed_slice_key_stop = list()
            for component_name in self._components_name:
                precomputed_slice_key_start.append(0)
                precomputed_slice_key_stop.append(
                    self._component_name_to_basis_component_length[component_name])
            precomputed_slice_key_start = tuple(precomputed_slice_key_start)
            precomputed_slice_key_stop = tuple(precomputed_slice_key_stop)
        self._precomputed_slices[precomputed_slice_key_start, precomputed_slice_key_stop] = self

    def clear(self):
        components_name = self._components_name
        # Trick init into re-initializing everything
        self._components_name = None
        self.init(components_name)

    def save(self, directory, filename):
        # Suffix the filename with the component name only when there are several components.
        if len(self._components) > 1:
            def filename_and_component(component_name):
                return filename + "_" + component_name
        else:
            def filename_and_component(component_name):
                return filename
        for (component_name, functions_list) in self._components.items():
            functions_list.save(directory, filename_and_component(component_name))

    def load(self, directory, filename):
        return_value = True
        assert len(self._components) > 0
        if len(self._components) > 1:
            def filename_and_component(component_name):
                return filename + "_" + component_name
        else:
            def filename_and_component(component_name):
                return filename
        for (component_name, functions_list) in self._components.items():
            # Skip updating internal attributes while reading in basis functions, we will do that
            # only once at the end
            assert hasattr(functions_list, "enrich_patch")
            functions_list.enrich_patch.unpatch()
            # Load each component
            return_value_component = functions_list.load(
                directory, filename_and_component(component_name))
            return_value = return_value and return_value_component
            # Populate component length
            self._update_component_name_to_basis_component_length(component_name)
            # Restore patched enrich method
            functions_list.enrich_patch.patch()
        # Reset precomputed sub components
        self._precomputed_sub_components.clear()
        # Prepare trivial precomputed sub components
        self._prepare_trivial_precomputed_sub_components()
        # Reset precomputed slices
        self._precomputed_slices.clear()
        # Prepare trivial precomputed slice
        self._prepare_trivial_precomputed_slice()
        # Return
        return return_value

    @overload(online_backend.OnlineMatrix.Type(), )
    def __mul__(self, other):
        if isinstance(other.M, dict):
            assert set(other.M.keys()) == set(self._components_name)

        def BasisFunctionsMatrixWithInit(space):
            # Factory building an initialized instance with the same components.
            output = _BasisFunctionsMatrix.__new__(type(self), space)
            output.__init__(space)
            output.init(self._components_name)
            return output

        return wrapping.basis_functions_matrix_mul_online_matrix(
            self, other, BasisFunctionsMatrixWithInit)

    @overload(online_backend.OnlineFunction.Type(), )
    def __mul__(self, other):
        return self.__mul__(online_wrapping.function_to_vector(other))

    @overload(online_backend.OnlineVector.Type(), )
    def __mul__(self, other):
        if isinstance(other.N, dict):
            assert set(other.N.keys()) == set(self._components_name)
        return wrapping.basis_functions_matrix_mul_online_vector(self, other)

    @overload(ThetaType, )
    def __mul__(self, other):
        return wrapping.basis_functions_matrix_mul_online_vector(self, other)

    def __len__(self):
        assert len(self._components_name) == 1
        assert len(self._component_name_to_basis_component_length) == 1
        return self._component_name_to_basis_component_length[self._components_name[0]]

    @overload(int)
    def __getitem__(self, key):
        # spare the user an obvious extraction of the first component, return basis function number key
        assert len(self._components) == 1
        assert len(self._components_name) == 1
        component_0 = self._components_name[0]
        return self._components[component_0][key]

    @overload(str)
    def __getitem__(self, key):
        # return all basis functions for each component, then the user may use
        # __getitem__ of FunctionsList to extract a single basis function
        return self._components[key]

    @overload(list_of(str))
    def __getitem__(self, key):
        return self._precompute_sub_components(key)

    @overload(slice)  # e.g. key = :N, return the first N functions
    def __getitem__(self, key):
        assert key.step is None
        return self._precompute_slice(key.start, key.stop)

    # the second argument is object in order to handle FunctionsList's AdditionalFunctionType
    @overload(int, object)
    def __setitem__(self, key, item):
        assert len(self._components) == 1, (
            "Cannot set components, only single functions. Did you mean to call __getitem__ to extract a component and __setitem__ of a single function on that component?")
        assert len(self._components_name) == 1
        self._components[self._components_name[0]][key] = item

    @overload(None, int)
    def _precompute_slice(self, _, N_stop):
        return self._precompute_slice(0, N_stop)

    @overload(int, None)
    def _precompute_slice(self, N_start, _):
        return self._precompute_slice(N_start, len(self))

    @overload(int, int)
    def _precompute_slice(self, N_start, N_stop):
        # Integer bounds are only meaningful in the single-component case.
        if (N_start, N_stop) not in self._precomputed_slices:
            assert len(self._components) == 1
            output = _BasisFunctionsMatrix.__new__(type(self), self.space)
            output.__init__(self.space)
            output.init(self._components_name)
            for component_name in self._components_name:
                output._components[component_name].enrich(
                    self._components[component_name][N_start:N_stop], copy=False)
            self._precomputed_slices[N_start, N_stop] = output
        return self._precomputed_slices[N_start, N_stop]

    @overload(None, OnlineSizeDict)
    def _precompute_slice(self, _, N_stop):
        N_start = OnlineSizeDict()
        for component_name in self._components_name:
            N_start[component_name] = 0
        return self._precompute_slice(N_start, N_stop)

    @overload(OnlineSizeDict, None)
    def _precompute_slice(self, N_start, _):
        N_stop = OnlineSizeDict()
        for component_name in self._components_name:
            N_stop[component_name] = self._component_name_to_basis_component_length[component_name]
        # Fix: recurse with the per-component N_stop just built. The previous
        # code passed len(self), which discarded N_stop and asserted a single
        # component, defeating the purpose of this multi-component overload.
        return self._precompute_slice(N_start, N_stop)

    @overload(OnlineSizeDict, OnlineSizeDict)
    def _precompute_slice(self, N_start, N_stop):
        assert set(N_start.keys()) == set(self._components_name)
        assert set(N_stop.keys()) == set(self._components_name)
        N_start_key = tuple(N_start[component_name] for component_name in self._components_name)
        N_stop_key = tuple(N_stop[component_name] for component_name in self._components_name)
        if (N_start_key, N_stop_key) not in self._precomputed_slices:
            output = _BasisFunctionsMatrix.__new__(type(self), self.space)
            output.__init__(self.space)
            output.init(self._components_name)
            for component_name in self._components_name:
                output._components[component_name].enrich(
                    self._components[component_name][N_start[component_name]:N_stop[component_name]],
                    copy=False)
            self._precomputed_slices[N_start_key, N_stop_key] = output
        return self._precomputed_slices[N_start_key, N_stop_key]

    def _precompute_sub_components(self, sub_components):
        sub_components_key = tuple(sub_components)
        if sub_components_key not in self._precomputed_sub_components:
            assert set(sub_components).issubset(self._components_name)
            output = _BasisFunctionsMatrix.__new__(type(self), self.space, sub_components)
            output.__init__(self.space, sub_components)
            output.init(sub_components)
            for component_name in sub_components:
                output._components[component_name].enrich(
                    self._components[component_name], component=component_name, copy=True)
            self._precomputed_sub_components[sub_components_key] = output
        return self._precomputed_sub_components[sub_components_key]

    def __iter__(self):
        assert len(self._components) == 1
        assert len(self._components_name) == 1
        component_0 = self._components_name[0]
        return self._components[component_0].__iter__()
class _FunctionsList(AbstractFunctionsList):
    """Ordered list of functions on a (sub)space, with weighted enrichment,
    save/load to disk, slicing with a cache, and products with online tensors."""

    def __init__(self, space, component=None):
        # Fix: component now defaults to None. The slice __getitem__ below
        # calls output.__init__(self.space) with a single argument, which
        # raised TypeError when component was mandatory; the default also
        # matches _BasisFunctionsMatrix.__init__ and is backward-compatible.
        if component is None:
            self.space = space
        else:
            self.space = wrapping.get_function_subspace(space, component)
        self.mpi_comm = wrapping.get_mpi_comm(space)
        self._list = list()  # of functions
        self._precomputed_slices = Cache()  # from tuple to FunctionsList

    def enrich(self, functions, component=None, weights=None, copy=True):
        # Append to storage
        self._enrich(functions, component, weights, copy)
        # Reset precomputed slices
        self._precomputed_slices = Cache()
        # Prepare trivial precomputed slice
        self._precomputed_slices[0, len(self._list)] = self

    @overload(backend.Function.Type(), (None, str, dict_of(str, str)), (None, Number), bool)
    def _enrich(self, function, component, weight, copy):
        # Single function, optional scalar weight.
        self._add_to_list(function, component, weight, copy)

    @overload((lambda cls: cls, list_of(backend.Function.Type()), tuple_of(backend.Function.Type())),
              (None, str, dict_of(str, str)), (None, list_of(Number)), bool)
    def _enrich(self, functions, component, weights, copy):
        # Collection of functions, optional list of weights (one per function).
        if weights is not None:
            assert len(weights) == len(functions)
            for (index, function) in enumerate(functions):
                self._add_to_list(function, component, weights[index], copy)
        else:
            for function in functions:
                self._add_to_list(function, component, None, copy)

    @overload(TimeSeries, (None, str, dict_of(str, str)), (None, list_of(Number)), bool)
    def _enrich(self, functions, component, weights, copy):
        # Delegate to the list overload on the time series' underlying storage.
        self._enrich(functions._list, component, weights, copy)

    @overload(object, (None, str, dict_of(str, str)), (None, Number, list_of(Number)), bool)
    def _enrich(self, function, component, weight, copy):
        # Fallback: convert additional function types (or lists thereof).
        if AdditionalIsFunction(function):
            function = ConvertAdditionalFunctionTypes(function)
            assert weight is None or isinstance(weight, Number)
            self._add_to_list(function, component, weight, copy)
        elif isinstance(function, list):
            converted_function = list()
            for function_i in function:
                if AdditionalIsFunction(function_i):
                    converted_function.append(ConvertAdditionalFunctionTypes(function_i))
                else:
                    raise RuntimeError("Invalid function provided to FunctionsList.enrich()")
            assert weight is None or isinstance(weight, list)
            self._enrich(converted_function, component, weight, copy)
        else:
            raise RuntimeError("Invalid function provided to FunctionsList.enrich()")

    @overload(backend.Function.Type(), (None, str), (None, Number), bool)
    def _add_to_list(self, function, component, weight, copy):
        self._list.append(
            wrapping.function_extend_or_restrict(
                function, component, self.space, component, weight, copy))

    @overload(backend.Function.Type(), dict_of(str, str), (None, Number), bool)
    def _add_to_list(self, function, component, weight, copy):
        # Single-entry {from: to} mapping selects source and destination components.
        assert len(component) == 1
        for (component_from, component_to) in component.items():
            break
        self._list.append(
            wrapping.function_extend_or_restrict(
                function, component_from, self.space, component_to, weight, copy))

    def clear(self):
        self._list = list()
        # Reset precomputed slices
        self._precomputed_slices.clear()

    def save(self, directory, filename):
        self._save_Nmax(directory, filename)
        for (index, function) in enumerate(self._list):
            wrapping.function_save(function, directory, filename + "_" + str(index))

    def _save_Nmax(self, directory, filename):
        # Write the list length to a sidecar ".length" file (collective I/O).
        def save_Nmax_task():
            with open(os.path.join(str(directory), filename + ".length"), "w") as length:
                length.write(str(len(self._list)))
        parallel_io(save_Nmax_task, self.mpi_comm)

    def load(self, directory, filename):
        if len(self._list) > 0:  # avoid loading multiple times
            return False
        Nmax = self._load_Nmax(directory, filename)
        for index in range(Nmax):
            function = backend.Function(self.space)
            wrapping.function_load(function, directory, filename + "_" + str(index))
            self.enrich(function)
        return True

    def _load_Nmax(self, directory, filename):
        # Read the list length back from the sidecar ".length" file.
        def load_Nmax_task():
            with open(os.path.join(str(directory), filename + ".length"), "r") as length:
                return int(length.readline())
        return parallel_io(load_Nmax_task, self.mpi_comm)

    @overload(online_backend.OnlineMatrix.Type(), )
    def __mul__(self, other):
        return wrapping.functions_list_mul_online_matrix(self, other, type(self))

    @overload((online_backend.OnlineVector.Type(), ThetaType), )
    def __mul__(self, other):
        return wrapping.functions_list_mul_online_vector(self, other)

    @overload(online_backend.OnlineFunction.Type(), )
    def __mul__(self, other):
        return wrapping.functions_list_mul_online_vector(
            self, online_wrapping.function_to_vector(other))

    def __len__(self):
        return len(self._list)

    @overload(int)
    def __getitem__(self, key):
        return self._list[key]

    @overload(slice)  # e.g. key = :N, return the first N functions
    def __getitem__(self, key):
        if key.start is not None:
            start = key.start
        else:
            start = 0
        assert key.step is None
        if key.stop is not None:
            stop = key.stop
        else:
            stop = len(self._list)
        assert start <= stop
        if start < stop:
            assert start >= 0
            assert start < len(self._list)
            assert stop > 0
            assert stop <= len(self._list)
        # elif start == stop: trivial case which will result in an empty FunctionsList
        if (start, stop) not in self._precomputed_slices:
            output = _FunctionsList.__new__(type(self), self.space)
            output.__init__(self.space)
            if start < stop:
                output._list = self._list[key]
            self._precomputed_slices[start, stop] = output
        return self._precomputed_slices[start, stop]

    @overload(int, backend.Function.Type())
    def __setitem__(self, key, item):
        self._list[key] = item

    @overload(int, object)
    def __setitem__(self, key, item):
        if AdditionalIsFunction(item):
            item = ConvertAdditionalFunctionTypes(item)
            self._list[key] = item
        else:
            raise RuntimeError("Invalid function provided to FunctionsList.__setitem__()")

    def __iter__(self):
        return self._list.__iter__()
class OnlineSizeDict(OrderedDict):
    """Ordered dict mapping component name -> online (reduced basis) size.

    Augments OrderedDict with integer-aware arithmetic and comparison
    operators (via the @overload dispatch decorator) so that a multi-component
    size can be incremented and compared like a plain integer size.
    """

    # No per-instance __dict__: instances only carry the OrderedDict storage.
    __slots__ = ()

    def __init__(self, *args, **kwargs):
        super(OnlineSizeDict, self).__init__(*args, **kwargs)

    @staticmethod
    def generate_from_N_and_kwargs(components_, default, N, **kwargs):
        """Build an OnlineSizeDict from N, per-component kwargs, or defaults.

        Returns the pair (N, kwargs), where N is an OnlineSizeDict with one
        entry per component and kwargs has had any consumed per-component
        entries removed.
        """
        # need to add underscore to components_ because "components" is also a possible kwargs key
        if len(components_) > 1:
            if N is None:
                # Either every component size is given in kwargs, or none is;
                # a partial specification is rejected by the asserts below.
                all_components_in_kwargs = components_[0] in kwargs
                for component in components_:
                    if all_components_in_kwargs:
                        assert component in kwargs, (
                            "You need to specify the online size of all components in kwargs"
                        )
                    else:
                        assert component not in kwargs, (
                            "You need to specify the online size of all components in kwargs"
                        )
                if all_components_in_kwargs:
                    # Consume the per-component sizes from kwargs.
                    N = OnlineSizeDict()
                    for component in components_:
                        N[component] = kwargs[component]
                        del kwargs[component]
                else:
                    assert isinstance(default, dict)
                    N = OnlineSizeDict(default)  # copy the default dict
            else:
                assert isinstance(N, (int, OnlineSizeDict))
                if isinstance(N, int):
                    # Broadcast the same integer size to every component.
                    N_int = N
                    N = OnlineSizeDict()
                    for component in components_:
                        N[component] = N_int
                        assert component not in kwargs, "You cannot provide both an int and kwargs for components"
                elif isinstance(N, OnlineSizeDict):
                    # check that components are the same, and are ordered correctly
                    assert list(N.keys()) == list(default.keys())
                else:
                    raise TypeError("Invalid N")
        else:
            assert len(components_) == 1
            component_0 = components_[0]
            if N is None:
                # Take the size from kwargs if given, otherwise fall back to default.
                if component_0 in kwargs:
                    N_int = kwargs[component_0]
                else:
                    assert isinstance(default, int)
                    N_int = default
            else:
                assert isinstance(N, int)
                N_int = N
            N = OnlineSizeDict()
            N[component_0] = N_int
        return N, kwargs

    # Explicit pass-throughs to OrderedDict, spelling out the dict interface
    # that this class supports.
    def __getitem__(self, k):
        return super(OnlineSizeDict, self).__getitem__(k)

    def __setitem__(self, k, v):
        return super(OnlineSizeDict, self).__setitem__(k, v)

    def __delitem__(self, k):
        return super(OnlineSizeDict, self).__delitem__(k)

    def get(self, k, default=None):
        return super(OnlineSizeDict, self).get(k, default)

    def setdefault(self, k, default=None):
        return super(OnlineSizeDict, self).setdefault(k, default)

    def pop(self, k):
        return super(OnlineSizeDict, self).pop(k)

    def update(self, **kwargs):
        super(OnlineSizeDict, self).update(**kwargs)

    def __contains__(self, k):
        return super(OnlineSizeDict, self).__contains__(k)

    # Override N += N_bc so that it is possible to increment online size due to boundary conditions
    # (several components)
    @overload(lambda cls: cls)
    def __iadd__(self, other):
        for key in self:
            self[key] += other[key]
        return self

    # Override N += N_bc so that it is possible to increment online size due to boundary conditions
    # (single component)
    @overload(int)
    def __iadd__(self, other):
        assert len(self) == 1
        for key in self:
            self[key] += other
        return self

    # Override N + N_bc as well
    def __add__(self, other):
        # Copy, then delegate to the overloaded __iadd__.
        output = OnlineSizeDict(self)
        output += other
        return output

    # Override __eq__ so that it is possible to check equality of dictionary with an int
    @overload(int)
    def __eq__(self, other):
        # Equal to an int only if every component has that size.
        for (key, value) in self.items():
            if value != other:
                return False
        return True

    @overload((lambda cls: cls, dict_of(str, int)))
    def __eq__(self, other):
        return super(OnlineSizeDict, self).__eq__(other)

    # Override __ne__ so that it is possible to check not equality of dictionary with an int
    @overload(int)
    def __ne__(self, other):
        # NOTE(review): not the negation of __eq__ — this requires EVERY value
        # to differ from other, so __eq__ and __ne__ can both be False.
        for (key, value) in self.items():
            if value == other:
                return False
        return True

    @overload((lambda cls: cls, dict_of(str, int)))
    def __ne__(self, other):
        return super(OnlineSizeDict, self).__ne__(other)

    # Override __lt__ so that it is possible to check if dictionary is less than an int
    @overload(int)
    def __lt__(self, other):
        # True only if every component size is strictly below other.
        for (key, value) in self.items():
            if value >= other:
                return False
        return True

    @overload((lambda cls: cls, dict_of(str, int)))
    def __lt__(self, other):
        return super(OnlineSizeDict, self).__lt__(other)

    # Override __gt__ so that it is possible to check if dictionary is greater than an int
    @overload(int)
    def __gt__(self, other):
        # True only if every component size is strictly above other.
        for (key, value) in self.items():
            if value <= other:
                return False
        return True

    @overload((lambda cls: cls, dict_of(str, int)))
    def __gt__(self, other):
        return super(OnlineSizeDict, self).__gt__(other)

    # Override __str__ to print an integer if all values are the same
    def __str__(self):
        if len(set(self.values())) == 1:
            # All components share one size: grab the first value and print it
            # as a single integer.
            for (_, value) in self.items():
                break
            return str(value)
        else:
            return "{" + ", ".join(
                [key + ": " + str(value) for (key, value) in self.items()]) + "}"
from ufl import Form from dolfin import assemble, DirichletBC, PETScLUSolver from rbnics.backends.abstract import LinearSolver as AbstractLinearSolver, LinearProblemWrapper from rbnics.backends.dolfin.evaluate import evaluate from rbnics.backends.dolfin.function import Function from rbnics.backends.dolfin.matrix import Matrix from rbnics.backends.dolfin.parametrized_tensor_factory import ParametrizedTensorFactory from rbnics.backends.dolfin.vector import Vector from rbnics.backends.dolfin.wrapping.dirichlet_bc import ProductOutputDirichletBC from rbnics.utils.decorators import BackendFor, dict_of, list_of, overload @BackendFor("dolfin", inputs=((Form, Matrix.Type(), ParametrizedTensorFactory, LinearProblemWrapper), Function.Type(), (Form, ParametrizedTensorFactory, Vector.Type(), None), (list_of(DirichletBC), ProductOutputDirichletBC, dict_of(str, list_of(DirichletBC)), dict_of(str, ProductOutputDirichletBC), None))) class LinearSolver(AbstractLinearSolver): @overload((Form, Matrix.Type(), ParametrizedTensorFactory), Function.Type(), (Form, ParametrizedTensorFactory, Vector.Type()), (list_of(DirichletBC), ProductOutputDirichletBC, dict_of(str, list_of(DirichletBC)), dict_of(str, ProductOutputDirichletBC), None)) def __init__(self, lhs, solution, rhs, bcs=None): self.solution = solution self._init_lhs(lhs, bcs) self._init_rhs(rhs, bcs) self._apply_bcs(bcs) self._linear_solver = "default" self.monitor = None @overload(LinearProblemWrapper, Function.Type())
class LinearSolver(AbstractLinearSolver):
    """Dolfin backend linear solver: assembles lhs/rhs, applies Dirichlet
    boundary conditions and solves with a PETSc LU solver.

    Dispatch over input types (Form / ParametrizedTensorFactory / already
    assembled tensors / LinearProblemWrapper) is done via @overload.
    """

    @overload((Form, Matrix.Type(), ParametrizedTensorFactory), Function.Type(),
              (Form, ParametrizedTensorFactory, Vector.Type()),
              (list_of(DirichletBC), ProductOutputDirichletBC, dict_of(str, list_of(DirichletBC)),
               dict_of(str, ProductOutputDirichletBC), None))
    def __init__(self, lhs, solution, rhs, bcs=None):
        # Store the output function, then prepare lhs/rhs (dispatching on
        # their types) and fold the boundary conditions into both.
        self.solution = solution
        self._init_lhs(lhs, bcs)
        self._init_rhs(rhs, bcs)
        self._apply_bcs(bcs)
        self._linear_solver = "default"
        self.monitor = None

    @overload(LinearProblemWrapper, Function.Type())
    def __init__(self, problem_wrapper, solution):
        # Unpack the wrapper and delegate to the overload above.
        self.__init__(problem_wrapper.matrix_eval(), solution,
                      problem_wrapper.vector_eval(), problem_wrapper.bc_eval())
        self.monitor = problem_wrapper.monitor

    @overload(Form, (list_of(DirichletBC), ProductOutputDirichletBC,
                     dict_of(str, list_of(DirichletBC)),
                     dict_of(str, ProductOutputDirichletBC), None))
    def _init_lhs(self, lhs, bcs):
        # keep_diagonal=True so that bc rows can later set diagonal entries.
        self.lhs = assemble(lhs, keep_diagonal=True)

    @overload(ParametrizedTensorFactory, (list_of(DirichletBC), ProductOutputDirichletBC,
                                          dict_of(str, list_of(DirichletBC)),
                                          dict_of(str, ProductOutputDirichletBC), None))
    def _init_lhs(self, lhs, bcs):
        # Evaluate the parametrized factory into a concrete matrix.
        self.lhs = evaluate(lhs)

    @overload(Matrix.Type(), None)
    def _init_lhs(self, lhs, bcs):
        # No bcs: safe to keep a reference to the caller's matrix.
        self.lhs = lhs

    @overload(Matrix.Type(), (list_of(DirichletBC), ProductOutputDirichletBC,
                              dict_of(str, list_of(DirichletBC)),
                              dict_of(str, ProductOutputDirichletBC)))
    def _init_lhs(self, lhs, bcs):
        # Create a copy of lhs, in order not to change
        # the original references when applying bcs
        self.lhs = lhs.copy()

    @overload(Form, (list_of(DirichletBC), ProductOutputDirichletBC,
                     dict_of(str, list_of(DirichletBC)),
                     dict_of(str, ProductOutputDirichletBC), None))
    def _init_rhs(self, rhs, bcs):
        self.rhs = assemble(rhs)

    @overload(ParametrizedTensorFactory, (list_of(DirichletBC), ProductOutputDirichletBC,
                                          dict_of(str, list_of(DirichletBC)),
                                          dict_of(str, ProductOutputDirichletBC), None))
    def _init_rhs(self, rhs, bcs):
        # Evaluate the parametrized factory into a concrete vector.
        self.rhs = evaluate(rhs)

    @overload(Vector.Type(), None)
    def _init_rhs(self, rhs, bcs):
        # No bcs: safe to keep a reference to the caller's vector.
        self.rhs = rhs

    @overload(Vector.Type(), (list_of(DirichletBC), ProductOutputDirichletBC,
                              dict_of(str, list_of(DirichletBC)),
                              dict_of(str, ProductOutputDirichletBC)))
    def _init_rhs(self, rhs, bcs):
        # Create a copy of rhs, in order not to change
        # the original references when applying bcs
        self.rhs = rhs.copy()

    @overload(None)
    def _apply_bcs(self, bcs):
        # No boundary conditions to apply.
        pass

    @overload((list_of(DirichletBC), ProductOutputDirichletBC))
    def _apply_bcs(self, bcs):
        # Flat iterable of boundary conditions.
        for bc in bcs:
            bc.apply(self.lhs, self.rhs)

    @overload((dict_of(str, list_of(DirichletBC)), dict_of(str, ProductOutputDirichletBC)))
    def _apply_bcs(self, bcs):
        # Dict keyed by component name; each value is an iterable of bcs.
        for key in bcs:
            for bc in bcs[key]:
                bc.apply(self.lhs, self.rhs)

    def set_parameters(self, parameters):
        """Accept at most one parameter, "linear_solver" (LU method name)."""
        assert len(parameters) in (0, 1)
        if len(parameters) == 1:
            assert "linear_solver" in parameters
        self._linear_solver = parameters.get("linear_solver", "default")

    def solve(self):
        """Solve lhs * solution = rhs in place, then call the monitor if set."""
        solver = PETScLUSolver(self._linear_solver)
        solver.solve(self.lhs, self.solution.vector(), self.rhs)
        if self.monitor is not None:
            self.monitor(self.solution)
def _init_rhs(self, rhs: ParametrizedTensorFactory, bcs: (list_of(DirichletBC), ProductOutputDirichletBC, dict_of(str, list_of(DirichletBC)), dict_of(str, ProductOutputDirichletBC), None)):
    """Assemble the right-hand side by evaluating the parametrized tensor factory.

    The bcs argument is only used for overload dispatch and is not read here;
    boundary conditions are folded in later by _apply_bcs.
    """
    assembled_rhs = evaluate(rhs)
    self.rhs = assembled_rhs
def _apply_bcs(self, bcs: (dict_of(str, list_of(DirichletBC)), dict_of(str, ProductOutputDirichletBC))):
    """Apply every boundary condition in a component-keyed dict to lhs and rhs.

    Each dict value is an iterable of boundary conditions for one component.
    """
    for component_bcs in bcs.values():
        for bc in component_bcs:
            bc.apply(self.lhs, self.rhs)