def gen_vector(self, v=None):
    """
    Generate/initialize a dolfin generic vector to be compatible with the size of dof.
    """
    if v is None:
        vec = df.Vector(self.mpi_comm, self.dim)  # dofs not aligned to function.vector() in parallel
        # vec = df.Function(self.V).vector()
        vec.zero()
    else:
        if type(v) in (df.Vector, df.PETScVector):
            vec = df.Vector(v)
        elif type(v) is np.ndarray:
            vec = df.Vector(self.mpi_comm, len(v))
            # vec = df.Function(self.V).vector()
            # vec[:] = np.array(v)
            dofmap = self.V.dofmap()
            dof_first, dof_last = dofmap.ownership_range()
            unowned = dofmap.local_to_global_unowned()
            dofs = filter(lambda dof: dofmap.local_to_global_index(dof) not in unowned,
                          range(dof_first, dof_last))
            vec.set_local(v[list(dofs)])
            # vec.apply('insert')
        else:
            df.warning('Unknown type.')
            vec = None
    return vec
def filter_values(values, operator):
    """
    Remove inf's and nan's, and apply operator to each value in values.
    Return sorted array of nonzero values, and the number of zeroes.
    """
    values = array(values)

    # Remove infs and nans
    upper_bound = ascot_parameters["inf"]
    values = values[where(abs(values) < upper_bound)]
    if any(abs(values) > 1.e8):
        warning("Some eigenvalues are larger than 1.e8. "
                "Consider checking your inner products!")

    # Replace small (possibly negative) numbers by zero
    is_nonzero = abs(values) >= ascot_parameters["eps"]
    zeros = values[abs(values) < ascot_parameters["eps"]]
    values = values[where(is_nonzero)]

    # Apply operator (sqrt) if indicated
    values = array(list(map(operator, values)))

    # Sort values
    values.sort()

    return values, sum(is_nonzero == False)
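# A minimal usage sketch of filter_values, assuming it lives in a module with
# numpy's names (array, where, ...) in scope; the ascot_parameters thresholds
# below are illustrative stand-ins, not the library's defaults.
from numpy import array, where, sqrt, nan, inf

ascot_parameters = {"inf": 1e10, "eps": 1e-10}  # hypothetical thresholds

vals = [4.0, 1.0, 1e-14, nan, inf, 9.0]
# nan/inf are dropped, 1e-14 counts as a zero, sqrt is applied to the rest
nonzero, n_zeros = filter_values(vals, sqrt)
print(nonzero)  # [1. 2. 3.]
print(n_zeros)  # 1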
def collapse(x):
    """Compute an explicit matrix representation of an operator.

    For example, given a block_mul object M=A*B, collapse(M) performs the
    actual matrix multiplication.
    """
    # Since _collapse works recursively, this method is a user-visible wrapper
    # to print timing, and to check input/output arguments.
    from time import time
    from dolfin import EpetraMatrix, info, warning
    T = time()
    res = _collapse(x)
    if getattr(res, 'transposed', False):
        # Transposed matrices will normally be converted to a non-transposed
        # one by matrix multiplication or addition, but if the transpose is the
        # outermost operation then this doesn't work.
        warning('Transposed matrix returned from collapse() -- this matrix can be used for multiplications, '
                + 'but not (for example) as input to ML. Try to convert from (A*B)^T to B^T*A^T in your call.')
        from block.block_compose import block_transpose
        return block_transpose(EpetraMatrix(res.M))
    info('computed explicit matrix representation %s in %.2f s' % (str(res), time()-T))

    result = EpetraMatrix(res.M)

    # Sanity check. Cannot trust EpetraExt.Multiply always, it seems.
    from block import block_vec
    v = x.create_vec()
    block_vec([v]).randomize()
    xv = x*v
    err = (xv - result*v).norm('l2') / xv.norm('l2')
    if err > 1e-3:
        raise RuntimeError('collapse computed wrong result; ||(a-a\')x||/||ax|| = %g' % err)

    return result
def update_functional(self, s):
    """
    Update the adjoint RHS associated with the functional at the end of
    timestep s.
    """
    if not isinstance(s, int) or s < 0:
        raise InvalidArgumentException("s must be a non-negative integer")
    if not isinstance(self.__functional, TimeFunctional):
        return

    a_rhs = OrderedDict()
    for f_dep in self.__functional.dependencies(s):
        if is_static_coefficient(f_dep):
            pass
        elif isinstance(f_dep, dolfin.Function):
            a_x = self.__a_map[f_dep]
            a_rhs[a_x] = self.__functional.derivative(f_dep, s)
        elif isinstance(f_dep, dolfin.Constant):
            pass
        else:
            raise DependencyException("Invalid dependency")

    self.__a_L_rhs = [None for i in xrange(len(self.__a_x))]
    for a_x in a_rhs:
        if a_x not in self.__a_keys:
            dolfin.warning("Missing functional dependency %s" % a_x.name())
        else:
            self.__a_L_rhs[self.__a_keys[a_x]] = a_rhs[a_x]

    return
def update_functional(self, s):
    """
    Update the adjoint RHS associated with the functional at the end of
    timestep s.
    """
    if not isinstance(s, int) or s < 0:
        raise InvalidArgumentException("s must be a non-negative integer")
    if not isinstance(self.__functional, TimeFunctional):
        return

    a_rhs = OrderedDict()
    for f_dep in self.__functional.dependencies(s):
        if is_static_coefficient(f_dep):
            pass
        elif isinstance(f_dep, dolfin.Function):
            a_x = self.__a_map[f_dep]
            a_rhs[a_x] = self.__functional.derivative(f_dep, s)
        elif isinstance(f_dep, dolfin.Constant):
            pass
        else:
            raise DependencyException("Invalid dependency")

    self.__a_L_rhs = [None for i in range(len(self.__a_x))]
    for a_x in a_rhs:
        if a_x not in self.__a_keys:
            dolfin.warning("Missing functional dependency %s" % a_x.name())
        else:
            self.__a_L_rhs[self.__a_keys[a_x]] = a_rhs[a_x]

    return
def _as_petscmat(self):
    if df.has_petsc4py():
        from petsc4py import PETSc
        mat = PETSc.Mat().createPython(df.as_backend_type(self.prior.M).mat().getSizes(),
                                       comm=self.prior.mpi_comm)
        # mat = PETSc.Mat().createPython(self.dim, comm=self.prior.mpi_comm)
        mat.setPythonContext(self)
        return df.PETScMatrix(mat)
    else:
        df.warning('Petsc4py not installed: cannot generate PETScMatrix with specified size!')
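# For context, a minimal petsc4py sketch of the Python-context ("shell")
# matrix pattern that _as_petscmat relies on: the context object supplies
# mult(), and the PETSc matrix dispatches to it. MyOp and the sizes are
# illustrative only; a serial run is assumed.
from petsc4py import PETSc

class MyOp(object):
    """Python context supplying the operator action y = 2*x."""
    def mult(self, mat, x, y):
        x.copy(y)     # y <- x
        y.scale(2.0)  # y <- 2*x

n = 4
A = PETSc.Mat().createPython([n, n], comm=PETSc.COMM_SELF)
A.setPythonContext(MyOp())
A.setUp()

x = PETSc.Vec().createSeq(n); x.set(1.0)
y = PETSc.Vec().createSeq(n)
A.mult(x, y)  # y == [2, 2, 2, 2]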
def copy(obj):
    """Return a deep copy of the object"""
    if hasattr(obj, 'copy'):
        return obj.copy()
    else:
        import copy
        try:
            return copy.deepcopy(obj)
        except TypeError:
            from dolfin import warning
            warning("Don't know how to make a deep copy of %r, making shallow copy" % (obj,))
            return copy.copy(obj)
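# A small, pure-Python sketch of the dispatch order above: an object's own
# copy() wins, otherwise copy.deepcopy is tried, with copy.copy as the
# warned fallback. WithCopy is a hypothetical example class.
class WithCopy(object):
    def __init__(self, data):
        self.data = data
    def copy(self):
        return WithCopy(list(self.data))

a = WithCopy([1, 2, 3])
b = copy(a)           # uses a.copy()
t = copy(([1], [2]))  # tuples have no .copy(); falls through to deepcopy
assert b.data == a.data and b is not a
assert t == ([1], [2])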
def matvec(self, b):
    from time import time
    from block.block_vec import block_vec
    from dolfin import log, info, Progress
    TRACE = 13  # dolfin.TRACE
    T = time()

    # If x and initial_guess are block_vecs, some of the blocks may be
    # scalars (although they are normally converted to vectors by bc
    # application). To be sure, call allocate() on them.
    if isinstance(b, block_vec):
        # Create a shallow copy to call allocate() on, to avoid changing the caller's copy of b
        b = block_vec(len(b), b.blocks)
        b.allocate(self.A, dim=0)

    if self.initial_guess:
        # Most (all?) solvers modify x, so make a copy to avoid changing
        # the caller's copy of x
        from block.block_util import copy
        x = copy(self.initial_guess)
        if isinstance(x, block_vec):
            x.allocate(self.A, dim=1)
    else:
        x = self.A.create_vec(dim=1)
        x.zero()

    try:
        log(TRACE, self.__class__.__name__ + ' solve of ' + str(self.A))
        if self.B != 1.0:
            log(TRACE, 'Using preconditioner: ' + str(self.B))
        progress = Progress(self.name, self.maxiter)
        if self.tolerance < 0:
            tolerance = -self.tolerance
            relative = True
        else:
            tolerance = self.tolerance
            relative = False
        x = self.method(self.B, self.AR, x, b, tolerance=tolerance,
                        relativeconv=self.relativeconv, maxiter=self.maxiter,
                        progress=progress, callback=self.callback,
                        **self.kwargs)
        del progress  # trigger final printout
    except Exception as e:
        from dolfin import warning
        warning("Error solving " + self.name)
        raise
def timestep_range(T, dt):
    """Return a matching time step range for given end time and time step.
    Note that the time step may be adjusted so that it matches the given
    end time."""
    # Compute range; n must be an int for linspace
    ds = dt
    n = int(ceil(T / dt))
    t_range = linspace(0, T, n + 1)[1:]
    dt = t_range[0]

    # Warn about changing time step
    if ds != dt:
        warning("Changing time step from %g to %g" % (ds, dt))

    return dt, t_range
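# A quick sketch of the adjustment behaviour, assuming it runs in the same
# namespace as timestep_range (which resolves ceil/linspace/warning at module
# level); the warning stub below stands in for dolfin.warning.
from numpy import ceil, linspace

def warning(msg):  # stand-in for dolfin.warning in this sketch
    print('WARNING:', msg)

# 1.0 is not divisible by 0.3, so the step is adjusted down to 0.25
dt, ts = timestep_range(1.0, 0.3)
assert abs(dt - 0.25) < 1e-12
print(ts)  # [0.25 0.5  0.75 1.  ]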
def gen_vector(self, v=None):
    """
    Generate/initialize a dolfin generic vector to be compatible with the size of dof.
    """
    if v is None:
        vec = df.Vector(self.mpi_comm, self.dim)
        vec.zero()
    else:
        if type(v) is df.Vector:
            vec = df.Vector(v)
        elif type(v) is np.ndarray:
            vec = df.Vector(self.mpi_comm, len(v))
            vec[:] = np.array(v)
        else:
            df.warning('Unknown type.')
            vec = None
    return vec
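# A hedged usage sketch for gen_vector, assuming a pre-2018 dolfin where
# df.Vector(comm, size) and df.mpi_comm_world() exist, and a serial run.
# Space is a hypothetical stand-in exposing the mpi_comm/dim attributes the
# method expects.
import dolfin as df
import numpy as np

class Space(object):
    """Tiny stand-in for the object this method belongs to."""
    def __init__(self, dim):
        self.mpi_comm = df.mpi_comm_world()
        self.dim = dim

Space.gen_vector = gen_vector  # attach the function above as a method

sp = Space(4)
z = sp.gen_vector()                            # zeroed vector of length 4
w = sp.gen_vector(np.array([1., 2., 3., 4.]))  # initialized from numpy
print(z.get_local(), w.get_local())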
def XDMFTempSeries(path, V, first=0, last=None):
    '''
    Read in the temp series of functions in V from XDMF file. If V is not a
    function space then a finite element has to be provided for constructing
    the space on the recovered mesh.
    '''
    # NOTE: in 2017.2.0 fenics only stores vertex values so CG1 functions
    # is what we go for
    _, ext = os.path.splitext(path)
    assert ext == '.xdmf'

    tree = ET.parse(path)
    domain = list(tree.getroot())[0]
    grid = list(domain)[0]

    times = []   # Only collect time stamps so that we access in right order
    h5_file = ''  # Consistency of piece as VisualisationVector ...
    for item in grid:
        _, __, time, attrib = list(item)

        time = time.attrib['Value']
        times.append(time)

        piece = list(attrib)[0]
        h5_file_, fdata = piece.text.split(':/')

        assert not h5_file or h5_file == h5_file_
        h5_file = h5_file_
    times = times[slice(first, last, None)]

    # We read visualization vector from this
    h5_file = os.path.join(os.path.dirname(os.path.abspath(path)), h5_file)

    if not isinstance(V, FunctionSpace):
        warning('Setting up P1 space on the recovered mesh')
        cell_type = V.cell()  # Dangerously assuming this is a UFL element
        mesh = read_h5_mesh(h5_file, cell_type)
        V = FunctionSpace(mesh, V)
    V = get_P1_space(V)

    functions = read_h5_function(h5_file, times, V)

    ft_pairs = zip(functions, map(float, times))

    return TempSeries(ft_pairs)
def rand_aux(self, cov='I'):
    """
    Generate random vector: _v ~ N(0,_K), _K = I + Nu_r * (D_r-I_r) * Nu_r'
    """
    noise = dl.Vector()
    self.model.prior.init_vector(noise, "noise")
    hp.random.Random.normal(noise, 1., True)
    _v = self.model.model_stat.generate_vector(hp.PARAMETER)
    _v_help = dl.Vector(_v)
    self.model.whtprior.sample(noise, _v_help)
    if cov == 'I':
        _v[:] = _v_help
    elif cov == 'K':
        self.rtK(_v_help, _v)
    else:
        dl.warning('Wrong covariance specification!')
    return _v
def apply(self, a):
    # Manual application of bcs
    if isinstance(a, dolfin.Matrix):
        A = a
        # Modify A: zero the bc rows & set diagonal to 1
        A.ident_local(self.dof_set)
        A.apply("insert")
    elif isinstance(a, dolfin.GenericVector):
        b = a
        # Modify b: entry in the bc row is taken from bc_f
        bc_values = self.bc_f.vector().array()
        b_values = b.array()
        b_values[self.dof_set] = bc_values[self.dof_set]
        b.set_local(b_values)
        b.apply("insert")
    else:
        dolfin.warning("Could not apply Point BC.")
def test_stability(forms, inner_products, spaces, bcs=None):
    """
    Test Babuska-Brezzi stability for a variational problem specified by
    (a set of) forms, inner products and discretization spaces parameterized
    over a mesh family.
    """
    if ascot_parameters["check_continuity"]:
        info("Checking continuity of form")
        continuous = test_continuity(forms, inner_products, spaces)
        if not continuous.is_stable():
            warning("The form does not seem to be bounded. Check your inner products!")
    if is_saddle_point(forms):
        return _test_brezzi_stability(forms, inner_products, spaces, bcs)
    return _test_babuska_stability(forms, inner_products, spaces, bcs)
def compute_signs(self, AA, bb):
    self.signs = [None] * len(self)
    bb.allocate(AA, dim=0)
    for i in range(len(self)):
        if not self[i]:
            # No BC on this block, sign doesn't matter
            continue
        if numpy.isscalar(AA[i, i]):
            xAx = AA[i, i]
        else:
            # Do not use a constant vector, as that may be in the null space
            # before boundary conditions are applied
            x = AA[i, i].create_vec(dim=1)
            ran = numpy.random.random(x.local_size())
            x.set_local(ran)

            Ax = AA[i, i] * x
            xAx = x.inner(Ax)
        if xAx == 0:
            from dolfin import warning
            warning("block_bc: zero or semi-definite block (%d,%d), using sign +1" % (i, i))
        self.signs[i] = -1 if xAx < 0 else 1
    dolfin.info('Calculated signs of diagonal blocks:' + str(self.signs))
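# The sign test above is just a random Rayleigh quotient x'Ax; a standalone
# numpy sketch of the same idea (block_sign is illustrative, not the
# cbc.block API):
import numpy

def block_sign(A, seed=0):
    # A random probe vector avoids landing in a null-space direction
    x = numpy.random.RandomState(seed).rand(A.shape[0])
    xAx = x.dot(A.dot(x))
    return -1 if xAx < 0 else 1

assert block_sign(numpy.eye(3)) == 1    # SPD block -> +1
assert block_sign(-numpy.eye(3)) == -1  # negative definite block -> -1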
def PVDTempSeries(path, V=None, first=0, last=None):
    '''
    Read in the temp series of functions in V from PVD file. If V is not a
    function space then a finite element has to be provided for constructing
    the space on the recovered mesh.
    '''
    _, ext = os.path.splitext(path)
    assert ext == '.pvd'

    tree = ET.parse(path)
    collection = list(tree.getroot())[0]
    path = os.path.dirname(os.path.abspath(path))
    # Read in paths/timestamps for VTUs. NOTE: as this is supposed to be
    # serial we assert part 0
    vtus, times = [], []
    for dataset in collection:
        assert dataset.attrib['part'] == '0'
        vtus.append(os.path.join(path, dataset.attrib['file']))
        times.append(float(dataset.attrib['timestep']))

    vtus, times = vtus[slice(first, last, None)], times[slice(first, last, None)]
    # path.vtu -> function. But vertex values!!!!
    if not isinstance(V, FunctionSpace):
        warning('Setting up P1 space on the recovered mesh')
        cell_type = V.cell()  # Dangerously assuming this is a UFL element
        mesh = read_vtu_mesh(vtus[0], cell_type)
        V = FunctionSpace(mesh, V)
    V = get_P1_space(V)

    functions = read_vtu_function(vtus, V)
    ft_pairs = zip(functions, times)

    return TempSeries(ft_pairs)
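# A hedged round-trip sketch for PVDTempSeries; assumes a serial run, a
# legacy dolfin where File supports "out << (f, t)", and that this module's
# VTU readers (read_vtu_function etc.) are available alongside.
import dolfin as df

mesh = df.UnitSquareMesh(4, 4)
V = df.FunctionSpace(mesh, 'CG', 1)
f = df.Function(V)

out = df.File('u.pvd')
for t in (0.0, 0.5, 1.0):
    f.vector()[:] = t
    out << (f, t)

# Read the series back; only vertex values survive, hence the P1 space
series = PVDTempSeries('u.pvd', V)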
def dolfin_to_carpfile(mesh, basename, markers=None,
                       vert_fields=None, cell_fields=None):
    """
    NOT DEBUGGED:
    Write carp mesh and fields to file from dolfin data

    mesh : dolfin.Mesh
        The dolfin.mesh which should be written to file
    basename : str
        Basename of file which all data will be written to
    markers : dict (optional)
        A dict of name to markers of facet boundaries contained in the mesh
    vert_fields : dict (optional)
        A dict between named vertex field data and dolfin Functions
    cell_fields : dict (optional)
        A dict between named cell field data and dolfin Functions
    """
    import dolfin as d
    import numpy as np

    boundary = d.CompiledSubDomain("on_boundary")

    d.warning("This function is not tested...")

    boundary_facets = d.FacetFunction("size_t", mesh, 0)
    boundary.mark(boundary_facets, 1)

    num_boundary_facets = np.sum(boundary_facets.array() == 1)

    with open(basename + ".pts", "w") as f:
        f.write("{0}\n".format(mesh.num_vertices()))
        [f.write("{0:.10f} {1:.10f} {2:.10f}\n".format(*coord))
         for coord in mesh.coordinates()]

    with open(basename + ".elem", "w") as f:
        f.write("{0}\n".format(mesh.num_cells()))
        [f.write("Tt {0} {1} {2} {3} 0\n".format(*cell))
         for cell in mesh.cells()]

    with open(basename + ".surf", "w") as f:
        f.write("{0}\n".format(num_boundary_facets))
        [f.write("Tr {0} {1} {2}\n".format(*facet.entities(0)))
         for facet in d.SubsetIterator(boundary_facets, 1)]

    # If generating mapping between vertices and boundaries
    if markers:
        # Get the facet markers
        facet_markers = mesh.domains().facet_domains(mesh)

        # Iterate over markers
        for boundary_name, marker in markers.items():
            vertices = set()
            for face in d.SubsetIterator(facet_markers, marker):
                vertices.update(face.entities(0))

            with open(basename + "_" + boundary_name + ".vtx", "w") as f:
                f.write("{0:d}\nintra \n".format(int(len(vertices))))
                [f.write("{0}\n".format(vert)) for vert in vertices]

        # Apex node...
        #with open(basename+"_apex.vtx", "w") as f:
        #    f.write("{0:d}\nintra \n".format(1))
        #    f.write("{0:d}\n".format((apex_point.array()==1).nonzero()[0][0]))

    # If outputting vertex fields
    if vert_fields:
        # Get dof mapping
        dofs_to_vert, vectordofs_to_vert, vectordofs_to_subvert = \
            dofs_to_verts(mesh)

        # Iterate over the passed fields
        for field_name, func in vert_fields.items():
            values = func.vector().array()

            # If scalar field (the original compared against an undefined
            # `dofs`; the length of the dof vector is what is meant)
            if len(values) == mesh.num_vertices():
                reordered_values = values[dofs_to_vert]

                # Write the field to file
                with open(basename + "_" + field_name + ".dat", "w") as f:
                    [f.write("{0:.10f}\n".format(value))
                     for value in reordered_values]

            # If vector field
            elif len(values) == 3 * mesh.num_vertices():
                raise NotImplementedError
            else:
                raise ValueError("Field and mesh do not match: " + field_name)
from collections import namedtuple

import numpy as np
import dolfin as df

try:
    import mshr
except ImportError:
    df.warning('mshr is not installed')

geometry = namedtuple("geometry", "mesh, ffun, markers")

if df.__version__.startswith('20'):
    # Year based versioning
    DOLFIN_VERSION_MAJOR = float(df.__version__.split('.')[0])
else:
    DOLFIN_VERSION_MAJOR = float('.'.join(df.__version__.split('.')[:2]))


def mpi_comm_world():
    if DOLFIN_VERSION_MAJOR >= 2018:
        return df.MPI.comm_world
    else:
        return df.mpi_comm_world()


def value_size(obj):
    if DOLFIN_VERSION_MAJOR >= 2018:
        value_shape = obj.value_shape()
        if len(value_shape) == 0:
            return 1
        else:
            return value_shape[0]
    else:
        return obj.value_size()
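# A small sketch of the version shim in use: the same calls work on both
# pre- and post-2018 dolfin releases (df is already imported above).
comm = mpi_comm_world()
c = df.Constant((1.0, 2.0))
print(value_size(c))  # 2 on either API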
def solve(self):
    """
    Solve the equation
    """
    x, pre_assembly_parameters = self.x(), self.pre_assembly_parameters()
    if self.__initial_guess is not None and self.__initial_guess is not x:
        x.assign(self.__initial_guess)

    if self.is_linear():
        bcs, linear_solver = self.bcs(), self.linear_solver()

        if isinstance(self.__a, dolfin.GenericMatrix):
            L = assemble(self.__L, copy = len(bcs) > 0)
            enforce_bcs(L, bcs)
            linear_solver.solve(x.vector(), L)
        elif self.__a.rank() == 2:
            a = assemble(self.__a, copy = len(bcs) > 0)
            L = assemble(self.__L, copy = len(bcs) > 0)
            apply_bcs(a, bcs, L = L, symmetric_bcs = pre_assembly_parameters["equations"]["symmetric_boundary_conditions"])
            linear_solver.set_operator(a)
            linear_solver.solve(x.vector(), L)
        else:
            assert(self.__a.rank() == 1)
            assert(linear_solver is None)
            a = assemble(self.__a, copy = False)
            L = assemble(self.__L, copy = False)
            assert(L.local_range() == a.local_range())
            x.vector().set_local(L.array() / a.array())
            x.vector().apply("insert")
            enforce_bcs(x.vector(), bcs)
    elif self.solver_parameters().get("nonlinear_solver", "newton") == "newton":
        # Newton solver, intended to have near identical behaviour to the
        # Newton solver supplied with DOLFIN. See
        # http://fenicsproject.org/documentation/tutorial/nonlinear.html for
        # further details.
        default_parameters = dolfin.NewtonSolver.default_parameters()
        solver_parameters = self.solver_parameters()
        if "newton_solver" in solver_parameters:
            parameters = solver_parameters["newton_solver"]
        else:
            parameters = {}
        linear_solver = self.linear_solver()

        atol = default_parameters["absolute_tolerance"]
        rtol = default_parameters["relative_tolerance"]
        max_its = default_parameters["maximum_iterations"]
        omega = default_parameters["relaxation_parameter"]
        err = default_parameters["error_on_nonconvergence"]
        r_def = default_parameters["convergence_criterion"]
        for key in parameters.keys():
            if key == "absolute_tolerance":
                atol = parameters[key]
            elif key == "convergence_criterion":
                r_def = parameters[key]
            elif key == "error_on_nonconvergence":
                err = parameters[key]
            elif key == "maximum_iterations":
                max_its = parameters[key]
            elif key == "relative_tolerance":
                rtol = parameters[key]
            elif key == "relaxation_parameter":
                omega = parameters[key]
            elif key in ["linear_solver", "preconditioner", "lu_solver", "krylov_solver"]:
                pass
            elif key in ["method", "report"]:
                raise NotImplementedException("Unsupported Newton solver parameter: %s" % key)
            else:
                raise ParameterException("Unexpected Newton solver parameter: %s" % key)

        eq, bcs, hbcs = self.eq(), self.bcs(), self.hbcs()
        a, L = self.__a, self.__L

        x_name = x.name()
        x = x.vector()
        enforce_bcs(x, bcs)

        dx = self.__dx
        if not isinstance(linear_solver, dolfin.GenericLUSolver):
            dx.zero()

        if r_def == "residual":
            l_L = assemble(L, copy = len(hbcs) > 0)
            enforce_bcs(l_L, hbcs)
            r_0 = l_L.norm("l2")
            it = 0
            if r_0 >= atol:
                l_a = assemble(a, copy = len(hbcs) > 0)
                apply_bcs(l_a, hbcs, symmetric_bcs = pre_assembly_parameters["equations"]["symmetric_boundary_conditions"])
                linear_solver.set_operator(l_a)
                linear_solver.solve(dx, l_L)
                x.axpy(omega, dx)
                it += 1
                atol = max(atol, r_0 * rtol)
                while it < max_its:
                    l_L = assemble(L, copy = len(hbcs) > 0)
                    enforce_bcs(l_L, hbcs)
                    r = l_L.norm("l2")
                    if r < atol:
                        break
                    l_a = assemble(a, copy = len(hbcs) > 0)
                    apply_bcs(l_a, hbcs, symmetric_bcs = pre_assembly_parameters["equations"]["symmetric_boundary_conditions"])
                    linear_solver.set_operator(l_a)
                    linear_solver.solve(dx, l_L)
                    x.axpy(omega, dx)
                    it += 1
        elif r_def == "incremental":
            l_a = assemble(a, copy = len(hbcs) > 0)
            l_L = assemble(L, copy = len(hbcs) > 0)
            apply_bcs(l_a, hbcs, L = l_L, symmetric_bcs = pre_assembly_parameters["equations"]["symmetric_boundary_conditions"])
            linear_solver.set_operator(l_a)
            linear_solver.solve(dx, l_L)
            x.axpy(omega, dx)
            it = 1
            r_0 = dx.norm("l2")
            if r_0 >= atol:
                atol = max(atol, rtol * r_0)
                while it < max_its:
                    l_a = assemble(a, copy = len(hbcs) > 0)
                    l_L = assemble(L, copy = len(hbcs) > 0)
                    apply_bcs(l_a, hbcs, L = l_L, symmetric_bcs = pre_assembly_parameters["equations"]["symmetric_boundary_conditions"])
                    linear_solver.set_operator(l_a)
                    linear_solver.solve(dx, l_L)
                    x.axpy(omega, dx)
                    it += 1
                    if dx.norm("l2") < atol:
                        break
        else:
            raise ParameterException("Invalid convergence criterion: %s" % r_def)

        if it == max_its:
            if err:
                raise StateException("Newton solve for %s failed to converge after %i iterations" % (x_name, it))
            else:
                dolfin.warning("Newton solve for %s failed to converge after %i iterations" % (x_name, it))
        # dolfin.info("Newton solve for %s converged after %i iterations" % (x_name, it))
    else:
        problem = dolfin.NonlinearVariationalProblem(self.eq().lhs - self.eq().rhs, x,
                                                     bcs = self.bcs(), J = self.J())
        nl_solver = dolfin.NonlinearVariationalSolver(problem)
        nl_solver.parameters.update(self.solver_parameters())
        nl_solver.solve()

    return
# This is the most fragile component of the package so be advised that
# these ARE NOT GENERAL PURPOSE READERS
from dolfin import Function, dof_to_vertex_map, warning, Mesh, MeshEditor
import xml.etree.ElementTree as ET
from itertools import dropwhile
from mpi4py import MPI
import numpy as np

try:
    import h5py
except ImportError:
    warning('H5Py missing')

assert MPI.COMM_WORLD.size == 1, 'No parallel (for your own good)'


def data_reordering(V):
    '''Reshaping/reordering data read from files'''
    # HDF5/VTK store 3d vectors and 3d tensors so we need to chop the data;
    # also reorder as in 2017.2.0 only(?) vertex values are dumped
    if V.ufl_element().value_shape() == ():
        dof2v = dof_to_vertex_map(V)
        reorder = lambda a: a[dof2v]
        return reorder

    Vi = V.sub(0).collapse()
    dof2v = dof_to_vertex_map(Vi)
from collections import namedtuple

import numpy as np
import dolfin as df

try:
    import mshr
except ImportError:
    df.warning("mshr is not installed")

geometry = namedtuple("geometry", "mesh, ffun, markers")

if df.__version__.startswith("20"):
    # Year based versioning
    DOLFIN_VERSION_MAJOR = float(df.__version__.split(".")[0])
else:
    try:
        DOLFIN_VERSION_MAJOR = float(".".join(df.__version__.split(".")[:2]))
    except:
        DOLFIN_VERSION_MAJOR = 1.6


def mpi_comm_world():
    if DOLFIN_VERSION_MAJOR >= 2018:
        return df.MPI.comm_world
    else:
        return df.mpi_comm_world()


def value_size(obj):
    if DOLFIN_VERSION_MAJOR >= 2018:
        value_shape = obj.value_shape()
def trace_mat_no_restrict(V, TV, trace_mesh=None, tag_data=None):
    '''The first cell connected to the facet gets to set the values of TV'''
    mesh = V.mesh()

    if trace_mesh is None:
        trace_mesh = TV.mesh()

    fdim = trace_mesh.topology().dim()

    # None means all
    if tag_data is None:
        tag_data = (MeshFunction('size_t', trace_mesh,
                                 trace_mesh.topology().dim(), 0),
                    set((0, )))
    trace_mesh_subdomains, tags = tag_data

    # Init/extract the mapping
    try:
        assert get_entity_map(mesh, trace_mesh, trace_mesh_subdomains, tags)
    except (AssertionError, IndexError):
        warning('Using non-conforming trace')
        # So non-conforming matrix returns PETSc.Mat
        return nonconforming_trace_mat(V, TV)

    # We can get it
    mapping = trace_mesh.parent_entity_map[mesh.id()][fdim]  # Map cell of TV to cells of V

    mesh.init(fdim, fdim + 1)
    f2c = mesh.topology()(fdim, fdim + 1)  # Facets of V to cell of V

    # The idea is to evaluate TV's degrees of freedom at basis functions
    # of V
    Tdmap = TV.dofmap()
    TV_dof = DegreeOfFreedom(TV)

    dmap = V.dofmap()
    V_basis_f = FEBasisFunction(V)

    # Only look at tagged cells
    trace_cells = itertools.chain(*[
        itertools.imap(operator.methodcaller('index'),
                       SubsetIterator(trace_mesh_subdomains, tag))
        for tag in tags
    ])

    # Rows
    visited_dofs = [False] * TV.dim()
    # Column values
    dof_values = np.zeros(V_basis_f.elm.space_dimension(), dtype='double')
    with petsc_serial_matrix(TV, V) as mat:

        for trace_cell in trace_cells:
            TV_dof.cell = trace_cell
            trace_dofs = Tdmap.cell_dofs(trace_cell)

            # Figure out the dofs of V to use here. Does not matter which
            # cell of the connected ones we pick
            cell = f2c(mapping[trace_cell])[0]
            V_basis_f.cell = cell

            dofs = dmap.cell_dofs(cell)
            for local_T, dof_T in enumerate(trace_dofs):

                if visited_dofs[dof_T]:
                    continue
                else:
                    visited_dofs[dof_T] = True

                # Define trace dof
                TV_dof.dof = local_T

                # Eval at V basis functions
                for local, dof in enumerate(dofs):
                    # Set which basis foo
                    V_basis_f.dof = local
                    dof_values[local] = TV_dof.eval(V_basis_f)

                # Can fill the matrix now
                col_indices = np.array(dofs, dtype='int32')
                # Insert
                mat.setValues([dof_T], col_indices, dof_values,
                              PETSc.InsertMode.INSERT_VALUES)
    return mat
import dolfin
import ffc
import instant
import numpy
import types
import ufl

from .embedded_cpp import *
from .versions import *

__all__ = []

# Only versions 1.5.x and 1.6.x have been tested.
if dolfin_version() < (1, 5, 0) or dolfin_version() >= (1, 7, 0):
    dolfin.warning("DOLFIN version %s not supported" % dolfin.__version__)
if ufl_version() < (1, 5, 0) or ufl_version() >= (1, 7, 0):
    dolfin.warning("UFL version %s not supported" % ufl.__version__)
if ffc_version() < (1, 5, 0) or ffc_version() >= (1, 7, 0):
    dolfin.warning("FFC version %s not supported" % ffc.__version__)
if instant_version() < (1, 5, 0) or instant_version() >= (1, 7, 0):
    dolfin.warning("Instant version %s not supported" % instant.__version__)

# DOLFIN patches.
if dolfin_version() < (1, 1, 0):
    __all__ += \
        [
            "GenericLinearSolver",
            "GenericLUSolver",
            "FacetFunction",
            "MeshFunction",
def expand_expr(expr):
    """
    Recursively expand the supplied Expr into the largest possible Sum.
    """
    if not isinstance(expr, ufl.expr.Expr):
        raise InvalidArgumentException("expr must be an Expr")
    if isinstance(expr, ufl.algebra.Sum):
        terms = []
        for term in expr.operands():
            terms += expand_expr(term)
        return terms
    elif isinstance(expr, ufl.algebra.Product):
        ops = expr.operands()
        fact1 = ops[0]
        fact2 = ops[1]
        for op in ops[2:]:
            fact2 *= op
        fact1_terms = expand_expr(fact1)
        fact2_terms = expand_expr(fact2)
        terms = []
        for term1 in fact1_terms:
            for term2 in fact2_terms:
                terms.append(term1 * term2)
        return terms
    elif isinstance(expr, ufl.indexed.Indexed):
        ops = expr.operands()
        assert(len(ops) == 2)
        return [ufl.indexed.Indexed(term, ops[1]) for term in expand_expr(ops[0])]
    elif isinstance(expr, ufl.tensors.ComponentTensor):
        ops = expr.operands()
        assert(len(ops) == 2)
        return [ufl.tensors.ComponentTensor(term, ops[1]) for term in expand_expr(ops[0])]
    elif isinstance(expr, ufl.algebra.Division):
        ops = expr.operands()
        assert(len(ops) == 2)
        return [ufl.algebra.Division(term, ops[1]) for term in expand_expr(ops[0])]
    elif isinstance(expr, ufl.restriction.PositiveRestricted):
        ops = expr.operands()
        assert(len(ops) == 1)
        return [ufl.restriction.PositiveRestricted(term) for term in expand_expr(ops[0])]
    elif isinstance(expr, ufl.restriction.NegativeRestricted):
        ops = expr.operands()
        assert(len(ops) == 1)
        return [ufl.restriction.NegativeRestricted(term) for term in expand_expr(ops[0])]
    elif isinstance(expr, ufl.differentiation.Grad):
        ops = expr.operands()
        assert(len(ops) == 1)
        return [ufl.differentiation.Grad(term) for term in expand_expr(ops[0])]
    elif isinstance(expr, (ufl.tensoralgebra.Dot, ufl.tensoralgebra.Inner,
                           ufl.differentiation.CoefficientDerivative,
                           ufl.differentiation.VariableDerivative)):
        return expand_expr(expand(expr))
    # Expr types white-list. These cannot be expanded.
    elif isinstance(expr, (ufl.constantvalue.ConstantValue, ufl.argument.Argument,
                           dolfin.Expression, dolfin.Function, dolfin.Constant,
                           ufl.geometry.Circumradius, ufl.algebra.Abs,
                           ufl.geometry.FacetNormal, ufl.mathfunctions.Sqrt,
                           ufl.classes.Variable, ufl.mathfunctions.Exp,
                           ufl.algebra.Power, ufl.indexing.MultiIndex,
                           ufl.classes.Label)):
        return [expr]
    # Expr types grey-list. It might be possible to expand these, but just
    # ignore them at present.
    elif isinstance(expr, (ufl.tensors.ListTensor, ufl.classes.Conditional,
                           ufl.indexsum.IndexSum)):
        return [expr]
    else:
        dolfin.warning("Expr type %s not expanded by expand_expr" % expr.__class__)
        return [expr]
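# A hedged usage sketch of the expansion, assuming the legacy dolfin/UFL
# versions this module targets (where Expr.operands() still exists):
import dolfin

mesh = dolfin.UnitIntervalMesh(2)
V = dolfin.FunctionSpace(mesh, "CG", 1)
u, v = dolfin.Function(V), dolfin.Function(V)
w = dolfin.TestFunction(V)

# (u + v)*w is a Product with a Sum factor; expand_expr distributes it into
# the flat list of Products [u*w, v*w]
terms = expand_expr((u + v) * w)
print(terms)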
def nonconforming_trace_mat(V, T):
    '''
    Matrix taking function f from d dim space V to g in (d-1) space T.
    T(f) ~ g should hold.
    '''
    # For this to work I only make sure that function values are the same
    assert V.dolfin_element().value_rank() == T.dolfin_element().value_rank()
    assert V.ufl_element().value_shape() == T.ufl_element().value_shape()

    # I want to evaluate T degrees of freedom at V basis functions, i.e.
    # L^T_k{phi^V_l}. The picture is
    #
    #   ------
    #   \    /\
    # ===\==/==\====T
    #     \/----\
    #
    # and thus I assume that each dof of T space (row) will involve
    # collisions with several cells/basis functions of V space
    mesh = V.mesh()  # The (d-1) trace mesh
    tree = mesh.bounding_box_tree()
    limit = mesh.topology().size_global(mesh.topology().dim())

    Tdm = T.dofmap()
    elm_T = T.element()
    # Colliding cells with trace dofs
    collisions = []
    for Tdof_x in T.tabulate_dof_coordinates().reshape((T.dim(), -1)):
        cs = tree.compute_entity_collisions(Point(*Tdof_x))
        if any(c >= limit for c in cs):
            warning('Some colliding cells not found')
            cs = [c for c in cs if c < limit]  # py3-safe in place of filter(...)
        collisions.append(cs)

    # So we fill rows by checking basis functions on the intersected cells
    Vdm = V.dofmap()
    elm_V = V.element()

    V_basis_function = FEBasisFunction(V)
    T_degree_of_freedom = DegreeOfFreedom(T)

    visited_dofs = [False] * T.dim()
    with petsc_serial_matrix(T, V) as mat:

        for Tcell in range(T.mesh().num_cells()):
            # Set for this cell
            T_degree_of_freedom.cell = Tcell

            Tdofs = Tdm.cell_dofs(Tcell)
            for local_T, Tdof in enumerate(Tdofs):
                # Seen the row?
                if visited_dofs[Tdof]:
                    continue
                visited_dofs[Tdof] = True

                # Set to current dof
                T_degree_of_freedom.dof = local_T

                col_indices, col_values = [], []
                # Now all the V cells and their basis functions
                for c in collisions[Tdof]:
                    # Set the dof cell
                    V_basis_function.cell = c

                    Vdofs = Vdm.cell_dofs(c)
                    # Columns
                    for local_V, Vdof in enumerate(Vdofs):
                        if Vdof in col_indices:
                            continue
                        # Set as basis_function
                        V_basis_function.dof = local_V

                        # Evaluate trace dof at basis function
                        dof_value = T_degree_of_freedom.eval(V_basis_function)

                        col_indices.append(Vdof)
                        col_values.append(dof_value)
                # Can fill the matrix row
                col_indices = np.array(col_indices, dtype='int32')
                col_values = np.array(col_values)

                mat.setValues([Tdof], col_indices, col_values,
                              PETSc.InsertMode.INSERT_VALUES)
    return mat
import dolfin
import ffc
import instant
import numpy
import types
import ufl

from embedded_cpp import *
from versions import *

__all__ = []

# Only versions 1.2.x and 1.3.x have been tested.
if dolfin_version() < (1, 2, 0) or dolfin_version() >= (1, 5, 0):
    dolfin.warning("DOLFIN version %s not supported" % dolfin.__version__)
if ufl_version() < (1, 2, 0) or ufl_version() >= (1, 5, 0):
    dolfin.warning("UFL version %s not supported" % ufl.__version__)
if ffc_version() < (1, 2, 0) or ffc_version() >= (1, 5, 0):
    dolfin.warning("FFC version %s not supported" % ffc.__version__)
if instant_version() < (1, 2, 0) or instant_version() >= (1, 5, 0):
    dolfin.warning("Instant version %s not supported" % instant.__version__)

# DOLFIN patches.
if dolfin_version() < (1, 1, 0):
    __all__ += \
        [
            "GenericLinearSolver",
            "GenericLUSolver",
            "FacetFunction",
            "MeshFunction",
def nonconforming_trace_mat(V, T):
    '''
    Matrix taking function f from d dim space V to g in (d-1) space T.
    T(f) ~ g should hold.
    '''
    # For this to work I only make sure that function values are the same
    assert V.ufl_element().value_shape() == T.ufl_element().value_shape()

    # I want to evaluate T degrees of freedom at V basis functions, i.e.
    # L^T_k{phi^V_l}. The picture is
    #
    #   ------
    #   \    /\
    # ===\==/==\====T
    #     \/----\
    #
    # and thus I assume that each dof of T space (row) will involve
    # collisions with several cells/basis functions of V space. However,
    # here we only snap to the first one
    mesh = V.mesh()  # The (d-1) trace mesh
    tree = mesh.bounding_box_tree()
    limit = mesh.num_entities_global(mesh.topology().dim())

    Tdm = T.dofmap()
    elm_T = T.element()
    # Colliding cells with trace dofs
    collisions = []
    for Tdof_x in T.tabulate_dof_coordinates().reshape((T.dim(), -1)):
        c = tree.compute_first_entity_collision(df.Point(*Tdof_x))
        # Contained?
        if c >= limit:
            df.warning('Some colliding cells not found')
        collisions.append(c)

    # So we fill rows by checking basis functions on the intersected cells
    Vdm = V.dofmap()
    elm_V = V.element()

    V_basis_function = FEBasisFunction(V)
    T_degree_of_freedom = DegreeOfFreedom(T)

    X_T = T.tabulate_dof_coordinates().reshape((T.dim(), -1))
    X_V = V.tabulate_dof_coordinates().reshape((V.dim(), -1))

    visited_dofs = np.zeros(T.dim(), dtype=bool)
    col_values = np.zeros(V_basis_function.elm.space_dimension(), dtype='double')
    with petsc_serial_matrix(T, V) as mat:

        for Tcell in range(T.mesh().num_cells()):
            # Set for this cell
            T_degree_of_freedom.cell = Tcell

            Tdofs = Tdm.cell_dofs(Tcell)
            for local_T, Tdof in enumerate(Tdofs):
                # Seen the row?
                if visited_dofs[Tdof]:
                    continue
                visited_dofs[Tdof] = True

                # Set to current dof
                T_degree_of_freedom.dof = local_T

                # Now all the V cells and their basis functions
                c = collisions[Tdof]
                # If we have no reasonable cell leave the row empty
                if c >= limit:
                    continue

                # Set the dof cell
                V_basis_function.cell = c

                Vdofs = np.array(Vdm.cell_dofs(c), dtype='int32')  # These are columns

                # Fill column
                for local_V, Vdof in enumerate(Vdofs):
                    # Set as basis_function
                    V_basis_function.dof = local_V

                    # Evaluate trace dof at basis function
                    dof_value = T_degree_of_freedom.eval(V_basis_function)
                    col_values[local_V] = dof_value

                mat.setValues([Tdof], Vdofs, col_values,
                              PETSc.InsertMode.INSERT_VALUES)
    return mat
def cbc_warning(msg):
    "Raise warning on master process."
    if on_master_process():
        warning(msg)
def expand_expr(expr):
    """
    Recursively expand the supplied Expr into the largest possible Sum.
    """
    if not isinstance(expr, ufl.core.expr.Expr):
        raise InvalidArgumentException("expr must be an Expr")
    if isinstance(expr, ufl.algebra.Sum):
        terms = []
        for term in expr.operands():
            terms += expand_expr(term)
        return terms
    elif isinstance(expr, ufl.algebra.Product):
        ops = expr.operands()
        fact1 = ops[0]
        fact2 = ops[1]
        for op in ops[2:]:
            fact2 *= op
        fact1_terms = expand_expr(fact1)
        fact2_terms = expand_expr(fact2)
        terms = []
        for term1 in fact1_terms:
            for term2 in fact2_terms:
                terms.append(term1 * term2)
        return terms
    elif isinstance(expr, ufl.indexed.Indexed):
        ops = expr.operands()
        assert(len(ops) == 2)
        return [ufl.indexed.Indexed(term, ops[1]) for term in expand_expr(ops[0])]
    elif isinstance(expr, ufl.tensors.ComponentTensor):
        ops = expr.operands()
        assert(len(ops) == 2)
        return [ufl.tensors.ComponentTensor(term, ops[1]) for term in expand_expr(ops[0])]
    elif isinstance(expr, ufl.algebra.Division):
        ops = expr.operands()
        assert(len(ops) == 2)
        return [ufl.algebra.Division(term, ops[1]) for term in expand_expr(ops[0])]
    elif isinstance(expr, ufl.restriction.PositiveRestricted):
        ops = expr.operands()
        assert(len(ops) == 1)
        return [ufl.restriction.PositiveRestricted(term) for term in expand_expr(ops[0])]
    elif isinstance(expr, ufl.restriction.NegativeRestricted):
        ops = expr.operands()
        assert(len(ops) == 1)
        return [ufl.restriction.NegativeRestricted(term) for term in expand_expr(ops[0])]
    elif isinstance(expr, ufl.differentiation.Grad):
        ops = expr.operands()
        assert(len(ops) == 1)
        return [ufl.differentiation.Grad(term) for term in expand_expr(ops[0])]
    elif isinstance(expr, (ufl.tensoralgebra.Dot, ufl.tensoralgebra.Inner,
                           ufl.differentiation.CoefficientDerivative,
                           ufl.differentiation.VariableDerivative)):
        return expand_expr(expand(expr))
    # Expr types white-list. These cannot be expanded.
    elif isinstance(expr, (ufl.constantvalue.ConstantValue, ufl.argument.Argument,
                           dolfin.Expression, dolfin.Function, dolfin.Constant,
                           ufl.geometry.Circumradius, ufl.algebra.Abs,
                           ufl.geometry.FacetNormal, ufl.mathfunctions.Sqrt,
                           ufl.classes.Variable, ufl.mathfunctions.Exp,
                           ufl.algebra.Power, ufl.indexing.MultiIndex,
                           ufl.classes.Label)):
        return [expr]
    # Expr types grey-list. It might be possible to expand these, but just
    # ignore them at present.
    elif isinstance(expr, (ufl.tensors.ListTensor, ufl.classes.Conditional,
                           ufl.indexsum.IndexSum)):
        return [expr]
    else:
        dolfin.warning("Expr type %s not expanded by expand_expr" % expr.__class__)
        return [expr]
def trace_mat_no_restrict(V, TV, trace_mesh=None):
    '''The first cell connected to the facet gets to set the values of TV'''
    mesh = V.mesh()

    if trace_mesh is None:
        trace_mesh = TV.mesh()

    fdim = trace_mesh.topology().dim()

    # Init/extract the mapping
    try:
        assert get_entity_map(mesh, trace_mesh)
    except AssertionError:
        warning('Using non-conforming trace')
        return nonconforming_trace_mat(V, TV)

    # We can get it
    mapping = trace_mesh.parent_entity_map[mesh.id()][fdim]  # Map cell of TV to cells of V

    mesh.init(fdim, fdim + 1)
    f2c = mesh.topology()(fdim, fdim + 1)  # Facets of V to cell of V

    # The idea is to evaluate TV's degrees of freedom at basis functions
    # of V
    Tdmap = TV.dofmap()
    TV_dof = DegreeOfFreedom(TV)

    dmap = V.dofmap()
    V_basis_f = FEBasisFunction(V)

    # Rows
    visited_dofs = [False] * TV.dim()
    # Column values
    dof_values = np.zeros(V_basis_f.elm.space_dimension(), dtype='double')
    with petsc_serial_matrix(TV, V) as mat:

        for trace_cell in range(TV.mesh().num_cells()):
            TV_dof.cell = trace_cell
            trace_dofs = Tdmap.cell_dofs(trace_cell)

            # Figure out the dofs of V to use here. Does not matter which
            # cell of the connected ones we pick
            cell = f2c(mapping[trace_cell])[0]
            V_basis_f.cell = cell

            dofs = dmap.cell_dofs(cell)
            for local_T, dof_T in enumerate(trace_dofs):

                if visited_dofs[dof_T]:
                    continue
                else:
                    visited_dofs[dof_T] = True

                # Define trace dof
                TV_dof.dof = local_T

                # Eval at V basis functions
                for local, dof in enumerate(dofs):
                    # Set which basis foo
                    V_basis_f.dof = local
                    dof_values[local] = TV_dof.eval(V_basis_f)

                # Can fill the matrix now
                col_indices = np.array(dofs, dtype='int32')
                # Insert
                mat.setValues([dof_T], col_indices, dof_values,
                              PETSc.InsertMode.INSERT_VALUES)
    return mat
def matvec(self, b):
    from time import time
    from block.block_vec import block_vec
    from dolfin import info, Progress
    from ffc.log import log
    TRACE = 13  # dolfin.TRACE
    T = time()

    # If x and initial_guess are block_vecs, some of the blocks may be
    # scalars (although they are normally converted to vectors by bc
    # application). To be sure, call allocate() on them.
    if isinstance(b, block_vec):
        # Create a shallow copy to call allocate() on, to avoid changing the caller's copy of b
        b = block_vec(len(b), b.blocks)
        b.allocate(self.A, dim=0)

    if self.initial_guess:
        # Most (all?) solvers modify x, so make a copy to avoid changing
        # the caller's copy of x
        from block.block_util import copy
        x = copy(self.initial_guess)
        if isinstance(x, block_vec):
            x.allocate(self.A, dim=1)
    else:
        x = self.A.create_vec(dim=1)
        x.zero()

    try:
        log(TRACE, self.__class__.__name__ + ' solve of ' + str(self.A))
        if self.B != 1.0:
            log(TRACE, 'Using preconditioner: ' + str(self.B))
        progress = Progress(self.name, self.maxiter)
        if self.tolerance < 0:
            tolerance = -self.tolerance
            relative = True
        else:
            tolerance = self.tolerance
            relative = False
        x = self.method(self.B, self.AR, x, b, tolerance=tolerance,
                        relativeconv=self.relativeconv, maxiter=self.maxiter,
                        progress=progress, callback=self.callback,
                        **self.kwargs)
        del progress  # trigger final printout
    except Exception as e:
        from dolfin import warning
        warning("Error solving " + self.name)
        raise

    x, self.residuals, self.alphas, self.betas = x

    if self.tolerance == 0:
        msg = "done"
    elif self.converged:
        msg = "converged"
    else:
        msg = "NOT CONV."

    if self.show == 1:
        info('%s %s [iter=%2d, time=%.2fs, res=%.1e]'
             % (self.name, msg, self.iterations, time()-T, self.residuals[-1]))
    elif self.show >= 2:
        info('%s %s [iter=%2d, time=%.2fs, res=%.1e, true res=%.1e]'
             % (self.name, msg, self.iterations, time()-T,
                self.residuals[-1], (self.A*x-b).norm('l2')))
    if self.show == 3:
        from dolfin import MPI
        if MPI.rank(None) == 0:
            try:
                from matplotlib import pyplot
                pyplot.figure('%s convergence (show=3)' % self.name)
                pyplot.semilogy(self.residuals)
                pyplot.show(block=True)
            except:
                pass

    if self.R is not None:
        x = self.R * x

    if self.retain_guess:
        self.initial_guess = x

    if not self.converged and self.nonconvergence_is_fatal:
        raise RuntimeError('Not converged')

    return x
def test_stokes_noflow(gamma, Re, nu_interp, postprocessor):
    #set_log_level(WARNING)

    basename = postprocessor.basename
    label = "{}_{}_gamma_{}_Re_{:.0e}".format(basename, nu_interp, gamma, Re)

    c = postprocessor.get_coefficients()
    c[r"\nu_1"] = c[r"\rho_1"] / Re
    c[r"\nu_2"] = c[r"r_visc"] * c[r"\nu_1"]
    c[r"\nu_1"] /= c[r"\rho_0"] * c[r"V_0"] * c[r"L_0"]
    c[r"\nu_2"] /= c[r"\rho_0"] * c[r"V_0"] * c[r"L_0"]

    cc = wrap_coeffs_as_constants(c)
    nu = eval("nu_" + nu_interp)  # choose viscosity interpolation

    for level in range(1, 4):
        mesh, boundary_markers, pinpoint, periodic_bnd = create_domain(level)
        periodic_bnd = None
        W = create_mixed_space(mesh, periodic_boundary=periodic_bnd)
        bcs = create_bcs(W, boundary_markers,
                         periodic_boundary=periodic_bnd,
                         pinpoint=pinpoint)

        phi = create_fixed_vfract(mesh, c)

        # Create forms
        a, L = create_forms(W, rho(phi, cc), nu(phi, cc), c[r"g_a"],
                            boundary_markers, gamma)

        # Solve problem
        w = df.Function(W)
        A, b = df.assemble_system(a, L, bcs)
        solver = df.LUSolver("mumps")
        df.PETScOptions.set("fieldsplit_u_mat_mumps_icntl_14", 500)
        solver.set_operator(A)
        try:
            solver.solve(w.vector(), b)
        except:
            df.warning("Ooops! Something went wrong: {}".format(sys.exc_info()[0]))
            continue

        # Pre-process results
        v, p = w.split(True)
        v.rename("v", "velocity")
        p.rename("p", "pressure")

        V_dv = df.FunctionSpace(mesh, "DG", W.sub(0).ufl_element().degree() - 1)
        div_v = df.project(df.div(v), V_dv)
        div_v.rename("div_v", "velocity-divergence")
        D_22 = df.project(v.sub(1).dx(1), V_dv)

        p_h = create_hydrostatic_pressure(mesh, cc)
        #p_ref = df.project(p_h, W.sub(1).ufl_element())
        p_ref = df.project(p_h,
                           df.FunctionSpace(mesh, df.FiniteElement("CG", mesh.ufl_cell(), 4)))
        v_errL2, v_errH10, div_errL2, p_errL2 = compute_errornorms(v, div_v, p, p_ref)

        if nu_interp[:2] == "PW":
            V_nu = df.FunctionSpace(mesh, "DG", phi.ufl_element().degree())
        else:
            V_nu = phi.function_space()
        nu_0 = df.project(nu(phi, cc), V_nu)
        T_22 = df.project(2.0 * nu(phi, cc) * v.sub(1).dx(1), V_nu)

        # Save results
        make_cut = postprocessor._make_cut
        rs = dict(ndofs=W.dim(),
                  level=level,
                  h=mesh.hmin(),
                  r_dens=c[r"r_dens"],
                  r_visc=c[r"r_visc"],
                  gamma=gamma,
                  Re=Re,
                  nu_interp=nu_interp)
        rs[r"$v_2$"] = make_cut(v.sub(1))
        rs[r"$p$"] = make_cut(p)
        rs[r"$\phi$"] = make_cut(phi)
        rs[r"$D_{22}$"] = make_cut(D_22)
        rs[r"$T_{22}$"] = make_cut(T_22)
        rs[r"$\nu$"] = make_cut(nu_0)
        rs[r"$||\mathbf{v} - \mathbf{v}_h||_{L^2}$"] = v_errL2
        rs[r"$||\nabla (\mathbf{v} - \mathbf{v}_h)||_{L^2}$"] = v_errH10
        rs[r"$||\mathrm{div} \mathbf{v}_h||_{L^2}$"] = div_errL2
        rs[r"$||\mathbf{p} - \mathbf{p}_h||_{L^2}$"] = p_errL2
        print(label, level)

        # Send to postprocessor
        comm = mesh.mpi_comm()
        rank = df.MPI.rank(comm)
        postprocessor.add_result(rank, rs)

    # Plot results obtained in the last round
    outdir = os.path.join(postprocessor.outdir, "XDMFoutput")
    with df.XDMFFile(os.path.join(outdir, "v.xdmf")) as file:
        file.write(v, 0.0)
    with df.XDMFFile(os.path.join(outdir, "p.xdmf")) as file:
        file.write(p, 0.0)
    with df.XDMFFile(os.path.join(outdir, "phi.xdmf")) as file:
        file.write(phi, 0.0)
    with df.XDMFFile(os.path.join(outdir, "div_v.xdmf")) as file:
        file.write(div_v, 0.0)

    # Save results into a binary file
    filename = "results_{}.pickle".format(label)
    postprocessor.save_results(filename)

    # Flush plots as we now have data for all level values
    postprocessor.pop_items(["level", "h"])
    postprocessor.flush_plots()

    # Cleanup
    df.set_log_level(df.INFO)
    gc.collect()
def trace_mat_no_restrict(V, TV, trace_mesh=None, tag_data=None):
    '''The first cell connected to the facet gets to set the values of TV'''
    mesh = V.mesh()

    if trace_mesh is None:
        trace_mesh = TV.mesh()

    fdim = trace_mesh.topology().dim()

    # None means all
    if tag_data is None:
        try:
            marking_function = trace_mesh.marking_function
            tag_data = (marking_function, set(marking_function.array()))
        except AttributeError:
            tag_data = (MeshFunction('size_t', trace_mesh,
                                     trace_mesh.topology().dim(), 0),
                        set((0, )))
    trace_mesh_subdomains, tags = tag_data

    # Init/extract the mapping
    try:
        assert get_entity_map(mesh, trace_mesh, trace_mesh_subdomains, tags)
    except (AssertionError, IndexError):
        warning('Using non-conforming trace')
        # So non-conforming matrix returns PETSc.Mat
        return nonconforming_trace_mat(V, TV)

    if V.ufl_element().family() == 'HDiv Trace':
        assert V.ufl_element().degree() == 0
        # In this case
        return DLT_trace_mat(V, TV, trace_mesh=trace_mesh, tag_data=tag_data)

    # We can get it
    mapping = trace_mesh.parent_entity_map[mesh.id()][fdim]  # Map cell of TV to cells of V

    mesh.init(fdim, fdim + 1)
    f2c = mesh.topology()(fdim, fdim + 1)  # Facets of V to cell of V

    # The idea is to evaluate TV's degrees of freedom at basis functions of V
    Tdmap = TV.dofmap()
    TV_dof = DegreeOfFreedom(TV)

    dmap = V.dofmap()
    V_basis_f = FEBasisFunction(V)

    # Only look at tagged cells
    trace_cells = list(itertools.chain(*[
        map(operator.methodcaller('index'),
            SubsetIterator(trace_mesh_subdomains, tag))
        for tag in tags
    ]))

    ndofs_elm, nbasis_elm = TV_dof.elm.space_dimension(), V_basis_f.elm.space_dimension()
    local_values = np.zeros((nbasis_elm, ndofs_elm))

    if len(trace_cells) > 10_000:
        print(f'Trace mat {TV.ufl_element()} -> {V.ufl_element()}')
        trace_cells = tqdm.tqdm(trace_cells, total=len(trace_cells))

    rows, cols, values = [], [], []
    # DG spaces don't share rows between cells so we take advantage of
    # this in a special branch
    if TV.ufl_element().family() == 'Discontinuous Lagrange':
        for trace_cell in trace_cells:
            TV_dof.cell = trace_cell
            # Many rows at once
            trace_dofs = Tdmap.cell_dofs(trace_cell)
            # Figure out the dofs of V to use here. Does not matter which
            # cell of the connected ones we pick
            cell = f2c(mapping[trace_cell])[0]
            V_basis_f.cell = cell
            # Columns for the rows
            dofs = dmap.cell_dofs(cell)
            for local, dof in enumerate(dofs):
                # Set which basis foo
                V_basis_f.dof = local
                # Get all rows at once
                local_values[local][:] = TV_dof.eval_dofs(V_basis_f)
            # Indices for the filled piece
            rows_ = np.tile(trace_dofs, nbasis_elm)
            cols_ = np.repeat(dofs, ndofs_elm)

            rows.extend(rows_)
            cols.extend(cols_)
            values.extend(local_values.flat)
    # FIXME: Otherwise we need to take care of duplicate entries
    else:
        needs_fill = np.ones(TV.dim(), dtype=bool)
        for trace_cell in trace_cells:
            TV_dof.cell = trace_cell
            # Many rows at once
            trace_dofs = Tdmap.cell_dofs(trace_cell)
            # Don't add duplicates
            unseen = needs_fill[trace_dofs]  # Some will be true and
            # for the future ...
            needs_fill[trace_dofs[unseen]] = False
            # Figure out the dofs of V to use here. Does not matter which
            # cell of the connected ones we pick
            cell = f2c(mapping[trace_cell])[0]
            V_basis_f.cell = cell
            # Columns for the rows
            dofs = dmap.cell_dofs(cell)
            for local, dof in enumerate(dofs):
                # Set which basis foo
                V_basis_f.dof = local
                # Get all rows at once
                local_values[local][:] = TV_dof.eval_dofs(V_basis_f)
            # Indices for the filled piece
            rows_ = np.tile(trace_dofs[unseen], nbasis_elm)
            cols_ = np.repeat(dofs, sum(unseen))

            rows.extend(rows_)
            cols.extend(cols_)
            values.extend(local_values[:, unseen].flat)

    mat = csr_matrix((values, (rows, cols)), shape=(TV.dim(), V.dim()))

    return PETSc.Mat().createAIJ(comm=PETSc.COMM_WORLD,
                                 size=mat.shape,
                                 csr=(mat.indptr, mat.indices, mat.data))
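# The final conversion above is the standard scipy-CSR-to-PETSc handoff;
# a self-contained sketch of just that step (toy indices, serial run):
import numpy as np
from scipy.sparse import csr_matrix
from petsc4py import PETSc

rows, cols = [0, 1, 1], [0, 0, 2]
values = [1.0, 2.0, 3.0]
mat = csr_matrix((values, (rows, cols)), shape=(2, 3))

# PETSc AIJ is built directly from the CSR triplet (indptr, indices, data)
A = PETSc.Mat().createAIJ(comm=PETSc.COMM_WORLD,
                          size=mat.shape,
                          csr=(mat.indptr, mat.indices, mat.data))
A.assemble()
print(A.getValues(range(2), range(3)))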