def _rhs(self):
    from firedrake.assemble import create_assembly_callable

    u = function.Function(self.trial_space)
    b = function.Function(self.test_space)
    if isinstance(self.A.a, slate.TensorBase):
        expr = -self.A.a * slate.AssembledVector(u)
    else:
        expr = -ufl.action(self.A.a, u)
    return u, create_assembly_callable(expr, tensor=b), b
def _rhs(self):
    from firedrake.assemble import assemble

    u = function.Function(self.trial_space)
    b = function.Function(self.test_space)
    expr = -action(self.A.a, u)
    return u, functools.partial(assemble, expr, tensor=b, assembly_type="residual"), b
def __init__(self, a, row_bcs=[], col_bcs=[], fc_params=None, appctx=None):
    self.a = a
    self.aT = a.T if isinstance(self.a, slate.TensorBase) else adjoint(a)
    self.fc_params = fc_params
    self.appctx = appctx

    self.row_bcs = row_bcs
    self.col_bcs = col_bcs

    # create functions from test and trial space to help
    # with 1-form assembly
    test_space, trial_space = [a.arguments()[i].function_space() for i in (0, 1)]
    from firedrake import function
    self._y = function.Function(test_space)
    self._x = function.Function(trial_space)

    # These are temporary storage for holding the BC values during
    # matvec application: _xbc is for the action and _ybc is for the
    # transpose.
    if len(self.row_bcs) > 0:
        self._xbc = function.Function(trial_space)
    if len(self.col_bcs) > 0:
        self._ybc = function.Function(test_space)

    # Get size information from template vecs on test and trial spaces
    trial_vec = trial_space.dof_dset.layout_vec
    test_vec = test_space.dof_dset.layout_vec
    self.col_sizes = trial_vec.getSizes()
    self.row_sizes = test_vec.getSizes()
    self.block_size = (test_vec.getBlockSize(), trial_vec.getBlockSize())

    if isinstance(self.a, slate.TensorBase):
        self.action = self.a * slate.AssembledVector(self._x)
        self.actionT = self.aT * slate.AssembledVector(self._y)
    else:
        self.action = action(self.a, self._x)
        self.actionT = action(self.aT, self._y)

    from firedrake.assemble import create_assembly_callable
    self._assemble_action = create_assembly_callable(
        self.action, tensor=self._y,
        form_compiler_parameters=self.fc_params)
    self._assemble_actionT = create_assembly_callable(
        self.actionT, tensor=self._x,
        form_compiler_parameters=self.fc_params)
def CubedSphereMesh(radius, refinement_level=0, degree=1,
                    reorder=None, comm=COMM_WORLD):
    """Generate a cubed approximation to the surface of the sphere.

    :arg radius: The radius of the sphere to approximate.
    :kwarg refinement_level: optional number of refinements (0 is a cube).
    :kwarg degree: polynomial degree of coordinate space (defaults
        to 1: bilinear quads)
    :kwarg reorder: (optional), should the mesh be reordered?
    """
    if refinement_level < 0 or refinement_level % 1:
        raise RuntimeError("Number of refinements must be a non-negative integer")

    if degree < 1:
        raise ValueError("Mesh coordinate degree must be at least 1")

    cells, coords = _cubedsphere_cells_and_coords(radius, refinement_level)
    plex = mesh._from_cell_list(2, cells, coords, comm)

    m = mesh.Mesh(plex, dim=3, reorder=reorder)

    if degree > 1:
        new_coords = function.Function(functionspace.VectorFunctionSpace(m, "Q", degree))
        new_coords.interpolate(ufl.SpatialCoordinate(m))
        # "push out" to sphere
        new_coords.dat.data[:] *= (radius / np.linalg.norm(new_coords.dat.data, axis=1)).reshape(-1, 1)
        m = mesh.Mesh(new_coords)
    m._radius = radius
    return m
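# Usage sketch (illustrative, not part of the library source): build a
# biquadratic cubed-sphere mesh of radius 1. Assumes the public Firedrake
# namespace; the refinement level and degree are example values only.
from firedrake import CubedSphereMesh, FunctionSpace

m = CubedSphereMesh(1.0, refinement_level=4, degree=2)
V = FunctionSpace(m, "Q", 2)  # quadrilateral elements on the sphere surface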
def create_output(V, name=None):
    if isinstance(V, functionspaceimpl.WithGeometry):
        return function.Function(V, name=name)
    elif isinstance(V, function.Function):
        return V
    else:
        raise ValueError("Can't project into target object %r" % V)
def assemble_expression(expr, subset=None):
    """Evaluates UFL expressions on :class:`.Function`\s pointwise and
    assigns into a new :class:`.Function`."""
    result = function.Function(ExpressionWalker().walk(expr)[2])
    evaluate_expression(Assign(result, expr), subset)
    return result
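# Usage sketch (illustrative): pointwise evaluation of a UFL expression built
# from existing Functions, producing a new Function in the deduced space.
# `f` and `g` are assumed to be Functions on the same FunctionSpace.
h = assemble_expression(2 * f + g)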
def function_arg(self, g):
    '''Set the value of this boundary condition.'''
    if isinstance(g, function.Function) and g.function_space() != self._function_space:
        raise RuntimeError("%r is defined on incompatible FunctionSpace!" % g)
    if not isinstance(g, expression.Expression):
        try:
            # Bare constant?
            as_ufl(g)
        except UFLException:
            try:
                # List of bare constants? Convert to UFL expression
                g = as_ufl(as_tensor(g))
                if g.ufl_shape != self._function_space.shape:
                    raise ValueError("%r doesn't match the shape of the function space." % (g,))
            except UFLException:
                raise ValueError("%r is not a valid DirichletBC expression" % (g,))
    if isinstance(g, expression.Expression) or has_type(as_ufl(g), SpatialCoordinate):
        if isinstance(g, expression.Expression):
            self._expression_state = g._state
        try:
            g = function.Function(self._function_space).interpolate(g)
        # Not a point evaluation space, need to project onto V
        except NotImplementedError:
            g = projection.project(g, self._function_space)
    self._function_arg = g
    self._currently_zeroed = False
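# Usage sketch (illustrative, not library source): the setter above lets a
# DirichletBC value be a bare constant, an iterable of constants, or a UFL
# expression involving SpatialCoordinate (which gets interpolated or
# projected). `V` (a scalar space) and `mesh` are assumed to exist; the
# boundary marker ids are example values.
from firedrake import DirichletBC, SpatialCoordinate

bc0 = DirichletBC(V, 0.0, 1)       # bare constant on marker 1
x, y = SpatialCoordinate(mesh)
bc1 = DirichletBC(V, x + y, 2)     # spatial expression on marker 2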
def sanitise_input(v, V):
    if isinstance(v, expression.Expression):
        shape = v.value_shape()
        # Build a function space that supports PointEvaluation so that
        # we can interpolate into it.
        deg = max(as_tuple(V.ufl_element().degree()))

        if v.rank() == 0:
            fs = functionspace.FunctionSpace(V.mesh(), 'DG', deg+1)
        elif v.rank() == 1:
            fs = functionspace.VectorFunctionSpace(V.mesh(), 'DG', deg+1, dim=shape[0])
        else:
            fs = functionspace.TensorFunctionSpace(V.mesh(), 'DG', deg+1, shape=shape)
        f = function.Function(fs)
        f.interpolate(v)
        return f
    elif isinstance(v, function.Function):
        return v
    elif isinstance(v, ufl.classes.Expr):
        return v
    else:
        raise ValueError("Can't project from source object %r" % v)
def spatial_index(self):
    """Spatial index to quickly find which cell contains a given point."""

    from firedrake import function, functionspace
    from firedrake.parloops import par_loop, READ, RW

    gdim = self.ufl_cell().geometric_dimension()
    if gdim <= 1:
        info_red("libspatialindex does not support 1-dimension, falling back on brute force.")
        return None

    # Calculate the bounding boxes for all cells by running a kernel
    V = functionspace.VectorFunctionSpace(self, "DG", 0, dim=gdim)
    coords_min = function.Function(V)
    coords_max = function.Function(V)

    coords_min.dat.data.fill(np.inf)
    coords_max.dat.data.fill(-np.inf)

    kernel = """
    for (int d = 0; d < gdim; d++) {
        for (int i = 0; i < nodes_per_cell; i++) {
            f_min[0][d] = fmin(f_min[0][d], f[i][d]);
            f_max[0][d] = fmax(f_max[0][d], f[i][d]);
        }
    }
    """

    cell_node_list = self.coordinates.function_space().cell_node_list
    nodes_per_cell = len(cell_node_list[0])

    kernel = kernel.replace("gdim", str(gdim))
    kernel = kernel.replace("nodes_per_cell", str(nodes_per_cell))

    par_loop(kernel, ufl.dx, {'f': (self.coordinates, READ),
                              'f_min': (coords_min, RW),
                              'f_max': (coords_max, RW)})

    # Reorder bounding boxes according to the cell indices we use
    column_list = V.cell_node_list.reshape(-1)
    coords_min = self._order_data_by_cell_index(column_list, coords_min.dat.data_ro_with_halos)
    coords_max = self._order_data_by_cell_index(column_list, coords_max.dat.data_ro_with_halos)

    # Build spatial index
    return spatialindex.from_regions(coords_min, coords_max)
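# Usage sketch (illustrative): the spatial index built above backs point
# location queries such as Mesh.locate_cell. The mesh resolution and query
# point are example values.
from firedrake import UnitSquareMesh

mesh = UnitSquareMesh(8, 8)
cell = mesh.locate_cell([0.25, 0.75])   # cell number containing the point, or None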
def IcosahedralSphereMesh(radius, refinement_level=0, degree=1, reorder=None):
    """Generate an icosahedral approximation to the surface of the sphere.

    :arg radius: The radius of the sphere to approximate.
         For a radius R the edge length of the underlying
         icosahedron will be.

         .. math::

             a = \\frac{R}{\\sin(2 \\pi / 5)}

    :kwarg refinement_level: optional number of refinements (0 is an
        icosahedron).
    :kwarg degree: polynomial degree of coordinate space (defaults
        to 1: flat triangles)
    :kwarg reorder: (optional), should the mesh be reordered?
    """
    if degree < 1:
        raise ValueError("Mesh coordinate degree must be at least 1")
    from math import sqrt
    phi = (1 + sqrt(5)) / 2
    # vertices of an icosahedron with an edge length of 2
    vertices = np.array([[-1, phi, 0],
                         [1, phi, 0],
                         [-1, -phi, 0],
                         [1, -phi, 0],
                         [0, -1, phi],
                         [0, 1, phi],
                         [0, -1, -phi],
                         [0, 1, -phi],
                         [phi, 0, -1],
                         [phi, 0, 1],
                         [-phi, 0, -1],
                         [-phi, 0, 1]])
    # faces of the base icosahedron
    faces = np.array([[0, 11, 5],
                      [0, 5, 1],
                      [0, 1, 7],
                      [0, 7, 10],
                      [0, 10, 11],
                      [1, 5, 9],
                      [5, 11, 4],
                      [11, 10, 2],
                      [10, 7, 6],
                      [7, 1, 8],
                      [3, 9, 4],
                      [3, 4, 2],
                      [3, 2, 6],
                      [3, 6, 8],
                      [3, 8, 9],
                      [4, 9, 5],
                      [2, 4, 11],
                      [6, 2, 10],
                      [8, 6, 7],
                      [9, 8, 1]], dtype=np.int32)

    plex = mesh._from_cell_list(2, faces, vertices)
    plex.setRefinementUniform(True)
    for i in range(refinement_level):
        plex = plex.refine()

    coords = plex.getCoordinatesLocal().array.reshape(-1, 3)
    scale = (radius / np.linalg.norm(coords, axis=1)).reshape(-1, 1)
    coords *= scale
    m = mesh.Mesh(plex, dim=3, reorder=reorder)
    if degree > 1:
        new_coords = function.Function(
            functionspace.VectorFunctionSpace(m, "CG", degree))
        new_coords.interpolate(expression.Expression(("x[0]", "x[1]", "x[2]")))
        # "push out" to sphere
        new_coords.dat.data[:] *= (radius / np.linalg.norm(new_coords.dat.data, axis=1)).reshape(-1, 1)
        m = mesh.Mesh(new_coords)
    m._icosahedral_sphere = radius
    return m
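# Usage sketch (illustrative): an icosahedral sphere mesh of radius 2 with
# three uniform refinements and flat (degree-1) triangles.
from firedrake import IcosahedralSphereMesh

m = IcosahedralSphereMesh(2.0, refinement_level=3)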
def _coordinates_function(self):
    """The :class:`.Function` containing the coordinates of this mesh."""
    import firedrake.functionspace as functionspace
    import firedrake.function as function

    self.init()
    coordinates_fs = self._coordinates.function_space()
    V = functionspace.WithGeometry(coordinates_fs, self)
    f = function.Function(V, val=self._coordinates)
    return f
def _Abcs(self):
    """A function storing the action of the operator on a zero Function
    satisfying the BCs.

    Used in the presence of BCs.
    """
    b = function.Function(self._W)
    for bc in self.A.bcs:
        bc.apply(b)
    from firedrake.assemble import _assemble
    return _assemble(ufl.action(self.A.a, b))
def __init__(self, problems):
    problems = as_tuple(problems)
    self._problems = problems
    # Build the jacobian with the correct sparsity pattern.  Note
    # that since matrix assembly is lazy this doesn't actually
    # force an additional assembly of the matrix since in
    # form_jacobian we call assemble again which drops this
    # computation on the floor.
    from firedrake.assemble import assemble
    self._jacs = tuple(assemble(problem.J, bcs=problem.bcs,
                                form_compiler_parameters=problem.form_compiler_parameters,
                                nest=problem._nest)
                       for problem in problems)
    if problems[-1].Jp is not None:
        self._pjacs = tuple(assemble(problem.Jp, bcs=problem.bcs,
                                     form_compiler_parameters=problem.form_compiler_parameters,
                                     nest=problem._nest)
                            for problem in problems)
    else:
        self._pjacs = self._jacs
    # Function to hold current guess
    self._xs = tuple(function.Function(problem.u) for problem in problems)
    self.Fs = tuple(ufl.replace(problem.F, {problem.u: x})
                    for problem, x in zip(problems, self._xs))
    self.Js = tuple(ufl.replace(problem.J, {problem.u: x})
                    for problem, x in zip(problems, self._xs))
    if problems[-1].Jp is not None:
        self.Jps = tuple(ufl.replace(problem.Jp, {problem.u: x})
                         for problem, x in zip(problems, self._xs))
    else:
        self.Jps = tuple(None for _ in problems)
    self._Fs = tuple(function.Function(F.arguments()[0].function_space())
                     for F in self.Fs)
    self._jacobians_assembled = [False for _ in problems]
def _build_monolithic_basis(self):
    """Build a basis for the complete mixed space.

    The monolithic basis is formed by the cartesian product of the
    bases forming each sub part.
    """
    from itertools import product
    bvecs = [[None] for _ in self]
    # Get the complete list of basis vectors for each component in
    # the mixed basis.
    for idx, basis in enumerate(self):
        if isinstance(basis, VectorSpaceBasis):
            v = []
            if basis._constant:
                v = [function.Function(self._function_space[idx]).assign(1)]
            bvecs[idx] = basis._vecs + v

    # Basis for mixed space is cartesian product of all the basis
    # vectors we just made.
    allbvecs = [x for x in product(*bvecs)]

    vecs = [function.Function(self._function_space) for _ in allbvecs]

    # Build the functions representing the monolithic basis.
    for vidx, bvec in enumerate(allbvecs):
        for idx, b in enumerate(bvec):
            if b:
                vecs[vidx].sub(idx).assign(b)
    for v in vecs:
        v /= v.dat.norm

    self._vecs = vecs
    self._petsc_vecs = []
    for v in self._vecs:
        with v.dat.vec_ro as v_:
            self._petsc_vecs.append(v_)
    self._nullspace = PETSc.NullSpace().create(constant=False,
                                               vectors=self._petsc_vecs)
def _Abcs(self):
    """A function storing the action of the operator on a zero Function
    satisfying the BCs.

    Used in the presence of BCs.
    """
    b = function.Function(self._W)
    for bc in self.A.bcs:
        bc.apply(b)
    from firedrake.assemble import _assemble
    if isinstance(self.A.a, slate.TensorBase):
        return _assemble(self.A.a * slate.AssembledVector(b))
    else:
        return _assemble(ufl.action(self.A.a, b))
def _build_monolithic_basis(self):
    r"""Build a basis for the complete mixed space.

    The monolithic basis is formed by the cartesian product of the
    bases forming each sub part.
    """
    self._vecs = []
    for idx, basis in enumerate(self):
        if isinstance(basis, VectorSpaceBasis):
            vecs = basis._vecs
            if basis._constant:
                vecs = vecs + (function.Function(self._function_space[idx]).assign(1), )
            for vec in vecs:
                mvec = function.Function(self._function_space)
                mvec.sub(idx).assign(vec)
                self._vecs.append(mvec)

    self._petsc_vecs = []
    for v in self._vecs:
        with v.dat.vec_ro as v_:
            self._petsc_vecs.append(v_)

    # orthonormalize:
    basis = self._petsc_vecs
    for i, vec in enumerate(basis):
        alphas = []
        for vec_ in basis[:i]:
            alphas.append(vec.dot(vec_))
        for alpha, vec_ in zip(alphas, basis[:i]):
            vec.axpy(-alpha, vec_)
        vec.normalize()

    self._nullspace = PETSc.NullSpace().create(constant=False,
                                               vectors=self._petsc_vecs,
                                               comm=self.comm)
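# Usage sketch (illustrative, not library source): a monolithic basis like the
# one assembled above is typically built from a MixedVectorSpaceBasis and
# passed to a solve, e.g. to remove the constant pressure mode of a Stokes
# problem. `W = V * Q`, `a`, `L` and `w` are assumed to be defined elsewhere.
from firedrake import VectorSpaceBasis, MixedVectorSpaceBasis, solve

nullspace = MixedVectorSpaceBasis(W, [W.sub(0), VectorSpaceBasis(constant=True)])
solve(a == L, w, nullspace=nullspace)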
def project(v, V, bcs=None, mesh=None,
            solver_parameters=None,
            form_compiler_parameters=None,
            name=None):
    """Project an :class:`.Expression` or :class:`.Function` into a :class:`.FunctionSpace`

    :arg v: the :class:`.Expression`, :class:`ufl.Expr` or
         :class:`.Function` to project
    :arg V: the :class:`.FunctionSpace` or :class:`.Function` to project into
    :arg bcs: boundary conditions to apply in the projection
    :arg mesh: the mesh to project into
    :arg solver_parameters: parameters to pass to the solver used when
         projecting.
    :arg form_compiler_parameters: parameters to the form compiler
    :arg name: name of the resulting :class:`.Function`

    If ``V`` is a :class:`.Function` then ``v`` is projected into
    ``V`` and ``V`` is returned. If ``V`` is a :class:`.FunctionSpace`
    then ``v`` is projected into a new :class:`.Function` and that
    :class:`.Function` is returned.

    The ``bcs``, ``mesh`` and ``form_compiler_parameters`` are
    currently ignored."""
    from firedrake import function

    if isinstance(V, functionspace.FunctionSpaceBase):
        ret = function.Function(V, name=name)
    elif isinstance(V, function.Function):
        ret = V
        V = V.function_space()
    else:
        raise RuntimeError(
            'Can only project into functions and function spaces, not %r' % type(V))

    if isinstance(v, expression.Expression):
        shape = v.value_shape()
        # Build a function space that supports PointEvaluation so that
        # we can interpolate into it.
        if isinstance(V.ufl_element().degree(), tuple):
            deg = max(V.ufl_element().degree())
        else:
            deg = V.ufl_element().degree()

        if v.rank() == 0:
            fs = functionspace.FunctionSpace(V.mesh(), 'DG', deg + 1)
        elif v.rank() == 1:
            fs = functionspace.VectorFunctionSpace(V.mesh(), 'DG', deg + 1, dim=shape[0])
        else:
            fs = functionspace.TensorFunctionSpace(V.mesh(), 'DG', deg + 1, shape=shape)
        f = function.Function(fs)
        f.interpolate(v)
        v = f
    elif isinstance(v, function.Function):
        if v.function_space().mesh() != ret.function_space().mesh():
            raise RuntimeError("Can't project between mismatching meshes")
    elif not isinstance(v, ufl.core.expr.Expr):
        raise RuntimeError(
            "Can only project from expressions and functions, not %r" % type(v))

    if v.ufl_shape != ret.ufl_shape:
        raise RuntimeError(
            'Shape mismatch between source %s and target function spaces %s in project'
            % (v.ufl_shape, ret.ufl_shape))

    p = ufl_expr.TestFunction(V)
    q = ufl_expr.TrialFunction(V)
    a = ufl.inner(p, q) * ufl.dx(domain=V.mesh())
    L = ufl.inner(p, v) * ufl.dx(domain=V.mesh())

    # Default to 1e-8 relative tolerance
    if solver_parameters is None:
        solver_parameters = {'ksp_type': 'cg', 'ksp_rtol': 1e-8}
    else:
        solver_parameters.setdefault('ksp_type', 'cg')
        solver_parameters.setdefault('ksp_rtol', 1e-8)

    _solve(a == L, ret, bcs=bcs,
           solver_parameters=solver_parameters,
           form_compiler_parameters=form_compiler_parameters)
    return ret
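# Usage sketch (illustrative): project a UFL expression into a P1 space,
# tightening the default CG tolerance set above. `mesh` is assumed to exist.
from firedrake import FunctionSpace, SpatialCoordinate, sin, pi, project

V = FunctionSpace(mesh, "CG", 1)
x, y = SpatialCoordinate(mesh)
f = project(sin(pi * x) * sin(pi * y), V,
            solver_parameters={"ksp_rtol": 1e-10})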
def _assemble(f, tensor=None, bcs=None, form_compiler_parameters=None, inverse=False, mat_type=None, sub_mat_type=None, appctx={}, options_prefix=None, collect_loops=False, allocate_only=False): """Assemble the form or Slate expression f and return a Firedrake object representing the result. This will be a :class:`float` for 0-forms/rank-0 Slate tensors, a :class:`.Function` for 1-forms/rank-1 Slate tensors and a :class:`.Matrix` for 2-forms/rank-2 Slate tensors. :arg bcs: A tuple of :class`.DirichletBC`\s to be applied. :arg tensor: An existing tensor object into which the form should be assembled. If this is not supplied, a new tensor will be created for the purpose. :arg form_compiler_parameters: (optional) dict of parameters to pass to the form compiler. :arg inverse: (optional) if f is a 2-form, then assemble the inverse of the local matrices. :arg mat_type: (optional) type for assembled matrices, one of "nest", "aij", "baij", or "matfree". :arg sub_mat_type: (optional) type for assembled sub matrices inside a "nest" matrix. One of "aij" or "baij". :arg appctx: Additional information to hang on the assembled matrix if an implicit matrix is requested (mat_type "matfree"). :arg options_prefix: An options prefix for the PETSc matrix (ignored if not assembling a bilinear form). """ if mat_type is None: mat_type = parameters.parameters["default_matrix_type"] if mat_type not in ["matfree", "aij", "baij", "nest"]: raise ValueError("Unrecognised matrix type, '%s'" % mat_type) if sub_mat_type is None: sub_mat_type = parameters.parameters["default_sub_matrix_type"] if sub_mat_type not in ["aij", "baij"]: raise ValueError("Invalid submatrix type, '%s' (not 'aij' or 'baij')", sub_mat_type) if form_compiler_parameters: form_compiler_parameters = form_compiler_parameters.copy() else: form_compiler_parameters = {} form_compiler_parameters["assemble_inverse"] = inverse topology = f.ufl_domains()[0].topology for m in f.ufl_domains(): # Ensure mesh is "initialised" (could have got here without # building a functionspace (e.g. if integrating a constant)). 
m.init() if m.topology != topology: raise NotImplementedError( "All integration domains must share a mesh topology.") for o in chain(f.arguments(), f.coefficients()): domain = o.ufl_domain() if domain is not None and domain.topology != topology: raise NotImplementedError( "Assembly with multiple meshes not supported.") if isinstance(f, slate.TensorBase): kernels = slac.compile_expression( f, tsfc_parameters=form_compiler_parameters) integral_types = [kernel.kinfo.integral_type for kernel in kernels] else: kernels = tsfc_interface.compile_form( f, "form", parameters=form_compiler_parameters, inverse=inverse) integral_types = [ integral.integral_type() for integral in f.integrals() ] rank = len(f.arguments()) is_mat = rank == 2 is_vec = rank == 1 if any((coeff.function_space() and coeff.function_space().component is not None) for coeff in f.coefficients()): raise NotImplementedError( "Integration of subscripted VFS not yet implemented") if inverse and rank != 2: raise ValueError("Can only assemble the inverse of a 2-form") zero_tensor = lambda: None if is_mat: matfree = mat_type == "matfree" nest = mat_type == "nest" if nest: baij = sub_mat_type == "baij" else: baij = mat_type == "baij" if matfree: # intercept matrix-free matrices here if inverse: raise NotImplementedError( "Inverse not implemented with matfree") if collect_loops: raise NotImplementedError("Can't collect loops with matfree") if tensor is None: return matrix.ImplicitMatrix( f, bcs, fc_params=form_compiler_parameters, appctx=appctx, options_prefix=options_prefix) if not isinstance(tensor, matrix.ImplicitMatrix): raise ValueError("Expecting implicit matrix with matfree") tensor.assemble() return tensor test, trial = f.arguments() map_pairs = [] cell_domains = [] exterior_facet_domains = [] interior_facet_domains = [] if tensor is None: # For horizontal facets of extruded meshes, the corresponding domain # in the base mesh is the cell domain. Hence all the maps used for top # bottom and interior horizontal facets will use the cell to dofs map # coming from the base mesh as a starting point for the actual dynamic map # computation. for integral_type in integral_types: if integral_type == "cell": cell_domains.append(op2.ALL) elif integral_type == "exterior_facet": exterior_facet_domains.append(op2.ALL) elif integral_type == "interior_facet": interior_facet_domains.append(op2.ALL) elif integral_type == "exterior_facet_bottom": cell_domains.append(op2.ON_BOTTOM) elif integral_type == "exterior_facet_top": cell_domains.append(op2.ON_TOP) elif integral_type == "exterior_facet_vert": exterior_facet_domains.append(op2.ALL) elif integral_type == "interior_facet_horiz": cell_domains.append(op2.ON_INTERIOR_FACETS) elif integral_type == "interior_facet_vert": interior_facet_domains.append(op2.ALL) else: raise ValueError('Unknown integral type "%s"' % integral_type) # To avoid an extra check for extruded domains, the maps that are being passed in # are DecoratedMaps. For the non-extruded case the DecoratedMaps don't restrict the # space over which we iterate as the domains are dropped at Sparsity construction # time. In the extruded case the cell domains are used to identify the regions of the # mesh which require allocation in the sparsity. 
if cell_domains: map_pairs.append( (op2.DecoratedMap(test.cell_node_map(), cell_domains), op2.DecoratedMap(trial.cell_node_map(), cell_domains))) if exterior_facet_domains: map_pairs.append( (op2.DecoratedMap(test.exterior_facet_node_map(), exterior_facet_domains), op2.DecoratedMap(trial.exterior_facet_node_map(), exterior_facet_domains))) if interior_facet_domains: map_pairs.append( (op2.DecoratedMap(test.interior_facet_node_map(), interior_facet_domains), op2.DecoratedMap(trial.interior_facet_node_map(), interior_facet_domains))) map_pairs = tuple(map_pairs) # Construct OP2 Mat to assemble into fs_names = (test.function_space().name, trial.function_space().name) try: sparsity = op2.Sparsity((test.function_space().dof_dset, trial.function_space().dof_dset), map_pairs, "%s_%s_sparsity" % fs_names, nest=nest, block_sparse=baij) except SparsityFormatError: raise ValueError( "Monolithic matrix assembly is not supported for systems with R-space blocks." ) result_matrix = matrix.Matrix(f, bcs, mat_type, sparsity, numpy.float64, "%s_%s_matrix" % fs_names, options_prefix=options_prefix) tensor = result_matrix._M else: if isinstance(tensor, matrix.ImplicitMatrix): raise ValueError("Expecting matfree with implicit matrix") result_matrix = tensor # Replace any bcs on the tensor we passed in result_matrix.bcs = bcs tensor = tensor._M zero_tensor = tensor.zero if result_matrix.block_shape != (1, 1) and mat_type == "baij": raise ValueError( "BAIJ matrix type makes no sense for mixed spaces, use 'aij'") def mat(testmap, trialmap, i, j): m = testmap(test.function_space()[i]) n = trialmap(trial.function_space()[j]) maps = (m[op2.i[0]] if m else None, n[op2.i[1 if m else 0]] if n else None) return tensor[i, j](op2.INC, maps) result = lambda: result_matrix if allocate_only: result_matrix._assembly_callback = None return result_matrix elif is_vec: test = f.arguments()[0] if tensor is None: result_function = function.Function(test.function_space()) tensor = result_function.dat else: result_function = tensor tensor = result_function.dat zero_tensor = tensor.zero def vec(testmap, i): _testmap = testmap(test.function_space()[i]) return tensor[i](op2.INC, _testmap[op2.i[0]] if _testmap else None) result = lambda: result_function else: # 0-forms are always scalar if tensor is None: tensor = op2.Global(1, [0.0]) else: raise ValueError("Can't assemble 0-form into existing tensor") result = lambda: tensor.data[0] coefficients = f.coefficients() domains = f.ufl_domains() # These will be used to correctly interpret the "otherwise" # subdomain all_integer_subdomain_ids = defaultdict(list) for k in kernels: if k.kinfo.subdomain_id != "otherwise": all_integer_subdomain_ids[k.kinfo.integral_type].append( k.kinfo.subdomain_id) for k, v in all_integer_subdomain_ids.items(): all_integer_subdomain_ids[k] = tuple(sorted(v)) # Since applying boundary conditions to a matrix changes the # initial assembly, to support: # A = assemble(a) # bc.apply(A) # solve(A, ...) # we need to defer actually assembling the matrix until just # before we need it (when we know if there are any bcs to be # applied). To do so, we build a closure that carries out the # assembly and stash that on the Matrix object. When we hit a # solve, we funcall the closure with any bcs the Matrix now has to # assemble it. # In collecting loops mode, we collect the loops, and assume the # boundary conditions provided are the ones we want. It therefore # is only used inside residual and jacobian assembly. 
loops = [] def thunk(bcs): if collect_loops: loops.append(zero_tensor) else: zero_tensor() for indices, kinfo in kernels: kernel = kinfo.kernel integral_type = kinfo.integral_type domain_number = kinfo.domain_number subdomain_id = kinfo.subdomain_id coeff_map = kinfo.coefficient_map pass_layer_arg = kinfo.pass_layer_arg needs_orientations = kinfo.oriented needs_cell_facets = kinfo.needs_cell_facets needs_cell_sizes = kinfo.needs_cell_sizes m = domains[domain_number] subdomain_data = f.subdomain_data()[m] # Find argument space indices if is_mat: i, j = indices elif is_vec: i, = indices else: assert len(indices) == 0 sdata = subdomain_data.get(integral_type, None) if integral_type != 'cell' and sdata is not None: raise NotImplementedError( "subdomain_data only supported with cell integrals.") # Extract block from tensor and test/trial spaces # FIXME Ugly variable renaming required because functions are not # lexical closures in Python and we're writing to these variables if is_mat and result_matrix.block_shape > (1, 1): tsbc = [] trbc = [] # Unwind ComponentFunctionSpace to check for matching BCs for bc in bcs: fs = bc.function_space() if fs.component is not None: fs = fs.parent if fs.index == i: tsbc.append(bc) if fs.index == j: trbc.append(bc) elif is_mat: tsbc, trbc = bcs, bcs # Now build arguments for the par_loop kwargs = {} # Some integrals require non-coefficient arguments at the # end (facet number information). extra_args = [] # Decoration for applying to matrix maps in extruded case decoration = None itspace = m.measure_set(integral_type, subdomain_id, all_integer_subdomain_ids) if integral_type == "cell": itspace = sdata or itspace if subdomain_id not in ["otherwise", "everywhere"] and \ sdata is not None: raise ValueError( "Cannot use subdomain data and subdomain_id") def get_map(x, bcs=None, decoration=None): return x.cell_node_map(bcs) elif integral_type in ("exterior_facet", "exterior_facet_vert"): extra_args.append(m.exterior_facets.local_facet_dat(op2.READ)) def get_map(x, bcs=None, decoration=None): return x.exterior_facet_node_map(bcs) elif integral_type in ("exterior_facet_top", "exterior_facet_bottom"): # In the case of extruded meshes with horizontal facet integrals, two # parallel loops will (potentially) get created and called based on the # domain id: interior horizontal, bottom or top. 
decoration = { "exterior_facet_top": op2.ON_TOP, "exterior_facet_bottom": op2.ON_BOTTOM }[integral_type] kwargs["iterate"] = decoration def get_map(x, bcs=None, decoration=None): map_ = x.cell_node_map(bcs) if decoration is not None: return op2.DecoratedMap(map_, decoration) return map_ elif integral_type in ("interior_facet", "interior_facet_vert"): extra_args.append(m.interior_facets.local_facet_dat(op2.READ)) def get_map(x, bcs=None, decoration=None): return x.interior_facet_node_map(bcs) elif integral_type == "interior_facet_horiz": decoration = op2.ON_INTERIOR_FACETS kwargs["iterate"] = decoration def get_map(x, bcs=None, decoration=None): map_ = x.cell_node_map(bcs) if decoration is not None: return op2.DecoratedMap(map_, decoration) return map_ else: raise ValueError("Unknown integral type '%s'" % integral_type) # Output argument if is_mat: tensor_arg = mat(lambda s: get_map(s, tsbc, decoration), lambda s: get_map(s, trbc, decoration), i, j) elif is_vec: tensor_arg = vec(lambda s: get_map(s), i) else: tensor_arg = tensor(op2.INC) coords = m.coordinates args = [ kernel, itspace, tensor_arg, coords.dat(op2.READ, get_map(coords)[op2.i[0]]) ] if needs_orientations: o = m.cell_orientations() args.append(o.dat(op2.READ, get_map(o)[op2.i[0]])) if needs_cell_sizes: o = m.cell_sizes args.append(o.dat(op2.READ, get_map(o)[op2.i[0]])) for n in coeff_map: c = coefficients[n] for c_ in c.split(): m_ = get_map(c_) args.append(c_.dat(op2.READ, m_ and m_[op2.i[0]])) if needs_cell_facets: assert integral_type == "cell" extra_args.append(m.cell_to_facets(op2.READ)) args.extend(extra_args) kwargs["pass_layer_arg"] = pass_layer_arg try: with collecting_loops(collect_loops): loops.append(op2.par_loop(*args, **kwargs)) except MapValueError: raise RuntimeError( "Integral measure does not match measure of all coefficients/arguments" ) # Must apply bcs outside loop over kernels because we may wish # to apply bcs to a block which is otherwise zero, and # therefore does not have an associated kernel. if bcs is not None and is_mat: for bc in bcs: fs = bc.function_space() # Evaluate this outwith a "collecting_loops" block, # since creation of the bc nodes actually can create a # par_loop. nodes = bc.nodes if len(fs) > 1: raise RuntimeError( """Cannot apply boundary conditions to full mixed space. Did you forget to index it?""" ) shape = result_matrix.block_shape with collecting_loops(collect_loops): for i in range(shape[0]): for j in range(shape[1]): # Set diagonal entries on bc nodes to 1 if the current # block is on the matrix diagonal and its index matches the # index of the function space the bc is defined on. 
if i != j: continue if fs.component is None and fs.index is not None: # Mixed, index (no ComponentFunctionSpace) if fs.index == i: loops.append(tensor[ i, j].set_local_diagonal_entries(nodes)) elif fs.component is not None: # ComponentFunctionSpace, check parent index if fs.parent.index is not None: # Mixed, index doesn't match if fs.parent.index != i: continue # Index matches loops.append( tensor[i, j].set_local_diagonal_entries( nodes, idx=fs.component)) elif fs.index is None: loops.append(tensor[ i, j].set_local_diagonal_entries(nodes)) else: raise RuntimeError("Unhandled BC case") if bcs is not None and is_vec: if len(bcs) > 0 and collect_loops: raise NotImplementedError( "Loop collection not handled in this case") for bc in bcs: bc.apply(result_function) if is_mat: # Queue up matrix assembly (after we've done all the other operations) loops.append(tensor.assemble()) return result() if collect_loops: thunk(bcs) return loops if is_mat: result_matrix._assembly_callback = thunk return result() else: return thunk(bcs)
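# Usage sketch (illustrative) of the deferred assembly described in the
# comments above: boundary conditions applied after assemble() are folded in
# when the matrix is first needed by a solve. `a`, `L`, `V` and `bc` are
# assumed to be defined elsewhere.
from firedrake import assemble, Function, solve

A = assemble(a)     # assembly thunk stashed on the Matrix, not yet executed
b = assemble(L)
bc.apply(A)         # recorded on A; no immediate re-assembly
u = Function(V)
solve(A, u, b)      # the thunk runs here, with the bc taken into account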
def __init__(self, a, row_bcs=[], col_bcs=[], fc_params=None, appctx=None):
    self.a = a
    self.aT = adjoint(a)
    self.fc_params = fc_params
    self.appctx = appctx

    # Collect all DirichletBC instances including
    # DirichletBCs applied to an EquationBC.
    # all bcs (DirichletBC, EquationBCSplit)
    self.bcs = row_bcs
    self.bcs_col = col_bcs
    self.row_bcs = tuple(bc for bc in itertools.chain(*row_bcs) if isinstance(bc, DirichletBC))
    self.col_bcs = tuple(bc for bc in itertools.chain(*col_bcs) if isinstance(bc, DirichletBC))

    # create functions from test and trial space to help
    # with 1-form assembly
    test_space, trial_space = [a.arguments()[i].function_space() for i in (0, 1)]
    from firedrake import function
    self._y = function.Function(test_space)
    self._x = function.Function(trial_space)

    # These are temporary storage for holding the BC values during
    # matvec application: _xbc is for the action and _ybc is for the
    # transpose.
    if len(self.bcs) > 0:
        self._xbc = function.Function(trial_space)
    if len(self.col_bcs) > 0:
        self._ybc = function.Function(test_space)

    # Get size information from template vecs on test and trial spaces
    trial_vec = trial_space.dof_dset.layout_vec
    test_vec = test_space.dof_dset.layout_vec
    self.col_sizes = trial_vec.getSizes()
    self.row_sizes = test_vec.getSizes()
    self.block_size = (test_vec.getBlockSize(), trial_vec.getBlockSize())

    self.action = action(self.a, self._x)
    self.actionT = action(self.aT, self._y)

    from firedrake.assemble import create_assembly_callable

    # For assembling action(f, self._x)
    self.bcs_action = []
    for bc in self.bcs:
        if isinstance(bc, DirichletBC):
            self.bcs_action.append(bc)
        elif isinstance(bc, EquationBCSplit):
            self.bcs_action.append(bc.reconstruct(action_x=self._x))

    self._assemble_action = create_assembly_callable(
        self.action, tensor=self._y, bcs=self.bcs_action,
        form_compiler_parameters=self.fc_params)

    # For assembling action(adjoint(f), self._y)
    # Sorted list of equation bcs
    self.objs_actionT = []
    for bc in self.bcs:
        self.objs_actionT += bc.sorted_equation_bcs()
    self.objs_actionT.append(self)

    # Each par_loop is to run with appropriate masks on self._y
    self._assemble_actionT = []
    # Deepest EquationBCs first
    for bc in self.bcs:
        for ebc in bc.sorted_equation_bcs():
            self._assemble_actionT.append(
                create_assembly_callable(
                    action(adjoint(ebc.f), self._y),
                    tensor=self._xbc, bcs=None,
                    form_compiler_parameters=self.fc_params))
    # Domain last
    self._assemble_actionT.append(
        create_assembly_callable(
            self.actionT,
            tensor=self._x if len(self.bcs) == 0 else self._xbc,
            bcs=None,
            form_compiler_parameters=self.fc_params))
def __init__(self, mesh, layers, layer_height=None, extrusion_type='uniform', kernel=None, gdim=None): # A cache of function spaces that have been built on this mesh import firedrake.function as function import firedrake.functionspace as functionspace self._cache = {} mesh.init() self._old_mesh = mesh if layers < 1: raise RuntimeError( "Must have at least one layer of extruded cells (not %d)" % layers) # All internal logic works with layers of base mesh (not layers of cells) self._layers = layers + 1 self.parent = mesh.parent self.uid = mesh.uid self.name = mesh.name self._plex = mesh._plex self._plex_renumbering = mesh._plex_renumbering self._cell_numbering = mesh._cell_numbering self._entity_classes = mesh._entity_classes interior_f = self._old_mesh.interior_facets self._interior_facets = _Facets(self, interior_f.classes, "interior", interior_f.facet_cell, interior_f.local_facet_number) exterior_f = self._old_mesh.exterior_facets self._exterior_facets = _Facets( self, exterior_f.classes, "exterior", exterior_f.facet_cell, exterior_f.local_facet_number, exterior_f.markers, unique_markers=exterior_f.unique_markers) self.ufl_cell_element = ufl.FiniteElement("Lagrange", domain=mesh.ufl_cell(), degree=1) self.ufl_interval_element = ufl.FiniteElement("Lagrange", domain=ufl.Cell( "interval", 1), degree=1) self.fiat_base_element = fiat_utils.fiat_from_ufl_element( self.ufl_cell_element) self.fiat_vert_element = fiat_utils.fiat_from_ufl_element( self.ufl_interval_element) fiat_element = FIAT.tensor_finite_element.TensorFiniteElement( self.fiat_base_element, self.fiat_vert_element) if extrusion_type == "uniform": # *must* add a new dimension self._ufl_cell = ufl.OuterProductCell( mesh.ufl_cell(), ufl.Cell("interval", 1), gdim=mesh.ufl_cell().geometric_dimension() + 1) elif extrusion_type in ("radial", "radial_hedgehog"): # do not allow radial extrusion if tdim = gdim if mesh.ufl_cell().geometric_dimension() == mesh.ufl_cell( ).topological_dimension(): raise RuntimeError( "Cannot radially-extrude a mesh with equal geometric and topological dimension" ) # otherwise, all is fine, so make cell self._ufl_cell = ufl.OuterProductCell(mesh.ufl_cell(), ufl.Cell("interval", 1)) else: # check for kernel if kernel is None: raise RuntimeError( "If the custom extrusion_type is used, a kernel must be provided" ) # otherwise, use the gdim that was passed in if gdim is None: raise RuntimeError( "The geometric dimension of the mesh must be specified if a custom extrusion kernel is used" ) self._ufl_cell = ufl.OuterProductCell(mesh.ufl_cell(), ufl.Cell("interval", 1), gdim=gdim) self._ufl_domain = ufl.Domain(self.ufl_cell(), data=self) flat_temp = fiat_utils.FlattenedElement(fiat_element) # Calculated dofs_per_column from flattened_element and layers. # The mirrored elements have to be counted only once. # Then multiply by layers and layers - 1 accordingly. 
self.dofs_per_column = eutils.compute_extruded_dofs( fiat_element, flat_temp.entity_dofs(), layers) # Compute Coordinates of the extruded mesh if layer_height is None: # Default to unit layer_height = 1.0 / layers if extrusion_type == 'radial_hedgehog': hfamily = "DG" else: hfamily = mesh.coordinates.element().family() hdegree = mesh.coordinates.element().degree() self._coordinate_fs = functionspace.VectorFunctionSpace(self, hfamily, hdegree, vfamily="CG", vdegree=1) self.coordinates = function.Function(self._coordinate_fs) self._ufl_domain = ufl.Domain(self.coordinates) eutils.make_extruded_coords(self, layer_height, extrusion_type=extrusion_type, kernel=kernel) if extrusion_type == "radial_hedgehog": fs = functionspace.VectorFunctionSpace(self, "CG", hdegree, vfamily="CG", vdegree=1) self.radial_coordinates = function.Function(fs) eutils.make_extruded_coords(self, layer_height, extrusion_type="radial", output_coords=self.radial_coordinates) # Build a new ufl element for this function space with the # correct domain. This is necessary since this function space # is in the cache and will be picked up by later # VectorFunctionSpace construction. self._coordinate_fs._ufl_element = self._coordinate_fs.ufl_element( ).reconstruct(domain=self.ufl_domain()) # HACK alert! # Replace coordinate Function by one that has a real domain on it (but don't copy values) self.coordinates = function.Function(self._coordinate_fs, val=self.coordinates.dat) # Add subdomain_data to the measure objects we store with # the mesh. These are weakrefs for consistency with the # "global" measure objects self._dx = ufl.Measure('cell', subdomain_data=weakref.ref(self.coordinates)) self._ds = ufl.Measure('exterior_facet', subdomain_data=weakref.ref(self.coordinates)) self._dS = ufl.Measure('interior_facet', subdomain_data=weakref.ref(self.coordinates)) self._ds_t = ufl.Measure('exterior_facet_top', subdomain_data=weakref.ref(self.coordinates)) self._ds_b = ufl.Measure('exterior_facet_bottom', subdomain_data=weakref.ref(self.coordinates)) self._ds_v = ufl.Measure('exterior_facet_vert', subdomain_data=weakref.ref(self.coordinates)) self._dS_h = ufl.Measure('interior_facet_horiz', subdomain_data=weakref.ref(self.coordinates)) self._dS_v = ufl.Measure('interior_facet_vert', subdomain_data=weakref.ref(self.coordinates)) # Set the subdomain_data on all the default measures to this # coordinate field. We don't set the domain on the measure # since this causes an uncollectable reference in the global # space (dx is global). Furthermore, it's never used anyway. for measure in [ ufl.ds, ufl.dS, ufl.dx, ufl.ds_t, ufl.ds_b, ufl.ds_v, ufl.dS_h, ufl.dS_v ]: measure._subdomain_data = weakref.ref(self.coordinates)
def __init__(self, problem, mat_type, pmat_type, appctx=None,
             pre_jacobian_callback=None, pre_function_callback=None,
             options_prefix=None):
    from firedrake.assemble import allocate_matrix, create_assembly_callable
    if pmat_type is None:
        pmat_type = mat_type
    self.mat_type = mat_type
    self.pmat_type = pmat_type

    matfree = mat_type == 'matfree'
    pmatfree = pmat_type == 'matfree'

    self._problem = problem
    self._pre_jacobian_callback = pre_jacobian_callback
    self._pre_function_callback = pre_function_callback

    fcp = problem.form_compiler_parameters
    # Function to hold current guess
    self._x = problem.u

    if appctx is None:
        appctx = {}

    if matfree or pmatfree:
        # A split context will already get the full state.
        # TODO, a better way of doing this.
        # Now we don't have a temporary state inside the snes
        # context we could just require the user to pass in the
        # full state on the outside.
        appctx.setdefault("state", self._x)

    self.appctx = appctx
    self.matfree = matfree
    self.pmatfree = pmatfree
    self.F = problem.F
    self.J = problem.J

    self._jac = allocate_matrix(self.J, bcs=problem.bcs,
                                form_compiler_parameters=fcp,
                                mat_type=mat_type,
                                appctx=appctx,
                                options_prefix=options_prefix)
    self._assemble_jac = create_assembly_callable(
        self.J, tensor=self._jac, bcs=problem.bcs,
        form_compiler_parameters=fcp, mat_type=mat_type)

    self.is_mixed = self._jac.block_shape != (1, 1)

    if mat_type != pmat_type or problem.Jp is not None:
        # Need separate pmat if either Jp is different or we want
        # a different pmat type to the mat type.
        if problem.Jp is None:
            self.Jp = self.J
        else:
            self.Jp = problem.Jp
        self._pjac = allocate_matrix(self.Jp, bcs=problem.bcs,
                                     form_compiler_parameters=fcp,
                                     mat_type=pmat_type,
                                     appctx=appctx,
                                     options_prefix=options_prefix)

        self._assemble_pjac = create_assembly_callable(
            self.Jp, tensor=self._pjac, bcs=problem.bcs,
            form_compiler_parameters=fcp, mat_type=pmat_type)
    else:
        # pmat_type == mat_type and Jp is None
        self.Jp = None
        self._pjac = self._jac

    self._F = function.Function(self.F.arguments()[0].function_space())
    self._assemble_residual = create_assembly_callable(
        self.F, tensor=self._F,
        form_compiler_parameters=fcp)

    self._jacobian_assembled = False
    self._splits = {}
    self._coarse = None
    self._fine = None
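# Usage sketch (illustrative): this context is created behind the scenes when
# a variational solver is configured, e.g. with a matrix-free Jacobian.
# `F`, `u` and `bcs` are assumed to come from an existing problem definition.
from firedrake import NonlinearVariationalProblem, NonlinearVariationalSolver

problem = NonlinearVariationalProblem(F, u, bcs=bcs)
solver = NonlinearVariationalSolver(problem,
                                    solver_parameters={"mat_type": "matfree",
                                                       "ksp_type": "gmres",
                                                       "pc_type": "none"})
solver.solve()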
def _F(self):
    return function.Function(self.F.arguments()[0].function_space())
def split(self, fields): from firedrake import replace, as_vector, split from firedrake import NonlinearVariationalProblem as NLVP fields = tuple(tuple(f) for f in fields) splits = self._splits.get(tuple(fields)) if splits is not None: return splits splits = [] problem = self._problem splitter = ExtractSubBlock() for field in fields: F = splitter.split(problem.F, argument_indices=(field, )) J = splitter.split(problem.J, argument_indices=(field, field)) us = problem.u.split() V = F.arguments()[0].function_space() # Exposition: # We are going to make a new solution Function on the sub # mixed space defined by the relevant fields. # But the form may refer to the rest of the solution # anyway. # So we pull it apart and will make a new function on the # subspace that shares data. pieces = [us[i].dat for i in field] if len(pieces) == 1: val, = pieces subu = function.Function(V, val=val) subsplit = (subu, ) else: val = op2.MixedDat(pieces) subu = function.Function(V, val=val) # Split it apart to shove in the form. subsplit = split(subu) # Permutation from field indexing to indexing of pieces field_renumbering = dict([f, i] for i, f in enumerate(field)) vec = [] for i, u in enumerate(us): if i in field: # If this is a field we're keeping, get it from # the new function. Otherwise just point to the # old data. u = subsplit[field_renumbering[i]] if u.ufl_shape == (): vec.append(u) else: for idx in numpy.ndindex(u.ufl_shape): vec.append(u[idx]) # So now we have a new representation for the solution # vector in the old problem. For the fields we're going # to solve for, it points to a new Function (which wraps # the original pieces). For the rest, it points to the # pieces from the original Function. # IOW, we've reinterpreted our original mixed solution # function as being made up of some spaces we're still # solving for, and some spaces that have just become # coefficients in the new form. u = as_vector(vec) F = replace(F, {problem.u: u}) J = replace(J, {problem.u: u}) if problem.Jp is not None: Jp = splitter.split(problem.Jp, argument_indices=(field, field)) Jp = replace(Jp, {problem.u: u}) else: Jp = None bcs = [] for bc in problem.bcs: Vbc = bc.function_space() if Vbc.parent is not None and isinstance(Vbc.parent.ufl_element(), VectorElement): index = Vbc.parent.index else: index = Vbc.index cmpt = Vbc.component # TODO: need to test this logic if index in field: if len(field) == 1: W = V else: W = V.sub(field_renumbering[index]) if cmpt is not None: W = W.sub(cmpt) bcs.append(type(bc)(W, bc.function_arg, bc.sub_domain, method=bc.method)) new_problem = NLVP(F, subu, bcs=bcs, J=J, Jp=Jp, form_compiler_parameters=problem.form_compiler_parameters) new_problem._constant_jacobian = problem._constant_jacobian splits.append(type(self)(new_problem, mat_type=self.mat_type, pmat_type=self.pmat_type, appctx=self.appctx)) return self._splits.setdefault(tuple(fields), splits)
def _rhs(self):
    from firedrake.assemble import create_assembly_callable

    u = function.Function(self.trial_space)
    b = function.Function(self.test_space)
    expr = -action(self.A.a, u)
    return u, create_assembly_callable(expr, tensor=b), b
def _b(self):
    """A function to store the RHS.

    Used in the presence of BCs.
    """
    return function.Function(self._W)
def ExtrudedMesh(mesh, layers, layer_height=None, extrusion_type='uniform', kernel=None, gdim=None):
    """Build an extruded mesh from an input mesh

    :arg mesh: the unstructured base mesh
    :arg layers: number of extruded cell layers in the "vertical" direction.
    :arg layer_height: the layer height, assuming all layers are evenly
        spaced. If this is omitted, the value defaults to 1/layers
        (i.e. the extruded mesh has total height 1.0) unless a custom
        kernel is used.
    :arg extrusion_type: the algorithm to employ to calculate the extruded
        coordinates. One of "uniform", "radial", "radial_hedgehog" or
        "custom". See below.
    :arg kernel: a :class:`pyop2.Kernel` to produce coordinates for the
        extruded mesh. See :func:`~.make_extruded_coords` for more details.
    :arg gdim: number of spatial dimensions of the resulting mesh (this
        is only used if a custom kernel is provided)

    The various values of ``extrusion_type`` have the following meanings:

    ``"uniform"``
        the extruded mesh has an extra spatial dimension compared to the
        base mesh. The layers exist in this dimension only.

    ``"radial"``
        the extruded mesh has the same number of spatial dimensions as
        the base mesh; the cells are radially extruded outwards from the
        origin. This requires the base mesh to have topological dimension
        strictly smaller than geometric dimension.

    ``"radial_hedgehog"``
        similar to `radial`, but the cells are extruded in the direction
        of the outward-pointing cell normal (this produces a P1dgxP1
        coordinate field). In this case, a radially extruded coordinate
        field (generated with ``extrusion_type="radial"``) is available
        in the :attr:`radial_coordinates` attribute.

    ``"custom"``
        use a custom kernel to generate the extruded coordinates

    For more details see the :doc:`manual section on extruded meshes <extruded-meshes>`.
    """
    import firedrake.functionspace as functionspace
    import firedrake.function as function

    mesh.init()
    topology = ExtrudedMeshTopology(mesh.topology, layers)

    if extrusion_type == "uniform":
        pass
    elif extrusion_type in ("radial", "radial_hedgehog"):
        # do not allow radial extrusion if tdim = gdim
        if mesh.ufl_cell().geometric_dimension() == mesh.ufl_cell().topological_dimension():
            raise RuntimeError("Cannot radially-extrude a mesh with equal geometric and topological dimension")
    else:
        # check for kernel
        if kernel is None:
            raise RuntimeError("If the custom extrusion_type is used, a kernel must be provided")
        # otherwise, use the gdim that was passed in
        if gdim is None:
            raise RuntimeError("The geometric dimension of the mesh must be specified if a custom extrusion kernel is used")

    # Compute Coordinates of the extruded mesh
    if layer_height is None:
        # Default to unit
        layer_height = 1.0 / layers

    if extrusion_type == 'radial_hedgehog':
        hfamily = "DG"
    else:
        hfamily = mesh._coordinates.ufl_element().family()
    hdegree = mesh._coordinates.ufl_element().degree()

    if gdim is None:
        gdim = mesh.ufl_cell().geometric_dimension() + (extrusion_type == "uniform")
    coordinates_fs = functionspace.VectorFunctionSpace(topology, hfamily, hdegree, dim=gdim,
                                                       vfamily="Lagrange", vdegree=1)

    coordinates = function.CoordinatelessFunction(coordinates_fs, name="Coordinates")

    eutils.make_extruded_coords(topology, mesh._coordinates, coordinates, layer_height,
                                extrusion_type=extrusion_type, kernel=kernel)

    self = make_mesh_from_coordinates(coordinates)
    self._base_mesh = mesh

    if extrusion_type == "radial_hedgehog":
        fs = functionspace.VectorFunctionSpace(self, "CG", hdegree, dim=gdim,
                                               vfamily="CG", vdegree=1)
        self.radial_coordinates = function.Function(fs)
        eutils.make_extruded_coords(topology, mesh._coordinates, self.radial_coordinates,
                                    layer_height, extrusion_type="radial", kernel=kernel)

    return self
def split(self, fields): from firedrake import replace, as_vector, split from firedrake_ts.ts_solver import DAEProblem from firedrake.bcs import DirichletBC, EquationBC fields = tuple(tuple(f) for f in fields) splits = self._splits.get(tuple(fields)) if splits is not None: return splits splits = [] problem = self._problem splitter = ExtractSubBlock() for field in fields: F = splitter.split(problem.F, argument_indices=(field, )) J = splitter.split(problem.J, argument_indices=(field, field)) us = problem.u.split() V = F.arguments()[0].function_space() # Exposition: # We are going to make a new solution Function on the sub # mixed space defined by the relevant fields. # But the form may refer to the rest of the solution # anyway. # So we pull it apart and will make a new function on the # subspace that shares data. pieces = [us[i].dat for i in field] if len(pieces) == 1: (val, ) = pieces subu = function.Function(V, val=val) subsplit = (subu, ) else: val = op2.MixedDat(pieces) subu = function.Function(V, val=val) # Split it apart to shove in the form. subsplit = split(subu) # Permutation from field indexing to indexing of pieces field_renumbering = dict([f, i] for i, f in enumerate(field)) vec = [] for i, u in enumerate(us): if i in field: # If this is a field we're keeping, get it from # the new function. Otherwise just point to the # old data. u = subsplit[field_renumbering[i]] if u.ufl_shape == (): vec.append(u) else: for idx in numpy.ndindex(u.ufl_shape): vec.append(u[idx]) # So now we have a new representation for the solution # vector in the old problem. For the fields we're going # to solve for, it points to a new Function (which wraps # the original pieces). For the rest, it points to the # pieces from the original Function. # IOW, we've reinterpreted our original mixed solution # function as being made up of some spaces we're still # solving for, and some spaces that have just become # coefficients in the new form. u = as_vector(vec) F = replace(F, {problem.u: u}) J = replace(J, {problem.u: u}) if problem.Jp is not None: Jp = splitter.split(problem.Jp, argument_indices=(field, field)) Jp = replace(Jp, {problem.u: u}) else: Jp = None bcs = [] for bc in problem.bcs: if isinstance(bc, DirichletBC): bc_temp = bc.reconstruct( field=field, V=V, g=bc.function_arg, sub_domain=bc.sub_domain, method=bc.method, ) elif isinstance(bc, EquationBC): bc_temp = bc.reconstruct(field, V, subu, u) if bc_temp is not None: bcs.append(bc_temp) new_problem = DAEProblem( F, subu, problem.udot, problem.tspan, bcs=bcs, J=J, Jp=Jp, form_compiler_parameters=problem.form_compiler_parameters, ) new_problem._constant_jacobian = problem._constant_jacobian splits.append( type(self)( new_problem, mat_type=self.mat_type, pmat_type=self.pmat_type, appctx=self.appctx, transfer_manager=self.transfer_manager, )) return self._splits.setdefault(tuple(fields), splits)
def init_cell_orientations(self, expr): """Compute and initialise :attr:`cell_orientations` relative to a specified orientation. :arg expr: an :class:`.Expression` evaluated to produce a reference normal direction. """ import firedrake.function as function import firedrake.functionspace as functionspace if expr.value_shape()[0] != 3: raise NotImplementedError('Only implemented for 3-vectors') if self.ufl_cell() not in (ufl.Cell('triangle', 3), ufl.Cell("quadrilateral", 3), ufl.OuterProductCell(ufl.Cell('interval'), ufl.Cell('interval'), gdim=3)): raise NotImplementedError('Only implemented for triangles and quadrilaterals embedded in 3d') if hasattr(self.topology, '_cell_orientations'): raise RuntimeError("init_cell_orientations already called, did you mean to do so again?") v0 = lambda x: ast.Symbol("v0", (x,)) v1 = lambda x: ast.Symbol("v1", (x,)) n = lambda x: ast.Symbol("n", (x,)) x = lambda x: ast.Symbol("x", (x,)) coords = lambda x, y: ast.Symbol("coords", (x, y)) body = [] body += [ast.Decl("double", v(3)) for v in [v0, v1, n, x]] body.append(ast.Decl("double", "dot")) body.append(ast.Assign("dot", 0.0)) body.append(ast.Decl("int", "i")) # if triangle, use v0 = x1 - x0, v1 = x2 - x0 # otherwise, for the various quads, use v0 = x2 - x0, v1 = x1 - x0 # recall reference element ordering: # triangle: 2 quad: 1 3 # 0 1 0 2 if self.ufl_cell() == ufl.Cell('triangle', 3): body.append(ast.For(ast.Assign("i", 0), ast.Less("i", 3), ast.Incr("i", 1), [ast.Assign(v0("i"), ast.Sub(coords(1, "i"), coords(0, "i"))), ast.Assign(v1("i"), ast.Sub(coords(2, "i"), coords(0, "i"))), ast.Assign(x("i"), 0.0)])) else: body.append(ast.For(ast.Assign("i", 0), ast.Less("i", 3), ast.Incr("i", 1), [ast.Assign(v0("i"), ast.Sub(coords(2, "i"), coords(0, "i"))), ast.Assign(v1("i"), ast.Sub(coords(1, "i"), coords(0, "i"))), ast.Assign(x("i"), 0.0)])) # n = v0 x v1 body.append(ast.Assign(n(0), ast.Sub(ast.Prod(v0(1), v1(2)), ast.Prod(v0(2), v1(1))))) body.append(ast.Assign(n(1), ast.Sub(ast.Prod(v0(2), v1(0)), ast.Prod(v0(0), v1(2))))) body.append(ast.Assign(n(2), ast.Sub(ast.Prod(v0(0), v1(1)), ast.Prod(v0(1), v1(0))))) body.append(ast.For(ast.Assign("i", 0), ast.Less("i", 3), ast.Incr("i", 1), [ast.Incr(x(j), coords("i", j)) for j in range(3)])) body.extend([ast.FlatBlock("dot += (%(x)s) * n[%(i)d];\n" % {"x": x_, "i": i}) for i, x_ in enumerate(expr.code)]) body.append(ast.Assign("orientation[0][0]", ast.Ternary(ast.Less("dot", 0), 1, 0))) kernel = op2.Kernel(ast.FunDecl("void", "cell_orientations", [ast.Decl("int**", "orientation"), ast.Decl("double**", "coords")], ast.Block(body)), "cell_orientations") # Build the cell orientations as a DG0 field (so that we can # pass it in for facet integrals and the like) fs = functionspace.FunctionSpace(self, 'DG', 0) cell_orientations = function.Function(fs, name="cell_orientations", dtype=np.int32) op2.par_loop(kernel, self.cell_set, cell_orientations.dat(op2.WRITE, cell_orientations.cell_node_map()), self.coordinates.dat(op2.READ, self.coordinates.cell_node_map())) self.topology._cell_orientations = cell_orientations
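# Usage sketch (illustrative): orient the cells of an immersed sphere mesh
# against the outward radial direction, matching the Expression-based
# interface above. The radius and refinement level are example values.
from firedrake import IcosahedralSphereMesh, Expression

m = IcosahedralSphereMesh(1.0, refinement_level=2)
m.init_cell_orientations(Expression(("x[0]", "x[1]", "x[2]")))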
def _assemble(f, tensor=None, bcs=None, form_compiler_parameters=None, inverse=False, nest=None): """Assemble the form f and return a Firedrake object representing the result. This will be a :class:`float` for 0-forms, a :class:`.Function` for 1-forms and a :class:`.Matrix` for 2-forms. :arg bcs: A tuple of :class`.DirichletBC`\s to be applied. :arg tensor: An existing tensor object into which the form should be assembled. If this is not supplied, a new tensor will be created for the purpose. :arg form_compiler_parameters: (optional) dict of parameters to pass to the form compiler. :arg inverse: (optional) if f is a 2-form, then assemble the inverse of the local matrices. :arg nest: (optional) flag indicating if matrices on mixed spaces should be built in blocks or monolithically. """ if form_compiler_parameters: form_compiler_parameters = form_compiler_parameters.copy() else: form_compiler_parameters = {} form_compiler_parameters["assemble_inverse"] = inverse kernels = tsfc_interface.compile_form(f, "form", parameters=form_compiler_parameters, inverse=inverse) rank = len(f.arguments()) is_mat = rank == 2 is_vec = rank == 1 if any((coeff.function_space() and coeff.function_space().component is not None) for coeff in f.coefficients()): raise NotImplementedError("Integration of subscripted VFS not yet implemented") if inverse and rank != 2: raise ValueError("Can only assemble the inverse of a 2-form") integrals = f.integrals() if nest is None: nest = parameters.parameters["matnest"] # Pass this through for assembly caching purposes form_compiler_parameters["matnest"] = nest zero_tensor = lambda: None if is_mat: test, trial = f.arguments() map_pairs = [] cell_domains = [] exterior_facet_domains = [] interior_facet_domains = [] if tensor is None: # For horizontal facets of extrded meshes, the corresponding domain # in the base mesh is the cell domain. Hence all the maps used for top # bottom and interior horizontal facets will use the cell to dofs map # coming from the base mesh as a starting point for the actual dynamic map # computation. for integral in integrals: integral_type = integral.integral_type() if integral_type == "cell": cell_domains.append(op2.ALL) elif integral_type == "exterior_facet": exterior_facet_domains.append(op2.ALL) elif integral_type == "interior_facet": interior_facet_domains.append(op2.ALL) elif integral_type == "exterior_facet_bottom": cell_domains.append(op2.ON_BOTTOM) elif integral_type == "exterior_facet_top": cell_domains.append(op2.ON_TOP) elif integral_type == "exterior_facet_vert": exterior_facet_domains.append(op2.ALL) elif integral_type == "interior_facet_horiz": cell_domains.append(op2.ON_INTERIOR_FACETS) elif integral_type == "interior_facet_vert": interior_facet_domains.append(op2.ALL) else: raise ValueError('Unknown integral type "%s"' % integral_type) # To avoid an extra check for extruded domains, the maps that are being passed in # are DecoratedMaps. For the non-extruded case the DecoratedMaps don't restrict the # space over which we iterate as the domains are dropped at Sparsity construction # time. In the extruded case the cell domains are used to identify the regions of the # mesh which require allocation in the sparsity. 
            if cell_domains:
                map_pairs.append((op2.DecoratedMap(test.cell_node_map(), cell_domains),
                                  op2.DecoratedMap(trial.cell_node_map(), cell_domains)))
            if exterior_facet_domains:
                map_pairs.append((op2.DecoratedMap(test.exterior_facet_node_map(), exterior_facet_domains),
                                  op2.DecoratedMap(trial.exterior_facet_node_map(), exterior_facet_domains)))
            if interior_facet_domains:
                map_pairs.append((op2.DecoratedMap(test.interior_facet_node_map(), interior_facet_domains),
                                  op2.DecoratedMap(trial.interior_facet_node_map(), interior_facet_domains)))

            map_pairs = tuple(map_pairs)
            # Construct OP2 Mat to assemble into
            fs_names = (test.function_space().name, trial.function_space().name)
            sparsity = op2.Sparsity((test.function_space().dof_dset,
                                     trial.function_space().dof_dset),
                                    map_pairs,
                                    "%s_%s_sparsity" % fs_names,
                                    nest=nest)
            result_matrix = matrix.Matrix(f, bcs, sparsity, numpy.float64,
                                          "%s_%s_matrix" % fs_names)
            tensor = result_matrix._M
        else:
            result_matrix = tensor
            # Replace any bcs on the tensor we passed in
            result_matrix.bcs = bcs
            tensor = tensor._M
        zero_tensor = lambda: tensor.zero()

        def mat(testmap, trialmap, i, j):
            return tensor[i, j](op2.INC,
                                (testmap(test.function_space()[i])[op2.i[0]],
                                 trialmap(trial.function_space()[j])[op2.i[1]]),
                                flatten=True)
        result = lambda: result_matrix
    elif is_vec:
        test = f.arguments()[0]
        if tensor is None:
            result_function = function.Function(test.function_space())
            tensor = result_function.dat
        else:
            result_function = tensor
            tensor = result_function.dat
        zero_tensor = lambda: tensor.zero()

        def vec(testmap, i):
            return tensor[i](op2.INC,
                             testmap(test.function_space()[i])[op2.i[0]],
                             flatten=True)
        result = lambda: result_function
    else:
        # 0-forms are always scalar
        if tensor is None:
            tensor = op2.Global(1, [0.0])
        result = lambda: tensor.data[0]

    subdomain_data = f.subdomain_data()
    coefficients = f.coefficients()
    domains = f.ufl_domains()
    if len(domains) != 1:
        raise NotImplementedError("Assembly of forms with more than one domain not supported")
    m = domains[0]
    # Ensure mesh is "initialised" (could have got here without
    # building a functionspace (e.g. if integrating a constant)).
    m.init()
    subdomain_data = subdomain_data[m]

    # Since applying boundary conditions to a matrix changes the
    # initial assembly, to support:
    #     A = assemble(a)
    #     bc.apply(A)
    #     solve(A, ...)
    # we need to defer actually assembling the matrix until just
    # before we need it (when we know if there are any bcs to be
    # applied).  To do so, we build a closure that carries out the
    # assembly and stash that on the Matrix object.  When we hit a
    # solve, we funcall the closure with any bcs the Matrix now has
    # to assemble it.
    def thunk(bcs):
        zero_tensor()
        for indices, (kernel, integral_type, needs_orientations,
                      subdomain_id, coeff_map) in kernels:
            # Find argument space indices
            if is_mat:
                i, j = indices
            elif is_vec:
                i, = indices
            else:
                assert len(indices) == 0

            sdata = subdomain_data.get(integral_type, None)
            if integral_type != 'cell' and sdata is not None:
                raise NotImplementedError("subdomain_data only supported with cell integrals.")

            # Extract block from tensor and test/trial spaces
            # FIXME Ugly variable renaming required because functions are not
            # lexical closures in Python and we're writing to these variables
            if is_mat and tensor.sparsity.shape > (1, 1):
                tsbc = []
                trbc = []
                # Unwind ComponentFunctionSpace to check for matching BCs
                for bc in bcs:
                    fs = bc.function_space()
                    if fs.component is not None:
                        fs = fs.parent
                    if fs.index == i:
                        tsbc.append(bc)
                    if fs.index == j:
                        trbc.append(bc)
            elif is_mat:
                tsbc, trbc = bcs, bcs

            # Now build arguments for the par_loop
            kwargs = {}
            # Some integrals require non-coefficient arguments at the
            # end (facet number information).
            extra_args = []
            # Decoration for applying to matrix maps in extruded case
            decoration = None
            if integral_type == "cell":
                itspace = sdata or m.cell_set

                def get_map(x, bcs=None, decoration=None):
                    return x.cell_node_map(bcs)
            elif integral_type in ("exterior_facet", "exterior_facet_vert"):
                itspace = m.exterior_facets.measure_set(integral_type, subdomain_id)
                extra_args.append(m.exterior_facets.local_facet_dat(op2.READ))

                def get_map(x, bcs=None, decoration=None):
                    return x.exterior_facet_node_map(bcs)
            elif integral_type in ("exterior_facet_top", "exterior_facet_bottom"):
                # In the case of extruded meshes with horizontal facet integrals, two
                # parallel loops will (potentially) get created and called based on the
                # domain id: interior horizontal, bottom or top.
                index, itspace = m.exterior_facets.measure_set(integral_type, subdomain_id)
                decoration = index
                kwargs["iterate"] = index

                def get_map(x, bcs=None, decoration=None):
                    map_ = x.cell_node_map(bcs)
                    if decoration is not None:
                        return op2.DecoratedMap(map_, decoration)
                    return map_
            elif integral_type in ("interior_facet", "interior_facet_vert"):
                itspace = m.interior_facets.set
                extra_args.append(m.interior_facets.local_facet_dat(op2.READ))

                def get_map(x, bcs=None, decoration=None):
                    return x.interior_facet_node_map(bcs)
            elif integral_type == "interior_facet_horiz":
                itspace = m.interior_facets.measure_set(integral_type, subdomain_id)
                decoration = op2.ON_INTERIOR_FACETS
                kwargs["iterate"] = op2.ON_INTERIOR_FACETS

                def get_map(x, bcs=None, decoration=None):
                    map_ = x.cell_node_map(bcs)
                    if decoration is not None:
                        return op2.DecoratedMap(map_, decoration)
                    return map_
            else:
                raise ValueError("Unknown integral type '%s'" % integral_type)

            # Output argument
            if is_mat:
                tensor_arg = mat(lambda s: get_map(s, tsbc, decoration),
                                 lambda s: get_map(s, trbc, decoration),
                                 i, j)
            elif is_vec:
                tensor_arg = vec(lambda s: get_map(s), i)
            else:
                tensor_arg = tensor(op2.INC)

            coords = m.coordinates
            args = [kernel, itspace, tensor_arg,
                    coords.dat(op2.READ, get_map(coords), flatten=True)]
            if needs_orientations:
                o = m.cell_orientations()
                args.append(o.dat(op2.READ, get_map(o), flatten=True))
            for n in coeff_map:
                c = coefficients[n]
                args.append(c.dat(op2.READ, get_map(c), flatten=True))

            args.extend(extra_args)
            try:
                op2.par_loop(*args, **kwargs)
            except MapValueError:
                raise RuntimeError("Integral measure does not match measure of all coefficients/arguments")

        # Must apply bcs outside loop over kernels because we may wish
        # to apply bcs to a block which is otherwise zero, and
        # therefore does not have an associated kernel.
        if bcs is not None and is_mat:
            for bc in bcs:
                fs = bc.function_space()
                if len(fs) > 1:
                    raise RuntimeError("""Cannot apply boundary conditions to full mixed space. Did you forget to index it?""")
                shape = tensor.sparsity.shape
                for i in range(shape[0]):
                    for j in range(shape[1]):
                        # Set diagonal entries on bc nodes to 1 if the current
                        # block is on the matrix diagonal and its index matches
                        # the index of the function space the bc is defined on.
                        if i != j:
                            continue
                        if fs.component is None and fs.index is not None:
                            # Mixed, index (no ComponentFunctionSpace)
                            if fs.index == i:
                                tensor[i, j].set_local_diagonal_entries(bc.nodes)
                        elif fs.component is not None:
                            # ComponentFunctionSpace, check parent index
                            if fs.parent.index is not None:
                                # Mixed, index doesn't match
                                if fs.parent.index != i:
                                    continue
                            # Index matches
                            tensor[i, j].set_local_diagonal_entries(bc.nodes, idx=fs.component)
                        elif fs.index is None:
                            tensor[i, j].set_local_diagonal_entries(bc.nodes)
                        else:
                            raise RuntimeError("Unhandled BC case")
        if bcs is not None and is_vec:
            for bc in bcs:
                bc.apply(result_function)
        if is_mat:
            # Queue up matrix assembly (after we've done all the other operations)
            tensor.assemble()
        return result()

    thunk = assembly_cache._cache_thunk(thunk, f, result(), form_compiler_parameters)

    if is_mat:
        result_matrix._assembly_callback = thunk
        return result()
    else:
        return thunk(bcs)
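# Hedged usage sketch illustrating why matrix assembly is deferred via the
# thunk above: boundary conditions applied after ``assemble`` must still take
# effect when the matrix is first needed by a solve.  The function space,
# form and boundary value below are illustrative assumptions, not taken from
# the source.
from firedrake import (UnitSquareMesh, FunctionSpace, TrialFunction,
                       TestFunction, Function, DirichletBC, Constant,
                       assemble, solve, dx, dot, grad)

mesh = UnitSquareMesh(8, 8)
V = FunctionSpace(mesh, "CG", 1)
u, v = TrialFunction(V), TestFunction(V)
a = dot(grad(u), grad(v)) * dx
L = Constant(1.0) * v * dx

A = assemble(a)            # assembly is recorded, not yet executed
b = assemble(L)
bc = DirichletBC(V, 0.0, "on_boundary")
bc.apply(A)                # bcs attach to the Matrix; the thunk sees them later
bc.apply(b)
x = Function(V)
solve(A, x, b)             # the stashed closure finally assembles with the bcs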
def callback(self):
    import firedrake.function as function
    import firedrake.functionspace as functionspace

    del self._callback
    if op2.MPI.comm.size > 1:
        self._plex.distributeOverlap(1)
    self._grown_halos = True

    if reorder:
        with timed_region("Mesh: reorder"):
            old_to_new = self._plex.getOrdering(PETSc.Mat.OrderingType.RCM).indices
            reordering = np.empty_like(old_to_new)
            reordering[old_to_new] = np.arange(old_to_new.size, dtype=old_to_new.dtype)
    else:
        # No reordering
        reordering = None

    # Mark OP2 entities and derive the resulting Plex renumbering
    with timed_region("Mesh: renumbering"):
        dmplex.mark_entity_classes(self._plex)
        self._entity_classes = dmplex.get_entity_classes(self._plex)
        self._plex_renumbering = dmplex.plex_renumbering(self._plex,
                                                         self._entity_classes,
                                                         reordering)

    with timed_region("Mesh: cell numbering"):
        # Derive a cell numbering from the Plex renumbering
        entity_dofs = np.zeros(topological_dim + 1, dtype=np.int32)
        entity_dofs[-1] = 1
        self._cell_numbering = self._plex.createSection([1], entity_dofs,
                                                        perm=self._plex_renumbering)
        entity_dofs[:] = 0
        entity_dofs[0] = 1
        self._vertex_numbering = self._plex.createSection([1], entity_dofs,
                                                          perm=self._plex_renumbering)

    # Note that for bendy elements, this needs to change.
    with timed_region("Mesh: coordinate field"):
        if periodic_coords is not None:
            if self.ufl_cell().geometric_dimension() != 1:
                raise NotImplementedError("Periodic coordinates in more than 1D are unsupported")
            # We've been passed a periodic coordinate field, so use that.
            self._coordinate_fs = functionspace.VectorFunctionSpace(self, "DG", 1)
            self.coordinates = function.Function(self._coordinate_fs,
                                                 val=periodic_coords,
                                                 name="Coordinates")
        else:
            self._coordinate_fs = functionspace.VectorFunctionSpace(self, "Lagrange", 1)
            coordinates = dmplex.reordered_coords(self._plex,
                                                  self._coordinate_fs._global_numbering,
                                                  (self.num_vertices(), geometric_dim))
            self.coordinates = function.Function(self._coordinate_fs,
                                                 val=coordinates,
                                                 name="Coordinates")
    self._ufl_domain = ufl.Domain(self.coordinates)
    # Build a new ufl element for this function space with the
    # correct domain.  This is necessary since this function space
    # is in the cache and will be picked up by later
    # VectorFunctionSpace construction.
    self._coordinate_fs._ufl_element = self._coordinate_fs.ufl_element().reconstruct(domain=self.ufl_domain())
    # HACK alert!
    # Replace coordinate Function by one that has a real domain on it
    # (but don't copy values)
    self.coordinates = function.Function(self._coordinate_fs,
                                         val=self.coordinates.dat)
    # Add subdomain_data to the measure objects we store with the mesh.
    # These are weakrefs for consistency with the "global" measure objects.
    self._dx = ufl.Measure('cell', subdomain_data=weakref.ref(self.coordinates))
    self._ds = ufl.Measure('exterior_facet', subdomain_data=weakref.ref(self.coordinates))
    self._dS = ufl.Measure('interior_facet', subdomain_data=weakref.ref(self.coordinates))
    # Set the subdomain_data on all the default measures to this
    # coordinate field.
    # We don't set the domain on the measure since this causes
    # an uncollectable reference in the global space (dx is
    # global).  Furthermore, it's never used anyway.
    for measure in [ufl.dx, ufl.ds, ufl.dS]:
        measure._subdomain_data = weakref.ref(self.coordinates)
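# Small numpy sketch (illustration only) of the permutation inversion used in
# the reordering branch above: if ``old_to_new[i]`` gives the new index of old
# entity ``i``, scattering an arange through it produces the inverse map that
# is then handed to the Plex renumbering.
import numpy as np

old_to_new = np.array([2, 0, 3, 1])
reordering = np.empty_like(old_to_new)
reordering[old_to_new] = np.arange(old_to_new.size, dtype=old_to_new.dtype)
print(reordering)  # [1 3 0 2], i.e. reordering[new] == old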