def create_subdm(dm, fields, *args, **kwargs):
    """Callback to create a sub-DM describing the specified fields.

    :arg dm: The DM.
    :arg fields: The fields in the new sub-DM.

    .. note::

       This should, but currently does not, transfer appropriately
       split application contexts onto the sub-DMs.
    """
    W = get_function_space(dm)
    # TODO: Correct splitting of SNESContext for len(fields) > 1 case
    if len(fields) == 1:
        # Subspace is just a single FunctionSpace.
        idx = fields[0]
        subdm = W[idx].dm
        iset = W._ises[idx]
        return iset, subdm
    else:
        try:
            # Look up the subspace in the cache
            iset, subspace = W._subspaces[tuple(fields)]
            return iset, subspace.dm
        except KeyError:
            pass
        # Need to build an MFS for the subspace
        subspace = firedrake.MixedFunctionSpace([W[f] for f in fields])
        # Index set mapping from W into subspace.
        iset = PETSc.IS().createGeneral(numpy.concatenate([W._ises[f].indices
                                                           for f in fields]),
                                        comm=W.comm)
        # Keep hold of a strong reference to the created subspace (we
        # only hold a weakref in the shell DM), so we can reuse it later.
        W._subspaces[tuple(fields)] = iset, subspace
        return iset, subspace.dm
def construct_kronecker_matrix(self, interp_1d):
    """Construct the tensorized interpolation matrix.

    Do this by computing the Kronecker product of the rows of the 1d
    univariate interpolation matrices.  In the future, this may be done
    matrix-free.
    """
    # This is one of the two bottlenecks that slow down initializing Bsplines.
    IFW = PETSc.Mat().create(self.comm)
    IFW.setType(PETSc.Mat.Type.AIJ)
    comm = self.comm
    # Owned part of the global problem.
    local_N = self.N // comm.size + int(comm.rank < (self.N % comm.size))
    (lsize, gsize) = interp_1d[0].getSizes()[0]
    IFW.setSizes(((lsize, gsize), (local_N, self.N)))
    # Guess the sparsity pattern from interp_1d[0].
    for row in range(lsize):
        row = self.lg_map_fe.apply([row])[0]
        nnz_ = len(interp_1d[0].getRow(row)[0])  # length of the nnz array
        self.IFWnnz = max(self.IFWnnz, nnz_**self.dim)
    IFW.setPreallocationNNZ(self.IFWnnz)
    IFW.setUp()
    for row in range(lsize):
        row = self.lg_map_fe.apply([row])[0]
        M = [[A.getRow(row)[0], A.getRow(row)[1], A.getSize()[1]]
             for A in interp_1d]
        M = reduce(self.vectorkron, M)
        columns, values, length = M
        IFW.setValues([row], columns, values)
    IFW.assemble()
    return IFW
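# A minimal sketch (an assumption, not the class's actual self.vectorkron) of
# the reduction used above: each operand is a (columns, values, ncols) triple
# describing one sparse row, and the Kronecker product of two such rows has
# column index c1*n2 + c2 with value v1*v2.
import numpy as np
from functools import reduce


def vectorkron(row_a, row_b):
    cols_a, vals_a, n_a = row_a
    cols_b, vals_b, n_b = row_b
    cols = (np.repeat(cols_a, len(cols_b)) * n_b
            + np.tile(cols_b, len(cols_a)))
    vals = np.outer(vals_a, vals_b).ravel()
    return cols, vals, n_a * n_b


# Example: reduce over per-dimension rows, as in the method above.
rows_1d = [(np.array([0, 2]), np.array([0.5, 0.5]), 4),
           (np.array([1]), np.array([1.0]), 3)]
columns, values, length = reduce(vectorkron, rows_1d)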
def create_subdm(dm, fields, *args, **kwargs):
    """Callback to create a sub-DM describing the specified fields.

    :arg dm: The DM.
    :arg fields: The fields in the new sub-DM.
    """
    W = get_function_space(dm)
    ctx = get_appctx(dm)
    coarsen = get_ctx_coarsener(dm)
    if len(fields) == 1:
        # Subspace is just a single FunctionSpace.
        idx, = fields
        subdm = W[idx].dm
        iset = W._ises[idx]
        if ctx is not None:
            ctx, = ctx.split([(idx, )])
            push_appctx(subdm, ctx)
            push_ctx_coarsener(subdm, coarsen)
        return iset, subdm
    else:
        # Need to build an MFS for the subspace
        subspace = firedrake.MixedFunctionSpace([W[f] for f in fields])
        # Pass any transfer operators over
        prolong, restrict, inject = get_transfer_operators(dm)
        push_transfer_operators(subspace.dm, prolong, restrict, inject)
        # Index set mapping from W into subspace.
        iset = PETSc.IS().createGeneral(
            numpy.concatenate([W._ises[f].indices for f in fields]),
            comm=W.comm)
        if ctx is not None:
            ctx, = ctx.split([fields])
            push_appctx(subspace.dm, ctx)
            push_ctx_coarsener(subspace.dm, coarsen)
        return iset, subspace.dm
def create_interpolation(dmc, dmf):
    cctx = firedrake.dmhooks.get_appctx(dmc)
    fctx = firedrake.dmhooks.get_appctx(dmf)

    prolong, _, _ = firedrake.dmhooks.get_transfer_operators(dmc)
    _, restrict, _ = firedrake.dmhooks.get_transfer_operators(dmf)

    V_c = cctx._problem.u.function_space()
    V_f = fctx._problem.u.function_space()

    row_size = V_f.dof_dset.layout_vec.getSizes()
    col_size = V_c.dof_dset.layout_vec.getSizes()

    cfn = firedrake.Function(V_c)
    ffn = firedrake.Function(V_f)
    cbcs = cctx._problem.bcs
    fbcs = fctx._problem.bcs

    ctx = Interpolation(cfn, ffn, prolong, restrict, cbcs, fbcs)
    mat = PETSc.Mat().create(comm=dmc.comm)
    mat.setSizes((row_size, col_size))
    mat.setType(mat.Type.PYTHON)
    mat.setPythonContext(ctx)
    mat.setUp()
    return mat, None
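# A minimal sketch (an assumption about the shape of the Interpolation context
# used above; the real Firedrake class may differ) of the Python matrix context
# wrapped by create_interpolation.  mult applies the prolongation coarse ->
# fine, multTranspose the restriction fine -> coarse, zeroing boundary rows.
class InterpolationSketch(object):
    def __init__(self, cfn, ffn, prolong, restrict, cbcs, fbcs):
        self.cfn = cfn          # coarse work Function
        self.ffn = ffn          # fine work Function
        self.prolong = prolong
        self.restrict = restrict
        self.cbcs = cbcs
        self.fbcs = fbcs

    def mult(self, mat, x, y):
        # x is a coarse PETSc Vec, y receives the fine result.
        with self.cfn.dat.vec_wo as v:
            x.copy(v)
        self.prolong(self.cfn, self.ffn)
        for bc in self.fbcs:
            bc.zero(self.ffn)
        with self.ffn.dat.vec_ro as v:
            v.copy(y)

    def multTranspose(self, mat, x, y):
        # x is a fine PETSc Vec, y receives the coarse result.
        with self.ffn.dat.vec_wo as v:
            x.copy(v)
        self.restrict(self.ffn, self.cfn)
        for bc in self.cbcs:
            bc.zero(self.cfn)
        with self.cfn.dat.vec_ro as v:
            v.copy(y)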
def initialize(self, obj):
    A, P = obj.getOperators()
    prefix = obj.getOptionsPrefix()
    V = get_function_space(obj.getDM())
    mesh = V.mesh()

    family = str(V.ufl_element().family())
    degree = V.ufl_element().degree()
    if family != 'Raviart-Thomas' or degree != 1:
        raise ValueError("Hypre ADS requires lowest order RT elements! "
                         "(not %s of degree %d)" % (family, degree))

    P1 = FunctionSpace(mesh, "Lagrange", 1)
    NC1 = FunctionSpace(mesh, "N1curl", 1)
    # DiscreteGradient
    G = Interpolator(grad(TestFunction(P1)), NC1).callable().handle
    # DiscreteCurl
    C = Interpolator(curl(TestFunction(NC1)), V).callable().handle

    pc = PETSc.PC().create(comm=obj.comm)
    pc.incrementTabLevel(1, parent=obj)
    pc.setOptionsPrefix(prefix + "hypre_ads_")
    pc.setOperators(A, P)

    pc.setType('hypre')
    pc.setHYPREType('ads')
    pc.setHYPREDiscreteGradient(G)
    pc.setHYPREDiscreteCurl(C)
    V = VectorFunctionSpace(mesh, "Lagrange", 1)
    linear_coordinates = interpolate(SpatialCoordinate(mesh), V).dat.data_ro.copy()
    pc.setCoordinates(linear_coordinates)

    pc.setUp()
    self.pc = pc
def _build_monolithic_basis(self):
    r"""Build a basis for the complete mixed space.

    The monolithic basis is formed by the cartesian product of the
    bases forming each sub part.
    """
    self._vecs = []
    for idx, basis in enumerate(self):
        if isinstance(basis, VectorSpaceBasis):
            vecs = basis._vecs
            if basis._constant:
                vecs = vecs + (function.Function(self._function_space[idx]).assign(1), )
            for vec in vecs:
                mvec = function.Function(self._function_space)
                mvec.sub(idx).assign(vec)
                self._vecs.append(mvec)

    self._petsc_vecs = []
    for v in self._vecs:
        with v.dat.vec_ro as v_:
            self._petsc_vecs.append(v_)

    # Orthonormalize:
    basis = self._petsc_vecs
    for i, vec in enumerate(basis):
        alphas = []
        for vec_ in basis[:i]:
            alphas.append(vec.dot(vec_))
        for alpha, vec_ in zip(alphas, basis[:i]):
            vec.axpy(-alpha, vec_)
        vec.normalize()

    self._nullspace = PETSc.NullSpace().create(constant=False,
                                               vectors=self._petsc_vecs,
                                               comm=self.comm)
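# Usage sketch (hedged: a typical Firedrake pattern that exercises the
# monolithic basis above, e.g. for a Stokes problem whose pressure is only
# defined up to a constant; none of these objects come from the code above).
from firedrake import (UnitSquareMesh, VectorFunctionSpace, FunctionSpace,
                       MixedVectorSpaceBasis, VectorSpaceBasis)

mesh = UnitSquareMesh(8, 8)
V = VectorFunctionSpace(mesh, "CG", 2)
Q = FunctionSpace(mesh, "CG", 1)
W = V * Q

# Velocity block has no nullspace; pressure block is spanned by constants.
nullspace = MixedVectorSpaceBasis(W, [W.sub(0), VectorSpaceBasis(constant=True)])
# solve(a == L, w, bcs=bcs, nullspace=nullspace)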
def plot_matrix(a_form, bcs=[], **kwargs):
    """Provides a plot of a matrix."""
    fig, ax = plt.subplots(1, 1)

    A = assemble(a_form, bcs=bcs, mat_type="aij")
    petsc_mat = A.M.handle
    size = petsc_mat.getSize()
    Mij = PETSc.Mat()
    petsc_mat.convert("aij", Mij)

    n, m = size
    Mnp = np.array(Mij.getValues(range(n), range(m)))
    Am = np.ma.masked_values(Mnp, 0, rtol=1e-13)

    # Plot the matrix
    plot = ax.matshow(Am, cmap=my_cmap, **kwargs)
    # Remove axis ticks and values
    ax.tick_params(length=0)
    ax.set_xticklabels([])
    ax.set_yticklabels([])
    return plot
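# Usage sketch (hedged: assumes plt and my_cmap are defined at module level as
# the function above expects; the Poisson form here is just an example).
from firedrake import (UnitSquareMesh, FunctionSpace, TrialFunction,
                       TestFunction, DirichletBC, inner, grad, dx)

mesh = UnitSquareMesh(4, 4)
V = FunctionSpace(mesh, "CG", 1)
u, v = TrialFunction(V), TestFunction(V)
a = inner(grad(u), grad(v)) * dx
bcs = [DirichletBC(V, 0, "on_boundary")]

plot_matrix(a, bcs=bcs)
plt.show()  # or plt.savefig("sparsity.png")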
def test_duplicate(a, bcs):
    test, trial = a.arguments()

    if test.function_space().shape == ():
        rhs_form = inner(Constant(1), test)*dx
    elif test.function_space().shape == (2, ):
        rhs_form = inner(Constant((1, 1)), test)*dx

    if bcs is not None:
        Af = assemble(a, mat_type="matfree", bcs=bcs)
        rhs = assemble(rhs_form, bcs=bcs)
    else:
        Af = assemble(a, mat_type="matfree")
        rhs = assemble(rhs_form)

    # The matrix-free duplicate creates a matrix-free copy of Af.
    # We have not implemented the default copy=False.
    B_petsc = Af.petscmat.duplicate(copy=True)

    ksp = PETSc.KSP().create()
    ksp.setOperators(Af.petscmat)
    ksp.setFromOptions()

    solution1 = Function(test.function_space())
    solution2 = Function(test.function_space())

    # Solve the system with the original matrix A
    with rhs.dat.vec_ro as b, solution1.dat.vec as x:
        ksp.solve(b, x)

    # Multiply with the copied matrix B (y is written to, so it needs
    # write access)
    with solution1.dat.vec_ro as x, solution2.dat.vec as y:
        B_petsc.mult(x, y)

    # Check that the original rhs equals B A^{-1} rhs
    assert np.allclose(rhs.vector().array(), solution2.vector().array())
def initialize(self, pc): """Set up the problem context. Take the original mixed problem and reformulate the problem as a hybridized mixed system. A KSP is created for the Lagrange multiplier system. """ from firedrake import (FunctionSpace, Function, Constant, TrialFunction, TrialFunctions, TestFunction, DirichletBC, assemble) from firedrake.assemble import (allocate_matrix, create_assembly_callable) from firedrake.formmanipulation import split_form from ufl.algorithms.replace import replace # Extract the problem context prefix = pc.getOptionsPrefix() + "hybridization_" _, P = pc.getOperators() self.cxt = P.getPythonContext() if not isinstance(self.cxt, ImplicitMatrixContext): raise ValueError("The python context must be an ImplicitMatrixContext") test, trial = self.cxt.a.arguments() V = test.function_space() mesh = V.mesh() if len(V) != 2: raise ValueError("Expecting two function spaces.") if all(Vi.ufl_element().value_shape() for Vi in V): raise ValueError("Expecting an H(div) x L2 pair of spaces.") # Automagically determine which spaces are vector and scalar for i, Vi in enumerate(V): if Vi.ufl_element().sobolev_space().name == "HDiv": self.vidx = i else: assert Vi.ufl_element().sobolev_space().name == "L2" self.pidx = i # Create the space of approximate traces. W = V[self.vidx] if W.ufl_element().family() == "Brezzi-Douglas-Marini": tdegree = W.ufl_element().degree() else: try: # If we have a tensor product element h_deg, v_deg = W.ufl_element().degree() tdegree = (h_deg - 1, v_deg - 1) except TypeError: tdegree = W.ufl_element().degree() - 1 TraceSpace = FunctionSpace(mesh, "HDiv Trace", tdegree) # Break the function spaces and define fully discontinuous spaces broken_elements = ufl.MixedElement([ufl.BrokenElement(Vi.ufl_element()) for Vi in V]) V_d = FunctionSpace(mesh, broken_elements) # Set up the functions for the original, hybridized # and schur complement systems self.broken_solution = Function(V_d) self.broken_residual = Function(V_d) self.trace_solution = Function(TraceSpace) self.unbroken_solution = Function(V) self.unbroken_residual = Function(V) # Set up the KSP for the hdiv residual projection hdiv_mass_ksp = PETSc.KSP().create(comm=pc.comm) hdiv_mass_ksp.setOptionsPrefix(prefix + "hdiv_residual_") # HDiv mass operator p = TrialFunction(V[self.vidx]) q = TestFunction(V[self.vidx]) mass = ufl.dot(p, q)*ufl.dx # TODO: Bcs? M = assemble(mass, bcs=None, form_compiler_parameters=self.cxt.fc_params) M.force_evaluation() Mmat = M.petscmat hdiv_mass_ksp.setOperators(Mmat) hdiv_mass_ksp.setUp() hdiv_mass_ksp.setFromOptions() self.hdiv_mass_ksp = hdiv_mass_ksp # Storing the result of A.inv * r, where A is the HDiv # mass matrix and r is the HDiv residual self._primal_r = Function(V[self.vidx]) tau = TestFunction(V_d[self.vidx]) self._assemble_broken_r = create_assembly_callable( ufl.dot(self._primal_r, tau)*ufl.dx, tensor=self.broken_residual.split()[self.vidx], form_compiler_parameters=self.cxt.fc_params) # Create the symbolic Schur-reduction: # Original mixed operator replaced with "broken" # arguments arg_map = {test: TestFunction(V_d), trial: TrialFunction(V_d)} Atilde = Tensor(replace(self.cxt.a, arg_map)) gammar = TestFunction(TraceSpace) n = ufl.FacetNormal(mesh) sigma = TrialFunctions(V_d)[self.vidx] # We zero out the contribution of the trace variables on the exterior # boundary. 
Extruded cells will have both horizontal and vertical # facets if mesh.cell_set._extruded: trace_bcs = [DirichletBC(TraceSpace, Constant(0.0), "on_boundary"), DirichletBC(TraceSpace, Constant(0.0), "bottom"), DirichletBC(TraceSpace, Constant(0.0), "top")] K = Tensor(gammar('+') * ufl.dot(sigma, n) * ufl.dS_h + gammar('+') * ufl.dot(sigma, n) * ufl.dS_v) else: trace_bcs = [DirichletBC(TraceSpace, Constant(0.0), "on_boundary")] K = Tensor(gammar('+') * ufl.dot(sigma, n) * ufl.dS) # If boundary conditions are contained in the ImplicitMatrixContext: if self.cxt.row_bcs: raise NotImplementedError("Strong BCs not currently handled. Try imposing them weakly.") # Assemble the Schur complement operator and right-hand side self.schur_rhs = Function(TraceSpace) self._assemble_Srhs = create_assembly_callable( K * Atilde.inv * self.broken_residual, tensor=self.schur_rhs, form_compiler_parameters=self.cxt.fc_params) schur_comp = K * Atilde.inv * K.T self.S = allocate_matrix(schur_comp, bcs=trace_bcs, form_compiler_parameters=self.cxt.fc_params) self._assemble_S = create_assembly_callable(schur_comp, tensor=self.S, bcs=trace_bcs, form_compiler_parameters=self.cxt.fc_params) self._assemble_S() self.S.force_evaluation() Smat = self.S.petscmat # Nullspace for the multiplier problem nullspace = create_schur_nullspace(P, -K * Atilde, V, V_d, TraceSpace, pc.comm) if nullspace: Smat.setNullSpace(nullspace) # Set up the KSP for the system of Lagrange multipliers trace_ksp = PETSc.KSP().create(comm=pc.comm) trace_ksp.setOptionsPrefix(prefix) trace_ksp.setOperators(Smat) trace_ksp.setUp() trace_ksp.setFromOptions() self.trace_ksp = trace_ksp split_mixed_op = dict(split_form(Atilde.form)) split_trace_op = dict(split_form(K.form)) # Generate reconstruction calls self._reconstruction_calls(split_mixed_op, split_trace_op) # NOTE: The projection stage *might* be replaced by a Fortin # operator. We may want to allow the user to specify if they # wish to use a Fortin operator over a projection, or vice-versa. # In a future add-on, we can add a switch which chooses either # the Fortin reconstruction or the usual KSP projection. # Set up the projection KSP hdiv_projection_ksp = PETSc.KSP().create(comm=pc.comm) hdiv_projection_ksp.setOptionsPrefix(prefix + 'hdiv_projection_') # Reuse the mass operator from the hdiv_mass_ksp hdiv_projection_ksp.setOperators(Mmat) # Construct the RHS for the projection stage self._projection_rhs = Function(V[self.vidx]) self._assemble_projection_rhs = create_assembly_callable( ufl.dot(self.broken_solution.split()[self.vidx], q)*ufl.dx, tensor=self._projection_rhs, form_compiler_parameters=self.cxt.fc_params) # Finalize ksp setup hdiv_projection_ksp.setUp() hdiv_projection_ksp.setFromOptions() self.hdiv_projection_ksp = hdiv_projection_ksp
def nonlocal_integral_eq( mesh, scatterer_bdy_id, outer_bdy_id, wave_number, options_prefix=None, solver_parameters=None, fspace=None, vfspace=None, true_sol_grad_expr=None, actx=None, dgfspace=None, dgvfspace=None, meshmode_src_connection=None, qbx_kwargs=None, ): r""" see run_method for descriptions of unlisted args args: gamma and beta are used to precondition with the following equation: \Delta u - \kappa^2 \gamma u = 0 (\partial_n - i\kappa\beta) u |_\Sigma = 0 """ # make sure we get outer bdy id as tuple in case it consists of multiple ids if isinstance(outer_bdy_id, int): outer_bdy_id = [outer_bdy_id] outer_bdy_id = tuple(outer_bdy_id) # away from the excluded region, but firedrake and meshmode point # into pyt_inner_normal_sign = -1 ambient_dim = mesh.geometric_dimension() # {{{ Build src and tgt # build connection meshmode near src boundary -> src boundary inside meshmode from meshmode.discretization.poly_element import \ InterpolatoryQuadratureSimplexGroupFactory from meshmode.discretization.connection import make_face_restriction factory = InterpolatoryQuadratureSimplexGroupFactory( dgfspace.finat_element.degree) src_bdy_connection = make_face_restriction(actx, meshmode_src_connection.discr, factory, scatterer_bdy_id) # source is a qbx layer potential from pytential.qbx import QBXLayerPotentialSource disable_refinement = (fspace.mesh().geometric_dimension() == 3) qbx = QBXLayerPotentialSource(src_bdy_connection.to_discr, **qbx_kwargs, _disable_refinement=disable_refinement) # get target indices and point-set target_indices, target = get_target_points_and_indices( fspace, outer_bdy_id) # }}} # build the operations from pytential import bind, sym r""" ..math: x \in \Sigma grad_op(x) = \nabla( \int_\Gamma( u(y) \partial_n H_0^{(1)}(\kappa |x - y|) )d\gamma(y) ) """ grad_op = pyt_inner_normal_sign * sym.grad( ambient_dim, sym.D(HelmholtzKernel(ambient_dim), sym.var("u"), k=sym.var("k"), qbx_forced_limit=None)) r""" ..math: x \in \Sigma op(x) = i \kappa \cdot \int_\Gamma( u(y) \partial_n H_0^{(1)}(\kappa |x - y|) )d\gamma(y) """ op = pyt_inner_normal_sign * 1j * sym.var("k") * (sym.D( HelmholtzKernel(ambient_dim), sym.var("u"), k=sym.var("k"), qbx_forced_limit=None)) # bind the operations pyt_grad_op = bind((qbx, target), grad_op) pyt_op = bind((qbx, target), op) # }}} class MatrixFreeB(object): def __init__(self, A, pyt_grad_op, pyt_op, actx, kappa): """ :arg kappa: The wave number """ self.actx = actx self.k = kappa self.pyt_op = pyt_op self.pyt_grad_op = pyt_grad_op self.A = A self.meshmode_src_connection = meshmode_src_connection # {{{ Create some functions needed for multing self.x_fntn = Function(fspace) # CG self.potential_int = Function(fspace) self.potential_int.dat.data[:] = 0.0 self.grad_potential_int = Function(vfspace) self.grad_potential_int.dat.data[:] = 0.0 self.pyt_result = Function(fspace) self.n = FacetNormal(mesh) self.v = TestFunction(fspace) # some meshmode ones self.x_mm_fntn = self.meshmode_src_connection.discr.empty( self.actx, dtype='c') # }}} def mult(self, mat, x, y): # Copy function data into the fivredrake function self.x_fntn.dat.data[:] = x[:] # Transfer the function to meshmode self.meshmode_src_connection.from_firedrake(project( self.x_fntn, dgfspace), out=self.x_mm_fntn) # Restrict to boundary x_mm_fntn_on_bdy = src_bdy_connection(self.x_mm_fntn) # Apply the operation potential_int_mm = self.pyt_op(self.actx, u=x_mm_fntn_on_bdy, k=self.k) grad_potential_int_mm = self.pyt_grad_op(self.actx, u=x_mm_fntn_on_bdy, k=self.k) # Store in firedrake 
self.potential_int.dat.data[target_indices] = potential_int_mm.get( ) for dim in range(grad_potential_int_mm.shape[0]): self.grad_potential_int.dat.data[ target_indices, dim] = grad_potential_int_mm[dim].get() # Integrate the potential r""" Compute the inner products using firedrake. Note this will be subtracted later, hence appears off by a sign. .. math:: \langle n(x) \cdot \nabla( \int_\Gamma( u(y) \partial_n H_0^{(1)}(\kappa |x - y|) )d\gamma(y) ), v \rangle_\Sigma - \langle i \kappa \cdot \int_\Gamma( u(y) \partial_n H_0^{(1)}(\kappa |x - y|) )d\gamma(y), v \rangle_\Sigma """ self.pyt_result = assemble( inner(inner(self.grad_potential_int, self.n), self.v) * ds(outer_bdy_id) - inner(self.potential_int, self.v) * ds(outer_bdy_id)) # y <- Ax - evaluated potential self.A.mult(x, y) with self.pyt_result.dat.vec_ro as ep: y.axpy(-1, ep) # {{{ Compute normal helmholtz operator u = TrialFunction(fspace) v = TestFunction(fspace) r""" .. math:: \langle \nabla u, \nabla v \rangle - \kappa^2 \cdot \langle u, v \rangle - i \kappa \langle u, v \rangle_\Sigma """ a = inner(grad(u), grad(v)) * dx \ - Constant(wave_number**2) * inner(u, v) * dx \ - Constant(1j * wave_number) * inner(u, v) * ds(outer_bdy_id) # get the concrete matrix from a general bilinear form A = assemble(a).M.handle # }}} # {{{ Setup Python matrix B = PETSc.Mat().create() # build matrix context Bctx = MatrixFreeB(A, pyt_grad_op, pyt_op, actx, wave_number) # set up B as same size as A B.setSizes(*A.getSizes()) B.setType(B.Type.PYTHON) B.setPythonContext(Bctx) B.setUp() # }}} # {{{ Create rhs # Remember f is \partial_n(true_sol)|_\Gamma # so we just need to compute \int_\Gamma\partial_n(true_sol) H(x-y) sigma = sym.make_sym_vector("sigma", ambient_dim) r""" ..math: x \in \Sigma grad_op(x) = \nabla( \int_\Gamma( f(y) H_0^{(1)}(\kappa |x - y|) )d\gamma(y) ) """ grad_op = pyt_inner_normal_sign * \ sym.grad(ambient_dim, sym.S(HelmholtzKernel(ambient_dim), sym.n_dot(sigma), k=sym.var("k"), qbx_forced_limit=None)) r""" ..math: x \in \Sigma op(x) = i \kappa \cdot \int_\Gamma( f(y) H_0^{(1)}(\kappa |x - y|) )d\gamma(y) ) """ op = 1j * sym.var("k") * pyt_inner_normal_sign * \ sym.S(HelmholtzKernel(ambient_dim), sym.n_dot(sigma), k=sym.var("k"), qbx_forced_limit=None) rhs_grad_op = bind((qbx, target), grad_op) rhs_op = bind((qbx, target), op) # Transfer to meshmode metadata = {'quadrature_degree': 2 * fspace.ufl_element().degree()} dg_true_sol_grad = project(true_sol_grad_expr, dgvfspace, form_compiler_parameters=metadata) true_sol_grad_mm = meshmode_src_connection.from_firedrake(dg_true_sol_grad, actx=actx) true_sol_grad_mm = src_bdy_connection(true_sol_grad_mm) # Apply the operations f_grad_convoluted_mm = rhs_grad_op(actx, sigma=true_sol_grad_mm, k=wave_number) f_convoluted_mm = rhs_op(actx, sigma=true_sol_grad_mm, k=wave_number) # Transfer function back to firedrake f_grad_convoluted = Function(vfspace) f_convoluted = Function(fspace) f_grad_convoluted.dat.data[:] = 0.0 f_convoluted.dat.data[:] = 0.0 for dim in range(f_grad_convoluted_mm.shape[0]): f_grad_convoluted.dat.data[target_indices, dim] = f_grad_convoluted_mm[dim].get() f_convoluted.dat.data[target_indices] = f_convoluted_mm.get() r""" \langle f, v \rangle_\Gamma + \langle i \kappa \cdot \int_\Gamma( f(y) H_0^{(1)}(\kappa |x - y|) )d\gamma(y), v \rangle_\Sigma - \langle n(x) \cdot \nabla( \int_\Gamma( f(y) H_0^{(1)}(\kappa |x - y|) )d\gamma(y) ), v \rangle_\Sigma """ rhs_form = inner(inner(true_sol_grad_expr, FacetNormal(mesh)), v) * ds(scatterer_bdy_id, metadata=metadata) \ + 
inner(f_convoluted, v) * ds(outer_bdy_id) \ - inner(inner(f_grad_convoluted, FacetNormal(mesh)), v) * ds(outer_bdy_id) rhs = assemble(rhs_form) # {{{ set up a solver: solution = Function(fspace, name="Computed Solution") # {{{ Used for preconditioning if 'gamma' in solver_parameters or 'beta' in solver_parameters: gamma = complex(solver_parameters.pop('gamma', 1.0)) import cmath beta = complex(solver_parameters.pop('beta', cmath.sqrt(gamma))) p = inner(grad(u), grad(v)) * dx \ - Constant(wave_number**2 * gamma) * inner(u, v) * dx \ - Constant(1j * wave_number * beta) * inner(u, v) * ds(outer_bdy_id) P = assemble(p).M.handle else: P = A # }}} # Set up options to contain solver parameters: ksp = PETSc.KSP().create() if solver_parameters['pc_type'] == 'pyamg': del solver_parameters['pc_type'] # We are using the AMG preconditioner pyamg_tol = solver_parameters.get('pyamg_tol', None) if pyamg_tol is not None: pyamg_tol = float(pyamg_tol) pyamg_maxiter = solver_parameters.get('pyamg_maxiter', None) if pyamg_maxiter is not None: pyamg_maxiter = int(pyamg_maxiter) ksp.setOperators(B) ksp.setUp() pc = ksp.pc pc.setType(pc.Type.PYTHON) pc.setPythonContext( AMGTransmissionPreconditioner(wave_number, fspace, A, tol=pyamg_tol, maxiter=pyamg_maxiter, use_plane_waves=True)) # Otherwise use regular preconditioner else: ksp.setOperators(B, P) options_manager = OptionsManager(solver_parameters, options_prefix) options_manager.set_from_options(ksp) import petsc4py.PETSc petsc4py.PETSc.Sys.popErrorHandler() with rhs.dat.vec_ro as b: with solution.dat.vec as x: ksp.solve(b, x) # }}} return ksp, solution
def get_boundary_masks(mesh, key, finat_element):
    """Get masks for facet dofs.

    :arg mesh: The mesh to use.
    :arg key: Canonicalised entity_dofs (see :func:`entity_dofs_key`).
    :arg finat_element: The FInAT element.
    :returns: A dict mapping ``"topological"`` and ``"geometric"`` keys
        to boundary nodes, or ``None``.  If not ``None``, each entry in
        the mask dict is a 3-tuple of a Section, an array of indices,
        and an array indicating which points in the Section correspond
        to the facets of the cell.  If ``section.getDof(p)`` is
        non-zero, then there are ndof basis functions topologically
        associated with points in the closure of point p (for
        "topological"; for "geometric", ndof basis functions with
        non-zero support on points in the closure of p).  The basis
        function indices are in the index array, starting at
        ``section.getOffset(p)``.
    """
    if not mesh.cell_set._extruded:
        return None
    masks = {}
    _, kind = key
    assert kind in {"cell", "interior_facet"}
    dim = finat_element.cell.get_spatial_dimension()
    ecd = finat_element.entity_closure_dofs()
    try:
        esd = finat_element.entity_support_dofs()
    except NotImplementedError:
        # 4-D cells
        esd = None
    # Number of entities on cell excepting the cell itself.
    chart = sum(map(len, ecd.values())) - 1
    closure_section = PETSc.Section().create(comm=PETSc.COMM_SELF)
    support_section = PETSc.Section().create(comm=PETSc.COMM_SELF)
    # Double up for interior facets.
    if kind == "cell":
        ncell = 1
    else:
        ncell = 2
    closure_section.setChart(0, ncell * chart)
    support_section.setChart(0, ncell * chart)
    closure_indices = []
    support_indices = []
    facet_points = []
    p = 0
    offset = finat_element.space_dimension()
    for cell in range(ncell):
        for ent in sorted(ecd.keys()):
            # Never need closure of cell
            if sum(ent) == dim:
                continue
            for key in sorted(ecd[ent].keys()):
                closure_section.setDof(p, len(ecd[ent][key]))
                vals = numpy.asarray(sorted(ecd[ent][key]), dtype=IntType)
                closure_indices.extend(vals + cell * offset)
                if esd is not None:
                    support_section.setDof(p, ncell * len(esd[ent][key]))
                    vals = numpy.asarray(sorted(esd[ent][key]), dtype=IntType)
                    support_indices.extend(vals + cell * offset)
                if sum(ent) == dim - 1:
                    facet_points.append(p)
                p += 1
    closure_section.setUp()
    support_section.setUp()
    closure_indices = numpy.asarray(closure_indices, dtype=IntType)
    support_indices = numpy.asarray(support_indices, dtype=IntType)
    facet_points = numpy.asarray(facet_points, dtype=IntType)
    masks["topological"] = (closure_section, closure_indices, facet_points)
    masks["geometric"] = (support_section, support_indices, facet_points)
    return masks
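# Consumption sketch (hedged, illustrative only: mesh, key and finat_element
# stand for the same arguments as above).  The Section records, per entity
# point, how many basis-function indices it owns and where they start in the
# flat index array.
masks = get_boundary_masks(mesh, key, finat_element)
if masks is not None:
    section, indices, facet_points = masks["topological"]
    for p in facet_points:
        ndof = section.getDof(p)
        offset = section.getOffset(p)
        print("facet point", p, "-> basis functions", indices[offset:offset + ndof])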
def __init__(self, problem, **kwargs): r""" :arg problem: A :class:`NonlinearVariationalProblem` to solve. :kwarg nullspace: an optional :class:`.VectorSpaceBasis` (or :class:`.MixedVectorSpaceBasis`) spanning the null space of the operator. :kwarg transpose_nullspace: as for the nullspace, but used to make the right hand side consistent. :kwarg near_nullspace: as for the nullspace, but used to specify the near nullspace (for multigrid solvers). :kwarg solver_parameters: Solver parameters to pass to PETSc. This should be a dict mapping PETSc options to values. :kwarg appctx: A dictionary containing application context that is passed to the preconditioner if matrix-free. :kwarg options_prefix: an optional prefix used to distinguish PETSc options. If not provided a unique prefix will be created. Use this option if you want to pass options to the solver from the command line in addition to through the ``solver_parameters`` dict. :kwarg pre_jacobian_callback: A user-defined function that will be called immediately before Jacobian assembly. This can be used, for example, to update a coefficient function that has a complicated dependence on the unknown solution. :kwarg pre_function_callback: As above, but called immediately before residual assembly Example usage of the ``solver_parameters`` option: to set the nonlinear solver type to just use a linear solver, use .. code-block:: python {'snes_type': 'ksponly'} PETSc flag options (where the presence of the option means something) should be specified with ``None``. For example: .. code-block:: python {'snes_monitor': None} To use the ``pre_jacobian_callback`` or ``pre_function_callback`` functionality, the user-defined function must accept the current solution as a petsc4py Vec. Example usage is given below: .. code-block:: python def update_diffusivity(current_solution): with cursol.dat.vec_wo as v: current_solution.copy(v) solve(trial*test*dx == dot(grad(cursol), grad(test))*dx, diffusivity) solver = NonlinearVariationalSolver(problem, pre_jacobian_callback=update_diffusivity) """ assert isinstance(problem, NonlinearVariationalProblem) parameters = kwargs.get("solver_parameters") if "parameters" in kwargs: raise TypeError("Use solver_parameters, not parameters") nullspace = kwargs.get("nullspace") nullspace_T = kwargs.get("transpose_nullspace") near_nullspace = kwargs.get("near_nullspace") options_prefix = kwargs.get("options_prefix") pre_j_callback = kwargs.get("pre_jacobian_callback") pre_f_callback = kwargs.get("pre_function_callback") super(NonlinearVariationalSolver, self).__init__(parameters, options_prefix) # Allow anything, interpret "matfree" as matrix_free. mat_type = self.parameters.get("mat_type") pmat_type = self.parameters.get("pmat_type") matfree = mat_type == "matfree" pmatfree = pmat_type == "matfree" appctx = kwargs.get("appctx") ctx = solving_utils._SNESContext(problem, mat_type=mat_type, pmat_type=pmat_type, appctx=appctx, pre_jacobian_callback=pre_j_callback, pre_function_callback=pre_f_callback, options_prefix=self.options_prefix) # No preconditioner by default for matrix-free if (problem.Jp is not None and pmatfree) or matfree: self.set_default_parameter("pc_type", "none") elif ctx.is_mixed: # Mixed problem, use jacobi pc if user has not supplied # one. 
self.set_default_parameter("pc_type", "jacobi") self.snes = PETSc.SNES().create(comm=problem.dm.comm) self._problem = problem self._ctx = ctx self._work = problem.u.dof_dset.layout_vec.duplicate() self.snes.setDM(problem.dm) ctx.set_function(self.snes) ctx.set_jacobian(self.snes) ctx.set_nullspace(nullspace, problem.J.arguments()[0].function_space()._ises, transpose=False, near=False) ctx.set_nullspace(nullspace_T, problem.J.arguments()[1].function_space()._ises, transpose=True, near=False) ctx.set_nullspace(near_nullspace, problem.J.arguments()[0].function_space()._ises, transpose=False, near=True) ctx._nullspace = nullspace ctx._nullspace_T = nullspace_T ctx._near_nullspace = near_nullspace # Set from options now, so that people who want to noodle with # the snes object directly (mostly Patrick), can. We need the # DM with an app context in place so that if the DM is active # on a subKSP the context is available. dm = self.snes.getDM() with dmhooks.appctx(dm, self._ctx): self.set_from_options(self.snes) # Used for custom grid transfer. self._transfer_operators = () self._setup = False
def initialize(self, pc):
    _, P = pc.getOperators()
    assert P.type == "python"
    context = P.getPythonContext()
    (self.J, self.bcs) = (context.a, context.row_bcs)

    test, trial = self.J.arguments()
    if test.function_space() != trial.function_space():
        raise NotImplementedError("test and trial spaces must be the same")

    Pk = test.function_space()
    element = Pk.ufl_element()
    shape = element.value_shape()
    mesh = Pk.ufl_domain()
    if len(shape) == 0:
        P1 = firedrake.FunctionSpace(mesh, "CG", 1)
    elif len(shape) == 1:
        # Rank-1 value shape: a vector-valued space.
        P1 = firedrake.VectorFunctionSpace(mesh, "CG", 1, dim=shape[0])
    else:
        P1 = firedrake.TensorFunctionSpace(mesh, "CG", 1, shape=shape,
                                           symmetry=element.symmetry())

    # TODO: A smarter low-order operator would also interpolate
    # any coefficients to the coarse space.
    mapper = ArgumentReplacer({test: firedrake.TestFunction(P1),
                               trial: firedrake.TrialFunction(P1)})
    self.lo_J = map_integrands.map_integrand_dags(mapper, self.J)

    lo_bcs = []
    for bc in self.bcs:
        # Don't actually need the value, since it's only used for
        # killing parts of the restriction matrix.
        lo_bcs.append(firedrake.DirichletBC(P1, firedrake.zero(P1.shape),
                                            bc.sub_domain,
                                            method=bc.method))
    self.lo_bcs = tuple(lo_bcs)

    mat_type = PETSc.Options().getString(pc.getOptionsPrefix() + "lo_mat_type",
                                         firedrake.parameters["default_matrix_type"])
    self.lo_op = firedrake.assemble(self.lo_J, bcs=self.lo_bcs,
                                    mat_type=mat_type)
    self.lo_op.force_evaluation()
    A, P = pc.getOperators()
    nearnullsp = P.getNearNullSpace()
    if nearnullsp.handle != 0:
        # Actually have a near nullspace
        tmp = firedrake.Function(Pk)
        low = firedrake.Function(P1)
        vecs = []
        for vec in nearnullsp.getVecs():
            with tmp.dat.vec as v:
                vec.copy(v)
            low.interpolate(tmp)
            with low.dat.vec_ro as v:
                vecs.append(v.copy())
        nullsp = PETSc.NullSpace().create(vectors=vecs, comm=pc.comm)
        self.lo_op.petscmat.setNearNullSpace(nullsp)

    lo = PETSc.PC().create(comm=pc.comm)
    lo.incrementTabLevel(1, parent=pc)
    lo.setOperators(self.lo_op.petscmat, self.lo_op.petscmat)
    lo.setOptionsPrefix(pc.getOptionsPrefix() + "lo_")
    lo.setFromOptions()
    self.lo = lo
    self.restriction = restriction_matrix(Pk, P1, self.bcs, self.lo_bcs)

    self.work = self.lo_op.petscmat.createVecs()
    if len(self.bcs) > 0:
        bc_nodes = numpy.unique(numpy.concatenate([bc.nodes for bc in self.bcs]))
        bc_nodes = bc_nodes[bc_nodes < Pk.dof_dset.size]
        bc_iset = PETSc.IS().createBlock(numpy.prod(shape), bc_nodes,
                                         comm=PETSc.COMM_SELF)
        self.bc_indices = bc_iset.getIndices()
        bc_iset.destroy()
    else:
        self.bc_indices = numpy.empty(0, dtype=numpy.int32)
def createSubMatrix(self, mat, row_is, col_is, target=None):
    if target is not None:
        # Repeat call, just return the matrix, since we don't
        # actually assemble in here.
        target.assemble()
        return target

    # These are the sets of ISes of which the row and column
    # space consist.
    row_ises = self._y.function_space().dof_dset.field_ises
    col_ises = self._x.function_space().dof_dset.field_ises

    row_inds = find_sub_block(row_is, row_ises)
    if row_is == col_is and row_ises == col_ises:
        col_inds = row_inds
    else:
        col_inds = find_sub_block(col_is, col_ises)

    splitter = ExtractSubBlock()
    asub = splitter.split(self.a, argument_indices=(row_inds, col_inds))
    Wrow = asub.arguments()[0].function_space()
    Wcol = asub.arguments()[1].function_space()

    row_bcs = []
    col_bcs = []

    for bc in self.bcs:
        if isinstance(bc, DirichletBC):
            bc_temp = bc.reconstruct(field=row_inds, V=Wrow, g=bc.function_arg,
                                     sub_domain=bc.sub_domain, use_split=True)
        elif isinstance(bc, EquationBCSplit):
            bc_temp = bc.reconstruct(field=row_inds, V=Wrow,
                                     row_field=row_inds, col_field=col_inds,
                                     use_split=True)
        if bc_temp is not None:
            row_bcs.append(bc_temp)

    if Wrow == Wcol and row_inds == col_inds and self.bcs == self.bcs_col:
        col_bcs = row_bcs
    else:
        for bc in self.bcs_col:
            if isinstance(bc, DirichletBC):
                bc_temp = bc.reconstruct(field=col_inds, V=Wcol, g=bc.function_arg,
                                         sub_domain=bc.sub_domain, use_split=True)
            elif isinstance(bc, EquationBCSplit):
                bc_temp = bc.reconstruct(field=col_inds, V=Wcol,
                                         row_field=row_inds, col_field=col_inds,
                                         use_split=True)
            if bc_temp is not None:
                col_bcs.append(bc_temp)

    submat_ctx = ImplicitMatrixContext(asub,
                                       row_bcs=row_bcs,
                                       col_bcs=col_bcs,
                                       fc_params=self.fc_params,
                                       appctx=self.appctx)
    submat_ctx.on_diag = self.on_diag and row_inds == col_inds
    submat = PETSc.Mat().create(comm=mat.comm)
    submat.setType("python")
    submat.setSizes((submat_ctx.row_sizes, submat_ctx.col_sizes),
                    bsize=submat_ctx.block_size)
    submat.setPythonContext(submat_ctx)
    submat.setUp()

    return submat
# Define weak form
a = ((Ub*phi - 1./(Ro*k2)*psi + 1./Ro*Hb*vphi)*u)*dx \
    + (((dUb - psi/Ro)*phi + Ub*psi - 1./Ro*Hb*vphi.dx(0))*v)*dx \
    + (1./Ro*phi*eta - 1./(Ro*k2)*psi*eta.dx(0) + vphi*Ub)*dx
m = (phi*u + psi*v + vphi*eta)*dx

# Build PETSc operators
petsc_a = assemble(a, mat_type='aij', bcs=bc).M.handle
petsc_m = assemble(m, mat_type='aij', bcs=bc).M.handle

# Define PETSc options
opts = PETSc.Options()
opts.setValue("eps_gen_non_hermitian", None)
opts.setValue("st_pc_factor_shift_type", "NONZERO")
# opts.setValue("eps_type", "lapack")
opts.setValue("eps_type", "krylovschur")
opts.setValue("eps_largest_imaginary", None)
opts.setValue("eps_tol", 1e-10)

# Define solver and set options
es = SLEPc.EPS().create(comm=COMM_WORLD)
es.setDimensions(num_eigenvalues)
es.setOperators(petsc_a, petsc_m)
es.setFromOptions()
es.solve()
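# Follow-up sketch (hedged: standard slepc4py calls, not part of the script
# above) showing how the converged eigenpairs would typically be retrieved.
nconv = es.getConverged()
vr, _ = petsc_a.getVecs()
vi, _ = petsc_a.getVecs()
for i in range(min(nconv, num_eigenvalues)):
    lam = es.getEigenpair(i, vr, vi)  # eigenvalue; vr/vi receive the eigenvector
    print("eigenvalue %d: %s" % (i, lam))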
def assemble_mixed_mass_matrix(V_A, V_B): """ Construct the mixed mass matrix of two function spaces, using the TrialFunction from V_A and the TestFunction from V_B. """ if len(V_A) > 1 or len(V_B) > 1: raise NotImplementedError( "Sorry, only implemented for non-mixed spaces") if V_A.ufl_element().mapping() != "identity" or V_B.ufl_element().mapping( ) != "identity": msg = """ Sorry, only implemented for affine maps for now. To do non-affine, we'd need to import much more of the assembly engine of UFL/TSFC/etc to do the assembly on each supermesh cell. """ raise NotImplementedError(msg) mesh_A = V_A.mesh() mesh_B = V_B.mesh() dim = mesh_A.geometric_dimension() assert dim == mesh_B.geometric_dimension() assert dim == mesh_A.topological_dimension() assert dim == mesh_B.topological_dimension() (mh_A, level_A) = get_level(mesh_A) (mh_B, level_B) = get_level(mesh_B) if mesh_A is mesh_B: def likely(cell_A): return [cell_A] else: if (mh_A is None or mh_B is None) or (mh_A is not mh_B): # No mesh hierarchy structure, call libsupermesh for # intersection finding intersections = intersection_finder(mesh_A, mesh_B) likely = intersections.__getitem__ else: # We do have a mesh hierarchy, use it if abs(level_A - level_B) > 1: raise NotImplementedError( "Only works for transferring between adjacent levels for now." ) # What are the cells of B that (probably) intersect with a given cell in A? if level_A > level_B: cell_map = mh_A.fine_to_coarse_cells[level_A] def likely(cell_A): return cell_map[cell_A] elif level_A < level_B: cell_map = mh_A.coarse_to_fine_cells[level_A] def likely(cell_A): return cell_map[cell_A] assert V_A.value_size == V_B.value_size orig_value_size = V_A.value_size if V_A.value_size > 1: V_A = firedrake.FunctionSpace(mesh_A, V_A.ufl_element().sub_elements()[0]) if V_B.value_size > 1: V_B = firedrake.FunctionSpace(mesh_B, V_B.ufl_element().sub_elements()[0]) assert V_A.value_size == 1 assert V_B.value_size == 1 preallocator = PETSc.Mat().create(comm=mesh_A.comm) preallocator.setType(PETSc.Mat.Type.PREALLOCATOR) rset = V_B.dof_dset cset = V_A.dof_dset nrows = rset.layout_vec.getSizes() ncols = cset.layout_vec.getSizes() preallocator.setLGMap(rmap=rset.scalar_lgmap, cmap=cset.scalar_lgmap) preallocator.setSizes(size=(nrows, ncols), bsize=1) preallocator.setUp() zeros = numpy.zeros((V_B.cell_node_map().arity, V_A.cell_node_map().arity), dtype=ScalarType) for cell_A, dofs_A in enumerate(V_A.cell_node_map().values): for cell_B in likely(cell_A): dofs_B = V_B.cell_node_map().values_with_halo[cell_B, :] preallocator.setValuesLocal(dofs_B, dofs_A, zeros) preallocator.assemble() dnnz, onnz = get_preallocation(preallocator, nrows[0]) # Unroll from block to AIJ dnnz = dnnz * cset.cdim dnnz = numpy.repeat(dnnz, rset.cdim) onnz = onnz * cset.cdim onnz = numpy.repeat(onnz, cset.cdim) preallocator.destroy() assert V_A.value_size == V_B.value_size rdim = V_B.dof_dset.cdim cdim = V_A.dof_dset.cdim # # Preallocate M_AB. # mat = PETSc.Mat().create(comm=mesh_A.comm) mat.setType(PETSc.Mat.Type.AIJ) rsizes = tuple(n * rdim for n in nrows) csizes = tuple(c * cdim for c in ncols) mat.setSizes(size=(rsizes, csizes), bsize=(rdim, cdim)) mat.setPreallocationNNZ((dnnz, onnz)) mat.setLGMap(rmap=rset.lgmap, cmap=cset.lgmap) # TODO: Boundary conditions not handled. 
mat.setOption(mat.Option.IGNORE_OFF_PROC_ENTRIES, False) mat.setOption(mat.Option.NEW_NONZERO_ALLOCATION_ERR, True) mat.setOption(mat.Option.KEEP_NONZERO_PATTERN, True) mat.setOption(mat.Option.UNUSED_NONZERO_LOCATION_ERR, False) mat.setOption(mat.Option.IGNORE_ZERO_ENTRIES, True) mat.setUp() evaluate_kernel_A = compile_element(ufl.Coefficient(V_A), name="evaluate_kernel_A") evaluate_kernel_B = compile_element(ufl.Coefficient(V_B), name="evaluate_kernel_B") # We only need one of these since we assume that the two meshes both have CG1 coordinates to_reference_kernel = to_reference_coordinates( mesh_A.coordinates.ufl_element()) if dim == 2: reference_mesh = UnitTriangleMesh(comm=COMM_SELF) else: reference_mesh = UnitTetrahedronMesh(comm=COMM_SELF) evaluate_kernel_S = compile_element(ufl.Coefficient( reference_mesh.coordinates.function_space()), name="evaluate_kernel_S") V_S_A = FunctionSpace(reference_mesh, V_A.ufl_element()) V_S_B = FunctionSpace(reference_mesh, V_B.ufl_element()) M_SS = assemble(inner(TrialFunction(V_S_A), TestFunction(V_S_B)) * dx) M_SS = M_SS.M.handle[:, :] node_locations_A = utils.physical_node_locations( V_S_A).dat.data_ro_with_halos node_locations_B = utils.physical_node_locations( V_S_B).dat.data_ro_with_halos num_nodes_A = node_locations_A.shape[0] num_nodes_B = node_locations_B.shape[0] to_reference_kernel = to_reference_coordinates( mesh_A.coordinates.ufl_element()) supermesh_kernel_str = """ #include "libsupermesh-c.h" #include <petsc.h> %(to_reference)s %(evaluate_S)s %(evaluate_A)s %(evaluate_B)s #define complex_mode %(complex_mode)s #define PrintInfo(...) do { if (PetscLogPrintInfo) printf(__VA_ARGS__); } while (0) static void print_array(PetscScalar *arr, int d) { for(int j=0; j<d; j++) PrintInfo(stderr, "%%+.2f ", arr[j]); } static void print_coordinates(PetscScalar *simplex, int d) { for(int i=0; i<d+1; i++) { PrintInfo("\t"); print_array(&simplex[d*i], d); PrintInfo("\\n"); } } #if complex_mode static void seperate_real_and_imag(PetscScalar *simplex, double *real_simplex, double *imag_simplex, int d) { for(int i=0; i<d+1; i++) { for(int j=0; j<d; j++) { real_simplex[d*i+j] = creal(simplex[d*i+j]); imag_simplex[d*i+j] = cimag(simplex[d*i+j]); } } } static void merge_back_to_simplex(PetscScalar* simplex, double* real_simplex, double* imag_simplex, int d) { print_coordinates(simplex,d); for(int i=0; i<d+1; i++) { for(int j=0; j<d; j++) { simplex[d*i+j] = real_simplex[d*i+j]+imag_simplex[d*i+j]*_Complex_I; } } } #endif int supermesh_kernel(PetscScalar* simplex_A, PetscScalar* simplex_B, PetscScalar* simplices_C, PetscScalar* nodes_A, PetscScalar* nodes_B, PetscScalar* M_SS, PetscScalar* outptr, int num_ele) { #define d %(dim)s #define num_nodes_A %(num_nodes_A)s #define num_nodes_B %(num_nodes_B)s double simplex_ref_measure; PrintInfo("simplex_A coordinates\\n"); print_coordinates(simplex_A, d); PrintInfo("simplex_B coordinates\\n"); print_coordinates(simplex_B, d); int num_elements = num_ele; if (d == 2) simplex_ref_measure = 0.5; else if (d == 3) simplex_ref_measure = 1.0/6; PetscScalar R_AS[num_nodes_A][num_nodes_A]; PetscScalar R_BS[num_nodes_B][num_nodes_B]; PetscScalar coeffs_A[%(num_nodes_A)s] = {0.}; PetscScalar coeffs_B[%(num_nodes_B)s] = {0.}; PetscScalar reference_nodes_A[num_nodes_A][d]; PetscScalar reference_nodes_B[num_nodes_B][d]; #if complex_mode double real_simplex_A[d*(d+1)]; double imag_simplex_A[d*(d+1)]; seperate_real_and_imag(simplex_A, real_simplex_A, imag_simplex_A, d); double real_simplex_B[d*(d+1)]; double imag_simplex_B[d*(d+1)]; 
seperate_real_and_imag(simplex_B, real_simplex_B, imag_simplex_B, d); double real_simplices_C[num_elements*d*(d+1)]; double imag_simplices_C[num_elements*d*(d+1)]; for (int ii=0; ii<num_elements*d*(d+1); ++ii) imag_simplices_C[ii] = 0.; %(libsupermesh_intersect_simplices)s(real_simplex_A, real_simplex_B, real_simplices_C, &num_elements); merge_back_to_simplex(simplex_A, real_simplex_A, imag_simplex_A, d); merge_back_to_simplex(simplex_B, real_simplex_B, imag_simplex_B, d); for(int s=0; s<num_elements; s++) { PetscScalar* simplex_C = &simplices_C[s * d * (d+1)]; double* real_simplex_C = &real_simplices_C[s * d * (d+1)]; double* imag_simplex_C = &imag_simplices_C[s * d * (d+1)]; merge_back_to_simplex(simplex_C, real_simplex_C, imag_simplex_C, d); } #else %(libsupermesh_intersect_simplices)s(simplex_A, simplex_B, simplices_C, &num_elements); #endif PrintInfo("Supermesh consists of %%i elements\\n", num_elements); // would like to do this //PetscScalar MAB[%(num_nodes_A)s][%(num_nodes_B)s] = (PetscScalar (*)[%(num_nodes_B)s])outptr; // but have to do this instead because we don't grok C PetscScalar (*MAB)[num_nodes_A] = (PetscScalar (*)[num_nodes_A])outptr; PetscScalar (*MSS)[num_nodes_A] = (PetscScalar (*)[num_nodes_A])M_SS; // note the underscore for ( int i = 0; i < num_nodes_B; i++ ) { for (int j = 0; j < num_nodes_A; j++) { MAB[i][j] = 0.0; } } for(int s=0; s<num_elements; s++) { PetscScalar* simplex_S = &simplices_C[s * d * (d+1)]; double simplex_S_measure; #if complex_mode double real_simplex_S[d*(d+1)]; double imag_simplex_S[d*(d+1)]; seperate_real_and_imag(simplex_S, real_simplex_S, imag_simplex_S, d); %(libsupermesh_simplex_measure)s(real_simplex_S, &simplex_S_measure); merge_back_to_simplex(simplex_S, real_simplex_S, imag_simplex_S, d); #else %(libsupermesh_simplex_measure)s(simplex_S, &simplex_S_measure); #endif PrintInfo("simplex_S coordinates with measure %%f\\n", simplex_S_measure); print_coordinates(simplex_S, d); PrintInfo("Start mapping nodes for V_A\\n"); PetscScalar physical_nodes_A[num_nodes_A][d]; for(int n=0; n < num_nodes_A; n++) { PetscScalar* reference_node_location = &nodes_A[n*d]; PetscScalar* physical_node_location = physical_nodes_A[n]; for (int j=0; j < d; j++) physical_node_location[j] = 0.0; pyop2_kernel_evaluate_kernel_S(physical_node_location, simplex_S, reference_node_location); PrintInfo("\\tNode "); print_array(reference_node_location, d); PrintInfo(" mapped to "); print_array(physical_node_location, d); PrintInfo("\\n"); } PrintInfo("Start mapping nodes for V_B\\n"); PetscScalar physical_nodes_B[num_nodes_B][d]; for(int n=0; n < num_nodes_B; n++) { PetscScalar* reference_node_location = &nodes_B[n*d]; PetscScalar* physical_node_location = physical_nodes_B[n]; for (int j=0; j < d; j++) physical_node_location[j] = 0.0; pyop2_kernel_evaluate_kernel_S(physical_node_location, simplex_S, reference_node_location); PrintInfo("\\tNode "); print_array(reference_node_location, d); PrintInfo(" mapped to "); print_array(physical_node_location, d); PrintInfo("\\n"); } PrintInfo("==========================================================\\n"); PrintInfo("Start pulling back dof from S into reference space for A.\\n"); for(int n=0; n < num_nodes_A; n++) { for(int i=0; i<d; i++) reference_nodes_A[n][i] = 0.; to_reference_coords_kernel(reference_nodes_A[n], physical_nodes_A[n], simplex_A); PrintInfo("Pulling back "); print_array(physical_nodes_A[n], d); PrintInfo(" to "); print_array(reference_nodes_A[n], d); PrintInfo("\\n"); } PrintInfo("Start pulling back dof from S into 
reference space for B.\\n"); for(int n=0; n < num_nodes_B; n++) { for(int i=0; i<d; i++) reference_nodes_B[n][i] = 0.; to_reference_coords_kernel(reference_nodes_B[n], physical_nodes_B[n], simplex_B); PrintInfo("Pulling back "); print_array(physical_nodes_B[n], d); PrintInfo(" to "); print_array(reference_nodes_B[n], d); PrintInfo("\\n"); } PrintInfo("Start evaluating basis functions of V_A at dofs for V_A on S\\n"); for(int i=0; i<num_nodes_A; i++) { coeffs_A[i] = 1.; for(int j=0; j<num_nodes_A; j++) { R_AS[i][j] = 0.; pyop2_kernel_evaluate_kernel_A(&R_AS[i][j], coeffs_A, reference_nodes_A[j]); } print_array(R_AS[i], num_nodes_A); PrintInfo("\\n"); coeffs_A[i] = 0.; } PrintInfo("Start evaluating basis functions of V_B at dofs for V_B on S\\n"); for(int i=0; i<num_nodes_B; i++) { coeffs_B[i] = 1.; for(int j=0; j<num_nodes_B; j++) { R_BS[i][j] = 0.; pyop2_kernel_evaluate_kernel_B(&R_BS[i][j], coeffs_B, reference_nodes_B[j]); } print_array(R_BS[i], num_nodes_B); PrintInfo("\\n"); coeffs_B[i] = 0.; } PrintInfo("Start doing the matmatmat mult\\n"); for ( int i = 0; i < num_nodes_B; i++ ) { for (int j = 0; j < num_nodes_A; j++) { for ( int k = 0; k < num_nodes_B; k++) { for ( int l = 0; l < num_nodes_A; l++) { MAB[i][j] += (simplex_S_measure/simplex_ref_measure) * R_BS[i][k] * MSS[k][l] * R_AS[j][l]; } } } } } return num_elements; } """ % { "evaluate_S": str(evaluate_kernel_S), "evaluate_A": str(evaluate_kernel_A), "evaluate_B": str(evaluate_kernel_B), "to_reference": str(to_reference_kernel), "num_nodes_A": num_nodes_A, "num_nodes_B": num_nodes_B, "libsupermesh_simplex_measure": "libsupermesh_triangle_area" if dim == 2 else "libsupermesh_tetrahedron_volume", "libsupermesh_intersect_simplices": "libsupermesh_intersect_tris_real" if dim == 2 else "libsupermesh_intersect_tets_real", "dim": dim, "complex_mode": 1 if complex_mode else 0 } dirs = get_petsc_dir() + (sys.prefix, ) includes = ["-I%s/include" % d for d in dirs] libs = ["-L%s/lib" % d for d in dirs] libs = libs + ["-Wl,-rpath,%s/lib" % d for d in dirs] + ["-lpetsc", "-lsupermesh"] lib = load(supermesh_kernel_str, "c", "supermesh_kernel", cppargs=includes, ldargs=libs, argtypes=[ ctypes.c_voidp, ctypes.c_voidp, ctypes.c_voidp, ctypes.c_voidp, ctypes.c_voidp, ctypes.c_voidp, ctypes.c_voidp ], restype=ctypes.c_int) ammm(V_A, V_B, likely, node_locations_A, node_locations_B, M_SS, ctypes.addressof(lib), mat) if orig_value_size == 1: return mat else: (lrows, grows), (lcols, gcols) = mat.getSizes() lrows *= orig_value_size grows *= orig_value_size lcols *= orig_value_size gcols *= orig_value_size size = ((lrows, grows), (lcols, gcols)) context = BlockMatrix(mat, orig_value_size) blockmat = PETSc.Mat().createPython(size, context=context, comm=mat.comm) blockmat.setUp() return blockmat
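# Usage sketch (hedged: shows the usual way such a mixed mass matrix enters a
# supermesh L^2 projection; nothing below is taken from the function above).
from firedrake import TrialFunction, TestFunction, Function, assemble, inner, dx

M_AB = assemble_mixed_mass_matrix(V_A, V_B)   # rows indexed by V_B, columns by V_A
M_BB = assemble(inner(TrialFunction(V_B), TestFunction(V_B)) * dx).M.handle

u_A = Function(V_A)   # source field
u_B = Function(V_B)   # projected result

ksp = PETSc.KSP().create(comm=V_B.mesh().comm)
ksp.setOperators(M_BB)
ksp.setFromOptions()
with u_A.dat.vec_ro as x, u_B.dat.vec as y:
    rhs = y.duplicate()
    M_AB.mult(x, rhs)   # rhs = M_AB u_A
    ksp.solve(rhs, y)   # solve M_BB u_B = rhs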
def print_at_exit(cls):
    """Print citations when exiting."""
    # We devolve to PETSc for actually printing citations.
    PETSc.Options()["citations"] = None
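# Usage sketch (hedged: assumes this classmethod lives on Firedrake's Citations
# registry and that "Rathgeber2016" is one of its pre-registered keys).
from firedrake import Citations

Citations().register("Rathgeber2016")  # record that this paper was relied on
Citations.print_at_exit()              # same effect as passing -citations to PETSc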
def __init__(self, A, P=None, solver_parameters=None,
             nullspace=None, transpose_nullspace=None,
             near_nullspace=None, options_prefix=None):
    """A linear solver for assembled systems (Ax = b).

    :arg A: a :class:`~.MatrixBase` (the operator).
    :arg P: an optional :class:`~.MatrixBase` to construct any
         preconditioner from; if none is supplied ``A`` is
         used to construct the preconditioner.
    :kwarg solver_parameters: (optional) dict of solver parameters.
    :kwarg nullspace: an optional :class:`~.VectorSpaceBasis` (or
        :class:`~.MixedVectorSpaceBasis`) spanning the null space
        of the operator.
    :kwarg transpose_nullspace: as for the nullspace, but used to
        make the right hand side consistent.
    :kwarg near_nullspace: as for the nullspace, but used to set
        the near nullspace.
    :kwarg options_prefix: an optional prefix used to distinguish
        PETSc options.  If not provided a unique prefix will be
        created.  Use this option if you want to pass options
        to the solver from the command line in addition to
        through the ``solver_parameters`` dict.

    .. note::

       Any boundary conditions for this solve *must* have been
       applied when assembling the operator.
    """
    if not isinstance(A, matrix.MatrixBase):
        raise TypeError("Provided operator is a '%s', not a MatrixBase"
                        % type(A).__name__)
    if P is not None and not isinstance(P, matrix.MatrixBase):
        raise TypeError("Provided preconditioner is a '%s', not a MatrixBase"
                        % type(P).__name__)

    self.A = A
    self.comm = A.comm
    self.P = P if P is not None else A

    # Set up parameters mixin
    super(LinearSolver, self).__init__(solver_parameters, options_prefix)

    self.A.petscmat.setOptionsPrefix(self.options_prefix)
    self.P.petscmat.setOptionsPrefix(self.options_prefix)

    # Set some defaults
    self.set_default_parameter("ksp_rtol", "1e-7")
    # If preconditioning matrix is matrix-free, then default to no
    # preconditioning.
    if isinstance(self.P, matrix.ImplicitMatrix):
        self.set_default_parameter("pc_type", "none")
    elif self.P.block_shape != (1, 1):
        # Otherwise, mixed problems default to jacobi.
        self.set_default_parameter("pc_type", "jacobi")

    self.ksp = PETSc.KSP().create(comm=self.comm)

    W = self.test_space
    # DM provides fieldsplits (but not operators)
    self.ksp.setDM(W.dm)
    self.ksp.setDMActive(False)

    if nullspace is not None:
        nullspace._apply(self.A)
        if P is not None:
            nullspace._apply(self.P)
    if transpose_nullspace is not None:
        transpose_nullspace._apply(self.A, transpose=True)
        if P is not None:
            transpose_nullspace._apply(self.P, transpose=True)
    if near_nullspace is not None:
        near_nullspace._apply(self.A, near=True)
        if P is not None:
            near_nullspace._apply(self.P, near=True)

    self.nullspace = nullspace
    self.transpose_nullspace = transpose_nullspace
    self.near_nullspace = near_nullspace
    # Operator setting must come after the null space has been
    # applied.  Force evaluation here.
    self.A.force_evaluation()
    self.P.force_evaluation()
    self.ksp.setOperators(A=self.A.petscmat, P=self.P.petscmat)
    # Set from options now (we're not allowed to change parameters
    # anyway).
    self.set_from_options(self.ksp)
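# Usage sketch (hedged: a typical calling pattern; the form a, right-hand side
# L, space V and bcs are assumed to be defined elsewhere, and boundary
# conditions must already be baked into the assembled operator, as the note
# above says).
from firedrake import assemble, Function, LinearSolver

A = assemble(a, bcs=bcs)
b = assemble(L)
for bc in bcs:
    bc.apply(b)

solver = LinearSolver(A, solver_parameters={"ksp_type": "cg",
                                            "pc_type": "hypre"})
x = Function(V)
solver.solve(x, b)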
def initialize(self, pc): from firedrake import TrialFunction, TestFunction, dx, \ assemble, inner, grad, split, Constant, parameters from firedrake.assemble import allocate_matrix, create_assembly_callable if pc.getType() != "python": raise ValueError("Expecting PC type python") prefix = pc.getOptionsPrefix() + "pcd_" # we assume P has things stuffed inside of it _, P = pc.getOperators() context = P.getPythonContext() test, trial = context.a.arguments() if test.function_space() != trial.function_space(): raise ValueError("Pressure space test and trial space differ") Q = test.function_space() p = TrialFunction(Q) q = TestFunction(Q) mass = p * q * dx # Regularisation to avoid having to think about nullspaces. stiffness = inner(grad(p), grad(q)) * dx + Constant(1e-6) * p * q * dx opts = PETSc.Options() # we're inverting Mp and Kp, so default them to assembled. # Fp only needs its action, so default it to mat-free. # These can of course be overridden. # only Fp is referred to in update, so that's the only # one we stash. default = parameters["default_matrix_type"] Mp_mat_type = opts.getString(prefix + "Mp_mat_type", default) Kp_mat_type = opts.getString(prefix + "Kp_mat_type", default) self.Fp_mat_type = opts.getString(prefix + "Fp_mat_type", "matfree") Mp = assemble(mass, form_compiler_parameters=context.fc_params, mat_type=Mp_mat_type, options_prefix=prefix + "Mp_") Kp = assemble(stiffness, form_compiler_parameters=context.fc_params, mat_type=Kp_mat_type, options_prefix=prefix + "Kp_") Mp.force_evaluation() Kp.force_evaluation() # FIXME: Should we transfer nullspaces over. I think not. Mksp = PETSc.KSP().create(comm=pc.comm) Mksp.incrementTabLevel(1, parent=pc) Mksp.setOptionsPrefix(prefix + "Mp_") Mksp.setOperators(Mp.petscmat) Mksp.setUp() Mksp.setFromOptions() self.Mksp = Mksp Kksp = PETSc.KSP().create(comm=pc.comm) Kksp.incrementTabLevel(1, parent=pc) Kksp.setOptionsPrefix(prefix + "Kp_") Kksp.setOperators(Kp.petscmat) Kksp.setUp() Kksp.setFromOptions() self.Kksp = Kksp state = context.appctx["state"] Re = context.appctx.get("Re", 1.0) velid = context.appctx["velocity_space"] u0 = split(state)[velid] fp = 1.0 / Re * inner(grad(p), grad(q)) * dx + inner(u0, grad(p)) * q * dx self.Re = Re self.Fp = allocate_matrix(fp, form_compiler_parameters=context.fc_params, mat_type=self.Fp_mat_type, options_prefix=prefix + "Fp_") self._assemble_Fp = create_assembly_callable( fp, tensor=self.Fp, form_compiler_parameters=context.fc_params, mat_type=self.Fp_mat_type) self._assemble_Fp() self.Fp.force_evaluation() Fpmat = self.Fp.petscmat self.workspace = [Fpmat.createVecLeft() for i in (0, 1)]
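# Usage sketch (hedged: the option names follow Firedrake's documented PCD
# example; "Re" and "velocity_space" are supplied through appctx and the
# solver context provides "state", as the initialize() above expects).
appctx = {"Re": Re, "velocity_space": 0}
solver_parameters = {
    "mat_type": "matfree",
    "ksp_type": "fgmres",
    "pc_type": "fieldsplit",
    "pc_fieldsplit_type": "schur",
    "pc_fieldsplit_schur_fact_type": "lower",
    "fieldsplit_0_ksp_type": "preonly",
    "fieldsplit_0_pc_type": "python",
    "fieldsplit_0_pc_python_type": "firedrake.AssembledPC",
    "fieldsplit_0_assembled_pc_type": "lu",
    "fieldsplit_1_ksp_type": "gmres",
    "fieldsplit_1_pc_type": "python",
    "fieldsplit_1_pc_python_type": "firedrake.PCDPC",
    "fieldsplit_1_pcd_Mp_pc_type": "lu",
    "fieldsplit_1_pcd_Kp_pc_type": "lu",
    "fieldsplit_1_pcd_Fp_mat_type": "matfree",
}
# solve(F == 0, w, bcs=bcs, solver_parameters=solver_parameters, appctx=appctx)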
def __init__(self, problem, **kwargs):
    """
    Solve a :class:`NonlinearVariationalProblem` on a hierarchy of meshes.

    :arg problem: A :class:`NonlinearVariationalProblem` or
         iterable thereof (if specifying the problem on each level
         by hand).
    :kwarg nullspace: an optional :class:`.VectorSpaceBasis` (or
         :class:`MixedVectorSpaceBasis`) spanning the null space of the
         operator.
    :kwarg solver_parameters: Solver parameters to pass to PETSc.
         This should be a dict mapping PETSc options to values.
         PETSc flag options should be specified with `bool`
         values (:data:`True` for on, :data:`False` for off).
    :kwarg options_prefix: an optional prefix used to distinguish
         PETSc options.  If not provided a unique prefix will be
         created.  Use this option if you want to pass options to the
         solver from the command line in addition to through the
         :data:`solver_parameters` dict.

    .. note::

       This solver is set up for use with geometric multigrid, that is
       you can use :data:`"snes_type": "fas"` or :data:`"pc_type": "mg"`
       transparently.
    """
    # Do this first so __del__ doesn't barf horribly if we get an
    # error in __init__
    parameters, nullspace, options_prefix \
        = firedrake.solving_utils._extract_kwargs(**kwargs)

    if options_prefix is not None:
        self._opt_prefix = options_prefix
        self._auto_prefix = False
    else:
        self._opt_prefix = "firedrake_nlvsh_%d_" % NLVSHierarchy._id
        self._auto_prefix = True
        NLVSHierarchy._id += 1

    if isinstance(problem, firedrake.NonlinearVariationalProblem):
        # We just got a single problem, so coarsen up the hierarchy
        problems = []
        while True:
            if problem:
                problems.append(problem)
            else:
                break
            problem = coarsen_problem(problem)
        problems.reverse()
    else:
        # User has provided a list of problems
        problems = problem
    ctx = firedrake.solving_utils._SNESContext(problems)

    if nullspace is not None:
        raise NotImplementedError("Coarsening nullspaces not yet implemented")

    snes = PETSc.SNES().create()
    snes.setDM(problems[-1].dm)

    self.problems = problems
    self.snes = snes
    self.ctx = ctx
    self.ctx.set_function(self.snes)
    self.ctx.set_jacobian(self.snes)

    self.snes.setOptionsPrefix(self._opt_prefix)

    # Allow command-line arguments to override dict parameters
    opts = PETSc.Options()
    for k, v in opts.getAll().iteritems():
        if k.startswith(self._opt_prefix):
            parameters[k[len(self._opt_prefix):]] = v

    self.parameters = parameters
def initialize(self, pc): """Set up the problem context. Take the original mixed problem and reformulate the problem as a hybridized mixed system. A KSP is created for the Lagrange multiplier system. """ from firedrake import (FunctionSpace, Function, Constant, TrialFunction, TrialFunctions, TestFunction, DirichletBC) from firedrake.assemble import (allocate_matrix, create_assembly_callable) from firedrake.formmanipulation import split_form from ufl.algorithms.replace import replace # Extract the problem context prefix = pc.getOptionsPrefix() + "hybridization_" _, P = pc.getOperators() self.ctx = P.getPythonContext() if not isinstance(self.ctx, ImplicitMatrixContext): raise ValueError( "The python context must be an ImplicitMatrixContext") test, trial = self.ctx.a.arguments() V = test.function_space() mesh = V.mesh() if len(V) != 2: raise ValueError("Expecting two function spaces.") if all(Vi.ufl_element().value_shape() for Vi in V): raise ValueError("Expecting an H(div) x L2 pair of spaces.") # Automagically determine which spaces are vector and scalar for i, Vi in enumerate(V): if Vi.ufl_element().sobolev_space().name == "HDiv": self.vidx = i else: assert Vi.ufl_element().sobolev_space().name == "L2" self.pidx = i # Create the space of approximate traces. W = V[self.vidx] if W.ufl_element().family() == "Brezzi-Douglas-Marini": tdegree = W.ufl_element().degree() else: try: # If we have a tensor product element h_deg, v_deg = W.ufl_element().degree() tdegree = (h_deg - 1, v_deg - 1) except TypeError: tdegree = W.ufl_element().degree() - 1 TraceSpace = FunctionSpace(mesh, "HDiv Trace", tdegree) # Break the function spaces and define fully discontinuous spaces broken_elements = ufl.MixedElement( [ufl.BrokenElement(Vi.ufl_element()) for Vi in V]) V_d = FunctionSpace(mesh, broken_elements) # Set up the functions for the original, hybridized # and schur complement systems self.broken_solution = Function(V_d) self.broken_residual = Function(V_d) self.trace_solution = Function(TraceSpace) self.unbroken_solution = Function(V) self.unbroken_residual = Function(V) shapes = (V[self.vidx].finat_element.space_dimension(), np.prod(V[self.vidx].shape)) domain = "{[i,j]: 0 <= i < %d and 0 <= j < %d}" % shapes instructions = """ for i, j w[i,j] = w[i,j] + 1 end """ self.weight = Function(V[self.vidx]) par_loop((domain, instructions), ufl.dx, {"w": (self.weight, INC)}, is_loopy_kernel=True) instructions = """ for i, j vec_out[i,j] = vec_out[i,j] + vec_in[i,j]/w[i,j] end """ self.average_kernel = (domain, instructions) # Create the symbolic Schur-reduction: # Original mixed operator replaced with "broken" # arguments arg_map = {test: TestFunction(V_d), trial: TrialFunction(V_d)} Atilde = Tensor(replace(self.ctx.a, arg_map)) gammar = TestFunction(TraceSpace) n = ufl.FacetNormal(mesh) sigma = TrialFunctions(V_d)[self.vidx] if mesh.cell_set._extruded: Kform = (gammar('+') * ufl.jump(sigma, n=n) * ufl.dS_h + gammar('+') * ufl.jump(sigma, n=n) * ufl.dS_v) else: Kform = (gammar('+') * ufl.jump(sigma, n=n) * ufl.dS) # Here we deal with boundaries. If there are Neumann # conditions (which should be enforced strongly for # H(div)xL^2) then we need to add jump terms on the exterior # facets. If there are Dirichlet conditions (which should be # enforced weakly) then we need to zero out the trace # variables there as they are not active (otherwise the hybrid # problem is not well-posed). 
# If boundary conditions are contained in the ImplicitMatrixContext: if self.ctx.row_bcs: # Find all the subdomains with neumann BCS # These are Dirichlet BCs on the vidx space neumann_subdomains = set() for bc in self.ctx.row_bcs: if bc.function_space().index == self.pidx: raise NotImplementedError( "Dirichlet conditions for scalar variable not supported. Use a weak bc" ) if bc.function_space().index != self.vidx: raise NotImplementedError( "Dirichlet bc set on unsupported space.") # append the set of sub domains subdom = bc.sub_domain if isinstance(subdom, str): neumann_subdomains |= set([subdom]) else: neumann_subdomains |= set( as_tuple(subdom, numbers.Integral)) # separate out the top and bottom bcs extruded_neumann_subdomains = neumann_subdomains & { "top", "bottom" } neumann_subdomains = neumann_subdomains - extruded_neumann_subdomains integrand = gammar * ufl.dot(sigma, n) measures = [] trace_subdomains = [] if mesh.cell_set._extruded: ds = ufl.ds_v for subdomain in sorted(extruded_neumann_subdomains): measures.append({ "top": ufl.ds_t, "bottom": ufl.ds_b }[subdomain]) trace_subdomains.extend( sorted({"top", "bottom"} - extruded_neumann_subdomains)) else: ds = ufl.ds if "on_boundary" in neumann_subdomains: measures.append(ds) else: measures.extend((ds(sd) for sd in sorted(neumann_subdomains))) markers = [int(x) for x in mesh.exterior_facets.unique_markers] dirichlet_subdomains = set(markers) - neumann_subdomains trace_subdomains.extend(sorted(dirichlet_subdomains)) for measure in measures: Kform += integrand * measure trace_bcs = [ DirichletBC(TraceSpace, Constant(0.0), subdomain) for subdomain in trace_subdomains ] else: # No bcs were provided, we assume weak Dirichlet conditions. # We zero out the contribution of the trace variables on # the exterior boundary. Extruded cells will have both # horizontal and vertical facets trace_subdomains = ["on_boundary"] if mesh.cell_set._extruded: trace_subdomains.extend(["bottom", "top"]) trace_bcs = [ DirichletBC(TraceSpace, Constant(0.0), subdomain) for subdomain in trace_subdomains ] # Make a SLATE tensor from Kform K = Tensor(Kform) # Assemble the Schur complement operator and right-hand side self.schur_rhs = Function(TraceSpace) self._assemble_Srhs = create_assembly_callable( K * Atilde.inv * AssembledVector(self.broken_residual), tensor=self.schur_rhs, form_compiler_parameters=self.ctx.fc_params) mat_type = PETSc.Options().getString(prefix + "mat_type", "aij") schur_comp = K * Atilde.inv * K.T self.S = allocate_matrix(schur_comp, bcs=trace_bcs, form_compiler_parameters=self.ctx.fc_params, mat_type=mat_type, options_prefix=prefix) self._assemble_S = create_assembly_callable( schur_comp, tensor=self.S, bcs=trace_bcs, form_compiler_parameters=self.ctx.fc_params, mat_type=mat_type) with timed_region("HybridOperatorAssembly"): self._assemble_S() Smat = self.S.petscmat nullspace = self.ctx.appctx.get("trace_nullspace", None) if nullspace is not None: nsp = nullspace(TraceSpace) Smat.setNullSpace(nsp.nullspace(comm=pc.comm)) # Set up the KSP for the system of Lagrange multipliers trace_ksp = PETSc.KSP().create(comm=pc.comm) trace_ksp.setOptionsPrefix(prefix) trace_ksp.setOperators(Smat) trace_ksp.setUp() trace_ksp.setFromOptions() self.trace_ksp = trace_ksp split_mixed_op = dict(split_form(Atilde.form)) split_trace_op = dict(split_form(K.form)) # Generate reconstruction calls self._reconstruction_calls(split_mixed_op, split_trace_op)
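# A short, pure-Python sketch of the subdomain bookkeeping above, using
# made-up facet markers: strongly-set ("Neumann") subdomains get extra jump
# measures, while every remaining exterior marker becomes a homogeneous
# Dirichlet condition on the trace space; extruded "top"/"bottom" markers
# are handled separately.
neumann_subdomains = {1, "top"}          # hypothetical bc.sub_domain values
markers = [1, 2, 3, 4]                   # hypothetical exterior facet markers

extruded_neumann_subdomains = neumann_subdomains & {"top", "bottom"}
neumann_subdomains -= extruded_neumann_subdomains

# Extruded facets not covered by a Neumann bc need trace bcs...
trace_subdomains = sorted({"top", "bottom"} - extruded_neumann_subdomains)
# ...as do all side markers without a Neumann bc.
dirichlet_subdomains = set(markers) - neumann_subdomains
trace_subdomains.extend(sorted(dirichlet_subdomains))

print(trace_subdomains)   # ['bottom', 2, 3, 4]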
def __del__(self):
    # Clean up the auto-generated prefix entries from the global PETSc
    # options database when this solver goes away.
    if self._auto_prefix and hasattr(self, '_opt_prefix'):
        opts = PETSc.Options()
        for k in self.parameters.keys():
            del opts[self._opt_prefix + k]
        delattr(self, '_opt_prefix')
def lgmap(self, V, bcs, lgmap=None): assert len(V) == 1, "lgmap should not be called on MixedFunctionSpace" V = V.topological if bcs is None or len(bcs) == 0: return lgmap or V.dof_dset.lgmap # Boundary condition list *must* be collectively ordered already. # Key is a sorted list of bc subdomain, bc method, bc component. bc_key = [] for bc in bcs: fs = bc.function_space() while fs.component is not None and fs.parent is not None: fs = fs.parent if fs.topological != V: raise RuntimeError("DirichletBC defined on a different FunctionSpace!") bc_key.append(bc._cache_key) def key(a): tpl, *rest = a if len(tpl) == 1 and isinstance(tpl[0], str): # tpl = ("some_string", ) return (True, tpl[0], (), tuple(rest)) else: # Ex: # tpl = ((facet_dim, ((1,), (2,), (3,))), # (edge_dim, ((1, 3), (1, 4))), # (vert_dim, ((1, 3, 4), ))) return (False, "", tpl, tuple(rest)) bc_key = tuple(sorted(bc_key, key=key)) node_set = V.node_set key = (node_set, V.value_size, lgmap is None, bc_key) try: return self.map_cache[key] except KeyError: pass unblocked = any(bc.function_space().component is not None for bc in bcs) if lgmap is None: lgmap = V.dof_dset.lgmap if unblocked: indices = lgmap.indices.copy() bsize = 1 else: indices = lgmap.block_indices.copy() bsize = lgmap.getBlockSize() assert bsize == V.value_size else: # MatBlock case, LGMap is already unrolled. indices = lgmap.block_indices.copy() bsize = lgmap.getBlockSize() unblocked = True nodes = [] for bc in bcs: if bc.function_space().component is not None: nodes.append(bc.nodes * V.value_size + bc.function_space().component) elif unblocked: tmp = bc.nodes * V.value_size for i in range(V.value_size): nodes.append(tmp + i) else: nodes.append(bc.nodes) nodes = numpy.unique(numpy.concatenate(nodes)) indices[nodes] = -1 return self.map_cache.setdefault(key, PETSc.LGMap().create(indices, bsize=bsize, comm=lgmap.comm))
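# A small petsc4py sketch of the masking trick used by lgmap above: copy the
# local-to-global indices and set the entries belonging to boundary nodes to
# -1, so that local insertion (MatSetValuesLocal) silently drops those
# rows/columns. The node numbers here are made up for illustration.
import numpy
from petsc4py import PETSc

indices = numpy.arange(10, dtype=PETSc.IntType)      # pretend local-to-global map
bc_nodes = numpy.array([0, 9], dtype=PETSc.IntType)  # hypothetical boundary nodes

masked = indices.copy()
masked[bc_nodes] = -1
lgmap = PETSc.LGMap().create(masked, bsize=1, comm=PETSc.COMM_SELF)
print(lgmap.indices)   # boundary entries show up as -1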
def create_interpolation(dmc, dmf): _, clvl = utils.get_level(dmc) _, flvl = utils.get_level(dmf) cctx = dmc.getAppCtx() fctx = dmf.getAppCtx() V_c = dmc.getAttr("__fs__")() V_f = dmf.getAttr("__fs__")() nrow = sum(x.dof_dset.size * x.dof_dset.cdim for x in V_f) ncol = sum(x.dof_dset.size * x.dof_dset.cdim for x in V_c) cfn = firedrake.Function(V_c) ffn = firedrake.Function(V_f) cbcs = cctx._problems[clvl].bcs fbcs = fctx._problems[flvl].bcs class Interpolation(object): def __init__(self, cfn, ffn, cbcs=None, fbcs=None): self.cfn = cfn self.ffn = ffn self.cbcs = cbcs or [] self.fbcs = fbcs or [] def mult(self, mat, x, y, inc=False): with self.cfn.dat.vec as v: x.copy(v) firedrake.prolong(self.cfn, self.ffn) for bc in self.fbcs: bc.zero(self.ffn) with self.ffn.dat.vec_ro as v: if inc: y.axpy(1.0, v) else: v.copy(y) def multAdd(self, mat, x, y, w): if y.handle == w.handle: self.mult(mat, x, w, inc=True) else: self.mult(mat, x, w) w.axpy(1.0, y) def multTranspose(self, mat, x, y, inc=False): with self.ffn.dat.vec as v: x.copy(v) firedrake.restrict(self.ffn, self.cfn) for bc in self.cbcs: bc.zero(self.cfn) with self.cfn.dat.vec_ro as v: if inc: y.axpy(1.0, v) else: v.copy(y) def multTransposeAdd(self, mat, x, y, w): if y.handle == w.handle: self.multTranspose(mat, x, w, inc=True) else: self.multTranspose(mat, x, w) w.axpy(1.0, y) ctx = Interpolation(cfn, ffn, cbcs, fbcs) mat = PETSc.Mat().create() mat.setSizes(((nrow, None), (ncol, None))) mat.setType(mat.Type.PYTHON) mat.setPythonContext(ctx) mat.setUp() return mat, None
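# A self-contained petsc4py sketch of the MATPYTHON pattern used by
# create_interpolation above: the "matrix" is just a Python object supplying
# mult/multTranspose. Here the action is a toy injection between vectors of
# length 3 and 5; the sizes and the transfer rule are arbitrary.
from petsc4py import PETSc


class ToyInterpolation(object):
    def mult(self, mat, x, y):
        # Inject coarse values into every other fine entry.
        ya = y.array
        ya[:] = 0.0
        ya[::2] = x.array_r

    def multTranspose(self, mat, x, y):
        # Restrict by sampling the injected entries.
        y.array[:] = x.array_r[::2]


nrow, ncol = 5, 3
mat = PETSc.Mat().create(comm=PETSc.COMM_SELF)
mat.setSizes(((nrow, nrow), (ncol, ncol)))
mat.setType(mat.Type.PYTHON)
mat.setPythonContext(ToyInterpolation())
mat.setUp()

x = PETSc.Vec().createSeq(ncol)
y = PETSc.Vec().createSeq(nrow)
x.set(1.0)
mat.mult(x, y)
print(y.getArray())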
def run_steady_turbine(**model_options): """ Consider a simple test case with two turbines positioned in a channel. The mesh has been adapted with respect to fluid speed and so has strong anisotropy in the direction of flow. If the default SIPG parameter is used, this steady state problem fails to converge. However, using the automatic SIPG parameter functionality, it should converge. """ # Load an anisotropic mesh from file plex = PETSc.DMPlex().create() abspath = os.path.realpath(__file__) plex.createFromFile( abspath.replace('test_anisotropic.py', 'anisotropic_plex.h5')) mesh2d = Mesh(plex) x, y = SpatialCoordinate(mesh2d) # Create steady state solver object solver_obj = solver2d.FlowSolver2d(mesh2d, Constant(40.0)) options = solver_obj.options options.timestep = 20.0 options.simulation_export_time = 20.0 options.simulation_end_time = 18.0 options.timestepper_type = 'SteadyState' options.timestepper_options.solver_parameters = { 'mat_type': 'aij', 'snes_type': 'newtonls', 'snes_rtol': 1e-8, 'snes_monitor': None, 'pc_type': 'lu', 'pc_factor_mat_solver_type': 'mumps', } options.output_directory = 'outputs' options.fields_to_export = ['uv_2d', 'elev_2d'] options.use_grad_div_viscosity_term = False options.element_family = 'dg-dg' options.horizontal_viscosity = Constant(1.0) options.quadratic_drag_coefficient = Constant(0.0025) options.use_lax_friedrichs_velocity = True options.lax_friedrichs_velocity_scaling_factor = Constant(1.0) options.use_grad_depth_viscosity_term = False options.update(model_options) solver_obj.create_equations() # Apply boundary conditions solver_obj.bnd_functions['shallow_water'] = { 1: { 'uv': Constant([3.0, 0.0]) }, 2: { 'elev': Constant(0.0) }, 3: { 'un': Constant(0.0) }, } def bump(fs, locs, scale=1.0): """Scaled bump function for turbines.""" i = 0 for j in range(len(locs)): x0 = locs[j][0] y0 = locs[j][1] r = locs[j][2] expr1 = (x - x0) * (x - x0) + (y - y0) * (y - y0) expr2 = scale * exp(1 - 1 / (1 - (x - x0) * (x - x0) / r**2)) * exp(1 - 1 / (1 - (y - y0) * (y - y0) / r**2)) i += conditional(lt(expr1, r * r), expr2, 0) return i # Set up turbine array L = 1000.0 # domain length W = 300.0 # domain width D = 18.0 # turbine diameter A = pi * (D / 2)**2 # turbine area locs = [(L / 2 - 8 * D, W / 2, D / 2), (L / 2 + 8 * D, W / 2, D / 2)] # turbine locations # NOTE: We include a correction to account for the fact that the thrust coefficient is based # on an upstream velocity, whereas we are using a depth averaged at-the-turbine velocity # (see Kramer and Piggott 2016, eq. (15)). correction = 4 / (1 + sqrt(1 - A / (40.0 * D)))**2 scaling = len(locs) / assemble( bump(solver_obj.function_spaces.P1DG_2d, locs) * dx) farm_options = TidalTurbineFarmOptions() farm_options.turbine_density = bump(solver_obj.function_spaces.P1DG_2d, locs, scale=scaling) farm_options.turbine_options.diameter = D farm_options.turbine_options.thrust_coefficient = 0.8 * correction solver_obj.options.tidal_turbine_farms['everywhere'] = farm_options # Apply initial guess of inflow velocity and solve solver_obj.assign_initial_conditions(uv=Constant([3.0, 0.0])) solver_obj.iterate()
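# A pure-numpy sketch of the bump expression used for the turbine density
# above, evaluated on a grid instead of symbolically with UFL. The turbine
# location, radius and grid resolution are placeholders.
import numpy as np


def bump(x, y, x0, y0, r, scale=1.0):
    """Smooth, compactly supported bump centred at (x0, y0) with radius r."""
    dx2 = (x - x0) ** 2 / r ** 2
    dy2 = (y - y0) ** 2 / r ** 2
    inside = (x - x0) ** 2 + (y - y0) ** 2 < r ** 2
    out = np.zeros_like(x)
    # Only evaluate the exponentials where they are finite (strictly inside).
    out[inside] = scale * np.exp(1 - 1 / (1 - dx2[inside])) \
                        * np.exp(1 - 1 / (1 - dy2[inside]))
    return out


x, y = np.meshgrid(np.linspace(0, 1000, 201), np.linspace(0, 300, 61))
density = bump(x, y, 500.0 - 8 * 18.0, 150.0, 9.0)
print(density.max())   # close to 1 near the turbine centre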
def initialize(self, pc): from firedrake import TestFunction, parameters from firedrake.assemble import allocate_matrix, create_assembly_callable from firedrake.interpolation import Interpolator from firedrake.solving_utils import _SNESContext from firedrake.matrix_free.operators import ImplicitMatrixContext _, P = pc.getOperators() appctx = self.get_appctx(pc) fcp = appctx.get("form_compiler_parameters") if pc.getType() != "python": raise ValueError("Expecting PC type python") ctx = dmhooks.get_appctx(pc.getDM()) if ctx is None: raise ValueError("No context found.") if not isinstance(ctx, _SNESContext): raise ValueError("Don't know how to get form from %r", ctx) prefix = pc.getOptionsPrefix() options_prefix = prefix + self._prefix opts = PETSc.Options() # Handle the fine operator if type is python if P.getType() == "python": ictx = P.getPythonContext() if ictx is None: raise ValueError("No context found on matrix") if not isinstance(ictx, ImplicitMatrixContext): raise ValueError("Don't know how to get form from %r", ictx) fine_operator = ictx.a fine_bcs = ictx.row_bcs if fine_bcs != ictx.col_bcs: raise NotImplementedError("Row and column bcs must match") fine_mat_type = opts.getString(options_prefix + "mat_type", parameters["default_matrix_type"]) self.fine_op = allocate_matrix(fine_operator, bcs=fine_bcs, form_compiler_parameters=fcp, mat_type=fine_mat_type, options_prefix=options_prefix) self._assemble_fine_op = create_assembly_callable( fine_operator, tensor=self.fine_op, bcs=fine_bcs, form_compiler_parameters=fcp, mat_type=fine_mat_type) self._assemble_fine_op() fine_petscmat = self.fine_op.petscmat else: fine_petscmat = P # Transfer fine operator null space fine_petscmat.setNullSpace(P.getNullSpace()) fine_transpose_nullspace = P.getTransposeNullSpace() if fine_transpose_nullspace.handle != 0: fine_petscmat.setTransposeNullSpace(fine_transpose_nullspace) # Handle the coarse operator coarse_options_prefix = options_prefix + "mg_coarse" coarse_mat_type = opts.getString(coarse_options_prefix + "mat_type", parameters["default_matrix_type"]) get_coarse_space = appctx.get("get_coarse_space", None) if not get_coarse_space: raise ValueError( "Need to provide a callback which provides the coarse space.") coarse_space = get_coarse_space() get_coarse_operator = appctx.get("get_coarse_operator", None) if not get_coarse_operator: raise ValueError( "Need to provide a callback which provides the coarse operator." 
) coarse_operator = get_coarse_operator() coarse_space_bcs = appctx.get("coarse_space_bcs", None) # These should be callbacks which return the relevant nullspaces get_coarse_nullspace = appctx.get("get_coarse_op_nullspace", None) get_coarse_transpose_nullspace = appctx.get( "get_coarse_op_transpose_nullspace", None) self.coarse_op = allocate_matrix(coarse_operator, bcs=coarse_space_bcs, form_compiler_parameters=fcp, mat_type=coarse_mat_type, options_prefix=coarse_options_prefix) self._assemble_coarse_op = create_assembly_callable( coarse_operator, tensor=self.coarse_op, bcs=coarse_space_bcs, form_compiler_parameters=fcp) self._assemble_coarse_op() coarse_opmat = self.coarse_op.petscmat # Set nullspace if provided if get_coarse_nullspace: nsp = get_coarse_nullspace() coarse_opmat.setNullSpace(nsp.nullspace(comm=pc.comm)) if get_coarse_transpose_nullspace: tnsp = get_coarse_transpose_nullspace() coarse_opmat.setTransposeNullSpace(tnsp.nullspace(comm=pc.comm)) interp_petscmat = appctx.get("interpolation_matrix", None) if interp_petscmat is None: # Create interpolation matrix from coarse space to fine space fine_space = ctx.J.arguments()[0].function_space() interpolator = Interpolator(TestFunction(coarse_space), fine_space) interpolation_matrix = interpolator.callable() interp_petscmat = interpolation_matrix.handle # We set up a PCMG object that uses the constructed interpolation # matrix to generate the restriction/prolongation operators. # This is a two-level multigrid preconditioner. pcmg = PETSc.PC().create(comm=pc.comm) pcmg.incrementTabLevel(1, parent=pc) pcmg.setType(pc.Type.MG) pcmg.setOptionsPrefix(options_prefix) pcmg.setMGLevels(2) pcmg.setMGCycleType(pc.MGCycleType.V) pcmg.setMGInterpolation(1, interp_petscmat) pcmg.setOperators(A=fine_petscmat, P=fine_petscmat) # Create new appctx self._ctx_ref = self.new_snes_ctx(pc, coarse_operator, coarse_space_bcs, coarse_mat_type, fcp) coarse_solver = pcmg.getMGCoarseSolve() coarse_solver.setOperators(A=coarse_opmat, P=coarse_opmat) # coarse space dm coarse_dm = coarse_space.dm coarse_solver.setDM(coarse_dm) coarse_solver.setDMActive(False) pcmg.setDM(coarse_dm) pcmg.setFromOptions() self.pc = pcmg self._dm = coarse_dm with dmhooks.add_hooks(coarse_dm, self, appctx=self._ctx_ref, save=False): coarse_solver.setFromOptions()
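# Not the preconditioner above, just a bare sequential petsc4py sketch of the
# two-level PCMG wiring it relies on: give PCMG an interpolation matrix
# between levels and set the coarse KSP's operator explicitly (here a
# Galerkin product). The 1D Poisson problem and sizes are made up.
from petsc4py import PETSc

nf, nc = 7, 3   # fine / coarse interior points of a 1D Poisson problem

# Fine tridiagonal operator (unscaled 1D Laplacian stencil).
A = PETSc.Mat().createAIJ((nf, nf), nnz=3, comm=PETSc.COMM_SELF)
for i in range(nf):
    A.setValue(i, i, 2.0)
    if i > 0:
        A.setValue(i, i - 1, -1.0)
    if i < nf - 1:
        A.setValue(i, i + 1, -1.0)
A.assemble()

# Linear interpolation from the coarse grid: coarse dof j sits at fine dof 2j+1.
P = PETSc.Mat().createAIJ((nf, nc), nnz=2, comm=PETSc.COMM_SELF)
for j in range(nc):
    P.setValue(2 * j + 1, j, 1.0)
    P.setValue(2 * j, j, 0.5)
    P.setValue(2 * j + 2, j, 0.5)
P.assemble()

ksp = PETSc.KSP().create(comm=PETSc.COMM_SELF)
ksp.setOperators(A)
pc = ksp.getPC()
pc.setType(PETSc.PC.Type.MG)
pc.setMGLevels(2)
pc.setMGInterpolation(1, P)          # restriction defaults to P^T
Ac = A.PtAP(P)                       # Galerkin coarse operator
pc.getMGCoarseSolve().setOperators(Ac)
ksp.setTolerances(rtol=1e-10)

b = A.createVecLeft()
x = A.createVecRight()
b.set(1.0)
ksp.solve(b, x)
print(ksp.getIterationNumber(), ksp.getConvergedReason())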
bytes = info["memory"] nz = info["nz_used"] return rows, cols, bytes, nz first = True workaround_flop_counting_bug = True if workaround_flop_counting_bug: # Prior to c91eb2e, PyOP2 overcounted flops by this factor scaling = 3.0 else: scaling = 1.0 sizeof_int = PETSc.IntType().dtype.itemsize sizeof_double = PETSc.ScalarType().dtype.itemsize PETSc.Sys.Print("Int Type has %d bytes, Scalar Type has %d bytes" % (sizeof_int, sizeof_double)) def aij_matvec_bytes(rows, cols, nz, rbs=1, cbs=1): # Gropp et al. 2000 if rbs == cbs and rbs != 1: pass else: rbs = 1 cbs = 1 return ((cols + rows)*sizeof_double # Vec read/write + (rows / rbs)*sizeof_int # Row pointer
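# The aij_matvec_bytes function above is truncated after the row-pointer
# term. The following is a self-contained sketch of the usual Gropp et al.
# (2000) style estimate of memory traffic for y = A*x with an AIJ/BAIJ
# matrix; the terms beyond the row pointer are an assumption here, not the
# original author's code.
def aij_matvec_bytes_sketch(rows, cols, nz, sizeof_int=4, sizeof_double=8,
                            rbs=1, cbs=1):
    if not (rbs == cbs and rbs != 1):
        rbs = cbs = 1
    return ((cols + rows) * sizeof_double        # read x, write y
            + (rows // rbs) * sizeof_int         # row pointers
            + (nz // (rbs * cbs)) * sizeof_int   # column indices (one per block)
            + nz * sizeof_double)                # matrix values


# e.g. a 1e6 x 1e6 matrix with a 7-point stencil:
print(aij_matvec_bytes_sketch(10**6, 10**6, 7 * 10**6) / 2**20, "MiB")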
def __init__(self, Q, fixed_bids=[], extra_bcs=[], direct_solve=False): if isinstance(extra_bcs, fd.DirichletBC): extra_bcs = [extra_bcs] self.direct_solve = direct_solve self.fixed_bids = fixed_bids # fixed parts of bdry self.params = self.get_params() # solver parameters self.Q = Q """ V: type fd.FunctionSpace I: type PETSc.Mat, interpolation matrix between V and ControlSpace """ (V, I_interp) = Q.get_space_for_inner() free_bids = list(V.mesh().topology.exterior_facets.unique_markers) self.free_bids = [int(i) for i in free_bids] # np.int->int for bid in self.fixed_bids: self.free_bids.remove(bid) # Some weak forms have a nullspace. We import the nullspace if no # parts of the bdry are fixed (we assume that a DirichletBC is # sufficient to empty the nullspace). nsp = None if len(self.fixed_bids) == 0: nsp_functions = self.get_nullspace(V) if nsp_functions is not None: nsp = fd.VectorSpaceBasis(nsp_functions) nsp.orthonormalize() bcs = [] # impose homogeneous Dirichlet bcs on bdry parts that are fixed. if len(self.fixed_bids) > 0: dim = V.value_size if dim == 2: zerovector = fd.Constant((0, 0)) elif dim == 3: zerovector = fd.Constant((0, 0, 0)) else: raise NotImplementedError bcs.append(fd.DirichletBC(V, zerovector, self.fixed_bids)) if len(extra_bcs) > 0: bcs += extra_bcs if len(bcs) == 0: bcs = None a = self.get_weak_form(V) A = fd.assemble(a, mat_type='aij', bcs=bcs) ls = fd.LinearSolver(A, solver_parameters=self.params, nullspace=nsp, transpose_nullspace=nsp) self.ls = ls self.A = A.petscmat self.interpolated = False # If the matrix I is passed, replace A with transpose(I)*A*I # and set up a ksp solver for self.riesz_map if I_interp is not None: self.interpolated = True ITAI = self.A.PtAP(I_interp) from firedrake.petsc import PETSc import numpy as np zero_rows = [] # if there are zero-rows, replace them with rows that # have 1 on the diagonal entry for row in range(*ITAI.getOwnershipRange()): (cols, vals) = ITAI.getRow(row) valnorm = np.linalg.norm(vals) if valnorm < 1e-13: zero_rows.append(row) for row in zero_rows: ITAI.setValue(row, row, 1.0) ITAI.assemble() # overwrite the self.A created by get_impl self.A = ITAI # create ksp solver for self.riesz_map Aksp = PETSc.KSP().create(comm=V.comm) Aksp.setOperators(ITAI) Aksp.setType("preonly") Aksp.pc.setType("cholesky") Aksp.pc.setFactorSolverType("mumps") Aksp.setFromOptions() Aksp.setUp() self.Aksp = Aksp
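# A small petsc4py sketch of the PtAP + zero-row repair step above: form
# I^T A I and put a 1 on the diagonal of any row that came out numerically
# empty, so a direct factorisation does not break. Matrix sizes and entries
# are arbitrary.
import numpy as np
from petsc4py import PETSc

n, m = 4, 2
A = PETSc.Mat().createAIJ((n, n), nnz=1, comm=PETSc.COMM_SELF)
for i in range(n):
    A.setValue(i, i, float(i))      # note: row 0 carries only a zero value
A.assemble()

I_interp = PETSc.Mat().createAIJ((n, m), nnz=1, comm=PETSc.COMM_SELF)
I_interp.setValue(0, 0, 1.0)
I_interp.setValue(2, 1, 1.0)
I_interp.assemble()

ITAI = A.PtAP(I_interp)             # I^T A I; here 2x2 with a zero first row
# Allow a new diagonal entry in case the product dropped the structural zero.
ITAI.setOption(PETSc.Mat.Option.NEW_NONZERO_ALLOCATION_ERR, False)

for row in range(*ITAI.getOwnershipRange()):
    _, vals = ITAI.getRow(row)
    if np.linalg.norm(vals) < 1e-13:
        ITAI.setValue(row, row, 1.0)
ITAI.assemble()
ITAI.view()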
def __init__(self, dm, section): super(Halo, self).__init__() # Use a DM to create the halo SFs self.dm = PETSc.DMShell().create(dm.comm) self.dm.setPointSF(dm.getPointSF()) self.dm.setDefaultSection(section)
def __init__(self, problem, **kwargs):
    """
    :arg problem: A :class:`NonlinearVariationalProblem` to solve.
    :kwarg nullspace: an optional :class:`.VectorSpaceBasis` (or
        :class:`.MixedVectorSpaceBasis`) spanning the null space of the
        operator.
    :kwarg solver_parameters: Solver parameters to pass to PETSc.
        This should be a dict mapping PETSc options to values. For
        example, to set the nonlinear solver type to just use a linear
        solver:

        .. code-block:: python

            {'snes_type': 'ksponly'}

        PETSc flag options should be specified with `bool` values.
        For example:

        .. code-block:: python

            {'snes_monitor': True}

    :kwarg options_prefix: an optional prefix used to distinguish
        PETSc options. If not provided a unique prefix will be
        created. Use this option if you want to pass options to the
        solver from the command line in addition to through the
        ``solver_parameters`` dict.
    """
    parameters, nullspace, options_prefix = solving_utils._extract_kwargs(**kwargs)

    # Do this first so __del__ doesn't barf horribly if we get an
    # error in __init__
    if options_prefix is not None:
        self._opt_prefix = options_prefix
        self._auto_prefix = False
    else:
        self._opt_prefix = 'firedrake_snes_%d_' % NonlinearVariationalSolver._id
        self._auto_prefix = True
        NonlinearVariationalSolver._id += 1

    assert isinstance(problem, NonlinearVariationalProblem)

    ctx = solving_utils._SNESContext(problem)

    self.snes = PETSc.SNES().create()
    self.snes.setOptionsPrefix(self._opt_prefix)

    # Mixed problem, use jacobi pc if user has not supplied one.
    if ctx.is_mixed:
        parameters.setdefault('pc_type', 'jacobi')

    # Allow command-line arguments to override dict parameters
    opts = PETSc.Options()
    for k, v in opts.getAll().items():
        if k.startswith(self._opt_prefix):
            parameters[k[len(self._opt_prefix):]] = v

    self._problem = problem
    self._ctx = ctx
    self.snes.setDM(problem.dm)

    ctx.set_function(self.snes)
    ctx.set_jacobian(self.snes)
    ctx.set_nullspace(nullspace, problem.J.arguments()[0].function_space()._ises)

    self.parameters = parameters
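# A self-contained petsc4py sketch of what ctx.set_function/set_jacobian do
# for the solver above: attach residual and Jacobian callbacks to a SNES and
# solve. The toy problem (x**2 - 2 = 0 componentwise) is made up purely to
# show the wiring.
from petsc4py import PETSc

n = 3


def form_function(snes, x, f):
    # Residual F(x) = x^2 - 2, evaluated componentwise.
    f.array[:] = x.array_r ** 2 - 2.0


def form_jacobian(snes, x, J, P):
    # Jacobian is diagonal: dF_i/dx_i = 2 x_i.
    for i in range(n):
        P.setValue(i, i, 2.0 * x.array_r[i])
    P.assemble()
    if J is not P:
        J.assemble()


snes = PETSc.SNES().create(comm=PETSc.COMM_SELF)
r = PETSc.Vec().createSeq(n)
snes.setFunction(form_function, r)

J = PETSc.Mat().createAIJ((n, n), nnz=1, comm=PETSc.COMM_SELF)
snes.setJacobian(form_jacobian, J)

snes.setFromOptions()
x = PETSc.Vec().createSeq(n)
x.set(1.0)
snes.solve(None, x)
print(x.array_r)   # roughly sqrt(2) in each component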