def drop_references(self):
    replace_map = {dep: function_replacement(dep)
                   for dep in self.dependencies()}
    super().drop_references()
    self._rhs = ufl.replace(self._rhs, replace_map)
def getNonlinearVariationalForms(self, X):
    # Generate copies of time dependent functions
    self.tdfButcher = [[] for i in range(len(self.tdf))]
    for j in range(0, len(self.tdf)):
        if self.tdf[j].__class__.__name__ == "CompiledExpression":
            self.tdfButcher[j].append(
                d.Expression(self.tdf[j].cppcode, t=self.tstart))
        else:
            self.tdfButcher[j].append(self.tdf[j])

    if self.n == 1:
        # Add differential equations
        L = [X[0] * self.Q * d.dx - self.u * self.Q * d.dx]
        replaceDict = {self.u: X[0]}
        for k in range(0, len(self.tdf)):
            replaceDict[self.tdf[k]] = self.tdfButcher[k][0]
        L[0] -= self.DT * replace(self.f[0], replaceDict)
    else:
        # Add differential equations
        L = [reduce((lambda x, y: x + y),
                    [X[0][alpha] * self.Q[alpha] * d.dx
                     - self.u[alpha] * self.Q[alpha] * d.dx
                     for alpha in range(self.n)])]
        for alpha in range(self.n):
            replaceDict = {self.u: X[0]}
            for k in range(len(self.tdf)):
                # Each tdfButcher[k] holds a single copy, so index 0 here.
                replaceDict[self.tdf[k]] = self.tdfButcher[k][0]
            L[0] -= self.DT * ufl.replace(self.f[alpha], replaceDict)
    return L
def get_dwr_indicator(F, adjoint_error, test_space=None):
    """
    Generate a dual weighted residual (DWR) error indicator, given a form
    and an approximation of the error in the adjoint solution.

    :arg F: the form
    :arg adjoint_error: the approximation to the adjoint error, either as a
        single :class:`Function`, or in a dictionary
    :kwarg test_space: the :class:`FunctionSpace` that the test function
        lives in, or an appropriate dictionary
    """
    mapping = {}
    if isinstance(adjoint_error, firedrake.Function):
        fs = test_space or adjoint_error.function_space()
        if F.ufl_domain() != fs.mesh():
            raise ValueError(
                "Meshes underlying the form and adjoint error do not match.")
        mapping[firedrake.TestFunction(fs)] = adjoint_error
    else:
        if test_space is None:
            # Map each key to the function space of its adjoint error.
            test_space = {
                key: err.function_space()
                for key, err in adjoint_error.items()
            }
        for key, err in adjoint_error.items():
            if F.ufl_domain() != test_space[key].mesh():
                raise ValueError(
                    "Meshes underlying the form and adjoint error do not match."
                )
            mapping[firedrake.TestFunction(test_space[key])] = err
    return form2indicator(ufl.replace(F, mapping))
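
# Hedged usage sketch for get_dwr_indicator above, assuming Firedrake is
# importable and that adjoint_error has been obtained elsewhere (e.g. from a
# higher-order adjoint solve); all names below are illustrative, and
# get_dwr_indicator also relies on form2indicator from its own module.
import firedrake
import ufl

mesh = firedrake.UnitSquareMesh(8, 8)
V = firedrake.FunctionSpace(mesh, "CG", 1)
u = firedrake.Function(V)
v = firedrake.TestFunction(V)
F = ufl.inner(ufl.grad(u), ufl.grad(v)) * ufl.dx - v * ufl.dx
adjoint_error = firedrake.Function(V)  # stand-in for a computed adjoint error
indicator = get_dwr_indicator(F, adjoint_error)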
def _solve(self, eps, u, reconstructor, P0, solver_parameters): """Find approximate solution with fixed eps and mesh. Use reconstructor to reconstruct the flux and estimate errors. """ p, q = self.p, self.q f, df = self.f, self.df boundary = self.boundary exact_solution = self.u_ex V = u.function_space() mesh = V.mesh() dx = Measure('dx', domain=mesh) eps = Constant(eps) # Problem formulation S = inner(grad(u), grad(u))**(p/2-1) * grad(u) + df S_eps = (eps + inner(grad(u), grad(u)))**(p/2-1) * grad(u) + df v = TestFunction(V) F_eps = ( inner(S_eps, grad(v)) - f*v ) * dx bc = DirichletBC(V, exact_solution if exact_solution else 0.0, boundary) # Solve solve(F_eps == 0, u, bc, solver_parameters=solver_parameters or {}) # Reconstruct flux q in H^q(div) s.t. # q ~ -S # div q ~ f Q = reconstructor.reconstruct(S, f).sub(0, deepcopy=False) # Compute error estimate using equilibrated stress reconstruction v = TestFunction(P0) h = CellDiameters(mesh) Cp = Constant(poincare_const(mesh.type(), p)) est0 = assemble(((Cp*h*(f-div(Q)))**2)**(0.5*q)*v*dx) est1 = assemble(inner(S_eps+Q, S_eps+Q)**(0.5*q)*v*dx) est2 = assemble(inner(S_eps-S, S_eps-S)**(0.5*q)*v*dx) q = float(q) est_h = est0.array()**(1.0/q) + est1.array()**(1.0/q) est_eps = est2.array()**(1.0/q) est_tot = est_h + est_eps Est_h = MPI.sum( mesh.mpi_comm(), (est_h **q).sum() )**(1.0/q) Est_eps = MPI.sum( mesh.mpi_comm(), (est_eps**q).sum() )**(1.0/q) Est_tot = MPI.sum( mesh.mpi_comm(), (est_tot**q).sum() )**(1.0/q) # Wrap arrays as cell functions est_h = self.vecarray_to_cellfunction(est_h, P0) est_eps = self.vecarray_to_cellfunction(est_eps, P0) est_tot = self.vecarray_to_cellfunction(est_tot, P0) # Upper estimate using exact solution if exact_solution: S_exact = ufl.replace(S, {u: exact_solution}) Est_up = sobolev_norm(S-S_exact, q, k=0) else: Est_up = None log(18, 'Error estimates: overall %g, discretization %g, ' 'regularization %g, estimate_up %s' % (Est_tot, Est_h, Est_eps, Est_up)) return u, est_h, est_eps, est_tot, Est_h, Est_eps, Est_tot, Est_up
def _rush_larsen_step(rhs_exprs, diff_rhs_exprs, linear_terms, system_size, y0, stage_solution, dt, time, a, c, v, DX, time_dep_expressions): # If we need to replace the original solution with stage solution repl = None if stage_solution is not None: if system_size > 1: repl = {y0: stage_solution} else: repl = {y0[0]: stage_solution} # If we have time dependent expressions if time_dep_expressions and abs(float(c)) > DOLFIN_EPS: time_ = time time = time + dt * float(c) repl.update(_replace_dict_time_dependent_expression(time_dep_expressions, time_, dt, float(c))) repl[time_] = time # If all terms are linear (using generalized=True) we add a safe # guard to the linearized term. See below safe_guard = sum(linear_terms) == system_size # Add componentwise contribution to rl form rl_ufl_form = ufl.zero() num_rl_steps = 0 for ind in range(system_size): # forward euler step fe_du_i = rhs_exprs[ind] * dt * float(a) # If exact integration if linear_terms[ind]: num_rl_steps += 1 # Rush Larsen step Safeguard the divisor: let's hope # diff_rhs_exprs[ind] is never 1.0e-16! Let's get rid of # this when the conditional fixes land properly in UFL. eps = Constant(1.0e-16) rl_du_i = rhs_exprs[ind] / (diff_rhs_exprs[ind] + eps) * ( ufl.exp(diff_rhs_exprs[ind] * dt) - 1.0) # If safe guard if safe_guard: du_i = ufl.conditional(ufl.lt(abs(diff_rhs_exprs[ind]), 1e-8), fe_du_i, rl_du_i) else: du_i = rl_du_i else: du_i = fe_du_i # If we should replace solution in form with stage solution if repl: du_i = ufl.replace(du_i, repl) rl_ufl_form += (y0[ind] + du_i) * v[ind] return rl_ufl_form * DX
def adjoint_derivative_action(self, nl_deps, dep_index, adj_x):
    # Derived from EquationSolver.derivative_action (see dolfin-adjoint
    # reference below). Code first added 2017-12-07.
    # Re-written 2018-01-28
    # Updated to adjoint only form 2018-01-29
    eq_deps = self.dependencies()
    if dep_index < 0 or dep_index >= len(eq_deps):
        raise EquationException("dep_index out of bounds")
    elif dep_index == 0:
        return adj_x

    dep = eq_deps[dep_index]
    dF = derivative(self._rhs, dep)
    dF = ufl.algorithms.expand_derivatives(dF)
    dF = eliminate_zeros(dF)
    if dF.empty():
        return None

    dF = ufl.replace(dF, dict(zip(self.nonlinear_dependencies(), nl_deps)))
    if self._rank == 0:
        dF = assemble(
            dF, form_compiler_parameters=self._form_compiler_parameters)
        return (-real_function_value(adj_x), dF)
    else:
        assert self._rank == 1
        dF = assemble(
            ufl.action(adjoint(dF), adj_x),
            form_compiler_parameters=self._form_compiler_parameters)
        return (-1.0, dF)
def linear_equation_new_x(eq, x, manager=None, annotate=None, tlm=None):
    lhs, rhs = eq.lhs, eq.rhs
    lhs_x_dep = x in lhs.coefficients()
    rhs_x_dep = x in rhs.coefficients()
    if lhs_x_dep or rhs_x_dep:
        x_old = function_new(x)
        AssignmentSolver(x, x_old).solve(manager=manager, annotate=annotate,
                                         tlm=tlm)
        if lhs_x_dep:
            lhs = ufl.replace(lhs, {x: x_old})
        if rhs_x_dep:
            rhs = ufl.replace(rhs, {x: x_old})
        return lhs == rhs
    else:
        return eq
def replace_placeholders(self, i, new_coefficients):
    assert len(new_coefficients) == len(self._placeholders[i])
    replacements = dict(
        (placeholder, new_coefficient)
        for (placeholder, new_coefficient) in zip(self._placeholders[i],
                                                  new_coefficients))
    return replace(self._form_with_placeholders[i], replacements)
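
# Hedged sketch of the placeholder-substitution pattern used by
# replace_placeholders above: pair each placeholder coefficient with its
# replacement and apply a single replace() over the stored form. Assumes
# legacy FEniCS (dolfin); every name below is illustrative.
from dolfin import UnitSquareMesh, FunctionSpace, Function, TestFunction, dx
from ufl import replace

mesh = UnitSquareMesh(4, 4)
V = FunctionSpace(mesh, "Lagrange", 1)
placeholders = [Function(V), Function(V)]
new_coefficients = [Function(V), Function(V)]
v = TestFunction(V)
form_with_placeholders = (placeholders[0] + placeholders[1]) * v * dx
replacements = dict(zip(placeholders, new_coefficients))
bound_form = replace(form_with_placeholders, replacements)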
def split_arity(form, x, argument):
    if x not in form.coefficients():
        # No dependence on x
        return ufl.classes.Form([]), form

    form_derivative = ufl.derivative(form, x, argument=argument)
    form_derivative = ufl.algorithms.expand_derivatives(form_derivative)
    if x in form_derivative.coefficients():
        # Non-linear
        return ufl.classes.Form([]), form

    arity = len(form.arguments())
    try:
        eq_form = ufl.algorithms.expand_derivatives(
            ufl.replace(form, {x: argument}))
        A = ufl.algorithms.formtransformations.compute_form_with_arity(
            eq_form, arity + 1)
        b = ufl.algorithms.formtransformations.compute_form_with_arity(
            eq_form, arity)
    except ufl.UFLException:
        # UFL error encountered
        return ufl.classes.Form([]), form

    if not is_cached(A):
        # Non-cached higher arity form
        return ufl.classes.Form([]), form

    # Success
    return A, b
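
# Hedged illustration of the arity-raising idea behind split_arity above: when
# a form is linear in x, replacing x with a TrialFunction turns the
# x-dependent part into a bilinear form, and compute_form_with_arity can then
# separate it (A) from the x-independent remainder (b). Assumes legacy FEniCS
# (dolfin); the names are illustrative only.
from dolfin import (Constant, Function, FunctionSpace, TestFunction,
                    TrialFunction, UnitSquareMesh, dx, grad, inner)
from ufl import replace
from ufl.algorithms import expand_derivatives
from ufl.algorithms.formtransformations import compute_form_with_arity

mesh = UnitSquareMesh(4, 4)
V = FunctionSpace(mesh, "Lagrange", 1)
x, v = Function(V), TestFunction(V)
form = inner(grad(x), grad(v)) * dx - Constant(1.0) * v * dx
eq_form = expand_derivatives(replace(form, {x: TrialFunction(V)}))
A = compute_form_with_arity(eq_form, 2)  # part that depended on x
b = compute_form_with_arity(eq_form, 1)  # remainder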
def solve_adjoint(T):
    """
    Computes the adjoint solution of the Poisson problem given solution T
    """
    mesh = T.function_space().mesh()
    mvc = MeshValueCollection("size_t", mesh, 1)
    with XDMFFile("meshes/mf.xdmf") as infile:
        infile.read(mvc, "name_to_read")
    mf = cpp.mesh.MeshFunctionSizet(mesh, mvc)

    V = T.function_space()
    X = SpatialCoordinate(mesh)
    f_ = f(X)
    n = FacetNormal(mesh)

    v = Function(V)
    L = JT(T) + a_s(T, v) - l_s(f_, v)
    adjoint = derivative(L, T, TestFunction(V))
    from ufl import replace
    adjoint = replace(adjoint, {v: TrialFunction(V)})

    lmb = Function(V)
    a, L = lhs(adjoint), rhs(adjoint)
    A = assemble(a)
    b = assemble(L)
    bcs = [DirichletBC(V, Constant(0), mf, 1),
           DirichletBC(V, Constant(0), mf, 2)]
    [bc.apply(A, b) for bc in bcs]
    solve(A, lmb.vector(), b, 'lu')
    return lmb
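
# Hedged sketch of the general pattern used in solve_adjoint above: build a
# Lagrangian-like form with a placeholder Function v, differentiate with
# respect to the state, then swap v for a TrialFunction so that lhs()/rhs()
# can split the resulting adjoint equation. Assumes legacy FEniCS (dolfin);
# J_form, a_form and l_form are illustrative callables, not the originals.
from dolfin import Function, TestFunction, TrialFunction, derivative, lhs, rhs
from ufl import replace

def adjoint_forms(V, T, J_form, a_form, l_form):
    v = Function(V)                       # placeholder for the adjoint variable
    L = J_form(T) + a_form(T, v) - l_form(v)
    adj = derivative(L, T, TestFunction(V))
    adj = replace(adj, {v: TrialFunction(V)})
    return lhs(adj), rhs(adj)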
def adjoint_derivative_action(self, nl_deps, dep_index, adj_x):
    eq_deps = self.dependencies()
    if dep_index < 0 or dep_index >= len(eq_deps):
        raise EquationException("dep_index out of bounds")
    elif dep_index == 0:
        return adj_x

    dep = eq_deps[dep_index]
    dF = derivative(self._rhs, dep, argument=adj_x)
    dF = ufl.algorithms.expand_derivatives(dF)
    dF = eliminate_zeros(dF)
    dF = ufl.replace(dF, dict(zip(self.nonlinear_dependencies(), nl_deps)))
    dF_val = evaluate_expr(dF)

    F = function_new(dep)
    if isinstance(dF_val, float):
        function_assign(F, dF_val)
    elif is_real_function(F):
        dF_val_local = np.array([dF_val.sum()], dtype=np.float64)
        dF_val = np.empty((1, ), dtype=np.float64)
        comm = function_comm(F)
        comm.Allreduce(dF_val_local, dF_val, op=MPI.SUM)
        dF_val = dF_val[0]
        function_assign(F, dF_val)
    else:
        assert function_local_size(F) == len(dF_val)
        function_set_values(F, dF_val)
    return (-1.0, F)
def split(self, fields): from ufl import as_vector, replace from firedrake import NonlinearVariationalProblem as NLVP, FunctionSpace splits = self._splits.get(tuple(fields)) if splits is not None: return splits splits = [] problem = self._problem splitter = ExtractSubBlock() for field in fields: try: if len(field) > 1: raise NotImplementedError("Can't split into subblock") except TypeError: # Just a single field, we can handle that pass F = splitter.split(problem.F, argument_indices=(field, )) J = splitter.split(problem.J, argument_indices=(field, field)) us = problem.u.split() subu = us[field] vec = [] for i, u in enumerate(us): for idx in numpy.ndindex(u.ufl_shape): vec.append(u[idx]) u = as_vector(vec) F = replace(F, {problem.u: u}) J = replace(J, {problem.u: u}) if problem.Jp is not None: Jp = splitter.split(problem.Jp, argument_indices=(field, field)) Jp = replace(Jp, {problem.u: u}) else: Jp = None bcs = [] for bc in problem.bcs: if bc.function_space().index == field: V = FunctionSpace(subu.ufl_domain(), subu.ufl_element()) bcs.append(type(bc)(V, bc.function_arg, bc.sub_domain, method=bc.method)) new_problem = NLVP(F, subu, bcs=bcs, J=J, Jp=None, form_compiler_parameters=problem.form_compiler_parameters) new_problem._constant_jacobian = problem._constant_jacobian splits.append(type(self)(new_problem, mat_type=self.mat_type, pmat_type=self.pmat_type, appctx=self.appctx)) return self._splits.setdefault(tuple(fields), splits)
def subtract_adjoint_derivative_actions(self, adj_x, nl_deps, dep_Bs): for dep_index, dep_B in dep_Bs.items(): if dep_index not in self._adjoint_dF_cache: dep = self.dependencies()[dep_index] dF = derivative(self._F, dep) dF = ufl.algorithms.expand_derivatives(dF) dF = eliminate_zeros(dF) if dF.empty(): dF = None else: dF = adjoint(dF) self._adjoint_dF_cache[dep_index] = dF dF = self._adjoint_dF_cache[dep_index] if dF is not None: if dep_index not in self._adjoint_action_cache: if self._cache_rhs_assembly \ and isinstance(adj_x, backend_Function) \ and is_cached(dF): # Cached matrix action self._adjoint_action_cache[dep_index] = CacheRef() elif self._defer_adjoint_assembly: # Cached form, deferred assembly self._adjoint_action_cache[dep_index] = None else: # Cached form, immediate assembly self._adjoint_action_cache[dep_index] = unbound_form( ufl.action(dF, coefficient=adj_x), list(self.nonlinear_dependencies()) + [adj_x]) cache = self._adjoint_action_cache[dep_index] if cache is None: # Cached form, deferred assembly replace_map = dict(zip(self.nonlinear_dependencies(), nl_deps)) dep_B.sub(ufl.action(ufl.replace(dF, replace_map), coefficient=adj_x)) elif isinstance(cache, CacheRef): # Cached matrix action mat_bc = cache() if mat_bc is None: replace_map = dict(zip(self.nonlinear_dependencies(), nl_deps)) self._adjoint_action_cache[dep_index], (mat, _) = \ assembly_cache().assemble( dF, form_compiler_parameters=self._form_compiler_parameters, # noqa: E501 replace_map=replace_map) else: mat, _ = mat_bc dep_B.sub(matrix_multiply(mat, function_vector(adj_x))) else: # Cached form, immediate assembly assert isinstance(cache, ufl.classes.Form) bind_form(cache, list(nl_deps) + [adj_x]) dep_B.sub(assemble( cache, form_compiler_parameters=self._form_compiler_parameters)) # noqa: E501 unbind_form(cache)
def value():
    if replace_map is None:
        assemble_form = form
    else:
        assemble_form = ufl.replace(form, replace_map)
    return LocalSolver(
        assemble_form,
        form_compiler_parameters=form_compiler_parameters)
def prepare_recompute_component(self, inputs, relevant_outputs):
    if not self.lincom:
        return None
    replace_map = {}
    for dep in self.get_dependencies():
        replace_map[dep.output] = dep.saved_output
    return ufl.replace(self.expr, replace_map)
def _replace_with_saved_output(self):
    if self.expr is None:
        return None
    replace_map = {}
    for dep in self.get_dependencies():
        replace_map[dep.output] = dep.saved_output
    return ufl.replace(self.expr, replace_map)
def _right_hand_side_of_equation(me, E):
    if E in me.all_right_hand_sides:
        E_rhs = me.all_right_hand_sides[E]
    else:
        w = me._variable_for_equation(E)
        E_rhs = rhs(replace(E, {w: TrialFunction(w.function_space())}))
        me.all_right_hand_sides[E] = E_rhs
    return E_rhs
def value():
    if replace_map is None:
        assemble_form = form
    else:
        assemble_form = ufl.replace(form, replace_map)
    local_solver = LocalSolver(assemble_form, solver_type=solver_type)
    local_solver.factorize()
    return local_solver
def repl(t):
    if len(t.form.arguments()) != 2:
        raise TypeError(
            'Trying to replace trial function of a form that is not linear')
    trial = t.form.arguments()[1]
    new_form = ufl.replace(t.form, {trial: new})
    return Term(new_form, t.labels)
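
# Hedged sketch of the trial-function substitution performed by repl above:
# look up the trial function (argument number 1) of a bilinear form and
# replace it with a new coefficient, leaving a linear form in the remaining
# test function. Assumes Firedrake; the names are illustrative.
import firedrake
import ufl

mesh = firedrake.UnitSquareMesh(4, 4)
V = firedrake.FunctionSpace(mesh, "CG", 1)
u, v = firedrake.TrialFunction(V), firedrake.TestFunction(V)
form = u * v * ufl.dx
new = firedrake.Function(V)
trial = form.arguments()[1]      # arguments are ordered (test, trial)
linear_form = ufl.replace(form, {trial: new})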
def replace_transporting_velocity(self, uadv):
    # replace the transporting velocity in any terms that contain it
    if any([t.has_label(transporting_velocity) for t in self.residual]):
        assert uadv is not None
        if uadv == "prognostic":
            self.residual = self.residual.label_map(
                lambda t: t.has_label(transporting_velocity),
                map_if_true=lambda t: Term(ufl.replace(
                    t.form, {t.get(transporting_velocity):
                             split(t.get(subject))[0]}), t.labels)
            )
        else:
            self.residual = self.residual.label_map(
                lambda t: t.has_label(transporting_velocity),
                map_if_true=lambda t: Term(ufl.replace(
                    t.form, {t.get(transporting_velocity): uadv}), t.labels)
            )
        self.residual = transporting_velocity.update_value(self.residual,
                                                           uadv)
def vjp_solve_eval_impl( g: np.array, fenics_solution: fenics.Function, fenics_residual: ufl.Form, fenics_inputs: List[FenicsVariable], bcs: List[fenics.DirichletBC], ) -> Tuple[np.array]: """Computes the gradients of the output with respect to the inputs.""" # Convert tangent covector (adjoint) to a FEniCS variable adj_value = numpy_to_fenics(g, fenics_solution) adj_value = adj_value.vector() F = fenics_residual u = fenics_solution V = u.function_space() dFdu = fenics.derivative(F, u) adFdu = ufl.adjoint( dFdu, reordered_arguments=ufl.algorithms.extract_arguments(dFdu) ) u_adj = fenics.Function(V) adj_F = ufl.action(adFdu, u_adj) adj_F = ufl.replace(adj_F, {u_adj: fenics.TrialFunction(V)}) adj_F_assembled = fenics.assemble(adj_F) if len(bcs) != 0: for bc in bcs: bc.homogenize() hbcs = bcs for bc in hbcs: bc.apply(adj_F_assembled) bc.apply(adj_value) fenics.solve(adj_F_assembled, u_adj.vector(), adj_value) fenics_grads = [] for fenics_input in fenics_inputs: if isinstance(fenics_input, fenics.Function): V = fenics_input.function_space() dFdm = fenics.derivative(F, fenics_input, fenics.TrialFunction(V)) adFdm = fenics.adjoint(dFdm) result = fenics.assemble(-adFdm * u_adj) if isinstance(fenics_input, fenics.Constant): fenics_grad = fenics.Constant(result.sum()) else: # fenics.Function fenics_grad = fenics.Function(V, result) fenics_grads.append(fenics_grad) # Convert FEniCS gradients to jax array representation jax_grads = ( None if fg is None else np.asarray(fenics_to_numpy(fg)) for fg in fenics_grads ) jax_grad_tuple = tuple(jax_grads) return jax_grad_tuple
def coarsen_form(form, Nf, Nc, replace_d):
    # Coarsen a form, by replacing the solution, test and trial functions,
    # and reconstructing each integral with a coarsened quadrature degree.
    # If form is not a Form, then return form.
    return Form([
        f.reconstruct(
            metadata=coarsen_quadrature(f.metadata(), Nf, Nc))
        for f in replace(form, replace_d).integrals()
    ]) if isinstance(form, Form) else form
def evaluate_adj_component(self, inputs, adj_inputs, block_variable, idx, prepared=None): if not self.linear and self.func == block_variable.output: # We are not able to calculate derivatives wrt initial guess. return None F_form = prepared["form"] adj_sol = prepared["adj_sol"] adj_sol_bdy = prepared["adj_sol_bdy"] c = block_variable.output c_rep = block_variable.saved_output if isinstance(c, firedrake.Function): trial_function = firedrake.TrialFunction(c.function_space()) elif isinstance(c, firedrake.Constant): mesh = self.compat.extract_mesh_from_form(F_form) trial_function = firedrake.TrialFunction( c._ad_function_space(mesh)) elif isinstance(c, firedrake.DirichletBC): tmp_bc = self.compat.create_bc( c, value=self.compat.extract_subfunction(adj_sol_bdy, c.function_space())) return [tmp_bc] elif isinstance(c, self.compat.MeshType): # Using CoordianteDerivative requires us to do action before # differentiating, might change in the future. F_form_tmp = firedrake.action(F_form, adj_sol) X = firedrake.SpatialCoordinate(c_rep) dFdm = firedrake.derivative( -F_form_tmp, X, firedrake.TestFunction(c._ad_function_space())) dFdm = self.compat.assemble_adjoint_value(dFdm, **self.assemble_kwargs) return dFdm # dFdm_cache works with original variables, not block saved outputs. if c in self._dFdm_cache: dFdm = self._dFdm_cache[c] else: dFdm = -firedrake.derivative(self.lhs, c, trial_function) dFdm = firedrake.adjoint(dFdm) self._dFdm_cache[c] = dFdm # Replace the form coefficients with checkpointed values. replace_map = self._replace_map(dFdm) replace_map[self.func] = self.get_outputs()[0].saved_output dFdm = replace(dFdm, replace_map) dFdm = dFdm * adj_sol dFdm = self.compat.assemble_adjoint_value(dFdm, **self.assemble_kwargs) return dFdm
def UFLFunction(grid, name, order, expr, renumbering=None, virtualize=True, tempVars=True, predefined=None, **kwargs): scalar = False if type(expr) == list or type(expr) == tuple: expr = ufl.as_vector(expr) elif type(expr) == int or type(expr) == float: expr = ufl.as_vector( [expr] ) scalar = True try: if expr.ufl_shape == (): expr = ufl.as_vector([expr]) scalar = True except: return None _, coeff_ = ufl.algorithms.analysis.extract_arguments_and_coefficients(expr) coeff = {c : c.toVectorCoefficient()[0] for c in coeff_ if len(c.ufl_shape) == 0 and not c.is_cellwise_constant()} expr = replace(expr,coeff) if len(expr.ufl_shape) > 1: raise AttributeError("can only generate grid functions from vector values UFL expressions not from expressions with shape=",expr.ufl_shape) # set up the source class source = UFLFunctionSource(grid, expr, name,order, tempVars=tempVars,virtualize=virtualize, predefined=predefined) coefficients = source.coefficientList numCoefficients = len(coefficients) if renumbering is None: renumbering = dict() renumbering.update((c, i) for i, c in enumerate(sorted((c for c in coefficients if not c.is_cellwise_constant()), key=lambda c: c.count()))) renumbering.update((c, i) for i, c in enumerate(c for c in coefficients if c.is_cellwise_constant())) coefficientNames = ['coefficient' + str(i) if n is None else n for i, n in enumerate(getattr(c, 'name', None) for c in coefficients if not c.is_cellwise_constant())] # call code generator from dune.generator import builder module = builder.load(source.name(), source, "UFLLocalFunction") assert hasattr(module,"UFLLocalFunction"),\ "GridViews of coefficients need to be compatible with the grid view of the ufl local functions" class LocalFunction(module.UFLLocalFunction): def __init__(self, gridView, name, order, *args, **kwargs): self.base = module.UFLLocalFunction self._coefficientNames = {n: i for i, n in enumerate(source.coefficientNames)} if renumbering is not None: self._renumbering = renumbering self._setConstant = self.setConstant # module.UFLLocalFunction.__dict__['setConstant'] self.setConstant = lambda *args: setConstant(self,*args) self.constantShape = source._constantShapes self._constants = [c for c in source.constantList if isinstance(c,Constant)] self.scalar = scalar init(self, gridView, name, order, *args, **kwargs) return LocalFunction
def prepare_evaluate_adj(self, inputs, adj_inputs, relevant_dependencies):
    replaced_coeffs = {}
    for block_variable in self.get_dependencies():
        coeff = block_variable.output
        c_rep = block_variable.saved_output
        if coeff in self.form.coefficients():
            replaced_coeffs[coeff] = c_rep

    form = ufl.replace(self.form, replaced_coeffs)
    return form
def _replace_form(self, form, func=None):
    """Replace the form coefficients with checkpointed values

    func represents the initial guess if relevant.
    """
    replace_map = self._replace_map(form)
    if func is not None and self.func in replace_map:
        self.backend.Function.assign(func, replace_map[self.func])
        replace_map[self.func] = func
    return ufl.replace(form, replace_map)
def momentum(U, h): h = h + H_0 spaces = U.function_space() tests, trials = TestFunction(spaces), TrialFunction(spaces) test_u, test_v = split(tests) test_u_x, test_u_y = test_u.dx(0), test_u.dx(1) test_v_x, test_v_y = test_v.dx(0), test_v.dx(1) u, v = split(U) u_x, u_y = u.dx(0), u.dx(1) v_x, v_y = v.dx(0), v.dx(1) # G11 eqn (3) eps = Constant(5.0e-9, static=True) S_eq = LocalProjectionSolver( (u_x**2) + (v_y**2) + u_x * v_y + 0.25 * ((u_y + v_x)**2) + eps, S) nu_eq = ExprEvaluationSolver(0.5 * B * (S**((1.0 - n) / (2.0 * n))), nu) # GHS09 eqns (1)--(2) F = (-inner(tests, -beta_sq * U) * dx + inner(test_u_x, nu * h * (4.0 * u_x + 2.0 * v_y)) * dx + inner(test_u_y, nu * h * (u_y + v_x)) * dx + inner(test_v_y, nu * h * (4.0 * v_y + 2.0 * u_x)) * dx + inner(test_v_x, nu * h * (u_y + v_x)) * dx + inner(test_u, rho * g * h * grad_b_x) * dx + inner(tests, rho * g * h * grad(h)) * dx) F = ufl.replace(F, {U: trials}) U_eq = EquationSolver(lhs(F) == rhs(F), U, solver_parameters={ "ksp_type": "cg", "pc_type": "hypre", "pc_hypre_type": "boomeramg", "ksp_rtol": 1.0e-12, "ksp_atol": 1.0e-16, "mat_type": "aij" }, adjoint_solver_parameters={ "ksp_type": "preonly", "pc_type": "cholesky", "mat_type": "aij" }, tlm_solver_parameters={ "ksp_type": "preonly", "pc_type": "cholesky", "mat_type": "aij" }, cache_adjoint_jacobian=True, cache_tlm_jacobian=True) return FixedPointSolver([S_eq, nu_eq, U_eq], solver_parameters={ "absolute_tolerance": 1.0e-16, "relative_tolerance": 1.0e-11 })
def eval_dJ(self, angle): # in degrees """ Computes gradient at given angle """ # Update state and mesh self.eval_J(angle) # Solve adjoint eq f = Function(self.V) f.interpolate(self.f) v = Function(self.V) L = self.J_ufl(self.T) + self.a_s(self.T, v) - self.l_s(f, v) adj = derivative(L, self.T, TestFunction(self.V)) from ufl import replace adj = replace(adj, {v: TrialFunction(self.V)}) a, L = lhs(adj), rhs(adj) A = assemble(a) b = assemble(L) bcs = [ DirichletBC(self.V, Constant(0), self.mf, self.outer_marker), DirichletBC(self.V, Constant(0), self.mf, self.inner_marker) ] [bc.apply(A, b) for bc in bcs] solve(A, self.lmb.vector(), b, 'lu') # Compute boundary_adjoint lmb_b = assemble(rhs(adj) + action(adjoint(lhs(adj)), self.lmb)) self.lmb_b = Function(self.V) tmp = Function(self.V) tmp.vector()[:] = lmb_b.get_local() bc_adj = DirichletBC(self.V, tmp, "on_boundary") bc_adj.apply(self.lmb_b.vector()) File("output/b_adjoint.pvd") << self.lmb_b # The gradient, computed with the material derivative s = TestFunction(self.S) d = div(s)*(0.5*self.T*self.T+dot(grad(self.T),grad(self.lmb)) -f*self.lmb)*dx - inner(dot(grad(self.T),grad(s)), grad(self.lmb))*dx\ - inner(grad(self.T), dot(grad(self.lmb),grad(s)))*dx # Hadamard version assuming strong form of gradient is fulfilled d_Hadamard = inner( s, n) * (0.5 * self.T * self.T - inner(n, grad(self.lmb)) * inner(n, grad(self.T))) * ds dJs = assemble(d) dJs_Hadamard = assemble(d_Hadamard) s = Function(self.S) s.vector()[:] = dJs.get_local() XDMFFile("output/singlemesh_gradient_material.xdmf").write(s) s.vector()[:] = dJs_Hadamard.get_local() XDMFFile("output/singlemesh_gradient_hadamard.xdmf").write(s)
def getNonlinearVariationalForms(self, B, X): s = B.shape[0] # Generate copies of time dependent functions self.tdfButcher = [[] for i in range(len(self.tdf))] for j in range(0, len(self.tdf)): for i in range(0, s): if self.tdf[j].__class__.__name__ == "CompiledExpression": self.tdfButcher[j].append(Expression(self.tdf[j].cppcode, t=self.tstart)) else: self.tdfButcher[j].append(self.tdf[j]) if self.n == 1: # Add differential equations L = [X[j + 1] * self.Q * dx - self.u * self.Q * dx for j in range(s - 1)] for i in range(s - 1): for j in range(s): replaceDict = {self.u: X[j]} for k in range(0, len(self.tdf)): replaceDict[self.tdf[k]] = self.tdfButcher[k][j] L[i] -= self.DT * B[i + 1, j] * replace(self.f[0], replaceDict) else: # Add differential equations L = [reduce((lambda x, y: x + y), [X[j + 1][alpha] * self.Q[alpha] * dx - self.u[alpha] * self.Q[alpha] * dx for alpha in range(self.n - self.m)]) for j in range(s - 1)] for alpha in range(self.n - self.m): for i in range(s - 1): for j in range(s): replaceDict = {self.u: X[j]} for k in range(len(self.tdf)): replaceDict[self.tdf[k]] = self.tdfButcher[k][j] L[i] -= self.DT * B[i + 1, j] * replace(self.f[alpha], replaceDict) # Add algebraic equations for beta in range(self.m): for i in range(s - 1): replaceDict = {self.u: X[i + 1]} for k in range(len(self.tdf)): replaceDict[self.tdf[k]] = self.tdfButcher[k][i + 1] L[i] += replace(self.g[beta], {self.u: X[i + 1]}) return L
def forward_solve(self, x, deps=None):
    if deps is None:
        rhs = self._rhs
    else:
        rhs = ufl.replace(self._rhs, dict(zip(self.dependencies(), deps)))
    rhs_val = evaluate_expr(rhs)
    if isinstance(rhs_val, float):
        function_assign(x, rhs_val)
    else:
        assert function_local_size(x) == len(rhs_val)
        function_set_values(x, rhs_val)
def __init__(self, problems):
    problems = as_tuple(problems)
    self._problems = problems
    # Build the jacobian with the correct sparsity pattern.  Note
    # that since matrix assembly is lazy this doesn't actually
    # force an additional assembly of the matrix since in
    # form_jacobian we call assemble again which drops this
    # computation on the floor.
    from firedrake.assemble import assemble
    self._jacs = tuple(
        assemble(problem.J, bcs=problem.bcs,
                 form_compiler_parameters=problem.form_compiler_parameters,
                 nest=problem._nest)
        for problem in problems)
    if problems[-1].Jp is not None:
        self._pjacs = tuple(
            assemble(problem.Jp, bcs=problem.bcs,
                     form_compiler_parameters=problem.form_compiler_parameters,
                     nest=problem._nest)
            for problem in problems)
    else:
        self._pjacs = self._jacs
    # Function to hold current guess
    self._xs = tuple(function.Function(problem.u) for problem in problems)
    self.Fs = tuple(
        ufl.replace(problem.F, {problem.u: x})
        for problem, x in zip(problems, self._xs))
    self.Js = tuple(
        ufl.replace(problem.J, {problem.u: x})
        for problem, x in zip(problems, self._xs))
    if problems[-1].Jp is not None:
        self.Jps = tuple(
            ufl.replace(problem.Jp, {problem.u: x})
            for problem, x in zip(problems, self._xs))
    else:
        self.Jps = tuple(None for _ in problems)
    self._Fs = tuple(
        function.Function(F.arguments()[0].function_space())
        for F in self.Fs)
    self._jacobians_assembled = [False for _ in problems]
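
# Hedged sketch of the guess-substitution pattern used in __init__ above: the
# residual is rewritten in terms of an internal work Function so the nonlinear
# solver can update its guess without touching the user's Function. Assumes
# Firedrake; names are illustrative.
import firedrake
import ufl

mesh = firedrake.UnitSquareMesh(4, 4)
V = firedrake.FunctionSpace(mesh, "CG", 1)
u = firedrake.Function(V)            # the user's solution Function
v = firedrake.TestFunction(V)
F = ufl.inner(ufl.grad(u), ufl.grad(v)) * ufl.dx - v * ufl.dx
x = firedrake.Function(V)            # the solver's internal guess
F_x = ufl.replace(F, {u: x})
J_x = firedrake.derivative(F_x, x)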
def _form_action(self, u):
    """Assemble the form action of this :class:`Matrix`' bilinear form
    onto the :class:`Function` ``u``.

    .. note::
        This is the form **without** any boundary conditions."""
    if not hasattr(self, '_a_action'):
        self._a_action = ufl.action(self._a, u)
    if hasattr(self, '_a_action_coeff'):
        self._a_action = ufl.replace(self._a_action,
                                     {self._a_action_coeff: u})
    self._a_action_coeff = u
    # Since we assemble the cached form, the kernels will already have
    # been compiled and stashed on the form the second time round
    return assemble._assemble(self._a_action)
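
# Hedged sketch of the caching idea in _form_action above: ufl.action binds a
# specific coefficient into the bilinear form, and later calls rebind the
# cached action to a new coefficient with ufl.replace instead of rebuilding
# it. Assumes Firedrake; names are illustrative.
import firedrake
import ufl

mesh = firedrake.UnitSquareMesh(4, 4)
V = firedrake.FunctionSpace(mesh, "CG", 1)
u, v = firedrake.TrialFunction(V), firedrake.TestFunction(V)
a = ufl.inner(ufl.grad(u), ufl.grad(v)) * ufl.dx
w1, w2 = firedrake.Function(V), firedrake.Function(V)
a_action = ufl.action(a, w1)                 # first call: bind w1
a_action = ufl.replace(a_action, {w1: w2})   # later call: rebind to w2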
def _real_mangle(form):
    """If the form contains arguments in the Real function space, replace
    these with literal 1 before passing to tsfc."""
    a = form.arguments()
    reals = [x.ufl_element().family() == "Real" for x in a]
    if not any(reals):
        return form
    replacements = {}
    for arg, r in zip(a, reals):
        if r:
            replacements[arg] = 1
    # If only the test space is Real, we need to turn the trial function
    # into a test function.
    if reals == [True, False]:
        replacements[a[1]] = TestFunction(a[1].function_space())
    return ufl.replace(form, replacements)
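
# Hedged usage sketch for _real_mangle above, assuming Firedrake exposes a
# "Real" function space on the mesh; the names are illustrative. With both
# arguments in the Real space, both get replaced by the literal 1.
import firedrake
import ufl

mesh = firedrake.UnitSquareMesh(4, 4)
R = firedrake.FunctionSpace(mesh, "Real", 0)
u, v = firedrake.TrialFunction(R), firedrake.TestFunction(R)
mangled = _real_mangle(u * v * ufl.dx)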
def to_tlm(self, perturbation): r"""Return another RushLarsenScheme that implements the tangent linearisation of the ODE solver. This takes \dot{y_n} (the derivative of y_n with respect to a parameter) and computes \dot{y_n+1} (the derivative of y_n+1 with respect to that parameter). """ generator = functools.partial(_rush_larsen_scheme_generator_tlm, perturbation=perturbation) new_solution = self._solution.copy() new_form = ufl.replace(self._rhs_form, {self._solution: new_solution}) return RushLarsenScheme(new_form, new_solution, self._t, self._order, self._generalized, generator=generator)
def to_adm(self, adj):
    r"""Return another MultiStageScheme that implements the adjoint
    linearisation of the ODE solver.

    This takes \bar{y_n+1} (the derivative of a functional J with respect
    to y_n+1) and computes \bar{y_n} (the derivative of J with respect
    to y_n).
    """
    generator = functools.partial(_butcher_scheme_generator_adm, adj=adj)
    new_solution = self._solution.copy()
    new_form = ufl.replace(self._rhs_form, {self._solution: new_solution})
    return ButcherMultiStageScheme(new_form, new_solution, self._t,
                                   self._bcs, self.a, self.b, self.c,
                                   self._order, generator=generator)
def to_tlm(self, perturbation): r""" Return another MultiStageScheme that implements the tangent linearisation of the ODE solver. This takes \dot{y_n} (the derivative of y_n with respect to a parameter) and computes \dot{y_n+1} (the derivative of y_n+1 with respect to that parameter). """ generator = functools.partial(_butcher_scheme_generator_tlm, \ perturbation=perturbation) new_solution = self._solution.copy() new_form = ufl.replace(self._rhs_form, {self._solution: new_solution}) return MultiStageScheme(new_form, new_solution, self._t, self._bcs, self.a, self.b, self.c, self._order, generator=generator)
def one_times(measure):
    # Workaround for UFL issue #80:
    # https://bitbucket.org/fenics-project/ufl/issues/80
    form = 1 * measure
    fd = compute_form_data(form, do_estimate_degrees=False)
    itg_data, = fd.integral_data
    integral, = itg_data.integrals
    integrand = integral.integrand()

    # UFL considers QuadratureWeight a geometric quantity, and the
    # general handler for geometric quantities estimates the degree of
    # the coordinate element.  This would unnecessarily increase the
    # estimated degree, so we drop QuadratureWeight instead.
    expression = replace(integrand, {QuadratureWeight(itg_data.domain): 1})

    # Now estimate degree for the preprocessed form
    degree = estimate_total_polynomial_degree(expression)

    return integrand, degree
def cell_residual(self):
    """
    Generate and return (bilinear, linear) forms defining linear
    variational problem for the strong cell residual
    """
    # Define trial and test functions for the cell residuals on
    # discontinuous version of primal trial space
    R_T = self.module.TrialFunction(self._dV)
    v = self.module.TestFunction(self._dV)

    # Extract original test function in the weak residual
    v_h = self.weak_residual.arguments()[0]

    # Define forms defining linear variational problem for cell residual
    v_T = self._b_T * v
    a_R_T = inner(v_T, R_T) * dx(self.domain)
    L_R_T = replace(self.weak_residual, {v_h: v_T})

    return (a_R_T, L_R_T)
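
# Hedged sketch of the test-function substitution used by cell_residual above:
# the weak residual is re-targeted at a bubble-weighted test function by
# replacing its original argument. Assumes legacy FEniCS (dolfin); b_T stands
# in for the cell bubble function of the original class.
from dolfin import (Function, FunctionSpace, TestFunction, UnitSquareMesh,
                    dx, grad, inner)
from ufl import replace

mesh = UnitSquareMesh(4, 4)
V = FunctionSpace(mesh, "Lagrange", 1)
u_h = Function(V)
v_h = TestFunction(V)
weak_residual = inner(grad(u_h), grad(v_h)) * dx - v_h * dx
dV = FunctionSpace(mesh, "DG", 2)
b_T = Function(dV)                      # stand-in for the cell bubble
v_T = b_T * TestFunction(dV)
L_R_T = replace(weak_residual, {v_h: v_T})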
def _find_linear_terms(rhs_exprs, u):
    """Help function that takes a list of rhs expressions and return a
    list of bools determining what component, rhs_exprs[i], is linear
    wrt u[i].
    """
    uu = [Constant(1.0) for _ in rhs_exprs]
    if len(rhs_exprs) > 1:
        repl = {u: ufl.as_vector(uu)}
    else:
        repl = {u: uu[0]}

    linear_terms = []
    for i, ui in enumerate(uu):
        comp_i_s = expand_indices(ufl.replace(rhs_exprs[i], repl))
        linear_terms.append(ui in extract_coefficients(comp_i_s) and
                            ui not in extract_coefficients(
                                expand_derivatives(ufl.diff(comp_i_s, ui))))
    return linear_terms
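
# Hedged usage sketch for _find_linear_terms above, assuming legacy FEniCS
# (dolfin). The first component is linear in u[0] while the second is
# quadratic in u[1], so the expected result would be [True, False]; the
# names below are illustrative.
from dolfin import Function, UnitIntervalMesh, VectorFunctionSpace, split

mesh = UnitIntervalMesh(4)
V = VectorFunctionSpace(mesh, "Lagrange", 1, dim=2)
u = Function(V)
u0, u1 = split(u)
rhs_exprs = [-2.0 * u0 + u1, u1 * u1 - u0]
linear_terms = _find_linear_terms(rhs_exprs, u)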
def facet_residual(self):
    """
    Generate and return (bilinear, linear) forms defining linear
    variational problem for the strong facet residual(s)
    """
    # Define trial and test functions for the facet residuals on
    # discontinuous version of primal trial space
    R_e = self.module.TrialFunction(self._dV)
    v = self.module.TestFunction(self._dV)

    # Extract original test function in the weak residual
    v_h = self.weak_residual.arguments()[0]

    # Define forms defining linear variational problem for facet residual
    v_e = self._b_e * v
    a_R_dT = ((inner(v_e('+'), R_e('+')) + inner(v_e('-'), R_e('-')))
              * dS(self.domain)
              + inner(v_e, R_e) * ds(self.domain))
    L_R_dT = (replace(self.weak_residual, {v_h: v_e})
              - inner(v_e, self._R_T) * dx(self.domain))

    return (a_R_dT, L_R_dT)
def _butcher_scheme_generator_adm(a, b, c, time, solution, rhs_form, adj): """ Generates a list of forms and solutions for a given Butcher tableau *Arguments* a (2 dimensional numpy array) The a matrix of the Butcher tableau. b (1-2 dimensional numpy array) The b vector of the Butcher tableau. If b is 2 dimensional the scheme includes an error estimator and can be used in adaptive solvers. c (1 dimensional numpy array) The c vector the Butcher tableau. time (_Constant_) A Constant holding the time at the start of the time step solution (_Function_) The prognostic variable rhs_form (ufl.Form) A UFL form representing the rhs for a time differentiated equation adj (_Function_) The derivative of the functional with respect to y_n+1 """ a = _check_abc(a, b, c) size = a.shape[0] DX = _check_form(rhs_form) # Get test function arguments = rhs_form.arguments() coefficients = rhs_form.coefficients() v = arguments[0] # Create time step dt = Constant(0.1) # rhs forms dolfin_stage_forms = [] ufl_stage_forms = [] # Stage solutions k = [Function(solution.function_space(), name="k_%d"%i) for i in range(size)] kbar = [Function(solution.function_space(), name="kbar_%d"%i) \ for i in range(size)] # Create the stage forms y_ = solution time_ = time time_dep_expressions = _time_dependent_expressions(rhs_form, time) zero_ = ufl.zero(*y_.ufl_shape) forward_forms = [] stage_solutions = [] jacobian_indices = [] # The recomputation of the forward run: for i, ki in enumerate(k): # Check whether the stage is explicit explicit = a[i,i] == 0 # Evaluation arguments for the ith stage evalargs = y_ + dt * sum([float(a[i,j]) * k[j] \ for j in range(i+1)], zero_) time = time_ + dt*c[i] replace_dict = _replace_dict_time_dependent_expression(\ time_dep_expressions, time_, dt, c[i]) replace_dict[y_] = evalargs replace_dict[time_] = time stage_form = ufl.replace(rhs_form, replace_dict) forward_forms.append(stage_form) if explicit: stage_forms = [stage_form] jacobian_indices.append(-1) else: # Create a F=0 form and differentiate it stage_form_implicit = stage_form - ufl.inner(ki, v)*DX stage_forms = [stage_form_implicit, derivative(\ stage_form_implicit, ki)] jacobian_indices.append(0) ufl_stage_forms.append(stage_forms) dolfin_stage_forms.append([Form(form) for form in stage_forms]) stage_solutions.append(ki) for i, kbari in reversed(list(enumerate(kbar))): # Check whether the stage is explicit explicit = a[i,i] == 0 # And now the adjoint linearisation: stage_form_adm = ufl.inner(dt * b[i] * adj, v)*DX + sum(\ [dt * float(a[j,i]) * safe_action(safe_adjoint(derivative(\ forward_forms[j], y_)), kbar[j]) for j in range(i, size)]) if explicit: stage_forms_adm = [stage_form_adm] jacobian_indices.append(-1) else: # Create a F=0 form and differentiate it stage_form_adm -= ufl.inner(kbar[i], v)*DX stage_forms_adm = [stage_form_adm, derivative(stage_form_adm, kbari)] jacobian_indices.append(1) ufl_stage_forms.append(stage_forms_adm) dolfin_stage_forms.append([Form(form) for form in stage_forms_adm]) stage_solutions.append(kbari) # Only one last stage if len(b.shape) == 1: last_stage = Form(ufl.inner(adj, v)*DX + sum(\ [safe_action(safe_adjoint(derivative(forward_forms[i], y_)), kbar[i]) \ for i in range(size)])) else: raise Exception("Not sure what to do here") human_form = "unimplemented" return ufl_stage_forms, dolfin_stage_forms, jacobian_indices, last_stage,\ stage_solutions, dt, human_form, adj
def _butcher_scheme_generator_tlm(a, b, c, time, solution, rhs_form,
                                  perturbation):
    """
    Generates a list of forms and solutions for a given Butcher tableau

    *Arguments*
        a (2 dimensional numpy array)
            The a matrix of the Butcher tableau.
        b (1-2 dimensional numpy array)
            The b vector of the Butcher tableau. If b is 2 dimensional the
            scheme includes an error estimator and can be used in adaptive
            solvers.
        c (1 dimensional numpy array)
            The c vector the Butcher tableau.
        time (_Constant_)
            A Constant holding the time at the start of the time step
        solution (_Function_)
            The prognostic variable
        rhs_form (ufl.Form)
            A UFL form representing the rhs for a time differentiated equation
        perturbation (_Function_)
            The perturbation in the initial condition of the solution
    """
    a = _check_abc(a, b, c)
    size = a.shape[0]

    DX = _check_form(rhs_form)

    # Get test function
    arguments = rhs_form.arguments()
    coefficients = rhs_form.coefficients()
    v = arguments[0]

    # Create time step
    dt = Constant(0.1)

    # rhs forms
    dolfin_stage_forms = []
    ufl_stage_forms = []

    # Stage solutions
    k = [Function(solution.function_space(), name="k_%d" % i)
         for i in range(size)]
    kdot = [Function(solution.function_space(), name="kdot_%d" % i)
            for i in range(size)]

    # Create the stage forms
    y_ = solution
    time_ = time
    time_dep_expressions = _time_dependent_expressions(rhs_form, time)
    zero_ = ufl.zero(*y_.ufl_shape)
    forward_forms = []
    stage_solutions = []
    jacobian_indices = []

    for i, ki in enumerate(k):

        # Check whether the stage is explicit
        explicit = a[i, i] == 0

        # Evaluation arguments for the ith stage
        evalargs = y_ + dt * sum([float(a[i, j]) * k[j]
                                  for j in range(i + 1)], zero_)
        time = time_ + dt * c[i]

        replace_dict = _replace_dict_time_dependent_expression(
            time_dep_expressions, time_, dt, c[i])

        replace_dict[y_] = evalargs
        replace_dict[time_] = time
        stage_form = ufl.replace(rhs_form, replace_dict)

        forward_forms.append(stage_form)

        # The recomputation of the forward run:
        if explicit:
            stage_forms = [stage_form]
            jacobian_indices.append(-1)
        else:
            # Create a F=0 form and differentiate it
            stage_form_implicit = stage_form - ufl.inner(ki, v) * DX
            stage_forms = [stage_form_implicit,
                           derivative(stage_form_implicit, ki)]
            jacobian_indices.append(0)

        ufl_stage_forms.append(stage_forms)
        dolfin_stage_forms.append([Form(form) for form in stage_forms])
        stage_solutions.append(ki)

        # And now the tangent linearisation:
        stage_form_tlm = (
            safe_action(derivative(stage_form, y_), perturbation)
            + sum([dt * float(a[i, j])
                   * safe_action(derivative(forward_forms[j], y_), kdot[j])
                   for j in range(i + 1)]))
        if explicit:
            stage_forms_tlm = [stage_form_tlm]
            jacobian_indices.append(-1)
        else:
            # Create a F=0 form and differentiate it
            stage_form_tlm -= ufl.inner(kdot[i], v) * DX
            stage_forms_tlm = [stage_form_tlm,
                               derivative(stage_form_tlm, kdot[i])]
            jacobian_indices.append(1)

        ufl_stage_forms.append(stage_forms_tlm)
        dolfin_stage_forms.append([Form(form) for form in stage_forms_tlm])
        stage_solutions.append(kdot[i])

    # Only one last stage
    if len(b.shape) == 1:
        last_stage = Form(ufl.inner(
            perturbation + sum([dt * float(bi) * kdoti
                                for bi, kdoti in zip(b, kdot)], zero_),
            v) * DX)
    else:
        raise Exception("Not sure what to do here")

    human_form = []
    for i in range(size):
        kterm = " + ".join("%sh*k_%s" % ("" if a[i, j] == 1.0 else
                                         "%s*" % a[i, j], j)
                           for j in range(size) if a[i, j] != 0)
        if c[i] in [0.0, 1.0]:
            cih = " + h" if c[i] == 1.0 else ""
        else:
            cih = " + %s*h" % c[i]

        kdotterm = " + ".join(
            "%(a)sh*action(derivative(f(t_n%(cih)s, y_n + "
            "%(kterm)s), kdot_%(i)s" %
            {"a": ("" if a[i, j] == 1.0 else "%s*" % a[i, j], j),
             "i": i, "cih": cih, "kterm": kterm}
            for j in range(size) if a[i, j] != 0)

        if len(kterm) == 0:
            human_form.append("k_%(i)s = f(t_n%(cih)s, y_n)"
                              % {"i": i, "cih": cih})
            human_form.append("kdot_%(i)s = action(derivative("
                              "f(t_n%(cih)s, y_n), y_n), ydot_n)"
                              % {"i": i, "cih": cih})
        else:
            human_form.append("k_%(i)s = f(t_n%(cih)s, y_n + %(kterm)s)"
                              % {"i": i, "cih": cih, "kterm": kterm})
            human_form.append("kdot_%(i)s = action(derivative(f(t_n%(cih)s, "
                              "y_n + %(kterm)s), y_n) + %(kdotterm)s"
                              % {"i": i, "cih": cih, "kterm": kterm,
                                 "kdotterm": kdotterm})

    parentheses = "(%s)" if np.sum(b > 0) > 1 else "%s"
    human_form.append("ydot_{n+1} = ydot_n + h*" + parentheses % (" + ".join(
        "%skdot_%s" % ("" if b[i] == 1.0 else "%s*" % b[i], i)
        for i in range(size) if b[i] > 0)))

    human_form = "\n".join(human_form)

    return ufl_stage_forms, dolfin_stage_forms, jacobian_indices, last_stage, \
        stage_solutions, dt, human_form, perturbation
def _butcher_scheme_generator(a, b, c, time, solution, rhs_form): """ Generates a list of forms and solutions for a given Butcher tableau *Arguments* a (2 dimensional numpy array) The a matrix of the Butcher tableau. b (1-2 dimensional numpy array) The b vector of the Butcher tableau. If b is 2 dimensional the scheme includes an error estimator and can be used in adaptive solvers. c (1 dimensional numpy array) The c vector the Butcher tableau. time (_Constant_) A Constant holding the time at the start of the time step solution (_Function_) The prognostic variable rhs_form (ufl.Form) A UFL form representing the rhs for a time differentiated equation """ a = _check_abc(a, b, c) size = a.shape[0] DX = _check_form(rhs_form) # Get test function arguments = rhs_form.arguments() coefficients = rhs_form.coefficients() v = arguments[0] # Create time step dt = Constant(0.1) # rhs forms dolfin_stage_forms = [] ufl_stage_forms = [] # Stage solutions k = [Function(solution.function_space(), name="k_%d"%i) for i in range(size)] jacobian_indices = [] # Create the stage forms y_ = solution time_ = time time_dep_expressions = _time_dependent_expressions(rhs_form, time) zero_ = ufl.zero(*y_.ufl_shape) for i, ki in enumerate(k): # Check whether the stage is explicit explicit = a[i,i] == 0 # Evaluation arguments for the ith stage evalargs = y_ + dt * sum([float(a[i,j]) * k[j] \ for j in range(i+1)], zero_) time = time_ + dt*c[i] replace_dict = _replace_dict_time_dependent_expression(time_dep_expressions, time_, dt, c[i]) replace_dict[y_] = evalargs replace_dict[time_] = time stage_form = ufl.replace(rhs_form, replace_dict) if explicit: stage_forms = [stage_form] jacobian_indices.append(-1) else: # Create a F=0 form and differentiate it stage_form -= ufl.inner(ki, v)*DX stage_forms = [stage_form, derivative(stage_form, ki)] jacobian_indices.append(0) ufl_stage_forms.append(stage_forms) dolfin_stage_forms.append([Form(form) for form in stage_forms]) # Only one last stage if len(b.shape) == 1: last_stage = Form(ufl.inner(y_+sum([dt*float(bi)*ki for bi, ki in \ zip(b, k)], zero_), v)*DX) else: # FIXME: Add support for adaptivity in RKSolver and MultiStageScheme last_stage = [Form(ufl.inner(y_+sum([dt*float(bi)*ki for bi, ki in \ zip(b[0,:], k)], zero_), v)*DX), Form(ufl.inner(y_+sum([dt*float(bi)*ki for bi, ki in \ zip(b[1,:], k)], zero_), v)*DX)] # Create the Function holding the solution at end of time step #k.append(solution.copy()) # Generate human form of MultiStageScheme human_form = [] for i in range(size): kterm = " + ".join("%sh*k_%s" % ("" if a[i,j] == 1.0 else \ "%s*"% a[i,j], j) \ for j in range(size) if a[i,j] != 0) if c[i] in [0.0, 1.0]: cih = " + h" if c[i] == 1.0 else "" else: cih = " + %s*h" % c[i] if len(kterm) == 0: human_form.append("k_%(i)s = f(t_n%(cih)s, y_n)" % {"i": i, "cih": cih}) else: human_form.append("k_%(i)s = f(t_n%(cih)s, y_n + %(kterm)s)" % \ {"i": i, "cih": cih, "kterm": kterm}) parentheses = "(%s)" if np.sum(b>0) > 1 else "%s" human_form.append("y_{n+1} = y_n + h*" + parentheses % (" + ".join(\ "%sk_%s" % ("" if b[i] == 1.0 else "%s*" % b[i], i) \ for i in range(size) if b[i] > 0))) human_form = "\n".join(human_form) return ufl_stage_forms, dolfin_stage_forms, jacobian_indices, last_stage, \ k, dt, human_form, None
def compute_liftings(name, p, mesh, f, exact_solution=None):
    r"""Find approximation to p-Laplace problem with rhs f, and compute
    global and local liftings of the residual. Return tuple (
        u_h,
        \sum_a ||\nabla r ||_{p,\omega_a}^p \psi_a/|\omega_a|,
        \sum_a ||\nabla r^a ||_{p,\omega_a}^p \psi_a/|\omega_a|,
        \sum_a ||\nabla(u-u_h)||_{p,\omega_a}^p \psi_a/|\omega_a|,
        \sum_a ||\sigma(\nabla u)-\sigma(\nabla u_h)||_{q,\omega_a}^q \psi_a/|\omega_a|,
        C_{cont,PF},
        ||\nabla r||_p^{p-1},
        ( 1/N \sum_a ||\nabla r_a||_p^p )^{1/q},
        ||\sigma(\nabla u) - \sigma(\nabla u_h)||_q,
        N * C_PF * r_norm_loc / r_norm_glob,
        N * r_norm_loc_PF / r_norm_glob,
        r_norm_glob / r_norm_loc,
        flux_err / r_norm_glob,
    ).
    First five are P1 functions, the rest are numbers.
    """
    q = p/(p-1)  # Dual Lebesgue exponent
    N = mesh.topology().dim() + 1  # Vertices per cell

    # Check that mesh is the coarsest one
    assert mesh.id() == mesh.root_node().id()

    # Get Galerkin approximation of p-Laplace problem -\Delta_p u = f
    log(25, 'Computing residual of p-Laplace problem')
    V = FunctionSpace(mesh, 'Lagrange', 1)
    criterion = lambda u_h, Est_h, Est_eps, Est_tot, Est_up: \
        Est_eps <= 1e-6*Est_tot
    u = solve_p_laplace_adaptive(p, criterion, V, f,
                                 zero(mesh.geometry().dim()), exact_solution)

    # Plot exact solution, approximation and error
    plot_error(exact_solution, u, name)

    # p-Laplacian flux of u
    S = inner(grad(u), grad(u))**(0.5*Constant(p)-1.0) * grad(u)

    # Compute cell-wise norm of flux
    if exact_solution is not None:
        u_ex = exact_solution
    else:
        warning("Don't have exact solution for computation of flux error. "
                "Assuming zero.")
        u_ex = Constant(0)
    S_ex = ufl.replace(S, {u: u_ex})
    S_ex = inner(grad(exact_solution), grad(exact_solution))**(0.5*Constant(p)-1.0) \
        * grad(exact_solution)
    mesh_fine = u.function_space().mesh()
    flux_err = ((S - S_ex)**2)**Constant(0.5*q)
    flux_err_fine, flux_err_coarse = compute_cellwise_norm(flux_err, mesh_fine)

    # Distribute cell-wise flux error to patches
    flux_err_p1 = distribute_p0_to_p1(flux_err_coarse, Function(V))

    # Sanity check and logging
    flux_err = sobolev_norm(S_ex-S, q, k=0)
    assert np.isclose(assemble(flux_err_fine * dx), flux_err**q)
    assert np.isclose(assemble(flux_err_coarse * dx), flux_err**q)
    assert np.isclose(assemble(flux_err_p1 * dx), flux_err**q)
    info_blue(r"||\sigma(u)-\sigma(u_h)||_q^q = %g" % flux_err**q)

    # Global lifting of W^{-1, p'} functional R = f + div(S)
    u.set_allow_extrapolation(True)  # Needed hack
    r_glob = compute_global_lifting(p, mesh, f, S)
    u.set_allow_extrapolation(False)

    # Compute cell-wise norm of global lifting
    dr_glob_fine, dr_glob_coarse = compute_cellwise_grad(r_glob, p)

    # Distribute cell-wise norms of global lifting to patches
    dr_glob_p1 = distribute_p0_to_p1(dr_glob_coarse, Function(V))

    # Norm of global lifting, equal to norm of residual
    r_norm_glob = sobolev_norm(r_glob, p)**(p/q)

    # Sanity check
    assert np.isclose(assemble(dr_glob_fine * dx), r_norm_glob**q)
    assert np.isclose(assemble(dr_glob_coarse * dx), r_norm_glob**q)
    assert np.isclose(assemble(dr_glob_p1 * dx), r_norm_glob**q)

    # Compute local liftings
    r_norm_loc, r_norm_loc_PF, r_loc_p1 = compute_local_liftings(p, V, f, S)

    # Compute energy error
    if exact_solution:
        ee_fine, ee_coarse = compute_cellwise_grad(
            exact_solution-u, p, mesh_fine=u.function_space().mesh())
        ee_p1 = distribute_p0_to_p1(ee_coarse, Function(V))
        ee = sobolev_norm(exact_solution-u, p)
        assert np.isclose(assemble(ee_fine * dx), ee**p)
        assert np.isclose(assemble(ee_coarse * dx), ee**p)
        assert np.isclose(assemble(ee_p1 * dx), ee**p)
        info_blue(r"||\nabla(u-u_h)||_p^p = %g" % ee**p)
    else:
        ee_p1 = None

    # Check effectivity of localization estimates
    C_PF = poincare_friedrichs_cutoff(mesh, p)
    ratio_a = (N * C_PF * r_norm_loc) / r_norm_glob
    ratio_b = r_norm_glob / r_norm_loc
    ratio_a_PF = (N * r_norm_loc_PF) / r_norm_glob
    ratio_c = flux_err / r_norm_glob
    assert ratio_a >= 1.0 and ratio_b >= 1.0
    assert ratio_a_PF >= 1.0
    assert ratio_c >= 1.0

    # Report
    info_blue(r"||\nabla r||_p^{p-1} = %g, "
              r"( 1/N \sum_a ||\nabla r_a||_p^p )^{1/q} = %g"
              % (r_norm_glob, r_norm_loc))
    info_blue("C_{cont,PF} = %g" % C_PF)
    info_green("(4.8a) ok: rhs/lhs = %g >= 1" % ratio_a)
    info_green("(4.8b) ok: rhs/lhs = %g >= 1" % ratio_b)
    info_green("ratio_c = %g >= 1" % ratio_c)

    return u, dr_glob_p1, r_loc_p1, ee_p1, flux_err_p1, \
        C_PF, r_norm_glob, r_norm_loc, flux_err, \
        ratio_a, ratio_a_PF, ratio_b, ratio_c
def __init__(self, *args, **kwargs): """ :arg problem: A :class:`NonlinearVariationalProblem` to solve. :kwarg nullspace: an optional :class:`.VectorSpaceBasis` (or :class:`.MixedVectorSpaceBasis`) spanning the null space of the operator. :kwarg solver_parameters: Solver parameters to pass to PETSc. This should be a dict mapping PETSc options to values. For example, to set the nonlinear solver type to just use a linear solver: .. code-block:: python {'snes_type': 'ksponly'} PETSc flag options should be specified with `bool` values. For example: .. code-block:: python {'snes_monitor': True} .. warning :: Since this object contains a circular reference and a custom ``__del__`` attribute, you *must* call :meth:`.destroy` on it when you are done, otherwise it will never be garbage collected. """ assert isinstance(args[0], NonlinearVariationalProblem) self._problem = args[0] # Build the jacobian with the correct sparsity pattern. Note # that since matrix assembly is lazy this doesn't actually # force an additional assembly of the matrix since in # form_jacobian we call assemble again which drops this # computation on the floor. self._jac_tensor = assemble.assemble(self._problem.J_ufl, bcs=self._problem.bcs, form_compiler_parameters=self._problem.form_compiler_parameters) if self._problem.Jp is not None: self._jac_ptensor = assemble.assemble(self._problem.Jp, bcs=self._problem.bcs, form_compiler_parameters=self._problem.form_compiler_parameters) else: self._jac_ptensor = self._jac_tensor test = self._problem.F_ufl.arguments()[0] self._F_tensor = function.Function(test.function_space()) # Function to hold current guess self._x = function.Function(self._problem.u_ufl) self._problem.F_ufl = ufl.replace(self._problem.F_ufl, {self._problem.u_ufl: self._x}) self._problem.J_ufl = ufl.replace(self._problem.J_ufl, {self._problem.u_ufl: self._x}) if self._problem.Jp is not None: self._problem.Jp = ufl.replace(self._problem.Jp, {self._problem.u_ufl: self._x}) self._jacobian_assembled = False self.snes = PETSc.SNES().create() self._opt_prefix = 'firedrake_snes_%d_' % NonlinearVariationalSolver._id NonlinearVariationalSolver._id += 1 self.snes.setOptionsPrefix(self._opt_prefix) parameters = kwargs.get('solver_parameters', None) if 'parameters' in kwargs: warning(RED % "The 'parameters' keyword to %s is deprecated, use 'solver_parameters' instead.", self.__class__.__name__) parameters = kwargs['parameters'] if 'solver_parameters' in kwargs: warning(RED % "'parameters' and 'solver_parameters' passed to %s, using the latter", self.__class__.__name__) parameters = kwargs['solver_parameters'] # Make sure we don't stomp on a dict the user has passed in. parameters = parameters.copy() if parameters is not None else {} # Mixed problem, use jacobi pc if user has not supplied one. if self._jac_tensor._M.sparsity.shape != (1, 1): parameters.setdefault('pc_type', 'jacobi') self.parameters = parameters ksp = self.snes.getKSP() pc = ksp.getPC() pmat = self._jac_ptensor._M names = [fs.name if fs.name else str(i) for i, fs in enumerate(test.function_space())] ises = solving_utils.set_fieldsplits(pmat, pc, names=names) with self._F_tensor.dat.vec as v: self.snes.setFunction(self.form_function, v) self.snes.setJacobian(self.form_jacobian, J=self._jac_tensor._M.handle, P=self._jac_ptensor._M.handle) nullspace = kwargs.get('nullspace', None) if nullspace is not None: self.set_nullspace(nullspace, ises=ises)
def __init__(self, *args, **kwargs): """ :arg problem: A :class:`NonlinearVariationalProblem` to solve. :kwarg nullspace: an optional :class:`.VectorSpaceBasis` (or :class:`.MixedVectorSpaceBasis`) spanning the null space of the operator. :kwarg solver_parameters: Solver parameters to pass to PETSc. This should be a dict mapping PETSc options to values. For example, to set the nonlinear solver type to just use a linear solver: .. code-block:: python {'snes_type': 'ksponly'} PETSc flag options should be specified with `bool` values. For example: .. code-block:: python {'snes_monitor': True} .. warning :: Since this object contains a circular reference and a custom ``__del__`` attribute, you *must* call :meth:`.destroy` on it when you are done, otherwise it will never be garbage collected. """ assert isinstance(args[0], NonlinearVariationalProblem) self._problem = args[0] # Build the jacobian with the correct sparsity pattern. Note # that since matrix assembly is lazy this doesn't actually # force an additional assembly of the matrix since in # form_jacobian we call assemble again which drops this # computation on the floor. self._jac_tensor = assemble(self._problem.J_ufl, bcs=self._problem.bcs) self._jac_ptensor = self._jac_tensor test = self._problem.F_ufl.compute_form_data().original_arguments[0] self._F_tensor = function.Function(test.function_space()) # Function to hold current guess self._x = function.Function(self._problem.u_ufl) self._problem.F_ufl = ufl.replace(self._problem.F_ufl, {self._problem.u_ufl: self._x}) self._problem.J_ufl = ufl.replace(self._problem.J_ufl, {self._problem.u_ufl: self._x}) self.snes = PETSc.SNES().create() self._opt_prefix = 'firedrake_snes_%d_' % NonlinearVariationalSolver._id NonlinearVariationalSolver._id += 1 self.snes.setOptionsPrefix(self._opt_prefix) parameters = kwargs.get('solver_parameters', None) # Make sure we don't stomp on a dict the user has passed in. parameters = copy(parameters) if parameters is not None else {} # Mixed problem, use jacobi pc if user has not supplied one. if self._jac_tensor._M.sparsity.shape != (1, 1): parameters.setdefault('pc_type', 'jacobi') self.parameters = parameters ksp = self.snes.getKSP() pc = ksp.getPC() pmat = self._jac_ptensor._M if pmat.sparsity.shape != (1, 1): rows, cols = pmat.sparsity.shape ises = [] nlocal_rows = 0 for i in range(rows): if i < cols: nlocal_rows += pmat[i, i].sparsity.nrows * pmat[i, i].dims[0] offset = 0 if op2.MPI.comm.rank == 0: op2.MPI.comm.exscan(nlocal_rows) else: offset = op2.MPI.comm.exscan(nlocal_rows) for i in range(rows): if i < cols: nrows = pmat[i, i].sparsity.nrows * pmat[i, i].dims[0] name = test.function_space()[i].name name = name if name else '%d' % i ises.append((name, PETSc.IS().createStride(nrows, first=offset, step=1))) offset += nrows pc.setFieldSplitIS(*ises) else: ises = None with self._F_tensor.dat.vec as v: self.snes.setFunction(self.form_function, v) self.snes.setJacobian(self.form_jacobian, J=self._jac_tensor._M.handle, P=self._jac_ptensor._M.handle) nullspace = kwargs.get('nullspace', None) if nullspace is not None: self.set_nullspace(nullspace, ises=ises)
def _butcher_scheme_generator(a, b, c, solution, rhs_form):
    """
    Generates a list of forms and solutions for a given Butcher tableau

    *Arguments*
        a (2 dimensional numpy array)
            The a matrix of the Butcher tableau.
        b (1-2 dimensional numpy array)
            The b vector of the Butcher tableau. If b is 2 dimensional the
            scheme includes an error estimator and can be used in adaptive
            solvers.
        c (1 dimensional numpy array)
            The c vector of the Butcher tableau.
        solution (_Function_)
            The prognostic variable
        rhs_form (ufl.Form)
            A UFL form representing the rhs for a time differentiated equation
    """
    if not (isinstance(a, np.ndarray) and (len(a) == 1 or
            (len(a.shape) == 2 and a.shape[0] == a.shape[1]))):
        raise TypeError("Expected an m x m numpy array as the first argument")
    if not (isinstance(b, np.ndarray) and len(b.shape) in [1, 2]):
        raise TypeError("Expected a 1 or 2 dimensional numpy array as the second argument")
    if not (isinstance(c, np.ndarray) and len(c.shape) == 1):
        raise TypeError("Expected a 1 dimensional numpy array as the third argument")

    # Make sure a is a "matrix"
    if len(a) == 1:
        a.shape = (1, 1)

    # Get size of system
    size = a.shape[0]

    # If b is a matrix we expect it to have two rows
    if len(b.shape) == 2:
        if not (b.shape[0] == 2 and b.shape[1] == size):
            raise ValueError("Expected a 2 row matrix with the same number "
                             "of columns as the first dimension of the a matrix.")
    elif len(b) != size:
        raise ValueError("Expected the length of the b vector to have the "
                         "same size as the first dimension of the a matrix.")

    if len(c) != size:
        raise ValueError("Expected the length of the c vector to have the "
                         "same size as the first dimension of the a matrix.")

    # Check if tableau is fully implicit
    for i in range(size):
        for j in range(i):
            if a[j, i] != 0:
                raise ValueError("Does not support fully implicit Butcher tableau.")

    if not isinstance(rhs_form, ufl.Form):
        raise TypeError("Expected a ufl.Form as the 5th argument.")

    # Check if form contains a cell or point integral
    if "cell" in rhs_form.integral_groups():
        DX = ufl.dx
    elif "point" in rhs_form.integral_groups():
        DX = ufl.dP
    else:
        raise ValueError("Expected either a cell or point integral in the form.")

    # Get test function
    arguments, coefficients = ufl.algorithms.extract_arguments_and_coefficients(rhs_form)
    if len(arguments) != 1:
        raise ValueError("Expected the form to have rank 1")
    v = arguments[0]

    # Create time step
    dt = Constant(0.1)

    # rhs forms
    dolfin_stage_forms = []
    ufl_stage_forms = []

    # Stage solutions
    k = [solution.copy(deepcopy=True) for i in range(size)]

    # Create the stage forms
    y_ = solution
    for i, ki in enumerate(k):

        # Check whether the stage is explicit
        explicit = a[i, i] == 0

        # Evaluation arguments for the ith stage
        evalargs = y_ + dt * sum([float(a[i, j]) * k[j]
                                  for j in range(i + 1)], ufl.zero(*y_.shape()))
        stage_form = ufl.replace(rhs_form, {y_: evalargs})

        if explicit:
            stage_forms = [stage_form]
        else:
            # Create a F=0 form and differentiate it
            stage_form -= ufl.inner(ki, v)*DX
            stage_forms = [stage_form, derivative(stage_form, ki)]
        ufl_stage_forms.append(stage_forms)

        dolfin_stage_forms.append([Form(form) for form in stage_forms])

    # Only one last stage
    if len(b.shape) == 1:
        last_stage = cpp.FunctionAXPY([(float(bi), ki) for bi, ki in zip(b, k)])
    else:
        # FIXME: Add support for adaptivity in RKSolver and MultiStageScheme
        last_stage = [cpp.FunctionAXPY([(float(bi), ki) for bi, ki in zip(b[0, :], k)]),
                      cpp.FunctionAXPY([(float(bi), ki) for bi, ki in zip(b[1, :], k)])]

    # Create the Function holding the solution at end of time step
    # k.append(solution.copy())

    # Generate human form of MultiStageScheme
    human_form = []
    for i in range(size):
        kterm = " + ".join("%sh*k_%s" % ("" if a[i, j] == 1.0 else
                                         "%s*" % a[i, j], j)
                           for j in range(size) if a[i, j] != 0)
        if c[i] in [0.0, 1.0]:
            cih = " + h" if c[i] == 1.0 else ""
        else:
            cih = " + %s*h" % c[i]

        if len(kterm) == 0:
            human_form.append("k_%(i)s = f(t_n%(cih)s, y_n)" % {"i": i, "cih": cih})
        else:
            human_form.append("k_%(i)s = f(t_n%(cih)s, y_n + %(kterm)s)" %
                              {"i": i, "cih": cih, "kterm": kterm})

    parentheses = "(%s)" if np.sum(b > 0) > 1 else "%s"
    human_form.append("y_{n+1} = y_n + h*" + parentheses % (" + ".join(
        "%sk_%s" % ("" if b[i] == 1.0 else "%s*" % b[i], i)
        for i in range(size) if b[i] > 0)))

    human_form = "\n".join(human_form)

    return ufl_stage_forms, dolfin_stage_forms, last_stage, k, dt, human_form
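# --- Usage sketch (not part of the module source above) ----------------------
# A hedged example of driving _butcher_scheme_generator with the explicit
# midpoint (RK2) tableau.  The DOLFIN problem setup is illustrative only and
# assumes the legacy DOLFIN/UFL versions this helper was written against
# (e.g. the rhs_form.integral_groups() API used above); the tableau arrays
# themselves are the standard explicit midpoint coefficients.
import numpy as np
from dolfin import UnitIntervalMesh, FunctionSpace, Function, TestFunction, dx

# Explicit midpoint rule:
#   k_0 = f(t_n, y_n)
#   k_1 = f(t_n + h/2, y_n + (h/2)*k_0)
#   y_{n+1} = y_n + h*k_1
a = np.array([[0.0, 0.0],
              [0.5, 0.0]])
b = np.array([0.0, 1.0])
c = np.array([0.0, 0.5])

mesh = UnitIntervalMesh(8)
V = FunctionSpace(mesh, "CG", 1)
y = Function(V)
v = TestFunction(V)
rhs_form = -y*v*dx          # rank-1 cell-integral form for dy/dt = -y

ufl_forms, dolfin_forms, last_stage, k, dt, human = \
    _butcher_scheme_generator(a, b, c, y, rhs_form)
print(human)                # human-readable description of the stages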
def compile_integral(integral_data, form_data, prefix, parameters,
                     interface=firedrake_interface):
    """Compiles a UFL integral into an assembly kernel.

    :arg integral_data: UFL integral data
    :arg form_data: UFL form data
    :arg prefix: kernel name will start with this string
    :arg parameters: parameters object
    :arg interface: backend module for the kernel interface
    :returns: a kernel constructed by the kernel interface
    """
    if parameters is None:
        parameters = default_parameters()
    else:
        _ = default_parameters()
        _.update(parameters)
        parameters = _

    # Remove these here, they're handled below.
    if parameters.get("quadrature_degree") in ["auto", "default", None, -1, "-1"]:
        del parameters["quadrature_degree"]
    if parameters.get("quadrature_rule") in ["auto", "default", None]:
        del parameters["quadrature_rule"]

    integral_type = integral_data.integral_type
    interior_facet = integral_type.startswith("interior_facet")
    mesh = integral_data.domain
    cell = integral_data.domain.ufl_cell()
    arguments = form_data.preprocessed_form.arguments()
    kernel_name = "%s_%s_integral_%s" % (prefix, integral_type, integral_data.subdomain_id)
    # Handle negative subdomain_id
    kernel_name = kernel_name.replace("-", "_")

    fiat_cell = as_fiat_cell(cell)
    integration_dim, entity_ids = lower_integral_type(fiat_cell, integral_type)

    quadrature_indices = []

    # Dict mapping domains to index in original_form.ufl_domains()
    domain_numbering = form_data.original_form.domain_numbering()
    builder = interface.KernelBuilder(integral_type, integral_data.subdomain_id,
                                      domain_numbering[integral_data.domain])
    argument_multiindices = tuple(builder.create_element(arg.ufl_element()).get_indices()
                                  for arg in arguments)
    return_variables = builder.set_arguments(arguments, argument_multiindices)

    builder.set_coordinates(mesh)

    builder.set_coefficients(integral_data, form_data)

    # Map from UFL FiniteElement objects to multiindices.  This is
    # so we reuse Index instances when evaluating the same coefficient
    # multiple times with the same table.
    #
    # We also use the same dict for the unconcatenate index cache,
    # which maps index objects to tuples of multiindices.  These two
    # caches shall never conflict as their keys have different types
    # (UFL finite elements vs. GEM index objects).
    index_cache = {}

    kernel_cfg = dict(interface=builder,
                      ufl_cell=cell,
                      integral_type=integral_type,
                      precision=parameters["precision"],
                      integration_dim=integration_dim,
                      entity_ids=entity_ids,
                      argument_multiindices=argument_multiindices,
                      index_cache=index_cache)

    mode_irs = collections.OrderedDict()
    for integral in integral_data.integrals:
        params = parameters.copy()
        params.update(integral.metadata())  # integral metadata overrides
        if params.get("quadrature_rule") == "default":
            del params["quadrature_rule"]

        mode = pick_mode(params["mode"])
        mode_irs.setdefault(mode, collections.OrderedDict())

        integrand = ufl.replace(integral.integrand(), form_data.function_replace_map)
        integrand = ufl_utils.split_coefficients(integrand, builder.coefficient_split)

        # Check if the integral has a quad degree attached, otherwise use
        # the estimated polynomial degree attached by compute_form_data
        quadrature_degree = params.get("quadrature_degree",
                                       params["estimated_polynomial_degree"])

        functions = list(arguments) + [builder.coordinate(mesh)] + list(integral_data.integral_coefficients)
        function_degrees = [f.ufl_function_space().ufl_element().degree() for f in functions]
        if all((asarray(quadrature_degree) > 10 * asarray(degree)).all()
               for degree in function_degrees):
            logger.warning("Estimated quadrature degree %s more "
                           "than tenfold greater than any "
                           "argument/coefficient degree (max %s)",
                           quadrature_degree, max_degree(function_degrees))

        try:
            quad_rule = params["quadrature_rule"]
        except KeyError:
            integration_cell = fiat_cell.construct_subelement(integration_dim)
            quad_rule = make_quadrature(integration_cell, quadrature_degree)

        if not isinstance(quad_rule, AbstractQuadratureRule):
            raise ValueError("Expected to find a QuadratureRule object, not a %s" %
                             type(quad_rule))

        quadrature_multiindex = quad_rule.point_set.indices
        quadrature_indices.extend(quadrature_multiindex)

        config = kernel_cfg.copy()
        config.update(quadrature_rule=quad_rule)

        expressions = fem.compile_ufl(integrand,
                                      interior_facet=interior_facet,
                                      **config)
        reps = mode.Integrals(expressions, quadrature_multiindex,
                              argument_multiindices, params)
        for var, rep in zip(return_variables, reps):
            mode_irs[mode].setdefault(var, []).append(rep)

    # Finalise mode representations into a set of assignments
    assignments = []
    for mode, var_reps in mode_irs.items():
        assignments.extend(mode.flatten(var_reps.items(), index_cache))

    if assignments:
        return_variables, expressions = zip(*assignments)
    else:
        return_variables = []
        expressions = []

    # Need optimised roots for COFFEE
    options = dict(reduce(operator.and_,
                          [mode.finalise_options.items()
                           for mode in mode_irs.keys()]))
    expressions = impero_utils.preprocess_gem(expressions, **options)
    assignments = list(zip(return_variables, expressions))

    # Look for cell orientations in the IR
    if builder.needs_cell_orientations(expressions):
        builder.require_cell_orientations()

    # Construct ImperoC
    split_argument_indices = tuple(chain(*[var.index_ordering()
                                           for var in return_variables]))
    index_ordering = tuple(quadrature_indices) + split_argument_indices
    try:
        impero_c = impero_utils.compile_gem(assignments, index_ordering, remove_zeros=True)
    except impero_utils.NoopError:
        # No operations, construct empty kernel
        return builder.construct_empty_kernel(kernel_name)

    # Generate COFFEE
    index_names = []

    def name_index(index, name):
        index_names.append((index, name))
        if index in index_cache:
            for multiindex, suffix in zip(index_cache[index],
                                          string.ascii_lowercase):
                name_multiindex(multiindex, name + suffix)

    def name_multiindex(multiindex, name):
        if len(multiindex) == 1:
            name_index(multiindex[0], name)
        else:
            for i, index in enumerate(multiindex):
                name_index(index, name + str(i))

    name_multiindex(quadrature_indices, 'ip')
    for multiindex, name in zip(argument_multiindices, ['j', 'k']):
        name_multiindex(multiindex, name)

    # Construct kernel
    body = generate_coffee(impero_c, index_names, parameters["precision"],
                           expressions, split_argument_indices)

    return builder.construct_kernel(kernel_name, body)
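# --- Usage sketch (not part of compile_integral itself) -----------------------
# compile_integral is normally reached through TSFC's public entry point,
# tsfc.compile_form, which preprocesses a UFL form and calls compile_integral
# once per integral.  A hedged, pure-UFL sketch follows; the exact attributes
# of the returned kernel objects (e.g. .ast holding the generated COFFEE AST)
# depend on the TSFC version and are an assumption here.
import ufl
from tsfc import compile_form

cell = ufl.triangle
mesh = ufl.Mesh(ufl.VectorElement("Lagrange", cell, 1))
V = ufl.FunctionSpace(mesh, ufl.FiniteElement("Lagrange", cell, 1))
u = ufl.TrialFunction(V)
v = ufl.TestFunction(V)
a = ufl.dot(ufl.grad(u), ufl.grad(v)) * ufl.dx   # Poisson stiffness form

kernels = compile_form(a, prefix="poisson")
for kernel in kernels:
    print(kernel.ast)   # generated kernel code for each integral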