def derivative_action(self, dependencies, values, variable, contraction_vector, hermitian):
    # If you want to apply boundary conditions symmetrically in the adjoint
    # -- and you often do --
    # then we need to have a UFL representation of all the terms in the adjoint equation.
    # However!
    # Since UFL cannot represent the identity map,
    # we need to find an f such that when
    # assemble(inner(f, v)*dx)
    # we get the contraction_vector.data back.
    # This involves inverting a mass matrix.

    if backend.parameters["adjoint"]["symmetric_bcs"] and backend.__version__ <= '1.2.0':
        backend.info_red("Warning: symmetric BC application requested but unavailable in dolfin <= 1.2.0.")

    if backend.parameters["adjoint"]["symmetric_bcs"] and backend.__version__ > '1.2.0':
        V = contraction_vector.data.function_space()
        v = backend.TestFunction(V)

        if str(V) not in adjglobals.fsp_lu:
            u = backend.TrialFunction(V)
            A = backend.assemble(backend.inner(u, v)*backend.dx)
            lusolver = backend.LUSolver(A, "mumps")
            lusolver.parameters["symmetric"] = True
            lusolver.parameters["reuse_factorization"] = True
            adjglobals.fsp_lu[str(V)] = lusolver
        else:
            lusolver = adjglobals.fsp_lu[str(V)]

        riesz = backend.Function(V)
        lusolver.solve(riesz.vector(), contraction_vector.data.vector())
        return adjlinalg.Vector(backend.inner(riesz, v)*backend.dx)
    else:
        return adjlinalg.Vector(contraction_vector.data)
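# A minimal standalone sketch (plain legacy DOLFIN, not the dolfin-adjoint internals above)
# of the mass-matrix trick described in the comment: given a dual vector b, find f such
# that assemble(inner(f, v)*dx) reproduces b, by solving M f = b with the mass matrix M.
from dolfin import *

mesh = UnitSquareMesh(8, 8)
V = FunctionSpace(mesh, "CG", 1)
u, v = TrialFunction(V), TestFunction(V)

b = assemble(Constant(1.0)*v*dx)   # some dual-space vector
M = assemble(inner(u, v)*dx)       # mass matrix

f = Function(V)
solve(M, f.vector(), b)            # Riesz representative of b

# Sanity check: assembling inner(f, v)*dx recovers b (up to solver tolerance).
print((assemble(inner(f, v)*dx) - b).norm("l2"))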
def _ad_convert_type(self, value, options=None):
    options = {} if options is None else options
    riesz_representation = options.get("riesz_representation", "l2")

    if riesz_representation == "l2":
        return create_overloaded_object(
            compat.function_from_vector(self.function_space(), value, cls=backend.Function))
    elif riesz_representation == "L2":
        ret = compat.create_function(self.function_space())
        u = backend.TrialFunction(self.function_space())
        v = backend.TestFunction(self.function_space())
        M = backend.assemble(backend.inner(u, v) * backend.dx)
        compat.linalg_solve(M, ret.vector(), value)
        return ret
    elif riesz_representation == "H1":
        ret = compat.create_function(self.function_space())
        u = backend.TrialFunction(self.function_space())
        v = backend.TestFunction(self.function_space())
        M = backend.assemble(
            backend.inner(u, v) * backend.dx
            + backend.inner(backend.grad(u), backend.grad(v)) * backend.dx)
        compat.linalg_solve(M, ret.vector(), value)
        return ret
    elif callable(riesz_representation):
        return riesz_representation(value)
    else:
        raise NotImplementedError("Unknown Riesz representation %s" % riesz_representation)
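# A usage sketch (assuming the standard fenics_adjoint/pyadjoint API): the
# "riesz_representation" option handled above is typically what arrives via the
# options argument of compute_gradient(), selecting how the dual gradient vector
# is mapped back to a Function.
from fenics import *
from fenics_adjoint import *

mesh = UnitSquareMesh(8, 8)
V = FunctionSpace(mesh, "CG", 1)
f = Function(V)
u = Function(V)
v = TestFunction(V)
solve(inner(grad(u), grad(v))*dx + u*v*dx - f*v*dx == 0, u)   # hypothetical forward problem
J = assemble(u*u*dx)

dJdf_l2 = compute_gradient(J, Control(f))                                           # raw l2 vector
dJdf_L2 = compute_gradient(J, Control(f), options={"riesz_representation": "L2"})   # mass-matrix Riesz map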
def compute_gst(ic, final, nsv, ic_norm="mass", final_norm="mass", which=1): '''This function computes the generalised stability analysis of a simulation. Generalised stability theory computes the perturbations to a field (such as an initial condition, forcing term, etc.) that /grow the most/ over the finite time window of the simulation. For more details, see the mathematical documentation on `the website <http://dolfin-adjoint.org>`_. - :py:data:`ic` -- the input of the propagator - :py:data:`final` -- the output of the propagator - :py:data:`nsv` -- the number of optimal perturbations to compute - :py:data:`ic_norm` -- a symmetric positive-definite bilinear form that defines the norm on the input space - :py:data:`final_norm` -- a symmetric positive-definite bilinear form that defines the norm on the output space - :py:data:`which` -- which singular vectors to compute. Use e.g. slepc4py.SLEPc.EPS.Which.LARGEST_REAL You can supply :py:data:`"mass"` for :py:data:`ic_norm` and :py:data:`final_norm` to use the (default) mass matrices associated with these spaces. For example: .. code-block:: python gst = compute_gst("State", "State", nsv=10) for i in range(gst.ncv): # number of converged vectors (sigma, u, v) = gst.get_gst(i, return_vectors=True) ''' ic_var = adjglobals.adj_variables[ic] ic_var.c_object.timestep = 0 ic_var.c_object.iteration = 0 final_var = adjglobals.adj_variables[final] if final_norm == "mass": final_value = adjglobals.adjointer.get_variable_value(final_var).data final_fnsp = final_value.function_space() u = backend.TrialFunction(final_fnsp) v = backend.TestFunction(final_fnsp) final_mass = backend.inner(u, v) * backend.dx final_norm = adjlinalg.Matrix(final_mass) elif final_norm is not None: final_norm = adjlinalg.Matrix(final_norm) if ic_norm == "mass": ic_value = adjglobals.adjointer.get_variable_value(ic_var).data ic_fnsp = ic_value.function_space() u = backend.TrialFunction(ic_fnsp) v = backend.TestFunction(ic_fnsp) ic_mass = backend.inner(u, v) * backend.dx ic_norm = adjlinalg.Matrix(ic_mass) elif ic_norm is not None: ic_norm = adjlinalg.Matrix(ic_norm) return adjglobals.adjointer.compute_gst(ic_var, ic_norm, final_var, final_norm, nsv, which)
def __init__(self, v, V, output, bcs=[], *args, **kwargs):
    mesh = kwargs.pop("mesh", None)
    if mesh is None:
        mesh = V.mesh()
    dx = backend.dx(mesh)
    w = backend.TestFunction(V)
    Pv = backend.TrialFunction(V)
    a = backend.inner(w, Pv) * dx
    L = backend.inner(w, v) * dx
    super(ProjectBlock, self).__init__(a == L, output, bcs, *args, **kwargs)
def compute_gst(ic, final, nsv, ic_norm="mass", final_norm="mass", which=1): """This function computes the generalised stability analysis of a simulation. Generalised stability theory computes the perturbations to a field (such as an initial condition, forcing term, etc.) that /grow the most/ over the finite time window of the simulation. For more details, see the mathematical documentation on `the website <http://dolfin-adjoint.org>`_. - :py:data:`ic` -- the input of the propagator - :py:data:`final` -- the output of the propagator - :py:data:`nsv` -- the number of optimal perturbations to compute - :py:data:`ic_norm` -- a symmetric positive-definite bilinear form that defines the norm on the input space - :py:data:`final_norm` -- a symmetric positive-definite bilinear form that defines the norm on the output space - :py:data:`which` -- which singular vectors to compute. Use e.g. slepc4py.SLEPc.EPS.Which.LARGEST_REAL You can supply :py:data:`"mass"` for :py:data:`ic_norm` and :py:data:`final_norm` to use the (default) mass matrices associated with these spaces. For example: .. code-block:: python gst = compute_gst("State", "State", nsv=10) for i in range(gst.ncv): # number of converged vectors (sigma, u, v) = gst.get_gst(i, return_vectors=True) """ ic_var = adjglobals.adj_variables[ic] ic_var.c_object.timestep = 0 ic_var.c_object.iteration = 0 final_var = adjglobals.adj_variables[final] if final_norm == "mass": final_value = adjglobals.adjointer.get_variable_value(final_var).data final_fnsp = final_value.function_space() u = backend.TrialFunction(final_fnsp) v = backend.TestFunction(final_fnsp) final_mass = backend.inner(u, v) * backend.dx final_norm = adjlinalg.Matrix(final_mass) elif final_norm is not None: final_norm = adjlinalg.Matrix(final_norm) if ic_norm == "mass": ic_value = adjglobals.adjointer.get_variable_value(ic_var).data ic_fnsp = ic_value.function_space() u = backend.TrialFunction(ic_fnsp) v = backend.TestFunction(ic_fnsp) ic_mass = backend.inner(u, v) * backend.dx ic_norm = adjlinalg.Matrix(ic_mass) elif ic_norm is not None: ic_norm = adjlinalg.Matrix(ic_norm) return adjglobals.adjointer.compute_gst(ic_var, ic_norm, final_var, final_norm, nsv, which)
def _ad_dot(self, other, options=None):
    options = {} if options is None else options
    riesz_representation = options.get("riesz_representation", "l2")

    if riesz_representation == "l2":
        return self.vector().inner(other.vector())
    elif riesz_representation == "L2":
        return backend.assemble(backend.inner(self, other) * backend.dx)
    elif riesz_representation == "H1":
        return backend.assemble(
            (backend.inner(self, other)
             + backend.inner(backend.grad(self), backend.grad(other))) * backend.dx)
    else:
        raise NotImplementedError("Unknown Riesz representation %s" % riesz_representation)
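# A small standalone check (plain legacy DOLFIN) of what the branches above compute for
# two Functions: the default "l2" branch pairs the raw coefficient vectors, while the
# "L2" and "H1" branches are integral pairings.
from dolfin import *

mesh = UnitIntervalMesh(16)
V = FunctionSpace(mesh, "CG", 1)
f = interpolate(Expression("x[0]", degree=1), V)
g = interpolate(Constant(2.0), V)

print(f.vector().inner(g.vector()))                           # "l2": dot product of dof vectors
print(assemble(inner(f, g)*dx))                               # "L2": integral pairing, = 1.0 here
print(assemble((inner(f, g) + inner(grad(f), grad(g)))*dx))   # "H1" pairing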
def derivative_action(self, dependencies, values, variable, contraction_vector, hermitian):
    idx = dependencies.index(variable)

    # If you want to apply boundary conditions symmetrically in the adjoint
    # -- and you often do --
    # then we need to have a UFL representation of all the terms in the adjoint equation.
    # However!
    # Since UFL cannot represent the identity map,
    # we need to find an f such that when
    # assemble(inner(f, v)*dx)
    # we get the contraction_vector.data back.
    # This involves inverting a mass matrix.

    if backend.parameters["adjoint"]["symmetric_bcs"] and backend.__version__ <= '1.2.0':
        backend.info_red("Warning: symmetric BC application requested but unavailable in dolfin <= 1.2.0.")

    if backend.parameters["adjoint"]["symmetric_bcs"] and backend.__version__ > '1.2.0':
        V = contraction_vector.data.function_space()
        v = backend.TestFunction(V)

        if str(V) not in adjglobals.fsp_lu:
            u = backend.TrialFunction(V)
            A = backend.assemble(backend.inner(u, v) * backend.dx)
            solver = "mumps" if "mumps" in backend.lu_solver_methods().keys() else "default"
            lusolver = backend.LUSolver(A, solver)
            lusolver.parameters["symmetric"] = True
            lusolver.parameters["reuse_factorization"] = True
            adjglobals.fsp_lu[str(V)] = lusolver
        else:
            lusolver = adjglobals.fsp_lu[str(V)]

        riesz = backend.Function(V)
        lusolver.solve(riesz.vector(), self.weights[idx] * contraction_vector.data.vector())
        out = (backend.inner(riesz, v) * backend.dx)
    else:
        out = backend.Function(self.fn_space)
        out.assign(self.weights[idx] * contraction_vector.data)

    return adjlinalg.Vector(out)
def _assemble_and_solve_adj_eq(self, dFdu_adj_form, dJdu, compute_bdy):
    dJdu_copy = dJdu.copy()
    bcs = self._homogenize_bcs()

    solver = self.block_helper.adjoint_solver
    if solver is None:
        if self.assemble_system:
            rhs_bcs_form = backend.inner(backend.Function(self.function_space),
                                         dFdu_adj_form.arguments()[0]) * backend.dx
            A, _ = backend.assemble_system(dFdu_adj_form, rhs_bcs_form, bcs)
        else:
            A = compat.assemble_adjoint_value(dFdu_adj_form)
            [bc.apply(A) for bc in bcs]

        solver = backend.LUSolver(A, self.method)
        self.block_helper.adjoint_solver = solver

    solver.parameters.update(self.lu_solver_parameters)
    [bc.apply(dJdu) for bc in bcs]

    adj_sol = backend.Function(self.function_space)
    solver.solve(adj_sol.vector(), dJdu)

    adj_sol_bdy = None
    if compute_bdy:
        adj_sol_bdy = compat.function_from_vector(
            self.function_space,
            dJdu_copy - compat.assemble_adjoint_value(backend.action(dFdu_adj_form, adj_sol)))

    return adj_sol, adj_sol_bdy
def _assemble_and_solve_adj_eq(self, dFdu_adj_form, dJdu, compute_bdy):
    dJdu_copy = dJdu.copy()
    bcs = self._homogenize_bcs()

    if self.assemble_system:
        rhs_bcs_form = backend.inner(backend.Function(self.function_space),
                                     dFdu_adj_form.arguments()[0]) * backend.dx
        A, _ = backend.assemble_system(dFdu_adj_form, rhs_bcs_form, bcs)
    else:
        A = backend.assemble(dFdu_adj_form)
        [bc.apply(A) for bc in bcs]

    [bc.apply(dJdu) for bc in bcs]

    adj_sol = compat.create_function(self.function_space)
    compat.linalg_solve(A, adj_sol.vector(), dJdu, *self.adj_args, **self.adj_kwargs)

    adj_sol_bdy = None
    if compute_bdy:
        adj_sol_bdy = compat.function_from_vector(
            self.function_space,
            dJdu_copy - compat.assemble_adjoint_value(backend.action(dFdu_adj_form, adj_sol)))

    return adj_sol, adj_sol_bdy
def norm(self): if isinstance(self.data, backend.Function): return (abs(backend.assemble(backend.inner(self.data, self.data)*backend.dx)))**0.5 elif isinstance(self.data, ufl.form.Form): return backend.assemble(self.data).norm("l2") elif isinstance(self.data, backend.MultiMeshFunction): raise NotImplementedError
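# A quick standalone check (plain legacy DOLFIN) of the Function branch above:
# assemble(inner(f, f)*dx)**0.5 is the L2 norm, which DOLFIN's norm() also provides.
from dolfin import *

mesh = UnitSquareMesh(8, 8)
V = FunctionSpace(mesh, "CG", 1)
f = interpolate(Expression("x[0]", degree=1), V)

print(abs(assemble(inner(f, f)*dx))**0.5)
print(norm(f, "L2"))   # should agree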
def _assemble_and_solve_adj_eq(self, dFdu_adj_form, dJdu, compute_bdy):
    dJdu_copy = dJdu.copy()
    bcs = self._homogenize_bcs()

    solver = self.block_helper.adjoint_solver
    if solver is None:
        solver = backend.PETScKrylovSolver(self.method, self.preconditioner)
        solver.ksp().setOptionsPrefix(self.ksp_options_prefix)
        solver.set_from_options()

        if self.assemble_system:
            rhs_bcs_form = backend.inner(backend.Function(self.function_space),
                                         dFdu_adj_form.arguments()[0]) * backend.dx
            A, _ = backend.assemble_system(dFdu_adj_form, rhs_bcs_form, bcs)

            if self._ad_nullspace is not None:
                as_backend_type(A).set_nullspace(self._ad_nullspace)

            if self.pc_operator is not None:
                P = self._replace_form(self.pc_operator)
                P, _ = backend.assemble_system(P, rhs_bcs_form, bcs)
                solver.set_operators(A, P)
            else:
                solver.set_operator(A)
        else:
            A = compat.assemble_adjoint_value(dFdu_adj_form)
            [bc.apply(A) for bc in bcs]

            if self._ad_nullspace is not None:
                as_backend_type(A).set_nullspace(self._ad_nullspace)

            if self.pc_operator is not None:
                P = self._replace_form(self.pc_operator)
                P = compat.assemble_adjoint_value(P)
                [bc.apply(P) for bc in bcs]
                solver.set_operators(A, P)
            else:
                solver.set_operator(A)

        self.block_helper.adjoint_solver = solver

    solver.parameters.update(self.krylov_solver_parameters)
    [bc.apply(dJdu) for bc in bcs]

    if self._ad_nullspace is not None:
        if self._ad_nullspace._ad_orthogonalized:
            self._ad_nullspace.orthogonalize(dJdu)

    adj_sol = backend.Function(self.function_space)
    solver.solve(adj_sol.vector(), dJdu)

    adj_sol_bdy = None
    if compute_bdy:
        adj_sol_bdy = compat.function_from_vector(
            self.function_space,
            dJdu_copy - compat.assemble_adjoint_value(backend.action(dFdu_adj_form, adj_sol)))

    return adj_sol, adj_sol_bdy
def project_dolfin(v, V=None, bcs=None, mesh=None, solver_type="cg", preconditioner_type="default", form_compiler_parameters=None, annotate=None, name=None): '''The project call performs an equation solve, and so it too must be annotated so that the adjoint and tangent linear models may be constructed automatically by libadjoint. To disable the annotation of this function, just pass :py:data:`annotate=False`. This is useful in cases where the solve is known to be irrelevant or diagnostic for the purposes of the adjoint computation (such as projecting fields to other function spaces for the purposes of visualisation).''' to_annotate = utils.to_annotate(annotate) if isinstance(v, backend.Expression) and (annotate is not True): to_annotate = False if isinstance(v, backend.Constant) and (annotate is not True): to_annotate = False out = backend.project(v=v, V=V, bcs=bcs, mesh=mesh, solver_type=solver_type, preconditioner_type=preconditioner_type, form_compiler_parameters=form_compiler_parameters) out = utils.function_to_da_function(out) if name is not None: out.adj_name = name out.rename(name, "a Function from dolfin-adjoint") if to_annotate: # reproduce the logic from project. This probably isn't future-safe, but anyway if V is None: V = backend.fem.projection._extract_function_space(v, mesh) if mesh is None: mesh = V.mesh() # Define variational problem for projection w = backend.TestFunction(V) Pv = backend.TrialFunction(V) a = backend.inner(w, Pv)*backend.dx(domain=mesh) L = backend.inner(w, v)*backend.dx(domain=mesh) solving.annotate(a == L, out, bcs, solver_parameters={"linear_solver": solver_type, "preconditioner": preconditioner_type, "symmetric": True}) if backend.parameters["adjoint"]["record_all"]: adjglobals.adjointer.record_variable(adjglobals.adj_variables[out], libadjoint.MemoryStorage(adjlinalg.Vector(out))) return out
def project_test(func):
    if isinstance(func, backend.Function):
        V = func.function_space()
        u = backend.TrialFunction(V)
        v = backend.TestFunction(V)
        M = backend.assemble(backend.inner(u, v) * backend.dx)
        proj = backend.Function(V)
        backend.solve(M, proj.vector(), func.vector())
        return proj
    else:
        return func
def derivative_action(self, dependencies, values, variable, contraction_vector, hermitian):
    if not hermitian:
        fn = Function_split(contraction_vector)[self.idx]
        action = backend.inner(self.test, fn)*backend.dx
    else:
        bigtest = backend.TestFunction(self.function.function_space())
        outfn = backend.Function(self.function.function_space())

        # DOLFIN is a bit annoying when it comes to splits. Actually, it is very annoying.
        # You can't do anything like
        # outfn[idx].vector()[:] = values_I_want_to_assign_to_outfn[idx]
        # or
        # fn = outfn.split()[idx]; fn.vector()[:] = values_I_want_to_assign_to_outfn[idx]
        # for whatever reason
        assert False, "No idea how to assign to a subfunction yet .. "
        assignment.dolfin_assign(outfn, contraction_vector.data)
        action = backend.inner(bigtest, outfn)*backend.dx

    return adjlinalg.Vector(action)
def annotate_split(bigfn, idx, smallfn, bcs):
    fn_space = smallfn.function_space().collapse()
    test = backend.TestFunction(fn_space)
    trial = backend.TrialFunction(fn_space)
    eq_lhs = backend.inner(test, trial)*backend.dx

    diag_name = "Split:%s:" % idx + hashlib.md5(str(eq_lhs) + "split" + str(smallfn) + str(bigfn) + str(idx) + str(random.random())).hexdigest()
    diag_deps = []
    diag_block = libadjoint.Block(diag_name, dependencies=diag_deps,
                                  test_hermitian=backend.parameters["adjoint"]["test_hermitian"],
                                  test_derivative=backend.parameters["adjoint"]["test_derivative"])

    solving.register_initial_conditions([(bigfn, adjglobals.adj_variables[bigfn])], linear=True, var=None)

    var = adjglobals.adj_variables.next(smallfn)
    frozen_expressions_dict = expressions.freeze_dict()

    def diag_assembly_cb(dependencies, values, hermitian, coefficient, context):
        '''This callback must conform to the libadjoint Python block assembly
        interface. It returns either the form or its transpose, depending on
        the value of the logical hermitian.'''

        assert coefficient == 1
        expressions.update_expressions(frozen_expressions_dict)
        value_coeffs = [v.data for v in values]
        eq_l = eq_lhs

        if hermitian:
            adjoint_bcs = [utils.homogenize(bc) for bc in bcs if isinstance(bc, backend.DirichletBC)] + \
                          [bc for bc in bcs if not isinstance(bc, backend.DirichletBC)]
            if len(adjoint_bcs) == 0:
                adjoint_bcs = None
            return (adjlinalg.Matrix(backend.adjoint(eq_l), bcs=adjoint_bcs),
                    adjlinalg.Vector(None, fn_space=fn_space))
        else:
            return (adjlinalg.Matrix(eq_l, bcs=bcs),
                    adjlinalg.Vector(None, fn_space=fn_space))

    diag_block.assemble = diag_assembly_cb

    rhs = SplitRHS(test, bigfn, idx)

    eqn = libadjoint.Equation(var, blocks=[diag_block], targets=[var], rhs=rhs)

    cs = adjglobals.adjointer.register_equation(eqn)
    solving.do_checkpoint(cs, var, rhs)

    if backend.parameters["adjoint"]["fussy_replay"]:
        mass = eq_lhs
        smallfn_massed = backend.Function(fn_space)
        backend.solve(mass == backend.action(mass, smallfn), smallfn_massed)
        assert False, "No idea how to assign to a subfunction yet .. "
        #assignment.dolfin_assign(bigfn, smallfn_massed)

    if backend.parameters["adjoint"]["record_all"]:
        smallfn_record = backend.Function(fn_space)
        assignment.dolfin_assign(smallfn_record, smallfn)
        adjglobals.adjointer.record_variable(var, libadjoint.MemoryStorage(adjlinalg.Vector(smallfn_record)))
def dot_product(self, y):
    if isinstance(self.data, ufl.form.Form):
        return backend.assemble(backend.inner(self.data, y.data)*backend.dx)
    elif isinstance(self.data, backend.Function):
        if isinstance(y.data, ufl.form.Form):
            other = backend.assemble(y.data)
        else:
            other = y.data.vector()
        return self.data.vector().inner(other)
    else:
        raise libadjoint.exceptions.LibadjointErrorNotImplemented("Don't know how to dot anything else.")
def define_nonlinear_equation(F, u):
    # Given an F := 0,
    # we write the equation for libadjoint's annotation purposes as
    # M.u = M.u + F(u)
    # as we need to have something on the diagonal in our big time system

    fn_space = u.function_space()
    test = backend.TestFunction(fn_space)
    trial = backend.TrialFunction(fn_space)

    mass = backend.inner(test, trial) * backend.dx

    return (mass, backend.action(mass, u) - F)
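# A standalone sketch (plain legacy DOLFIN) of the reformulation above: for a nonlinear
# residual F(u; v) == 0, the pair (mass, action(mass, u) - F) expresses the identity
# M u = M u - F(u), which holds exactly at the solution and puts a mass matrix on the
# diagonal of the annotated time system.
from dolfin import *

mesh = UnitIntervalMesh(16)
V = FunctionSpace(mesh, "CG", 1)
u = Function(V)
v = TestFunction(V)
trial = TrialFunction(V)

F = inner(grad(u), grad(v))*dx + u*v*dx - Constant(1.0)*v*dx   # a hypothetical residual
mass = inner(v, trial)*dx

lhs, rhs = mass, action(mass, u) - F   # what define_nonlinear_equation returns
solve(F == 0, u)                       # at the solution, action(lhs, u) - rhs assembles to ~0
print(assemble(action(lhs, u) - rhs).norm("l2"))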
def evaluate_tlm_component(self, inputs, tlm_inputs, block_variable, idx, prepared=None):
    F_form = prepared["form"]
    dFdu = prepared["dFdu"]
    V = self.get_outputs()[idx].output.function_space()

    bcs = []
    dFdm = 0.
    dFdm_shape = 0.
    for block_variable in self.get_dependencies():
        tlm_value = block_variable.tlm_value
        c = block_variable.output
        c_rep = block_variable.saved_output

        if isinstance(c, backend.DirichletBC):
            if tlm_value is None:
                bcs.append(compat.create_bc(c, homogenize=True))
            else:
                bcs.append(tlm_value)
            continue
        elif isinstance(c, compat.MeshType):
            X = backend.SpatialCoordinate(c)
            c_rep = X

        if tlm_value is None:
            continue

        if c == self.func and not self.linear:
            continue

        if isinstance(c, compat.MeshType):
            dFdm_shape += compat.assemble_adjoint_value(backend.derivative(-F_form, c_rep, tlm_value))
        else:
            dFdm += backend.derivative(-F_form, c_rep, tlm_value)

    if isinstance(dFdm, float):
        v = dFdu.arguments()[0]
        dFdm = backend.inner(backend.Constant(numpy.zeros(v.ufl_shape)), v) * backend.dx

    dFdm = compat.assemble_adjoint_value(dFdm) + dFdm_shape

    dudm = backend.Function(V)
    return self._assemble_and_solve_tlm_eq(compat.assemble_adjoint_value(dFdu, bcs=bcs), dFdm, dudm, bcs)
def __call__(self, dependencies, values):
    fn = Function_split(values[0].data, deepcopy=True)[self.idx]
    return adjlinalg.Vector(backend.inner(self.test, fn)*backend.dx)
def perturbed_replay(parameter, perturbation, perturbation_scale, observation, perturbation_norm="mass", observation_norm="mass", callback=None, forget=False): r"""Perturb the forward run and compute .. math:: \frac{ \left|\left| \delta \mathrm{observation} \right|\right| }{ \left|\left| \delta \mathrm{input} \right| \right| } as a function of time. :py:data:`parameter` -- an FunctionControl to say what variable should be perturbed (e.g. FunctionControl('InitialConcentration')) :py:data:`perturbation` -- a Function to give the perturbation direction (from a GST analysis, for example) :py:data:`perturbation_norm` -- a bilinear Form which induces a norm on the space of perturbation inputs :py:data:`perturbation_scale` -- how big the norm of the initial perturbation should be :py:data:`observation` -- the variable to observe (e.g. 'Concentration') :py:data:`observation_norm` -- a bilinear Form which induces a norm on the space of perturbation outputs :py:data:`callback` -- a function f(var, perturbed, unperturbed) that the user can supply (e.g. to dump out variables during the perturbed replay) """ if not backend.parameters["adjoint"]["record_all"]: info_red( "Warning: your replay test will be much more effective with backend.parameters['adjoint']['record_all'] = True." ) assert isinstance(parameter, controls.FunctionControl) if perturbation_norm == "mass": p_fnsp = perturbation.function_space() u = backend.TrialFunction(p_fnsp) v = backend.TestFunction(p_fnsp) p_mass = backend.inner(u, v) * backend.dx perturbation_norm = p_mass if not isinstance(perturbation_norm, backend.GenericMatrix): perturbation_norm = backend.assemble(perturbation_norm) if not isinstance(observation_norm, backend.GenericMatrix) and observation_norm != "mass": observation_norm = backend.assemble(observation_norm) def compute_norm(perturbation, norm): # Need to compute <x, Ax> and then take its sqrt # where x is perturbation, A is norm try: vec = perturbation.vector() except: vec = perturbation Ax = vec.copy() norm.mult(vec, Ax) xAx = vec.inner(Ax) return math.sqrt(xAx) growths = [] for i in range(adjglobals.adjointer.equation_count): (fwd_var, output) = adjglobals.adjointer.get_forward_solution(i) if fwd_var == parameter.var: # we've hit the initial condition we want to perturb current_norm = compute_norm(perturbation, perturbation_norm) output.data.vector()[:] += (perturbation_scale / current_norm) * perturbation.vector() unperturbed = adjglobals.adjointer.get_variable_value(fwd_var).data if fwd_var.name == observation: # we've hit something we want to observe # Fetch the unperturbed result from the record if observation_norm == "mass": # we can't do this earlier, because we don't have the observation function space yet o_fnsp = output.data.function_space() u = backend.TrialFunction(o_fnsp) v = backend.TestFunction(o_fnsp) o_mass = backend.inner(u, v) * backend.dx observation_norm = backend.assemble(o_mass) diff = output.data.vector() - unperturbed.vector() growths.append( compute_norm(diff, observation_norm) / perturbation_scale) # <--- the action line if callback is not None: callback(fwd_var, output.data, unperturbed) storage = libadjoint.MemoryStorage(output) storage.set_compare(tol=None) storage.set_overwrite(True) out = adjglobals.adjointer.record_variable(fwd_var, storage) if forget: adjglobals.adjointer.forget_forward_equation(i) # can happen if we initialised a nonlinear solve with a constant zero guess if growths[0] == 0.0: return growths[1:] else: return growths
def norm(self): if isinstance(self.data, backend.Function): return (abs(backend.assemble(backend.inner(self.data, self.data)*backend.dx)))**0.5 elif isinstance(self.data, ufl.form.Form): return backend.assemble(self.data).norm("l2")
def project_dolfin(v, V=None, bcs=None, mesh=None, solver_type="lu", preconditioner_type="default", form_compiler_parameters=None, annotate=None, name=None): '''The project call performs an equation solve, and so it too must be annotated so that the adjoint and tangent linear models may be constructed automatically by libadjoint. To disable the annotation of this function, just pass :py:data:`annotate=False`. This is useful in cases where the solve is known to be irrelevant or diagnostic for the purposes of the adjoint computation (such as projecting fields to other function spaces for the purposes of visualisation).''' to_annotate = utils.to_annotate(annotate) if isinstance(v, backend.Expression) and (annotate is not True): to_annotate = False if isinstance(v, backend.Constant) and (annotate is not True): to_annotate = False out = backend.project(v=v, V=V, bcs=bcs, mesh=mesh, solver_type=solver_type, preconditioner_type=preconditioner_type, form_compiler_parameters=form_compiler_parameters) out = utils.function_to_da_function(out) if name is not None: out.adj_name = name out.rename(name, "a Function from dolfin-adjoint") if to_annotate: # reproduce the logic from project. This probably isn't future-safe, but anyway if V is None: V = backend.fem.projection._extract_function_space(v, mesh) if mesh is None: mesh = V.mesh() # Define variational problem for projection w = backend.TestFunction(V) Pv = backend.TrialFunction(V) a = backend.inner(w, Pv) * backend.dx(domain=mesh) L = backend.inner(w, v) * backend.dx(domain=mesh) solving.annotate(a == L, out, bcs, solver_parameters={ "linear_solver": solver_type, "preconditioner": preconditioner_type, "symmetric": True }) if backend.parameters["adjoint"]["record_all"]: adjglobals.adjointer.record_variable( adjglobals.adj_variables[out], libadjoint.MemoryStorage(adjlinalg.Vector(out))) return out
def annotate_split(bigfn, idx, smallfn, bcs):
    fn_space = smallfn.function_space().collapse()
    test = backend.TestFunction(fn_space)
    trial = backend.TrialFunction(fn_space)
    eq_lhs = backend.inner(test, trial) * backend.dx

    key = "{}split{}{}{}{}".format(eq_lhs, smallfn, bigfn, idx, random.random()).encode('utf8')
    diag_name = "Split:%s:" % idx + hashlib.md5(key).hexdigest()
    diag_deps = []
    diag_block = libadjoint.Block(
        diag_name, dependencies=diag_deps,
        test_hermitian=backend.parameters["adjoint"]["test_hermitian"],
        test_derivative=backend.parameters["adjoint"]["test_derivative"])

    solving.register_initial_conditions([(bigfn, adjglobals.adj_variables[bigfn])], linear=True, var=None)

    var = adjglobals.adj_variables.next(smallfn)
    frozen_expressions_dict = expressions.freeze_dict()

    def diag_assembly_cb(dependencies, values, hermitian, coefficient, context):
        '''This callback must conform to the libadjoint Python block assembly
        interface. It returns either the form or its transpose, depending on
        the value of the logical hermitian.'''

        assert coefficient == 1
        expressions.update_expressions(frozen_expressions_dict)
        value_coeffs = [v.data for v in values]
        eq_l = eq_lhs

        if hermitian:
            adjoint_bcs = [utils.homogenize(bc) for bc in bcs if isinstance(bc, backend.DirichletBC)] + \
                          [bc for bc in bcs if not isinstance(bc, backend.DirichletBC)]
            if len(adjoint_bcs) == 0:
                adjoint_bcs = None
            return (adjlinalg.Matrix(backend.adjoint(eq_l), bcs=adjoint_bcs),
                    adjlinalg.Vector(None, fn_space=fn_space))
        else:
            return (adjlinalg.Matrix(eq_l, bcs=bcs),
                    adjlinalg.Vector(None, fn_space=fn_space))

    diag_block.assemble = diag_assembly_cb

    rhs = SplitRHS(test, bigfn, idx)

    eqn = libadjoint.Equation(var, blocks=[diag_block], targets=[var], rhs=rhs)

    cs = adjglobals.adjointer.register_equation(eqn)
    solving.do_checkpoint(cs, var, rhs)

    if backend.parameters["adjoint"]["fussy_replay"]:
        mass = eq_lhs
        smallfn_massed = backend.Function(fn_space)
        backend.solve(mass == backend.action(mass, smallfn), smallfn_massed)
        assert False, "No idea how to assign to a subfunction yet .. "
        #assignment.dolfin_assign(bigfn, smallfn_massed)

    if backend.parameters["adjoint"]["record_all"]:
        smallfn_record = backend.Function(fn_space)
        assignment.dolfin_assign(smallfn_record, smallfn)
        adjglobals.adjointer.record_variable(var, libadjoint.MemoryStorage(adjlinalg.Vector(smallfn_record)))
def assemble(self):
    u = TrialFunction(self.V)
    v = TestFunction(self.V)
    A = inner(u, v) * dx + alpha * inner(grad(u), grad(v)) * dx
    return assemble(A)
def perturbed_replay(parameter, perturbation, perturbation_scale, observation, perturbation_norm="mass", observation_norm="mass", callback=None, forget=False): r"""Perturb the forward run and compute .. math:: \frac{ \left|\left| \delta \mathrm{observation} \right|\right| }{ \left|\left| \delta \mathrm{input} \right| \right| } as a function of time. :py:data:`parameter` -- an FunctionControl to say what variable should be perturbed (e.g. FunctionControl('InitialConcentration')) :py:data:`perturbation` -- a Function to give the perturbation direction (from a GST analysis, for example) :py:data:`perturbation_norm` -- a bilinear Form which induces a norm on the space of perturbation inputs :py:data:`perturbation_scale` -- how big the norm of the initial perturbation should be :py:data:`observation` -- the variable to observe (e.g. 'Concentration') :py:data:`observation_norm` -- a bilinear Form which induces a norm on the space of perturbation outputs :py:data:`callback` -- a function f(var, perturbed, unperturbed) that the user can supply (e.g. to dump out variables during the perturbed replay) """ if not backend.parameters["adjoint"]["record_all"]: info_red("Warning: your replay test will be much more effective with backend.parameters['adjoint']['record_all'] = True.") assert isinstance(parameter, controls.FunctionControl) if perturbation_norm == "mass": p_fnsp = perturbation.function_space() u = backend.TrialFunction(p_fnsp) v = backend.TestFunction(p_fnsp) p_mass = backend.inner(u, v)*backend.dx perturbation_norm = p_mass if not isinstance(perturbation_norm, backend.GenericMatrix): perturbation_norm = backend.assemble(perturbation_norm) if not isinstance(observation_norm, backend.GenericMatrix) and observation_norm != "mass": observation_norm = backend.assemble(observation_norm) def compute_norm(perturbation, norm): # Need to compute <x, Ax> and then take its sqrt # where x is perturbation, A is norm try: vec = perturbation.vector() except: vec = perturbation Ax = vec.copy() norm.mult(vec, Ax) xAx = vec.inner(Ax) return math.sqrt(xAx) growths = [] for i in range(adjglobals.adjointer.equation_count): (fwd_var, output) = adjglobals.adjointer.get_forward_solution(i) if fwd_var == parameter.var: # we've hit the initial condition we want to perturb current_norm = compute_norm(perturbation, perturbation_norm) output.data.vector()[:] += (perturbation_scale/current_norm) * perturbation.vector() unperturbed = adjglobals.adjointer.get_variable_value(fwd_var).data if fwd_var.name == observation: # we've hit something we want to observe # Fetch the unperturbed result from the record if observation_norm == "mass": # we can't do this earlier, because we don't have the observation function space yet o_fnsp = output.data.function_space() u = backend.TrialFunction(o_fnsp) v = backend.TestFunction(o_fnsp) o_mass = backend.inner(u, v)*backend.dx observation_norm = backend.assemble(o_mass) diff = output.data.vector() - unperturbed.vector() growths.append(compute_norm(diff, observation_norm)/perturbation_scale) # <--- the action line if callback is not None: callback(fwd_var, output.data, unperturbed) storage = libadjoint.MemoryStorage(output) storage.set_compare(tol=None) storage.set_overwrite(True) out = adjglobals.adjointer.record_variable(fwd_var, storage) if forget: adjglobals.adjointer.forget_forward_equation(i) # can happen if we initialised a nonlinear solve with a constant zero guess if growths[0] == 0.0: return growths[1:] else: return growths
def assemble(self):
    u = TrialFunction(self.V)
    v = TestFunction(self.V)
    A = inner(u, v)*dx
    return assemble(A)