def assemble_data(self):
    assert not isinstance(self.data, IdentityMatrix)

    if backend.__name__ == "firedrake":
        # Firedrake specifies the assembled matrix type as part of the
        # solver parameters.
        mat_type = self.solver_parameters.get("mat_type")
        assemble = lambda x: backend.assemble(x, mat_type=mat_type)
    else:
        assemble = backend.assemble

    if not self.cache:
        if hasattr(self.data.arguments()[0], '_V_multi'):
            return backend.assemble_multimesh(self.data)
        else:
            return assemble(self.data)
    else:
        if self.data in caching.assembled_adj_forms:
            if backend.parameters["adjoint"]["debug_cache"]:
                backend.info_green("Got an assembly cache hit")
            return caching.assembled_adj_forms[self.data]
        else:
            if backend.parameters["adjoint"]["debug_cache"]:
                backend.info_red("Got an assembly cache miss")

            if hasattr(self.data.arguments()[0], '_V_multi'):
                M = backend.assemble_multimesh(self.data)
            else:
                M = assemble(self.data)

            caching.assembled_adj_forms[self.data] = M
            return M
def hessian(self, m_dot, project=False):
    """ Evaluates the Hessian action at the most recently evaluated control
    value in direction m_dot.

    Args:
        m_dot: The direction in control space in which to compute the
            Hessian. Must be of the same type as the Control (e.g. Function,
            Constant or lists of the latter).
        project (Optional[bool]): If True, the returned value will be the L2
            Riesz representer; if False it will be the l2 Riesz representer.
            The L2 projection requires one additional linear solve.
            Defaults to False.

    Returns:
        The directional second derivative. The returned type is the same as
        the control type.

    Note: Hessian evaluations never delete the forward state.
    """

    # Check if we already have the Hessian action in the cache.
    # If so, return the cached value.
    if self.cache is not None:
        hash = value_hash([x.data() for x in self.controls] + [m_dot])
        fnspaces = [p.data().function_space() if isinstance(p.data(), Function)
                    else None for p in self.controls]

        if hash in self._cache["hessian_cache"]:
            info_green("Got a Hessian cache hit.")
            return cache_load(self._cache["hessian_cache"][hash], fnspaces)
        else:
            info_red("Got a Hessian cache miss")

    # Compute the Hessian action by solving the second order adjoint equations
    Hm = self.H(m_dot, project=project)

    # Apply the scaling factor
    scaled_Hm = utils.scale(Hm, self.scale)

    # Call callback
    control_data = [p.data() for p in self.controls]
    if self.current_func_value is not None:
        current_func_value = self.scale * self.current_func_value
    else:
        current_func_value = None

    self.hessian_cb(current_func_value,
                    delist(control_data, list_type=self.controls),
                    m_dot,
                    scaled_Hm)

    # Cache the result
    if self.cache is not None:
        self._cache["hessian_cache"][hash] = cache_store(scaled_Hm, self.cache)

    return scaled_Hm
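# ---------------------------------------------------------------------------
# Illustrative usage (not part of the module above): a minimal sketch, under
# the assumption of the classic dolfin-adjoint user API (ReducedFunctional,
# Control, Functional), showing how __call__, derivative and hessian are
# driven from user code. The Poisson-style forward model and the names V, f,
# u and m_dot are hypothetical.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    # dolfin_adjoint is imported after dolfin so that solve() is annotated.
    from dolfin import *
    from dolfin_adjoint import *

    mesh = UnitSquareMesh(8, 8)
    V = FunctionSpace(mesh, "CG", 1)

    f = interpolate(Constant(1.0), V, name="Control")
    u = Function(V, name="State")
    v = TestFunction(V)
    bc = DirichletBC(V, 0.0, "on_boundary")

    # Forward model: a simple Poisson solve, recorded on the tape
    F = (inner(grad(u), grad(v)) - f * v) * dx
    solve(F == 0, u, bc)

    J = Functional(0.5 * inner(u, u) * dx)
    rf = ReducedFunctional(J, Control(f))

    j = rf(f)                          # replay the tape and evaluate J
    dj = rf.derivative(forget=False)   # adjoint gradient; keep forward state
    m_dot = interpolate(Constant(1.0), V)
    Hm = rf.hessian(m_dot)             # Hessian action in direction m_dot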
def derivative(self, forget=True, project=False):
    ''' Evaluates the derivative of the reduced functional for the most
    recently evaluated control value. '''

    # Check if we already have the gradient in the cache.
    # If so, return the cached value.
    if self.cache is not None:
        hash = value_hash([x.data() for x in self.controls])
        fnspaces = [p.data().function_space() if isinstance(p.data(), Function)
                    else None for p in self.controls]

        if hash in self._cache["derivative_cache"]:
            info_green("Got a derivative cache hit.")
            return cache_load(self._cache["derivative_cache"][hash], fnspaces)

    # Call callback
    values = [p.data() for p in self.controls]
    self.derivative_cb_pre(delist(values, list_type=self.controls))

    # Compute the gradient by solving the adjoint equations
    dfunc_value = drivers.compute_gradient(self.functional, self.controls,
                                           forget=forget, project=project)
    dfunc_value = enlist(dfunc_value)

    # Reset the checkpointing state in dolfin-adjoint
    adjointer.reset_revolve()

    # Apply the scaling factor
    scaled_dfunc_value = [utils.scale(df, self.scale) for df in list(dfunc_value)]

    # Call callback
    # We might have forgotten the control values already,
    # in which case we can only return Nones
    values = []
    for p in self.controls:
        try:
            values.append(p.data())
        except libadjoint.exceptions.LibadjointErrorNeedValue:
            values.append(None)

    if self.current_func_value is not None:
        self.derivative_cb_post(self.scale * self.current_func_value,
                                delist(scaled_dfunc_value, list_type=self.controls),
                                delist(values, list_type=self.controls))

    # Cache the result
    if self.cache is not None:
        info_red("Got a derivative cache miss")
        self._cache["derivative_cache"][hash] = cache_store(scaled_dfunc_value, self.cache)

    return scaled_dfunc_value
def caching_solve(self, var, b):
    if isinstance(self.data, IdentityMatrix):
        output = b.duplicate()
        output.axpy(1.0, b)
        if isinstance(output.data, ufl.Form):
            output = Vector(backend.Function(output.fn_space,
                                             backend.assemble(output.data)))
    elif b.data is None:
        backend.warning("Warning: got zero RHS for the solve associated with variable %s" % var)
        output = Vector(backend.Function(self.test_function().function_space()))
    else:
        dirichlet_bcs = [utils.homogenize(bc) for bc in self.bcs
                         if isinstance(bc, backend.DirichletBC)]
        other_bcs = [bc for bc in self.bcs
                     if not isinstance(bc, backend.DirichletBC)]
        bcs = dirichlet_bcs + other_bcs

        output = Vector(backend.Function(self.test_function().function_space()))
        #print "b.data is a %s in the solution of %s" % (b.data.__class__, var)

        # Assemble the right-hand side
        if backend.parameters["adjoint"]["symmetric_bcs"] and backend.__version__ > '1.2.0':
            assembler = backend.SystemAssembler(self.data, b.data, bcs)
            assembled_rhs = backend.Vector()
            assembler.assemble(assembled_rhs)
        elif isinstance(b.data, ufl.Form):
            assembled_rhs = wrap_assemble(b.data, self.test_function())
        else:
            if backend.__name__ == "dolfin":
                assembled_rhs = b.data.vector()
            else:
                assembled_rhs = b.data
        [bc.apply(assembled_rhs) for bc in bcs]

        if var not in caching.lu_solvers:
            if backend.parameters["adjoint"]["debug_cache"]:
                backend.info_red("Got a cache miss for %s" % var)

            # Assemble and factorise the operator only on a cache miss
            if backend.parameters["adjoint"]["symmetric_bcs"] and backend.__version__ > '1.2.0':
                assembled_lhs = backend.Matrix()
                assembler.assemble(assembled_lhs)
            else:
                assembled_lhs = self.assemble_data()
                [bc.apply(assembled_lhs) for bc in bcs]

            caching.lu_solvers[var] = compatibility.LUSolver(assembled_lhs, "mumps")
            caching.lu_solvers[var].parameters["reuse_factorization"] = True
        else:
            if backend.parameters["adjoint"]["debug_cache"]:
                backend.info_green("Got a cache hit for %s" % var)

        caching.lu_solvers[var].solve(output.data.vector(), assembled_rhs)

    return output
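# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the module above) of the factorisation-
# reuse pattern behind caching.lu_solvers: the operator is factorised once
# per variable and the stored factorisation is reused for every subsequent
# right-hand side. scipy stands in for the backend here, and the cache key
# "adjoint:u" is hypothetical.
# ---------------------------------------------------------------------------
import numpy as np
import scipy.sparse as sp
import scipy.sparse.linalg as spla

_lu_cache = {}

def _cached_lu_solve(key, A, rhs):
    # Cache miss: factorise and store. Cache hit: reuse the factorisation.
    if key not in _lu_cache:
        _lu_cache[key] = spla.splu(sp.csc_matrix(A))
    return _lu_cache[key].solve(rhs)

def _example_lu_reuse():
    n = 5
    A = sp.diags([-1.0, 2.0, -1.0], [-1, 0, 1], shape=(n, n))
    x1 = _cached_lu_solve("adjoint:u", A, np.ones(n))     # factorises A
    x2 = _cached_lu_solve("adjoint:u", A, np.arange(n))   # reuses the factors
    return x1, x2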
def assemble_data(self):
    assert not isinstance(self.data, IdentityMatrix)
    if not self.cache:
        return backend.assemble(self.data)
    else:
        if self.data in caching.assembled_adj_forms:
            if backend.parameters["adjoint"]["debug_cache"]:
                backend.info_green("Got an assembly cache hit")
            return caching.assembled_adj_forms[self.data]
        else:
            if backend.parameters["adjoint"]["debug_cache"]:
                backend.info_red("Got an assembly cache miss")
            M = backend.assemble(self.data)
            caching.assembled_adj_forms[self.data] = M
            return M
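# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the module above) of the assembly cache
# used by assemble_data: assembled operators are memoised in a dictionary
# keyed by the form, so each adjoint form is assembled at most once. The
# names below are hypothetical; `assemble` is whatever callable produces the
# operator (backend.assemble in the code above).
# ---------------------------------------------------------------------------
_assembled_forms = {}

def _assemble_once(form, assemble):
    # Assemble on the first request only; later requests reuse the stored
    # operator, mirroring caching.assembled_adj_forms above.
    if form not in _assembled_forms:
        _assembled_forms[form] = assemble(form)
    return _assembled_forms[form]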
def hessian(self, m_dot, project=False):
    ''' Evaluates the Hessian action in direction m_dot. '''
    assert len(self.controls) == 1

    # Check if we already have the Hessian action in the cache.
    # If so, return the cached value.
    if self.cache is not None:
        hash = value_hash([x.data() for x in self.controls] + [m_dot])
        fnspaces = [p.data().function_space() if isinstance(p.data(), Function)
                    else None for p in self.controls]

        if hash in self._cache["hessian_cache"]:
            info_green("Got a Hessian cache hit.")
            return cache_load(self._cache["hessian_cache"][hash], fnspaces)
        else:
            info_red("Got a Hessian cache miss")

    # Compute the Hessian action by solving the second order adjoint equations
    if isinstance(m_dot, list):
        assert len(m_dot) == 1
        Hm = self.H(m_dot[0], project=project)
    else:
        Hm = self.H(m_dot, project=project)

    # Apply the scaling factor
    scaled_Hm = [utils.scale(Hm, self.scale)]

    # Call callback
    control_data = [p.data() for p in self.controls]
    if self.current_func_value is not None:
        current_func_value = self.scale * self.current_func_value
    else:
        current_func_value = None

    self.hessian_cb(current_func_value,
                    delist(control_data, list_type=self.controls),
                    m_dot,
                    scaled_Hm[0])

    # Cache the result
    if self.cache is not None:
        self._cache["hessian_cache"][hash] = cache_store(scaled_Hm, self.cache)

    return scaled_Hm
def derivative(self, forget=True, project=False):
    ''' Evaluates the derivative of the reduced functional for the most
    recently evaluated control value. '''

    # Check if we already have the gradient in the cache.
    # If so, return the cached value.
    if self.cache is not None:
        hash = value_hash([x.data() for x in self.controls])
        fnspaces = [p.data().function_space() if isinstance(p.data(), Function)
                    else None for p in self.controls]

        if hash in self._cache["derivative_cache"]:
            info_green("Got a derivative cache hit.")
            return cache_load(self._cache["derivative_cache"][hash], fnspaces)

    # Compute the gradient by solving the adjoint equations
    dfunc_value = drivers.compute_gradient(self.functional, self.controls,
                                           forget=forget, project=project)
    dfunc_value = enlist(dfunc_value)

    # Reset the checkpointing state in dolfin-adjoint
    adjointer.reset_revolve()

    # Apply the scaling factor
    scaled_dfunc_value = [utils.scale(df, self.scale) for df in list(dfunc_value)]

    # Call the user-specified callback routine
    if self.derivative_cb:
        if self.current_func_value is not None:
            values = [p.data() for p in self.controls]
            self.derivative_cb(self.scale * self.current_func_value,
                               delist(scaled_dfunc_value, list_type=self.controls),
                               delist(values, list_type=self.controls))
        else:
            info_red("Gradient evaluated without functional evaluation, "
                     "not calling derivative callback function")

    # Cache the result
    if self.cache is not None:
        info_red("Got a derivative cache miss")
        self._cache["derivative_cache"][hash] = cache_store(scaled_dfunc_value, self.cache)

    return scaled_dfunc_value
def derivative(self, forget=True, project=False):
    """ Evaluates the derivative of the reduced functional at the most
    recently evaluated control value.

    Args:
        forget (Optional[bool]): Delete the forward state while solving the
            adjoint equations. If you want to reevaluate the derivative at
            the same point (or evaluate the Hessian), you will need to set
            this to False or None. Defaults to True.
        project (Optional[bool]): If True, the returned value will be the L2
            Riesz representer; if False it will be the l2 Riesz representer.
            The L2 projection requires one additional linear solve.
            Defaults to False.

    Returns:
        The functional derivative. The returned type is the same as the
        control type.
    """

    # Check if we already have the gradient in the cache.
    # If so, return the cached value.
    if self.cache is not None:
        hash = value_hash([x.data() for x in self.controls])
        fnspaces = [p.data().function_space() if isinstance(p.data(), Function)
                    else None for p in self.controls]

        if hash in self._cache["derivative_cache"]:
            info_green("Got a derivative cache hit.")
            return cache_load(self._cache["derivative_cache"][hash], fnspaces)

    # Call callback
    values = [p.data() for p in self.controls]
    self.derivative_cb_pre(delist(values, list_type=self.controls))

    # Compute the gradient by solving the adjoint equations
    dfunc_value = drivers.compute_gradient(self.functional, self.controls,
                                           forget=forget, project=project)
    dfunc_value = enlist(dfunc_value)

    # Reset the checkpointing state in dolfin-adjoint
    adjointer.reset_revolve()

    # Apply the scaling factor
    scaled_dfunc_value = [utils.scale(df, self.scale) for df in list(dfunc_value)]

    # Call callback
    # We might have forgotten the control values already,
    # in which case we can only return Nones
    values = []
    for p in self.controls:
        try:
            values.append(p.data())
        except libadjoint.exceptions.LibadjointErrorNeedValue:
            values.append(None)

    if self.current_func_value is not None:
        self.derivative_cb_post(self.scale * self.current_func_value,
                                delist(scaled_dfunc_value, list_type=self.controls),
                                delist(values, list_type=self.controls))

    # Cache the result
    if self.cache is not None:
        info_red("Got a derivative cache miss")
        self._cache["derivative_cache"][hash] = cache_store(scaled_dfunc_value, self.cache)

    return scaled_dfunc_value
def __call__(self, value):
    """ Evaluates the reduced functional for the given control value.

    Args:
        value: The point in control space at which to evaluate the reduced
            functional. Must be of the same type as the Control
            (e.g. Function, Constant or lists of the latter).

    Returns:
        float: The functional value.
    """

    # Make sure we do not annotate

    # Reset any cached data in dolfin-adjoint
    adj_reset_cache()

    #: The control values at which the reduced functional is to be evaluated.
    value = enlist(value)

    # Call callback
    self.eval_cb_pre(delist(value, list_type=self.controls))

    # Update the control values on the tape
    ListControl(self.controls).update(value)

    # Check if the result is already cached
    if self.cache:
        hash = value_hash(value)
        if hash in self._cache["functional_cache"]:
            # Found a cache hit
            info_green("Got a functional cache hit")
            return self._cache["functional_cache"][hash]

    # Replay the annotation and evaluate the functional
    func_value = 0.
    for i in range(adjointer.equation_count):
        (fwd_var, output) = adjointer.get_forward_solution(i)
        if isinstance(output.data, Function):
            output.data.rename(str(fwd_var), "a Function from dolfin-adjoint")

        # Call callback
        self.replay_cb(fwd_var, output.data, delist(value, list_type=self.controls))

        # Check if checkpointing is active and, if so, record the exact same
        # checkpoint variables as in the initial forward run
        if adjointer.get_checkpoint_strategy() is not None:
            if str(fwd_var) in mem_checkpoints:
                storage = libadjoint.MemoryStorage(output, cs=True)
                storage.set_overwrite(True)
                adjointer.record_variable(fwd_var, storage)
            if str(fwd_var) in disk_checkpoints:
                storage = libadjoint.MemoryStorage(output)
                adjointer.record_variable(fwd_var, storage)
                storage = libadjoint.DiskStorage(output, cs=True)
                storage.set_overwrite(True)
                adjointer.record_variable(fwd_var, storage)
            if str(fwd_var) not in mem_checkpoints and str(fwd_var) not in disk_checkpoints:
                storage = libadjoint.MemoryStorage(output)
                storage.set_overwrite(True)
                adjointer.record_variable(fwd_var, storage)
        # No checkpointing, so we record everything
        else:
            storage = libadjoint.MemoryStorage(output)
            storage.set_overwrite(True)
            adjointer.record_variable(fwd_var, storage)

        if i == adjointer.timestep_end_equation(fwd_var.timestep):
            func_value += adjointer.evaluate_functional(self.functional, fwd_var.timestep)
            if adjointer.get_checkpoint_strategy() is not None:
                adjointer.forget_forward_equation(i)

    self.current_func_value = func_value

    # Call callback
    self.eval_cb_post(self.scale * func_value, delist(value, list_type=self.controls))

    if self.cache:
        # Add the result to the cache
        info_red("Got a functional cache miss")
        self._cache["functional_cache"][hash] = self.scale * func_value

    return self.scale * func_value
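# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the module above) of the value-hash caching
# used by the functional, derivative and Hessian caches: the control values
# are hashed and the (scaled) result is stored under that hash, so repeated
# evaluation at the same point is served from the cache. hashlib/numpy stand
# in for value_hash and cache_store/cache_load here; all names are
# hypothetical.
# ---------------------------------------------------------------------------
import hashlib
import numpy as np

_functional_cache = {}

def _value_hash(control_arrays):
    # Hash the raw bytes of every control array to form the cache key.
    h = hashlib.md5()
    for a in control_arrays:
        h.update(np.ascontiguousarray(a).tobytes())
    return h.hexdigest()

def _cached_evaluate(control_arrays, evaluate):
    key = _value_hash(control_arrays)
    if key in _functional_cache:
        return _functional_cache[key]        # cache hit
    value = evaluate(control_arrays)         # cache miss: replay the model
    _functional_cache[key] = value
    return value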