def set_perturbation(self, m_dot):
    '''Return another instance of the same class, representing the Control
    perturbed in a particular direction m_dot.'''
    m_dot = enlist(m_dot)

    if not len(self.controls) == len(m_dot):
        raise ValueError("The perturbation m_dot must be a list of the same shape as the control list")

    return ListControl([p.set_perturbation(m) for (p, m) in zip(self.controls, m_dot)])
def __init__(self, J, m, warn=True):
    self.J = J
    self.enlisted_controls = enlist(m)
    self.m = ListControl(self.enlisted_controls)

    if warn:
        backend.info_red("Warning: Hessian computation is still experimental and is known to not work for some problems. Please Taylor test thoroughly.")
def compute_gradient(J, param, forget=True, ignore=[], callback=lambda var, output: None, project=False):
    backend.parameters["adjoint"]["stop_annotating"] = True

    enlisted_controls = enlist(param)
    param = ListControl(enlisted_controls)

    dJdparam = enlisted_controls.__class__([None] * len(enlisted_controls))
    last_timestep = adjglobals.adjointer.timestep_count

    ignorelist = []
    for fn in ignore:
        if isinstance(fn, backend.Function):
            ignorelist.append(adjglobals.adj_variables[fn])
        elif isinstance(fn, str):
            ignorelist.append(libadjoint.Variable(fn, 0, 0))
        else:
            ignorelist.append(fn)

    for i in range(adjglobals.adjointer.timestep_count):
        adjglobals.adjointer.set_functional_dependencies(J, i)

    for i in range(adjglobals.adjointer.equation_count)[::-1]:
        fwd_var = adjglobals.adjointer.get_forward_variable(i)
        if fwd_var in ignorelist:
            info("Ignoring the adjoint equation for %s" % fwd_var)
            continue

        (adj_var, output) = adjglobals.adjointer.get_adjoint_solution(i, J)

        callback(adj_var, output.data)

        storage = libadjoint.MemoryStorage(output)
        storage.set_overwrite(True)
        adjglobals.adjointer.record_variable(adj_var, storage)
        fwd_var = libadjoint.Variable(adj_var.name, adj_var.timestep, adj_var.iteration)

        out = param.equation_partial_derivative(adjglobals.adjointer, output.data, i, fwd_var)
        dJdparam = _add(dJdparam, out)

        if last_timestep > adj_var.timestep:
            # We have hit a new timestep, and need to compute this timestep's
            # \partial J/\partial m contribution
            out = param.functional_partial_derivative(adjglobals.adjointer, J, adj_var.timestep)
            dJdparam = _add(dJdparam, out)

        last_timestep = adj_var.timestep

        if forget is None:
            pass
        elif forget:
            adjglobals.adjointer.forget_adjoint_equation(i)
        else:
            adjglobals.adjointer.forget_adjoint_values(i)

    rename(J, dJdparam, param)

    return postprocess(dJdparam, project, list_type=enlisted_controls)
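# Usage sketch (illustrative, not part of this module): a minimal annotated
# forward model driving compute_gradient. The Poisson problem and the names
# below (mesh, V, nu, u, J, m, dJdnu) are placeholder assumptions; only
# Functional, Control and compute_gradient are taken from the library API.
from dolfin import *
from dolfin_adjoint import *

mesh = UnitSquareMesh(8, 8)
V = FunctionSpace(mesh, "CG", 1)

nu = Constant(1.0)                            # the control
u = Function(V)
v = TestFunction(V)
F = nu*inner(grad(u), grad(v))*dx - Constant(1.0)*v*dx
bc = DirichletBC(V, 0.0, "on_boundary")
solve(F == 0, u, bc)                          # annotated forward solve

J = Functional(inner(u, u)*dx)
m = Control(nu)

# forget=False keeps the forward state so that the gradient (or a Hessian
# action) can be recomputed at the same point later.
dJdnu = compute_gradient(J, m, forget=False)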
def _taylor_test_multi_control(J, m, Jm, dJdm, HJm, seed, perturbation_direction, value):
    if perturbation_direction is None:
        perturbation_direction = [None] * len(m.controls)
    perturbation_direction = enlist(perturbation_direction)

    if value is None:
        value = [None] * len(m.controls)
    value = enlist(value)

    # Create a deep copy of the initial control values
    m_cpy = []
    for c in m:
        if isinstance(c.data(), backend.Function):
            m_cpy.append(c.data().copy(deepcopy=True))
        elif isinstance(c.data(), backend.MultiMeshFunction):
            m_cpy.append(c.data().copy(deepcopy=True))
        else:
            m_cpy.append(backend.Constant(c.data()))

    # Build an objective version restricted to the i'th control
    def J_cmp(J, i):
        m_values = list(m_cpy)
        def out(x):
            m_values[i] = x
            return J(m_values)
        return out

    # A Hessian version restricted to the i'th control
    if HJm is not None:
        HJm_cmp = lambda i: lambda x: HJm.__class__(HJm.J, m[i])(x)
    else:
        HJm_cmp = lambda i: None

    # Perform the Taylor tests for each control
    min_conv = 1e10
    for i in range(len(m.controls)):
        print("\nRunning Taylor test for control {}".format(i))
        conv = _taylor_test_single_control(J_cmp(J, i), m[i], Jm, dJdm[i], HJm_cmp(i),
                                           seed, perturbation_direction[i], value[i])
        min_conv = min(min_conv, conv)

    return min_conv
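# Usage sketch (illustrative): the public taylor_test entry point is assumed
# to dispatch to _taylor_test_multi_control above when a list of controls is
# given. Jhat, J, m1 and m2 are placeholders from an annotated forward run.
def _example_multi_control_taylor_test(J, Jhat, m1, m2):
    # Functional value and gradient at the current control values
    Jm = Jhat([m1.data(), m2.data()])
    dJdm = compute_gradient(J, [m1, m2], forget=False)

    # One Taylor test is run per control; the worst convergence rate is
    # returned, and a rate close to 2 indicates a consistent gradient.
    return taylor_test(Jhat, [m1, m2], Jm, dJdm)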
def derivative(self, forget=True, project=False):
    ''' Evaluates the derivative of the reduced functional for the most
    recently evaluated control value. '''

    # Check if we already have the gradient in the cache.
    # If so, return the cached value
    if self.cache is not None:
        hash = value_hash([x.data() for x in self.controls])
        fnspaces = [p.data().function_space() if isinstance(p.data(), Function) else None
                    for p in self.controls]

        if hash in self._cache["derivative_cache"]:
            info_green("Got a derivative cache hit.")
            return cache_load(self._cache["derivative_cache"][hash], fnspaces)

    # Call callback
    values = [p.data() for p in self.controls]
    self.derivative_cb_pre(delist(values, list_type=self.controls))

    # Compute the gradient by solving the adjoint equations
    dfunc_value = drivers.compute_gradient(self.functional, self.controls, forget=forget, project=project)
    dfunc_value = enlist(dfunc_value)

    # Reset the checkpointing state in dolfin-adjoint
    adjointer.reset_revolve()

    # Apply the scaling factor
    scaled_dfunc_value = [utils.scale(df, self.scale) for df in list(dfunc_value)]

    # Call callback
    # We might have forgotten the control values already,
    # in which case we can only return Nones
    values = []
    for c in self.controls:
        try:
            values.append(c.data())
        except libadjoint.exceptions.LibadjointErrorNeedValue:
            values.append(None)

    if self.current_func_value is not None:
        self.derivative_cb_post(self.scale * self.current_func_value,
                                delist(scaled_dfunc_value, list_type=self.controls),
                                delist(values, list_type=self.controls))

    # Cache the result
    if self.cache is not None:
        info_red("Got a derivative cache miss")
        self._cache["derivative_cache"][hash] = cache_store(scaled_dfunc_value, self.cache)

    return scaled_dfunc_value
def derivative(self, forget=True, project=False):
    ''' Evaluates the derivative of the reduced functional for the most
    recently evaluated control value. '''

    # Check if we already have the gradient in the cache.
    # If so, return the cached value
    if self.cache is not None:
        hash = value_hash([x.data() for x in self.controls])
        fnspaces = [p.data().function_space() if isinstance(p.data(), Function) else None
                    for p in self.controls]

        if hash in self._cache["derivative_cache"]:
            info_green("Got a derivative cache hit.")
            return cache_load(self._cache["derivative_cache"][hash], fnspaces)

    # Compute the gradient by solving the adjoint equations
    dfunc_value = drivers.compute_gradient(self.functional, self.controls, forget=forget, project=project)
    dfunc_value = enlist(dfunc_value)

    # Reset the checkpointing state in dolfin-adjoint
    adjointer.reset_revolve()

    # Apply the scaling factor
    scaled_dfunc_value = [utils.scale(df, self.scale) for df in list(dfunc_value)]

    # Call the user-specific callback routine
    if self.derivative_cb:
        if self.current_func_value is not None:
            values = [p.data() for p in self.controls]
            self.derivative_cb(self.scale * self.current_func_value,
                               delist(scaled_dfunc_value, list_type=self.controls),
                               delist(values, list_type=self.controls))
        else:
            info_red("Gradient evaluated without functional evaluation, not calling derivative callback function")

    # Cache the result
    if self.cache is not None:
        info_red("Got a derivative cache miss")
        self._cache["derivative_cache"][hash] = cache_store(scaled_dfunc_value, self.cache)

    return scaled_dfunc_value
def derivative(self, forget=True, project=False):
    """ Evaluates the derivative of the reduced functional at the most
    recently evaluated control value.

    Args:
        forget (Optional[bool]): Delete the forward state while solving the
            adjoint equations. If you want to re-evaluate the derivative at
            the same point (or compute the Hessian), you will need to set
            this to False or None. Defaults to True.
        project (Optional[bool]): If True, the returned value is the L2 Riesz
            representer; if False it is the l2 Riesz representer. The L2
            projection requires one additional linear solve. Defaults to
            False.

    Returns:
        The functional derivative. The returned type is the same as the
        control type.
    """

    # Check if we already have the gradient in the cache.
    # If so, return the cached value
    if self.cache is not None:
        hash = value_hash([x.data() for x in self.controls])
        fnspaces = [p.data().function_space() if isinstance(p.data(), Function) else None
                    for p in self.controls]

        if hash in self._cache["derivative_cache"]:
            info_green("Got a derivative cache hit.")
            return cache_load(self._cache["derivative_cache"][hash], fnspaces)

    # Call callback
    values = [p.data() for p in self.controls]
    self.derivative_cb_pre(delist(values, list_type=self.controls))

    # Compute the gradient by solving the adjoint equations
    dfunc_value = drivers.compute_gradient(self.functional, self.controls, forget=forget, project=project)
    dfunc_value = enlist(dfunc_value)

    # Reset the checkpointing state in dolfin-adjoint
    adjointer.reset_revolve()

    # Apply the scaling factor
    scaled_dfunc_value = [utils.scale(df, self.scale) for df in list(dfunc_value)]

    # Call callback
    # We might have forgotten the control values already,
    # in which case we can only return Nones
    values = []
    for c in self.controls:
        try:
            values.append(c.data())
        except libadjoint.exceptions.LibadjointErrorNeedValue:
            values.append(None)

    if self.current_func_value is not None:
        self.derivative_cb_post(self.scale * self.current_func_value,
                                delist(scaled_dfunc_value, list_type=self.controls),
                                delist(values, list_type=self.controls))

    # Cache the result
    if self.cache is not None:
        info_red("Got a derivative cache miss")
        self._cache["derivative_cache"][hash] = cache_store(scaled_dfunc_value, self.cache)

    return scaled_dfunc_value
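# Usage sketch (illustrative): evaluating the gradient through the
# ReducedFunctional interface. Jhat is assumed to be a ReducedFunctional of an
# annotated forward model and nu a valid control value; both are placeholders.
def _example_derivative(Jhat, nu):
    Jnu = Jhat(nu)                          # (re)evaluate the functional
    # forget=False retains the forward state so the derivative, or a Hessian
    # action, can be evaluated again at the same point. Pass project=True to
    # obtain the L2 Riesz representer at the cost of one extra linear solve.
    dJdnu = Jhat.derivative(forget=False)
    return Jnu, dJdnu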
def __init__(self, functional, controls, scale=1.0,
             eval_cb_pre=lambda *args: None, eval_cb_post=lambda *args: None,
             derivative_cb_pre=lambda *args: None, derivative_cb_post=lambda *args: None,
             replay_cb=lambda *args: None, hessian_cb=lambda *args: None,
             cache=None):

    #: The objective functional.
    self.functional = functional

    #: One, or a list of controls.
    self.controls = enlist(controls)

    # Check the types of the inputs
    self.__check_input_types(functional, self.controls, scale, cache)

    #: An optional scaling factor for the functional
    self.scale = scale

    #: An optional callback function that is executed before each functional
    #: evaluation.
    #: The interface must be eval_cb_pre(m) where
    #: m is the control value at which the functional is evaluated.
    self.eval_cb_pre = eval_cb_pre

    #: An optional callback function that is executed after each functional
    #: evaluation.
    #: The interface must be eval_cb_post(j, m) where j is the functional value and
    #: m is the control value at which the functional is evaluated.
    self.eval_cb_post = eval_cb_post

    #: An optional callback function that is executed before each functional
    #: gradient evaluation.
    #: The interface must be derivative_cb_pre(m) where m is the control
    #: value at which the gradient is evaluated.
    self.derivative_cb_pre = derivative_cb_pre

    #: An optional callback function that is executed after each functional
    #: gradient evaluation.
    #: The interface must be derivative_cb_post(j, dj, m) where j and dj are the
    #: functional and functional gradient values, and m is the control
    #: value at which the gradient is evaluated.
    self.derivative_cb_post = derivative_cb_post

    #: An optional callback function that is executed after each Hessian
    #: action evaluation. The interface must be hessian_cb(j, m, mdot, h)
    #: where mdot is the direction in which the Hessian action is evaluated
    #: and h the value of the Hessian action.
    self.hessian_cb = hessian_cb

    #: An optional callback function that is executed for each forward
    #: equation during a (forward) solve. The interface must be
    #: replay_cb(var, value, m) where var is the libadjoint variable
    #: containing information about the variable, value is the associated
    #: dolfin object and m is the control at which the functional is
    #: evaluated.
    self.replay_cb = replay_cb

    #: If not None, caching (memoization) will be activated. The control->output pairs
    #: are stored on disk in the filename given by cache.
    self.cache = cache

    if cache is not None:
        try:
            self._cache = pickle.load(open(cache, "r"))
        except IOError:  # didn't exist
            self._cache = {"functional_cache": {},
                           "derivative_cache": {},
                           "hessian_cache": {}}

    #: Indicates whether the user has overloaded the functional evaluation and
    #: hence re-annotates the forward model at every evaluation.
    #: By default the ReducedFunctional replays the tape for the
    #: evaluation.
    self.replays_annotation = True

    # Stores the functional value of the latest evaluation
    self.current_func_value = None

    # Set up the Hessian driver
    # Note: drivers.hessian currently only supports one control
    try:
        self.H = drivers.hessian(functional, delist(controls, list_type=controls),
                                 warn=False)
    except libadjoint.exceptions.LibadjointErrorNotImplemented:
        # Might fail as Hessian support is currently limited
        # to a single control
        pass
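# Usage sketch (illustrative): constructing a ReducedFunctional with some of
# the optional callbacks documented above. J and m are assumed to come from an
# annotated forward run; the callback bodies and the cache filename are
# placeholders.
def _example_reduced_functional(J, m):
    def eval_cb_post(j, m_value):
        print("Functional value: %s" % j)

    def derivative_cb_post(j, dj, m_value):
        print("Gradient evaluated, functional value was %s" % j)

    return ReducedFunctional(J, m,
                             scale=1.0,
                             eval_cb_post=eval_cb_post,
                             derivative_cb_post=derivative_cb_post,
                             cache="functional_cache.pck")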
def __call__(self, value):
    """ Evaluates the reduced functional for the given control value.

    Args:
        value: The point in control space at which to evaluate the
            functional. Must be of the same type as the Control
            (e.g. Function, Constant or lists of the latter).

    Returns:
        float: The functional value.
    """

    # Make sure we do not annotate

    # Reset any cached data in dolfin-adjoint
    adj_reset_cache()

    #: The control values at which the reduced functional is to be evaluated.
    value = enlist(value)

    # Call callback
    self.eval_cb_pre(delist(value, list_type=self.controls))

    # Update the control values on the tape
    ListControl(self.controls).update(value)

    # Check if the result is already cached
    if self.cache:
        hash = value_hash(value)
        if hash in self._cache["functional_cache"]:
            # Found a cache
            info_green("Got a functional cache hit")
            return self._cache["functional_cache"][hash]

    # Replay the annotation and evaluate the functional
    func_value = 0.
    for i in range(adjointer.equation_count):
        (fwd_var, output) = adjointer.get_forward_solution(i)
        if isinstance(output.data, Function):
            output.data.rename(str(fwd_var), "a Function from dolfin-adjoint")

        # Call callback
        self.replay_cb(fwd_var, output.data, delist(value, list_type=self.controls))

        # Check if checkpointing is active and, if so, record the exact same
        # checkpoint variables as in the initial forward run
        if adjointer.get_checkpoint_strategy() is not None:
            if str(fwd_var) in mem_checkpoints:
                storage = libadjoint.MemoryStorage(output, cs=True)
                storage.set_overwrite(True)
                adjointer.record_variable(fwd_var, storage)
            if str(fwd_var) in disk_checkpoints:
                storage = libadjoint.MemoryStorage(output)
                adjointer.record_variable(fwd_var, storage)
                storage = libadjoint.DiskStorage(output, cs=True)
                storage.set_overwrite(True)
                adjointer.record_variable(fwd_var, storage)
            if str(fwd_var) not in mem_checkpoints and str(fwd_var) not in disk_checkpoints:
                storage = libadjoint.MemoryStorage(output)
                storage.set_overwrite(True)
                adjointer.record_variable(fwd_var, storage)

        # No checkpointing, so we record everything
        else:
            storage = libadjoint.MemoryStorage(output)
            storage.set_overwrite(True)
            adjointer.record_variable(fwd_var, storage)

        if i == adjointer.timestep_end_equation(fwd_var.timestep):
            func_value += adjointer.evaluate_functional(self.functional, fwd_var.timestep)
            if adjointer.get_checkpoint_strategy() is not None:
                adjointer.forget_forward_equation(i)

    self.current_func_value = func_value

    # Call callback
    self.eval_cb_post(self.scale * func_value, delist(value, list_type=self.controls))

    if self.cache:
        # Add result to cache
        info_red("Got a functional cache miss")
        self._cache["functional_cache"][hash] = self.scale * func_value

    return self.scale * func_value
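# Usage sketch (illustrative): re-evaluating the reduced functional at a new
# control value by replaying the annotated tape. Jhat is assumed to be a
# ReducedFunctional whose control is a Function on the space V; both names
# are placeholders.
def _example_evaluate(Jhat, V):
    from dolfin import Constant, interpolate

    new_value = interpolate(Constant(2.0), V)  # a trial control value
    return Jhat(new_value)                     # replays the tape, returns scale*J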
def __call__(self, m_dot, project=False):
    flag = misc.pause_annotation()

    hess_action_timer = backend.Timer("Hessian action")

    m_p = self.m.set_perturbation(m_dot)
    last_timestep = adjglobals.adjointer.timestep_count

    m_dot = enlist(m_dot)

    Hm = []
    for m_dot_cmp in m_dot:
        if hasattr(m_dot_cmp, 'function_space'):
            Hm.append(backend.Function(m_dot_cmp.function_space()))
        elif isinstance(m_dot_cmp, float):
            Hm.append(0.0)
        else:
            raise NotImplementedError("Sorry, don't know how to handle this")

    tlm_timer = backend.Timer("Hessian action (TLM)")
    # run the tangent linear model
    for (tlm, tlm_var) in compute_tlm(m_p, forget=None):
        pass
    tlm_timer.stop()

    # run the adjoint and second-order adjoint equations.
    for i in range(adjglobals.adjointer.equation_count)[::-1]:
        adj_var = adjglobals.adjointer.get_forward_variable(i).to_adjoint(self.J)

        # Only recompute the adjoint variable if we do not have it yet
        try:
            adj = adjglobals.adjointer.get_variable_value(adj_var)
        except (libadjoint.exceptions.LibadjointErrorHashFailed, libadjoint.exceptions.LibadjointErrorNeedValue):
            adj_timer = backend.Timer("Hessian action (ADM)")
            adj = adjglobals.adjointer.get_adjoint_solution(i, self.J)[1]
            adj_timer.stop()

            storage = libadjoint.MemoryStorage(adj)
            adjglobals.adjointer.record_variable(adj_var, storage)

        adj = adj.data

        soa_timer = backend.Timer("Hessian action (SOA)")
        (soa_var, soa_vec) = adjglobals.adjointer.get_soa_solution(i, self.J, m_p)
        soa_timer.stop()

        soa = soa_vec.data

        def hess_inner(Hm, out):
            assert len(out) == len(Hm)
            for i in range(len(out)):
                if out[i] is not None:
                    if isinstance(Hm[i], backend.Function):
                        Hm[i].vector().axpy(1.0, out[i].vector())
                    elif isinstance(Hm[i], float):
                        Hm[i] += out[i]
                    else:
                        raise ValueError("Do not know what to do with this")
            return Hm

        func_timer = backend.Timer("Hessian action (derivative formula)")

        # now implement the Hessian action formula.
        out = self.m.equation_partial_derivative(adjglobals.adjointer, soa, i, soa_var.to_forward())
        Hm = hess_inner(Hm, out)

        out = self.m.equation_partial_second_derivative(adjglobals.adjointer, adj, i, soa_var.to_forward(), m_dot)
        Hm = hess_inner(Hm, out)

        if last_timestep > adj_var.timestep:
            # We have hit a new timestep, and need to compute this timestep's
            # \partial^2 J/\partial m^2 contribution
            last_timestep = adj_var.timestep
            out = self.m.functional_partial_second_derivative(adjglobals.adjointer, self.J, adj_var.timestep, m_dot)
            Hm = hess_inner(Hm, out)

        func_timer.stop()

        storage = libadjoint.MemoryStorage(soa_vec)
        storage.set_overwrite(True)
        adjglobals.adjointer.record_variable(soa_var, storage)

    for Hm_cmp in Hm:
        if isinstance(Hm_cmp, backend.Function):
            Hm_cmp.rename("d^2(%s)/d(%s)^2" % (str(self.J), str(self.m)),
                          "a Function from dolfin-adjoint")

    misc.continue_annotation(flag)

    return postprocess(Hm, project, list_type=self.enlisted_controls)
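# Usage sketch (illustrative): evaluating a Hessian action in a direction
# m_dot via the driver above. J, m and m_dot are placeholders from an
# annotated forward run; the hessian(...) constructor mirrors drivers.hessian
# as used in ReducedFunctional.__init__.
def _example_hessian_action(J, m, m_dot):
    # Keep the forward/adjoint state around, as required for Hessian actions
    # (see the derivative() docstring above).
    compute_gradient(J, m, forget=False)

    H = hessian(J, m, warn=False)            # second-order adjoint driver
    return H(m_dot)                          # returns the Hessian action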