def derivative(self, forget=True, project=False):
    """ Evaluates the derivative of the reduced functional for the most
    recently evaluated control value.

    Args:
        forget: Delete the forward state while solving the adjoint
            equations (passed through to drivers.compute_gradient).
            Defaults to True.
        project: If True, return the L2 Riesz representer of the
            derivative (costs one extra linear solve); if False, the l2
            representative (passed through to drivers.compute_gradient).
            Defaults to False.

    Returns:
        The scaled functional derivative, matching the structure of
        self.controls.
    """
    # Check if we have the gradient already in the cache.
    # If so, return the cached value.
    if self.cache is not None:
        # Use a dedicated name rather than shadowing the builtin `hash`.
        hash_key = value_hash([x.data() for x in self.controls])
        fnspaces = [p.data().function_space() if isinstance(p.data(), Function)
                    else None for p in self.controls]
        if hash_key in self._cache["derivative_cache"]:
            info_green("Got a derivative cache hit.")
            return cache_load(self._cache["derivative_cache"][hash_key], fnspaces)

    # Call the pre-derivative callback with the current control values.
    values = [p.data() for p in self.controls]
    self.derivative_cb_pre(delist(values, list_type=self.controls))

    # Compute the gradient by solving the adjoint equations.
    dfunc_value = drivers.compute_gradient(self.functional, self.controls,
                                           forget=forget, project=project)
    dfunc_value = enlist(dfunc_value)

    # Reset the checkpointing state in dolfin-adjoint.
    adjointer.reset_revolve()

    # Apply the scaling factor.
    scaled_dfunc_value = [utils.scale(df, self.scale) for df in dfunc_value]

    # Call the post-derivative callback.
    # We might have forgotten the control values already,
    # in which case we can only return Nones.
    values = []
    for c in self.controls:
        try:
            # BUGFIX: was `values.append(p.data())`, which referenced the
            # stale comprehension variable `p` instead of the loop
            # variable `c` (NameError on Python 3; wrong, repeated value
            # on Python 2).
            values.append(c.data())
        except libadjoint.exceptions.LibadjointErrorNeedValue:
            values.append(None)

    if self.current_func_value is not None:
        self.derivative_cb_post(self.scale * self.current_func_value,
                                delist(scaled_dfunc_value, list_type=self.controls),
                                delist(values, list_type=self.controls))

    # Cache the result.
    if self.cache is not None:
        info_red("Got a derivative cache miss")
        self._cache["derivative_cache"][hash_key] = cache_store(scaled_dfunc_value, self.cache)

    return scaled_dfunc_value
def derivative(self, forget=True, project=False):
    """ Evaluates the derivative of the reduced functional for the most
    recently evaluated control value. """
    # Try the derivative cache first and short-circuit on a hit.
    if self.cache is not None:
        cache_key = value_hash([x.data() for x in self.controls])
        fnspaces = []
        for p in self.controls:
            if isinstance(p.data(), Function):
                fnspaces.append(p.data().function_space())
            else:
                fnspaces.append(None)
        if cache_key in self._cache["derivative_cache"]:
            info_green("Got a derivative cache hit.")
            return cache_load(self._cache["derivative_cache"][cache_key], fnspaces)

    # Solve the adjoint equations to obtain the gradient.
    gradient = enlist(drivers.compute_gradient(self.functional, self.controls,
                                               forget=forget, project=project))

    # Reset the checkpointing state in dolfin-adjoint.
    adjointer.reset_revolve()

    # Scale every gradient component by the user-supplied factor.
    scaled_dfunc_value = []
    for df in gradient:
        scaled_dfunc_value.append(utils.scale(df, self.scale))

    # Invoke the user-specific callback, provided one was registered and
    # a functional value is available to hand to it.
    if self.derivative_cb:
        if self.current_func_value is None:
            info_red("Gradient evaluated without functional evaluation, not calling derivative callback function")
        else:
            values = [p.data() for p in self.controls]
            self.derivative_cb(self.scale * self.current_func_value,
                               delist(scaled_dfunc_value, list_type=self.controls),
                               delist(values, list_type=self.controls))

    # Remember the freshly computed derivative for later calls.
    if self.cache is not None:
        info_red("Got a derivative cache miss")
        self._cache["derivative_cache"][cache_key] = cache_store(scaled_dfunc_value, self.cache)

    return scaled_dfunc_value
def derivative(self, forget=True, project=False):
    """Evaluate the derivative of the reduced functional at the most
    recently evaluated control value.

    Args:
        forget (Optional[bool]): Delete the forward state while solving
            the adjoint equations. Set this to False or None if you want
            to reevaluate the derivative at the same point (or the
            Hessian). Defaults to True.
        project (Optional[bool]): If True, the returned value is the L2
            Riesz representer; if False, the l2 Riesz representative.
            The L2 projection requires one additional linear solve.
            Defaults to False.

    Returns:
        The functional derivative, of the same type as the control.
    """
    # Short-circuit with a cached derivative when one is available.
    if self.cache is not None:
        hash = value_hash([x.data() for x in self.controls])
        fnspaces = []
        for p in self.controls:
            if isinstance(p.data(), Function):
                fnspaces.append(p.data().function_space())
            else:
                fnspaces.append(None)
        if hash in self._cache["derivative_cache"]:
            info_green("Got a derivative cache hit.")
            return cache_load(self._cache["derivative_cache"][hash], fnspaces)

    # Notify the pre-derivative callback of the current control values.
    self.derivative_cb_pre(
        delist([p.data() for p in self.controls], list_type=self.controls))

    # Solve the adjoint equations to obtain the gradient.
    dfunc_value = enlist(drivers.compute_gradient(
        self.functional, self.controls, forget=forget, project=project))

    # Reset the checkpointing state in dolfin-adjoint.
    adjointer.reset_revolve()

    # Apply the scaling factor to each gradient component.
    scaled_dfunc_value = [utils.scale(df, self.scale) for df in dfunc_value]

    # The control values may already have been forgotten, in which case
    # only Nones can be handed to the post-derivative callback.
    values = []
    for p in self.controls:
        try:
            values.append(p.data())
        except libadjoint.exceptions.LibadjointErrorNeedValue:
            values.append(None)

    if self.current_func_value is not None:
        self.derivative_cb_post(
            self.scale * self.current_func_value,
            delist(scaled_dfunc_value, list_type=self.controls),
            delist(values, list_type=self.controls))

    # Store the freshly computed result in the cache.
    if self.cache is not None:
        info_red("Got a derivative cache miss")
        self._cache["derivative_cache"][hash] = cache_store(
            scaled_dfunc_value, self.cache)

    return scaled_dfunc_value
def derivative(self, forget=True, project=False):
    """ Evaluates the derivative of the reduced functional at the most
    recently evaluated control value.

    Args:
        forget (Optional[bool]): Delete the forward state while solving the
            adjoint equations. If you want to reevaluate derivative at the
            same point (or the Hessian) you will need to set this to False
            or None. Defaults to True.
        project (Optional[bool]): If True, the returned value will be the
            L2 Riesz representer, if False it will be the l2 Riesz
            representative. The L2 projection requires one additional
            linear solve. Defaults to False.

    Returns:
        The functional derivative. The returned type is the same as the
        control type.
    """
    # Check if we have the gradient already in the cache.
    # If so, return the cached value.
    if self.cache is not None:
        # Use a dedicated name rather than shadowing the builtin `hash`.
        hash_key = value_hash([x.data() for x in self.controls])
        fnspaces = [p.data().function_space() if isinstance(p.data(), Function)
                    else None for p in self.controls]
        if hash_key in self._cache["derivative_cache"]:
            info_green("Got a derivative cache hit.")
            return cache_load(self._cache["derivative_cache"][hash_key], fnspaces)

    # Call the pre-derivative callback with the current control values.
    values = [p.data() for p in self.controls]
    self.derivative_cb_pre(delist(values, list_type=self.controls))

    # Compute the gradient by solving the adjoint equations.
    dfunc_value = drivers.compute_gradient(self.functional, self.controls,
                                           forget=forget, project=project)
    dfunc_value = enlist(dfunc_value)

    # Reset the checkpointing state in dolfin-adjoint.
    adjointer.reset_revolve()

    # Apply the scaling factor.
    scaled_dfunc_value = [utils.scale(df, self.scale) for df in dfunc_value]

    # Call the post-derivative callback.
    # We might have forgotten the control values already,
    # in which case we can only return Nones.
    values = []
    for c in self.controls:
        try:
            # BUGFIX: was `values.append(p.data())`, which referenced the
            # stale comprehension variable `p` instead of the loop
            # variable `c` (NameError on Python 3; wrong, repeated value
            # on Python 2).
            values.append(c.data())
        except libadjoint.exceptions.LibadjointErrorNeedValue:
            values.append(None)

    if self.current_func_value is not None:
        self.derivative_cb_post(
            self.scale * self.current_func_value,
            delist(scaled_dfunc_value, list_type=self.controls),
            delist(values, list_type=self.controls))

    # Cache the result.
    if self.cache is not None:
        info_red("Got a derivative cache miss")
        self._cache["derivative_cache"][hash_key] = cache_store(
            scaled_dfunc_value, self.cache)

    return scaled_dfunc_value