def _multiply_jacobian(self, vecs):
    """Multiply vecs by the Jacobian of losses."""
    jacobian_vecs_flat = utils.fwd_gradients(self._inputs_to_losses_flat,
                                             self._wrt_tensors,
                                             grad_xs=vecs)
    return nest.pack_sequence_as(self._inputs_to_losses,
                                 jacobian_vecs_flat)
Example 2
def _multiply_jacobian(self, vecs):
    """Multiply vecs by the Jacobian of losses."""
    # We stop gradients at wrt_tensors to produce partial derivatives (which is
    # what we want for Jacobians).
    jacobian_vecs_flat = utils.fwd_gradients(
        self._inputs_to_losses_flat,
        self._wrt_tensors,
        grad_xs=vecs,
        stop_gradients=self._wrt_tensors)
    return nest.pack_sequence_as(self._inputs_to_losses,
                                 jacobian_vecs_flat)
Example 3
def jac_vec(ys, xs, vs):
    return fwd_gradients(ys, xs, grad_xs=vs, stop_gradients=xs)
def _multiply_jacobian(self, vecs):
   """Multiply vecs by the Jacobian of losses."""
   jacobian_vecs_flat = utils.fwd_gradients(
       self._inputs_to_losses_flat, self._wrt_tensors, grad_xs=vecs)
   return nest.pack_sequence_as(self._inputs_to_losses, jacobian_vecs_flat)
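As a usage sketch (a hypothetical toy problem, assuming jac_vec above and a fwd_gradients implementation such as the sketch after Example 1 are in scope, TF1 graph mode), jac_vec multiplies the Jacobian of the simultaneous-gradient field of a small two-player game by that field itself:

# Toy two-player game: player 1 minimises x*y, player 2 minimises -x*y.
x = tf.Variable(1.0)
y = tf.Variable(1.0)
loss_1 = x * y
loss_2 = -x * y

# Simultaneous-gradient field xi = (d loss_1/dx, d loss_2/dy) = (y, -x).
xi = [tf.gradients(loss_1, x)[0], tf.gradients(loss_2, y)[0]]

# The Jacobian of xi w.r.t. (x, y) is [[0, 1], [-1, 0]]; applied to
# xi = (1, -1) at the initial point, this gives (-1, -1).
jvp = jac_vec(xi, [x, y], xi)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(jvp))  # [-1.0, -1.0]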