def extra_grad_process(self, optimizer, loss):
    """Called after optimizer.zero_grad() and loss.backward() calls.

    Allows for gradient processing before optimizer.step() is called,
    e.g. for gradient clipping.
    """
    # extra_grad_process_fn is a free variable captured from the
    # enclosing policy-builder scope; fall back to the base-class
    # implementation when no custom hook was supplied.
    if extra_grad_process_fn:
        return extra_grad_process_fn(self, optimizer, loss)
    else:
        return TorchPolicy.extra_grad_process(self, optimizer, loss)
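The hook above expects a user-supplied extra_grad_process_fn with the same (policy, optimizer, loss) signature. A minimal sketch of such a hook doing gradient clipping, as the docstring suggests; clip_gradients, the stats key, and the max_norm value are our own choices, not part of the library:

from torch.nn.utils import clip_grad_norm_

def clip_gradients(policy, optimizer, loss):
    # Gather every parameter this optimizer manages that received a
    # gradient during loss.backward().
    params = [
        p for group in optimizer.param_groups
        for p in group["params"]
        if p.grad is not None
    ]
    # Clip the global gradient norm in place before optimizer.step().
    grad_norm = clip_grad_norm_(params, max_norm=40.0)
    # The returned dict can carry stats, e.g. for logging.
    return {"grad_gnorm": float(grad_norm)}

A function like this would be passed as the extra_grad_process_fn argument when building the policy, so it runs between loss.backward() and optimizer.step().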
Example #2
def extra_grad_process(self):
    # Same fallback pattern as Example #1, but for a variant of the
    # hook that takes no (optimizer, loss) arguments.
    if extra_grad_process_fn:
        return extra_grad_process_fn(self)
    else:
        return TorchPolicy.extra_grad_process(self)
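With this no-argument variant, the hook can only reach the trainable parameters through the policy object itself. A sketch under the assumption that the policy exposes its network as a .model attribute (an assumption, not guaranteed by this snippet):

from torch.nn.utils import clip_grad_norm_

def clip_gradients(policy):
    # policy.model is assumed here; any handle to the policy's
    # trainable parameters would work the same way.
    params = [p for p in policy.model.parameters() if p.grad is not None]
    return {"grad_gnorm": float(clip_grad_norm_(params, max_norm=40.0))}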