def backward(self, loss): """Wrapper for backward pass. Some optimizer requires ownership of the backward pass.""" if self._with_fp16_wrapper: kwargs = {} if "update_master_grads" in fn_args(self._optimizer.backward): kwargs["update_master_grads"] = True self._optimizer.backward(loss, **kwargs) else: loss.backward()
def backward(self, loss, retain_graph=False):
    """Wrapper for backward pass. Some optimizers require ownership of the backward pass."""
    # with torch.autograd.detect_anomaly():
    if self._with_fp16_wrapper:
        # Forward retain_graph to the fp16 wrapper, and only pass
        # update_master_grads when its backward() accepts that argument.
        kwargs = {"retain_graph": retain_graph}
        if "update_master_grads" in fn_args(self._optimizer.backward):
            kwargs["update_master_grads"] = True
        self._optimizer.backward(loss, **kwargs)
    else:
        loss.backward(retain_graph=retain_graph)
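# A minimal, self-contained sketch (plain PyTorch, illustrative names) of why the
# retain_graph flag above exists: calling backward() a second time through a shared
# graph fails unless the first call keeps the intermediate buffers alive.
import torch

x = torch.randn(4, 3, requires_grad=True)
hidden = x.tanh()                    # intermediate node shared by both losses
loss_a = hidden.sum()
loss_b = (hidden ** 2).mean()

loss_a.backward(retain_graph=True)   # keep the graph for the second pass
loss_b.backward()                    # graph buffers are freed after this call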
def backward(self, loss): """Wrapper for backward pass. Some optimizer requires ownership of the backward pass.""" if self.amp: self._scaler.scale(loss).backward() elif self._fp16 == "legacy": kwargs = {} if "update_master_grads" in fn_args(self._optimizer.backward): kwargs["update_master_grads"] = True self._optimizer.backward(loss, **kwargs) else: loss.backward()
def backward(self, loss): """Wrapper for backward pass. Some optimizer requires ownership of the backward pass.""" if self._fp16 == "amp": import apex with apex.amp.scale_loss(loss, self._optimizer) as scaled_loss: scaled_loss.backward() elif self._fp16 == "legacy": kwargs = {} if "update_master_grads" in fn_args(self._optimizer.backward): kwargs["update_master_grads"] = True self._optimizer.backward(loss, **kwargs) else: loss.backward()