Example #1
    def _check_loss_value(self, loss_value: torch.FloatTensor) -> None:
        """Check loss value dimensionality, and ability for backward."""
        # test reduction
        self.assertEqual(0, loss_value.ndim)

        # Test backward
        loss_value.backward()
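The helper above assumes it lives inside a unittest.TestCase and receives an already-reduced loss. A minimal sketch of how it might be exercised, with a hypothetical test class and toy model (not from the original repository):

import unittest
import torch
import torch.nn.functional as F

class LossValueTest(unittest.TestCase):
    def _check_loss_value(self, loss_value: torch.FloatTensor) -> None:
        self.assertEqual(0, loss_value.ndim)  # loss was reduced to a scalar
        loss_value.backward()                 # gradients propagate without error

    def test_mse_loss_is_scalar(self):
        model = torch.nn.Linear(3, 1)
        loss = F.mse_loss(model(torch.randn(5, 3)), torch.randn(5, 1))
        self._check_loss_value(loss)

if __name__ == "__main__":
    unittest.main()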
Example #2
    def backprop(self, loss: torch.FloatTensor):
        """Backpropagate and step the optimizer, scaling the loss when AMP is active."""
        if self.scaler is not None:
            # Mixed-precision path: scale the loss before backward, then step and update the scaler.
            self.scaler.scale(loss).backward()
            self.scaler.step(self.optimizer)
            self.scaler.update()
        else:
            loss.backward()
            self.optimizer.step()
        self.optimizer.zero_grad()
Example #3
    def backprop(self, loss: torch.FloatTensor):
        """SGD parameter update, optionally with mixed precision."""
        if self.scaler is not None:
            self.scaler.scale(loss).backward()
            self.scaler.step(self.optimizer)
            self.scaler.update()
        else:
            loss.backward()
            self.optimizer.step()
        self.optimizer.zero_grad()
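Both backprop variants assume a surrounding object that owns self.optimizer and, optionally, a torch.cuda.amp.GradScaler in self.scaler. A minimal sketch of that context; the Trainer class, use_amp flag, model, and data below are assumptions for illustration, not taken from the source:

import torch
import torch.nn.functional as F

class Trainer:
    """Hypothetical owner of the optimizer/scaler state that backprop() relies on."""

    def __init__(self, model: torch.nn.Module, use_amp: bool = False):
        self.model = model
        self.optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
        # GradScaler only pays off under CUDA autocast; keep it None otherwise.
        self.scaler = torch.cuda.amp.GradScaler() if use_amp else None

    def backprop(self, loss: torch.FloatTensor):
        if self.scaler is not None:
            self.scaler.scale(loss).backward()
            self.scaler.step(self.optimizer)
            self.scaler.update()
        else:
            loss.backward()
            self.optimizer.step()
        self.optimizer.zero_grad()

# Toy single step on random data (CPU path, so the scaler branch is skipped).
model = torch.nn.Linear(4, 1)
trainer = Trainer(model)
loss = F.mse_loss(model(torch.randn(8, 4)), torch.randn(8, 1))
trainer.backprop(loss)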
Example #4
import torch
from apex import amp  # NVIDIA apex mixed-precision utilities

def training_step(cost: torch.FloatTensor,
                  optimizer: torch.optim.Optimizer,
                  fp16: bool = False,
                  lr_scheduler=None):
    """Run one optimizer step, scaling the loss through apex amp when fp16 is set."""
    optimizer.zero_grad()

    if fp16:
        with amp.scale_loss(cost, optimizer) as scaled_loss:
            scaled_loss.backward()
    else:
        cost.backward()

    optimizer.step()

    if lr_scheduler is not None:
        lr_scheduler.step()
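Calling this with fp16=True only works after the model and optimizer have been registered with apex via amp.initialize. A hedged usage sketch, assuming NVIDIA apex is installed and a CUDA device is available; the model, data, and opt_level below are illustrative:

import torch
import torch.nn.functional as F
from apex import amp  # requires NVIDIA apex with CUDA support

model = torch.nn.Linear(4, 1).cuda()
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
# Register model and optimizer with amp before any amp.scale_loss call.
model, optimizer = amp.initialize(model, optimizer, opt_level="O1")

x, y = torch.randn(8, 4).cuda(), torch.randn(8, 1).cuda()
cost = F.mse_loss(model(x), y)
training_step(cost, optimizer, fp16=True)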
Example #5
def training_step(cost: torch.FloatTensor, optimizer: torch.optim.Optimizer):
    """Plain full-precision update: clear gradients, backpropagate, step."""
    optimizer.zero_grad()
    cost.backward()
    optimizer.step()
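A short driver loop for this full-precision variant; the model, random batches, and learning rate are hypothetical stand-ins, not part of the original example:

import torch
import torch.nn.functional as F

model = torch.nn.Linear(4, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

# A few steps on random batches, reusing training_step from the example above.
for _ in range(3):
    x, y = torch.randn(8, 4), torch.randn(8, 1)
    training_step(F.mse_loss(model(x), y), optimizer)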