def test_should_multiply_by_a_scalar(self):
    """Scalar multiplication must scale the wrapped value and commute."""
    wrapped = ApexLoss(self.VALID_LOSS_ID, self.VALID_LOSS_VALUE, None)
    expected = ApexLoss(self.VALID_LOSS_ID, self.VALID_SCALAR * self.VALID_LOSS_VALUE, None)
    # Both left- and right-multiplication should yield the same result.
    assert_that(self.VALID_SCALAR * wrapped, equal_to(expected))
    assert_that(wrapped * self.VALID_SCALAR, equal_to(expected))
def test_should_add_losses(self):
    """Adding two wrapped losses should sum their underlying tensors."""
    first = ApexLoss(self.VALID_LOSS_ID, self.VALID_LOSS_VALUE, None)
    second = ApexLoss(self.VALID_LOSS_ID, self.VALID_LOSS_VALUE, None)
    # Expected sum assumes VALID_LOSS_VALUE is tensor([1.0, 2.0, 3.0]) — doubled element-wise.
    expected = ApexLoss(self.VALID_LOSS_ID, torch.tensor([2.0, 4.0, 6.0]), None)
    assert_that(first + second, equal_to(expected))
def test_should_substract_a_scalar(self):
    """Subtraction with a scalar should work on either side of the operator.

    NOTE(review): "substract" in the method name is a typo for "subtract";
    kept as-is so the test id stays stable for test runners/reports.
    """
    wrapped = ApexLoss(self.VALID_LOSS_ID, self.VALID_LOSS_VALUE, None)
    expected_left = ApexLoss(self.VALID_LOSS_ID, self.VALID_LOSS_VALUE - self.VALID_SCALAR, None)
    expected_right = ApexLoss(self.VALID_LOSS_ID, self.VALID_SCALAR - self.VALID_LOSS_VALUE, None)
    assert_that(wrapped - self.VALID_SCALAR, equal_to(expected_left))
    assert_that(self.VALID_SCALAR - wrapped, equal_to(expected_right))
def test_should_divide_by_a_scalar(self):
    """Division with a scalar should work on either side of the operator."""
    # Fixed: the right-division local was misspelled `right__div_expected_result`
    # (stray double underscore), inconsistent with the subtraction test's naming.
    left_div_expected_result = ApexLoss(self.VALID_LOSS_ID, self.VALID_LOSS_VALUE / self.VALID_SCALAR, None)
    right_div_expected_result = ApexLoss(self.VALID_LOSS_ID, self.VALID_SCALAR / self.VALID_LOSS_VALUE, None)
    loss = ApexLoss(self.VALID_LOSS_ID, self.VALID_LOSS_VALUE, None)
    assert_that(loss / self.VALID_SCALAR, equal_to(left_div_expected_result))
    assert_that(self.VALID_SCALAR / loss, equal_to(right_div_expected_result))
def compute_and_update_test_loss(self, name, pred, target) -> Union[ApexLoss, torch.Tensor]:
    """Compute the named test loss, fold it into the running metric, and return it.

    The raw criterion output is stored in ``self._step_test_loss[name]`` and its
    scalar value is pushed into ``self._test_loss[name]``. When AMP is enabled the
    loss is wrapped in an ``ApexLoss`` bound to the optimizer; otherwise the bare
    tensor is returned.
    """
    step_loss = self._criterions[name](pred, target)
    self._step_test_loss[name] = step_loss
    self._test_loss[name].update(step_loss.item())
    if self.use_amp:
        return ApexLoss(self._amp_id, step_loss, self._optimizer)
    return step_loss
def compute_losses(self, pred, target) -> Dict[str, Union[ApexLoss, torch.Tensor]]:
    """Evaluate every registered criterion on (pred, target).

    Returns a mapping from criterion name to its loss; each loss is wrapped in an
    ``ApexLoss`` bound to the optimizer when AMP is active, otherwise returned raw.
    """
    if self.use_amp:
        return {
            name: ApexLoss(self._amp_id, criterion(pred, target), self._optimizer)
            for name, criterion in self._criterions.items()
        }
    return {name: criterion(pred, target) for name, criterion in self._criterions.items()}
def compute_and_update_test_losses(self, pred, target) -> Dict[str, Union[ApexLoss, torch.Tensor]]:
    """Compute all test losses, update their running metrics, and return them.

    For every registered criterion, the raw loss is stored in
    ``self._step_test_loss`` and its scalar value pushed into the matching
    ``self._test_loss`` metric. The returned mapping holds the per-criterion
    losses, wrapped in ``ApexLoss`` when AMP is enabled.
    """
    losses = {}
    for name, criterion in self._criterions.items():
        step_loss = criterion(pred, target)
        self._step_test_loss[name] = step_loss
        self._test_loss[name].update(step_loss.item())
        if self.use_amp:
            losses[name] = ApexLoss(self._amp_id, step_loss, self._optimizer)
        else:
            losses[name] = step_loss
    return losses
def compute_loss(self, name, pred, target) -> Union[ApexLoss, torch.Tensor]:
    """Evaluate the criterion registered under *name* on (pred, target).

    Returns the bare loss tensor, or an ``ApexLoss`` wrapping it (bound to the
    optimizer) when AMP is enabled. No running metric is updated here.
    """
    raw_loss = self._criterions[name](pred, target)
    if not self.use_amp:
        return raw_loss
    return ApexLoss(self._amp_id, raw_loss, self._optimizer)
def test_should_compute_the_mean(self):
    """mean() should reduce the wrapped tensor while keeping the loss id."""
    wrapped = ApexLoss(self.VALID_LOSS_ID, self.VALID_LOSS_VALUE, None)
    # Expected mean assumes VALID_LOSS_VALUE is tensor([1.0, 2.0, 3.0]).
    expected = ApexLoss(self.VALID_LOSS_ID, torch.tensor([2.0]), None)
    assert_that(wrapped.mean(), equal_to(expected))