def test_lpips_loss_raises_if_wrong_reduction(x, y) -> None:
    """LPIPS must accept the supported reduction modes and reject others.

    Supported modes run without raising; unsupported values (including
    non-string values) must raise ``ValueError``.
    """
    # Each supported reduction mode should compute without error.
    for reduction in ('mean', 'sum', 'none'):
        LPIPS(reduction=reduction)(x, y)

    # Unsupported reduction values must be rejected.
    for reduction in (None, 'n', 2):
        with pytest.raises(ValueError):
            LPIPS(reduction=reduction)(x, y)
# Example 2 (0 votes)
def test_lpips_loss_raises_if_wrong_reduction(prediction: torch.Tensor,
                                              target: torch.Tensor) -> None:
    """LPIPS must accept supported reduction modes and reject others.

    NOTE(review): this variant expects ``KeyError`` for a bad reduction,
    while the sibling variant above expects ``ValueError`` — these appear
    to come from different library versions; confirm against the LPIPS
    implementation actually under test.
    """
    # Supported reduction modes should compute without raising.
    for reduction in ('mean', 'sum', 'none'):
        LPIPS(reduction=reduction)(prediction, target)

    # Unsupported reduction values must raise KeyError.
    for reduction in (None, 'n', 2):
        with pytest.raises(KeyError):
            LPIPS(reduction=reduction)(prediction, target)
def test_lpips_loss_forward_for_special_cases(x, y, expectation: Any,
                                              value: float) -> None:
    """LPIPS on special-case inputs must yield the expected loss value.

    ``expectation`` is a context manager (e.g. ``pytest.raises`` or a
    null context) describing whether the call is expected to raise.
    """
    metric = LPIPS()
    with expectation:
        loss_value = metric(x, y)
        # Tight absolute tolerance: the reference value is a fixed scalar.
        assert torch.isclose(loss_value, torch.tensor(value), atol=1e-6), \
            f'Expected loss value to be equal to target value. Got {loss_value} and {value}'
def test_lpips_computes_grad(x, y, device: str) -> None:
    """A backward pass through LPIPS must populate gradients on the input."""
    x.requires_grad_()
    metric = LPIPS()
    metric(x.to(device), y.to(device)).backward()
    assert x.grad is not None, NONE_GRAD_ERR_MSG
def test_lpips_loss_forward(input_tensors: Tuple[torch.Tensor, torch.Tensor],
                            device: str) -> None:
    """Smoke test: a plain LPIPS forward pass must not raise."""
    prediction, target = input_tensors
    LPIPS()(prediction.to(device), target.to(device))
def test_lpips_loss_init() -> None:
    """LPIPS must be constructible with default parameters."""
    _ = LPIPS()
# Example 7 (0 votes)
def test_lpips_computes_grad(prediction: torch.Tensor, target: torch.Tensor,
                             device: str) -> None:
    """Gradients must flow back to the prediction tensor through LPIPS."""
    prediction.requires_grad_()
    loss = LPIPS()(prediction.to(device), target.to(device))
    loss.backward()
    assert prediction.grad is not None, NONE_GRAD_ERR_MSG
# Example 8 (0 votes)
def test_lpips_loss_forward(input_tensors: Tuple[torch.Tensor, torch.Tensor],
                            device: str) -> None:
    """Smoke test: LPIPS forward on the fixture tensors must not raise."""
    prediction, target = input_tensors
    metric = LPIPS()
    metric(prediction.to(device), target.to(device))