Example #1
import torch

import pystiche.misc as misc  # assumption: `misc` is pystiche.misc
import pytorch_testing_utils as ptu  # assumption: `ptu` is pytorch_testing_utils


def test_reduce():
    torch.manual_seed(0)
    x = torch.rand(1, 3, 128, 128)

    actual = misc.reduce(x, "mean")
    desired = torch.mean(x)
    ptu.assert_allclose(actual, desired)

    actual = misc.reduce(x, "sum")
    desired = torch.sum(x)
    ptu.assert_allclose(actual, desired)

    actual = misc.reduce(x, "none")
    desired = x
    ptu.assert_allclose(actual, desired)
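
From the assertions above, a minimal sketch of the behavior misc.reduce must implement (not the actual pystiche code; the error branch is an assumption):

import torch


def reduce(x: torch.Tensor, reduction: str = "mean") -> torch.Tensor:
    # behavior implied by the test: "mean" -> torch.mean, "sum" -> torch.sum,
    # "none" -> identity
    if reduction == "mean":
        return torch.mean(x)
    elif reduction == "sum":
        return torch.sum(x)
    elif reduction == "none":
        return x
    else:
        raise ValueError(f"Unknown reduction: {reduction}")  # assumption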
Example #2
import torch
from torch.nn.functional import relu

from pystiche.misc import reduce  # assumption: `reduce` comes from pystiche.misc


def value_range_loss(input: torch.Tensor,
                     min: float = 0.0,
                     max: float = 1.0,
                     reduction: str = "mean") -> torch.Tensor:
    # TODO: remove sinh call; a quadratic, i.e. x * (x - 1), is enough
    # the product is negative inside [min, max], so relu keeps only out-of-range penalties
    loss = relu(torch.sinh(input - min) * torch.sinh(input - max))
    return reduce(loss, reduction)
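
The TODO above can be realized with the quadratic it suggests, since (input - min) * (input - max) has the same sign pattern as the sinh product. A minimal sketch under that assumption (the name value_range_loss_quadratic is made up for illustration):

import torch
from torch.nn.functional import relu

from pystiche.misc import reduce  # assumption: same helper as above


def value_range_loss_quadratic(input: torch.Tensor,
                               min: float = 0.0,
                               max: float = 1.0,
                               reduction: str = "mean") -> torch.Tensor:
    # hypothetical variant implementing the TODO: the quadratic is negative
    # inside [min, max] and positive outside, just like the sinh product
    loss = relu((input - min) * (input - max))
    return reduce(loss, reduction)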
Example #3
import torch

import pystiche
from pystiche.misc import reduce  # assumption: `reduce` comes from pystiche.misc


def total_variation_loss(input: torch.Tensor,
                         exponent: float = 2.0,
                         reduction: str = "mean") -> torch.Tensor:
    r"""Calculates the total variation loss. See
    :class:`pystiche.ops.TotalVariationOperator` for details.

    Args:
        input: Input image
        exponent: Parameter :math:`\beta`. A higher value leads to smoother
            results. Defaults to ``2.0``.
        reduction: Reduction method of the output passed to
            :func:`pystiche.misc.reduce`. Defaults to ``"mean"``.

    Examples:

        >>> import torch
        >>> import pystiche.loss.functional as F
        >>> input = torch.rand(2, 3, 256, 256)
        >>> score = F.total_variation_loss(input)
    """
    # crop so that both gradient maps share the same shape;
    # this ignores the last row and column of the image
    grad_vert = input[:, :, :-1, :-1] - input[:, :, 1:, :-1]
    grad_horz = input[:, :, :-1, :-1] - input[:, :, :-1, 1:]
    grad = pystiche.nonnegsqrt(grad_vert**2.0 + grad_horz**2.0)
    loss = grad**exponent
    return reduce(loss, reduction)
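
Spelled out, with :math:`\beta` set to ``exponent``, the per-pixel loss computed above before reduction is

    \ell_{i,j} = \left( (x_{i,j} - x_{i+1,j})^2 + (x_{i,j} - x_{i,j+1})^2 \right)^{\beta / 2}

since ``grad`` is the square root of the summed squared forward differences and is then raised to ``exponent``.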