def forward(self,
            input,
            target,
            inside_weights=None,
            outside_weights=None):
    # Default the weights to all-ones; otherwise broadcast them up to the
    # input's shape. Raw tensors are wrapped in Variables (pre-0.4 PyTorch,
    # where tensors and Variables were still distinct types).
    if inside_weights is None:
        inside_weights = torch.autograd.Variable(torch.ones(input.size()))
    else:
        new_size = _infer_size(input.size(), inside_weights.size())
        inside_weights = inside_weights.expand(new_size)
        if torch.is_tensor(inside_weights):
            inside_weights = torch.autograd.Variable(inside_weights)
    if input.is_cuda:
        inside_weights = inside_weights.cuda()
    if outside_weights is None:
        outside_weights = torch.autograd.Variable(torch.ones(input.size()))
    else:
        new_size = _infer_size(input.size(), outside_weights.size())
        outside_weights = outside_weights.expand(new_size)
        if torch.is_tensor(outside_weights):
            outside_weights = torch.autograd.Variable(outside_weights)
    if input.is_cuda:
        outside_weights = outside_weights.cuda()
    return WarpSmoothL1LossFunction(self.sigma,
                                    self.size_average)(input, target,
                                                       inside_weights,
                                                       outside_weights)
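The wrapped WarpSmoothL1LossFunction is not part of this snippet, so as a point of reference here is a minimal pure-PyTorch sketch of the Fast R-CNN-style smooth-L1 loss such a function typically computes; the function name and formula below are assumptions, not the actual custom autograd implementation:

import torch

def smooth_l1_reference(input, target, inside_weights, outside_weights,
                        sigma=1.0, size_average=True):
    # Hypothetical sketch of the usual Fast R-CNN smooth-L1 formulation;
    # the real WarpSmoothL1LossFunction is a custom autograd Function
    # that is not shown in the snippet above.
    sigma2 = sigma ** 2
    diff = inside_weights * (input - target)
    abs_diff = diff.abs()
    # Quadratic region for |diff| < 1/sigma^2, linear elsewhere.
    quadratic = (abs_diff < 1.0 / sigma2).float()
    loss = quadratic * 0.5 * sigma2 * diff ** 2 \
         + (1.0 - quadratic) * (abs_diff - 0.5 / sigma2)
    loss = outside_weights * loss
    return loss.mean() if size_average else loss.sum()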
Example n. 2
import warnings

import torch
# Internal helpers as laid out in the pre-0.4 torch/nn/functional.py:
from torch._C import _infer_size
from torch.nn import _functions

def binary_cross_entropy(input, target, weight=None, size_average=True):
    r"""Function that measures the Binary Cross Entropy
    between the target and the output.

    See :class:`~torch.nn.BCELoss` for details.

    Args:
        input: Variable of arbitrary shape
        target: Variable of the same shape as input
        weight (Variable, optional): a manual rescaling weight;
                if provided, it's repeated to match the input tensor's shape
        size_average (bool, optional): By default, the losses are averaged
                over observations for each minibatch. However, if the field
                :attr:`size_average` is set to ``False``, the losses are
                instead summed for each minibatch. Default: ``True``
    """
    if not (target.size() == input.size()):
        warnings.warn("Using a target size ({}) that is different to the input size ({}) is deprecated. "
                      "Please ensure they have the same size.".format(target.size(), input.size()))
    if input.nelement() != target.nelement():
        raise ValueError("Target and input must have the same number of elements. target nelement ({}) "
                         "!= input nelement ({})".format(target.nelement(), input.nelement()))

    if weight is not None:
        new_size = _infer_size(target.size(), weight.size())
        weight = weight.expand(new_size)

    return _functions.thnn.BCELoss.apply(input, target, weight, size_average)
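For reference, a minimal usage sketch of the public F.binary_cross_entropy on the modern tensor API, showing the weight broadcasting that the _infer_size / expand pair above performs internally (a (C,)-shaped weight is expanded across the batch dimension):

import torch
import torch.nn.functional as F

probs = torch.rand(4, 3)                 # predicted probabilities in (0, 1)
target = torch.empty(4, 3).random_(2)    # binary targets, same shape
weight = torch.tensor([1.0, 2.0, 0.5])   # shape (3,), expanded to (4, 3)
loss = F.binary_cross_entropy(probs, target, weight=weight)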
Example n. 3
import warnings

import torch
from torch._C import _infer_size  # internal shape-broadcast helper

def binary_cross_entropy(input,
                         target,
                         weight=None,
                         size_average=True,
                         reduce=True):
    r"""Function that measures the Binary Cross Entropy
    between the target and the output.

    See :class:`~torch.nn.BCELoss` for details.

    Args:
        input: Variable of arbitrary shape
        target: Variable of the same shape as input
        weight (Variable, optional): a manual rescaling weight;
                if provided, it's repeated to match the input tensor's shape
        size_average (bool, optional): By default, the losses are averaged
                over observations for each minibatch. However, if the field
                :attr:`size_average` is set to ``False``, the losses are instead summed
                for each minibatch. Default: ``True``
        reduce (bool, optional): By default, the losses are averaged or summed over
                observations for each minibatch depending on :attr:`size_average`. When :attr:`reduce`
                is ``False``, returns a loss per input/target element instead and ignores
                :attr:`size_average`. Default: ``True``

    Examples::

        >>> input = torch.randn(3, requires_grad=True)
        >>> target = torch.empty(3).random_(2)
        >>> loss = F.binary_cross_entropy(F.sigmoid(input), target)
        >>> loss.backward()
    """
    if not (target.size() == input.size()):
        warnings.warn(
            "Using a target size ({}) that is different to the input size ({}) is deprecated. "
            "Please ensure they have the same size.".format(
                target.size(), input.size()))
    if input.nelement() != target.nelement():
        raise ValueError(
            "Target and input must have the same number of elements. target nelement ({}) "
            "!= input nelement ({})".format(target.nelement(),
                                            input.nelement()))

    if weight is not None:
        # Broadcast the rescaling weight up to the target's shape.
        new_size = _infer_size(target.size(), weight.size())
        weight = weight.expand(new_size)

    return torch._C._nn.binary_cross_entropy(input, target, weight,
                                             size_average, reduce)
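To illustrate the reduce flag documented above: with reduce=False the call returns one loss value per element instead of a scalar. The first calls below use the snippet's 0.4-era keywords; the equivalents on current PyTorch, where size_average/reduce were folded into reduction, are shown alongside:

import torch
import torch.nn.functional as F

probs = torch.rand(2, 3)
target = torch.empty(2, 3).random_(2)

# 0.4-era API, as in the snippet above:
# per_elem = F.binary_cross_entropy(probs, target, reduce=False)       # shape (2, 3)
# scalar   = F.binary_cross_entropy(probs, target, size_average=True)  # scalar

# Current API equivalents:
per_elem = F.binary_cross_entropy(probs, target, reduction='none')  # shape (2, 3)
scalar = F.binary_cross_entropy(probs, target, reduction='mean')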
# The variant below also needs PyTorch's internal reduction helpers
# (module layout as of PyTorch >= 1.0; earlier releases defined
# _Reduction inside torch.nn.functional itself):
from torch.nn import _reduction as _Reduction

def binary_cross_entropy_class_weighted(input, target, weight=None, size_average=None,
                                        reduce=None, reduction='elementwise_mean',
                                        class_weight=None):
    r"""Function that measures the Binary Cross Entropy
    between the target and the output.

    See :class:`~torch.nn.BCELoss` for details.

    Args:
        input: Tensor of arbitrary shape
        target: Tensor of the same shape as input
        weight (Tensor, optional): a manual rescaling weight;
                if provided, it's repeated to match the input tensor's shape
        size_average (bool, optional): Deprecated (see :attr:`reduction`). By default,
            the losses are averaged over each loss element in the batch. Note that for
            some losses, there are multiple elements per sample. If the field :attr:`size_average`
            is set to ``False``, the losses are instead summed for each minibatch. Ignored
            when :attr:`reduce` is ``False``. Default: ``True``
        reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the
            losses are averaged or summed over observations for each minibatch depending
            on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per
            batch element instead and ignores :attr:`size_average`. Default: ``True``
        reduction (string, optional): Specifies the reduction to apply to the output:
            'none' | 'elementwise_mean' | 'sum'. 'none': no reduction will be applied,
            'elementwise_mean': the sum of the output will be divided by the number of
            elements in the output, 'sum': the output will be summed. Note: :attr:`size_average`
            and :attr:`reduce` are in the process of being deprecated, and in the meantime,
            specifying either of those two args will override :attr:`reduction`. Default: 'elementwise_mean'
        class_weight (sequence, optional): a list/tuple of two alpha values, summing
            to 1, giving the relative weight of the negative class (index 0) and the
            positive class (index 1).

    Examples::

        >>> input = torch.randn((3, 2), requires_grad=True)
        >>> target = torch.rand((3, 2), requires_grad=False)
        >>> loss = F.binary_cross_entropy(F.sigmoid(input), target)
        >>> loss.backward()
    """

    # Clamp predictions away from 0 and 1 so the log() terms below stay finite.
    eps = 1e-12
    input = torch.clamp(input, min=eps, max=1 - eps)

    if size_average is not None or reduce is not None:
        reduction = _Reduction.legacy_get_enum(size_average, reduce)
    else:
        reduction = _Reduction.get_enum(reduction)
    if not (target.size() == input.size()):
        warnings.warn("Using a target size ({}) that is different to the input size ({}) is deprecated. "
                      "Please ensure they have the same size.".format(target.size(), input.size()))
    if input.nelement() != target.nelement():
        raise ValueError("Target and input must have the same number of elements. target nelement ({}) "
                         "!= input nelement ({})".format(target.nelement(), input.nelement()))

    if weight is not None:
        new_size = _infer_size(target.size(), weight.size())
        weight = weight.expand(new_size)

    if class_weight is not None:
        # Per-element weighted BCE: class_weight[1] scales the positive
        # (target == 1) term, class_weight[0] the negative term. Note that
        # this branch always returns the mean and ignores `weight` and
        # `reduction`.
        loss = class_weight[1] * (target * torch.log(input)) + \
               class_weight[0] * ((1 - target) * torch.log(1 - input))
        return torch.neg(torch.mean(loss))

    return torch._C._nn.binary_cross_entropy(input, target, weight, reduction)
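A minimal usage sketch of binary_cross_entropy_class_weighted as defined above; the 0.25/0.75 split is an arbitrary illustration that weights errors on positive pixels three times as heavily as negative ones:

import torch

logits = torch.randn(4, 1, 8, 8, requires_grad=True)
probs = torch.sigmoid(logits)               # the function expects probabilities
target = torch.empty(4, 1, 8, 8).random_(2)

# class_weight = [negative-class alpha, positive-class alpha], summing to 1.
loss = binary_cross_entropy_class_weighted(probs, target,
                                           class_weight=[0.25, 0.75])
loss.backward()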