Example #1
 def updateGradInput(self, input, target):
     implicit_gradOutput = torch.ones(1).type_as(input)
     self._backend.SmoothL1Criterion_updateGradInput(
         self._backend.library_state,
         input,
         target,
         implicit_gradOutput,
         self.gradInput,
         _Reduction.legacy_get_enum(self.sizeAverage, True),
     )
     return self.gradInput
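
These methods are not called directly: the legacy Criterion base class routes forward() to updateOutput() and backward() to updateGradInput(). A minimal driver sketch, assuming a PyTorch 0.4-era install where torch.legacy.nn is still present:

    import torch
    from torch.legacy import nn as legacy_nn

    crit = legacy_nn.MSECriterion()
    x = torch.randn(4, 5)
    y = torch.randn(4, 5)
    loss = crit.forward(x, y)    # delegates to updateOutput(input, target)
    grads = crit.backward(x, y)  # delegates to updateGradInput(input, target)
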
Example #2
 def updateGradInput(self, input, target):
     assert input.is_same_size(target)
     implicit_gradOutput = torch.ones(1).type_as(input)
     self._backend.DistKLDivCriterion_updateGradInput(
         self._backend.library_state,
         input,
         target,
         implicit_gradOutput,
         self.gradInput,
         _Reduction.legacy_get_enum(self.sizeAverage, True),
     )
     return self.gradInput
Example #3
 def updateGradInput(self, input, target):
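     # self._target is assumed to have been populated by updateOutput
     # (via self._transformTarget, as in Example #9) before this runs.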
     assert input.nelement() == self._target.nelement()
     implicit_gradOutput = torch.Tensor([1]).type(input.type())
     self._backend.MSECriterion_updateGradInput(
         self._backend.library_state,
         input,
         self._target,
         implicit_gradOutput,
         self.gradInput,
         _Reduction.legacy_get_enum(self.sizeAverage, True),
     )
     return self.gradInput
Example #4
 def updateOutput(self, input, target):
     if self.output_tensor is None:
         self.output_tensor = input.new(1)
     self._backend.SmoothL1Criterion_updateOutput(
         self._backend.library_state,
         input,
         target,
         self.output_tensor,
         _Reduction.legacy_get_enum(self.sizeAverage, True),
     )
     self.output = self.output_tensor[0].item()
     return self.output
Example #5
    def updateGradInput(self, input, target):
        implicit_gradOutput = torch.Tensor([1]).type(input.type())

        self._backend.MSECriterion_updateGradInput(
            self._backend.library_state,
            input,
            target,
            implicit_gradOutput,
            self.gradInput,
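            # emit_warning=False suppresses the deprecation warning that
            # legacy_get_enum otherwise emits for the size_average/reduce args.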
            _Reduction.legacy_get_enum(self.sizeAverage, True, emit_warning=False),
        )
        return self.gradInput
Example #6
 def updateOutput(self, input, target):
     assert input.is_same_size(target)
     if self.output_tensor is None:
         self.output_tensor = input.new(1)
     self._backend.DistKLDivCriterion_updateOutput(
         self._backend.library_state,
         input,
         target,
         self.output_tensor,
         _Reduction.legacy_get_enum(self.sizeAverage, True),
     )
     self.output = self.output_tensor[0].item()
     return self.output
Example #7
 def updateGradInput(self, input, target):
     target = target.long()
     implicit_gradOutput = torch.ones(1).type_as(input)
     self._backend.MultiLabelMarginCriterion_updateGradInput(
         self._backend.library_state,
         input,
         target,
         implicit_gradOutput,
         self.gradInput,
         self.isTarget,
         _Reduction.legacy_get_enum(self.sizeAverage, True),
     )
     return self.gradInput
Example #8
 def updateOutput(self, input, target):
     if self.output_tensor is None:
         self.output_tensor = input.new(1)
     target = target.long()
     self._backend.MultiLabelMarginCriterion_updateOutput(
         self._backend.library_state,
         input,
         target,
         self.output_tensor,
         self.isTarget,
         _Reduction.legacy_get_enum(self.sizeAverage, True),
     )
     self.output = self.output_tensor[0].item()
     return self.output
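
Both MultiLabelMargin methods cast target to long because the THNN kernel indexes classes with integer labels. A hypothetical target layout, padded with -1 after each sample's last valid label (matching torch.nn.MultiLabelMarginLoss):

    import torch

    target = torch.tensor([[2, 0, -1, -1],   # sample 0 carries labels {2, 0}
                           [1, -1, -1, -1]]) # sample 1 carries label {1}
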
Example #9
    def updateOutput(self, input, target):
        self._transformTarget(target)

        assert input.nelement() == self._target.nelement()
        if self.output_tensor is None:
            self.output_tensor = input.new(1)
        self._backend.MSECriterion_updateOutput(
            self._backend.library_state,
            input,
            self._target,
            self.output_tensor,
            _Reduction.legacy_get_enum(self.sizeAverage, True),
        )
        self.output = self.output_tensor[0].item()
        return self.output
Example #10
 def updateOutput(self, input, target):
     if not hasattr(self, 'ignore_index'):
         self.ignore_index = -100
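     # output_tensor and total_weight_tensor are assumed to have been
     # allocated elsewhere (e.g. in the constructor); compare the lazy
     # allocation in Example #4.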
     self._backend.SpatialClassNLLCriterion_updateOutput(
         self._backend.library_state,
         input,
         target,
         self.output_tensor,
         _Reduction.legacy_get_enum(self.sizeAverage, True),
         self.weights,
         self.total_weight_tensor,
         self.ignore_index,
     )
     self.output = self.output_tensor[0].item()
     return self.output
Example #11
 def updateGradInput(self, input, target):
     self.gradInput.resize_as_(input).zero_()
     implicit_gradOutput = torch.ones(1).type_as(input)
     self._backend.SpatialClassNLLCriterion_updateGradInput(
         self._backend.library_state,
         input,
         target,
         implicit_gradOutput,
         self.gradInput,
         _Reduction.legacy_get_enum(self.sizeAverage, True),
         self.weights,
         self.total_weight_tensor,
         self.ignore_index,
     )
     return self.gradInput
Example #12
 def updateGradInput(self, input, target):
     target = target.long()
     implicit_gradOutput = torch.ones(1).type_as(input)
     self._backend.MultiMarginCriterion_updateGradInput(
         self._backend.library_state,
         input,
         target,
         implicit_gradOutput,
         self.gradInput,
         _Reduction.legacy_get_enum(self.sizeAverage,
                                    True,
                                    emit_warning=False),
         self.p,
         self.weights,
         self.margin,
     )
     return self.gradInput
Example #13
 def updateOutput(self, input, target):
     self.ignore_index = getattr(self, "ignore_index", -100)
     target = target.long()
     self._backend.ClassNLLCriterion_updateOutput(
         self._backend.library_state,
         input,
         target,
         self.output_tensor,
         _Reduction.legacy_get_enum(self.sizeAverage,
                                    True,
                                    emit_warning=False),
         self.weights,
         self.total_weight_tensor,
         self.ignore_index,
     )
     self.output = self.output_tensor[0].item()
     return self.output
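
The getattr call above and the hasattr guard in Example #10 are two spellings of the same backfill, applied to criterion objects deserialized from checkpoints that predate the ignore_index attribute:

    # equivalent idioms for backfilling a missing attribute
    if not hasattr(self, 'ignore_index'):
        self.ignore_index = -100                              # Example #10
    self.ignore_index = getattr(self, 'ignore_index', -100)  # Example #13
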
Example #14
 def updateOutput(self, input, target):
     if self.output_tensor is None:
         self.output_tensor = input.new(1)
     target = target.long()
     self._backend.MultiMarginCriterion_updateOutput(
         self._backend.library_state,
         input,
         target,
         self.output_tensor,
         _Reduction.legacy_get_enum(self.sizeAverage,
                                    True,
                                    emit_warning=False),
         self.p,
         self.weights,
         self.margin,
     )
     self.output = self.output_tensor[0].item()
     return self.output
Example #15
    def updateGradInput(self, input, target):
        self.buffer.resize_as_(input).copy_(target)
        if input.dim() - 1 == self.weight.dim():
            for i in range(input.size(0)):
                self.buffer[i].mul_(self.weight)
        else:
            self.buffer.mul_(self.weight)

        implicit_gradOutput = torch.Tensor([1]).type(input.type())

        self._backend.MSECriterion_updateGradInput(
            self._backend.library_state,
            input,
            self.buffer,
            implicit_gradOutput,
            self.gradInput,
            _Reduction.legacy_get_enum(self.sizeAverage, True, emit_warning=False),
        )
        return self.gradInput
Example #16
    def updateOutput(self, input, target):
        if self.buffer is None:
            self.buffer = input.new()
        self.buffer.resize_as_(input).copy_(target)
        if input.dim() - 1 == self.weight.dim():
            for i in range(input.size(0)):
                self.buffer[i].mul_(self.weight)
        else:
            self.buffer.mul_(self.weight)

        if self.output_tensor is None:
            self.output_tensor = input.new(1)
        self._backend.MSECriterion_updateOutput(
            self._backend.library_state,
            input,
            self.buffer,
            self.output_tensor,
            _Reduction.legacy_get_enum(self.sizeAverage, True, emit_warning=False),
        )
        self.output = self.output_tensor[0].item()
        return self.output
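
Examples #15 and #16 appear to be the two halves of the legacy WeightedMSECriterion: the weight scales the target (broadcast over the batch dimension when weight has one fewer dimension than input) before an ordinary MSE is computed. A rough modern equivalent, sketched with today's torch.nn.functional API instead of the THNN backend:

    import torch
    import torch.nn.functional as F

    def weighted_mse(input, target, weight):
        # broadcasting weight across the batch dimension mirrors the
        # per-sample buffer[i].mul_(weight) loop above
        return F.mse_loss(input, target * weight)
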
Example #17
# Assumes the module-level imports used by torch/nn/functional.py:
# torch, warnings, torch._C._infer_size, and torch.nn's _reduction
# helpers (referenced here as _Reduction).
def binary_cross_entropy_class_weighted(input, target, weight=None, size_average=None,
                                        reduce=None, reduction='elementwise_mean',
                                        class_weight=None):
    r"""Function that measures the Binary Cross Entropy
    between the target and the output.

    See :class:`~torch.nn.BCELoss` for details.

    Args:
        input: Tensor of arbitrary shape
        target: Tensor of the same shape as input
        weight (Tensor, optional): a manual rescaling weight
                if provided it's repeated to match input tensor shape
        size_average (bool, optional): Deprecated (see :attr:`reduction`). By default,
            the losses are averaged over each loss element in the batch. Note that for
            some losses, there are multiple elements per sample. If the field :attr:`size_average`
            is set to ``False``, the losses are instead summed for each minibatch. Ignored
            when reduce is ``False``. Default: ``True``
        reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the
            losses are averaged or summed over observations for each minibatch depending
            on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per
            batch element instead and ignores :attr:`size_average`. Default: ``True``
        reduction (string, optional): Specifies the reduction to apply to the output:
            'none' | 'elementwise_mean' | 'sum'. 'none': no reduction will be applied,
            'elementwise_mean': the sum of the output will be divided by the number of
            elements in the output, 'sum': the output will be summed. Note: :attr:`size_average`
            and :attr:`reduce` are in the process of being deprecated, and in the meantime,
            specifying either of those two args will override :attr:`reduction`. Default: 'elementwise_mean'
        class_weight (list/tuple, optional): two alpha values summing to 1 that set the
            relative weight of each class: ``class_weight[0]`` scales the negative
            (``target == 0``) term and ``class_weight[1]`` the positive term. When
            given, :attr:`weight` and :attr:`reduction` are ignored and the
            mean-reduced weighted loss is returned.

    Examples::

        >>> input = torch.randn((3, 2), requires_grad=True)
        >>> target = torch.rand((3, 2), requires_grad=False)
        >>> loss = F.binary_cross_entropy(F.sigmoid(input), target)
        >>> loss.backward()
    """

    # Clamp the predicted probabilities away from exact 0 and 1 so that the
    # log() terms in the class-weighted branch below stay finite.
    eps = 1e-12
    input = torch.clamp(input, min=eps, max=1 - eps)

    if size_average is not None or reduce is not None:
        reduction = _Reduction.legacy_get_enum(size_average, reduce)
    else:
        reduction = _Reduction.get_enum(reduction)
    if not (target.size() == input.size()):
        warnings.warn("Using a target size ({}) that is different to the input size ({}) is deprecated. "
                      "Please ensure they have the same size.".format(target.size(), input.size()))
    if input.nelement() != target.nelement():
        raise ValueError("Target and input must have the same number of elements. target nelement ({}) "
                         "!= input nelement ({})".format(target.nelement(), input.nelement()))

    if weight is not None:
        new_size = _infer_size(target.size(), weight.size())
        weight = weight.expand(new_size)

    if class_weight is not None:
        # Per-class alpha weighting: class_weight[1] scales the positive term,
        # class_weight[0] the negative term. The result is always mean-reduced;
        # weight and reduction are not applied on this path.
        loss = class_weight[1] * (target * torch.log(input)) + \
               class_weight[0] * ((1 - target) * torch.log(1 - input))
        return torch.neg(torch.mean(loss))

    return torch._C._nn.binary_cross_entropy(input, target, weight, reduction)
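
A usage sketch for the function above; note that the class_weight path ignores weight and reduction and always returns the mean-reduced loss:

    import torch

    logits = torch.randn(3, 2, requires_grad=True)
    target = torch.randint(0, 2, (3, 2)).float()

    # weight the positive term twice as heavily as the negative term
    loss = binary_cross_entropy_class_weighted(torch.sigmoid(logits), target,
                                               class_weight=(1 / 3, 2 / 3))
    loss.backward()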