def forward(self, predicted, target):
     _assert_no_grad(target)
     diff = (predicted - target) ** 2
     if not self.reduce:
         return diff
     loss = torch.sqrt(torch.mean(diff)) if self.size_average else torch.sqrt(torch.sum(diff))
     return loss
Example #2
 def forward(self, input, trueDistributions):
     _assert_no_grad(trueDistributions)
     logSoftmaxesForAll = F.log_softmax(input, dim=1)
     # element-wise multiply every log-softmax row by the true distribution for its example to get
     # the cross-entropy
     unsumedCrossEntropy = logSoftmaxesForAll * trueDistributions
     # take negative and average across all examples
     return torch.sum(unsumedCrossEntropy) * -1 / input.size(0)
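For one-hot targets this soft-target cross-entropy coincides with the standard mean-reduced F.cross_entropy. A minimal sketch with hypothetical tensors (assumes a recent PyTorch where log_softmax takes an explicit dim):

import torch
import torch.nn.functional as F

logits = torch.randn(4, 10)                       # hypothetical batch of 4, 10 classes
labels = torch.randint(0, 10, (4,))               # hard class indices
soft = F.one_hot(labels, num_classes=10).float()  # one-hot "true distributions"

# soft-target cross-entropy, as in the example above
loss_soft = -(F.log_softmax(logits, dim=1) * soft).sum() / logits.size(0)
loss_hard = F.cross_entropy(logits, labels)       # standard hard-label cross-entropy
print(torch.allclose(loss_soft, loss_hard))       # True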
Example #3
 def forward(self, input, target):
     Moddy._assert_no_grad(target)
     zet = ((th.sqrt(th.sum(input * input, dim=-1) + 1)))
     inner = (th.sum(input * position[target.data], dim=-1))
     return th.sum(
         self.ruler(
             th.clamp(zet * position_zet[target.data] - inner,
                      min=1 + eps).to(device)))
Example #4
 def forward(self, input, target):
     _assert_no_grad(target)
     return dice_loss(input,
                      target,
                      weight=self.weight,
                      smooth=self.smooth,
                      size_average=self.size_average,
                      reduce=self.reduce)
Example #5
 def forward(self, x, y, m, a):
     _assert_no_grad(y)
     return self._vague_loss(x,
                             y,
                             m,
                             a,
                             size_average=self.size_average,
                             reduce=self.reduce)
Example #6
 def forward(self, input, target, weight):
     """
     Weighted binary classification loss + Dice coefficient loss
     """
     _assert_no_grad(target)
     loss1 = F.binary_cross_entropy(input, target, weight,
                                    self.size_average, self.reduce)
     loss2 = self.dice_loss(input, target)
     return loss1, loss2
Example #7
 def forward(self, x, target):
     _assert_no_grad(target)
     num_classes = x.size(1)
     expected_class = (F.softmax(x, dim=1) * torch.arange(
         end=num_classes, dtype=torch.float, requires_grad=True).cuda()).sum(dim=1)
     return F.mse_loss(expected_class,
                       target,
                       size_average=self.size_average,
                       reduce=self.reduce)
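The expected-class value above is a soft argmax: the softmax probabilities are collapsed into one differentiable scalar that is then regressed against the target with MSE. A minimal CPU sketch of the same computation (hypothetical tensors; the class-index range is created as a float constant so it can multiply the softmax output):

import torch
import torch.nn.functional as F

x = torch.randn(2, 5, requires_grad=True)        # logits over 5 ordinal classes
target = torch.tensor([1.0, 3.0])                # real-valued class targets

classes = torch.arange(5, dtype=torch.float32)   # class indices 0..4
expected_class = (F.softmax(x, dim=1) * classes).sum(dim=1)
loss = F.mse_loss(expected_class, target)
loss.backward()                                  # gradients flow through the softmax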
Example #8
    def forward(self, predicted, target):
        _assert_no_grad(target)

        first_log = torch.log(predicted + 1e-6)
        second_log = torch.log(target + 1e-6)
        log_term = torch.mean(torch.pow(first_log - second_log, 2))
        sc_inv_term = torch.pow(torch.mean((first_log - second_log)), 2)
        loss = log_term - sc_inv_term

        return loss
Example #9
 def forward(self, scores, target):
     _assert_no_grad(target)
     if len(scores.size()) != 4:
         raise ValueError(
             "Scores should have 4 dimensions, but has {}: {}".format(
                 len(scores.size()), scores.size()))
     _, c, _, _ = scores.size()
     scores = scores.permute(0, 2, 3, 1).contiguous().view(-1, c)
     target = target.view(-1)
     return F.cross_entropy(scores, target, self.weight, self.size_average,
                            self.ignore_index, self.reduce)
Example #10
 def forward(self, input, target, level):
     Moddy._assert_no_grad(target)
     levy = categorize[level]
     zet = ((th.sqrt(th.sum(input * input, dim=-1) + 1)))
     inner = (th.sum(input * position_all[levy[target.data]], dim=-1))
     return th.sum(
         self.ruler(
             th.clamp(
                 zet * position_all_zet[categorize[level][target.data]] -
                 inner,
                 min=1 + eps).to(device)))
Example #11
    def forward(self, output, target):
        _assert_no_grad(target)
        delta_out = (output - output.mean(0, keepdim=True).expand_as(output))
        delta_target = (target -
                        target.mean(0, keepdim=True).expand_as(target))

        var_out = delta_out.pow(2).mean(0, keepdim=True)
        var_target = delta_target.pow(2).mean(0, keepdim=True)

        corrs = (delta_out * delta_target).sum(0, keepdim=True) / (
            (var_out + self.eps) * (var_target + self.eps)).sqrt()
        return corrs, delta_out.size(0)
Example #12
    def forward(self, input, target, weight):
        """
        Weighted Focal Loss
        """
        _assert_no_grad(target)
        if not (target.size() == input.size()):
            raise ValueError(
                "Target size ({}) must be the same as input size ({})".format(
                    target.size(), input.size()))

        loss = self.focal_loss(input, target, weight)
        return loss
Example #13
    def forward(self, input, target):
        _assert_no_grad(target)
        if not (target.size() == input.size()):
            raise ValueError(
                "Target size ({}) must be the same as input size ({})".format(
                    target.size(), input.size()))

        if self.reduce:
            loss = self.dice_loss(input, target)
        else:
            loss = self.dice_loss_batch(input, target)
        return loss
Example #14
 def forward(self, input, target):
     Moddy._assert_no_grad(target)
     fst = time.time()
     zet = ((th.sqrt(th.sum(input * input, dim=-1) + 1)))
     snd = time.time()
     inner = (th.sum(input * position[target.data], dim=-1))
     trd = time.time()
     resulty = th.sum(
         self.ruler(
             th.clamp(zet * position_zet[target.data] - inner,
                      min=1).cuda()))
     fth = time.time()
     return resulty, fst, snd, trd, fth
Example #15
    def forward(self, input, target, weight):
        """
        Weighted focal loss + Dice coefficient loss
        """
        _assert_no_grad(target)
        if not (target.size() == input.size()):
            raise ValueError(
                "Target size ({}) must be the same as input size ({})".format(
                    target.size(), input.size()))

        loss1 = self.focal_loss(input, target, weight)
        loss2 = self.dice_loss(input, target)
        return loss1, loss2
Example #16
    def forward(self, input, target, size_average=True, reduce=True):
        _assert_no_grad(target)

        input = input.double()
        target = target.double()
        # self.weight_matrix = weight_matrix.double()

        l = torch.mm((input - target)**2, self.weight_matrix)

        if not self.reduce:
            return l
        return torch.mean(l) if self.size_average else torch.sum(l)
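torch.mm((input - target) ** 2, self.weight_matrix) right-multiplies the per-feature squared errors by a weighting matrix. A minimal sketch with a hypothetical diagonal weight matrix (per-feature weights):

import torch

input = torch.rand(4, 3).double()
target = torch.rand(4, 3).double()
# hypothetical per-feature weights on the diagonal
weight_matrix = torch.diag(torch.tensor([1.0, 2.0, 0.5])).double()

l = torch.mm((input - target) ** 2, weight_matrix)  # shape (4, 3): weighted squared errors
print(torch.mean(l), torch.sum(l))                  # the two reductions used above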
Example #17
 def forward(self, input, target, weights=None):
     _assert_no_grad(target)
     if weights is not None:
         out = F.mse_loss(input,
                          target,
                          size_average=self.size_average,
                          reduce=self.average)
         if self.sum_weight:
             out = torch.sum(
                 out * weights) / (torch.sum(weights * target) + .0001)
         return out
     else:
         out = F.mse_loss(input, target, size_average=self.size_average)
         return out
Example #18
    def forward(self, input, target):
        input = (input + 1) / 2.0 * 4095.0
        target = (target + 1) / 2.0 * 4095.0
        loss._assert_no_grad(target)
        abs_diff = torch.abs(target - input)
        relative_abs_diff = abs_diff / (target + np.finfo(float).eps)
        rel_mae = torch.mean(relative_abs_diff)

        #from eval:
        # compute MRAE
        # diff = gt - rc
        # abs_diff = np.abs(diff)
        # relative_abs_diff = np.divide(abs_diff, gt + np.finfo(float).eps)  # added epsilon to avoid division by zero.
        # MRAEs[f] = np.mean(relative_abs_diff)
        return rel_mae
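The commented eval snippet above is the NumPy reference for MRAE; a minimal sketch showing that the torch version (without the [-1, 1] to [0, 4095] rescaling) matches it on hypothetical arrays:

import numpy as np
import torch

gt = np.random.rand(4, 3, 8, 8).astype(np.float32) + 0.1   # hypothetical ground truth
rc = np.random.rand(4, 3, 8, 8).astype(np.float32)         # hypothetical reconstruction

# NumPy reference, as in the commented eval code
mrae_np = np.mean(np.abs(gt - rc) / (gt + np.finfo(float).eps))

# torch version, mirroring the forward above
mrae_th = torch.mean(torch.abs(torch.from_numpy(gt) - torch.from_numpy(rc))
                     / (torch.from_numpy(gt) + np.finfo(float).eps))
print(np.isclose(mrae_np, mrae_th.item()))                  # True up to float32 rounding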
Example #19
 def forward(self, input, target, level):
     Moddy._assert_no_grad(target)
     levy = categorize[level]
     zet = ((th.sqrt(th.sum(input * input, dim=-1) + 1)))
     inner = (th.sum(input * position_all[levy[target.data]], dim=-1))
     resulty = (self.ruler(
         th.clamp(zet * position_all_zet[categorize[level][target.data]] -
                  inner,
                  min=1 + eps)).cuda())
     for i, tf in enumerate(th.isnan(resulty)):
         if tf:
             print('Target : ')
             print(position_all[levy[target.data[i]]])
             print('Output : ')
             print(input[i])
     return th.sum(resulty)
Example #20
    def forward(self, input, target):
        _assert_no_grad(target)
        # if a pixel's probability > 0.5, then assume it is true since labels might be noisy
        input = self.flatten_images(input)
        target = self.flatten_images(target)
        weights = torch.where(
            target > 0,
            torch.ones_like(target) * self.words_weight,  # words are 1
            torch.ones_like(target) * self.background_weight)

        bootstrap_target = self.beta * target + (1 - self.beta) * (
            F.sigmoid(input) > 0.5).float()
        return F.binary_cross_entropy_with_logits(
            input,
            bootstrap_target,
            weight=weights,
            size_average=self.size_average,
            reduce=self.reduce)
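The bootstrap_target above blends the possibly noisy labels with the model's own confident predictions (hard bootstrapping), so pixels the model is sure about are partly self-labelled. A minimal sketch of that blending with a hypothetical beta:

import torch
import torch.nn.functional as F

beta = 0.95                                    # hypothetical blending weight
logits = torch.randn(2, 16)                    # flattened pixel logits
target = torch.randint(0, 2, (2, 16)).float()  # noisy binary labels

# blend the labels with the model's own hard predictions
bootstrap_target = beta * target + (1 - beta) * (torch.sigmoid(logits) > 0.5).float()
loss = F.binary_cross_entropy_with_logits(logits, bootstrap_target)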
Example #21
    def forward(self, input, target):
        loss._assert_no_grad(target)

        n = input.data.shape[0]
        representation_vector_length = input[0].data.shape[0]

        # we should sum up distances for all pairs of inputs
        result = 0.0
        for i in range(n - 1):
            distances_i = self.get_distances_i(i, input, n,
                                               representation_vector_length)
            result = self.get_result_i(i, distances_i, target, result, n)
            #print('result ', result, 'i = ', i)

        if self.size_average:
            #result = torch.mean(result).cuda()
            result = torch.div(result, float(n))

        return result
Example #22
    def forward(self, input, target):

        # LongTensor,FloatTensor=type(target.data),type(input)
        if torch.cuda.is_available():
            LongTensor, FloatTensor = torch.cuda.LongTensor, torch.cuda.FloatTensor
        else:
            LongTensor, FloatTensor = torch.LongTensor, torch.FloatTensor
        _assert_no_grad(target)
        prob = F.softmax(input, dim=1)
        m = torch.distributions.Categorical(prob)
        sel2 = m.sample().type(LongTensor)  # replacement = True

        rewards = ((target.view(sel2.size()) == sel2) > 0).type(FloatTensor)

        rewards[rewards == 1] = self.correct_reward
        rewards[rewards == 0] = self.incorrect_pentality
        if self.normalize:
            self.meanreward = rewards.mean()
            rewards = (rewards - self.meanreward) / (
                rewards.std() + float(np.finfo(np.float32).eps))
        loss = -m.log_prob(sel2) * rewards
        return loss.mean()
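The loss above is a REINFORCE-style estimator: sample a class from the softmax, reward correct samples, and scale the negative log-probability by the reward. A minimal CPU sketch (hypothetical +1/-1 rewards, no reward normalization):

import torch
import torch.nn.functional as F

logits = torch.randn(4, 3, requires_grad=True)
target = torch.tensor([0, 2, 1, 1])

probs = F.softmax(logits, dim=1)
m = torch.distributions.Categorical(probs)
sampled = m.sample()                                  # one sampled class per example
rewards = torch.where(sampled == target,
                      torch.tensor(1.0), torch.tensor(-1.0))
loss = (-m.log_prob(sampled) * rewards).mean()        # policy-gradient-style loss
loss.backward()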
Example #23
    def forward(self, input, target):
        _assert_no_grad(target)
        fy = input[range(target.size()[0]), target]
        fj = input

        aj = fj - fy.view(-1, 1)

        if self.k != 1:
            # set positions where the target is to infinity to avoid selecting
            # them among the smallest m-k components
            aj[range(target.size()[0]), target] = np.inf
            # select the smallest m-k components
            aj = aj.topk(aj.shape[1] - self.k, largest=False)[0]

        loss = torch.log(1 + torch.exp(aj).sum(1))

        if self.reduce:
            if self.size_average:
                loss = loss.mean()
            else:
                loss = loss.sum()

        return loss
Example #24
    def forward(self, input, target, mask=None):
        _assert_no_grad(target)
        mean1 = F.conv2d(input,
                         self.weight,
                         padding=self.size,
                         groups=self.in_channels)
        mean2 = F.conv2d(target,
                         self.weight,
                         padding=self.size,
                         groups=self.in_channels)
        mean1_sq = mean1 * mean1
        mean2_sq = mean2 * mean2
        mean_12 = mean1 * mean2

        sigma1_sq = F.conv2d(input * input,
                             self.weight,
                             padding=self.size,
                             groups=self.in_channels) - mean1_sq
        sigma2_sq = F.conv2d(target * target,
                             self.weight,
                             padding=self.size,
                             groups=self.in_channels) - mean2_sq
        sigma_12 = F.conv2d(input * target,
                            self.weight,
                            padding=self.size,
                            groups=self.in_channels) - mean_12

        C1 = 0.01**2
        C2 = 0.03**2

        ssim = ((2 * mean_12 + C1) *
                (2 * sigma_12 + C2)) / ((mean1_sq + mean2_sq + C1) *
                                        (sigma1_sq + sigma2_sq + C2))
        if self.size_average:
            out = 1 - ssim.mean()
        else:
            out = 1 - ssim.view(ssim.size(0), -1).mean(1)
        return out
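The grouped convolutions above compute per-channel local means and (co)variances over a fixed window. A minimal sketch of that building block with a hypothetical uniform 5x5 window (3 channels, radius 2):

import torch
import torch.nn.functional as F

in_channels, size = 3, 2                          # hypothetical channel count and window radius
weight = torch.ones(in_channels, 1, 2 * size + 1, 2 * size + 1)
weight = weight / weight[0].numel()               # uniform averaging window per channel

img = torch.rand(1, in_channels, 16, 16)
local_mean = F.conv2d(img, weight, padding=size, groups=in_channels)
local_var = F.conv2d(img * img, weight, padding=size, groups=in_channels) - local_mean ** 2
print(local_mean.shape, local_var.shape)          # torch.Size([1, 3, 16, 16]) twice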
Example #25
def my_warpctc(acts, labels, act_lens, label_lens, size_average=False):
    """Chainer like CTC Loss
    acts: Tensor of (seqLength x batch x outputDim) containing output from network
    labels: 1 dimensional Tensor containing all the targets of the batch in one sequence
    act_lens: Tensor of size (batch) containing size of each output sequence from the network
    label_lens: Tensor of (batch) containing label length of each example
    """
    assert len(labels.size()) == 1  # labels must be 1 dimensional
    _assert_no_grad(labels)
    _assert_no_grad(act_lens)
    _assert_no_grad(label_lens)
    return _CTC.apply(acts, labels, act_lens, label_lens, size_average)
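The docstring's shape convention (all targets of the batch concatenated into a single 1-D tensor, with per-example lengths given separately) matches warp-ctc style bindings. A minimal sketch of building such inputs with hypothetical sizes; the final call is commented out because it needs the CTC extension:

import torch

# network output of shape (seqLength x batch x outputDim): 50 frames, 2 utterances, 20 symbols
acts = torch.randn(50, 2, 20)
# targets of length 3 and 4, concatenated into one 1-D tensor
labels = torch.tensor([1, 5, 2, 7, 7, 3, 9], dtype=torch.int32)
act_lens = torch.tensor([50, 48], dtype=torch.int32)    # valid output frames per utterance
label_lens = torch.tensor([3, 4], dtype=torch.int32)    # target length per utterance

# loss = my_warpctc(acts, labels, act_lens, label_lens)  # requires the warp-ctc binding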
Example #26
 def forward(self, acts, labels, act_lens, label_lens):
     """
     acts: Tensor of (seqLength x batch x outputDim) containing output from network
     labels: 1 dimensional Tensor containing all the targets of the batch in one sequence
     act_lens: Tensor of size (batch) containing size of each output sequence from the network
     label_lens: Tensor of (batch) containing label length of each example
     """
     assert len(labels.size()) == 1  # labels must be 1 dimensional
     _assert_no_grad(labels)
     _assert_no_grad(act_lens)
     _assert_no_grad(label_lens)
     return self.ctc(acts, labels, act_lens, label_lens, self.size_average)
Example #27
def chainer_like_ctc_loss(acts, labels, act_lens, label_lens):
    """Chainer like CTC Loss

    acts: Tensor of (seqLength x batch x outputDim) containing output from network
    labels: 1 dimensional Tensor containing all the targets of the batch in one sequence
    act_lens: Tensor of size (batch) containing size of each output sequence from the network
    label_lens: Tensor of (batch) containing label length of each example
    """
    assert len(labels.size()) == 1  # labels must be 1 dimensional
    from torch.nn.modules.loss import _assert_no_grad
    _assert_no_grad(labels)
    _assert_no_grad(act_lens)
    _assert_no_grad(label_lens)
    return _ChainerLikeCTC.apply(acts, labels, act_lens, label_lens)
Example #28
 def forward(self, input, target, weight):
     _assert_no_grad(target)
     return F.binary_cross_entropy(input, target, weight, self.size_average,
                                   self.reduce)
Example #29
 def forward(self, input, target, weight):
     _assert_no_grad(target)
     return self.weighted_mse_loss(input, target, weight)
Example #30
 def forward(self, input, target, level):
     Moddy._assert_no_grad(target)
     diff = input - position_all[categorize[level][target.data]]
     return th.sum(diff * diff)
 def forward(self, input, target):
     _assert_no_grad(target)
     return dice_coefficient(input, target, smooth=self.smooth)
 def forward(self, input, target):
     _assert_no_grad(target)
     return dice_loss(input, target, optimize_bg=self.optimize_bg,
                      weight=self.weight, smooth=self.smooth,
                      size_average=self.size_average,
                      reduce=self.reduce)
 def forward(self, input, target, n_objects, max_n_objects):
     _assert_no_grad(target)
     return discriminative_loss(input, target, n_objects, max_n_objects,
                                self.delta_var, self.delta_dist, self.norm,
                                self.usegpu)