import torch
import torch.nn.functional as F


def weighted_bce_loss(input, target, ignore_index=None, reduction='mean', from_logits=True):
    # Number of elements per sample (all dimensions except batch).
    n = target.size()[1:].numel()
    n = torch.full((len(target),), n, dtype=target.dtype, device=target.device)
    # Reduce over every non-batch dimension.
    dim = tuple(range(1, target.dim()))
    if ignore_index is not None:
        # Zero out ignored positions and count only the valid elements per sample.
        weight = (target != ignore_index).float()
        n = torch.sum(weight, dim=dim)
        target = target.masked_fill(target == ignore_index, 0)
    else:
        weight = 1
    # Balance the classes per sample: positives are weighted by the negative
    # fraction and negatives by the positive fraction.
    n_pos = torch.sum(target, dim=dim)
    neg_weight = n_pos / n
    # Restore the reduced dimensions so the weights broadcast over the target.
    neg_weight = neg_weight.reshape(-1, *([1] * (target.dim() - 1)))
    pos_weight = 1 - neg_weight
    weight = weight * (target * pos_weight + (1 - target) * neg_weight)
    if from_logits:
        return F.binary_cross_entropy_with_logits(input, target, weight, reduction=reduction)
    return F.binary_cross_entropy(input, target, weight, reduction=reduction)
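# Usage sketch (assumed tensor shapes, not from the original source): binary
# segmentation logits against 0/1 masks, with -1 marking pixels to ignore.
def _demo_weighted_bce():
    logits = torch.randn(4, 1, 32, 32)
    target = torch.randint(0, 2, (4, 1, 32, 32)).float()
    target[:, :, :4, :] = -1  # hypothetical ignored region
    return weighted_bce_loss(logits, target, ignore_index=-1)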
def f1_loss(pred, target, eps=1e-8, average='micro'):
    assert pred.shape == target.shape
    if average == 'samples':
        # Per-sample statistics over every non-batch dimension.
        dim = tuple(range(1, pred.dim()))
        tp = torch.sum(pred * target, dim=dim)
        fp = torch.sum(pred * (1 - target), dim=dim)
        fn = torch.sum((1 - pred) * target, dim=dim)
    elif average == 'micro':
        # Global statistics over the whole batch.
        tp = torch.sum(pred * target)
        fp = torch.sum(pred * (1 - target))
        fn = torch.sum((1 - pred) * target)
    else:
        raise ValueError(
            "`average` must be one of ['samples', 'micro'], got '%s'" % average)
    p = tp / (tp + fp + eps)  # precision
    r = tp / (tp + fn + eps)  # recall
    f1 = 2 * p * r / (p + r + eps)
    return 1 - torch.mean(f1)
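# Usage sketch (assumed shapes, not from the original source): soft multi-label
# probabilities, so the F1 surrogate stays differentiable.
def _demo_f1():
    pred = torch.sigmoid(torch.randn(4, 10))
    target = torch.randint(0, 2, (4, 10)).float()
    return f1_loss(pred, target, average='micro')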
def dice_loss(pred, target):
    # Soft Dice computed over all dimensions, batch included.
    dim = tuple(range(pred.dim()))
    numerator = 2 * torch.sum(pred * target, dim=dim)
    denominator = torch.sum(pred + target, dim=dim)
    # The +1 smoothing term keeps the loss defined when both masks are empty.
    losses = 1 - (numerator + 1) / (denominator + 1)
    return torch.mean(losses)
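# Usage sketch (assumed shapes, not from the original source): soft foreground
# probabilities against a binary mask.
def _demo_dice():
    pred = torch.sigmoid(torch.randn(4, 1, 32, 32))
    target = torch.randint(0, 2, (4, 1, 32, 32)).float()
    return dice_loss(pred, target)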