Example #1
    @classmethod
    def _log_conditional_prob(
            cls,
            box1: TBoxTensor,
            box2: TBoxTensor,
            temp: float = 1.,
            scale: Union[float, Tensor] = 1.) -> Tuple[Tensor, Tensor]:
        """Gives P(b1|b2=1), i.e. two values: one for b1=1 and one for b1=0.

        Assume the shape of boxes to be (**, 2, num_dim)
        """
        log_numerator = box1.intersection_log_soft_volume(
            box2, temp=temp)  # shape = (**,)
        log_denominator = box2.log_soft_volume(temp=temp)  # shape =(**,)

        if not cls._in_zero_one(scale):
            raise ValueError(
                "scale should be in (0,1] but is {}".format(scale))

        if isinstance(scale, float):
            s = torch.tensor(scale)
        else:
            s = scale

        log_cp1 = log_numerator - log_denominator + torch.log(s)  # log P(b1=1|b2=1)
        log_cp2 = log1mexp(log_cp1)  # log P(b1=0|b2=1) = log(1 - exp(log_cp1))

        return log_cp1, log_cp2
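
All of these examples call log1mexp without showing its definition. For reference, here is a minimal sketch of the standard numerically stable computation of log(1 - exp(x)) for x <= 0 (following Mächler, 2012); this is an assumption about the helper's behavior, not necessarily this repository's implementation:

    import math

    import torch
    from torch import Tensor

    def log1mexp(x: Tensor) -> Tensor:
        # Numerically stable log(1 - exp(x)) for x <= 0 (assumed behavior).
        # Split at -log(2): expm1 is accurate when exp(x) is near 1,
        # log1p is accurate when exp(x) is near 0.
        return torch.where(
            x > -math.log(2.0),
            torch.log(-torch.expm1(x)),
            torch.log1p(-torch.exp(x)),
        )

In Examples #1 and #2 the argument is a log conditional probability, so the x <= 0 precondition should hold whenever scale is in (0, 1].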
Example #2
    @classmethod
    def _dimension_wise_log_conditional_prob(
            cls,
            box1: TBoxTensor,
            box2: TBoxTensor,
            temp: float = 1.,
            scale: Union[float, Tensor] = 1.) -> Tuple[Tensor, Tensor]:
        """Gives P(b1|b2=1), i.e. two values: one for b1=1 and one for b1=0.

            Returns:
                Tuple of tensors of shape (**, num_dims). First tensor is
                log_p and the second is log(1-p)
        """
        log_numerators = box1.dimension_wise_intersection_log_soft_volume(
            box2, temp=temp)  # shape = (**, num_dims)
        log_denominators = box2.dimension_wise_log_soft_volume(
            temp=temp)  # shape =(**, num_dims)

        if not cls._in_zero_one(scale):
            raise ValueError(
                "scale should be in (0,1] but is {}".format(scale))

        if isinstance(scale, float):
            s = torch.tensor(scale)
        else:
            s = scale

        log_cp1 = log_numerators - log_denominators + torch.log(s)
        log_cp2 = log1mexp(log_cp1)

        return log_cp1, log_cp2
Example #3
    def get_loss(self, scores: Tuple[torch.Tensor, torch.Tensor],
                 label: torch.Tensor) -> torch.Tensor:
        log_p = scores[0]
        log1mp = log1mexp(log_p)
        logits = torch.stack([log1mp, log_p], dim=-1)
        loss = self.loss_f(logits, label) + \
            self.get_regularization_penalty()
        l1 = scores[1]
        # reduce l1 for pos, increase for the neg
        pos = label.type(torch.bool)
        neg = ~pos
        pos_l1s = l1[pos]
        neg_l1s = -1 * l1[neg]
        # protect against empty tensors as mean produces nan

        if pos_l1s.nelement():
            l1_loss_pos = torch.mean(pos_l1s)
        else:
            l1_loss_pos = 0.0

        if neg_l1s.nelement():
            l1_loss_neg = torch.mean(neg_l1s)
        else:
            l1_loss_neg = 0.0
        l1_loss = l1_loss_pos + l1_loss_neg

        return loss + self.l1_regularization_weight * l1_loss
Example #4
    def get_loss(self, scores: torch.Tensor,
                 label: torch.Tensor) -> torch.Tensor:
        log_p = scores
        log1mp = log1mexp(log_p)
        logits = torch.stack([log1mp, log_p], dim=-1)
        loss = self.loss_f(logits, label) + self.get_regularization_penalty()

        return loss
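
The stacked logits here are per-class log-probabilities, so loss_f is presumably a negative log-likelihood criterion such as torch.nn.NLLLoss (an assumption; the attribute is configured elsewhere). A standalone sketch of the same pattern:

    import torch
    from torch import nn

    # Hypothetical standalone version of the pattern above: stack
    # [log(1 - p), log p] as class log-probabilities and apply NLLLoss.
    p = torch.tensor([0.9, 0.2, 0.7])
    log_p = torch.log(p)                           # log P(y = 1)
    log1mp = torch.log1p(-p)                       # log P(y = 0)
    logits = torch.stack([log1mp, log_p], dim=-1)  # shape (3, 2)
    label = torch.tensor([1, 0, 1])
    loss = nn.NLLLoss()(logits, label)             # mean negative log-likelihood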
Example #5
    def get_loss_validation(self, scores: torch.Tensor,
                            label: torch.Tensor) -> torch.Tensor:
        log_p = scores
        log1mp = log1mexp(log_p)

        # KL(Bernoulli(label) || Bernoulli(p)); assumes soft labels strictly
        # inside (0, 1), since log(0) would produce -inf
        kld = torch.mean(label * (torch.log(label) - log_p) + (1 - label) *
                         (torch.log(1 - label) - log1mp))

        return kld
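
The expression above is the mean KL divergence KL(Bernoulli(label) || Bernoulli(p)) between the soft target and the predicted distribution. A quick hypothetical sanity check that it vanishes when the prediction matches the label:

    import torch

    # When log_p exactly matches a soft label in (0, 1), the KL term is zero.
    label = torch.tensor([0.3, 0.8])
    log_p = torch.log(label)        # perfect prediction
    log1mp = torch.log1p(-label)
    kld = torch.mean(label * (torch.log(label) - log_p)
                     + (1 - label) * (torch.log(1 - label) - log1mp))
    assert torch.isclose(kld, torch.tensor(0.0))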
Example #6
    def get_loss(self, scores: torch.Tensor,
                 label: torch.Tensor) -> torch.Tensor:
        # max margin loss expects label to be float
        log_p = F.logsigmoid(scores)
        log1mp = log1mexp(log_p)
        if not self.is_eval():
            with torch.no_grad():
                self.train_f1(torch.stack([log1mp, log_p], dim=-1), label)
        label = (2.0 * label) - 1  # map {0, 1} labels to {-1, 1}
        return -F.logsigmoid(
            label * scores).mean() + self.regularization_weight * self.reg_loss
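
After mapping labels to {-1, 1}, -F.logsigmoid(label * scores) is the standard logistic loss; it equals softplus(-label * scores), which makes the margin-style reading of the objective explicit. A small hypothetical check of that identity:

    import torch
    import torch.nn.functional as F

    scores = torch.tensor([2.0, -1.0, 0.5])
    label = torch.tensor([1.0, -1.0, -1.0])  # labels already in {-1, 1}
    # -log(sigmoid(z)) == log(1 + exp(-z)) == softplus(-z)
    assert torch.allclose(-F.logsigmoid(label * scores),
                          F.softplus(-label * scores))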
Example #7
    def get_loss(self, scores: Tuple[torch.Tensor, torch.Tensor, torch.Tensor],
                 label: torch.Tensor) -> torch.Tensor:
        log_p = scores[0]
        log1mp = log1mexp(log_p)
        logits = torch.stack([log1mp, log_p], dim=-1)
        centre_loss = self.loss_f_centre(scores[2], scores[1],
                                         torch.Tensor([1]))
        self.centre_loss_metric(centre_loss.item())
        loss = self.loss_f(logits,
                           label) + self.regularization_weight * centre_loss

        return loss
Example #8
    def get_loss(self, scores: torch.Tensor,
                 label: torch.Tensor) -> torch.Tensor:
        log_p = scores
        log1mp = log1mexp(log_p)
        logits = torch.stack([log1mp, log_p], dim=-1)
        loss = self.loss_f(logits, label) + self.get_regularization_penalty()

        if not self.is_eval():
            with torch.no_grad():
                self.train_f1(logits, label)

        return loss
Example #9
    def get_loss(self, scores: torch.Tensor,
                 label: torch.Tensor) -> torch.Tensor:
        log_p = scores
        log1mp = log1mexp(log_p)

        # binary cross-entropy computed directly in log space
        return torch.mean(-label * log_p - (1 - label) * log1mp)
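
This computes binary cross-entropy directly from log-probabilities, avoiding a round trip through exp and log. A hypothetical check that it matches torch's built-in criterion on the same inputs:

    import torch
    import torch.nn.functional as F

    # The log-space expression above agrees with F.binary_cross_entropy
    # evaluated on p = exp(log_p).
    p = torch.tensor([0.9, 0.2, 0.7])
    label = torch.tensor([1.0, 0.0, 1.0])
    log_p, log1mp = torch.log(p), torch.log1p(-p)
    manual = torch.mean(-label * log_p - (1 - label) * log1mp)
    assert torch.isclose(manual, F.binary_cross_entropy(p, label))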