Example #1
    def forward(self, logit, truth, weight=None):
        loss = 0
        if self.mode == 'cls':
            batch_size, num_class, H, W = logit.shape
            logit = logit.view(batch_size, num_class)
            truth = truth.view(batch_size, num_class)
            assert logit.shape == truth.shape

            loss = F.binary_cross_entropy_with_logits(logit, truth, reduction='none')

            if weight is None:
                loss = loss.mean()
            else:
                # balance positives and negatives by normalising each group
                # by its own count before applying the class weights
                pos = (truth > 0.5).float()
                neg = (truth < 0.5).float()
                pos_sum = pos.sum().item() + 1e-12
                neg_sum = neg.sum().item() + 1e-12
                loss = (weight[1]*pos*loss/pos_sum + weight[0]*neg*loss/neg_sum).sum()
        else:
            # flatten (N, 5, H, W) logits to (N*H*W, 5) and targets to (N*H*W,)
            logit = logit.permute(0, 2, 3, 1).contiguous().view(-1, 5)
            truth = truth.permute(0, 2, 3, 1).contiguous().view(-1)

            if weight is not None:
                # prepend a weight of 1 for class 0
                weight = torch.FloatTensor([1] + weight).cuda()
            loss = F.cross_entropy(logit, truth, weight=weight, reduction='none')
            loss = loss.mean()

        return loss
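To see the weighted classification branch in isolation, here is a minimal, self-contained sketch; the function name weighted_bce_with_logits, the tensor shapes, and the weight values are illustrative assumptions, not part of the original snippet.

    import torch
    import torch.nn.functional as F

    # stand-alone sketch of the 'cls' branch from Example #1
    def weighted_bce_with_logits(logit, truth, weight=(1.0, 1.0)):
        loss = F.binary_cross_entropy_with_logits(logit, truth, reduction='none')
        pos = (truth > 0.5).float()
        neg = (truth < 0.5).float()
        pos_sum = pos.sum().item() + 1e-12
        neg_sum = neg.sum().item() + 1e-12
        # each group is normalised by its own count, so rare positives
        # are not drowned out by frequent negatives
        return (weight[1] * pos * loss / pos_sum + weight[0] * neg * loss / neg_sum).sum()

    logit = torch.randn(8, 4)                      # (batch_size, num_class)
    truth = torch.randint(0, 2, (8, 4)).float()
    print(weighted_bce_with_logits(logit, truth, weight=(0.25, 0.75)))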
Example #2
 def forward(self, input, target):
     # per-pixel negative log-likelihood, kept unreduced
     loss = F.nll_loss(F.log_softmax(input, dim=1), target, reduction='none')
     # keep only the hardest 25% of pixels in every image
     fraction = 0.25
     batch_size = input.shape[0]
     k = input.shape[2] * input.shape[3] * fraction
     loss, _ = torch.topk(loss.view(batch_size, -1), int(k))
     loss = loss.mean(dim=1).sum()
     return loss
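Example #2 keeps only the hardest fraction of pixels in each image (an online-hard-example-mining style reduction). A self-contained sketch of the same idea, with the function name and shapes chosen for illustration only:

    import torch
    import torch.nn.functional as F

    def topk_pixel_loss(input, target, fraction=0.25):
        # input: (N, C, H, W) raw logits, target: (N, H, W) class indices
        loss = F.nll_loss(F.log_softmax(input, dim=1), target, reduction='none')
        k = int(input.shape[2] * input.shape[3] * fraction)
        # keep the k largest per-pixel losses of every image
        loss, _ = torch.topk(loss.view(input.shape[0], -1), k)
        return loss.mean(dim=1).sum()

    input = torch.randn(2, 5, 16, 16)
    target = torch.randint(0, 5, (2, 16, 16))
    print(topk_pixel_loss(input, target))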
Example #3
    def forward(self, logits, target, weights=None):
        assert logits.dim() == 2
        assert not target.requires_grad
        target = target.squeeze(1) if target.dim() == 2 else target
        assert target.dim() == 1
        # per-sample negative log-likelihood of the target class
        log_probs = F.log_softmax(logits, dim=1)
        loss = class_select(-log_probs, target)

        if self.weighted == 1 or self.weighted == 2:
            assert list(loss.size()) == list(weights.size())
            loss = weights * loss

        if self.aggregate == 'sum':
            return loss.sum()
        elif self.aggregate == 'mean':
            return loss.mean()
        elif self.aggregate is None:
            return loss
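Example #3 relies on a helper called class_select that is not shown here. One plausible implementation, given how it is used, simply gathers the entry of each row indexed by the target class; this is an assumption about the helper, not the original code:

    import torch

    def class_select(values, target):
        # pick values[i, target[i]] for every row i
        return values.gather(1, target.unsqueeze(1)).squeeze(1)

    values = torch.randn(4, 3)
    target = torch.tensor([0, 2, 1, 2])
    print(class_select(values, target))   # shape (4,)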
Example #4
    def forward(self, input, target, weights=None):
        assert input.dim() == 2
        assert not target.requires_grad
        target = target.squeeze(1) if target.dim() == 2 else target
        assert target.dim() == 1

        logpt = F.log_softmax(input, dim=1)
        logpt_gt = logpt.gather(1, target.unsqueeze(1))
        logpt_gt = logpt_gt.view(-1)
        pt_gt = logpt_gt.exp()
        assert logpt_gt.size() == pt_gt.size()

        # focal term (1 - p_t)^gamma down-weights well-classified examples
        loss = -self.alpha * torch.pow(1 - pt_gt, self.gamma) * logpt_gt

        if self.aggregate == 'sum':
            return loss.sum()
        elif self.aggregate == 'mean':
            return loss.mean()
        elif self.aggregate is None:
            return loss
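Example #4 is the usual focal loss, -alpha * (1 - p_t)^gamma * log(p_t): when the model is already confident about the true class, the (1 - p_t)^gamma factor shrinks that example's contribution. A tiny numeric sketch with made-up confidences:

    import torch

    alpha, gamma = 1.0, 2.0
    pt = torch.tensor([0.9, 0.6, 0.1])        # confidence assigned to the true class
    ce = -torch.log(pt)                       # plain cross-entropy terms
    focal = alpha * (1 - pt) ** gamma * ce    # focal terms
    print(ce)      # the easy example (0.9) still contributes ~0.105
    print(focal)   # the same example is scaled down by (1 - 0.9)^2 = 0.01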
Example #5
    def forward(self, input, target, lambda1):
        assert input.dim() == 2
        assert not target.requires_grad
        target = target.squeeze(1) if target.dim() == 2 else target
        assert target.dim() == 1

        logpt = F.log_softmax(input, dim=1)
        logpt_gt = logpt.gather(1, target.unsqueeze(1))
        logpt_gt = logpt_gt.view(-1)
        logpt_pred, _ = torch.max(logpt, 1)
        logpt_pred = logpt_pred.view(-1)
        assert logpt_gt.size() == logpt_pred.size()
        loss = -(1 - lambda1) * logpt_gt - lambda1 * logpt_pred

        if self.aggregate == 'sum':
            return loss.sum()
        elif self.aggregate == 'mean':
            return loss.mean()
        elif self.aggregate is None:
            return loss
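The lambda1 blend in Example #5 interpolates between ordinary cross-entropy (lambda1 = 0) and a term that rewards whichever class the model already prefers (lambda1 = 1). A small numeric sketch of the two extremes, with made-up logits:

    import torch
    import torch.nn.functional as F

    input = torch.tensor([[2.0, 0.5, -1.0]])   # logits for one sample
    target = torch.tensor([1])                  # true class is 1, model prefers class 0

    logpt = F.log_softmax(input, dim=1)
    logpt_gt = logpt.gather(1, target.unsqueeze(1)).view(-1)
    logpt_pred, _ = torch.max(logpt, 1)

    for lambda1 in (0.0, 0.5, 1.0):
        loss = -(1 - lambda1) * logpt_gt - lambda1 * logpt_pred
        print(lambda1, loss.item())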
Example #6
    def forward(self, input, target, weights=None):
        assert input.dim() == 2
        assert not target.requires_grad
        target = target.squeeze(1) if target.dim() == 2 else target
        assert target.dim() == 1

        logpt = F.log_softmax(input, dim=1)
        logpt_gt = logpt.gather(1, target.unsqueeze(1))
        logpt_gt = logpt_gt.view(-1)
        logpt_pred, _ = torch.max(logpt, 1)
        logpt_pred = logpt_pred.view(-1)
        assert logpt_gt.size() == logpt_pred.size()
        loss = -(1 - self.beta) * logpt_gt - self.beta * logpt_pred

        if self.weighted == 1:
            assert list(loss.size()) == list(weights.size())
            loss = loss * weights.exp()
        if self.aggregate == 'sum':
            return loss.sum()
        elif self.aggregate == 'mean':
            return loss.mean()
        elif self.aggregate is None:
            return loss
Example #7
 def forward(self, input, target, eps=1e-10, gamma=2):
     # input is expected to already hold probabilities; clamp to keep log() finite
     probs = torch.clamp(input, eps, 1 - eps)
     loss = -(torch.pow((1 - probs), gamma) * target * torch.log(probs) +
              torch.pow(probs, gamma) * (1 - target) * torch.log(1 - probs))
     loss = loss.sum(1)
     return loss.mean()
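Unlike Example #4, Example #7 expects probabilities rather than logits, and targets with the same shape as the input (e.g. multi-label indicators); clamping keeps the logarithms finite at 0 and 1. A minimal call, with shapes assumed for illustration:

    import torch

    probs = torch.sigmoid(torch.randn(4, 6))       # (batch, num_labels), already probabilities
    target = torch.randint(0, 2, (4, 6)).float()   # binary multi-label targets

    eps, gamma = 1e-10, 2
    probs = torch.clamp(probs, eps, 1 - eps)
    loss = -((1 - probs) ** gamma * target * torch.log(probs)
             + probs ** gamma * (1 - target) * torch.log(1 - probs))
    print(loss.sum(1).mean())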
Example #8
    def forward(
        self,
        target_dimension_indicator: torch.Tensor,
        past_time_feat: torch.Tensor,
        past_target_cdf: torch.Tensor,
        past_observed_values: torch.Tensor,
        past_is_pad: torch.Tensor,
        future_time_feat: torch.Tensor,
        future_target_cdf: torch.Tensor,
        future_observed_values: torch.Tensor,
    ) -> Tuple[torch.Tensor, ...]:
        """
        Computes the loss for training DeepVAR. All input tensors representing
        time series have NTC layout.

        Parameters
        ----------
        target_dimension_indicator
            Indices of the target dimension (batch_size, target_dim)
        past_time_feat
            Dynamic features of past time series (batch_size, history_length,
            num_features)
        past_target_cdf
            Past marginal CDF transformed target values (batch_size,
            history_length, target_dim)
        past_observed_values
            Indicator whether or not the values were observed (batch_size,
            history_length, target_dim)
        past_is_pad
            Indicator whether the past target values have been padded
            (batch_size, history_length)
        future_time_feat
            Future time features (batch_size, prediction_length, num_features)
        future_target_cdf
            Future marginal CDF transformed target values (batch_size,
            prediction_length, target_dim)
        future_observed_values
            Indicator whether or not the future values were observed
            (batch_size, prediction_length, target_dim)

        Returns
        -------
        loss
            Scalar loss, averaged over time and batch
        likelihoods
            Likelihoods for each time step
            (batch_size, context + prediction_length, 1)
        distr_args
            Distribution arguments (context + prediction_length,
            number_of_arguments)
        """

        seq_len = self.context_length + self.prediction_length

        # unroll the decoder in "training mode", i.e. by providing future data
        # as well
        rnn_outputs, _, scale, _, _ = self.unroll_encoder(
            past_time_feat=past_time_feat,
            past_target_cdf=past_target_cdf,
            past_observed_values=past_observed_values,
            past_is_pad=past_is_pad,
            future_time_feat=future_time_feat,
            future_target_cdf=future_target_cdf,
            target_dimension_indicator=target_dimension_indicator,
        )

        # put together target sequence
        # (batch_size, seq_len, target_dim)
        target = torch.cat(
            (past_target_cdf[:, -self.context_length:, ...], future_target_cdf),
            dim=1,
        )

        # assert_shape(target, (-1, seq_len, self.target_dim))

        distr_args = self.distr_args(rnn_outputs=rnn_outputs)
        if self.scaling:
            self.diffusion.scale = scale

        # add a trailing axis so that all likelihoods share the shape
        # (batch_size, subseq_length, 1)

        likelihoods = self.diffusion.log_prob(target, distr_args).unsqueeze(-1)

        # assert_shape(likelihoods, (-1, seq_len, 1))

        past_observed_values = torch.min(past_observed_values,
                                         1 - past_is_pad.unsqueeze(-1))

        # (batch_size, subseq_length, target_dim)
        observed_values = torch.cat(
            (
                past_observed_values[:, -self.context_length:, ...],
                future_observed_values,
            ),
            dim=1,
        )

        # mask the loss at a time step if one or more observations are missing
        # in the target dimensions (batch_size, subseq_length, 1)
        loss_weights, _ = observed_values.min(dim=-1, keepdim=True)

        # assert_shape(loss_weights, (-1, seq_len, 1))

        loss = weighted_average(likelihoods, weights=loss_weights, dim=1)

        # assert_shape(loss, (-1, -1, 1))

        # self.distribution = distr

        return (loss.mean(), likelihoods, distr_args)
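Example #8 hands the masking of unobserved time steps to a weighted_average utility. One plausible implementation of such a helper (an assumption for illustration; the original presumably imports its own) averages only over positions whose weight is non-zero:

    import torch

    def weighted_average(x, weights=None, dim=None):
        # average x along dim, counting only positions where weights is non-zero
        if weights is None:
            return x.mean() if dim is None else x.mean(dim=dim)
        weighted = torch.where(weights != 0, x * weights, torch.zeros_like(x))
        return weighted.sum(dim=dim) / weights.sum(dim=dim).clamp(min=1.0)

    likelihoods = torch.randn(2, 10, 1)
    loss_weights = torch.ones(2, 10, 1)
    loss_weights[0, :3] = 0                     # mask the first three steps of series 0
    print(weighted_average(likelihoods, loss_weights, dim=1).shape)   # torch.Size([2, 1])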