Example #1
File: losses.py  Project: RweBs/PDKE
    def forward(
        self,
        pos_scores: FloatTensorType,
        neg_scores: FloatTensorType,
    ) -> FloatTensorType:

        # Validate shapes: pos_scores is (num_pos,), neg_scores is (num_pos, num_neg).
        num_pos = match_shape(pos_scores, -1)
        num_neg = match_shape(neg_scores, num_pos, -1)

        # FIXME Workaround for https://github.com/pytorch/pytorch/issues/15870
        # and https://github.com/pytorch/pytorch/issues/15223.
        if num_pos == 0 or num_neg == 0:
            return torch.zeros((), requires_grad=True)

        # Each row holds [positive score, logsumexp of that row's negative scores].
        scores = torch.cat([
            pos_scores.unsqueeze(1),
            neg_scores.logsumexp(dim=1, keepdim=True),
        ], dim=1)

        # The target class for every row is 0, i.e. the positive column.
        loss = F.cross_entropy(
            scores,
            torch.zeros((), dtype=torch.long).expand(num_pos),
            reduction='sum',
        )
        return loss
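
A minimal, self-contained sketch of what this forward pass computes (the shapes and stand-in tensors below are assumptions for illustration; FloatTensorType and match_shape come from the surrounding project and are not needed here): each positive score is contrasted against the logsumexp of its own negatives, and cross-entropy is taken with class 0, the positive column, as the target.

import torch
import torch.nn.functional as F

pos_scores = torch.randn(4)        # assumed shape: one score per positive edge
neg_scores = torch.randn(4, 10)    # assumed shape: 10 negative scores per positive

# Column 0 holds the positive score, column 1 the aggregated negatives.
scores = torch.cat([
    pos_scores.unsqueeze(1),
    neg_scores.logsumexp(dim=1, keepdim=True),
], dim=1)

# Every row's correct class is 0 (the positive column).
target = torch.zeros((), dtype=torch.long).expand(4)
loss = F.cross_entropy(scores, target, reduction='sum')
print(loss)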
Example #2
    def forward(
        self,
        pos_scores: FloatTensorType,
        neg_scores: FloatTensorType,
    ) -> FloatTensorType:
        num_pos = match_shape(pos_scores, -1)
        num_neg = match_shape(neg_scores, num_pos, -1)

        # FIXME Workaround for https://github.com/pytorch/pytorch/issues/15870
        # and https://github.com/pytorch/pytorch/issues/15223.
        if num_pos == 0 or num_neg == 0:
            return torch.zeros((),
                               device=pos_scores.device,
                               requires_grad=True)

        scores = torch.cat([
            pos_scores.unsqueeze(1),
            neg_scores.logsumexp(dim=1, keepdim=True),
        ], dim=1)
        loss = F.cross_entropy(
            scores,
            pos_scores.new_zeros((), dtype=torch.long).expand(num_pos),
            reduction='sum',
        )

        return loss
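
The only differences from example #1 are device handling: the empty-batch fallback passes device=pos_scores.device, and the cross-entropy target is built with pos_scores.new_zeros, so both tensors follow the device of the scores instead of defaulting to CPU. A small sketch of that fallback (the empty input below is an assumption for illustration):

import torch

pos_scores = torch.randn(0)    # empty batch of positive scores
# Scalar zero loss on the same device as the scores, still part of the graph.
fallback = torch.zeros((), device=pos_scores.device, requires_grad=True)
print(fallback.device, fallback.requires_grad)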
Example #3
    def forward(
        self,
        pos_scores: FloatTensorType,
        neg_scores: FloatTensorType,
        weight: Optional[FloatTensorType],
    ) -> FloatTensorType:
        num_pos = match_shape(pos_scores, -1)
        num_neg = match_shape(neg_scores, num_pos, -1)

        # FIXME Workaround for https://github.com/pytorch/pytorch/issues/15870
        # and https://github.com/pytorch/pytorch/issues/15223.
        if num_pos == 0 or num_neg == 0:
            return torch.zeros((),
                               device=pos_scores.device,
                               requires_grad=True)

        scores = torch.cat([
            pos_scores.unsqueeze(1),
            neg_scores.logsumexp(dim=1, keepdim=True),
        ], dim=1)
        if weight is not None:
            loss_per_sample = F.cross_entropy(
                scores,
                pos_scores.new_zeros((), dtype=torch.long).expand(num_pos),
                reduction="none",
            )
            match_shape(weight, num_pos)
            loss_per_sample = loss_per_sample * weight
        else:
            loss_per_sample = F.cross_entropy(
                scores,
                pos_scores.new_zeros((), dtype=torch.long).expand(num_pos),
                reduction="sum",
            )

        return loss_per_sample.sum()
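
Example #3 adds an optional per-sample weight: with reduction="none", F.cross_entropy returns one loss per positive, which is scaled by weight before the final sum. A hedged usage sketch (the tensors and weights below are made up for illustration):

import torch
import torch.nn.functional as F

pos_scores = torch.randn(4)
neg_scores = torch.randn(4, 10)
weight = torch.tensor([1.0, 0.5, 2.0, 1.0])    # hypothetical per-edge weights

scores = torch.cat([
    pos_scores.unsqueeze(1),
    neg_scores.logsumexp(dim=1, keepdim=True),
], dim=1)
target = pos_scores.new_zeros((), dtype=torch.long).expand(4)

loss_per_sample = F.cross_entropy(scores, target, reduction="none")   # shape (4,)
loss = (loss_per_sample * weight).sum()
print(loss)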