Code Example #1
    def forward(self, pos_caseid, pos_entityid, neg_caseid, neg_entity):
        """
        重写forward函数,传入四个Variable
        :param pos_caseid:正例案件的id
        :param pos_entityid:正例的实体id
        :param neg_caseid:负采样得到的负例案件id
        :param neg_entity:负采样得到的负例实体id
        :return:loss的值
        """
        # score of the positive pairs
        pos_case = self.case_emb(pos_caseid)
        pos_entity = self.entity_emb(pos_entityid)
        pos_score = t.mul(pos_case, pos_entity)
        pos_score = t.sum(pos_score, dim=1)
        pos_score = F.logsigmoid(pos_score)
        # score of the negative pairs (same embedding tables as the positive pairs)
        neg_case = self.case_emb(neg_caseid)
        neg_entity = self.entity_emb(neg_entity)
        neg_score = t.mul(neg_case, neg_entity)
        neg_score = t.sum(neg_score, dim=1)
        neg_score = F.logsigmoid(-1 * neg_score)
        # total score (negated log-likelihood, returned as the loss)
        pos_score_sum = t.sum(pos_score)
        neg_score_sum = t.sum(neg_score)
        all_score = -1 * (pos_score_sum + neg_score_sum)

        return all_score
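The forward pass above references embedding layers defined elsewhere in the class. A minimal sketch of the assumed surrounding module, with the imports the snippet relies on; the class name, vocabulary sizes, and embedding dimension are assumptions for illustration:

import torch as t
import torch.nn as nn
import torch.nn.functional as F


class CaseEntityEmbedding(nn.Module):
    """Sketch of the module assumed by the forward pass above."""

    def __init__(self, n_cases, n_entities, emb_dim=100):
        super().__init__()
        # one lookup table for case ids and one for entity ids,
        # used for both the positive and the negatively sampled pairs
        self.case_emb = nn.Embedding(n_cases, emb_dim)
        self.entity_emb = nn.Embedding(n_entities, emb_dim)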
Code Example #2
File: metrics.py  Project: violetguos/lapool
import torch
import torch.nn.functional as F


def masked_soft_margin_loss(y_pred, y_true, mask=None, per_tasks=False):
    r"""
    Compute the masked soft margin loss.

    .. caution::
        This is kept for compatibility and may be removed in a later version.
        See the PyTorch `multilabel_soft_margin_loss` implementation, which
        supports weights and a different reduction strategy.

    Arguments
    ----------
        y_pred: torch.Tensor
            logits from network output
        y_true: torch.Tensor
            True label for the classification task.
        mask: torch.Tensor, optional
            weight/mask covering the output.
        per_tasks: bool, optional
            whether to return loss for each task independently
            (Default value = False)

    Returns
    -------
        loss metric according to the set of arguments

    """
    loss = -(y_true * F.logsigmoid(y_pred) +
             (1 - y_true) * F.logsigmoid(-y_pred))
    if mask is not None:
        loss *= mask
    if not per_tasks:
        loss = loss.sum(dim=1)
    return torch.mean(loss, dim=0)
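A quick usage sketch of the function above; the shapes, values, and mask pattern are made up for illustration:

import torch

y_pred = torch.randn(8, 3)                      # logits: 8 samples, 3 tasks
y_true = torch.randint(0, 2, (8, 3)).float()    # binary labels
mask = torch.ones(8, 3)
mask[:, 2] = 0                                  # drop the third task from the loss

loss = masked_soft_margin_loss(y_pred, y_true, mask=mask)         # scalar
per_task = masked_soft_margin_loss(y_pred, y_true, mask=mask,
                                   per_tasks=True)                # shape [3]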
Code Example #3
    def forward(self, pos_app, pos_entity, neg_app, neg_entity):
        """
        重写forward函数,传入四个Variable
        :param pos_app: 正例app
        :param pos_entity: 正例实体
        :param neg_app: 负例app
        :param neg_entity: 负例实体
        :return: 损失函数的值
        """
        # 正例得分
        pos_emb_app = self.app_emb(pos_app)
        pos_emb_entity = self.entity_emb(pos_entity)
        pos_score = t.mul(pos_emb_app, pos_emb_entity)
        pos_score = pos_score.squeeze()
        pos_score = t.sum(pos_score, dim=1)
        pos_score = F.logsigmoid(pos_score)
        # score of the negative pairs
        neg_emb_app = self.app_emb(neg_app)
        neg_emb_entity = self.entity_emb(neg_entity)
        neg_score = t.mul(neg_emb_app, neg_emb_entity)
        neg_score = neg_score.squeeze()
        neg_score = t.sum(neg_score, dim=1)
        neg_score = F.logsigmoid(-1 * neg_score)
        # total score (negated log-likelihood, returned as the loss)
        pos_score_sum = t.sum(pos_score)
        neg_score_sum = t.sum(neg_score)
        all_score = -1 * (pos_score_sum + neg_score_sum)

        return all_score
Code Example #4
    def forward(self, x_positive: torch.Tensor, x_negative: torch.Tensor,
                y: torch.Tensor):
        # [batch_size, embedding_dim, 1]
        h = self.embedding_v(x_positive).mean(dim=1).unsqueeze(-1)
        # [batch_size, 1, embedding_dim]
        target_vector = self.embedding_u(y).unsqueeze(1)
        # [batch_size, 1, 1]
        positive_score = torch.bmm(target_vector, h)
        # [batch_size]
        positive_score = F.logsigmoid(positive_score).squeeze()

        # [batch_size, negative_samples, embedding_dim]
        negative_vectors = self.embedding_u(x_negative)
        # [batch_size, negative_samples, 1]
        negative_score = torch.bmm(-negative_vectors, h)
        # [batch_size]
        negative_score = F.logsigmoid(negative_score).squeeze(dim=-1).sum(
            dim=1)
        return -torch.mean(positive_score + negative_score)
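The forward pass above assumes two lookup tables, `embedding_v` for the context words and `embedding_u` for the target/negative words. A minimal sketch of such a module and the expected input shapes; the class name, vocabulary size, and embedding dimension are assumptions:

import torch
import torch.nn as nn
import torch.nn.functional as F


class CBOWNegativeSampling(nn.Module):
    """Sketch of the module assumed by the forward pass above."""

    def __init__(self, vocab_size, embedding_dim):
        super().__init__()
        self.embedding_v = nn.Embedding(vocab_size, embedding_dim)  # context words
        self.embedding_u = nn.Embedding(vocab_size, embedding_dim)  # target words

    # forward(self, x_positive, x_negative, y) is the method shown above;
    # expected shapes:
    #   x_positive: [batch_size, window_size]       context word indices
    #   x_negative: [batch_size, negative_samples]  sampled negative word indices
    #   y:          [batch_size]                    target word indices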
Code Example #5
    def forward(self, x_positive: torch.Tensor, y: torch.Tensor):
        # [batch_size, embedding_dim, 1]
        h = self.embedding_v(x_positive).mean(dim=1).unsqueeze(-1)
        # [batch_size, 1, 1]; the embedding dimension (100) is hard-coded here
        positive_score = torch.bmm(
            self.embedding_u(y).reshape(y.shape[0], 1, 100), h)
        # [batch_size]
        positive_score = -F.logsigmoid(positive_score.squeeze())

        # [batch_size, vocab_size, 1]; unlike sampled negatives, h is scored
        # against the negated output embeddings of the entire vocabulary
        negative_score = torch.bmm(
            self.embedding_u.weight.repeat(y.shape[0], 1, 1).neg(), h)
        # [batch_size]
        negative_score = negative_score.sigmoid().squeeze().sum(dim=1).log()
        return torch.mean(positive_score + negative_score)
Code Example #6
    def forward(self, input, target):
        # Inspired by the implementation of binary_cross_entropy_with_logits
        if not (target.size() == input.size()):
            raise ValueError(
                "Target size ({}) must be the same as input size ({})".format(
                    target.size(), input.size()))

        max_val = (-input).clamp(min=0)
        loss = input - input * target + max_val + (
            (-max_val).exp() + (-input - max_val).exp()).log()

        # invprobs = log(1 - p) if target is 1 and log(p) if target is 0,
        # i.e. log(1 - p_t); exp(gamma * invprobs) is the focal factor (1 - p_t)^gamma
        invprobs = F.logsigmoid(-input * (target * 2 - 1))
        loss = (invprobs * self.gamma).exp() * loss

        return loss.mean()
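The `max_val` expression above is the numerically stable form of binary cross-entropy with logits. A quick sanity check (tensor shapes and values are illustrative) showing that it matches PyTorch's own implementation before the focal weighting is applied:

import torch
import torch.nn.functional as F

logits = torch.randn(4, 10)                       # raw network outputs
targets = torch.randint(0, 2, (4, 10)).float()    # binary labels, same shape

# Same per-element term as in the forward pass above
max_val = (-logits).clamp(min=0)
bce = logits - logits * targets + max_val + (
    (-max_val).exp() + (-logits - max_val).exp()).log()

reference = F.binary_cross_entropy_with_logits(logits, targets, reduction='none')
assert torch.allclose(bce, reference, atol=1e-6)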