Example #1
def forward(self, embedding, target):
    # class logits; they must lie in [-1, 1] since F.acos is applied below
    origin_logits = self.fc(embedding)
    one_hot_target = F.one_hot(target, self.num_class).astype("bool")
    # additive angular margin cos(theta + m), used only where the logit is non-negative
    large_margined_logit = F.cos(F.acos(origin_logits) + self.margin)
    small_margined_logit = origin_logits
    margined_logit = F.where(origin_logits >= 0, large_margined_logit,
                             small_margined_logit)
    # apply the margin only at the target-class position, then rescale
    logits = F.where(one_hot_target, margined_logit, origin_logits)
    logits = logits * self.scale
    loss = F.loss.cross_entropy(logits, target)
    accuracy = F.topk_accuracy(origin_logits, target, topk=1)
    return loss, accuracy
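As a self-contained illustration of the two F.where steps above, the following sketch uses made-up cosine values and assumed margin/scale settings; the imports and numbers are illustrative and not part of the original snippet:

import megengine as mge
import megengine.functional as F

origin_logits = mge.tensor([[0.8, -0.3, 0.1]])  # illustrative cosines for 3 classes
target = mge.tensor([0])                        # true class index
margin, scale, num_class = 0.5, 64.0, 3         # assumed values, not from the source

one_hot_target = F.one_hot(target, num_class).astype("bool")
# angular margin only where the cosine is non-negative ...
margined = F.where(origin_logits >= 0,
                   F.cos(F.acos(origin_logits) + margin),
                   origin_logits)
# ... and only at the target-class position, then rescale
logits = F.where(one_hot_target, margined, origin_logits) * scale
print(logits.numpy())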
Example #2
import numpy as np

import megengine as mge
import megengine.functional as F
from megengine.autodiff import GradManager


def test_empty_grad_in_backward():
    x = mge.Parameter(F.full(100, 0.5))
    y = mge.Parameter(F.ones(100))

    gm = GradManager()
    gm.attach([x, y])

    with gm:
        # x == 0.5 everywhere, so the mask (x > 0.7) is all-False and every
        # element of z is taken from y: x gets zero gradient, y gets ones.
        z = F.where(x > 0.7, x, y)
        loss = z.sum()
        gm.backward(loss)
        assert np.all(x.grad.numpy() == 0)
        assert np.all(y.grad.numpy() == 1)
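For contrast with the all-False condition above, a minimal sketch (same imports, made-up sizes and values) with a mixed mask shows F.where routing the gradient element-wise to whichever branch each output element came from:

def demo_mixed_mask_grad():  # hypothetical helper, not part of the original test
    x = mge.Parameter(F.full(4, 2.0))
    y = mge.Parameter(F.ones(4))

    gm = GradManager()
    gm.attach([x, y])

    with gm:
        mask = mge.tensor([True, False, True, False])
        z = F.where(mask, x, y)
        gm.backward(z.sum())

    # each parameter only receives gradient where its own branch was selected
    print(x.grad.numpy())  # [1. 0. 1. 0.]
    print(y.grad.numpy())  # [0. 1. 0. 1.]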
Example #3
import megengine.functional as F
from megengine import Tensor


def smooth_l1_loss(pred: Tensor, target: Tensor, beta: float = 1.0) -> Tensor:
    r"""Smooth L1 Loss

    Args:
        pred (Tensor):
            the predictions
        target (Tensor):
            the assigned targets with the same shape as pred
        beta (float):
            the parameter of smooth l1 loss; the loss is quadratic for
            |pred - target| < beta and linear beyond it.

    Returns:
        the calculated smooth l1 loss.
    """
    x = pred - target
    abs_x = F.abs(x)
    if beta < 1e-5:
        loss = abs_x
    else:
        in_loss = 0.5 * x ** 2 / beta
        out_loss = abs_x - 0.5 * beta
        loss = F.where(abs_x < beta, in_loss, out_loss)
    return loss
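A short usage sketch for the function above, assuming the same imports; the input values are made up and the loss is left unreduced, exactly as the function returns it:

pred = Tensor([0.2, 1.5, -3.0])    # illustrative predictions
target = Tensor([0.0, 1.0, 0.0])   # illustrative targets

elementwise = smooth_l1_loss(pred, target, beta=1.0)
print(elementwise.numpy())          # per-element smooth L1 values
print(elementwise.mean().numpy())   # reduce to a scalar if a mean loss is wanted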
Example #4
     1000,
 ),
 (
     "transpose",
     lambda x: MF.transpose(x,
                            list(range(len(x.shape)))[::-1]),
     lambda x: torch.permute(x,
                             list(range(len(x.shape)))[::-1]),
     [(100, 100)],
     [(64, 512, 16, 16)],
     True,
     1000,
 ),
 (
     "where",
     lambda x: MF.where(x > 0.5, x, x),
     lambda x: torch.where(x > 0.5, x, x),
     [(100, 100)],
     [(64, 512, 16, 16)],
     True,
     1000,
 ),
 (
     "uniform",
     lambda x: mge.random.uniform(0, 1, x.shape),
     lambda x: torch.rand(x.shape, device="cuda"),
     [(100, 100)],
     [(64, 512, 16, 16)],
     True,
     1000,
 ),
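Each entry above appears to bundle (name, MegEngine op, PyTorch op, small shapes, large shapes, a flag, iteration count); those field meanings are an assumption here, as is the runner below, which exercises only the MegEngine side of the "where" entry with an illustrative timing loop:

import time

import megengine as mge
import megengine.functional as MF


def bench_megengine(name, mge_fn, shapes, iters):
    """Time a MegEngine op from one benchmark entry (illustrative only)."""
    for shape in shapes:
        x = mge.random.uniform(0, 1, shape)
        mge_fn(x).numpy()  # warm up; .numpy() blocks until execution finishes
        start = time.perf_counter()
        for _ in range(iters):
            out = mge_fn(x)
        out.numpy()  # synchronize before stopping the clock
        elapsed = time.perf_counter() - start
        print(f"{name} {shape}: {elapsed / iters * 1e6:.2f} us/iter")


bench_megengine("where", lambda x: MF.where(x > 0.5, x, x), [(100, 100)], 1000)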