def cross_entropy_with_probs(probs, target, dim=-1, eps=1e-8):
    """Cross-entropy loss taking probabilities (not logits) as input.

    Clamps probabilities to at least ``eps`` before taking the log so that
    zero-probability entries do not produce ``-inf``/``nan``.

    Args:
        probs: tensor of probabilities.
        target: target distribution / index structure consumed by
            ``index_one_hot`` along ``dim``.
        dim: dimension over which the distribution lives (default: last).
        eps: lower clamp applied to ``probs`` for numerical stability.

    Returns:
        The (positive) cross-entropy, i.e. the negated selected log-probability.
    """
    safe_log = torch.log(probs.clamp(min=eps))
    return -index_one_hot(safe_log, dim, target)
def cross_entropy_with_logits(logits, target, dim=-1):
    """Cross-entropy loss taking raw logits as input.

    Uses ``F.log_softmax`` for a numerically stable log-probability, then
    selects the target entry via ``index_one_hot`` and negates it.

    Args:
        logits: tensor of unnormalized scores.
        target: target distribution / index structure consumed by
            ``index_one_hot`` along ``dim``.
        dim: dimension over which to normalize (default: last, matching
            ``cross_entropy_with_probs``).

    Returns:
        The (positive) cross-entropy, i.e. the negated selected log-probability.
    """
    # log_softmax is stable for large-magnitude logits (log-sum-exp trick),
    # unlike softmax followed by log.
    log_prob = F.log_softmax(logits, dim)
    neg_xent = index_one_hot(log_prob, dim, target)
    return -neg_xent