Example #1
import numpy as np
from megengine import tensor
import megengine.functional as F
from megengine.test import assertTensorClose  # legacy MegEngine test helper


def test_onehot_low_dimension():
    inp = tensor(np.arange(1, 4, dtype=np.int32))
    out = F.one_hot(inp, num_classes=4)  # num_classes is required; 4 matches the reference below

    assertTensorClose(
        out.numpy(),
        np.eye(4, dtype=np.int32)[np.arange(1, 4, dtype=np.int32)])
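The reference value in these tests uses the standard NumPy one-hot idiom: indexing an identity matrix with the label array, `np.eye(num_classes)[labels]`, yields one one-hot row per label. A minimal standalone check, plain NumPy only:

import numpy as np

labels = np.arange(1, 4, dtype=np.int32)    # [1, 2, 3]
onehot = np.eye(4, dtype=np.int32)[labels]  # row i of eye(4) is the one-hot vector for class i
print(onehot)
# [[0 1 0 0]
#  [0 0 1 0]
#  [0 0 0 1]]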
Example #2
import numpy as np
from megengine import tensor
import megengine.functional as F


def onehot_low_dimension():
    inp = tensor(np.arange(1, 4, dtype=np.int32))
    out = F.one_hot(inp, num_classes=4)

    np.testing.assert_allclose(
        out.numpy(),
        np.eye(4, dtype=np.int32)[np.arange(1, 4, dtype=np.int32)])
Example #3
import numpy as np
from megengine import tensor
import megengine.functional as F
from megengine.test import assertTensorClose  # legacy MegEngine test helper


def test_onehot_high_dimension():
    arr = np.array(
        [[3, 2, 4, 4, 2, 4, 0, 4, 4, 1], [4, 1, 1, 3, 2, 2, 4, 2, 4, 3]],
        dtype=np.int32)

    inp = tensor(arr)
    out = F.one_hot(inp, 10)

    assertTensorClose(out.numpy(), np.eye(10, dtype=np.int32)[arr])
Example #4
import numpy as np
from megengine import tensor
import megengine.functional as F


def onehot_high_dimension():
    arr = np.array(
        [[3, 2, 4, 4, 2, 4, 0, 4, 4, 1], [4, 1, 1, 3, 2, 2, 4, 2, 4, 3]],
        dtype=np.int32,
    )

    inp = tensor(arr)
    out = F.one_hot(inp, 10)

    np.testing.assert_allclose(out.numpy(),
                               np.eye(10, dtype=np.int32)[arr])
Example #5
def forward(self, embedding, target):
    origin_logits = self.fc(embedding)
    one_hot_target = F.one_hot(target, self.num_class).astype("bool")
    # additive angular margin: the logits are cosines, so shifting the
    # angle by `margin` lowers the target-class logit
    large_margined_logit = F.cos(F.acos(origin_logits) + self.margin)
    small_margined_logit = origin_logits
    # only shift where the cosine is non-negative, keeping acos well-behaved
    margined_logit = F.where(origin_logits >= 0, large_margined_logit,
                             small_margined_logit)
    # the margin applies to the target class only; other logits are untouched
    logits = F.where(one_hot_target, margined_logit, origin_logits)
    logits = logits * self.scale
    loss = F.loss.cross_entropy(logits, target)
    accuracy = F.topk_accuracy(origin_logits, target, topk=1)
    return loss, accuracy
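For context, this forward pass follows the ArcFace-style additive angular margin: treating the logits as cosine similarities (as in ArcFace, where features and class weights are L2-normalized), `F.cos(F.acos(z) + m)` rotates the angle between embedding and class weight by `m`, which strictly lowers the target-class logit. A plain NumPy sketch, with an illustrative margin value not taken from the example above:

import numpy as np

m = 0.5                              # illustrative margin
z = np.array([0.9, 0.5, 0.1])        # cosine logits
margined = np.cos(np.arccos(z) + m)  # shift each angle by m
print(margined)                      # every value drops below the original cosine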
Example #6
    def forward(self, embedding, target):
        origin_logits = self.fc(embedding)
        one_hot_target = F.one_hot(target, self.num_class)

        # get how much to decrease
        delta_one_hot_target = one_hot_target * self.margin

        # apply the decrease
        logits = origin_logits - delta_one_hot_target
        logits = logits * self.scale
        loss = F.loss.cross_entropy(logits, target)
        accuracy = F.topk_accuracy(origin_logits, target, topk=1)
        return loss, accuracy
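Example #6 is the simpler CosFace-style variant: instead of shifting the angle, it subtracts the margin directly from the target-class cosine via the one-hot mask, then rescales before the softmax cross-entropy. A small NumPy sketch with illustrative margin and scale values:

import numpy as np

logits = np.array([0.9, 0.5, 0.1])  # cosine logits, target class = 0
one_hot = np.array([1.0, 0.0, 0.0])
m, s = 0.35, 64.0                   # illustrative CosFace margin and scale
print((logits - m * one_hot) * s)   # [35.2 32.   6.4] -- only the target logit is reduced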
Example #7
def fwd(x):
    return F.one_hot(x, num_classes=4)
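This kind of thin wrapper typically gets handed to a tracing or compilation helper in a test suite. A hedged usage sketch, assuming MegEngine's `megengine.jit.trace` decorator (the original context around `fwd` is not shown here):

import numpy as np
from megengine import tensor
from megengine.jit import trace
import megengine.functional as F

@trace
def fwd(x):
    return F.one_hot(x, num_classes=4)

print(fwd(tensor(np.arange(3, dtype=np.int32))).numpy())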
Example #8

import numpy as np
from megengine import tensor
import megengine.functional as F
''' flatten '''
x = tensor(np.random.random((10, 28, 28, 1)))
out = F.flatten(x, start_axis=1,
                end_axis=-1)  # flatten the sub-tensor spanning dims start_axis through end_axis
print(x.shape)
print(out.shape)
''' Softmax '''
# Example: a handwritten-digit image whose label is 3, represented with one-hot encoding
inp = tensor([3])
out = F.one_hot(inp, num_classes=10)
print(out.numpy())  # the output is 2-D because the batch size n is included; here n=1

# the whole train_label array (from the dataset loaded earlier) can also be converted to one-hot
print(F.one_hot(tensor(train_label), num_classes=10).shape)

inp = tensor([1., 2., 3., 4.])
average = F.div(inp, F.sum(inp))  # plain normalization: inp / sum(inp)
softmax = F.softmax(inp)          # exponential normalization: exp(inp) / sum(exp(inp))
print(average.numpy().round(decimals=4))
print(softmax.numpy().round(decimals=4))
''' Cross Entropy '''
# when the prediction is exactly right, the loss should be 0
pred = tensor([0., 0., 0., 1., 0., 0., 0., 0., 0., 0.]).reshape(1, -1)
label = tensor([3])
loss = F.loss.cross_entropy(pred, label, with_logits=False)
print(loss.numpy())  # -log(pred[label]) = -log(1) = 0
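With `with_logits=False`, `cross_entropy` treats `pred` as an already-normalized probability distribution, so the loss here is exactly `-log(pred[label]) = -log(1) = 0`. With the default `with_logits=True`, a softmax would first be applied to `pred` and the loss would come out nonzero.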