Example #1
 def test_softmax_normal(self):
     # Unmasked softmax: every row sums to 1 and the input shape is preserved.
     b = Softmax()
     d = Variable(torch.FloatTensor(np.random.random((5, 3))))
     pred = b(d).data.numpy()
     predsums = np.sum(pred, axis=1)
     self.assertTrue(np.allclose(predsums, np.ones_like(predsums)))
     self.assertEqual(d.size(), pred.shape)
Example #2
 def test_masked_softmax_numerical_stability(self):
     # Masking out the extreme logits in column 1 must give the same result
     # as dropping that column entirely, even for magnitudes up to 1e9.
     d = Variable(
         torch.FloatTensor(
             np.asarray([[-1e9, 1e9, 1], [-1e6, 1e6, 1], [-1e3, 1e3, 1],
                         [-1e2, 1e2, 1], [-1e1, 1e1, 1], [-1, 1e2, 1],
                         [1, 1e2, 1], [0.5, 1e2, 1]])))
     m = Variable(
         torch.FloatTensor(
             np.asarray([[1, 0, 1], [1, 0, 1], [1, 0, 1], [1, 0, 1],
                         [1, 0, 1], [1, 0, 1], [1, 0, 1], [1, 0, 1]])))
     d2 = d[:, [0, 2]]
     o, _ = Softmax()(d, m)
     pred = o.data.numpy()
     pred2 = Softmax()(d2).data.numpy()
     pred3 = Softmax()(d).data.numpy()
     print(pred)
     print(pred2)
     print(pred3)
     self.assertTrue(np.allclose(pred[:, 1], np.zeros_like(pred[:, 1])))
     self.assertTrue(np.allclose(pred[:, [0, 2]], pred2))
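
For context, the behavior these tests check can be reproduced by max-shifting the logits, sending masked entries to -inf, and renormalizing. The sketch below is a minimal illustration under that assumption, not the repository's actual Softmax: the name MaskedSoftmax is hypothetical, it only handles a 0/1 mask of the same shape as the input (the 2D case here), and it copies the tests' convention of returning (output, mask) when a mask is passed.

 import torch

 class MaskedSoftmax(torch.nn.Module):
     # Hypothetical sketch, not the module under test above.
     def __init__(self, temperature=1.0):
         super(MaskedSoftmax, self).__init__()
         self.temperature = temperature

     def forward(self, x, mask=None):
         x = x / self.temperature
         if mask is not None:
             # Send masked logits to -inf so they contribute exp(-inf) = 0.
             x = x.masked_fill(mask == 0, float("-inf"))
         # Max-shift before exponentiating; this is what keeps logits like
         # 1e9 in the stability test above from overflowing.
         e = torch.exp(x - x.max(dim=-1, keepdim=True)[0])
         out = e / e.sum(dim=-1, keepdim=True)
         return (out, mask) if mask is not None else out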
Example #3
 # Every sub-component of the attention generator is injectable; the masked
 # Softmax is the default normalizer.
 def __init__(self,
              dist=None,
              normalizer=Softmax(),
              data_selector=None,
              scale=1.,
              dropout=0.):
     super(AttentionGenerator, self).__init__()
     self.dist = dist
     self.data_selector = data_selector
     self.normalizer = normalizer
     self.dropout = nn.Dropout(p=dropout) if dropout > 0. else None
     self.scale = scale
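
For illustration, a hypothetical construction call using only the parameters visible in this signature (the temperature keyword on Softmax appears in Example #8 below):

 # Hypothetical values; argument names are taken from the signature above.
 gen = AttentionGenerator(normalizer=Softmax(temperature=0.5), dropout=0.1)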
Example #4
 def test_softmax_3D_prop_seq_mask(self):
     # A 2D sequence mask over a 3D input: the mask is passed through
     # unchanged and every row still sums to 1 over the last axis.
     b = Softmax()
     d = Variable(torch.FloatTensor(np.random.random((5, 4, 3))))
     m = np.ones((5, 4))
     m[:, 2:] = 0
     m = Variable(torch.FloatTensor(m))
     pred, mask = b(d, m)
     predmask = mask.data.numpy()
     pred = pred.data.numpy()
     self.assertTrue(np.allclose(predmask, m.data.numpy()))
     predsums = np.sum(pred, axis=-1)
     self.assertTrue(np.allclose(predsums, np.ones_like(predsums)))
Example #5
 def test_softmax_normal_masked(self):
     # Masked entries get probability 0; the remaining entries renormalize
     # so each row still sums to 1.
     b = Softmax()
     d = Variable(torch.FloatTensor(np.random.random((5, 3))))
     m = np.ones_like(d.data.numpy())
     m[:, 2] = 0
     m = Variable(torch.FloatTensor(m))
     pred, _ = b(d, m)
     pred = pred.data.numpy()
     print(pred)
     self.assertTrue(np.allclose(np.zeros_like(pred[:, 2]), pred[:, 2]))
     self.assertEqual(d.size(), pred.shape)
     predsums = np.sum(pred, axis=-1)
     self.assertTrue(np.allclose(predsums, np.ones_like(predsums)))
Example #6
 def test_logsoftmax_masked_same_as_softmax(self):
     # exp of the masked log-softmax must match the masked softmax elementwise.
     lsm = LogSoftmax()
     d = Variable(torch.FloatTensor(np.random.random((5, 3))))
     m = np.ones_like(d.data.numpy())
     m[:, 2] = 0
     m = Variable(torch.FloatTensor(m))
     pred, _ = lsm(d, m)
     pred = pred.data.numpy()
     print(pred)
     self.assertTrue(
         np.allclose(np.zeros_like(pred[:, 2]),
                     np.exp(pred)[:, 2]))
     self.assertEqual(d.size(), pred.shape)
     predsums = np.sum(np.exp(pred), axis=-1)
     print(predsums)
     self.assertTrue(np.allclose(predsums, np.ones_like(predsums)))
     predexp, _ = Softmax()(d, m)
     predexp = predexp.data.numpy()
     self.assertTrue(np.allclose(predexp, np.exp(pred)))
Example #7
 def test_masked_logsoftmax_numerical_stability(self):
     # Same stability setup as Example #2, but in log space: the masked
     # column must come out as log(0) = -inf.
     d = Variable(
         torch.FloatTensor(
             np.asarray([[-1e9, 1e9, 1], [-1e6, 1e6, 1], [-1e3, 1e3, 1],
                         [-1e2, 1e2, 1], [-1e1, 1e1, 1], [-1, 1e2, 1],
                         [1, 1e2, 1], [0.5, 1e2, 1]])))
     m = Variable(
         torch.FloatTensor(
             np.asarray([[1, 0, 1], [1, 0, 1], [1, 0, 1], [1, 0, 1],
                         [1, 0, 1], [1, 0, 1], [1, 0, 1], [1, 0, 1]])))
     d2 = d[:, [0, 2]]
     o, _ = LogSoftmax()(d, m)
     pred = o.data.numpy()
     pred2 = LogSoftmax()(d2).data.numpy().astype("float64")
     pred3 = Softmax()(d2).data.numpy().astype("float64")
     print(pred)
     print(pred2)
     print(np.log(pred3))
     onetotwo = np.isclose(pred[:, [0, 2]], pred2)
     onetothree = np.isclose(pred[:, [0, 2]], np.log(pred3))
     self.assertTrue(np.all(onetothree | onetotwo))
     self.assertTrue(
         np.allclose(pred[:, 1], np.log(np.zeros_like(pred[:, 1]))))
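
In log space the same stability can be obtained from logsumexp, which performs the max-shift internally. Again a hedged sketch, not the repository's LogSoftmax; the hypothetical class mirrors the (output, mask) return convention used in these tests.

 import torch

 class MaskedLogSoftmax(torch.nn.Module):
     # Hypothetical sketch, not the module under test above.
     def forward(self, x, mask=None):
         if mask is not None:
             # Masked entries become -inf, i.e. log(0), matching the
             # assertion at the end of this test.
             x = x.masked_fill(mask == 0, float("-inf"))
         # logsumexp max-shifts internally, so logits like 1e9 stay finite.
         out = x - torch.logsumexp(x, dim=-1, keepdim=True)
         return (out, mask) if mask is not None else out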
Example #8
 def test_softmax_normal_with_temperature(self):
     # With temperature=1e-6 the output should be sharply peaked (near
     # one-hot), assuming logits are divided by the temperature.
     b = Softmax(temperature=1e-6)
     d = Variable(torch.FloatTensor(np.random.random((5, 3))))
     pred = b(d).data.numpy()
     print(pred)
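
Assuming the common convention that the logits are divided by the temperature before normalization, temperature=1e-6 drives each row toward a one-hot vector at its argmax; the test itself only prints the result. A quick NumPy illustration of that expectation:

 import numpy as np

 logits = np.array([0.2, 0.7, 0.1])
 z = logits / 1e-6              # tiny temperature -> huge gaps between logits
 z = z - z.max()                # max-shift for numerical stability
 p = np.exp(z) / np.exp(z).sum()
 print(p)                       # ~[0., 1., 0.], essentially one-hot at argmax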