Example #1
 def test_forward(self):
     # fix the embedding weights
     weights = self._get_random_embedding_weights()
     emb = Embeddings(embedding_dim=self.emb_size,
                      vocab_size=self.vocab_size,
                      padding_idx=self.pad_idx)
     self._fill_embeddings(emb, weights)
     indices = torch.tensor([0, 1, self.pad_idx, 9], dtype=torch.long)
     embedded = emb.forward(x=indices)
     # an embedding lookup is just row selection from the weight matrix
     self.assertTensorEqual(
         embedded,
         torch.index_select(input=weights, index=indices, dim=0))
     # after embedding, representations for PAD should still be zero
     self.assertTensorEqual(embedded[2], torch.zeros([self.emb_size]))
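
The Embeddings class exercised by these tests is not shown on this page. Below is a minimal sketch that is consistent with the constructor arguments, the lut attribute, and the scale flag used in the tests; the default values, docstring, and type hints are assumptions, not taken from the source:

    import math
    import torch
    from torch import nn

    class Embeddings(nn.Module):
        """Embedding lookup, optionally scaled by sqrt(embedding_dim)."""

        def __init__(self, embedding_dim: int, vocab_size: int,
                     padding_idx: int = 1, scale: bool = False):
            super().__init__()
            self.embedding_dim = embedding_dim
            self.scale = scale
            # nn.Embedding zero-initializes the row at padding_idx, which is
            # why test_forward expects zeros for the PAD position
            self.lut = nn.Embedding(vocab_size, embedding_dim,
                                    padding_idx=padding_idx)

        def forward(self, x: torch.Tensor) -> torch.Tensor:
            # plain table lookup; with scale=True, multiply by sqrt(dim)
            if self.scale:
                return self.lut(x) * math.sqrt(self.embedding_dim)
            return self.lut(x)
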
Example #2
 def test_scale(self):
     # fix the embedding weights
     weights = self._get_random_embedding_weights()
     emb = Embeddings(embedding_dim=self.emb_size,
                      vocab_size=self.vocab_size,
                      padding_idx=self.pad_idx,
                      scale=True)
     emb.lut.weight.data = weights
     indices = torch.tensor([0, 1, self.pad_idx, 9], dtype=torch.long)
     embedded = emb.forward(x=indices)
     # with scale=True, the lookup result is multiplied by sqrt(embedding_dim)
     self.assertTensorNotEqual(
         torch.index_select(input=weights, index=indices, dim=0), embedded)
     self.assertTensorEqual(
         torch.index_select(input=weights, index=indices, dim=0)
         * (self.emb_size ** 0.5), embedded)
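
The sqrt(embedding_dim) factor follows the Transformer convention from "Attention Is All You Need", which multiplies embedding outputs by sqrt(d_model). A standalone sanity check of the same relationship using a plain nn.Embedding; the sizes and names here are illustrative, not taken from the test fixture:

    import torch
    from torch import nn

    emb_size, vocab_size, pad_idx = 16, 10, 1
    lut = nn.Embedding(vocab_size, emb_size, padding_idx=pad_idx)
    idx = torch.tensor([0, 1, pad_idx, 9], dtype=torch.long)

    # an embedding lookup equals row selection; scaling multiplies the result
    scaled = lut(idx) * (emb_size ** 0.5)
    rows = torch.index_select(input=lut.weight, index=idx, dim=0)
    assert torch.equal(scaled, rows * (emb_size ** 0.5))

    # the padding row is zero-initialized, so it stays zero after scaling
    assert torch.equal(scaled[2], torch.zeros(emb_size))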