    def test_auto_regressive_seq_decoder_init(self):
        decoder_inout_dim = 4
        vocab, decoder_net = create_vocab_and_decoder_net(decoder_inout_dim)

        AutoRegressiveSeqDecoder(
            vocab, decoder_net, 10, Embedding(vocab.get_vocab_size(), decoder_inout_dim)
        )

        with pytest.raises(ConfigurationError):
            AutoRegressiveSeqDecoder(
                vocab, decoder_net, 10, Embedding(vocab.get_vocab_size(), decoder_inout_dim + 1)
            )

    def test_auto_regressive_seq_decoder_indices_to_tokens(self):
        decoder_inout_dim = 4
        vocab, decoder_net = create_vocab_and_decoder_net(decoder_inout_dim)

        auto_regressive_seq_decoder = AutoRegressiveSeqDecoder(
            vocab, decoder_net, 10, Embedding(vocab.get_vocab_size(), decoder_inout_dim)
        )

        predictions = torch.tensor([[3, 2, 5, 0, 0], [2, 2, 3, 5, 0]])

        tokens_ground_truth = [["B", "A"], ["A", "A", "B"]]
        predicted_tokens = auto_regressive_seq_decoder.indices_to_tokens(predictions.numpy())
        assert predicted_tokens == tokens_ground_truth

    def test_auto_regressive_seq_decoder_post_process(self):
        decoder_inout_dim = 4
        vocab, decoder_net = create_vocab_and_decoder_net(decoder_inout_dim)

        auto_regressive_seq_decoder = AutoRegressiveSeqDecoder(
            vocab, decoder_net, 10, Embedding(vocab.get_vocab_size(), decoder_inout_dim)
        )

        predictions = torch.tensor([[3, 2, 5, 0, 0], [2, 2, 3, 5, 0]])

        tokens_ground_truth = [["B", "A"], ["A", "A", "B"]]

        output_dict = {"predictions": predictions}
        predicted_tokens = auto_regressive_seq_decoder.post_process(output_dict)["predicted_tokens"]
        assert predicted_tokens == tokens_ground_truth
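
The tests above depend on a create_vocab_and_decoder_net helper and on imports that this excerpt does not show. Below is a minimal sketch, assuming the AllenNLP 0.9-style module layout (paths may differ in later releases); the helper body is reconstructed here, not copied from the original source:

import pytest
import torch

from allennlp.common.checks import ConfigurationError
from allennlp.common.util import END_SYMBOL, START_SYMBOL
from allennlp.data import Vocabulary
from allennlp.modules.seq2seq_decoders import AutoRegressiveSeqDecoder, LstmCellDecoderNet
from allennlp.modules.token_embedders import Embedding


def create_vocab_and_decoder_net(decoder_inout_dim):
    # "A" and "B" land at indices 2 and 3 (0 is padding, 1 is the OOV token),
    # with START_SYMBOL and END_SYMBOL at 4 and 5, which is consistent with the
    # `tokens_ground_truth` asserted above (decoding stops at END_SYMBOL).
    vocab = Vocabulary()
    for token in ["A", "B", START_SYMBOL, END_SYMBOL]:
        vocab.add_token_to_namespace(token)
    decoder_net = LstmCellDecoderNet(
        decoding_dim=decoder_inout_dim, target_embedding_dim=decoder_inout_dim
    )
    return vocab, decoder_net
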
Example #4
    def test_auto_regressive_seq_decoder_forward(self):
        batch_size, time_steps, decoder_inout_dim = 2, 3, 4
        vocab, decoder_net = create_vocab_and_decoder_net(decoder_inout_dim)

        auto_regressive_seq_decoder = AutoRegressiveSeqDecoder(
            vocab,
            decoder_net,
            10,
            Embedding(num_embeddings=vocab.get_vocab_size(),
                      embedding_dim=decoder_inout_dim),
        )

        encoded_state = torch.rand(batch_size, time_steps, decoder_inout_dim)
        source_mask = torch.ones(batch_size, time_steps).long()
        target_tokens = {
            "tokens": {
                "tokens": torch.ones(batch_size, time_steps).long()
            }
        }
        source_mask[0, 1:] = 0
        encoder_out = {
            "source_mask": source_mask,
            "encoder_outputs": encoded_state
        }

        assert auto_regressive_seq_decoder.forward(encoder_out) == {}
        loss = auto_regressive_seq_decoder.forward(encoder_out,
                                                   target_tokens)["loss"]
        assert loss.shape == torch.Size([]) and loss.requires_grad
        auto_regressive_seq_decoder.eval()
        assert "predictions" in auto_regressive_seq_decoder.forward(
            encoder_out)

    def test_auto_regressive_seq_decoder_tensor_and_token_based_metric(self):
        # Set all seeds to a fixed value (torch, numpy, etc.).
        # This makes the behavior of the `auto_regressive_seq_decoder` below
        # deterministic (i.e., parameter initialization and `encoded_state = torch.randn(..)`).
        prepare_environment(Params({}))

        batch_size, time_steps, decoder_inout_dim = 2, 3, 4
        vocab, decoder_net = create_vocab_and_decoder_net(decoder_inout_dim)

        auto_regressive_seq_decoder = AutoRegressiveSeqDecoder(
            vocab,
            decoder_net,
            10,
            Embedding(vocab.get_vocab_size(), decoder_inout_dim),
            tensor_based_metric=BLEU(),
            token_based_metric=DummyMetric(),
        ).eval()

        encoded_state = torch.randn(batch_size, time_steps, decoder_inout_dim)
        source_mask = torch.ones(batch_size, time_steps).long()
        target_tokens = {"tokens": torch.ones(batch_size, time_steps).long()}
        source_mask[0, 1:] = 0
        encoder_out = {"source_mask": source_mask, "encoder_outputs": encoded_state}

        auto_regressive_seq_decoder.forward(encoder_out, target_tokens)
        assert auto_regressive_seq_decoder.get_metrics()["BLEU"] == 1.388809517005903e-11
        assert auto_regressive_seq_decoder.get_metrics()["em"] == 0.0
        assert auto_regressive_seq_decoder.get_metrics()["f1"] == 1 / 3
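
DummyMetric in the test above is also not part of this excerpt (BLEU, Params, and prepare_environment come from allennlp.training.metrics, allennlp.common, and allennlp.common.util respectively). The sketch below shows a token-based metric exposing the "em" and "f1" keys that the asserts read; the scoring rule is an assumption for illustration, not the implementation that produces em == 0.0 and f1 == 1/3 in the original test suite:

# Import paths used by the test above (AllenNLP 0.9-era layout, assumed):
from allennlp.common import Params
from allennlp.common.util import prepare_environment
from allennlp.training.metrics import BLEU, Metric


class DummyMetric(Metric):
    """Token-based metric tracking exact match ("em") and a simple token-overlap F1."""

    def __init__(self):
        self.reset()

    def __call__(self, predictions, gold_targets):
        # Both arguments are lists of token lists, one entry per batch element.
        for predicted, gold in zip(predictions, gold_targets):
            self._total += 1
            self._em += float(predicted == gold)
            overlap = len(set(predicted) & set(gold))
            precision = overlap / len(predicted) if predicted else 0.0
            recall = overlap / len(gold) if gold else 0.0
            if precision + recall > 0:
                self._f1 += 2 * precision * recall / (precision + recall)

    def get_metric(self, reset: bool = False):
        metrics = {
            "em": self._em / self._total if self._total else 0.0,
            "f1": self._f1 / self._total if self._total else 0.0,
        }
        if reset:
            self.reset()
        return metrics

    def reset(self):
        self._em = 0.0
        self._f1 = 0.0
        self._total = 0
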
Example #6
    def test_model(self):
        self.setUp()
        embedding = Embedding(
            num_embeddings=self.vocab.get_vocab_size('tokens'),
            embedding_dim=EMBEDDING_DIM)
        embedder = BasicTextFieldEmbedder({'tokens': embedding})
        encoder = PytorchSeq2SeqWrapper(
            DenoisingEncoder(bidirectional=True,
                             num_layers=2,
                             input_size=EMBEDDING_DIM,
                             hidden_size=HIDDEN_DIM,
                             use_bridge=True))
        decoder_net = LstmCellDecoderNet(decoding_dim=HIDDEN_DIM,
                                         target_embedding_dim=EMBEDDING_DIM)
        decoder = AutoRegressiveSeqDecoder(max_decoding_steps=100,
                                           target_namespace='tokens',
                                           target_embedder=embedding,
                                           beam_size=5,
                                           decoder_net=decoder_net,
                                           vocab=self.vocab)
        model = SalienceSeq2Seq(encoder=encoder,
                                decoder=decoder,
                                vocab=self.vocab,
                                source_text_embedder=embedder)
        optimizer = optim.Adam(model.parameters(), lr=0.1)
        iterator = BucketIterator(batch_size=4,
                                  sorting_keys=[("source_tokens", "num_tokens")])
        iterator.index_with(self.vocab)
        if torch.cuda.is_available():
            cuda_device = 0
            model = model.cuda(cuda_device)
        else:
            cuda_device = -1
        trainer = Trainer(model=model,
                          optimizer=optimizer,
                          train_dataset=self.train_dataset,
                          validation_dataset=self.val_dataset,
                          iterator=iterator,
                          num_epochs=2,
                          cuda_device=cuda_device)
        trainer.train()
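
Example #6 relies on fixtures this excerpt omits: the EMBEDDING_DIM and HIDDEN_DIM constants, plus self.vocab, self.train_dataset, and self.val_dataset prepared in setUp (DenoisingEncoder and SalienceSeq2Seq are project-specific classes, also not shown). Below is a sketch of a plausible setUp using AllenNLP 0.9's Seq2SeqDatasetReader; the class name, constant values, and file paths are illustrative placeholders, not taken from the original project:

from unittest import TestCase

import torch.optim as optim

# AllenNLP 0.9-era import paths for the standard names used in test_model above (assumed):
from allennlp.data.dataset_readers import Seq2SeqDatasetReader
from allennlp.data.iterators import BucketIterator
from allennlp.data.vocabulary import Vocabulary
from allennlp.modules.seq2seq_encoders import PytorchSeq2SeqWrapper
from allennlp.modules.text_field_embedders import BasicTextFieldEmbedder
from allennlp.training.trainer import Trainer

EMBEDDING_DIM = 128  # illustrative value
HIDDEN_DIM = 256     # illustrative value


class SalienceSeq2SeqTest(TestCase):  # hypothetical class name for this excerpt
    def setUp(self):
        reader = Seq2SeqDatasetReader()
        # Hypothetical TSV fixtures with "source<TAB>target" lines.
        self.train_dataset = reader.read('fixtures/train.tsv')
        self.val_dataset = reader.read('fixtures/val.tsv')
        self.vocab = Vocabulary.from_instances(self.train_dataset + self.val_dataset)
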
Example #7
        vocab.save_to_files(vocab_path)
    embedding = Embedding(num_embeddings=vocab.get_vocab_size('train'),
                          vocab_namespace='train',
                          embedding_dim=128,
                          trainable=True)
    embedder = BasicTextFieldEmbedder({'tokens': embedding})
    encoder = PytorchSeq2SeqWrapper(
        torch.nn.LSTM(input_size=128,
                      hidden_size=128,
                      num_layers=1,
                      batch_first=True))
    decoder_net = LstmCellDecoderNet(decoding_dim=128,
                                     target_embedding_dim=128)
    decoder = AutoRegressiveSeqDecoder(max_decoding_steps=100,
                                       target_namespace='train',
                                       target_embedder=embedding,
                                       beam_size=5,
                                       decoder_net=decoder_net,
                                       vocab=vocab)
    model = Seq2SeqModel(encoder=encoder,
                         decoder=decoder,
                         vocab=vocab,
                         src_embedder=embedder)
    optimizer = optim.SGD(model.parameters(), lr=0.1)
    iterator = BucketIterator(batch_size=8,
                              sorting_keys=[("source_tokens", "num_tokens")])
    iterator.index_with(vocab)
    if torch.cuda.is_available():
        cuda_device = 0
        model = model.cuda(cuda_device)
    else:
        cuda_device = -1