Example #1
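Presumably these two methods come from a test class in allennlp-models' test suite. To make the fragment self-contained, here is a plausible preamble; the class name is hypothetical, and the exact import paths can differ between allennlp versions (note also that the first test passes `max_decoding_steps` positionally while the second configures decoding through `Lazy[BeamSearch]`, which suggests the snippets target different library versions):

import torch

from allennlp.common import Lazy, Params
from allennlp.common.testing import AllenNlpTestCase
from allennlp.common.util import prepare_environment
from allennlp.modules.token_embedders import Embedding
from allennlp.nn.beam_search import BeamSearch
from allennlp.training.metrics import BLEU
from allennlp_models.generation.modules.seq_decoders import AutoRegressiveSeqDecoder

# `create_vocab_and_decoder_net` and `DummyMetric` are local test helpers;
# hedged sketches of both follow the tests below.


class AutoRegressiveSeqDecoderTest(AllenNlpTestCase):  # hypothetical class name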
    def test_auto_regressive_seq_decoder_forward(self):
        batch_size, time_steps, decoder_inout_dim = 2, 3, 4
        vocab, decoder_net = create_vocab_and_decoder_net(decoder_inout_dim)

        auto_regressive_seq_decoder = AutoRegressiveSeqDecoder(
            vocab,
            decoder_net,
            10,
            Embedding(num_embeddings=vocab.get_vocab_size(),
                      embedding_dim=decoder_inout_dim),
        )

        encoded_state = torch.rand(batch_size, time_steps, decoder_inout_dim)
        source_mask = torch.ones(batch_size, time_steps).bool()
        target_tokens = {
            "tokens": {
                "tokens": torch.ones(batch_size, time_steps).long()
            }
        }
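        # Mask out everything after the first timestep of the first sequence.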
        source_mask[0, 1:] = False
        encoder_out = {
            "source_mask": source_mask,
            "encoder_outputs": encoded_state
        }

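        # In training mode with no target tokens, forward() has nothing to compute.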
        assert auto_regressive_seq_decoder.forward(encoder_out) == {}
        loss = auto_regressive_seq_decoder.forward(encoder_out,
                                                   target_tokens)["loss"]
        assert loss.shape == torch.Size([]) and loss.requires_grad
        auto_regressive_seq_decoder.eval()
        assert "predictions" in auto_regressive_seq_decoder.forward(
            encoder_out)
    def test_auto_regressive_seq_decoder_tensor_and_token_based_metric(self):
        # Set all seeds to a fixed value (torch, numpy, etc.) so that the
        # `auto_regressive_seq_decoder` below behaves deterministically
        # (i.e., parameter initialization and `encoded_state = torch.randn(..)`).
        prepare_environment(Params({}))

        batch_size, time_steps, decoder_inout_dim = 2, 3, 4
        vocab, decoder_net = create_vocab_and_decoder_net(decoder_inout_dim)

        auto_regressive_seq_decoder = AutoRegressiveSeqDecoder(
            vocab,
            decoder_net,
            Embedding(num_embeddings=vocab.get_vocab_size(),
                      embedding_dim=decoder_inout_dim),
            beam_search=Lazy(BeamSearch,
                             constructor_extras={
                                 "max_steps": 10,
                                 "beam_size": 4
                             }),
            tensor_based_metric=BLEU(),
            token_based_metric=DummyMetric(),
        ).eval()

        encoded_state = torch.randn(batch_size, time_steps, decoder_inout_dim)
        source_mask = torch.ones(batch_size, time_steps).bool()
        target_tokens = {
            "tokens": {
                "tokens": torch.ones(batch_size, time_steps).long()
            }
        }
        source_mask[0, 1:] = False
        encoder_out = {
            "source_mask": source_mask,
            "encoder_outputs": encoded_state
        }

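        # In eval mode this forward pass also runs beam search and updates both metrics.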
        auto_regressive_seq_decoder.forward(encoder_out, target_tokens)
        metrics = auto_regressive_seq_decoder.get_metrics()
        assert metrics["BLEU"] == 1.388809517005903e-11
        assert metrics["em"] == 0.0
        assert metrics["f1"] == 1 / 3