Example #1
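
These snippets come from xnmt's tests and example scripts and omit their imports. A plausible preamble, assuming the module layout of the xnmt version the snippets appear to target (the paths are a best guess, not taken from the source):

# Assumed import preamble: module paths are a guess at the xnmt layout
# these snippets target and may have moved in later releases.
import xnmt.events
import xnmt.training_regimen
from xnmt.attender import MlpAttender
from xnmt.batcher import SrcBatcher
from xnmt.bridge import CopyBridge
from xnmt.decoder import MlpSoftmaxDecoder
from xnmt.embedder import SimpleWordEmbedder
from xnmt.eval_task import LossEvalTask
from xnmt.input_reader import PlainTextReader
from xnmt.loss_calculator import MLELoss
from xnmt.lstm import BiLSTMSeqTransducer, UniLSTMSeqTransducer
from xnmt.mlp import MLP
from xnmt.optimizer import AdamTrainer
from xnmt.param_collection import ParamManager
from xnmt.search_strategy import GreedySearch
from xnmt.training_regimen import SimpleTrainingRegimen
from xnmt.translator import DefaultTranslator
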
 def test_loss_model1(self):
     layer_dim = 512
     model = DefaultTranslator(
         src_reader=self.src_reader,
         trg_reader=self.trg_reader,
         src_embedder=SimpleWordEmbedder(emb_dim=layer_dim, vocab_size=100),
         encoder=BiLSTMSeqTransducer(input_dim=layer_dim,
                                     hidden_dim=layer_dim),
         attender=MlpAttender(input_dim=layer_dim,
                              state_dim=layer_dim,
                              hidden_dim=layer_dim),
         trg_embedder=SimpleWordEmbedder(emb_dim=layer_dim, vocab_size=100),
         decoder=MlpSoftmaxDecoder(input_dim=layer_dim,
                                   trg_embed_dim=layer_dim,
                                   rnn_layer=UniLSTMSeqTransducer(
                                       input_dim=layer_dim,
                                       hidden_dim=layer_dim,
                                       decoder_input_dim=layer_dim,
                                       yaml_path="model.decoder.rnn_layer"),
                                   mlp_layer=MLP(
                                       input_dim=layer_dim,
                                       hidden_dim=layer_dim,
                                       decoder_rnn_dim=layer_dim,
                                       vocab_size=100,
                                       yaml_path="model.decoder.rnn_layer"),
                                   bridge=CopyBridge(dec_dim=layer_dim,
                                                     dec_layers=1)),
     )
     model.set_train(False)
     self.assert_single_loss_equals_batch_loss(model)
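
test_loss_model1 references self.src_reader, self.trg_reader, and an assert_single_loss_equals_batch_loss helper that the surrounding TestCase must provide. A minimal fixture sketch, mirroring the setUp pattern of Example #3 (the PlainTextReader choice is an assumption):

def setUp(self):
    xnmt.events.clear()
    ParamManager.init_param_col()        # fresh parameter collection per test
    self.src_reader = PlainTextReader()  # assumed, mirroring Example #3
    self.trg_reader = PlainTextReader()
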
Example #2
 def test_overfitting(self):
     layer_dim = 16
     batcher = SrcBatcher(batch_size=10, break_ties_randomly=False)
     train_args = {}
     train_args['src_file'] = "examples/data/head.ja"
     train_args['trg_file'] = "examples/data/head.en"
     train_args['loss_calculator'] = MLELoss()
     train_args['model'] = DefaultTranslator(
         src_reader=PlainTextReader(),
         trg_reader=PlainTextReader(),
         src_embedder=SimpleWordEmbedder(vocab_size=100, emb_dim=layer_dim),
         encoder=BiLSTMSeqTransducer(input_dim=layer_dim,
                                     hidden_dim=layer_dim),
         attender=MlpAttender(input_dim=layer_dim,
                              state_dim=layer_dim,
                              hidden_dim=layer_dim),
         trg_embedder=SimpleWordEmbedder(vocab_size=100, emb_dim=layer_dim),
         decoder=MlpSoftmaxDecoder(input_dim=layer_dim,
                                   trg_embed_dim=layer_dim,
                                   rnn_layer=UniLSTMSeqTransducer(
                                       input_dim=layer_dim,
                                       hidden_dim=layer_dim,
                                       decoder_input_dim=layer_dim,
                                       yaml_path="model.decoder.rnn_layer"),
                                   mlp_layer=MLP(
                                       input_dim=layer_dim,
                                       hidden_dim=layer_dim,
                                       decoder_rnn_dim=layer_dim,
                                       vocab_size=100,
                                       yaml_path="model.decoder.rnn_layer"),
                                   bridge=CopyBridge(dec_dim=layer_dim,
                                                     dec_layers=1)),
     )
     train_args['dev_tasks'] = [
         LossEvalTask(model=train_args['model'],
                      src_file="examples/data/head.ja",
                      ref_file="examples/data/head.en",
                      batcher=batcher)
     ]
     train_args['run_for_epochs'] = 1
     train_args['trainer'] = AdamTrainer(alpha=0.1)
     train_args['batcher'] = batcher
     training_regimen = xnmt.training_regimen.SimpleTrainingRegimen(
         **train_args)
     for _ in range(50):
         training_regimen.run_training(save_fct=lambda: None,
                                       update_weights=True)
     self.assertAlmostEqual(
         0.0,
         training_regimen.train_loss_tracker.epoch_loss.sum() /
         training_regimen.train_loss_tracker.epoch_words,
         places=2)
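
The closing assertion checks the average per-word training loss after 50 passes over a tiny dataset; a value near 0.0 means the model has memorized it. Pulling the expression into a helper makes the intent explicit (a sketch, not part of the original test):

def per_word_loss(regimen):
    # Total epoch loss divided by the number of target words processed.
    tracker = regimen.train_loss_tracker
    return tracker.epoch_loss.sum() / tracker.epoch_words
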
Example #3
    def setUp(self):
        layer_dim = 512
        xnmt.events.clear()
        ParamManager.init_param_col()
        self.model = DefaultTranslator(
            src_reader=PlainTextReader(),
            trg_reader=PlainTextReader(),
            src_embedder=SimpleWordEmbedder(emb_dim=layer_dim, vocab_size=100),
            encoder=BiLSTMSeqTransducer(input_dim=layer_dim,
                                        hidden_dim=layer_dim),
            attender=MlpAttender(input_dim=layer_dim,
                                 state_dim=layer_dim,
                                 hidden_dim=layer_dim),
            trg_embedder=SimpleWordEmbedder(emb_dim=layer_dim, vocab_size=100),
            decoder=MlpSoftmaxDecoder(input_dim=layer_dim,
                                      trg_embed_dim=layer_dim,
                                      rnn_layer=UniLSTMSeqTransducer(
                                          input_dim=layer_dim,
                                          hidden_dim=layer_dim,
                                          decoder_input_dim=layer_dim,
                                          yaml_path="model.decoder.rnn_layer"),
                                      mlp_layer=MLP(
                                          input_dim=layer_dim,
                                          hidden_dim=layer_dim,
                                          decoder_rnn_dim=layer_dim,
                                          vocab_size=100,
                                          yaml_path="model.decoder.rnn_layer"),
                                      bridge=CopyBridge(dec_dim=layer_dim,
                                                        dec_layers=1)),
        )
        self.model.set_train(False)
        self.model.initialize_generator()

        self.src_data = list(
            self.model.src_reader.read_sents("examples/data/head.ja"))
        self.trg_data = list(
            self.model.trg_reader.read_sents("examples/data/head.en"))

        self.search = GreedySearch()
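
The fixture pins GreedySearch as the decoding strategy. Swapping in beam search would be a one-line change; the sketch below assumes a BeamSearch class alongside GreedySearch in xnmt.search_strategy, whose exact constructor arguments may differ by version:

from xnmt.search_strategy import BeamSearch, GreedySearch

def make_search(use_beam=False, beam_size=5):
    # GreedySearch takes the argmax token at each step; BeamSearch keeps
    # beam_size hypotheses alive. beam_size=5 is an illustrative value.
    return BeamSearch(beam_size=beam_size) if use_beam else GreedySearch()
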
Example #4
# NOTE: the scraped snippet starts mid-call. The opening lines below are
# reconstructed from Examples #1-#3; layer_dim, src_vocab, trg_vocab, and
# the inference object are assumed to be defined earlier, and the
# vocab-aware PlainTextReader construction is an assumption.
model = DefaultTranslator(
    src_reader=PlainTextReader(vocab=src_vocab),
    trg_reader=PlainTextReader(vocab=trg_vocab),
    src_embedder=SimpleWordEmbedder(emb_dim=layer_dim,
                                    vocab_size=len(src_vocab)),
    encoder=BiLSTMSeqTransducer(input_dim=layer_dim,
                                hidden_dim=layer_dim,
                                layers=1),
    attender=MlpAttender(hidden_dim=layer_dim,
                         state_dim=layer_dim,
                         input_dim=layer_dim),
    trg_embedder=SimpleWordEmbedder(emb_dim=layer_dim,
                                    vocab_size=len(trg_vocab)),
    decoder=MlpSoftmaxDecoder(input_dim=layer_dim,
                              rnn_layer=UniLSTMSeqTransducer(
                                  input_dim=layer_dim,
                                  hidden_dim=layer_dim,
                                  decoder_input_dim=layer_dim,
                                  yaml_path="decoder"),
                              mlp_layer=MLP(input_dim=layer_dim,
                                            hidden_dim=layer_dim,
                                            decoder_rnn_dim=layer_dim,
                                            yaml_path="decoder",
                                            vocab_size=len(trg_vocab)),
                              trg_embed_dim=layer_dim,
                              bridge=CopyBridge(dec_dim=layer_dim,
                                                dec_layers=1)),
    inference=inference)

train = SimpleTrainingRegimen(
    name=f"{EXP}",
    model=model,
    batcher=batcher,
    trainer=AdamTrainer(alpha=0.001),
    run_for_epochs=2,
    src_file="examples/data/head.ja",
    trg_file="examples/data/head.en")  # truncated in the source; call closed so it parses
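
The snippet ends inside the SimpleTrainingRegimen call. Based on Example #2, the regimen would then be run roughly like this (a no-op save_fct keeps the sketch self-contained; a real run would pass a serialization callback):

train.run_training(save_fct=lambda: None, update_weights=True)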