Example #1
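 # Length-preservation check: builds a DefaultTranslator from the shared
 # model_context and asserts that the UniLSTM encoder emits one output state
 # per input token.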
 def test_uni_lstm_encoder_len(self):
     model = DefaultTranslator(
         src_embedder=SimpleWordEmbedder(self.model_context,
                                         vocab_size=100),
         encoder=UniLSTMSeqTransducer(self.model_context),
         attender=MlpAttender(self.model_context),
         trg_embedder=SimpleWordEmbedder(self.model_context,
                                         vocab_size=100),
         decoder=MlpSoftmaxDecoder(self.model_context, vocab_size=100),
     )
     self.assert_in_out_len_equal(model)
Example #2
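 # Bidirectional pyramidal LSTM constructor: each layer pairs a forward and a
 # backward UniLSTMSeqTransducer of hidden_dim // 2 units each, so their
 # concatenation is hidden_dim wide; layers after the first read input that is
 # either concatenated across reduce_factor frames ("concat") or subsampled
 # ("skip").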
 def __init__(self,
              exp_global=Ref(Path("exp_global")),
              layers=1,
              input_dim=None,
              hidden_dim=None,
              downsampling_method="concat",
              reduce_factor=2,
              dropout=None):
     register_handler(self)
     hidden_dim = hidden_dim or exp_global.default_layer_dim
     input_dim = input_dim or exp_global.default_layer_dim
     self.dropout = dropout or exp_global.dropout
     assert layers > 0
     assert hidden_dim % 2 == 0
     assert isinstance(reduce_factor, int) or (isinstance(reduce_factor, list) and
                                               len(reduce_factor) == layers - 1)
     assert downsampling_method in ["concat", "skip"]
     self.builder_layers = []
     self.downsampling_method = downsampling_method
     self.reduce_factor = reduce_factor
     self.input_dim = input_dim
     f = UniLSTMSeqTransducer(exp_global=exp_global,
                              input_dim=input_dim,
                              hidden_dim=hidden_dim // 2,
                              dropout=dropout)
     b = UniLSTMSeqTransducer(exp_global=exp_global,
                              input_dim=input_dim,
                              hidden_dim=hidden_dim // 2,
                              dropout=dropout)
     self.builder_layers.append((f, b))
     for _ in range(layers - 1):
         layer_input_dim = hidden_dim if downsampling_method == "skip" else hidden_dim * reduce_factor
         f = UniLSTMSeqTransducer(exp_global=exp_global,
                                  input_dim=layer_input_dim,
                                  hidden_dim=hidden_dim // 2,
                                  dropout=dropout)
         b = UniLSTMSeqTransducer(exp_global=exp_global,
                                  input_dim=layer_input_dim,
                                  hidden_dim=hidden_dim // 2,
                                  dropout=dropout)
         self.builder_layers.append((f, b))
Example #3
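 # Overfitting test: trains a small BiLSTM translator on the ten-sentence
 # head.ja/head.en toy set with a large Adam step (alpha=0.1) and expects the
 # per-word training loss to reach roughly zero after 50 passes over the data.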
 def test_overfitting(self):
     layer_dim = 16
     batcher = SrcBatcher(batch_size=10, break_ties_randomly=False)
     train_args = {}
     train_args['src_file'] = "examples/data/head.ja"
     train_args['trg_file'] = "examples/data/head.en"
     train_args['loss_calculator'] = MLELoss()
     train_args['model'] = DefaultTranslator(
         src_reader=PlainTextReader(),
         trg_reader=PlainTextReader(),
         src_embedder=SimpleWordEmbedder(vocab_size=100, emb_dim=layer_dim),
         encoder=BiLSTMSeqTransducer(input_dim=layer_dim,
                                     hidden_dim=layer_dim),
         attender=MlpAttender(input_dim=layer_dim,
                              state_dim=layer_dim,
                              hidden_dim=layer_dim),
         trg_embedder=SimpleWordEmbedder(vocab_size=100, emb_dim=layer_dim),
         decoder=MlpSoftmaxDecoder(input_dim=layer_dim,
                                   trg_embed_dim=layer_dim,
                                   rnn_layer=UniLSTMSeqTransducer(
                                       input_dim=layer_dim,
                                       hidden_dim=layer_dim,
                                       decoder_input_dim=layer_dim,
                                       yaml_path="model.decoder.rnn_layer"),
                                   mlp_layer=MLP(
                                       input_dim=layer_dim,
                                       hidden_dim=layer_dim,
                                       decoder_rnn_dim=layer_dim,
                                       vocab_size=100,
                                       yaml_path="model.decoder.rnn_layer"),
                                   bridge=CopyBridge(dec_dim=layer_dim,
                                                     dec_layers=1)),
     )
     train_args['dev_tasks'] = [
         LossEvalTask(model=train_args['model'],
                      src_file="examples/data/head.ja",
                      ref_file="examples/data/head.en",
                      batcher=batcher)
     ]
     train_args['run_for_epochs'] = 1
     train_args['trainer'] = AdamTrainer(alpha=0.1)
     train_args['batcher'] = batcher
     training_regimen = xnmt.training_regimen.SimpleTrainingRegimen(
         **train_args)
     for _ in range(50):
         training_regimen.run_training(save_fct=lambda: None,
                                       update_weights=True)
     self.assertAlmostEqual(
         0.0,
         training_regimen.train_loss_tracker.epoch_loss.sum() /
         training_regimen.train_loss_tracker.epoch_words,
         places=2)
Example #4
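    # Fixture for a character-level model: a SegmentingSeqTransducer composes
    # characters into word representations via SumComposer and feeds a BiLSTM,
    # which serves as the encoder of a standard attentional translator;
    # batches are packed with target-side batching.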
    def setUp(self):
        # Seeding
        numpy.random.seed(2)
        random.seed(2)
        layer_dim = 64
        xnmt.events.clear()
        ParamManager.init_param_col()
        self.segment_composer = SumComposer()
        self.src_reader = CharFromWordTextReader()
        self.trg_reader = PlainTextReader()
        self.loss_calculator = AutoRegressiveMLELoss()
        self.segmenting_encoder = SegmentingSeqTransducer(
            segment_composer=self.segment_composer,
            final_transducer=BiLSTMSeqTransducer(input_dim=layer_dim,
                                                 hidden_dim=layer_dim),
        )

        self.model = DefaultTranslator(
            src_reader=self.src_reader,
            trg_reader=self.trg_reader,
            src_embedder=SimpleWordEmbedder(emb_dim=layer_dim, vocab_size=100),
            encoder=self.segmenting_encoder,
            attender=MlpAttender(input_dim=layer_dim,
                                 state_dim=layer_dim,
                                 hidden_dim=layer_dim),
            trg_embedder=SimpleWordEmbedder(emb_dim=layer_dim, vocab_size=100),
            decoder=AutoRegressiveDecoder(
                input_dim=layer_dim,
                rnn=UniLSTMSeqTransducer(input_dim=layer_dim,
                                         hidden_dim=layer_dim,
                                         decoder_input_dim=layer_dim,
                                         yaml_path="decoder"),
                transform=AuxNonLinear(input_dim=layer_dim,
                                       output_dim=layer_dim,
                                       aux_input_dim=layer_dim),
                scorer=Softmax(vocab_size=100, input_dim=layer_dim),
                trg_embed_dim=layer_dim,
                bridge=CopyBridge(dec_dim=layer_dim, dec_layers=1)),
        )
        self.model.set_train(True)

        self.layer_dim = layer_dim
        self.src_data = list(
            self.model.src_reader.read_sents("examples/data/head.ja"))
        self.trg_data = list(
            self.model.trg_reader.read_sents("examples/data/head.en"))
        my_batcher = xnmt.batcher.TrgBatcher(batch_size=3,
                                             src_pad_token=1,
                                             trg_pad_token=2)
        self.src, self.trg = my_batcher.pack(self.src_data, self.trg_data)
        dy.renew_cg(immediate_compute=True, check_validity=True)
Example #5
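 # Residual bidirectional builder: one forward/backward LSTM pair over the raw
 # input, followed by a ResidualRNNBuilder stack of num_layers - 1 layers over
 # the concatenated (hidden_dim-wide) states.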
 def __init__(self,
              yaml_context,
              num_layers,
              input_dim,
              hidden_dim,
              add_to_output=False,
              dropout=None):
     assert num_layers > 1
     assert hidden_dim % 2 == 0
     self.forward_layer = UniLSTMSeqTransducer(yaml_context,
                                               input_dim,
                                                hidden_dim // 2,
                                               dropout=dropout)
     self.backward_layer = UniLSTMSeqTransducer(yaml_context,
                                                input_dim,
                                                 hidden_dim // 2,
                                                dropout=dropout)
     self.residual_network = ResidualRNNBuilder(yaml_context,
                                                num_layers - 1,
                                                hidden_dim,
                                                hidden_dim,
                                                add_to_output,
                                                dropout=dropout)
Example #6
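 # Length-preservation check for a three-layer BiLSTM encoder inside a full
 # translator with an AutoRegressiveDecoder.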
 def test_bi_lstm_encoder_len(self):
   layer_dim = 512
   model = DefaultTranslator(
     src_reader=self.src_reader,
     trg_reader=self.trg_reader,
     src_embedder=SimpleWordEmbedder(emb_dim=layer_dim, vocab_size=100),
     encoder=BiLSTMSeqTransducer(input_dim=layer_dim, hidden_dim=layer_dim, layers=3),
     attender=MlpAttender(input_dim=layer_dim, state_dim=layer_dim, hidden_dim=layer_dim),
     trg_embedder=SimpleWordEmbedder(emb_dim=layer_dim, vocab_size=100),
      decoder=AutoRegressiveDecoder(input_dim=layer_dim,
                                    trg_embed_dim=layer_dim,
                                    rnn=UniLSTMSeqTransducer(input_dim=layer_dim,
                                                             hidden_dim=layer_dim,
                                                             decoder_input_dim=layer_dim,
                                                             yaml_path="model.decoder.rnn"),
                                    transform=NonLinear(input_dim=layer_dim*2, output_dim=layer_dim),
                                    scorer=Softmax(input_dim=layer_dim, vocab_size=100),
                                    bridge=CopyBridge(dec_dim=layer_dim, dec_layers=1)),
   )
   self.assert_in_out_len_equal(model)
Example #7
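    # Stacked unidirectional LSTM: layer 0 reads input_dim features, every
    # later layer reads the previous layer's hidden_dim outputs; the stack is
    # registered as a serializable component so it can be saved and restored.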
    def __init__(self,
                 num_layers,
                 input_dim,
                 hidden_dim,
                 add_to_output=False,
                 dropout=None,
                 builder_layers=None):
        assert num_layers > 0
        self.builder_layers = self.add_serializable_component(
            "builder_layers", builder_layers, lambda: [
                UniLSTMSeqTransducer(input_dim=input_dim
                                     if i == 0 else hidden_dim,
                                     hidden_dim=hidden_dim,
                                     dropout=dropout)
                for i in range(num_layers)
            ])

        self.add_to_output = add_to_output
Example #8
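 # Same length-preservation check as Example #1, but with all dimensions
 # passed explicitly to the older MlpSoftmaxDecoder interface.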
 def test_uni_lstm_encoder_len(self):
     layer_dim = 512
     model = DefaultTranslator(
         src_reader=self.src_reader,
         trg_reader=self.trg_reader,
         src_embedder=SimpleWordEmbedder(emb_dim=layer_dim, vocab_size=100),
         encoder=UniLSTMSeqTransducer(input_dim=layer_dim,
                                      hidden_dim=layer_dim),
         attender=MlpAttender(input_dim=layer_dim,
                              state_dim=layer_dim,
                              hidden_dim=layer_dim),
         trg_embedder=SimpleWordEmbedder(emb_dim=layer_dim, vocab_size=100),
         decoder=MlpSoftmaxDecoder(input_dim=layer_dim,
                                   lstm_dim=layer_dim,
                                   mlp_hidden_dim=layer_dim,
                                   trg_embed_dim=layer_dim,
                                   vocab_size=100),
     )
     self.assert_in_out_len_equal(model)
Example #9
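 # Consistency check: the loss of each sentence scored individually must equal
 # its entry in the batched loss; sources are padded to a multiple of 4 so the
 # three-layer pyramidal encoder downsamples evenly.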
 def test_loss_model2(self):
   layer_dim = 512
   model = DefaultTranslator(
     src_reader=self.src_reader,
     trg_reader=self.trg_reader,
     src_embedder=SimpleWordEmbedder(emb_dim=layer_dim, vocab_size=100),
     encoder=PyramidalLSTMSeqTransducer(input_dim=layer_dim, hidden_dim=layer_dim, layers=3),
     attender=MlpAttender(input_dim=layer_dim, state_dim=layer_dim, hidden_dim=layer_dim),
     trg_embedder=SimpleWordEmbedder(emb_dim=layer_dim, vocab_size=100),
      decoder=AutoRegressiveDecoder(input_dim=layer_dim,
                                    trg_embed_dim=layer_dim,
                                    rnn=UniLSTMSeqTransducer(input_dim=layer_dim,
                                                             hidden_dim=layer_dim,
                                                             decoder_input_dim=layer_dim,
                                                             yaml_path="model.decoder.rnn"),
                                    transform=NonLinear(input_dim=layer_dim*2, output_dim=layer_dim),
                                    scorer=Softmax(input_dim=layer_dim, vocab_size=100),
                                    bridge=CopyBridge(dec_dim=layer_dim, dec_layers=1)),
   )
   model.set_train(False)
   self.assert_single_loss_equals_batch_loss(model, pad_src_to_multiple=4)
Example #10
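    # Fixture for greedy-search tests: a standard BiLSTM translator is built,
    # put into evaluation mode, and paired with the toy parallel data.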
    def setUp(self):
        layer_dim = 512
        xnmt.events.clear()
        ParamManager.init_param_col()
        self.model = DefaultTranslator(
            src_reader=PlainTextReader(),
            trg_reader=PlainTextReader(),
            src_embedder=SimpleWordEmbedder(emb_dim=layer_dim, vocab_size=100),
            encoder=BiLSTMSeqTransducer(input_dim=layer_dim,
                                        hidden_dim=layer_dim),
            attender=MlpAttender(input_dim=layer_dim,
                                 state_dim=layer_dim,
                                 hidden_dim=layer_dim),
            trg_embedder=SimpleWordEmbedder(emb_dim=layer_dim, vocab_size=100),
            decoder=MlpSoftmaxDecoder(input_dim=layer_dim,
                                      trg_embed_dim=layer_dim,
                                      rnn_layer=UniLSTMSeqTransducer(
                                          input_dim=layer_dim,
                                          hidden_dim=layer_dim,
                                          decoder_input_dim=layer_dim,
                                          yaml_path="model.decoder.rnn_layer"),
                                      mlp_layer=MLP(
                                          input_dim=layer_dim,
                                          hidden_dim=layer_dim,
                                          decoder_rnn_dim=layer_dim,
                                          vocab_size=100,
                                          yaml_path="model.decoder.rnn_layer"),
                                      bridge=CopyBridge(dec_dim=layer_dim,
                                                        dec_layers=1)),
        )
        self.model.set_train(False)
        self.model.initialize_generator()

        self.src_data = list(
            self.model.src_reader.read_sents("examples/data/head.ja"))
        self.trg_data = list(
            self.model.trg_reader.read_sents("examples/data/head.en"))

        self.search = GreedySearch()
Example #11
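 # Downsampling check: each source is padded to a multiple of 4; a three-layer
 # pyramidal encoder halves the sequence twice, so ceil(len / 4) output states
 # are expected.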
 def test_py_lstm_encoder_len(self):
   layer_dim = 512
   model = DefaultTranslator(
     src_reader=self.src_reader,
     trg_reader=self.trg_reader,
     src_embedder=SimpleWordEmbedder(emb_dim=layer_dim, vocab_size=100),
     encoder=PyramidalLSTMSeqTransducer(input_dim=layer_dim, hidden_dim=layer_dim, layers=3),
     attender=MlpAttender(input_dim=layer_dim, state_dim=layer_dim, hidden_dim=layer_dim),
     trg_embedder=SimpleWordEmbedder(emb_dim=layer_dim, vocab_size=100),
      decoder=AutoRegressiveDecoder(input_dim=layer_dim,
                                    trg_embed_dim=layer_dim,
                                    rnn=UniLSTMSeqTransducer(input_dim=layer_dim,
                                                             hidden_dim=layer_dim,
                                                             decoder_input_dim=layer_dim,
                                                             yaml_path="model.decoder.rnn"),
                                    transform=NonLinear(input_dim=layer_dim*2, output_dim=layer_dim),
                                    scorer=Softmax(input_dim=layer_dim, vocab_size=100),
                                    bridge=CopyBridge(dec_dim=layer_dim, dec_layers=1)),
   )
   self.set_train(True)
   for sent_i in range(10):
     dy.renew_cg()
     src = self.src_data[sent_i].get_padded_sent(Vocab.ES, 4 - (self.src_data[sent_i].sent_len() % 4))
     self.start_sent(src)
     embeddings = model.src_embedder.embed_sent(src)
     encodings = model.encoder.transduce(embeddings)
     self.assertEqual(int(math.ceil(len(embeddings) / float(4))), len(encodings))
Example #12
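# Excerpt from a training script: src_vocab, trg_vocab, layer_dim, inference
# and EXP come from earlier (omitted) lines; the assembled model is then
# handed to a SimpleTrainingRegimen (the call is truncated below).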
model = DefaultTranslator(
    src_reader=PlainTextReader(vocab=src_vocab),
    trg_reader=PlainTextReader(vocab=trg_vocab),
    src_embedder=SimpleWordEmbedder(emb_dim=layer_dim,
                                    vocab_size=len(src_vocab)),
    encoder=BiLSTMSeqTransducer(input_dim=layer_dim,
                                hidden_dim=layer_dim,
                                layers=1),
    attender=MlpAttender(hidden_dim=layer_dim,
                         state_dim=layer_dim,
                         input_dim=layer_dim),
    trg_embedder=SimpleWordEmbedder(emb_dim=layer_dim,
                                    vocab_size=len(trg_vocab)),
    decoder=MlpSoftmaxDecoder(input_dim=layer_dim,
                              rnn_layer=UniLSTMSeqTransducer(
                                  input_dim=layer_dim,
                                  hidden_dim=layer_dim,
                                  decoder_input_dim=layer_dim,
                                  yaml_path="decoder"),
                              mlp_layer=MLP(input_dim=layer_dim,
                                            hidden_dim=layer_dim,
                                            decoder_rnn_dim=layer_dim,
                                            yaml_path="decoder",
                                            vocab_size=len(trg_vocab)),
                              trg_embed_dim=layer_dim,
                              bridge=CopyBridge(dec_dim=layer_dim,
                                                dec_layers=1)),
    inference=inference)

train = SimpleTrainingRegimen(
    name=f"{EXP}",
    model=model,
Example #13
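    # Fixture for learned segmentation: a PolicyGradient module (linear
    # baseline, confidence penalty, z-normalization, 5 samples) drives the
    # segmentation decisions of the SegmentingSeqTransducer, combined with an
    # epsilon-greedy Poisson prior and a Poisson length prior.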
    def setUp(self):
        # Seeding
        numpy.random.seed(2)
        random.seed(2)
        layer_dim = 64
        xnmt.events.clear()
        ParamManager.init_param_col()
        self.segment_encoder_bilstm = BiLSTMSeqTransducer(input_dim=layer_dim,
                                                          hidden_dim=layer_dim)
        self.segment_composer = SumComposer()
        self.src_reader = CharFromWordTextReader()
        self.trg_reader = PlainTextReader()
        self.loss_calculator = AutoRegressiveMLELoss()

        baseline = Linear(input_dim=layer_dim, output_dim=1)
        policy_network = Linear(input_dim=layer_dim, output_dim=2)
        self.poisson_prior = PoissonPrior(mu=3.3)
        self.eps_greedy = EpsilonGreedy(eps_prob=0.0, prior=self.poisson_prior)
        self.conf_penalty = ConfidencePenalty()
        self.policy_gradient = PolicyGradient(input_dim=layer_dim,
                                              output_dim=2,
                                              baseline=baseline,
                                              policy_network=policy_network,
                                              z_normalization=True,
                                              conf_penalty=self.conf_penalty,
                                              sample=5)
        self.length_prior = PoissonLengthPrior(lmbd=3.3, weight=1)
        self.segmenting_encoder = SegmentingSeqTransducer(
            embed_encoder=self.segment_encoder_bilstm,
            segment_composer=self.segment_composer,
            final_transducer=BiLSTMSeqTransducer(input_dim=layer_dim,
                                                 hidden_dim=layer_dim),
            policy_learning=self.policy_gradient,
            eps_greedy=self.eps_greedy,
            length_prior=self.length_prior,
        )

        self.model = DefaultTranslator(
            src_reader=self.src_reader,
            trg_reader=self.trg_reader,
            src_embedder=SimpleWordEmbedder(emb_dim=layer_dim, vocab_size=100),
            encoder=self.segmenting_encoder,
            attender=MlpAttender(input_dim=layer_dim,
                                 state_dim=layer_dim,
                                 hidden_dim=layer_dim),
            trg_embedder=SimpleWordEmbedder(emb_dim=layer_dim, vocab_size=100),
            decoder=AutoRegressiveDecoder(
                input_dim=layer_dim,
                rnn=UniLSTMSeqTransducer(input_dim=layer_dim,
                                         hidden_dim=layer_dim,
                                         decoder_input_dim=layer_dim,
                                         yaml_path="decoder"),
                transform=AuxNonLinear(input_dim=layer_dim,
                                       output_dim=layer_dim,
                                       aux_input_dim=layer_dim),
                scorer=Softmax(vocab_size=100, input_dim=layer_dim),
                trg_embed_dim=layer_dim,
                bridge=CopyBridge(dec_dim=layer_dim, dec_layers=1)),
        )
        self.model.set_train(True)

        self.layer_dim = layer_dim
        self.src_data = list(
            self.model.src_reader.read_sents("examples/data/head.ja"))
        self.trg_data = list(
            self.model.trg_reader.read_sents("examples/data/head.en"))
        my_batcher = xnmt.batcher.TrgBatcher(batch_size=3,
                                             src_pad_token=1,
                                             trg_pad_token=2)
        self.src, self.trg = my_batcher.pack(self.src_data, self.trg_data)
        dy.renew_cg(immediate_compute=True, check_validity=True)