Example #1
 def test_policy(self):
     event_trigger.set_train(True)
     self.model.policy_learning = PolicyGradient(input_dim=3 * self.layer_dim)
     mle_loss = MLELoss()
     loss = mle_loss.calc_loss(self.model, self.src[0], self.trg[0])
     event_trigger.calc_additional_loss(self.trg[0], self.model, loss)
Example #2
    def assert_single_loss_equals_batch_loss(self,
                                             model,
                                             pad_src_to_multiple=1):
        """
    Tests whether single loss equals batch loss.
    Here we don't truncate the target side and use masking.
    """
        batch_size = 5
        src_sents = self.src_data[:batch_size]
        src_min = min([x.sent_len() for x in src_sents])
        src_sents_trunc = [s.words[:src_min] for s in src_sents]
        for single_sent in src_sents_trunc:
            single_sent[src_min - 1] = Vocab.ES
            while len(single_sent) % pad_src_to_multiple != 0:
                single_sent.append(Vocab.ES)
        trg_sents = sorted(self.trg_data[:batch_size],
                           key=lambda x: x.sent_len(),
                           reverse=True)
        trg_max = max([x.sent_len() for x in trg_sents])
        np_arr = np.zeros([batch_size, trg_max])
        for i in range(batch_size):
            for j in range(trg_sents[i].sent_len(), trg_max):
                np_arr[i, j] = 1.0
        trg_masks = Mask(np_arr)
        trg_sents_padded = [[w for w in s] + [Vocab.ES] *
                            (trg_max - s.sent_len()) for s in trg_sents]

        src_sents_trunc = [
            sent.SimpleSentence(words=s) for s in src_sents_trunc
        ]
        trg_sents_padded = [
            sent.SimpleSentence(words=s) for s in trg_sents_padded
        ]

        single_loss = 0.0
        for sent_id in range(batch_size):
            tt.reset_graph()
            train_loss = MLELoss().calc_loss(model=model,
                                             src=src_sents_trunc[sent_id],
                                             trg=trg_sents[sent_id]).value()
            single_loss += train_loss[0]

        tt.reset_graph()

        batched_loss = MLELoss().calc_loss(model=model,
                                           src=mark_as_batch(src_sents_trunc),
                                           trg=mark_as_batch(
                                               trg_sents_padded,
                                               trg_masks)).value()
        self.assertAlmostEqual(single_loss, np.sum(batched_loss), places=4)
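The mask built above marks padded target positions with 1.0 and real tokens with 0.0, presumably so the loss at padded timesteps can be zeroed out. A minimal, self-contained numpy sketch of that convention, with purely illustrative lengths:

    import numpy as np

    lengths = [4, 2, 3]                  # hypothetical target lengths in one batch
    trg_max = max(lengths)
    np_arr = np.zeros([len(lengths), trg_max])
    for i, length in enumerate(lengths):
        np_arr[i, length:] = 1.0         # 1.0 marks the padded positions
    # np_arr:
    # [[0. 0. 0. 0.]
    #  [0. 0. 1. 1.]
    #  [0. 0. 0. 1.]]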
Example #3
 def test_train_dev_loss_equal(self):
   layer_dim = 512
   batcher = SrcBatcher(batch_size=5, break_ties_randomly=False)
   train_args = {}
   train_args['src_file'] = "examples/data/head.ja"
   train_args['trg_file'] = "examples/data/head.en"
   train_args['loss_calculator'] = MLELoss()
   train_args['model'] = DefaultTranslator(src_reader=PlainTextReader(vocab=Vocab(vocab_file="examples/data/head.ja.vocab")),
                                           trg_reader=PlainTextReader(vocab=Vocab(vocab_file="examples/data/head.en.vocab")),
                                           src_embedder=SimpleWordEmbedder(emb_dim=layer_dim, vocab_size=100),
                                           encoder=BiLSTMSeqTransducer(input_dim=layer_dim, hidden_dim=layer_dim),
                                           attender=MlpAttender(input_dim=layer_dim, state_dim=layer_dim,
                                                                hidden_dim=layer_dim),
                                           decoder=AutoRegressiveDecoder(input_dim=layer_dim,
                                                                     embedder=SimpleWordEmbedder(emb_dim=layer_dim, vocab_size=100),
                                                                     rnn=UniLSTMSeqTransducer(input_dim=layer_dim,
                                                                                                    hidden_dim=layer_dim,
                                                                                                    decoder_input_dim=layer_dim,
                                                                                                    yaml_path="model.decoder.rnn"),
                                                                     transform=NonLinear(input_dim=layer_dim*2, output_dim=layer_dim),
                                                                     scorer=Softmax(input_dim=layer_dim, vocab_size=100),
                                                                     bridge=CopyBridge(dec_dim=layer_dim, dec_layers=1)),
                                           )
   train_args['dev_tasks'] = [LossEvalTask(model=train_args['model'],
                                           src_file="examples/data/head.ja",
                                           ref_file="examples/data/head.en",
                                           batcher=batcher)]
   train_args['trainer'] = DummyTrainer()
   train_args['batcher'] = batcher
   train_args['run_for_epochs'] = 1
   training_regimen = regimens.SimpleTrainingRegimen(**train_args)
   training_regimen.run_training(save_fct=lambda: None)
   self.assertAlmostEqual(training_regimen.train_loss_tracker.epoch_loss.sum_factors() / training_regimen.train_loss_tracker.epoch_words,
                          training_regimen.dev_loss_tracker.dev_score.loss, places=5)
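Presumably the DummyTrainer performs no parameter updates and the dev task reads the same data as training, so the average per-word training loss should match the reported dev loss. A tiny self-contained sketch of the quantity compared above, with purely illustrative numbers:

   epoch_loss_sum = 42.0      # hypothetical summed NLL over the training epoch
   epoch_words = 120          # hypothetical number of target words in that epoch
   dev_loss_per_word = 0.35   # hypothetical per-word dev loss from LossEvalTask
   assert abs(epoch_loss_sum / epoch_words - dev_loss_per_word) < 1e-5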
Example #4
 def test_reinforce_loss(self):
     fertility_loss = GlobalFertilityLoss()
     mle_loss = MLELoss()
     loss = CompositeLoss(pt_losses=[mle_loss, fertility_loss]).calc_loss(
         self.model, self.src[0], self.trg[0])
     reinforce_loss = event_trigger.calc_additional_loss(
         self.trg[0], self.model, loss)
     pl = self.model.encoder.policy_learning
     # Gather inputs and the sampled segmentation actions for the checks below
     src = self.src[0]
     mask = src.mask.np_arr
     outputs = self.segmenting_encoder.compose_output
     actions = self.segmenting_encoder.segment_actions
     # Ensure the sampled segmentation actions are consistent with the source lengths
     for i, sample_item in enumerate(actions):
         # The last segmentation decision must fall on the final (unpadded) token
         self.assertEqual(sample_item[-1], src[i].len_unpadded())
     self.assertTrue("mle" in loss.expr_factors)
     self.assertTrue("global_fertility" in loss.expr_factors)
     self.assertTrue("rl_reinf" in reinforce_loss.expr_factors)
     self.assertTrue("rl_baseline" in reinforce_loss.expr_factors)
     self.assertTrue("rl_confpen" in reinforce_loss.expr_factors)
     # Ensure we are sampling from the policy learning
     self.assertEqual(self.model.encoder.segmenting_action,
                      SegmentingSeqTransducer.SegmentingAction.POLICY)
Example #5
  def setUp(self):
    # Seeding
    numpy.random.seed(2)
    random.seed(2)
    layer_dim = 64
    xnmt.events.clear()
    ParamManager.init_param_col()
    self.segment_encoder_bilstm = BiLSTMSeqTransducer(input_dim=layer_dim, hidden_dim=layer_dim)
    self.segment_composer = SumComposer()

    self.src_reader = CharFromWordTextReader(vocab=Vocab(vocab_file="examples/data/head.ja.charvocab"))
    self.trg_reader = PlainTextReader(vocab=Vocab(vocab_file="examples/data/head.en.vocab"))
    self.loss_calculator = FeedbackLoss(child_loss=MLELoss(), repeat=5)

    baseline = Linear(input_dim=layer_dim, output_dim=1)
    policy_network = Linear(input_dim=layer_dim, output_dim=2)
    self.poisson_prior = PoissonPrior(mu=3.3)
    self.eps_greedy = EpsilonGreedy(eps_prob=0.0, prior=self.poisson_prior)
    self.conf_penalty = ConfidencePenalty()
    self.policy_gradient = PolicyGradient(input_dim=layer_dim,
                                          output_dim=2,
                                          baseline=baseline,
                                          policy_network=policy_network,
                                          z_normalization=True,
                                          conf_penalty=self.conf_penalty)
    self.length_prior = PoissonLengthPrior(lmbd=3.3, weight=1)
    self.segmenting_encoder = SegmentingSeqTransducer(
      embed_encoder=self.segment_encoder_bilstm,
      segment_composer=self.segment_composer,
      final_transducer=BiLSTMSeqTransducer(input_dim=layer_dim, hidden_dim=layer_dim),
      policy_learning=self.policy_gradient,
      eps_greedy=self.eps_greedy,
      length_prior=self.length_prior,
    )

    self.model = DefaultTranslator(
      src_reader=self.src_reader,
      trg_reader=self.trg_reader,
      src_embedder=SimpleWordEmbedder(emb_dim=layer_dim, vocab_size=100),
      encoder=self.segmenting_encoder,
      attender=MlpAttender(input_dim=layer_dim, state_dim=layer_dim, hidden_dim=layer_dim),
      trg_embedder=SimpleWordEmbedder(emb_dim=layer_dim, vocab_size=100),
      decoder=AutoRegressiveDecoder(input_dim=layer_dim,
                                    rnn=UniLSTMSeqTransducer(input_dim=layer_dim, hidden_dim=layer_dim,
                                                             decoder_input_dim=layer_dim, yaml_path="decoder"),
                                    transform=AuxNonLinear(input_dim=layer_dim, output_dim=layer_dim,
                                                           aux_input_dim=layer_dim),
                                    scorer=Softmax(vocab_size=100, input_dim=layer_dim),
                                    trg_embed_dim=layer_dim,
                                    bridge=CopyBridge(dec_dim=layer_dim, dec_layers=1)),
    )
    event_trigger.set_train(True)

    self.layer_dim = layer_dim
    self.src_data = list(self.model.src_reader.read_sents("examples/data/head.ja"))
    self.trg_data = list(self.model.trg_reader.read_sents("examples/data/head.en"))
    my_batcher = batchers.TrgBatcher(batch_size=3)
    self.src, self.trg = my_batcher.pack(self.src_data, self.trg_data)
    dy.renew_cg(immediate_compute=True, check_validity=True)
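A minimal sketch (not part of the original suite) of how these fixtures might be exercised; the test name is hypothetical and the factor name follows Example #4:

  def test_policy_gradient_smoke(self):
    # Hypothetical test mirroring Examples #1 and #11: compute the MLE loss and
    # the policy-gradient auxiliary loss for the first packed batch.
    loss = MLELoss().calc_loss(self.model, self.src[0], self.trg[0])
    rl_loss = event_trigger.calc_additional_loss(self.trg[0], self.model, loss)
    self.assertTrue("rl_reinf" in rl_loss.expr_factors)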
Example #6
    def setUp(self):
        # Seeding
        numpy.random.seed(2)
        random.seed(2)
        layer_dim = 32
        xnmt.events.clear()
        ParamManager.init_param_col()

        edge_vocab = Vocab(vocab_file="examples/data/parse/head.en.edge_vocab")
        node_vocab = Vocab(vocab_file="examples/data/parse/head.en.node_vocab")
        value_vocab = Vocab(vocab_file="examples/data/head.en.vocab")

        self.src_reader = input_readers.PlainTextReader(vocab=value_vocab)
        self.trg_reader = input_readers.CoNLLToRNNGActionsReader(
            surface_vocab=value_vocab,
            nt_vocab=node_vocab,
            edg_vocab=edge_vocab)

        self.layer_dim = layer_dim
        self.src_data = list(
            self.src_reader.read_sents("examples/data/head.en"))
        self.trg_data = list(
            self.trg_reader.read_sents("examples/data/parse/head.en.conll"))
        self.loss_calculator = MLELoss()
        self.head_composer = composer.DyerHeadComposer(
            fwd_combinator=UniLSTMSeqTransducer(input_dim=layer_dim,
                                                hidden_dim=layer_dim),
            bwd_combinator=UniLSTMSeqTransducer(input_dim=layer_dim,
                                                hidden_dim=layer_dim),
            transform=AuxNonLinear(input_dim=layer_dim,
                                   aux_input_dim=layer_dim,
                                   output_dim=layer_dim))

        self.model = DefaultTranslator(
            src_reader=self.src_reader,
            trg_reader=self.trg_reader,
            src_embedder=LookupEmbedder(emb_dim=layer_dim,
                                        vocab_size=len(value_vocab)),
            encoder=IdentitySeqTransducer(),
            attender=MlpAttender(input_dim=layer_dim,
                                 state_dim=layer_dim,
                                 hidden_dim=layer_dim),
            decoder=RNNGDecoder(
                input_dim=layer_dim,
                rnn=UniLSTMSeqTransducer(input_dim=layer_dim,
                                         hidden_dim=layer_dim,
                                         decoder_input_dim=layer_dim),
                transform=AuxNonLinear(input_dim=layer_dim,
                                       output_dim=layer_dim,
                                       aux_input_dim=layer_dim),
                bridge=NoBridge(dec_dim=layer_dim, dec_layers=1),
                graph_reader=self.trg_reader,
                head_composer=self.head_composer))
        event_trigger.set_train(True)

        my_batcher = batchers.TrgBatcher(batch_size=1)
        self.src, self.trg = my_batcher.pack(self.src_data, self.trg_data)
        dy.renew_cg(immediate_compute=True, check_validity=True)
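A minimal sketch (not in the original suite) of a smoke test over these RNNG fixtures, mirroring Example #12; the test name is hypothetical:

    def test_rnng_nll(self):
        # Hypothetical smoke test: compute the MLE loss for the first batch
        # under training mode, as Example #12 does for its own model.
        event_trigger.set_train(True)
        MLELoss().calc_loss(self.model, self.src[0], self.trg[0])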
Example #7
    def assert_single_loss_equals_batch_loss(self,
                                             model,
                                             pad_src_to_multiple=1):
        """
    Tests whether single loss equals batch loss.
    Truncating src / trg sents to same length so no masking is necessary
    """
        batch_size = 5
        src_sents = self.src_data[:batch_size]
        src_min = min([x.sent_len() for x in src_sents])
        src_sents_trunc = [s.words[:src_min] for s in src_sents]
        for single_sent in src_sents_trunc:
            single_sent[src_min - 1] = Vocab.ES
            while len(single_sent) % pad_src_to_multiple != 0:
                single_sent.append(Vocab.ES)
        trg_sents = self.trg_data[:batch_size]
        trg_min = min([x.sent_len() for x in trg_sents])
        trg_sents_trunc = [s.words[:trg_min] for s in trg_sents]
        for single_sent in trg_sents_trunc:
            single_sent[trg_min - 1] = Vocab.ES

        src_sents_trunc = [
            sent.SimpleSentence(words=s) for s in src_sents_trunc
        ]
        trg_sents_trunc = [
            sent.SimpleSentence(words=s) for s in trg_sents_trunc
        ]

        single_loss = 0.0
        for sent_id in range(batch_size):
            tt.reset_graph()
            train_loss = MLELoss().calc_loss(
                model=model,
                src=src_sents_trunc[sent_id],
                trg=trg_sents_trunc[sent_id]).value()
            single_loss += train_loss[0]

        tt.reset_graph()

        batched_loss = MLELoss().calc_loss(
            model=model,
            src=mark_as_batch(src_sents_trunc),
            trg=mark_as_batch(trg_sents_trunc)).value()
        self.assertAlmostEqual(single_loss, np.sum(batched_loss), places=4)
Example #8
    def setUp(self):
        # Seeding
        numpy.random.seed(2)
        random.seed(2)
        layer_dim = 4
        xnmt.events.clear()
        ParamManager.init_param_col()
        self.segment_composer = SumComposer()
        self.src_reader = CharFromWordTextReader(vocab=Vocab(
            vocab_file="examples/data/head.ja.charvocab"))
        self.trg_reader = PlainTextReader(vocab=Vocab(
            vocab_file="examples/data/head.en.vocab"))
        self.loss_calculator = FeedbackLoss(child_loss=MLELoss(), repeat=5)
        self.segmenting_encoder = SegmentingSeqTransducer(
            segment_composer=self.segment_composer,
            final_transducer=BiLSTMSeqTransducer(input_dim=layer_dim,
                                                 hidden_dim=layer_dim),
        )

        self.model = DefaultTranslator(
            src_reader=self.src_reader,
            trg_reader=self.trg_reader,
            src_embedder=SimpleWordEmbedder(emb_dim=layer_dim, vocab_size=100),
            encoder=self.segmenting_encoder,
            attender=MlpAttender(input_dim=layer_dim,
                                 state_dim=layer_dim,
                                 hidden_dim=layer_dim),
            decoder=AutoRegressiveDecoder(
                input_dim=layer_dim,
                rnn=UniLSTMSeqTransducer(input_dim=layer_dim,
                                         hidden_dim=layer_dim,
                                         decoder_input_dim=layer_dim,
                                         yaml_path="decoder"),
                transform=AuxNonLinear(input_dim=layer_dim,
                                       output_dim=layer_dim,
                                       aux_input_dim=layer_dim),
                scorer=Softmax(vocab_size=100, input_dim=layer_dim),
                embedder=SimpleWordEmbedder(emb_dim=layer_dim, vocab_size=100),
                bridge=CopyBridge(dec_dim=layer_dim, dec_layers=1)),
        )
        event_trigger.set_train(True)

        self.layer_dim = layer_dim
        self.src_data = list(
            self.model.src_reader.read_sents("examples/data/head.ja"))
        self.trg_data = list(
            self.model.trg_reader.read_sents("examples/data/head.en"))
        my_batcher = batchers.TrgBatcher(batch_size=3)
        self.src, self.trg = my_batcher.pack(self.src_data, self.trg_data)
        dy.renew_cg(immediate_compute=True, check_validity=True)
Example #9
    def setUp(self):
        # Seeding
        numpy.random.seed(2)
        random.seed(2)
        layer_dim = 32
        xnmt.events.clear()
        ParamManager.init_param_col()

        self.src_reader = PlainTextReader(vocab=Vocab(
            vocab_file="test/data/head.ja.vocab"))
        self.trg_reader = PlainTextReader(vocab=Vocab(
            vocab_file="test/data/head.en.vocab"))
        self.layer_dim = layer_dim
        self.src_data = list(self.src_reader.read_sents("test/data/head.ja"))
        self.trg_data = list(self.trg_reader.read_sents("test/data/head.en"))
        self.input_vocab_size = len(self.src_reader.vocab.i2w)
        self.output_vocab_size = len(self.trg_reader.vocab.i2w)
        self.loss_calculator = MLELoss()

        self.model = SimultaneousTranslator(
            src_reader=self.src_reader,
            trg_reader=self.trg_reader,
            src_embedder=SimpleWordEmbedder(emb_dim=layer_dim,
                                            vocab_size=self.input_vocab_size),
            encoder=UniLSTMSeqTransducer(input_dim=layer_dim,
                                         hidden_dim=layer_dim),
            attender=MlpAttender(input_dim=layer_dim,
                                 state_dim=layer_dim,
                                 hidden_dim=layer_dim),
            decoder=AutoRegressiveDecoder(
                input_dim=layer_dim,
                rnn=UniLSTMSeqTransducer(input_dim=layer_dim,
                                         hidden_dim=layer_dim,
                                         decoder_input_dim=layer_dim,
                                         yaml_path="decoder"),
                transform=AuxNonLinear(input_dim=layer_dim,
                                       output_dim=layer_dim,
                                       aux_input_dim=layer_dim),
                scorer=Softmax(vocab_size=self.output_vocab_size,
                               input_dim=layer_dim),
                embedder=SimpleWordEmbedder(emb_dim=layer_dim,
                                            vocab_size=self.output_vocab_size),
                bridge=NoBridge(dec_dim=layer_dim, dec_layers=1)),
        )
        event_trigger.set_train(True)

        my_batcher = batchers.TrgBatcher(batch_size=3)
        self.src, self.trg = my_batcher.pack(self.src_data, self.trg_data)
        dy.renew_cg(immediate_compute=True, check_validity=True)
Example #10
 def test_overfitting(self):
     layer_dim = 16
     batcher = SrcBatcher(batch_size=10, break_ties_randomly=False)
     train_args = {}
     train_args['src_file'] = "examples/data/head.ja"
     train_args['trg_file'] = "examples/data/head.en"
     train_args['loss_calculator'] = MLELoss()
     train_args['model'] = DefaultTranslator(
         src_reader=PlainTextReader(vocab=Vocab(
             vocab_file="examples/data/head.ja.vocab")),
         trg_reader=PlainTextReader(vocab=Vocab(
             vocab_file="examples/data/head.en.vocab")),
         src_embedder=LookupEmbedder(vocab_size=100, emb_dim=layer_dim),
         encoder=BiLSTMSeqTransducer(input_dim=layer_dim,
                                     hidden_dim=layer_dim),
         attender=MlpAttender(input_dim=layer_dim,
                              state_dim=layer_dim,
                              hidden_dim=layer_dim),
         decoder=AutoRegressiveDecoder(
             input_dim=layer_dim,
             embedder=LookupEmbedder(emb_dim=layer_dim, vocab_size=100),
             rnn=UniLSTMSeqTransducer(input_dim=layer_dim,
                                      hidden_dim=layer_dim,
                                      decoder_input_dim=layer_dim,
                                      yaml_path="model.decoder.rnn"),
             transform=NonLinear(input_dim=layer_dim * 2,
                                 output_dim=layer_dim),
             scorer=Softmax(input_dim=layer_dim, vocab_size=100),
             bridge=CopyBridge(dec_dim=layer_dim, dec_layers=1)),
     )
     train_args['dev_tasks'] = [
         LossEvalTask(model=train_args['model'],
                      src_file="examples/data/head.ja",
                      ref_file="examples/data/head.en",
                      batcher=batcher)
     ]
     train_args['run_for_epochs'] = 1
     train_args['trainer'] = AdamTrainer(alpha=0.1)
     train_args['batcher'] = batcher
     training_regimen = regimens.SimpleTrainingRegimen(**train_args)
Example #11
 def calc_loss_single_batch(self):
   loss = MLELoss().calc_loss(self.model, self.src[0], self.trg[0])
   reinforce_loss = event_trigger.calc_additional_loss(self.trg[0], self.model, loss)
   return loss, reinforce_loss
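A hedged usage sketch for the helper above (not part of the original code); the test name is hypothetical and the factor name follows Example #4:

 def test_single_batch_losses(self):
   loss, reinforce_loss = self.calc_loss_single_batch()
   # Both the main loss and the policy-gradient auxiliary loss should be produced.
   self.assertIsNotNone(loss)
   self.assertTrue("rl_reinf" in reinforce_loss.expr_factors)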
Example #12
 def test_train_nll(self):
     event_trigger.set_train(True)
     mle_loss = MLELoss()
     mle_loss.calc_loss(self.model, self.src[0], self.trg[0])