class TestBLEU(unittest.TestCase):

  def setUp(self):
    xnmt.events.clear()
    self.hyp = ["the taro met the hanako".split()]
    self.ref = ["taro met hanako".split()]
    vocab = Vocab()
    self.hyp_id = list(map(vocab.convert, self.hyp[0]))
    self.ref_id = list(map(vocab.convert, self.ref[0]))

  def test_bleu_1gram(self):
    bleu = evaluator.BLEUEvaluator(ngram=1)
    exp_bleu = 3.0 / 5.0
    act_bleu = bleu.evaluate(self.ref, self.hyp).value()
    self.assertEqual(act_bleu, exp_bleu)

  @unittest.skipUnless(has_cython(), "requires cython to run")
  def test_bleu_4gram_fast(self):
    bleu = evaluator.FastBLEUEvaluator(ngram=4, smooth=1)
    exp_bleu = math.exp(math.log((3.0/5.0) * (2.0/5.0) * (1.0/4.0) * (1.0/3.0)) / 4.0)
    act_bleu = bleu.evaluate(self.ref_id, self.hyp_id)
    self.assertEqual(act_bleu, exp_bleu)
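# --- Illustrative sketch only (not part of xnmt): where the expected BLEU
# precisions above come from. Modified n-gram precision is the clipped-match
# count over the number of hypothesis n-grams; the smooth=1 setting of
# FastBLEUEvaluator is assumed here to add one to the match and total counts
# for n > 1, which is what reproduces the 2/5, 1/4 and 1/3 terms. No brevity
# penalty applies because the hypothesis is longer than the reference.
from collections import Counter

def _ngram_precision(hyp, ref, n, smooth=0):
  """Clipped n-gram precision of hyp against ref, with optional add-`smooth` smoothing."""
  hyp_ngrams = Counter(tuple(hyp[i:i + n]) for i in range(len(hyp) - n + 1))
  ref_ngrams = Counter(tuple(ref[i:i + n]) for i in range(len(ref) - n + 1))
  matched = sum(min(count, ref_ngrams[gram]) for gram, count in hyp_ngrams.items())
  return (matched + smooth) / (sum(hyp_ngrams.values()) + smooth)

_hyp = "the taro met the hanako".split()
_ref = "taro met hanako".split()
assert _ngram_precision(_hyp, _ref, 1) == 3.0 / 5.0              # "taro", "met", "hanako" match (3 of 5)
assert _ngram_precision(_hyp, _ref, 2, smooth=1) == 2.0 / 5.0    # only "taro met" matches: (1+1)/(4+1)
assert _ngram_precision(_hyp, _ref, 3, smooth=1) == 1.0 / 4.0    # no trigram matches: (0+1)/(3+1)
assert _ngram_precision(_hyp, _ref, 4, smooth=1) == 1.0 / 3.0    # no 4-gram matches: (0+1)/(2+1)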
class TestRunningConfig(unittest.TestCase):

  def setUp(self):
    xnmt.events.clear()

  def test_assemble(self):
    run.main(["test/config/assemble.yaml"])

  def test_classifier(self):
    run.main(["test/config/classifier.yaml"])

  def test_component_sharing(self):
    run.main(["test/config/component_sharing.yaml"])

  def test_encoders(self):
    run.main(["test/config/encoders.yaml"])

  def test_ensembling(self):
    run.main(["test/config/ensembling.yaml"])

  def test_forced(self):
    run.main(["test/config/forced.yaml"])

  def test_lm(self):
    run.main(["test/config/lm.yaml"])

  def test_load_model(self):
    run.main(["test/config/load_model.yaml"])

  def test_multi_task(self):
    run.main(["test/config/multi_task.yaml"])

  def test_multi_task_speech(self):
    run.main(["test/config/multi_task_speech.yaml"])

  def test_preproc(self):
    run.main(["test/config/preproc.yaml"])

  def test_pretrained_emb(self):
    run.main(["test/config/pretrained_embeddings.yaml"])

  def test_random_search_test_params(self):
    run.main(["test/config/random_search_test_params.yaml"])

  def test_random_search_train_params(self):
    run.main(["test/config/random_search_train_params.yaml"])

  def test_reload(self):
    run.main(["test/config/reload.yaml"])

  def test_segmenting(self):
    run.main(["test/config/seg_report.yaml"])

  def test_reload_exception(self):
    with self.assertRaises(ValueError) as context:
      run.main(["test/config/reload_exception.yaml"])
    self.assertEqual(str(context.exception), 'VanillaLSTMGates: x_t has inconsistent dimension')

  def test_report(self):
    run.main(["test/config/report.yaml"])

  @unittest.expectedFailure  # TODO: these tests need to be fixed
  def test_retrieval(self):
    run.main(["test/config/retrieval.yaml"])

  def test_score(self):
    run.main(["test/config/score.yaml"])

  def test_self_attentional_am(self):
    run.main(["test/config/self_attentional_am.yaml"])

  def test_seq_labeler(self):
    run.main(["test/config/seq_labeler.yaml"])

  def test_speech(self):
    run.main(["test/config/speech.yaml"])

  @unittest.expectedFailure  # TODO: these tests need to be fixed
  def test_speech_retrieval(self):
    run.main(["test/config/speech_retrieval.yaml"])

  def test_standard(self):
    run.main(["test/config/standard.yaml"])

  @unittest.expectedFailure  # TODO: these tests need to be fixed
  def test_transformer(self):
    run.main(["test/config/transformer.yaml"])

  @unittest.skipUnless(has_cython(), "requires cython to run")
  def test_search_strategy_reinforce(self):
    run.main(["test/config/reinforce.yaml"])

  @unittest.skipUnless(has_cython(), "requires cython to run")
  def test_search_strategy_minrisk(self):
    run.main(["test/config/minrisk.yaml"])

  def tearDown(self):
    try:
      if os.path.isdir("test/tmp"):
        shutil.rmtree("test/tmp")
    except OSError:
      pass
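# --- Illustrative sketch only: the per-config tests above all share the same
# run.main(["test/config/<name>.yaml"]) pattern, so they could also be driven
# from a single test via unittest's subTest. Skipped by default so the configs
# are not run twice; the class name and the config list below are examples
# taken from the tests above, not part of the actual suite.
@unittest.skip("illustrative sketch only; covered by TestRunningConfig above")
class TestRunningConfigLoop(unittest.TestCase):
  def test_configs(self):
    for cfg_name in ["standard", "speech", "preproc"]:
      with self.subTest(config=cfg_name):
        run.main(["test/config/{}.yaml".format(cfg_name)])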
class TestSegmentingEncoder(unittest.TestCase):

  def setUp(self):
    # Seeding
    numpy.random.seed(2)
    random.seed(2)
    layer_dim = 64
    xnmt.events.clear()
    ParamManager.init_param_col()
    self.segment_encoder_bilstm = BiLSTMSeqTransducer(input_dim=layer_dim, hidden_dim=layer_dim)
    self.segment_composer = SumComposer()
    self.src_reader = CharFromWordTextReader()
    self.trg_reader = PlainTextReader()
    self.loss_calculator = AutoRegressiveMLELoss()
    baseline = Linear(input_dim=layer_dim, output_dim=1)
    policy_network = Linear(input_dim=layer_dim, output_dim=2)
    self.poisson_prior = PoissonPrior(mu=3.3)
    self.eps_greedy = EpsilonGreedy(eps_prob=0.0, prior=self.poisson_prior)
    self.conf_penalty = ConfidencePenalty()
    self.policy_gradient = PolicyGradient(input_dim=layer_dim,
                                          output_dim=2,
                                          baseline=baseline,
                                          policy_network=policy_network,
                                          z_normalization=True,
                                          conf_penalty=self.conf_penalty,
                                          sample=5)
    self.length_prior = PoissonLengthPrior(lmbd=3.3, weight=1)
    self.segmenting_encoder = SegmentingSeqTransducer(
      embed_encoder=self.segment_encoder_bilstm,
      segment_composer=self.segment_composer,
      final_transducer=BiLSTMSeqTransducer(input_dim=layer_dim, hidden_dim=layer_dim),
      policy_learning=self.policy_gradient,
      eps_greedy=self.eps_greedy,
      length_prior=self.length_prior,
    )
    self.model = DefaultTranslator(
      src_reader=self.src_reader,
      trg_reader=self.trg_reader,
      src_embedder=SimpleWordEmbedder(emb_dim=layer_dim, vocab_size=100),
      encoder=self.segmenting_encoder,
      attender=MlpAttender(input_dim=layer_dim, state_dim=layer_dim, hidden_dim=layer_dim),
      trg_embedder=SimpleWordEmbedder(emb_dim=layer_dim, vocab_size=100),
      decoder=AutoRegressiveDecoder(input_dim=layer_dim,
                                    rnn=UniLSTMSeqTransducer(input_dim=layer_dim,
                                                             hidden_dim=layer_dim,
                                                             decoder_input_dim=layer_dim,
                                                             yaml_path="decoder"),
                                    transform=AuxNonLinear(input_dim=layer_dim,
                                                           output_dim=layer_dim,
                                                           aux_input_dim=layer_dim),
                                    scorer=Softmax(vocab_size=100, input_dim=layer_dim),
                                    trg_embed_dim=layer_dim,
                                    bridge=CopyBridge(dec_dim=layer_dim, dec_layers=1)),
    )
    self.model.set_train(True)
    self.layer_dim = layer_dim
    self.src_data = list(self.model.src_reader.read_sents("examples/data/head.ja"))
    self.trg_data = list(self.model.trg_reader.read_sents("examples/data/head.en"))
    my_batcher = xnmt.batcher.TrgBatcher(batch_size=3, src_pad_token=1, trg_pad_token=2)
    self.src, self.trg = my_batcher.pack(self.src_data, self.trg_data)
    dy.renew_cg(immediate_compute=True, check_validity=True)

  def test_reinforce_loss(self):
    self.model.global_fertility = 1.0
    loss = self.model.calc_loss(self.src[0], self.trg[0], AutoRegressiveMLELoss())
    reinforce_loss = self.model.calc_additional_loss(self.trg[0], self.model, loss)
    pl = self.model.encoder.policy_learning
    # Ensure correct length
    src = self.src[0]
    mask = src.mask.np_arr
    outputs = self.segmenting_encoder.compose_output
    actions = self.segmenting_encoder.segment_actions
    # Ensure sample == outputs
    self.assertEqual(len(outputs), pl.sample)
    self.assertEqual(len(actions), pl.sample)
    for sample_action in actions:
      for i, sample_item in enumerate(sample_action):
        # The last segmentation decision is at the end of the unpadded source
        self.assertEqual(sample_item[-1], src[i].len_unpadded())
        # Assert that all flagged (masked) actions are </s>
        for j in range(len(mask[i])):
          if mask[i][j] == 1:
            self.assertEqual(pl.actions[j][0][i], 1)
    self.assertTrue("mle" in loss.expr_factors)
    self.assertTrue("fertility" in loss.expr_factors)
    self.assertTrue("rl_reinf" in reinforce_loss.expr_factors)
    self.assertTrue("rl_baseline" in reinforce_loss.expr_factors)
    self.assertTrue("rl_confpen" in reinforce_loss.expr_factors)
    # Ensure we are sampling from the policy learning
    self.assertEqual(self.model.encoder.segmenting_action,
                     SegmentingSeqTransducer.SegmentingAction.POLICY)

  def calc_loss_single_batch(self):
    loss = self.model.calc_loss(self.src[0], self.trg[0], AutoRegressiveMLELoss())
    reinforce_loss = self.model.calc_additional_loss(self.trg[0], self.model, loss)
    return loss, reinforce_loss

  def test_gold_input(self):
    self.model.encoder.policy_learning = None
    self.model.encoder.eps_greedy = None
    self.calc_loss_single_batch()
    self.assertEqual(self.model.encoder.segmenting_action,
                     SegmentingSeqTransducer.SegmentingAction.GOLD)

  @unittest.skipUnless(has_cython(), "requires cython to run")
  def test_sample_input(self):
    self.model.encoder.eps_greedy.eps_prob = 1.0
    self.calc_loss_single_batch()
    self.assertEqual(self.model.encoder.segmenting_action,
                     SegmentingSeqTransducer.SegmentingAction.POLICY_SAMPLE)
    self.assertEqual(self.model.encoder.policy_learning.sampling_action,
                     PolicyGradient.SamplingAction.PREDEFINED)

  def test_global_fertility(self):
    # Test global fertility weight
    self.model.global_fertility = 1.0
    self.segmenting_encoder.policy_learning = None
    loss1, _ = self.calc_loss_single_batch()
    self.assertTrue("fertility" in loss1.expr_factors)

  def test_policy_train_test(self):
    self.model.set_train(True)
    self.calc_loss_single_batch()
    self.assertEqual(self.model.encoder.policy_learning.sampling_action,
                     PolicyGradient.SamplingAction.POLICY_CLP)
    self.model.set_train(False)
    self.calc_loss_single_batch()
    self.assertEqual(self.model.encoder.policy_learning.sampling_action,
                     PolicyGradient.SamplingAction.POLICY_AMAX)

  def test_no_policy_train_test(self):
    self.model.encoder.policy_learning = None
    self.model.set_train(True)
    self.calc_loss_single_batch()
    self.assertEqual(self.model.encoder.segmenting_action,
                     SegmentingSeqTransducer.SegmentingAction.PURE_SAMPLE)
    self.model.set_train(False)
    self.calc_loss_single_batch()
    self.assertEqual(self.model.encoder.segmenting_action,
                     SegmentingSeqTransducer.SegmentingAction.PURE_SAMPLE)

  def test_sample_during_search(self):
    self.model.set_train(False)
    self.model.encoder.sample_during_search = True
    self.calc_loss_single_batch()
    self.assertEqual(self.model.encoder.segmenting_action,
                     SegmentingSeqTransducer.SegmentingAction.POLICY)

  @unittest.skipUnless(has_cython(), "requires cython to run")
  def test_policy_gold(self):
    self.model.encoder.eps_greedy.prior = GoldInputPrior("segment")
    self.model.encoder.eps_greedy.eps_prob = 1.0
    self.calc_loss_single_batch()
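# --- Illustrative sketch only (not the xnmt EpsilonGreedy class): the switch the
# tests above suggest. With eps_prob=0.0 the encoder follows the learned policy
# (SegmentingAction.POLICY); with eps_prob=1.0 it falls back to the prior's
# samples (SegmentingAction.POLICY_SAMPLE / SamplingAction.PREDEFINED).
def _epsilon_greedy_choice(policy_sample, prior_sample, eps_prob):
  """Return the prior's sample with probability eps_prob, otherwise the policy's."""
  return prior_sample if random.random() < eps_prob else policy_sample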