Exemplo n.º 1
0
    def run_viterbi_estep(self, batch_token, batch_lan, batch_sen, epoch):
        """Viterbi E-step over one batch.

        Scores the batch with the current model, pads the score tensors with
        an artificial ROOT position for the Eisner parser, decodes the best
        parse of every sentence, and accumulates transition/root/decision
        counts from the decoded parses.

        Args:
            batch_token: per-sentence pairs ``(pos_ids, word_ids)``.
            batch_lan: language id of each sentence in the batch.
            batch_sen: corpus index of each sentence in the batch.
            epoch: current epoch, forwarded to the scorer.

        Returns:
            Batch log-likelihood as computed by ``update_counter``.
        """
        batch_score, batch_root_score, batch_decision_score = self.evaluate_batch_score(
            batch_token, batch_sen, self.sentence_trans_param, epoch)
        batch_score = np.array(
            batch_score)  # log p : batch_size, sentence_length, _, v_c_num
        batch_root_score = np.array(batch_root_score)
        batch_decision_score = np.array(batch_decision_score)
        batch_pos = [s[0] for s in batch_token]
        batch_word = [s[1] for s in batch_token]
        if self.function_mask:
            batch_score = self.function_to_mask(batch_score, batch_pos)
        batch_size, sentence_length, _, v_c_num = batch_score.shape
        _, _, _, v_d_num, _ = batch_decision_score.shape
        # partial code is consistent with eisner_for_dmv.py (only child decision without root)
        # Pad a -inf row and column at index 0 so position 0 acts as the
        # artificial ROOT required by the Eisner parser.
        batch_score = np.concatenate((np.full(
            (batch_size, 1, sentence_length, v_c_num), -np.inf), batch_score),
                                     axis=1)  # for eisner
        batch_score = np.concatenate(
            (np.full((batch_size, sentence_length + 1, 1, v_c_num),
                     -np.inf), batch_score),
            axis=2)  # for eisner
        # ROOT attachment uses the root scores for both child-valency slots.
        batch_score[:, 0, 1:, 0] = batch_root_score
        batch_score[:, 0, 1:, 1] = batch_root_score
        # Decisions at the ROOT position are free (log prob 0).
        batch_decision_score = np.concatenate(
            (np.zeros((batch_size, 1, 2, v_d_num, 2)), batch_decision_score),
            axis=1
        )  #np.concatenate((np.full((batch_size, 1, 2, v_d_num, 2), 0), batch_decision_score), axis=1)

        # Reshape to the 6-D layouts expected by eisner_for_dmv.batch_parse.
        batch_score = batch_score.reshape(batch_size, sentence_length + 1,
                                          sentence_length + 1, 1, 1,
                                          self.cvalency)
        batch_decision_score = batch_decision_score.reshape(
            batch_size, sentence_length + 1, 1, 2, self.dvalency, 2)
        best_parse = eisner_for_dmv.batch_parse(batch_score,
                                                batch_decision_score,
                                                self.dvalency, self.cvalency)
        # update count and samples. direction of root is RIGHT, so dir is 1.
        # Prepend the virtual ROOT id (one past the last real POS/word id).
        batch_pos_add_root = np.array([[self.trans_counter.shape[0]] + list(i)
                                       for i in batch_pos])
        batch_word_add_root = np.array([[len(self.words)] + list(i)
                                        for i in batch_word])
        batch_likelihood = self.update_counter(best_parse, self.trans_counter,
                                               self.root_counter,
                                               self.decision_counter,
                                               batch_pos_add_root,
                                               batch_word_add_root, batch_sen,
                                               batch_lan)
        return batch_likelihood
Exemplo n.º 2
0
    def run_viterbi_estep(self, batch_pos, batch_words, batch_sen, trans_counter, decision_counter, lex_counter):
        """Viterbi E-step over one batch.

        Scores the batch, optionally applies the splitting / function-word
        masks, decodes the best parse of every sentence with the Eisner
        parser, and accumulates the supplied counters from the parses.

        Args:
            batch_pos: POS-tag id sequences for each sentence in the batch.
            batch_words: word id sequences for each sentence in the batch.
            batch_sen: corpus index of each sentence (unused here; kept for
                interface compatibility with sibling implementations).
            trans_counter: transition-count accumulator, updated in place by
                ``update_counter`` and stored back on ``self.trans_counter``.
            decision_counter: decision-count accumulator, updated in place.
            lex_counter: lexical-count accumulator, updated in place.

        Returns:
            Batch log-likelihood as computed by ``update_counter``.
        """
        batch_score, batch_decision_score = self.evaluate_batch_score(batch_words, batch_pos)
        batch_score = np.array(batch_score)
        batch_decision_score = np.array(batch_decision_score)
        # Nothing may attach the artificial ROOT position (index 0) as a child.
        batch_score[:, :, 0, :, :, :] = -np.inf
        if self.specify_splitting:
            batch_score, batch_decision_score = self.mask_scores(batch_score, batch_decision_score, batch_pos)
        if self.function_mask:
            batch_score = self.function_to_mask(batch_score, batch_pos)

        best_parse = eisner_for_dmv.batch_parse(batch_score, batch_decision_score, self.dvalency, self.cvalency)

        batch_likelihood = self.update_counter(best_parse, trans_counter, decision_counter, lex_counter, batch_pos,
                                               batch_words)

        self.trans_counter = trans_counter
        return batch_likelihood
Exemplo n.º 3
0
 def do_eval(dmv_model, w2i, pos, options, epoch=0):
     """Parse the development set with *dmv_model* and score the parses.

     Reads the dev sentences, batches them, Viterbi-parses every batch with
     the Eisner parser, and hands the predicted (heads, tags) pairs to
     ``utils.eval``.

     Args:
         dmv_model: trained DMV model providing ``evaluate_batch_score``.
         w2i: word-to-index vocabulary mapping.
         pos: POS-tag-to-index mapping.
         options: run options (dev path, output dir, batchsize, masks, ...).
         epoch: current epoch, used only to name the prediction/log files.
             It was previously read as a free, undefined name (``NameError``
             at call time); it is now an explicit parameter with a default.
     """
     print("====================================")
     print('Do evaluation on development set')
     eval_sentences = utils.read_data(options.dev, True)
     dmv_model.eval()
     eval_sen_idx = 0
     eval_data_list = list()
     devpath = os.path.join(
         options.output,
         'eval_pred' + str(epoch + 1) + '_' + str(options.sample_idx))
     # Each entry is [pos_ids, word_ids, [sentence_index]].
     for s in eval_sentences:
         s_word, s_pos = s.set_data_list(w2i, pos)
         s_data_list = list()
         s_data_list.append(s_pos)
         s_data_list.append(s_word)
         s_data_list.append([eval_sen_idx])
         eval_data_list.append(s_data_list)
         eval_sen_idx += 1
     eval_batch_data = utils.construct_batch_data(eval_data_list,
                                                  options.batchsize)
     parse_results = {}
     for batch_id, one_batch in enumerate(eval_batch_data):
         eval_batch_pos = np.array([s[0] for s in one_batch])
         eval_batch_words = np.array([s[1] for s in one_batch])
         eval_batch_sen = [s[2][0] for s in one_batch]
         batch_score, batch_decision_score = dmv_model.evaluate_batch_score(
             eval_batch_words, eval_batch_pos)
         if options.function_mask:
             batch_score = dmv_model.function_to_mask(
                 batch_score, eval_batch_pos)
         batch_parse = eisner_for_dmv.batch_parse(batch_score,
                                                  batch_decision_score,
                                                  dmv_model.dvalency,
                                                  dmv_model.cvalency)
         # batch_parse is (heads, tags); results keyed by corpus index.
         for i in range(len(eval_batch_pos)):
             parse_results[eval_batch_sen[i]] = (batch_parse[0][i],
                                                 batch_parse[1][i])
     utils.eval(parse_results, eval_sentences, devpath,
                options.log + '_dev' + str(options.sample_idx), epoch)
     # utils.write_distribution(dmv_model)
     print("====================================")
Exemplo n.º 4
0
 def do_eval(dmv_model, m_model, pos, languages, language_map, epoch,
             options):
     """Multilingual evaluation on the dev set.

     Optionally lets the neural model ``m_model`` predict per-sentence (or
     per-language) transition parameters before Eisner parsing, records the
     parses for ``utils.eval_ml``, and reports language-classification
     accuracy when prediction is enabled.
     NOTE(review): uses Python 2 print statements; py2-only as written.
     """
     print "===================================="
     print 'Do evaluation'
     # Pick the language set: the trained languages, or a fresh list when
     # evaluating on languages unseen during training.
     if not options.eval_new_language:
         eval_language_set = languages.keys()
         eval_languages = languages
     else:
         eval_language_set = utils.read_language_list(options.language_path)
         eval_languages = {l: i for i, l in enumerate(eval_language_set)}
     eval_file_list = os.listdir(options.dev)
     eval_file_set = utils.get_file_set(eval_file_list, eval_language_set,
                                        False)
     eval_sentences, eval_language_map = utils.read_multiple_data(
         options.dev, eval_file_set, True)
     dmv_model.eval()
     if options.use_neural:
         m_model.eval()
     devpath = os.path.join(
         options.output,
         'eval_pred' + str(epoch + 1) + '_' + str(options.sample_idx))
     eval_data_list, _, eval_sentence_map = utils.construct_ml_pos_data(
         eval_sentences, pos, eval_languages, eval_language_map)
     eval_batch_data = utils.construct_batch_data(eval_data_list,
                                                  options.batchsize)
     parse_results = {}
     # One predicted language id per dev sentence (filled only when
     # sentence/language prediction is active).
     classify_results = np.zeros(len(eval_data_list))
     # Per-sentence transition parameters are only materialized once the
     # neural phase has started; otherwise the DMV tables are used directly.
     if options.sentence_predict and epoch > options.non_neural_iter:
         eval_trans_param = np.zeros(
             (len(eval_data_list), len(pos.keys()), len(pos.keys()), 2,
              options.c_valency))
     else:
         eval_trans_param = None
     for batch_id, one_batch in enumerate(eval_batch_data):
         eval_batch_pos, eval_batch_lan, eval_batch_sen = [
             s[0] for s in one_batch
         ], [s[1] for s in one_batch], [s[2][0] for s in one_batch]
         eval_batch_sen = np.array(eval_batch_sen)
         eval_batch_lan = np.array(eval_batch_lan)
         eval_batch_pos = np.array(eval_batch_pos)
         # Neural prediction path: build flat (pos, dir, valency, language,
         # sentence) sample tensors and let m_model predict transition rows.
         if (options.sentence_predict and epoch > options.non_neural_iter
             ) or options.language_predict:
             batch_rule_samples = dmv_model.find_predict_samples(
                 eval_batch_pos, eval_batch_lan, eval_batch_sen)
             batch_predict_data = utils.construct_ml_predict_data(
                 batch_rule_samples)
             batch_predict_pos_v = torch.LongTensor(
                 batch_predict_data['pos'])
             batch_predict_pos_index = np.array(batch_predict_data['pos'])
             batch_predict_dir_v = torch.LongTensor(
                 batch_predict_data['dir'])
             batch_predict_dir_index = np.array(batch_predict_data['dir'])
             batch_predict_cvalency_v = torch.LongTensor(
                 batch_predict_data['cvalency'])
             batch_predict_cvalency_index = np.array(
                 batch_predict_data['cvalency'])
             batch_predict_lan_v = torch.LongTensor(
                 batch_predict_data['languages'])
             batch_predict_lan_index = np.array(
                 batch_predict_data['languages'])
             batch_predict_sen_v = []
             for sentence_id in batch_predict_data['sentence']:
                 batch_predict_sen_v.append(eval_sentence_map[sentence_id])
             batch_predict_sen_index = np.array(
                 batch_predict_data['sentence'])
             batch_predict_sen_v = torch.LongTensor(batch_predict_sen_v)
             batch_predicted, batch_predicted_lan = m_model.forward_(
                 batch_predict_pos_v, batch_predict_dir_v,
                 batch_predict_cvalency_v, None, None, True, 'child',
                 batch_predict_lan_v, batch_predict_sen_v, None)
             if options.sentence_predict or options.language_predict:
                 # Evaluation of language pediction
                 for i in range(len(batch_predict_sen_v)):
                     sentence_idx = batch_predict_data['sentence'][i]
                     classify_results[sentence_idx] = batch_predicted_lan[i]
                 if options.sentence_predict:
                     # Scatter predicted transition rows into the
                     # per-sentence parameter tensor.
                     eval_trans_param[
                         batch_predict_sen_index,
                         batch_predict_pos_index, :,
                         batch_predict_dir_index,
                         batch_predict_cvalency_index] = batch_predicted.detach(
                         ).numpy()
             else:
                 # Language-indexed (not sentence-indexed) parameter layout.
                 eval_trans_param[
                     batch_predict_pos_index, :, batch_predict_dir_index,
                     batch_predict_cvalency_index,
                     batch_predict_lan_index] = batch_predicted.detach(
                     ).numpy()
         batch_score, batch_decision_score = dmv_model.evaluate_batch_score(
             eval_batch_pos, eval_batch_sen, eval_language_map,
             eval_languages, eval_trans_param)
         if options.function_mask:
             batch_score = dmv_model.function_to_mask(
                 batch_score, eval_batch_pos)
         # Insert singleton valency axes expected by the Eisner parser.
         batch_score = np.expand_dims(batch_score, 3)
         batch_score = np.expand_dims(batch_score, 4)
         batch_decision_score = np.expand_dims(batch_decision_score, 2)
         batch_parse = eisner_for_dmv.batch_parse(batch_score,
                                                  batch_decision_score,
                                                  dmv_model.dvalency,
                                                  dmv_model.cvalency)
         for i in range(len(eval_batch_pos)):
             parse_results[eval_batch_sen[i]] = (batch_parse[0][i],
                                                 batch_parse[1][i])
     utils.eval_ml(parse_results, eval_sentences, devpath,
                   options.log + '_dev' + str(options.sample_idx),
                   eval_language_map, eval_languages, epoch)
     # utils.write_distribution(dmv_model)
     print "===================================="
     # language classification results
     if not options.eval_new_language and (options.sentence_predict
                                           or options.language_predict):
         correct = 0
         for i in range(len(classify_results)):
             if classify_results[i] == languages[eval_language_map[i]]:
                 correct += 1
         correct_rate = float(correct) / len(classify_results)
         print "Language classification accuracy " + str(correct_rate)
Exemplo n.º 5
0
    # NOTE(review): fragment of a larger script — `learned_model`, `options`,
    # `w2i` and `pos` are defined before this excerpt. It parses the test set
    # with the learned model and scores the parses via utils.eval.
    print('Model loaded')
    learned_model.eval()
    outpath = os.path.join(options.output, 'test_pred' + '_' + str(options.sample_idx))
    eval_sentences = utils.read_data(options.test, True)
    learned_model.eval()  # NOTE(review): duplicate of the .eval() call above
    eval_sen_idx = 0
    eval_data_list = list()

    # Each entry is [word_ids, pos_ids, [sentence_index]].
    for s in eval_sentences:
        s_word, s_pos = s.set_data_list(w2i, pos)
        s_data_list = list()
        s_data_list.append(s_word)
        s_data_list.append(s_pos)
        s_data_list.append([eval_sen_idx])
        eval_data_list.append(s_data_list)
        eval_sen_idx += 1
    eval_batch_data = utils.construct_batch_data(eval_data_list, options.batchsize)
    parse_results = {}
    for batch_id, one_batch in enumerate(eval_batch_data):
        eval_batch_words, eval_batch_pos, eval_batch_sen = [s[0] for s in one_batch], [s[1] for s in one_batch], \
                                                           [s[2][0] for s in one_batch]
        eval_batch_words = np.array(eval_batch_words)
        eval_batch_pos = np.array(eval_batch_pos)
        batch_score, batch_decision_score = learned_model.evaluate_batch_score(eval_batch_words, eval_batch_pos)
        batch_parse = eisner_for_dmv.batch_parse(batch_score, batch_decision_score, learned_model.dvalency,
                                                 learned_model.cvalency)
        # batch_parse is (heads, tags); results keyed by corpus index.
        for i in range(len(eval_batch_pos)):
            parse_results[eval_batch_sen[i]] = (batch_parse[0][i], batch_parse[1][i])
    utils.eval(parse_results, eval_sentences, outpath,
               options.log + str(options.model_idx) + '_' + str(options.sample_idx), 0)
Exemplo n.º 6
0
    def do_eval(dmv_model, m_model, pos, options, epoch):
        """Evaluate on the dev set, optionally with neural transition params.

        After the initial (non-neural) phase, ``m_model`` predicts
        per-sentence transition parameters which are fed back into the DMV
        scorer before Eisner parsing.
        NOTE(review): uses Python 2 print statements, and reads a global
        ``languages`` mapping defined outside this function.
        """
        print "===================================="
        print 'Do evaluation on development set'
        # eval_sentences = utils.read_data(options.dev, True)
        if not options.load_model:
            ml_sentences = utils.read_ml_corpus(options.language_path,
                                                options.dev,
                                                stc_length=15,
                                                isPredict=True,
                                                isadd=False)
        else:
            ml_sentences = utils.read_ml_corpus(options.language_path,
                                                options.dev,
                                                stc_length=15,
                                                isPredict=True,
                                                isadd=True)
        eval_sentences = ml_sentences[0]
        dmv_model.eval()
        eval_sentence_map = {}
        eval_sen_idx = 0
        eval_data_list = list()
        devpath = os.path.join(
            options.output,
            'eval_pred' + str(epoch + 1) + '_' + str(options.sample_idx))
        lang_id = languages[
            options.
            dev] if options.dev in languages else 0  # 0 is manually specified (when dev_lang is not trained before)
        # Each entry is [pos_ids, [sentence_index]]; also keep a map from
        # sentence index to its POS sequence for the prediction path.
        for s in eval_sentences:
            _, s_pos = s.set_data_list(None, pos)
            s_data_list = list()
            s_data_list.append(s_pos)
            s_data_list.append([eval_sen_idx])
            eval_data_list.append(s_data_list)
            eval_sentence_map[eval_sen_idx] = s_pos
            eval_sen_idx += 1
        eval_batch_data = utils.construct_batch_data(eval_data_list,
                                                     options.batchsize)
        parse_results = {}
        # Per-sentence transition params: (sentence, head_pos, child_pos,
        # dir, cvalency); filled by the neural model after the initial phase.
        eval_sentence_trans_param = np.zeros(
            (len(eval_data_list), len(pos.keys()), len(pos.keys()), 2,
             options.c_valency))
        for batch_id, one_batch in enumerate(eval_batch_data):
            eval_batch_pos, eval_batch_sen = [s[0] for s in one_batch
                                              ], [s[1][0] for s in one_batch]
            eval_batch_sen = np.array(eval_batch_sen)
            eval_batch_pos = np.array(eval_batch_pos)
            if dmv_model.initial_flag:
                # Initial phase: score directly from the DMV tables.
                batch_score, batch_root_score, batch_decision_score = dmv_model.evaluate_batch_score(
                    eval_batch_pos, eval_batch_sen, None, epoch)
            else:
                # Neural phase: build flat (pos, dir, valency, sentence)
                # sample tensors and let m_model predict transition rows.
                batch_rule_samples = dmv_model.find_predict_samples(
                    eval_batch_pos, eval_batch_sen)
                batch_predict_data = utils.construct_ml_predict_data(
                    batch_rule_samples)
                batch_predict_pos_v = torch.LongTensor(
                    batch_predict_data['pos'])
                batch_predict_pos_index = np.array(batch_predict_data['pos'])
                batch_predict_dir_v = torch.LongTensor(
                    batch_predict_data['dir'])
                batch_predict_dir_index = np.array(batch_predict_data['dir'])
                batch_predict_cvalency_v = torch.LongTensor(
                    batch_predict_data['cvalency'])
                batch_predict_cvalency_index = np.array(
                    batch_predict_data['cvalency'])
                batch_predict_sen_v = []
                for sentence_id in batch_predict_data['sentence']:
                    batch_predict_sen_v.append(eval_sentence_map[sentence_id])
                batch_predict_sen_index = np.array(
                    batch_predict_data['sentence'])
                batch_predict_sen_v = torch.LongTensor(batch_predict_sen_v)
                batch_predict_sen_len = torch.LongTensor(
                    np.array([len(i) for i in batch_predict_sen_v]))
                batch_predict_lan_v = torch.LongTensor(
                    np.array([lang_id for _ in batch_predict_sen_v]))  # TODO
                batch_predicted = m_model.forward_(batch_predict_pos_v,
                                                   batch_predict_dir_v,
                                                   batch_predict_cvalency_v,
                                                   None,
                                                   None,
                                                   True,
                                                   'child',
                                                   options.em_type,
                                                   batch_predict_lan_v,
                                                   batch_predict_sen_v,
                                                   batch_predict_sen_len,
                                                   epoch=epoch)

                # Scatter predicted rows into the per-sentence param tensor,
                # then rescore the batch with those parameters.
                eval_sentence_trans_param[
                    batch_predict_sen_index, batch_predict_pos_index, :,
                    batch_predict_dir_index,
                    batch_predict_cvalency_index] = batch_predicted.detach(
                    ).numpy()
                batch_score, batch_root_score, batch_decision_score = dmv_model.evaluate_batch_score(
                    eval_batch_pos, eval_batch_sen, eval_sentence_trans_param,
                    epoch)
            batch_size, sentence_length, _, v_c_num = batch_score.shape
            _, _, _, v_d_num, _ = batch_decision_score.shape

            # Pad a -inf row/column at index 0 so position 0 acts as the
            # artificial ROOT for the Eisner parser.
            batch_score = np.concatenate(
                (np.full((batch_size, 1, sentence_length, v_c_num),
                         -np.inf), batch_score),
                axis=1)  # for eisner
            batch_score = np.concatenate(
                (np.full((batch_size, sentence_length + 1, 1, v_c_num),
                         -np.inf), batch_score),
                axis=2)  # for eisner
            batch_score[:, 0, 1:, 0] = batch_root_score
            # Decisions at the ROOT position are free (log prob 0).
            batch_decision_score = np.concatenate((np.zeros(
                (batch_size, 1, 2, v_d_num, 2)), batch_decision_score),
                                                  axis=1)

            # Insert singleton axes expected by the Eisner parser.
            batch_score = np.expand_dims(batch_score, 3)
            batch_score = np.expand_dims(batch_score, 4)
            batch_decision_score = np.expand_dims(batch_decision_score, 2)
            batch_parse = eisner_for_dmv.batch_parse(batch_score,
                                                     batch_decision_score,
                                                     dmv_model.dvalency,
                                                     dmv_model.cvalency)
            for i in range(len(eval_batch_pos)):
                parse_results[eval_batch_sen[i]] = (batch_parse[0][i],
                                                    batch_parse[1][i])
        utils.eval(parse_results, eval_sentences, devpath,
                   options.log + '_dev' + str(options.sample_idx), epoch)
        # utils.write_distribution(dmv_model)
        print "===================================="
Exemplo n.º 7
0
    def do_eval(dmv_model, m_model, pos, words, options, epoch):
        """Evaluate on the dev set with lexicalized neural transition params.

        After the initial phase, ``m_model`` predicts per-sentence
        (word- or tag-indexed, depending on ``options.full_lex``) transition
        parameters, one sentence at a time, before Eisner parsing.
        NOTE(review): reads a global ``languages`` mapping defined outside
        this function.
        """
        print("====================================")
        print('Do evaluation on development set')
        # eval_sentences = utils.read_data(options.dev, True)
        if not options.load_model:
            ml_sentences = utils.read_ml_corpus(
                options.language_path,
                options.dev,
                stc_length=options.val_stc_length,
                isPredict=True,
                isadd=False)
        else:
            ml_sentences = utils.read_ml_corpus(
                options.language_path,
                options.dev,
                stc_length=options.val_stc_length,
                isPredict=True,
                isadd=True)
        eval_sentences = ml_sentences[0]
        dmv_model.eval()
        eval_sentence_map = {}
        eval_sentence_word_map = {}
        eval_sen_idx = 0
        eval_data_list = list()
        devpath = os.path.join(
            options.output,
            'eval_pred' + str(epoch + 1) + '_' + str(options.sample_idx))
        lang_id = languages[
            options.
            dev] if options.dev in languages else 0  # 0 is manually specified (when dev_lang is not trained before)
        # Each entry is [[pos_ids, word_ids], [sentence_index]]; also keep
        # maps from sentence index to its POS and word sequences.
        for s in eval_sentences:
            s_word, s_pos = s.set_data_list(words, pos)
            s_data_list = list()
            s_data_list.append([s_pos, s_word])
            s_data_list.append([eval_sen_idx])
            eval_data_list.append(s_data_list)
            eval_sentence_map[eval_sen_idx] = s_pos
            eval_sentence_word_map[eval_sen_idx] = s_word
            eval_sen_idx += 1
        eval_batch_data = utils.construct_batch_data(eval_data_list,
                                                     options.batchsize)
        # Per-sentence transition params: (sentence, head_position,
        # child_position, cvalency); filled by the neural model below.
        eval_sentence_trans_param = np.zeros(
            (len(eval_data_list), options.val_stc_length,
             options.val_stc_length, options.c_valency))
        parse_results = {}
        for batch_id, one_batch in enumerate(eval_batch_data):
            eval_batch_token, eval_batch_sen = [s[0] for s in one_batch], [
                s[1][0] for s in one_batch
            ]
            eval_batch_sen = np.array(eval_batch_sen)
            eval_batch_token = np.array(eval_batch_token)
            if dmv_model.initial_flag:
                # Initial phase: score directly from the DMV tables.
                batch_score, batch_root_score, batch_decision_score = dmv_model.evaluate_batch_score(
                    eval_batch_token, eval_batch_sen, None, epoch)
            else:
                # batch_rule_samples = dmv_model.find_predict_samples(eval_batch_token, eval_batch_sen)
                # batch_predict_data = utils.construct_ml_predict_data(batch_rule_samples)
                # batch_predict_pos_v = torch.LongTensor(batch_predict_data['pos'])
                # batch_predict_pos_index = np.array(batch_predict_data['pos'])
                # batch_predict_dir_v = torch.LongTensor(batch_predict_data['dir'])
                # batch_predict_dir_index = np.array(batch_predict_data['dir'])
                # batch_predict_cvalency_v = torch.LongTensor(batch_predict_data['cvalency'])
                # batch_predict_cvalency_index = np.array(batch_predict_data['cvalency'])
                # batch_predict_sen_v = []
                # for sentence_id in batch_predict_data['sentence']:
                #     batch_predict_sen_v.append(eval_sentence_map[sentence_id])
                # batch_predict_sen_index = np.array(batch_predict_data['sentence'])
                # batch_predict_sen_v = torch.LongTensor(batch_predict_sen_v)
                # batch_predict_sen_len = torch.LongTensor(np.array([len(i) for i in batch_predict_sen_v]))
                # batch_predict_lan_v = torch.LongTensor(np.array([lang_id for _ in batch_predict_sen_v]))  # TODO
                # batch_predicted, batch_word_predicted = m_model.forward_(batch_predict_pos_v, batch_predict_dir_v, batch_predict_cvalency_v,
                #                                    None, None, None, True, 'child', options.em_type, batch_predict_lan_v, batch_predict_sen_v,
                #                                    batch_predict_sen_len, epoch=epoch)
                # eval_sentence_trans_param[batch_predict_sen_index, batch_predict_word_index, batch_predict_output_word_index, batch_predict_cvalency_index] = batch_predicted.detach().numpy()
                # batch_score, batch_root_score, batch_decision_score = dmv_model.evaluate_batch_score(eval_batch_token, eval_batch_sen, eval_sentence_trans_param, epoch)

                # Per sentence: enumerate every (head, child, valency) triple
                # as flat tensors, predict transition rows with m_model, and
                # scatter them into eval_sentence_trans_param.
                for i in range(len(one_batch)):
                    sentence_map = eval_sentence_map
                    sentence_word_map = eval_sentence_word_map
                    cvalency = options.c_valency
                    s = eval_batch_sen[i]
                    # All prediction tensors have len(sentence)**2 rows
                    # (times cvalency for the per-triple ones below).
                    batch_target_lan_v = torch.LongTensor([lang_id]).expand(
                        len(sentence_map[s])**2)  # TODO hanwj
                    batch_input_len = torch.LongTensor([len(
                        sentence_map[s])]).expand(len(sentence_map[s])**2)
                    batch_input_sen_v = torch.LongTensor([
                        sentence_map[s]
                    ]).expand(len(sentence_map[s])**2, len(sentence_map[s]))
                    batch_input_sen_v_mlist = torch.LongTensor(
                        [sentence_word_map[s]]).expand(
                            len(sentence_word_map[s])**2,
                            len(sentence_word_map[s]))
                    one_batch_input_pos = torch.LongTensor([
                        sentence_map[s][h] for h in range(len(sentence_map[s]))
                        for _ in range(len(sentence_map[s]))
                        for v in range(cvalency)
                    ])
                    one_batch_input_word = torch.LongTensor([
                        sentence_word_map[s][h]
                        for h in range(len(sentence_word_map[s]))
                        for c in range(len(sentence_word_map[s]))
                        for v in range(cvalency)
                    ])
                    one_batch_output_word = torch.LongTensor([
                        sentence_word_map[s][c]
                        for h in range(len(sentence_word_map[s]))
                        for c in range(len(sentence_word_map[s]))
                        for v in range(cvalency)
                    ])
                    one_batch_output_pos = torch.LongTensor([
                        sentence_map[s][c] for h in range(len(sentence_map[s]))
                        for c in range(len(sentence_map[s]))
                        for v in range(cvalency)
                    ])
                    # Direction: 1 when the head precedes the child.
                    one_batch_dir = torch.LongTensor([
                        1 if h < c else 0 for h in range(len(sentence_map[s]))
                        for c in range(len(sentence_map[s]))
                        for v in range(cvalency)
                    ])
                    one_batch_cvalency = torch.LongTensor([
                        v for h in range(len(sentence_map[s]))
                        for c in range(len(sentence_map[s]))
                        for v in range(cvalency)
                    ])
                    # Position indices (not ids) used for the scatter below.
                    one_batch_input_tag_index = np.array([
                        h for h in range(len(sentence_map[s]))
                        for c in range(len(sentence_map[s]))
                        for v in range(cvalency)
                    ])
                    one_batch_input_word_index = np.array([
                        h for h in range(len(sentence_word_map[s]))
                        for c in range(len(sentence_word_map[s]))
                        for v in range(cvalency)
                    ])
                    one_batch_output_tag_index = np.array([
                        c for h in range(len(sentence_map[s]))
                        for c in range(len(sentence_map[s]))
                        for v in range(cvalency)
                    ])
                    one_batch_output_word_index = np.array([
                        c for h in range(len(sentence_word_map[s]))
                        for c in range(len(sentence_word_map[s]))
                        for v in range(cvalency)
                    ])
                    one_batch_cvalency_index = np.array([
                        v for h in range(len(sentence_map[s]))
                        for c in range(len(sentence_map[s]))
                        for v in range(cvalency)
                    ])
                    predicted_trans_param, predicted_trans_param_word = m_model.forward_(
                        one_batch_input_pos,
                        one_batch_input_word,
                        one_batch_input_word_index,
                        one_batch_dir,
                        one_batch_cvalency,
                        None,
                        None,
                        None,
                        True,
                        'child',
                        options.em_type,
                        batch_target_lan_v,
                        batch_input_sen_v_mlist,
                        batch_input_sen_v,
                        batch_input_len,
                        epoch=epoch)
                    # eval_sentence_trans_param[eval_batch_sen[s]][one_batch_input_word_index, one_batch_output_word_index, one_batch_cvalency_index] = predicted_trans_param_word.detach().numpy()[range((len(sentence_map[
                    #                                                       s]) ** 2) * cvalency), one_batch_output_word]  # .reshape(one_batch_size, target_pos_num, 1, 1)
                    # Select word-level or tag-level predictions per option.
                    if options.full_lex:
                        eval_sentence_trans_param[s][
                            one_batch_input_word_index, one_batch_output_word_index, one_batch_cvalency_index] = \
                        predicted_trans_param_word.detach().numpy()[range((len(sentence_word_map[
                                                                                   s]) ** 2) * cvalency), one_batch_output_word]  # .reshape(one_batch_size, target_pos_num, 1, 1)
                    else:
                        eval_sentence_trans_param[s][
                            one_batch_input_tag_index, one_batch_output_tag_index, one_batch_cvalency_index] = \
                        predicted_trans_param.detach().numpy()[range((len(sentence_map[
                                                                              s]) ** 2) * cvalency), one_batch_output_pos]  # .reshape(one_batch_size, target_pos_num, 1, 1)

                    # NOTE(review): this rescoring call sits INSIDE the
                    # per-sentence loop, so the whole batch is rescored once
                    # per sentence (and batch_score would be unbound for an
                    # empty batch) — looks like it was meant to be one level
                    # out; confirm against the training-side twin.
                    batch_score, batch_root_score, batch_decision_score = dmv_model.evaluate_batch_score(
                        eval_batch_token, eval_batch_sen,
                        eval_sentence_trans_param, epoch)

            batch_size, sentence_length, _, v_c_num = batch_score.shape
            _, _, _, v_d_num, _ = batch_decision_score.shape

            # Pad a -inf row/column at index 0 so position 0 acts as the
            # artificial ROOT for the Eisner parser.
            batch_score = np.concatenate(
                (np.full((batch_size, 1, sentence_length, v_c_num),
                         -np.inf), batch_score),
                axis=1)  # for eisner
            batch_score = np.concatenate(
                (np.full((batch_size, sentence_length + 1, 1, v_c_num),
                         -np.inf), batch_score),
                axis=2)  # for eisner
            batch_score[:, 0, 1:, 0] = batch_root_score
            # Decisions at the ROOT position are free (log prob 0).
            batch_decision_score = np.concatenate((np.zeros(
                (batch_size, 1, 2, v_d_num, 2)), batch_decision_score),
                                                  axis=1)

            # Insert singleton axes expected by the Eisner parser.
            batch_score = np.expand_dims(batch_score, 3)
            batch_score = np.expand_dims(batch_score, 4)
            batch_decision_score = np.expand_dims(batch_decision_score, 2)
            batch_parse = eisner_for_dmv.batch_parse(batch_score,
                                                     batch_decision_score,
                                                     dmv_model.dvalency,
                                                     dmv_model.cvalency)
            for i in range(len(eval_batch_token)):
                parse_results[eval_batch_sen[i]] = (batch_parse[0][i],
                                                    batch_parse[1][i])
        utils.eval(parse_results, eval_sentences, devpath,
                   options.log + '_dev' + str(options.sample_idx), epoch)
        # utils.write_distribution(dmv_model)
        print("====================================")