def predict(args): """ predicts answers for test files """ logger = logging.getLogger("brc") logger.info('Load data_set and vocab...') # with open(os.path.join(args.vocab_dir, 'vocab.data'), 'rb') as fin: with open(args.vocab_path, 'rb') as fin: vocab = pickle.load(fin) assert len(args.test_files) > 0, 'No test files are provided.' brc_data = BRCDataset(args.max_p_num, args.max_p_len, args.max_q_len, test_files=args.test_files) logger.info('Converting text into ids...') brc_data.convert_to_ids(vocab) logger.info('Restoring the model...') rc_model = RCModel(vocab, args) rc_model.restore(model_dir=args.model_dir, model_prefix=args.algo) logger.info('Predicting answers for test set...') test_batches = brc_data.gen_mini_batches('test', args.batch_size, pad_id=vocab.get_id( vocab.pad_token), shuffle=False) rc_model.evaluate(test_batches, result_dir=args.result_dir, result_prefix='test.predicted')
def predict(args): """ 预测测试文件的答案 """ logger = logging.getLogger("brc") logger.info('加载数据集和词汇表...') with open(os.path.join(args.vocab_dir, 'vocab.data'), 'rb') as fin: vocab = pickle.load(fin) assert len(args.test_files) > 0, '找不到测试文件.' brc_data = BRCDataset(args.max_p_num, args.max_p_len, args.max_q_len, test_files=args.test_files) logger.info('把文本转化为id序列...') brc_data.convert_to_ids(vocab) logger.info('重载模型...') rc_model = RCModel(vocab, args) rc_model.restore(model_dir=args.model_dir, model_prefix=args.algo) logger.info('预测测试集的答案...') test_batches = brc_data.gen_mini_batches('test', args.batch_size, pad_id=vocab.get_id( vocab.pad_token), shuffle=False) rc_model.evaluate(test_batches, result_dir=args.result_dir, result_prefix='test.predicted')
def predict(args): """ predicts answers for test files """ logger = logging.getLogger("Military AI") logger.info('Load data_set and vocab...') mai_data = MilitaryAiDataset(args.train_files, args.train_raw_files, args.test_files, args.test_raw_files, args.char_embed_file, args.token_embed_file, args.elmo_dict_file, args.elmo_embed_file, char_min_cnt=1, token_min_cnt=3) logger.info('Assigning embeddings...') if not args.use_embe: mai_data.token_vocab.randomly_init_embeddings(args.embed_size) mai_data.char_vocab.randomly_init_embeddings(args.embed_size) logger.info('Restoring the model...') rc_model = RCModel(mai_data.char_vocab, mai_data.token_vocab, mai_data.flag_vocab, mai_data.elmo_vocab, args) rc_model.restore(model_dir=args.model_dir, model_prefix=args.algo + args.suffix) logger.info('Predicting answers for test set...') test_batches = mai_data.gen_mini_batches('test', args.batch_size, shuffle=False) rc_model.evaluate(test_batches, result_dir=args.result_dir, result_prefix='test.predicted')
def train(args): """ trains the reading comprehension model """ logger = logging.getLogger("brc") logger.info('Loading vocab...') with open(os.path.join(args.vocab_dir, 'vocab.pkl'), 'rb') as fin: vocab = pickle.load(fin) fin.close() pad_id = vocab.get_id(vocab.pad_token) brc_data = BRCDataset(args.max_p_num, args.max_p_len, args.max_q_len, args.prepared_dir, args.train_files, args.dev_files, args.test_files) logger.info('Converting text into ids...') brc_data.convert_to_ids(vocab) g = tf.Graph() with g.as_default(): rc_model = RCModel(vocab.embeddings, pad_id, args) del vocab # Train with tf.name_scope("Train"): logger.info('Training the model...') rc_model.train(brc_data, args.epochs, args.batch_size, save_dir=args.result_dir, save_prefix='test.predicted', dropout_keep_prob=args.dropout_keep_prob) tf.summary.FileWriter(args.summary_dir, g).close() with tf.name_scope('Valid'): assert len(args.dev_files) > 0, 'No dev files are provided.' logger.info('Evaluating the model on dev set...') dev_batches = brc_data.gen_mini_batches('dev', args.batch_size, pad_id=pad_id, shuffle=False) dev_loss, dev_bleu_rouge = rc_model.evaluate( dev_batches, result_dir=args.result_dir, result_prefix='dev.predicted') logger.info('Loss on dev set: {}'.format(dev_loss)) logger.info('Result on dev set: {}'.format(dev_bleu_rouge)) logger.info('Predicted answers are saved to {}'.format( os.path.join(args.result_dir))) with tf.name_scope('Test'): assert len(args.test_files) > 0, 'No test files are provided.' logger.info('Predicting answers for test set...') test_batches = brc_data.gen_mini_batches('test', args.batch_size, pad_id=pad_id, shuffle=False) rc_model.evaluate(test_batches, result_dir=args.result_dir, result_prefix='test.predicted')
def evaluate(args): """ evaluate the trained model on dev files """ logger = logging.getLogger("brc") logger.info('Load data_set and vocab...') # with open(os.path.join(args.vocab_dir, 'vocab.data'), 'rb') as fin: with open(args.vocab_path, 'rb') as fin: vocab = pickle.load(fin) assert len(args.dev_files) > 0, 'No dev files are provided.' brc_data = BRCDataset(args.max_p_num, args.max_p_len, args.max_q_len, dev_files=args.dev_files) logger.info('Converting text into ids...') brc_data.convert_to_ids(vocab) logger.info('Restoring the model...') rc_model = RCModel(vocab, args) rc_model.restore(model_dir=args.model_dir, model_prefix=args.algo) logger.info('Evaluating the model on dev set...') dev_batches = brc_data.gen_mini_batches('dev', args.batch_size, pad_id=vocab.get_id( vocab.pad_token), shuffle=False) dev_loss, dev_bleu_rouge = rc_model.evaluate(dev_batches, result_dir=args.result_dir, result_prefix='dev.predicted') logger.info('Loss on dev set: {}'.format(dev_loss)) logger.info('Result on dev set: {}'.format(dev_bleu_rouge)) logger.info('Predicted answers are saved to {}'.format( os.path.join(args.result_dir)))
def evaluate(args): """ 对训练好的模型进行验证 """ logger = logging.getLogger("brc") logger.info('加wudi...') logger.info('加载数据集和词汇表...') with open(os.path.join(args.vocab_dir, 'vocab.data'), 'rb') as fin: vocab = pickle.load(fin) assert len(args.dev_files) > 0, '找不到验证文件.' brc_data = BRCDataset(args.max_p_num, args.max_p_len, args.max_q_len, dev_files=args.dev_files) logger.info('把文本转化为id序列...') brc_data.convert_to_ids(vocab) logger.info('重载模型...') rc_model = RCModel(vocab, args) rc_model.restore(model_dir=args.model_dir, model_prefix=args.algo) logger.info('验证模型...') dev_batches = brc_data.gen_mini_batches('dev', args.batch_size, pad_id=vocab.get_id( vocab.pad_token), shuffle=False) dev_loss, dev_bleu_rouge = rc_model.evaluate(dev_batches, result_dir=args.result_dir, result_prefix='dev.predicted') logger.info('验证集上的损失为: {}'.format(dev_loss)) logger.info('验证集的结果: {}'.format(dev_bleu_rouge)) logger.info('预测的答案证保存到 {}'.format(os.path.join(args.result_dir)))
def predict(args): """ predicts answers for test files """ logger = logging.getLogger("brc") logger.info('Load data_set and vocab...') with open(os.path.join(args.vocab_dir, 'vocab.data'), 'rb') as fin: vocab = pickle.load(fin) assert len(args.test_files) > 0, 'No test files are provided.' brc_data = BRCDataset(args.max_p_num, args.max_p_len, args.max_q_len, test_files=args.test_files) logger.info('Converting text into ids...') brc_data.convert_to_ids(vocab) logger.info('Restoring the model...') rc_model = RCModel(vocab, args) rc_model.restore(model_dir=args.model_dir, model_prefix=args.algo) logger.info('Predicting answers for test set...') test_batches = brc_data.gen_mini_batches('test', args.batch_size, pad_id=vocab.get_id(vocab.pad_token), shuffle=False) rc_model.evaluate(test_batches, result_dir=args.result_dir, result_prefix='test.predicted')
def predict(args): """ predicts answers for test files """ logger = logging.getLogger("brc") logger.info('Load data_set and vocab...') with open(os.path.join(args.vocab_dir, 'vocab.data'), 'rb') as fin: vocab = pickle.load(fin) assert len(args.test_files) > 0, 'No test files are provided.' brc_data = BRCDataset(args.max_p_num, args.max_p_len, args.max_q_len, args.max_word_len, test_files=args.test_files) logger.info('Converting text into ids...') brc_data.convert_to_ids(vocab) logger.info('Restoring the model...') rc_model = RCModel(vocab, args) rc_model.restore(model_dir=args.model_dir, model_prefix=args.algo) rc_model.finalize() # 增加完所有操作后采用sess.graph.finalize() # 来使得整个graph变为只读的 # 注意:tf.train.Saver() # 也算是往graph中添加node, 所以也必须放在finilize前 # 但是,,tf.train.Saver() # 只会存储 # 在该Saver声明时已经存在的变量!!! logger.info('Predicting answers for test set...') test_batches = brc_data.gen_mini_batches('test', args.batch_size, pad_id=vocab.get_id( vocab.pad_token), shuffle=False) rc_model.evaluate(test_batches, result_dir=args.result_dir, result_prefix='test.predicted')
def evaluate(args): """ evaluate the trained model on dev files """ logger = logging.getLogger("brc") logger.info('Load data_set and vocab...') with open(os.path.join(args.vocab_dir, 'vocab.data'), 'rb') as fin: vocab = pickle.load(fin) assert len(args.dev_files) > 0, 'No dev files are provided.' brc_data = BRCDataset(args.max_p_num, args.max_p_len, args.max_q_len, dev_files=args.dev_files) logger.info('Converting text into ids...') brc_data.convert_to_ids(vocab) logger.info('Restoring the model...') rc_model = RCModel(vocab, args) rc_model.restore(model_dir=args.model_dir, model_prefix=args.algo) logger.info('Evaluating the model on dev set...') dev_batches = brc_data.gen_mini_batches('dev', args.batch_size, pad_id=vocab.get_id(vocab.pad_token), shuffle=False) dev_loss, dev_bleu_rouge = rc_model.evaluate( dev_batches, result_dir=args.result_dir, result_prefix='dev.predicted') logger.info('Loss on dev set: {}'.format(dev_loss)) logger.info('Result on dev set: {}'.format(dev_bleu_rouge)) logger.info('Predicted answers are saved to {}'.format(os.path.join(args.result_dir)))
def evaluate(args): """ evaluate the trained model on dev files """ logger = logging.getLogger("Military AI") logger.info('Load data_set and vocab...') mai_data = MilitaryAiDataset(args.train_files, args.train_raw_files, args.test_files, args.test_raw_files, args.char_embed_file, args.token_embed_file, args.elmo_dict_file, args.elmo_embed_file, char_min_cnt=1, token_min_cnt=3) logger.info('Assigning embeddings...') if not args.use_embe: mai_data.token_vocab.randomly_init_embeddings(args.embed_size) mai_data.char_vocab.randomly_init_embeddings(args.embed_size) logger.info('Restoring the model...') rc_model = RCModel(mai_data.char_vocab, mai_data.token_vocab, mai_data.flag_vocab, mai_data.elmo_vocab, args) rc_model.restore(model_dir=args.model_dir, model_prefix=args.algo + args.suffix) logger.info('Evaluating the model on dev set...') dev_batches = mai_data.gen_mini_batches('dev', args.batch_size, shuffle=False) dev_loss, dev_main_loss, dev_bleu_rouge = rc_model.evaluate( dev_batches, result_dir=args.result_dir, result_prefix='dev.predicted') logger.info('Loss on dev set: {}'.format(dev_main_loss)) logger.info('Result on dev set: {}'.format(dev_bleu_rouge)) logger.info('Predicted answers are saved to {}'.format( os.path.join(args.result_dir)))
def predict(args): """ predicts answers for test files """ logger = logging.getLogger("brc") logger.info('Load data_set and vocab...') with open(args.vocab_path, 'rb') as fin: vocab = pickle.load(fin) assert len(args.test_files) > 0, 'No test files are provided.' brc_data = BRCDataset(args.algo, args.max_p_num, args.max_p_len, args.max_q_len, args.max_a_len, test_files=args.test_files) logger.info('Converting text into ids...') brc_data.convert_to_ids(vocab) logger.info('Restoring the model...') rc_model = RCModel(vocab, args) rc_model.restore(model_dir=args.model_dir, model_prefix=args.algo) logger.info('Predicting answers for test set...') test_batches = brc_data.gen_mini_batches('test', args.batch_size, pad_id=vocab.get_id( vocab.pad_token), shuffle=False) if args.algo == 'YESNO': qa_resultPath = args.test_files[0] #只会有一个文件! (filepath, tempfilename) = os.path.split(qa_resultPath) (qarst_filename, extension) = os.path.splitext(tempfilename) result_prefix = qarst_filename else: result_prefix = 'test.predicted.qa' rc_model.evaluate(test_batches, result_dir=args.result_dir, result_prefix=result_prefix) if args.algo == 'YESNO': #将YESNO结果合并入QA结果 qa_resultPath = args.test_files[0] #只会有一个文件! yesno_resultPath = args.result_dir + '/' + result_prefix + '.YESNO.json' out_file_path = args.result_dir + '/' + result_prefix + '.134.class.' + str( args.run_id) + '.json' #首先载入YESNO部分的预测结果 yesno_records = {} with open(yesno_resultPath, 'r') as f_in: for line in f_in: sample = json.loads(line) yesno_records[sample['question_id']] = line total_rst_num = 0 with open(qa_resultPath, 'r') as f_in: with open(out_file_path, 'w') as f_out: for line in f_in: total_rst_num += 1 sample = json.loads(line) if sample['question_id'] in yesno_records: line = yesno_records[sample['question_id']] f_out.write(line) print('total rst num : ', total_rst_num) print('yes no label combining done!')