import tensorflow as tf  # TF 1.x: tf.contrib.crf was removed in TF 2.x

# NOTE: FileUtil and label2entity are project-level helpers assumed to be in
# scope (file I/O and label-sequence-to-entity-span conversion, respectively).


def test_save(model, config, tf_transition_params_test, dataset):
    """Viterbi-decode the dev/test split, report token-level precision,
    recall, and F1, and write the predicted entity labels to disk."""
    step = 1
    ans = []  # Viterbi label sequence for each question
    t = 0     # number of examples decoded; sanity-checked against the question count below
    if dataset == "test":
        iters = model.testing_iters
    else:
        iters = model.deving_iters
    tp = 0  # token predicted 1, gold 1
    fn = 0  # token predicted 0, gold 1
    fp = 0  # token predicted 1, gold 0
    # Rewind both batch cursors so evaluation always starts at the first example.
    model.qa.batch_index_dev = 0
    model.qa.batch_index_test = 0
    while (step - 1) * config.batch_size < iters:
        x_lens, test_x, y = model.qa.load_data(config.batch_size, dataset)
        t += len(x_lens)
        # Per-token unary (emission) scores from the network; the CRF transition
        # matrix learned at training time is passed in as tf_transition_params_test.
        tf_unary_scores = model.sess.run(
            model.unary_scores,
            feed_dict={model.question_ids: test_x, model.x_lens: x_lens})
        for tf_unary_scores_, y_, sequence_length_ in zip(tf_unary_scores, y, x_lens):
            # Drop the padded positions before decoding.
            tf_unary_scores_ = tf_unary_scores_[:sequence_length_]
            viterbi_sequence, _ = tf.contrib.crf.viterbi_decode(
                tf_unary_scores_, tf_transition_params_test)
            ans.append(viterbi_sequence)
            for i, p in enumerate(viterbi_sequence):
                if p == 1 and y_[i] == 1:
                    tp += 1
                elif p == 0 and y_[i] == 1:
                    fn += 1
                elif p == 1 and y_[i] == 0:
                    fp += 1
        step += 1
    # Token-level metrics; assumes at least one predicted and one gold positive.
    precision = tp * 1.0 / (tp + fp)
    recall = tp * 1.0 / (tp + fn)
    f1 = 2 * precision * recall / (precision + recall)
    print("{} precision: {} recall: {} f1: {}".format(dataset, precision, recall, f1))
    questions = model.qa.get_questions(dataset)
    print("ans len:\t", t)
    print("question:\t", len(questions))
    output = []
    for i in range(len(questions)):
        output.append("qid: {}".format(i))
        output.append(FileUtil.list2str(
            label2entity(questions[i], ans[i]), split="\t"))
    FileUtil.writeFile(
        output, "{}/sq.{}.label".format(config.answer_path, dataset))
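
# For reference, tf.contrib.crf.viterbi_decode runs on the CPU over plain
# NumPy arrays. The sketch below is my own minimal re-implementation of that
# dynamic program (not part of this repo), assuming `score` is a
# [seq_len, num_tags] unary-score matrix and `transition_params` is a
# [num_tags, num_tags] transition matrix.

import numpy as np


def viterbi_decode_np(score, transition_params):
    """Return (best_tag_sequence, best_path_score) for one sequence."""
    trellis = np.zeros_like(score)
    backpointers = np.zeros_like(score, dtype=np.int32)
    trellis[0] = score[0]
    for i in range(1, score.shape[0]):
        # v[prev, cur] = best score ending in `prev` plus the transition to `cur`.
        v = np.expand_dims(trellis[i - 1], 1) + transition_params
        trellis[i] = score[i] + np.max(v, 0)
        backpointers[i] = np.argmax(v, 0)
    # Backtrace from the best final tag.
    viterbi = [int(np.argmax(trellis[-1]))]
    for bp in reversed(backpointers[1:]):
        viterbi.append(int(bp[viterbi[-1]]))
    viterbi.reverse()
    return viterbi, float(np.max(trellis[-1]))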
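
# The metric block in test_save divides by (tp + fp) and (tp + fn) directly,
# so it raises ZeroDivisionError if the model predicts no positive tokens (or
# the gold data contains none). A hedged standalone variant (the name `prf1`
# is mine, not from this repo) that guards the empty cases:


def prf1(tp, fp, fn):
    """Token-level precision/recall/F1; returns 0.0 where a metric is undefined."""
    precision = tp * 1.0 / (tp + fp) if tp + fp else 0.0
    recall = tp * 1.0 / (tp + fn) if tp + fn else 0.0
    f1 = 2 * precision * recall / (precision + recall) if precision + recall else 0.0
    return precision, recall, f1


# Worked check: 8 true positives, 2 false positives, 4 false negatives gives
# precision = 8/10 = 0.8, recall = 8/12 ~ 0.667, f1 = 8/11 ~ 0.727.
print(prf1(8, 2, 4))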