def evaluate_model(config: Config, model: NNCRF, batch_insts_ids, name: str, insts: List[Instance]):
    ## evaluation
    metrics = np.asarray([0, 0, 0], dtype=int)
    batch_id = 0
    batch_size = config.batch_size
    for batch in batch_insts_ids:
        one_batch_insts = insts[batch_id * batch_size:(batch_id + 1) * batch_size]
        batch_max_scores, batch_max_ids = model.decode(batch)
        metrics += evaluate_batch_insts(batch_insts=one_batch_insts,
                                        batch_pred_ids=batch_max_ids,
                                        batch_gold_ids=batch[-1],
                                        word_seq_lens=batch[1],
                                        idx2label=config.idx2labels)
        batch_id += 1
    p, total_predict, total_entity = metrics[0], metrics[1], metrics[2]
    precision = p * 1.0 / total_predict * 100 if total_predict != 0 else 0
    recall = p * 1.0 / total_entity * 100 if total_entity != 0 else 0
    fscore = 2.0 * precision * recall / (precision + recall) if precision != 0 or recall != 0 else 0
    print("[%s set] Precision: %.2f, Recall: %.2f, F1: %.2f" % (name, precision, recall, fscore), flush=True)
    return [precision, recall, fscore]
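# A minimal usage sketch, not part of the original code: evaluate_model above
# returns [precision, recall, fscore], so a training driver can keep the
# checkpoint with the best dev F1. The names `train_one_epoch`, `dev_batches`,
# `dev_insts`, `best_model_path`, and `config.num_epochs` are illustrative
# assumptions only.
def train_and_select(config, model, train_one_epoch, dev_batches, dev_insts,
                     best_model_path: str = "best_model.pt"):
    best_dev_f1 = -1.0
    for epoch in range(config.num_epochs):
        train_one_epoch(model, epoch)                      # assumed training step
        dev_metrics = evaluate_model(config, model, dev_batches, "dev", dev_insts)
        if dev_metrics[2] > best_dev_f1:                   # index 2 is F1
            best_dev_f1 = dev_metrics[2]
            torch.save(model.state_dict(), best_model_path)
    return best_dev_f1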
def predict_with_constraints(config: Config, model: NNCRF, fold_batches: List[Tuple], folded_insts: List[Instance]):
    batch_id = 0
    batch_size = config.batch_size
    model.eval()
    for batch in fold_batches:
        one_batch_insts = folded_insts[batch_id * batch_size:(batch_id + 1) * batch_size]
        word_seq_lens = batch[1].cpu().numpy()
        if config.variant == "hard":
            with torch.no_grad():
                batch_max_scores, batch_max_ids = model.decode(batch)
            batch_max_ids = batch_max_ids.cpu().numpy()
            for idx in range(len(batch_max_ids)):
                length = word_seq_lens[idx]
                prediction = batch_max_ids[idx][:length].tolist()
                prediction = prediction[::-1]
                one_batch_insts[idx].output_ids = prediction
        else:  ## soft variant: assign soft (marginal) probabilities
            with torch.no_grad():
                marginals = model.get_marginal(batch)
            marginals = marginals.cpu().numpy()
            for idx in range(len(marginals)):
                length = word_seq_lens[idx]
                one_batch_insts[idx].marginals = marginals[idx, :length, :]
        batch_id += 1
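# Hypothetical helper, not in the original code, showing how the two kinds of
# fold annotation written above could be consumed: the "hard" variant stores
# Viterbi label ids in Instance.output_ids, while the "soft" variant stores a
# (seq_len, num_labels) marginal distribution in Instance.marginals.
def most_probable_labels(inst, idx2labels):
    if getattr(inst, "output_ids", None) is not None:        # hard variant
        label_ids = inst.output_ids
    else:                                                     # soft variant
        label_ids = inst.marginals.argmax(axis=-1).tolist()
    return [idx2labels[label_id] for label_id in label_ids]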
def hard_constraint_predict(config: Config, model: NNCRF, fold_batches: List[Tuple],
                            folded_insts: List[Instance], model_type: str = "hard"):
    batch_id = 0
    batch_size = config.batch_size
    model.eval()
    for batch in fold_batches:
        one_batch_insts = folded_insts[batch_id * batch_size:(batch_id + 1) * batch_size]
        _, batch_max_ids = model.decode(batch)
        batch_max_ids = batch_max_ids.cpu().numpy()
        word_seq_lens = batch[1].cpu().numpy()
        for idx in range(len(batch_max_ids)):
            length = word_seq_lens[idx]
            prediction = batch_max_ids[idx][:length].tolist()
            prediction = prediction[::-1]
            one_batch_insts[idx].output_ids = prediction
        batch_id += 1
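# A two-fold usage sketch under assumed names (`model_fold_1`, `fold_batches_2`,
# etc. are not from the original code): a model trained on one fold predicts
# hard labels for the instances of the other fold, writing them into
# Instance.output_ids so they can serve as supervision later.
def cross_annotate(config, model_fold_1, model_fold_2,
                   fold_batches_1, fold_insts_1, fold_batches_2, fold_insts_2):
    hard_constraint_predict(config, model_fold_2, fold_batches_1, fold_insts_1)  # fold-2 model labels fold 1
    hard_constraint_predict(config, model_fold_1, fold_batches_2, fold_insts_2)  # fold-1 model labels fold 2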
def evaluate_model(config: Config, model: NNCRF, batch_insts_ids, name: str, insts: List[Instance]):
    ## evaluation
    p_dict, total_predict_dict, total_entity_dict = Counter(), Counter(), Counter()
    batch_id = 0
    batch_size = config.batch_size
    for batch in batch_insts_ids:
        one_batch_insts = insts[batch_id * batch_size:(batch_id + 1) * batch_size]
        batch_max_scores, batch_max_ids = model.decode(batch)
        batch_p, batch_predict, batch_total = evaluate_batch_insts(one_batch_insts, batch_max_ids, batch[-1],
                                                                   batch[1], config.idx2labels, config.use_crf_layer)
        p_dict += batch_p
        total_predict_dict += batch_predict
        total_entity_dict += batch_total
        batch_id += 1
    for key in total_entity_dict:
        precision_key, recall_key, fscore_key = get_metric(p_dict[key], total_entity_dict[key], total_predict_dict[key])
        print("[%s] Prec.: %.2f, Rec.: %.2f, F1: %.2f" % (key, precision_key, recall_key, fscore_key))
        if key == config.new_type:
            precision_new_type, recall_new_type, fscore_new_type = get_metric(p_dict[key], total_entity_dict[key],
                                                                              total_predict_dict[key])
    total_p = sum(list(p_dict.values()))
    total_predict = sum(list(total_predict_dict.values()))
    total_entity = sum(list(total_entity_dict.values()))
    precision, recall, fscore = get_metric(total_p, total_entity, total_predict)
    print(colored("[%s set Total] Prec.: %.2f, Rec.: %.2f, F1: %.2f" % (name, precision, recall, fscore), 'blue'),
          flush=True)
    if config.choose_by_new_type:
        return [precision_new_type, recall_new_type, fscore_new_type]
    else:
        return [precision, recall, fscore]
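# get_metric is defined elsewhere in the project; the sketch below is a
# plausible reference implementation only, inferred from how it is called
# above (correct count, gold-entity count, predicted-entity count) and from
# the percentage-scale formulas used by the other evaluate_model variants in
# this section.
def get_metric(p_num: int, total_num: int, total_predicted_num: int):
    precision = p_num * 1.0 / total_predicted_num * 100 if total_predicted_num != 0 else 0
    recall = p_num * 1.0 / total_num * 100 if total_num != 0 else 0
    fscore = 2.0 * precision * recall / (precision + recall) if precision != 0 or recall != 0 else 0
    return precision, recall, fscore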
def evaluate_model(config: Config, model: NNCRF, batch_insts_ids, name: str, insts: List[Instance]):
    ## evaluation
    metrics_exact = np.asarray([0, 0, 0], dtype=int)
    metrics_overlap = np.asarray([0, 0, 0], dtype=int)
    dict_exact = {}
    dict_overlap = {}
    batch_id = 0
    batch_size = config.batch_size
    for batch in batch_insts_ids:
        one_batch_insts = insts[batch_id * batch_size:(batch_id + 1) * batch_size]
        batch_max_scores, batch_max_ids = model.decode(batch)
        results = evaluate_batch_insts(one_batch_insts, batch_max_ids, batch[-1], batch[1], config.idx2labels)
        metrics_exact += results[0]
        metrics_overlap += results[1]
        for key in results[2]:
            if key not in dict_exact:
                dict_exact[key] = [0, 0, 0]
            dict_exact[key][0] += results[2][key][0]
            dict_exact[key][1] += results[2][key][1]
            dict_exact[key][2] += results[2][key][2]
        for key in results[3]:
            if key not in dict_overlap:
                dict_overlap[key] = [0, 0, 0]
            dict_overlap[key][0] += results[3][key][0]
            dict_overlap[key][1] += results[3][key][1]
            dict_overlap[key][2] += results[3][key][2]
        batch_id += 1
    p_exact, total_predict, total_entity = metrics_exact[0], metrics_exact[1], metrics_exact[2]
    precision_exact = p_exact * 1.0 / total_predict * 100 if total_predict != 0 else 0
    recall_exact = p_exact * 1.0 / total_entity * 100 if total_entity != 0 else 0
    fscore_exact = 2.0 * precision_exact * recall_exact / (precision_exact + recall_exact) \
        if precision_exact != 0 or recall_exact != 0 else 0
    print("[%s set - Exact] Precision: %.2f, Recall: %.2f, F1: %.2f" %
          (name, precision_exact, recall_exact, fscore_exact), flush=True)
    # print_report(dict_exact)
    p_overlap, total_predict, total_entity = metrics_overlap[0], metrics_overlap[1], metrics_overlap[2]
    precision_overlap = p_overlap * 1.0 / total_predict * 100 if total_predict != 0 else 0
    recall_overlap = p_overlap * 1.0 / total_entity * 100 if total_entity != 0 else 0
    fscore_overlap = 2.0 * precision_overlap * recall_overlap / (precision_overlap + recall_overlap) \
        if precision_overlap != 0 or recall_overlap != 0 else 0
    print("[%s set - Overlap] Precision: %.2f, Recall: %.2f, F1: %.2f" %
          (name, precision_overlap, recall_overlap, fscore_overlap), flush=True)
    # print_report(dict_overlap)
    return [precision_exact, recall_exact, fscore_exact], [precision_overlap, recall_overlap, fscore_overlap], \
           dict_exact, dict_overlap
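# Illustration only (not the repo's evaluate_batch_insts): the distinction
# behind the "Exact" and "Overlap" scores computed above. With spans given as
# (start, end, type) tuples, a prediction is an exact match if boundaries and
# type both match a gold span, and an overlap match if the type matches and
# the token ranges intersect.
def exact_match(pred_span, gold_span):
    return pred_span == gold_span

def overlap_match(pred_span, gold_span):
    p_start, p_end, p_type = pred_span
    g_start, g_end, g_type = gold_span
    return p_type == g_type and p_start <= g_end and g_start <= p_end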
def evaluate_model(config: Config, model: NNCRF, batch_insts_ids, name: str, insts: List[Instance]):
    ## evaluation
    i = 0
    metrics = np.zeros(26, dtype=int)
    batch_id = 0
    batch_size = config.batch_size
    for batch in batch_insts_ids:
        i += 1
        flag = 0
        one_batch_insts = insts[batch_id * batch_size:(batch_id + 1) * batch_size]
        batch_max_scores, batch_max_ids = model.decode(batch)
        if i == len(batch_insts_ids) - 1:
            flag = 1
        metrics += evaluate_batch_insts(one_batch_insts, batch_max_ids, batch[-1], batch[1], config.idx2labels,
                                        config.use_crf_layer, config.test_kind, flag)
        batch_id += 1
    p, p_special, total_predict, total_entity, special_predict, special_entity = \
        metrics[0], metrics[1], metrics[2], metrics[3], metrics[4], metrics[5]
    wrong_prediction = {}
    wrong_prediction["BLater"] = metrics[6]
    wrong_prediction["BEarlier"] = metrics[7]
    wrong_prediction["ILater"] = metrics[8]
    wrong_prediction["IEarlier"] = metrics[9]
    wrong_prediction["O2misc"] = metrics[10]
    wrong_prediction["misc2O"] = metrics[11]
    for j in range(1, 8):
        wrong_prediction[j] = metrics[11 + j]
    for j in range(1, 8):
        wrong_prediction["length%d" % j] = metrics[18 + j]
    precision = p * 1.0 / total_predict * 100 if total_predict != 0 else 0
    recall = p * 1.0 / total_entity * 100 if total_entity != 0 else 0
    fscore = 2.0 * precision * recall / (precision + recall) if precision != 0 or recall != 0 else 0
    precision_special = p_special * 1.0 / special_predict * 100 if special_predict != 0 else 0
    recall_special = p_special * 1.0 / special_entity * 100 if special_entity != 0 else 0
    fscore_special = 2.0 * precision_special * recall_special / (precision_special + recall_special) \
        if precision_special != 0 or recall_special != 0 else 0
    print("---[%s set] Precision: %.2f, Recall: %.2f, F1: %.2f" % (name, precision, recall, fscore), flush=True)
    print("---[%s of %s set] Precision: %.2f, Recall: %.2f, F1: %.2f" %
          (config.test_kind, name, precision_special, recall_special, fscore_special), flush=True)
    print(p_special, special_entity, special_predict)
    for inn in wrong_prediction.keys():
        if str(inn).startswith("length"):
            print(wrong_prediction[inn], end=" ")
    print()
    print(wrong_prediction)
    if name == "test":
        print()
    return [precision, recall, fscore]
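# Optional pretty-printer, not part of the original code: it only reformats
# what the raw print(wrong_prediction) call above already shows, grouping the
# named error categories and the per-length buckets.
def print_error_breakdown(wrong_prediction: dict):
    for key in ["BLater", "BEarlier", "ILater", "IEarlier", "O2misc", "misc2O"]:
        print("%-10s %d" % (key, wrong_prediction.get(key, 0)))
    length_keys = [k for k in wrong_prediction if str(k).startswith("length")]
    print("by length:", " ".join("%s=%d" % (k, wrong_prediction[k]) for k in length_keys))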