def Evaluation_all(gold_label, predict_label):
    """Score in-memory predictions against gold labels over the 20-way label set.

    Args:
        gold_label: sequence of gold label names.
        predict_label: sequence of predicted label names, aligned with gold.

    Returns:
        Tuple of (overall_accuracy, macro_p, macro_r, macro_f1).
    """
    label_alphabet = Alphabet()
    for index in range(20):
        label_alphabet.add(DICT_INDEX_TO_LABEL[index])

    matrix = ConfusionMatrix(label_alphabet)
    matrix.add_list(predict_label, gold_label)

    avg_p, avg_r, avg_f1 = matrix.get_average_prf()
    accuracy = matrix.get_accuracy()
    return accuracy, avg_p, avg_r, avg_f1
def Evaluation(gold_file_path, predict_file_path):
    """Evaluate predicted label ids in *predict_file_path* against *gold_file_path*.

    Each file holds one tab-separated record per line whose first field is an
    integer label id. Ids are mapped to label names via ``config.id2category``.
    Prints a confusion matrix (via the external ``confusion_matrix`` helper)
    and a per-label summary as a side effect.

    Args:
        gold_file_path: path to the gold-label file.
        predict_file_path: path to the prediction file, line-aligned with gold.

    Returns:
        Tuple of (overall_accuracy, macro_p, macro_r, macro_f1).
    """
    # Only file reading needs the handles open; process the lists afterwards.
    with open(gold_file_path) as gold_file, open(predict_file_path) as predict_file:
        gold_list = [int(line.strip().split('\t')[0]) for line in gold_file]
        predicted_list = [int(line.strip().split('\t')[0]) for line in predict_file]

    # Fix: entries are already ints — the original re-cast each one with
    # int(...) a second time before the id2category lookup.
    predict_labels = [config.id2category[pred] for pred in predicted_list]
    gold_labels = [config.id2category[gold] for gold in gold_list]

    binary_alphabet = Alphabet()
    for i in range(20):
        binary_alphabet.add(DICT_INDEX_TO_LABEL[i])

    cm = ConfusionMatrix(binary_alphabet)
    cm.add_list(predict_labels, gold_labels)
    # Called for its side effect; the return value was discarded in the
    # original as well — presumably it prints/logs. TODO confirm.
    confusion_matrix(gold_list, predicted_list)
    cm.print_summary()

    macro_p, macro_r, macro_f1 = cm.get_average_prf()
    overall_accuracy = cm.get_accuracy()
    return overall_accuracy, macro_p, macro_r, macro_f1
def Evaluation(gold_file_path, predict_file_path):
    """Evaluate string-label predictions against gold labels (18-way label set).

    NOTE(review): this redefines an earlier ``Evaluation`` in the same module
    and shadows it at import time — confirm which definition callers expect.
    Gold lines are split on a plain tab while prediction lines are split on
    the ``'\\t#\\t'`` separator; confirm both file formats are intended.

    Args:
        gold_file_path: path to the gold-label file (first tab field = label).
        predict_file_path: path to the prediction file (field before '\\t#\\t').

    Returns:
        Tuple of (overall_accuracy, macro_p, macro_r, macro_f1).
    """
    with open(gold_file_path) as gf, open(predict_file_path) as pf:
        gold = [row.strip().split('\t')[0] for row in gf]
        predicted = [row.strip().split('\t#\t')[0] for row in pf]

        label_alphabet = Alphabet()
        for index in range(18):
            label_alphabet.add(DICT_INDEX_TO_LABEL[index])

        matrix = ConfusionMatrix(label_alphabet)
        matrix.add_list(predicted, gold)
        matrix.print_out()

        macro_p, macro_r, macro_f1 = matrix.get_average_prf()
        overall_accuracy = matrix.get_accuracy()
        return overall_accuracy, macro_p, macro_r, macro_f1