Example #1
0
def get_eval_string(true_prediction):
    """Format micro/macro evaluation metrics as a single summary line.

    Args:
      true_prediction: list of (gold_labels, predicted_labels) pairs; each
        element of a pair is an iterable of labels.

    Returns:
      A string with micro P/R/F1, macro P/R/F1, and exact-match dev
      accuracy (a prediction counts as correct only when its label set
      equals the gold label set).

    Raises:
      ZeroDivisionError: if true_prediction is empty.
    """
    count, pred_count, avg_pred_count, p, r, f1 = eval_metric.micro(true_prediction)
    _, _, _, ma_p, ma_r, ma_f1 = eval_metric.macro(true_prediction)
    output_str = "Eval: {0} {1} {2:.3f} P:{3:.3f} R:{4:.3f} F1:{5:.3f} Ma_P:{6:.3f} Ma_R:{7:.3f} Ma_F1:{8:.3f}".format(
        count, pred_count, avg_pred_count, p, r, f1, ma_p, ma_r, ma_f1)
    # Generator expression avoids building a throwaway list; sum of bools
    # is the count of exact set matches, and true division yields a float.
    accuracy = sum(set(y) == set(yp) for y, yp in true_prediction) / len(true_prediction)
    output_str += '\t Dev accuracy: {0:.1f}%'.format(accuracy * 100)
    return output_str
Example #2
0
def metric_dicts(true_prediction):
    """Compute micro/macro metrics and exact-match accuracy.

    Args:
      true_prediction: list of (gold_labels, predicted_labels) pairs; each
        element of a pair is an iterable of labels.

    Returns:
      A (result, output_str) tuple where result is a dict with keys
      "precision", "recall", "f1", "ma_precision", "ma_recall", "ma_f1",
      and "accu", and output_str is the same formatted summary line
      produced by get_eval_string-style reporting.

    Raises:
      ZeroDivisionError: if true_prediction is empty.
    """
    count, pred_count, avg_pred_count, p, r, f1 = eval_metric.micro(
        true_prediction)
    _, _, _, ma_p, ma_r, ma_f1 = eval_metric.macro(true_prediction)
    output_str = "Eval: {0} {1} {2:.3f} P:{3:.3f} R:{4:.3f} F1:{5:.3f} Ma_P:{6:.3f} Ma_R:{7:.3f} Ma_F1:{8:.3f}".format(
        count, pred_count, avg_pred_count, p, r, f1, ma_p, ma_r, ma_f1)
    # Exact-match accuracy: a prediction is correct only when the predicted
    # label set equals the gold label set. Generator expression avoids
    # materializing an intermediate list.
    accuracy = sum(set(y) == set(yp)
                   for y, yp in true_prediction) / len(true_prediction)
    output_str += '\t Dev accuracy: {0:.1f}%'.format(accuracy * 100)
    result = {
        "precision": p,
        "recall": r,
        'f1': f1,
        "ma_precision": ma_p,
        "ma_recall": ma_r,
        "ma_f1": ma_f1,
        "accu": accuracy
    }
    return result, output_str