def main():
    """Drive the PoseTrack evaluation: load ground-truth and prediction
    frames from the directories given on the command line, then compute
    per-frame AP (pose estimation) and/or MOT metrics (pose tracking).

    Results are printed and written under ``args.outputDir``.
    """
    args = parseArgs()
    print(args)
    # load_data_dir expects an argv-style list: [prog, gt_dir, pred_dir]
    argv = ['', args.groundTruth, args.predictions]

    print("Loading data")
    gtFramesAll, prFramesAll = eval_helpers.load_data_dir(argv)

    print("# gt frames :", len(gtFramesAll))
    print("# pred frames:", len(prFramesAll))

    if not os.path.exists(args.outputDir):
        os.makedirs(args.outputDir)

    if args.evalPoseEstimation:
        #####################################################
        # evaluate per-frame multi-person pose estimation (AP)

        # compute AP
        print("Evaluation of per-frame multi-person pose estimation")
        apAll, preAll, recAll = evaluateAP(gtFramesAll, prFramesAll,
                                           args.outputDir, True,
                                           args.saveEvalPerSequence)

        # print AP
        print("Average Precision (AP) metric:")
        eval_helpers.printTable(apAll)

    if args.evalPoseTracking:
        #####################################################
        # evaluate multi-person pose tracking in video (MOTA)

        # compute MOTA
        print("Evaluation of video-based multi-person pose tracking")
        metricsAll, metricsMidAll = evaluateTracking(gtFramesAll, prFramesAll,
                                                     args.outputDir, True,
                                                     args.saveEvalPerSequence)

        # Assemble one column vector: per-joint MOTA (plus the overall MOTA
        # at index num_joints) followed by overall MOTP/precision/recall.
        num_joints = Joint().count  # hoisted: avoid re-constructing Joint() per use
        metrics = np.zeros([num_joints + 4, 1])
        for i in range(num_joints + 1):
            metrics[i, 0] = metricsAll['mota'][0, i]
        metrics[num_joints + 1, 0] = metricsAll['motp'][0, num_joints]
        metrics[num_joints + 2, 0] = metricsAll['pre'][0, num_joints]
        metrics[num_joints + 3, 0] = metricsAll['rec'][0, num_joints]

        # print MOT metrics
        print("Multiple Object Tracking (MOT) metrics:")
        eval_helpers.printTable(metrics, motHeader=True)
        eval_helpers.printStatistics(metricsMidAll)
def main():
    """Drive the PoseTrack evaluation (variant without an output directory):
    load ground-truth and prediction frames, then compute per-frame AP
    (pose estimation) and/or MOT metrics (pose tracking) and print them.

    NOTE(review): this re-defines ``main`` and shadows the earlier
    definition in this file — confirm which entry point is intended.
    """
    args = parseArgs()
    print(args)
    # load_data_dir expects an argv-style list: [prog, gt_dir, pred_dir]
    argv = ['', args.groundTruth, args.predictions]

    print("Loading data")
    gtFramesAll, prFramesAll = eval_helpers.load_data_dir(argv)

    print("# gt frames :", len(gtFramesAll))
    print("# pred frames:", len(prFramesAll))

    if args.evalPoseEstimation:
        #####################################################
        # evaluate per-frame multi-person pose estimation (AP)

        # compute AP
        print("Evaluation of per-frame multi-person pose estimation")
        apAll, preAll, recAll = evaluateAP(gtFramesAll, prFramesAll)

        # print AP
        print("Average Precision (AP) metric:")
        eval_helpers.printTable(apAll)

    if args.evalPoseTracking:
        #####################################################
        # evaluate multi-person pose tracking in video (MOTA)

        # compute MOTA
        print("Evaluation of video-based multi-person pose tracking")
        metricsAll = evaluateTracking(gtFramesAll, prFramesAll,
                                      args.trackUpperBound)

        # Assemble one column vector: per-joint MOTA (plus the overall MOTA
        # at index num_joints) followed by overall MOTP/precision/recall.
        num_joints = Joint().count  # hoisted: avoid re-constructing Joint() per use
        metrics = np.zeros([num_joints + 4, 1])
        for i in range(num_joints + 1):
            metrics[i, 0] = metricsAll['mota'][0, i]
        metrics[num_joints + 1, 0] = metricsAll['motp'][0, num_joints]
        metrics[num_joints + 2, 0] = metricsAll['pre'][0, num_joints]
        metrics[num_joints + 3, 0] = metricsAll['rec'][0, num_joints]

        # print MOT metrics
        print("Multiple Object Tracking (MOT) metrics:")
        eval_helpers.printTable(metrics, motHeader=True)
def evaluation(list_):
    """Compute per-frame multi-person pose-estimation AP over a dataset.

    Converts per-frame predictions and ground truth into the poseval
    annolist format, runs ``evaluateAP``, prints the AP table, and returns
    the cumulative AP values.

    Args:
        list_: sequence of parallel per-frame lists:
            [0] file names, [1] ground-truth keypoints, [2] predicted
            humans, [3] per-keypoint prediction scores, [4] ground-truth
            (cx, cy, w, h) bounding boxes. Indices 5 (visibility) and 6
            (size) may be present but are not used here.

    Returns:
        Cumulative AP values as produced by ``eval_helpers.getCum``.
    """
    fname_list = list_[0]      # image file names
    gt_kps_list = list_[1]     # ground-truth keypoints per frame
    humans_list = list_[2]     # predicted humans per frame
    scores_list = list_[3]     # per-keypoint prediction scores
    gt_bboxes_list = list_[4]  # ground-truth (cx, cy, w, h) boxes
    # NOTE: list_[5] (is_visible) and list_[6] (size) were read but never
    # used; they are intentionally ignored here.

    gtFramesAll = []
    prFramesAll = []
    for fname, humans, scores, gt_kps, gt_bboxes in zip(
            fname_list, humans_list, scores_list, gt_kps_list, gt_bboxes_list):
        pred_data = {"image": [fname], "annorect": []}
        gt_data = {"image": [fname], "annorect": []}

        # --- predictions -------------------------------------------------
        for person, score in zip(humans, scores):
            # entry 0 is the head bounding box (y1, x1, y2, x2)
            y1, x1, y2, x2 = person[0]
            pp = {
                'x1': [x1], 'y1': [y1], 'x2': [x2], 'y2': [y2],
                'score': [score[0]],
                'annopoints': [{"point": []}],
            }
            for num in range(1, len(KEYPOINT_NAMES)):
                if num in person:
                    # restore the keypoint center from its (y1,x1,y2,x2) box
                    y = (person[num][0] + person[num][2]) / 2
                    x = (person[num][1] + person[num][3]) / 2
                    s = score[num]
                else:
                    # missing keypoint: report origin with zero confidence
                    y, x, s = 0, 0, 0
                pp['annopoints'][0]['point'].append(
                    {'id': [num - 1], 'x': [x], 'y': [y], 'score': [s]})
            pred_data['annorect'].append(pp)

        # --- ground truth ------------------------------------------------
        for person, keypoints in zip(gt_bboxes, gt_kps):
            # convert a center-based (cx, cy, w, h) box to corner form
            cx, cy, w, h = person
            x1 = cx - w // 2
            x2 = cx + w // 2
            y1 = cy - h // 2
            y2 = cy + h // 2
            gtperson = {
                'x1': [x1], 'y1': [y1], 'x2': [x2], 'y2': [y2],
                'annopoints': [{"point": []}],
            }
            for idx, pt in enumerate(keypoints):
                gtperson['annopoints'][0]['point'].append(
                    {'id': [idx], 'x': [pt[0]], 'y': [pt[1]]})
            gt_data['annorect'].append(gtperson)

        # append the frame directly; the former one-element
        # {"annolist": [...]} wrappers were immediately unwrapped again
        prFramesAll.append(pred_data)
        gtFramesAll.append(gt_data)

    # poseval pipeline
    gtFramesAll, prFramesAll = eval_helpers.load_data(gtFramesAll, prFramesAll)
    print("# gt frames :", len(gtFramesAll))
    print("# pred frames:", len(prFramesAll))

    #####################################################
    # evaluate per-frame multi-person pose estimation (AP)

    # compute AP
    print("Evaluation of per-frame multi-person pose estimation")
    apAll, preAll, recAll = evaluateAP(gtFramesAll, prFramesAll, False)

    # print AP
    print("Average Precision (AP) metric:")
    eval_helpers.printTable(apAll)

    ap_vals = eval_helpers.getCum(apAll)
    return ap_vals