import os

import cv2
import numpy as np
import torch

# Helper functions used below (get_coco_val, get_multiplier, get_outputs,
# handle_paf_and_heat, decode_pose, append_result, eval_coco, ...) come from
# this repository's own modules; their import paths are omitted here.


def run_eval(image_dir, anno_dir, vis_dir, image_list_txt, model, preprocess):
    """Run the evaluation on the test set and report the mAP score

    :param model: the model to test
    :returns: float, the reported mAP score
    """
    # This txt file is found in the caffe_rtpose repository:
    # https://github.com/CMU-Perceptual-Computing-Lab/caffe_rtpose/blob/master
    img_ids, img_paths, img_heights, img_widths = get_coco_val(
        image_list_txt)
    # img_ids = img_ids[81:82]
    # img_paths = img_paths[81:82]
    print("Total number of validation images {}".format(len(img_ids)))

    # iterate over all val images
    outputs = []
    print("Processing images in validation set")
    for i in range(len(img_ids)):
        if i % 10 == 0 and i != 0:
            print("Processed {} images".format(i))

        oriImg = cv2.imread(os.path.join(image_dir, 'val2014/' + img_paths[i]))
        # Get the shortest side of the image (either height or width)
        shape_dst = np.min(oriImg.shape[0:2])

        # Get results of original image
        multiplier = get_multiplier(oriImg)
        orig_paf, orig_heat = get_outputs(
            multiplier, oriImg, model, preprocess)

        # Get results of flipped image
        swapped_img = oriImg[:, ::-1, :]
        flipped_paf, flipped_heat = get_outputs(multiplier, swapped_img,
                                                model, preprocess)

        # compute averaged heatmap and paf
        paf, heatmap = handle_paf_and_heat(
            orig_heat, flipped_heat, orig_paf, flipped_paf)

        # choose which post-processing to use; our_post_processing
        # gets a slightly higher AP but is slow.
        param = {'thre1': 0.1, 'thre2': 0.05, 'thre3': 0.5}
        canvas, to_plot, candidate, subset = decode_pose(
            oriImg, param, heatmap, paf)

        vis_path = os.path.join(vis_dir, img_paths[i])
        cv2.imwrite(vis_path, to_plot)
        # subset indicates how many people were found in this image.
        append_result(img_ids[i], subset, candidate, outputs)
        # cv2.imshow('test', canvas)
        # cv2.waitKey(0)

    # Eval and show the final result!
    return eval_coco(outputs=outputs, dataDir=anno_dir, imgIds=img_ids)
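
# A minimal usage sketch (an assumption, not from the source): evaluate an
# already-loaded model on the COCO 2014 validation split. The directory
# layout, the image-list filename, and the 'rtpose' preprocess flag are
# placeholder values to adjust for your setup.
mAP = run_eval(image_dir='/data/coco', anno_dir='/data/coco/annotations',
               vis_dir='./vis', image_list_txt='image_info_val2014_1k.txt',
               model=model, preprocess='rtpose')
print('mAP: {:.4f}'.format(mAP))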

def viz_keypoints(oriImg, p_heatmap, p_paf, name):
    # multiplier = get_multiplier(oriImg, 368.)
    # scale_search = [1.]
    # multiplier = [x * 481. / float(oriImg.shape[0]) for x in scale_search]
    multiplier = [1.]
    paf, heatmap = _get_outputs(multiplier, oriImg, p_heatmap, p_paf,
                                numkeypoints=NUM_KEYPOINTS,
                                numlims=NUM_LIMBS)

    # Given a (grayscale) image, find local maxima whose value is above a
    # given threshold (param['thre1']).
    # Criterion 1: at least 80% of the intermediate points have a score
    # higher than thre2.
    param = {'thre1': 0.1, 'thre2': 0.05, 'thre3': 0.5}
    to_plot, canvas, joint_list, person_to_joint_assoc = decode_pose(
        oriImg, param, heatmap, paf)
    cv2.imwrite(name, canvas)
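
# NUM_KEYPOINTS and NUM_LIMBS above are assumed to be module-level constants
# defined elsewhere in this file. A plausible sketch of their values for the
# COCO skeleton used by rtpose-style models would be:
NUM_KEYPOINTS = 18  # assumption: COCO body parts (background channel excluded)
NUM_LIMBS = 19      # assumption: limb connections, i.e. 38 PAF channels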

def extract_pose(video, oriImg, name, filestream, model):
    # Get results of original image
    multiplier = get_multiplier(oriImg)
    with torch.no_grad():
        paf, heatmap = get_outputs(multiplier, oriImg, model, 'rtpose')

    # (x, y) coordinates of the detected keypoints
    pose_cords = compute_cordinates(heatmap, paf, oriImg)
    print("{}: {}: {}".format(
        str(name) + ".jpg", str(list(pose_cords[:, 0])),
        str(list(pose_cords[:, 1]))), file=filestream)
    filestream.flush()

    param = {'thre1': 0.1, 'thre2': 0.05, 'thre3': 0.5}
    canvas, to_plot, candidate, subset = decode_pose(oriImg, param,
                                                     heatmap, paf)
    save_result(video, oriImg, to_plot, name)
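
# A minimal usage sketch (an assumption, not from the source): drive
# extract_pose over every frame of a video with OpenCV. Passing the capture
# handle through to save_result and the 'poses.txt' filename are placeholders.
cap = cv2.VideoCapture('input.mp4')
with open('poses.txt', 'w') as filestream:
    frame_idx = 0
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        extract_pose(cap, frame, frame_idx, filestream, model)
        frame_idx += 1
cap.release()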

def process(model, oriImg, process_speed):
    # Get results of original image
    multiplier = get_multiplier(oriImg, process_speed)
    with torch.no_grad():
        orig_paf, orig_heat = get_outputs(multiplier, oriImg, model, 'rtpose')

        # Get results of flipped image
        swapped_img = oriImg[:, ::-1, :]
        flipped_paf, flipped_heat = get_outputs(multiplier, swapped_img,
                                                model, 'rtpose')

        # compute averaged heatmap and paf
        paf, heatmap = handle_paf_and_heat(orig_heat, flipped_heat,
                                           orig_paf, flipped_paf)

    param = {'thre1': 0.1, 'thre2': 0.05, 'thre3': 0.5}
    to_plot, canvas, joint_list, person_to_joint_assoc = decode_pose(
        oriImg, param, heatmap, paf)
    return to_plot, canvas, joint_list, person_to_joint_assoc
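
# A minimal usage sketch (an assumption): run the flip-averaged pipeline on a
# single image. 'frame.jpg' and process_speed=4 are placeholder values; the
# repo appears to use process_speed to thin out the multi-scale search in
# get_multiplier, trading accuracy for speed.
frame = cv2.imread('frame.jpg')
to_plot, canvas, joint_list, person_to_joint_assoc = process(model, frame, 4)
cv2.imwrite('frame_pose.png', to_plot)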

weight_name = 'pose_model.pth'  # path to the pretrained weights; adjust to your checkpoint

model = get_model(trunk='vgg19')
model = torch.nn.DataParallel(model).cuda()
model.load_state_dict(torch.load(weight_name))
model.float()
model.eval()

test_image = 'kids.jpg'
oriImg = cv2.imread(test_image)  # B,G,R order
shape_dst = np.min(oriImg.shape[0:2])

# Get results of original image
multiplier = get_multiplier(oriImg)
with torch.no_grad():
    orig_paf, orig_heat = get_outputs(multiplier, oriImg, model, 'rtpose')

    # Get results of flipped image
    swapped_img = oriImg[:, ::-1, :]
    flipped_paf, flipped_heat = get_outputs(multiplier, swapped_img,
                                            model, 'rtpose')

    # compute averaged heatmap and paf
    paf, heatmap = handle_paf_and_heat(orig_heat, flipped_heat,
                                       orig_paf, flipped_paf)

param = {'thre1': 0.1, 'thre2': 0.05, 'thre3': 0.5}
canvas, to_plot, candidate, subset = decode_pose(oriImg, param, heatmap, paf)
cv2.imwrite('result_photo.png', to_plot)

# loader (an iterable of image paths), saver, and dir (the output directory)
# are assumed to be defined earlier in this script.
model.eval()
for i, fname in enumerate(loader):
    oriImg = cv2.imread(fname)  # B,G,R order
    shape_dst = np.min(oriImg.shape[0:2])

    # Get results of original image
    multiplier = get_multiplier(oriImg)
    with torch.no_grad():
        orig_paf, orig_heat = get_outputs(multiplier, oriImg, model, 'rtpose')

        # Get results of flipped image
        swapped_img = oriImg[:, ::-1, :]
        flipped_paf, flipped_heat = get_outputs(multiplier, swapped_img,
                                                model, 'rtpose')

        # compute averaged heatmap and paf
        paf, heatmap = handle_paf_and_heat(orig_heat, flipped_heat,
                                           orig_paf, flipped_paf)

    param = {'thre1': 0.1, 'thre2': 0.05, 'thre3': 0.5}
    canvas, to_plot, joint_list, person_to_joint_assoc = decode_pose(
        oriImg, param, heatmap, paf)

    saver.crawl(fname, joint_list, person_to_joint_assoc)
    cv2.imwrite(dir + 'done\\' + str(i) + '.png', to_plot)  # Windows-style output path
    print('%d images have been annotated!' % i)

print('Annotation completed!')
saver.distribute()
exit()