import os

import cv2
import numpy as np

# NOTE: helper functions such as read_json_from_file, is_image, draw_bbox,
# create_folder, reshape_keypoints_into_joints and isPoiWithinPoly are assumed
# to be provided by this repo's utility modules.


def show_boxes_from_standard_json(json_file_path, classes, img_folder_path=None,
                                  output_folder_path=None, track_id=-1):
    dets = read_json_from_file(json_file_path)

    for det in dets:
        python_data = det

        # Resolve the image path either from the JSON record or an explicit folder
        if img_folder_path is None:
            img_path = os.path.join(python_data["image"]["folder"],
                                    python_data["image"]["name"])
        else:
            img_path = os.path.join(img_folder_path,
                                    python_data["image"]["name"])

        if is_image(img_path):
            img = cv2.imread(img_path)

            # Draw every detection candidate whose score passes the threshold
            candidates = python_data["candidates"]
            for candidate in candidates:
                bbox = np.array(candidate["det_bbox"]).astype(int)
                score = candidate["det_score"]
                if score >= bbox_thresh:
                    img = draw_bbox(img, bbox, score, classes, track_id=track_id)

            if output_folder_path is not None:
                create_folder(output_folder_path)
                img_output_path = os.path.join(output_folder_path,
                                               python_data["image"]["name"])
                cv2.imwrite(img_output_path, img)
    return True
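
# A minimal sketch of the "standard JSON" detection format this function reads,
# inferred from the fields accessed above. The folder, file name, class list,
# bbox values and scores below are hypothetical placeholders, not values
# shipped with this repo; the bbox coordinate convention is whatever draw_bbox expects.
EXAMPLE_DET_RECORDS = [
    {
        "image": {"folder": "demo/images", "name": "frame_0001.jpg"},
        "candidates": [
            {"det_bbox": [100, 120, 220, 360], "det_score": 0.93},
            {"det_bbox": [300, 80, 400, 320], "det_score": 0.41},
        ],
    },
]
# Example call (paths and class list are hypothetical):
# show_boxes_from_standard_json("dets.json", classes=["person"],
#                               img_folder_path="demo/images",
#                               output_folder_path="demo/output_boxes")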

def show_poses_from_standard_json(json_file_path, joint_pairs, joint_names,
                                  img_folder_path=None, output_folder_path=None):
    poses = read_json_from_file(json_file_path)

    for pose in poses:
        python_data = pose

        if img_folder_path is None:
            img_path = os.path.join(python_data["image"]["folder"],
                                    python_data["image"]["name"])
        else:
            img_path = os.path.join(img_folder_path,
                                    python_data["image"]["name"])

        if is_image(img_path):
            img = cv2.imread(img_path)

            candidates = python_data["candidates"]
            for candidate in candidates:
                pose_keypoints_2d = candidate["pose_keypoints_2d"]
                joints = reshape_keypoints_into_joints(pose_keypoints_2d)
                img = show_poses_from_python_data(img, joints, joint_pairs, joint_names)

            if output_folder_path is not None:
                create_folder(output_folder_path)
                img_output_path = os.path.join(output_folder_path,
                                               python_data["image"]["name"])
                cv2.imwrite(img_output_path, img)
    return
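
# A companion sketch of one pose record, assuming the common flat keypoint
# layout [x1, y1, score1, x2, y2, score2, ...] that reshape_keypoints_into_joints
# is expected to turn into per-joint (x, y, score) triples (the way joints are
# unpacked in draw_shelf_bbox below). File names and values are hypothetical.
EXAMPLE_POSE_RECORD = {
    "image": {"folder": "demo/images", "name": "frame_0001.jpg"},
    "candidates": [
        {"pose_keypoints_2d": [512.0, 304.0, 0.91,    # joint 0: x, y, score
                               518.0, 340.0, 0.88]},  # joint 1: x, y, score
    ],
}
# Example call (joint_pairs / joint_names as defined in batch_test_images below;
# paths are hypothetical):
# show_poses_from_standard_json("poses.json", joint_pairs, joint_names,
#                               img_folder_path="demo/images",
#                               output_folder_path="demo/output_poses")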

def batch_test_images(folder_path_in, folder_path_out):
    joint_names = ['head', 'upper neck',
                   'right shoulder', 'right elbow', 'right wrist',
                   'left shoulder', 'left elbow', 'left wrist',
                   'right pelvis', 'right knee', 'right ankle',
                   'left pelvis', 'left knee', 'left ankle',
                   'background']
    joint_pairs = [['head', 'upper neck', 'purple'],
                   ['upper neck', 'right shoulder', 'yellow'],
                   ['upper neck', 'left shoulder', 'yellow'],
                   ['right shoulder', 'right elbow', 'blue'],
                   ['right elbow', 'right wrist', 'green'],
                   ['left shoulder', 'left elbow', 'blue'],
                   ['left elbow', 'left wrist', 'green'],
                   ['right shoulder', 'right pelvis', 'yellow'],
                   ['left shoulder', 'left pelvis', 'yellow'],
                   ['right pelvis', 'right knee', 'red'],
                   ['right knee', 'right ankle', 'skyblue'],
                   ['left pelvis', 'left knee', 'red'],
                   ['left knee', 'left ankle', 'skyblue']]

    # setup paths
    deployFile = deploy_proto_path
    caffemodel = caffe_model_prefix + '.caffemodel'
    norm_size = img_size
    print('deployFile = %s' % deployFile)
    print('caffemodel = %s' % caffemodel)

    # load network
    if flag_GPU is True:
        caffe.set_mode_gpu()
        caffe.set_device(flag_GPU_id)
    else:
        caffe.set_mode_cpu()
    net = caffe.Net(deployFile, caffemodel, caffe.TEST)
    print('testing scales: ', scales)
    print('output_image_folder_path: ', output_image_folder_path)

    # Do the batch test
    subfolder_names = get_immediate_subfolder_names(folder_path_in)
    for subfolder_name in subfolder_names:
        subfolder_path_in = os.path.join(folder_path_in, subfolder_name)
        img_names = get_immediate_childfile_names(subfolder_path_in)

        for ct, img_name in enumerate(img_names):
            if flag_selective is True and img_name not in IMG_NAMES_TO_SHOW:
                continue
            print("processing: ", img_name, ct, len(img_names))

            # load image
            img_path_in = os.path.join(subfolder_path_in, img_name)
            if not is_image(img_path_in):
                continue
            img_raw = cv2.imread(img_path_in)

            # find heatmaps
            # OPTIONAL: flip and zoom
            if flag_flip is True and flag_zoom is True:
                heatmaps_from_multi_res = process_img_scales_and_flips(
                    net, img_raw, norm_size, scales, heatmap_layer_name)
            # OPTIONAL: flip
            elif flag_flip is True:
                heatmaps_from_multi_res = [
                    process_img_flip(net, img_raw, norm_size),
                    img_to_heatmaps(net, img_raw, norm_size, heatmap_layer_name)
                ]
            # OPTIONAL: zoom
            elif flag_zoom is True:
                heatmaps_from_multi_res = process_img_scales(
                    net, img_raw, norm_size, scales)
            else:
                heatmaps = img_to_heatmaps(net, img_raw, norm_size,
                                           heatmap_layer_name)

            # Average heatmaps
            if flag_flip is True or flag_zoom is True:
                heatmaps = average_multiple_heatmap_sets(heatmaps_from_multi_res)

            # Find joints from heatmaps
            if flag_nms is True:
                joints = find_joints_in_heatmaps_nms(heatmaps)
            else:
                joints = find_joints_in_heatmaps(heatmaps)

            # Draw joints on image
            img_demo = cv2.resize(img_raw, (norm_size, norm_size))
            img_demo = demo_poses_in_img(img_demo, joints, joint_pairs, joint_names)

            # Save images to a path
            if flag_save_images is True:
                create_folder(os.path.join(folder_path_out, subfolder_name))
                img_name_out = os.path.splitext(img_name)[0] + '.png'
                img_path_out = os.path.join(folder_path_out, subfolder_name,
                                            img_name_out)
                cv2.imwrite(img_path_out, img_demo)

            # Demo Heatmaps
            if flag_demo_heatmaps is True:
                demo_heatmaps(heatmaps, joint_names)

            # Save joint predictions for MPII dataset evaluation
            if flag_save_evaluation is True:
                save_folder = os.path.join(output_image_folder_path, 'quantitative')
                create_folder(save_folder)
                if test_dataset == 'test_MPII' or test_dataset == 'validate_MPII':
                    rect_id = find_rect_id(img_name)
                    save_pose_preditions(joints, save_folder, img_name, rect_id)
                elif test_dataset == 'test_LSP':
                    save_pose_preditions(joints, save_folder, img_name)
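
# A minimal usage sketch for batch_test_images. The function relies on
# module-level configuration (deploy_proto_path, caffe_model_prefix, img_size,
# scales, heatmap_layer_name, output_image_folder_path and the flag_* switches),
# which is assumed to be defined elsewhere in this repo; the folder names below
# are hypothetical.
#
# batch_test_images(folder_path_in='data/test_images',
#                   folder_path_out='output/test_images_pose')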

def draw_shelf_bbox(img, joints, joint_pairs, joint_names, track_id=-1,
                    trigger=False, img_id=-1, img_path=None, pick_results=[]):
    # Pixel coordinates of the shelf compartments (two shelves, six levels each),
    # each given as four corner points.
    shelf_bbox_pixels = [[[[706, 342], [707, 313], [823, 463], [817, 494]],
                          [[709, 308], [713, 275], [833, 416], [825, 454]],
                          [[713, 266], [716, 226], [849, 365], [838, 408]],
                          [[719, 221], [722, 172], [868, 297], [853, 357]],
                          [[723, 170], [728, 107], [888, 214], [875, 288]],
                          [[729, 104], [738, 20], [934, 110], [887, 213]]],
                         [[[818, 496], [827, 459], [987, 661], [964, 698]],
                          [[831, 450], [839, 415], [1015, 614], [994, 650]],
                          [[846, 407], [851, 368], [1044, 560], [1023, 606]],
                          [[858, 356], [869, 299], [1088, 484], [1052, 551]],
                          [[877, 290], [889, 231], [1141, 405], [1098, 474]],
                          [[896, 221], [924, 115], [1248, 263], [1154, 392]]]]
    color = (0, 255, 0)  # green
    font = cv2.FONT_HERSHEY_SIMPLEX

    # Draw every compartment polygon with its "<shelf>-<level>" label
    for i, shelf in enumerate(shelf_bbox_pixels):
        for j, pixels in enumerate(shelf):
            pts = np.array(pixels, np.int32)
            pts = pts.reshape((-1, 1, 2))
            cv2.polylines(img, [pts], isClosed=True, color=color, thickness=2)
            cv2.putText(img,
                        # '{:s} {:.2f}'.format("ID:" + str(track_id), score),
                        '{:s}'.format(str(i + 1) + "-" + str(j + 1)),
                        (pts[0][0][0] - 10, pts[0][0][1] - 5),
                        font, fontScale=0.8, color=color,
                        thickness=2, lineType=cv2.LINE_AA)

    if trigger:
        # Extrapolate the hand position slightly beyond the wrist,
        # along the elbow-to-wrist direction
        ind_1 = joint_names.index('right elbow')
        ind_2 = joint_names.index('right wrist')
        x1, y1, sure1 = joints[ind_1]
        x2, y2, sure2 = joints[ind_2]
        x3 = int(x2 + (x2 - x1) / 4)
        y3 = int(y2 + (y2 - y1) / 4)
        cv2.circle(img, (x3, y3), radius=3, color=(255, 255, 255), thickness=2)

        for i, shelf in enumerate(shelf_bbox_pixels):
            for j, pixels in enumerate(shelf):
                if isPoiWithinPoly([x3, y3], [pixels]):
                    # Highlight the compartment the hand is in
                    pts = np.array(pixels, np.int32)
                    pts = pts.reshape((-1, 1, 2))
                    cv2.polylines(img, [pts], isClosed=True, color=(0, 0, 255), thickness=2)

                    # Crop the picked item from the frame 50 images earlier;
                    # the file name is assumed to end with a 3-digit frame index
                    memory_img_id = img_id - 50  # img of 50 frames before
                    memory_img_path = img_path[:-7] + str(memory_img_id) + img_path[-4:]
                    cropImg = None
                    if is_image(memory_img_path):
                        memory_img = cv2.imread(memory_img_path)
                        cropImg = memory_img[y3 - 30:y3 + 30, x3 - 15:x3 + 45]
                    else:
                        print("No memory img {}.".format(memory_img_id))

                    pick_results.append({'track_id': track_id,
                                         'shelf_id': [i + 1, j + 1],
                                         'img_id': img_id,
                                         'hand_pos': [x3, y3],
                                         'memory_item_img': cropImg})
                    print("ID:" + str(track_id) + " picks on shelf "
                          + str(i + 1) + "-" + str(j + 1) + " at time " + str(img_id))
                    break

    # Render the accumulated pick events (and their cropped item images)
    # down the left side of the frame
    vis_result_x = 30
    vis_result_y = 30
    for i, result in enumerate(pick_results):
        cv2.putText(img,
                    '{:s}'.format("ID:" + str(result['track_id']) + " picks on shelf "
                                  + str(result['shelf_id'][0]) + "-" + str(result['shelf_id'][1])
                                  + " at time " + str(result['img_id'])),
                    (vis_result_x, vis_result_y),
                    font, fontScale=0.8, color=(255, 255, 255),
                    thickness=2, lineType=cv2.LINE_AA)
        vis_result_y += 70
        if result['memory_item_img'] is not None:
            img[vis_result_y - 60:vis_result_y,
                vis_result_x:vis_result_x + 60] = result['memory_item_img']
        vis_result_y += 30
    return img, pick_results
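
# A minimal sketch of how draw_shelf_bbox can be driven frame by frame, passing
# the same pick_results list into every call so that detected pick events (and
# their cropped item images) accumulate and stay rendered on later frames. The
# frame paths, track id and always-on trigger here are hypothetical; in this
# repo the joints come from the pose estimator and the trigger from the
# tracking/pose logic.
def _demo_shelf_picking(frame_paths, joints_per_frame, joint_pairs, joint_names):
    pick_results = []
    for img_id, frame_path in enumerate(frame_paths):
        img = cv2.imread(frame_path)
        joints = joints_per_frame[img_id]
        img, pick_results = draw_shelf_bbox(img, joints, joint_pairs, joint_names,
                                            track_id=1, trigger=True,
                                            img_id=img_id, img_path=frame_path,
                                            pick_results=pick_results)
        # Save the annotated frame next to the original (suffix is hypothetical)
        cv2.imwrite(frame_path.replace('.jpg', '_shelf.jpg'), img)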