def keypoint_detection(img, detector, pose_net, ctx=mx.cpu(), axes=None):
    """Run person detection + simple-pose estimation on one frame and plot it.

    Parameters
    ----------
    img : raw frame (format accepted by ``gcv...yolo.transform_test``).
    detector : GluonCV person detector (YOLO-style; returns IDs/scores/boxes).
    pose_net : simple-pose network producing keypoint heatmaps.
    ctx : mxnet.Context, device to run inference on (default CPU).
    axes : matplotlib axes to draw into; a new one is created when None.

    Returns
    -------
    The matplotlib axes used, so the caller can reuse it for the next frame.
    """
    # max_size=350 keeps the resized frame small for interactive speed.
    x, img = gcv.data.transforms.presets.yolo.transform_test(img, short=512, max_size=350)
    x = x.as_in_context(ctx)
    class_IDs, scores, bounding_boxs = detector(x)
    plt.cla()
    pose_input, upscale_bbox = detector_to_simple_pose(img, class_IDs, scores,
                                                       bounding_boxs, ctx=ctx)
    if len(upscale_bbox) > 0:
        predicted_heatmap = pose_net(pose_input)
        pred_coords, confidence = heatmap_to_coord(predicted_heatmap, upscale_bbox)
        axes = plot_keypoints(img, pred_coords, confidence, class_IDs,
                              bounding_boxs, scores, box_thresh=0.5,
                              keypoint_thresh=0.2, ax=axes)
        plt.draw()
        plt.pause(0.001)
    else:
        # BUGFIX: original referenced undefined name `frame` here, raising a
        # NameError whenever no person was detected; use the transformed image.
        axes = plot_image(img, ax=axes)
        plt.draw()
        plt.pause(0.001)
    return axes
def main():
    """Entry point: detect people with GluonCV YOLO, then run a pose model
    through OpenCV DNN (ONNX or OpenVINO IR) and plot the keypoints.

    Raises
    ------
    ValueError
        If ``args.type`` is neither 'ONNX' nor 'OpenVINO' (previously this
        fell through with ``network is None`` and crashed later with an
        opaque AttributeError).
    """
    args = parse_args()
    network = None
    scale = 1.0
    detector = get_model('yolo_darknet53_coco', pretrained=True)
    # Restrict the COCO detector to the single 'person' class.
    detector.reset_class(['person'], reuse_weights=['person'])
    if args.type == 'ONNX':
        network = cv2.dnn.readNetFromONNX(args.model)
    elif args.type == 'OpenVINO':
        network = cv2.dnn.readNetFromModelOptimizer(args.xml, args.model)
    else:
        # BUGFIX: fail fast instead of proceeding with network == None.
        raise ValueError("unsupported model type: %r (expected 'ONNX' or 'OpenVINO')"
                         % (args.type,))
    # Default backend if one wasn't specified.
    if not args.backend:
        network.setPreferableBackend(cv2.dnn.DNN_BACKEND_DEFAULT)
    # To use the CUDA backend, OpenCV must be built with CUDA support.
    elif args.backend == 'CUDA':
        network.setPreferableBackend(cv2.dnn.DNN_BACKEND_CUDA)
        network.setPreferableTarget(cv2.dnn.DNN_TARGET_CUDA)
    # To use an OpenVINO model, the Inference Engine must be installed and
    # OpenCV built with IE support.
    elif args.backend == 'INFERENCE':
        network.setPreferableBackend(cv2.dnn.DNN_BACKEND_INFERENCE_ENGINE)
        network.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)
    x, img = data.transforms.presets.yolo.load_test(args.img, short=512)
    class_IDs, scores, bounding_boxes = detector(x)
    pose_input, upscaled_bbox = detector_to_simple_pose(
        img, class_IDs, scores, bounding_boxes)
    pose_input = pose_input.asnumpy()
    bs = []
    # Run the pose network one cropped person at a time through OpenCV DNN.
    for i in range(pose_input.shape[0]):
        # Renamed from `input` to avoid shadowing the builtin.
        blob = cv2.dnn.blobFromImage(
            np.transpose(np.squeeze(pose_input[i, :, :, :]), (1, 2, 0)),
            scale, (args.width, args.height), (0, 0, 0), False)
        network.setInput(blob)
        temp = network.forward()
        bs.append(temp)
    # Re-assemble the per-person heatmaps into one batch for decoding.
    output = np.concatenate(bs, axis=0)
    output = mx.nd.array(output)
    pred_coords, confidence = heatmap_to_coord(output, upscaled_bbox)
    ax = plot_keypoints(img, pred_coords, confidence, class_IDs,
                        bounding_boxes, scores, box_thresh=0.5,
                        keypoint_thresh=0.2)
    plt.show()
def keypoint_detection(img_path, detector, pose_net):
    """Load an image from disk, detect people, estimate and plot keypoints.

    Parameters
    ----------
    img_path : str, path to the input image.
    detector : GluonCV person detector (returns IDs/scores/boxes).
    pose_net : simple-pose network producing keypoint heatmaps.

    Blocks on ``plt.show()`` until the plot window is closed.
    """
    x, img = data.transforms.presets.yolo.load_test(img_path, short=512)
    class_IDs, scores, bounding_boxs = detector(x)
    pose_input, upscale_bbox = detector_to_simple_pose(img, class_IDs,
                                                       scores, bounding_boxs)
    # ROBUSTNESS: the sibling keypoint_detection variants all guard against an
    # empty detection set; without this, pose_net fails on an empty batch.
    if len(upscale_bbox) == 0:
        print('No person detected in %s' % img_path)
        return
    predicted_heatmap = pose_net(pose_input)
    pred_coords, confidence = heatmap_to_coord(predicted_heatmap, upscale_bbox)
    ax = plot_keypoints(img, pred_coords, confidence, class_IDs,
                        bounding_boxs, scores, box_thresh=0.5,
                        keypoint_thresh=0.2)
    plt.show()
def keypoint_detection(i, frame, imagepath, detector, pose_net, ctx=mx.cpu(), axes=None):
    """Detect people in one frame, estimate their keypoints, trigger the
    hackathon action hook, and redraw the plot.

    Parameters
    ----------
    i : frame index, forwarded to ``hackathon_action``.
    frame : raw input frame.
    imagepath : path string, forwarded to ``hackathon_action``.
    detector : GluonCV person detector.
    pose_net : simple-pose network producing keypoint heatmaps.
    ctx : mxnet.Context for inference (default CPU).
    axes : matplotlib axes to reuse; created on demand when None.

    Returns
    -------
    The matplotlib axes used, for reuse on the next frame.
    """
    global pause_time
    tensor, resized = gcv.data.transforms.presets.yolo.transform_test(
        frame, short=512, max_size=1024)
    tensor = tensor.as_in_context(ctx)
    class_IDs, scores, bounding_boxs = detector(tensor)
    plt.cla()
    pose_input, upscale_bbox = detector_to_simple_pose(
        resized, class_IDs, scores, bounding_boxs,
        output_shape=(1024, 768), ctx=ctx)
    if not len(upscale_bbox) > 0:
        # Nothing detected: just show the raw frame.
        axes = plot_image(frame, ax=axes)
    else:
        heatmaps = pose_net(pose_input)
        pred_coords, confidence = heatmap_to_coord(heatmaps, upscale_bbox)
        hackathon_action(i, frame, imagepath, pred_coords, confidence,
                         class_IDs, bounding_boxs, scores)
        axes = plot_keypoints(resized, pred_coords, confidence, class_IDs,
                              bounding_boxs, scores, box_thresh=0.5,
                              keypoint_thresh=0.2, ax=axes)
    plt.draw()
    plt.pause(pause_time)
    return axes
def keypoint_detection(img, detector, pose_net, ctx=mx.cpu(), axes=None):
    """Run person detection + simple-pose estimation (128x96 heatmaps) on one
    frame and plot the result.

    Parameters
    ----------
    img : raw frame (format accepted by ``gcv...yolo.transform_test``).
    detector : GluonCV person detector (returns IDs/scores/boxes).
    pose_net : simple-pose network producing keypoint heatmaps.
    ctx : mxnet.Context, device to run inference on (default CPU).
    axes : matplotlib axes to draw into; a new one is created when None.

    Returns
    -------
    The matplotlib axes used, so the caller can reuse it for the next frame.
    """
    # max_size=350 keeps the resized frame small for interactive speed.
    x, img = gcv.data.transforms.presets.yolo.transform_test(img, short=512, max_size=350)
    x = x.as_in_context(ctx)
    class_IDs, scores, bounding_boxs = detector(x)
    plt.cla()
    pose_input, upscale_bbox = detector_to_simple_pose(img, class_IDs, scores,
                                                       bounding_boxs,
                                                       output_shape=(128, 96),
                                                       ctx=ctx)
    if len(upscale_bbox) > 0:
        predicted_heatmap = pose_net(pose_input)
        pred_coords, confidence = heatmap_to_coord(predicted_heatmap, upscale_bbox)
        axes = plot_keypoints(img, pred_coords, confidence, class_IDs,
                              bounding_boxs, scores, box_thresh=0.5,
                              keypoint_thresh=0.2, ax=axes)
        plt.draw()
        plt.pause(0.001)
    else:
        # BUGFIX: original referenced undefined name `frame` here, raising a
        # NameError whenever no person was detected; use the transformed image.
        axes = plot_image(img, ax=axes)
        plt.draw()
        plt.pause(0.001)
    return axes