import cv2
import numpy as np
import openpose as op  # legacy OpenPose Python API; adjust sys.path/import to match your build


def main():
    params = set_params()
    # Constructing the OpenPose object allocates GPU memory
    openpose = op.OpenPose(params)

    # Open the OpenCV stream
    stream = cv2.VideoCapture(0)
    font = cv2.FONT_HERSHEY_SIMPLEX

    while True:
        ret, img = stream.read()
        if not ret:
            break

        # Convert to grayscale (optional, unused)
        # gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        # Downscale the frame to reduce inference time
        img = cv2.resize(img, (320, 180))

        # Output the keypoints and the image with the human skeleton blended onto it
        keypoints, output_image = openpose.forward(img, True)

        # keypoints is a [#people x #keypoints x 3] numpy array holding
        # (x, y, confidence) for every keypoint of every detected person
        if len(keypoints) > 0:
            print('Human(s) Pose Estimated!')
            print(keypoints)
        else:
            print('No humans detected!')

        # Display the stream
        cv2.putText(output_image, 'Tello-Gesture-Control', (55, 15),
                    font, 0.5, (255, 255, 255), 1, cv2.LINE_AA)
        cv2.imshow('Human Pose Estimation', output_image)

        if cv2.waitKey(10) & 0xFF == ord('q'):
            print('Done **********************************')
            break

    stream.release()
    cv2.destroyAllWindows()
def main():
    params = set_params()
    # Constructing the OpenPose object allocates GPU memory
    openpose = op.OpenPose(params)

    # Open the OpenCV stream
    stream = cv2.VideoCapture(0)
    font = cv2.FONT_HERSHEY_SIMPLEX

    while True:
        ret, img = stream.read()
        if not ret:
            break

        # Output the keypoints and the image with the human skeleton blended onto it
        keypoints, output_image = openpose.forward(img, True)

        # keypoints is a [#people x #keypoints x 3] numpy array with the
        # keypoints of all the people in the frame
        if len(keypoints) > 0:
            print('Human(s) Pose Estimated!')
            print(keypoints)
        else:
            print('No humans detected!')

        # Display the stream
        cv2.putText(output_image, 'OpenPose using Python-OpenCV', (20, 30),
                    font, 1, (255, 255, 255), 1, cv2.LINE_AA)
        cv2.imshow('Human Pose Estimation', output_image)

        key = cv2.waitKey(1)
        if key == ord('q'):
            break

    stream.release()
    cv2.destroyAllWindows()
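# A minimal sketch (not part of the original scripts) of reading individual
# keypoints out of the [#people x #keypoints x 3] array that
# openpose.forward() returns. Indices follow the BODY_25 layout (0 = nose,
# 1 = neck, 5 = left shoulder, 6 = left elbow, 7 = left wrist); each row
# holds (x, y, confidence). print_necks is a hypothetical helper name.
def print_necks(keypoints, conf_threshold=0.05):
    # conf_threshold mirrors params["render_threshold"] used below
    for person in keypoints:
        x, y, conf = person[1]  # neck keypoint
        if conf > conf_threshold:
            print('Neck at (%.0f, %.0f), confidence %.2f' % (x, y, conf))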
import os
import numpy as np

dir_path = os.path.dirname(os.path.realpath(__file__))


def set_params():
    params = dict()
    params["logging_level"] = 3
    params["output_resolution"] = "-1x-1"
    params["net_resolution"] = "-1x368"
    params["model_pose"] = "BODY_25"
    params["alpha_pose"] = 0.6
    params["scale_gap"] = 0.3
    params["scale_number"] = 1
    params["render_threshold"] = 0.05
    # If the GPU version is built and multiple GPUs are available, set the ID here
    params["num_gpu_start"] = 0
    params["disable_blending"] = False
    # Ensure this points to the folder where the models are located
    params["default_model_folder"] = dir_path + "/../../../models/"
    return params


# Constructing the OpenPose object allocates GPU memory
openpose = op.OpenPose(set_params())


def gesture_recognize(keypoints):
    """Recognize a gesture from one person's BODY_25 keypoints."""
    flags = np.zeros((1, 4))  # one flag per gesture, all initially 0

    v_56 = keypoints[6, 0:2] - keypoints[5, 0:2]  # left shoulder (5) -> left elbow (6)
    v_67 = keypoints[7, 0:2] - keypoints[6, 0:2]  # left elbow (6) -> left wrist (7)
    v_15 = keypoints[5, 0:2] - keypoints[1, 0:2]  # neck (1) -> left shoulder (5)

    # Normalize to unit vectors (Euclidean norm, so the dot product below
    # is the cosine of the angle between the vectors)
    nv_56 = v_56 / np.linalg.norm(v_56)
    nv_67 = v_67 / np.linalg.norm(v_67)
    nv_15 = v_15 / np.linalg.norm(v_15)

    dv_15_56 = np.vdot(nv_15, nv_56)
    if -0.7 <= dv_15_56 <= 0:  # cosine in [-0.7, 0]: the angle between the upper
        flags[0, 0] = 1        # arm and the neck-shoulder line is 90-135 degrees
    return flags
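# A minimal usage sketch (assumed, not from the original scripts) of how
# gesture_recognize() could be wired into the capture loop above.
# handle_gestures is a hypothetical helper; the reaction to the flag is
# left as a print rather than a specific Tello command.
def handle_gestures(keypoints):
    if len(keypoints) == 0:
        return
    flags = gesture_recognize(keypoints[0])  # first detected person
    if flags[0, 0] == 1:
        # Map the recognized gesture to a drone command here
        print('Left arm raised 90-135 degrees from the neck line')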
def __init__(self, model_path):
    # Initialise OpenPose
    params = self._openpose_parameters(model_path)
    # Constructing the OpenPose object allocates GPU memory
    self.openpose = op.OpenPose(params)
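# _openpose_parameters is referenced above but not shown. A minimal
# sketch, assuming it builds the same parameter dict as set_params()
# earlier, with the model folder taken from the constructor argument:
def _openpose_parameters(self, model_path):
    params = dict()
    params["logging_level"] = 3
    params["output_resolution"] = "-1x-1"
    params["net_resolution"] = "-1x368"
    params["model_pose"] = "BODY_25"
    params["render_threshold"] = 0.05
    params["num_gpu_start"] = 0
    params["disable_blending"] = False
    params["default_model_folder"] = model_path
    return params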