def generate_frames_and_keypoints(self, ui_progress_bar=None):
    # requires: import os, cv2, shutil as sh
    if not os.path.exists(self.generationDirectory):
        os.mkdir(self.generationDirectory)
    dst = self.generationDirectory + self.batchName + "/"
    if not os.path.exists(dst):
        os.mkdir(dst)

    # Run OpenPose on each video in the batch and save the keypoints
    i = 0
    progress = 5
    for video in os.listdir(self.temporaryDirectory):
        # find a free "VideoN" directory name
        dump_path = dst + "Video" + str(i) + "/"
        while os.path.exists(dump_path):
            i += 1
            dump_path = dst + "Video" + str(i) + "/"
        os.mkdir(dump_path)

        vidcap = cv2.VideoCapture(self.temporaryDirectory + video)
        success, image = vidcap.read()
        count = 0
        progress += len(os.listdir(self.temporaryDirectory))
        while success:
            # note: this progress estimate is only a rough heuristic
            progress += 10 / progress
            if ui_progress_bar is not None:
                ui_progress_bar.setValue(progress)
            cv2.imwrite(dump_path + "frame%d.jpg" % count, image)  # save frame as JPEG file
            success, image = vidcap.read()
            count += 1
        i += 1
        vidcap.release()

    if os.path.exists(self.temporaryDirectory):
        sh.rmtree(self.temporaryDirectory)

    op = OpenPose()
    for vidBatch in os.listdir(dst):
        progress += 100 / progress
        if ui_progress_bar is not None:
            ui_progress_bar.setValue(progress)
        keypoint_dst = dst + vidBatch + "/Keypoints"
        if not os.path.exists(keypoint_dst):
            os.mkdir(keypoint_dst)
        print("Processing Batch: " + vidBatch)
        # run every frame through OpenPose and dump the output into keypoint_dst
        op.pose(dst + vidBatch, keypoint_dst)

    if ui_progress_bar is not None:
        ui_progress_bar.setValue(100)
    return dst + "Video" + str(len(os.listdir(dst)) - 1) + "/"
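# The progress arithmetic above is heuristic and can drift past 100. A minimal
# alternative sketch (the helper name is hypothetical) derives per-frame
# progress from the frame count that OpenCV reports for the capture:
import cv2

def frame_progress(vidcap, frames_done, n_videos, videos_done):
    """Return an estimated 0-100 progress value for the extraction stage."""
    total = vidcap.get(cv2.CAP_PROP_FRAME_COUNT) or 1  # may report 0 for some containers
    video_fraction = min(frames_done / total, 1.0)
    return 100.0 * (videos_done + video_fraction) / n_videos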
def generate_pose_heatmap(frame, nets):
    imagesForNet, imagesOrig = OpenPose.process_frames(frame, Param.defRes, Param.scales)

    global first_run
    if first_run:
        for i in range(0, len(Param.scales)):
            net = nets[i]
            imageForNet = imagesForNet[i]
            in_shape = net.blobs['image'].data.shape
            in_shape = (1, 3, imageForNet.shape[1], imageForNet.shape[2])
            net.blobs['image'].reshape(*in_shape)
            net.reshape()
        first_run = False
        print("Reshaped")

    heatmaps = []
    for i in range(0, len(Param.scales)):
        net = nets[i]
        imageForNet = imagesForNet[i]
        net.blobs['image'].data[0, :, :, :] = imageForNet
        net.forward()
        heatmaps.append(net.blobs['net_output'].data[:, :, :, :])

    # array, frame = openpose.poseFromHM(frame, heatmaps, Param.scales)
    # hm = heatmaps[0][:, 0:18, :, :]; frame = OpenPose.draw_all(imagesOrig[0], hm, -1, 1, True)
    paf = heatmaps[0][:, 20:, :, :]
    frame = OpenPose.draw_all(imagesOrig[0], paf, -1, 4, False)
    return frame
def create_dataset_knn(num_of_frames):
    op = OpenPose()
    time.sleep(3)  # wait before grabbing the first frame
    frame_number = 0
    for _ in range(num_of_frames):
        keypoints, image = op.estimate_3d_picture(use_table_data=False)
        cv2.imwrite(f'dataset/img/occluded_test_data{frame_number}.jpg', image)
        create_json_knn(keypoints['keypoints'], frame_number, True)
        print(f'Frame with number {frame_number} created...')
        frame_number += 1
        time.sleep(1)
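# create_json_knn is not shown in this snippet; a minimal sketch of what such a
# helper could look like (the output path and flag semantics are assumptions):
import json
import numpy as np

def create_json_knn(keypoints, frame_number, occluded):
    """Dump one frame's keypoints to a JSON file alongside the saved image."""
    record = {'frame': frame_number,
              'occluded': occluded,
              'keypoints': np.asarray(keypoints).tolist()}
    with open(f'dataset/json/keypoints{frame_number}.json', 'w') as f:
        json.dump(record, f)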
def get_from_openpose():
    op = OpenPose()
    # op.get_from_openpose(videosdir='/home/junkado/Desktop/keio/hard/focusright', extension=".mp4")
    # op.manual_videofile("/home/junkado/Desktop/keio/hard/focusright/12.mp4")
    # editLists = [8, 9, 10, 11, 12, 13, 14, 16, 17, 23, 24, 25, 26, 27, 30, 50, 51, 65, 99, 100, 101, 102, 103, 125, 129, 130, 131, 132, 133, 134, 135, 137, 138, 139, 140, 141, 143, 145, 160, 163]
    editLists = [
        170, 261, 263, 264, 266, 269, 270, 271, 272, 273, 274, 275, 276, 277,
        306, 307, 308, 309, 310, 311, 312, 314, 315, 316, 317, 318, 319, 320,
        323, 325, 326, 327, 328, 329, 338, 339, 340, 341, 342, 343
    ]
    videopaths = [
        "/home/junkado/Desktop/keio/hard/focusright/{0}.mp4".format(editfile)
        for editfile in editLists
    ]
    op.manual_videofiles(videopaths)
class pose_detection(object):
    """docstring for pose_detection."""

    def __init__(self):
        super(pose_detection, self).__init__()
        params = dict()
        params["logging_level"] = 3
        params["output_resolution"] = "-1x-1"
        params["net_resolution"] = "-1x1072"
        params["model_pose"] = "BODY_25"
        params["alpha_pose"] = 0.6
        params["scale_gap"] = 0.3
        params["scale_number"] = 1
        params["render_threshold"] = 0.05
        params["num_gpu_start"] = 0
        params["disable_blending"] = False
        params["default_model_folder"] = "/home/immersivemidiaopenposeclone/openpose/models/"
        self.openpose = OpenPose(params)

    def detect_pose(self, fname):
        # note: returns a string on failure but an (image, joints) tuple on success
        img = cv2.imread(fname)
        if img is None:
            return "image not found"
        else:
            joints, output_image = self.openpose.forward(img, display=True)
            # print joints.shape, output_image.shape
            # print fname
            # with open(fname + ".json", 'w') as outfile:
            #     json.dump(joints.tolist(), outfile)
            # dirname = os.path.dirname(fname)
            # cv2.imwrite(fname + ".jpg", output_image)
            return output_image, joints
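# A minimal usage sketch for the class above (the file path is a placeholder,
# and the joints shape assumes the usual BODY_25 layout of (n_people, 25, 3)):
if __name__ == '__main__':
    detector = pose_detection()
    result = detector.detect_pose("example.jpg")
    if result == "image not found":
        print(result)
    else:
        output_image, joints = result
        cv2.imwrite("example_rendered.jpg", output_image)
        print(joints.shape)  # (n_people, n_keypoints, 3): x, y, confidence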
def __init__(self):
    config = {}
    config["dir"] = openpose_install_path
    config["logging_level"] = 3
    config["output_resolution"] = "-1x-1"  # 320x176
    config["net_resolution"] = "-1x768"  # 320x176
    config["model_pose"] = "BODY_25"
    config["alpha_pose"] = 0.6
    config["scale_gap"] = 0.3
    config["scale_number"] = 1
    config["render_threshold"] = 0.05
    # If the GPU version is built, and multiple GPUs are available, set the ID here
    config["num_gpu_start"] = 0
    config["disable_blending"] = False

    openpose_dir = config["dir"]
    sys.path.append(openpose_dir + "/build/python/openpose")
    from openpose import OpenPose  # noqa
    config["default_model_folder"] = openpose_dir + "/models/"
    self.detector = OpenPose(config)
def func(frame):
    # Get image processed for network, and scaled image
    imagesForNet, imagesOrig = OpenPose.process_frames(frame, defRes, scales)

    # Reshape the network inputs once, on the first frame
    global first_run
    if first_run:
        for i in range(0, len(scales)):
            net = nets[i]
            imageForNet = imagesForNet[i]
            in_shape = net.blobs['image'].data.shape
            in_shape = (1, 3, imageForNet.shape[1], imageForNet.shape[2])
            net.blobs['image'].reshape(*in_shape)
            net.reshape()
        first_run = False
        # print("Reshaped")

    # Forward pass to get heatmaps
    heatmaps = []
    for i in range(0, len(scales)):
        net = nets[i]
        imageForNet = imagesForNet[i]
        net.blobs['image'].data[0, :, :, :] = imageForNet
        net.forward()
        heatmaps.append(net.blobs['net_output'].data[:, :, :, :])

    # Pose from HM Test
    # combined = openpose.draw_all(frame, heatmaps, 1)
    # array, frame = openpose.poseFromHM(frame, heatmaps, scales)

    # Draw Heatmaps instead (note: the PAF overlay below replaces `frame`,
    # so the part-heatmap rendering right above it is discarded)
    hm = heatmaps[0][:, 0:18, :, :]
    frame = OpenPose.draw_all(imagesOrig[0], hm, -1, 1, True)
    paf = heatmaps[0][:, 20:, :, :]
    frame = OpenPose.draw_all(imagesOrig[0], paf, -1, 4, False)
    return frame
class Run:
    def __init__(self):
        rospy.init_node('show_img')
        params = dict()
        params["logging_level"] = 3
        params["output_resolution"] = "-1x-1"
        params["net_resolution"] = "-1x368"
        params["model_pose"] = "BODY_25"
        params["alpha_pose"] = 0.6
        params["scale_gap"] = 0.3
        params["scale_number"] = 1
        params["render_threshold"] = 0.05
        # If GPU version is built, and multiple GPUs are available, set the ID here
        params["num_gpu_start"] = 0
        params["disable_blending"] = False
        # Ensure you point to the correct path where models are located
        params["default_model_folder"] = "/home/sassbot/openpose/models/"
        # Constructing the OpenPose object allocates GPU memory
        self.op = OpenPose(params)
        self.bridge = CvBridge()  # converts ROS images to cv2
        rospy.Subscriber("/camera/rgb/image_color", Image, self.callback_camera)
        print "Ready"

    def callback_camera(self, cam_data):
        try:
            img = self.bridge.imgmsg_to_cv2(cam_data, "bgr8")
            # get image height, width
            # (h, w) = img0.shape[:2]
            # calculate the center of the image
            # center = (w / 2, h / 2)
            # rotate
            # M = cv2.getRotationMatrix2D(center, 90, 1.0)
            # img = cv2.warpAffine(img0, M, (h, w))
        except CvBridgeError as e:
            print(e)
            return  # img is undefined if the conversion failed
        cv2.imshow("input", img)
        cv2.waitKey(15)
        self.show_img(img)

    def show_img(self, img):
        keypoints, output_image = self.op.forward(img, True)
        cv2.imshow("output", output_image)
class GetKeyPointsServer:
    def __init__(self):
        rospy.init_node('get_keypoints_server')
        rospy.Service('get_keypoints', GetKeyPoints, self.handle_get_keypoints)
        params = dict()
        params["logging_level"] = 3
        params["output_resolution"] = "-1x-1"
        params["net_resolution"] = "-1x368"
        params["model_pose"] = "BODY_25"
        params["alpha_pose"] = 0.6
        params["scale_gap"] = 0.3
        params["scale_number"] = 1
        params["render_threshold"] = 0.05
        # If GPU version is built, and multiple GPUs are available, set the ID here
        params["num_gpu_start"] = 0
        params["disable_blending"] = False
        # Ensure you point to the correct path where models are located
        params["default_model_folder"] = "/home/sassbot/openpose/models/"
        # Constructing the OpenPose object allocates GPU memory
        self.op = OpenPose(params)
        print "Ready to get_keypoints."

    def handle_get_keypoints(self, req):
        print("received req", len(req.imgdata))
        # Debug stub: a fixed test image is used instead of the request payload;
        # the commented reshape shows the intended decoding of req.imgdata.
        # img = np.reshape(np.array(req.imgdata, dtype="uint8"), (480, 640, 3))
        img = cv2.imread("../media/COCO_val2014_000000000192.jpg")
        keypoints, output_image = self.op.forward(img, True)
        # Display the image
        cv2.imshow("output", output_image)
        cv2.waitKey(15)
        keypoints = req.imgdata  # note: echoes the request back, discarding the detections
        print("return keypoints", keypoints)
        return GetKeyPointsResponse(keypoints)
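# A minimal client-side sketch for the service above (the package providing the
# GetKeyPoints srv type is not shown here, so the import below is hypothetical):
import rospy
from some_pkg.srv import GetKeyPoints  # hypothetical package name

def request_keypoints(imgdata):
    rospy.wait_for_service('get_keypoints')
    get_keypoints = rospy.ServiceProxy('get_keypoints', GetKeyPoints)
    resp = get_keypoints(imgdata)
    return resp.keypoints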
def func(frame):
    # Get image processed for network, and scaled image
    imagesForNet, imagesOrig = OpenPose.process_frames(frame, defRes, scales)

    # Reshape
    global first_run
    if first_run:
        for i in range(0, len(scales)):
            net = nets[i]
            imageForNet = imagesForNet[i]
            in_shape = net.blobs['image'].data.shape
            in_shape = (1, 3, imageForNet.shape[1], imageForNet.shape[2])
            net.blobs['image'].reshape(*in_shape)
            net.reshape()
        first_run = False
        print("Reshaped")

    # Forward pass to get heatmaps
    heatmaps = []
    for i in range(0, len(scales)):
        net = nets[i]
        imageForNet = imagesForNet[i]
        net.blobs['image'].data[0, :, :, :] = imageForNet
        net.forward()
        heatmaps.append(net.blobs['net_output'].data[:, :, :, :])

    # Pose from HM Test
    array, frame = openpose.poseFromHM(frame, heatmaps, scales)

    # Draw Heatmaps instead
    # hm = heatmaps[0][:, 0:18, :, :]; frame = OpenPose.draw_all(imagesOrig[0], hm, -1, 1, True)
    # paf = heatmaps[0][:, 20:, :, :]; frame = OpenPose.draw_all(imagesOrig[0], paf, -1, 4, False)
    return frame
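# For debugging, a single heatmap channel can also be overlaid without the
# OpenPose draw_all helper. A minimal sketch assuming a (1, C, H, W) float
# heatmap blob and a BGR uint8 image (the channel index is arbitrary):
import cv2
import numpy as np

def overlay_heatmap(image, heatmaps, channel=0, alpha=0.5):
    hm = heatmaps[0, channel]                              # (H, W) float map
    hm = cv2.resize(hm, (image.shape[1], image.shape[0]))  # match image size
    hm = np.clip(hm, 0.0, 1.0)
    colored = cv2.applyColorMap((hm * 255).astype(np.uint8), cv2.COLORMAP_JET)
    return cv2.addWeighted(image, 1.0 - alpha, colored, alpha, 0)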
def load_pose_model(params=None):
    if params is None:
        params = AllParams()
    if params.pose.use_openpose:
        from openpose import OpenPose
        params.pose.model = OpenPose(vars(params.pose))
    else:
        import torch
        import torch.nn as nn
        from torch.autograd import Variable
        from network.Ying_model import get_ying_model
        if params.pose.use_gpu_postprocess:
            from cpm.cpm_layer import rtpose_postprocess
        model = get_ying_model(stages=5, have_bn=False, have_bias=True)
        model.load_state_dict(
            torch.load('./models/dilated3_5stage_merged.pth'))
        model = nn.DataParallel(model, device_ids=params.pose.gpus)
        params.pose.model = model.cuda(params.pose.gpus[0])
        params.pose.model.eval()
    return params
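# A minimal usage sketch for load_pose_model (AllParams comes from the
# surrounding project; only fields referenced above are assumed to exist):
params = load_pose_model()
model = params.pose.model
# `model` is either the OpenPose wrapper or a DataParallel torch module
# on GPU in eval mode, depending on params.pose.use_openpose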
# Load OpenPose object and Caffe Nets
params = dict()
params["logging_level"] = 3
params["output_resolution"] = "-1x-1"
params["net_resolution"] = "-1x" + str(defRes)
params["model_pose"] = "BODY_25"
params["alpha_pose"] = 0.6
params["scale_gap"] = 0.25
params["scale_number"] = len(scales)
params["render_threshold"] = 0.05
params["num_gpu_start"] = 0
params["disable_blending"] = False
params["default_model_folder"] = dir_path + "/../../../models/"
openpose = OpenPose(params)

caffe.set_mode_gpu()
caffe.set_device(0)
nets = []
for scale in scales:
    nets.append(caffe.Net(Param.prototxt, Param.caffemodel, caffe.TEST))
print("Net loaded")

# Test Function
first_run = True

def func(frame):
    # Get image processed for network, and scaled image
    imagesForNet, imagesOrig = OpenPose.process_frames(frame, defRes, scales)
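# For reference, OpenPose builds its multi-scale pyramid from scale_number and
# scale_gap: the first scale is 1.0 and each subsequent scale shrinks by
# scale_gap. A standalone sketch of that convention (this mirrors the OpenPose
# flag documentation, not code from this snippet):
scale_number, scale_gap = 4, 0.25
example_scales = [1.0 - i * scale_gap for i in range(scale_number)]
assert example_scales == [1.0, 0.75, 0.5, 0.25]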
sys.path.insert(0, '../debugging')
from mvpose.data import epfl_campus
from time import time

root = Settings['CMU']['data_root']
tmp = Settings['tmp']

import mvpose.data.kth_football2 as kth
from mvpose.evaluation import pcp
from mvpose.plot.limbs import draw_mscoco_human, draw_mscoco_human2d
from mvpose.data import cmu_panoptic
from openpose import OpenPose

print("TMP", tmp)
pe = OpenPose(tmp=tmp)

Frames_Pizza = list(range(1000, 8000))
Frames_Ultimatum = list(range(300, 6880))
Frames_Haggling = list(range(4209, 5315)) + list(range(6440, 8200))
# pizza: 1000 - 4458
# ultimatum: 300 - 3758
# haggling: 4209 - 5315 + 6440 - 8200
sequences = ['160906_pizza1', '160422_ultimatum1', '160224_haggling1']
print(len(Frames_Haggling))

def apply(seq_name, frame):
tmp = Settings['tmp']

import mvpose.data.kth_football2 as kth
from mvpose import pose
from mvpose.settings import get_settings
from paf_loader import Loader
from mvpose.evaluation import pcp
from mvpose.plot.limbs import draw_mscoco_human, draw_mscoco_human2d
from mvpose.baseline.baseline import estimate
from openpose import OpenPose

seq1_zipname = 'player2sequence1.zip'
seq1_dir = 'Sequence 1'
peak_threshold = 0.08
pe = OpenPose(tmp=tmp, peak_threshold=peak_threshold)

from mvpose.baseline.tracking import tracking
from time import time

Calib = []
poses_per_frame = []
Pos3d = []
Ims = []
_start = time()
# for frame in range(0, 214):
end_frame = 100
for frame in range(0, end_frame):
    Im, calib, pos2d, pos3d = kth.get(data_root, seq1_zipname, seq1_dir, frame,
                                      player=2)
ax.set_xlim([0, 1920])
ax.set_ylim([1080, 0])
for pid, points3d_weighted in Y:
    pts3d = points3d_weighted[:, 0:3].astype('float32')
    pts2d = cam.projectPoints(pts3d)
    pts2dT = pts2d.transpose()
    ax.scatter(pts2d[:, 0], pts2d[:, 1], color=colors[pid])
    for edge in body_edges:
        ax.plot(pts2dT[0, edge], pts2dT[1, edge], color=colors[pid])
plt.tight_layout()
plt.pause(2)

pe = OpenPose(tmp=tmp)
predictions = pe.predict(Im, 'cvpr_cmu', FRAME)

# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
fig = plt.figure(figsize=(16, 8))
for idx, (im, pred) in enumerate(zip(Im, predictions)):
    ax = fig.add_subplot(2, 3, idx + 1)
    ax.axis('off')
    ax.imshow(im)
    ax.set_xlim([0, 1920])
    ax.set_ylim([1080, 0])
    for human in pred:
        draw_mscoco_human2d(ax, human[:, 0:2], color='red')
plt.tight_layout()
Im, Calib, pos2d, pos3d = kth.get(data_root, seq1_zipname, seq1_dir, FRAME)

from mvpose.algorithm.candidate_selection import project_human_to_2d
from mvpose.plot.limbs import draw_mscoco_human, draw_mscoco_human2d

# heatmaps, pafs = paf_loader.load_confidence_map_and_paf(
#     'kth' + seq1_zipname[0:-4], Im, FRAME, dir=tmp)
# _start = time()
# Debug, detections = pose.estimate(Calib, heatmaps, pafs,
#                                   settings=params, debug=True)
# _end = time()
# print('elapsed', _end - _start)

from openpose import OpenPose
pe = OpenPose(tmp=tmp)
name = 'cvpr_kth_' + seq1_zipname
predictions = pe.predict(Im, name, FRAME)
Humans = predictions

from mvpose.baseline.baseline import estimate
H, Hypothesis = estimate(Calib, predictions,
                         epi_threshold=100,
                         scale_to_mm=1000,
                         get_hypothesis=True)

fig = plt.figure(figsize=(16, 12))
import mvpose.data.kth_football2 as kth
from mvpose import pose
from mvpose.settings import get_settings
from paf_loader import Loader
from mvpose.evaluation import pcp
from mvpose.plot.limbs import draw_mscoco_human, draw_mscoco_human2d
from openpose import OpenPose

pe = OpenPose(tmp=tmp, peak_threshold=0.05)

valid_frames = list(range(350, 470)) + list(range(650, 750))
Calib = []
poses_per_frame = []
Pos3d = {}
Imgs = {}
peak_threshold = 0.05
_start = time()
for frame in valid_frames:
    Im, Y, calib = epfl_campus.get(root, frame)
    Imgs[frame] = Im
    Calib.append(calib)
    Pos3d[frame] = Y
    predictions = pe.predict(Im, 'cvpr_campus' + str(peak_threshold),
class KeyPointer(object):
    joint_order = [
        'cnose', 'cneck', 'rshoulder', 'relbow', 'rwrist', 'lshoulder',
        'lelbow', 'lwrist', 'chip', 'rhip', 'rknee', 'rankle', 'lhip',
        'lknee', 'lankle', 'reye', 'leye', 'rear', 'lear', 'lbigtoe',
        'lsmalltoe', 'lheel', 'rbigtoe', 'rsmalltoe', 'rheel',
        # TODO I guess it stops here?
        'rankle', 'rknee', 'rhip', 'lhip', 'lknee', 'lankle', 'rwrist',
        'relbow', 'rshoulder', 'lshoulder', 'lelbow', 'lwrist', 'cnose',
        'leye', 'reye'
    ]

    def __init__(self, gpu_id=0, n_people=1, net_resolution=(256, 256),
                 scale_number=4, scale_gap=0.25, keypoint_scale=3):
        '''Sets up an openpose object, which can be shared.

        Args:
            gpu_id (int): Id of the gpu to be used. Defaults to `0`. No
                effect when used with a CPU_ONLY build of OpenPose.
            n_people (int): Maximum number of people to be detected.
                Ignored.
            net_resolution (2-tuple of ints): Resolution (width, height)
                at which the model runs. Higher resolution might give
                better results, but probably only for images containing
                small people. Should match the aspect ratio of the input
                image.
            scale_number (int): Number of scales to average over.
            scale_gap (float): Ratio between different scales.
            keypoint_scale (int): Scaling of keypoints. From OpenPose:
                0 to scale it to the original source resolution; 1 to
                scale it to the net output size (set with net_resolution);
                2 to scale it to the final output size (set with
                resolution); 3 to scale it in the range [0,1], where (0,0)
                would be the top-left corner of the image, and (1,1) the
                bottom-right one; and 4 for the range [-1,1], where
                (-1,-1) would be the top-left corner of the image, and
                (1,1) the bottom-right one.

        Returns:
            object: The Pose estimator
        '''
        sys.path.append(PYTHON_OPENPOSE)
        from openpose import OpenPose
        gpu_id = int(os.environ.get("GPUID_OPENPOSE", gpu_id))
        params = {
            'logging_level': 3,
            'net_resolution': "x".join(map(str, net_resolution)),
            'model_pose': 'BODY_25',
            'scale_number': scale_number,
            'scale_gap': scale_gap,
            'num_gpu_start': gpu_id,
            'default_model_folder': MODEL_OPENPOSE,
            # 'number_people_max': n_people,  # seems to be ignored
            # rendering related - not so interesting for us but required
            'output_resolution': '-1x-1',
            'alpha_pose': 0.6,
            'render_threshold': 0.05,
            'disable_blending': False,
        }
        self.net_resolution = net_resolution
        self.keypoint_scale = keypoint_scale
        self.n_keypoints = 25
        self.openpose = OpenPose(params)

    def rescale_keypoints(self, image, keypoints):
        """The OpenPose python wrapper does not support keypoint_scale,
        so we do it ourselves."""
        # reshape if no detections
        if keypoints.size == 0 and keypoints.shape[1:] != (self.n_keypoints, 3):
            keypoints = np.reshape(keypoints, (0, self.n_keypoints, 3))
        if self.keypoint_scale == 0:
            return keypoints
        # rescale to [0,1]
        h, w = image.shape[:2]
        keypoints = keypoints / np.array([[[(w - 1), (h - 1), 1.0]]])
        if self.keypoint_scale == 1:
            net_w, net_h = self.net_resolution
            return keypoints * np.array([[[(net_w - 1), (net_h - 1), 1.0]]])
        elif self.keypoint_scale == 2:
            raise NotImplementedError("Python wrapper does not support "
                                      "resolution.")
        elif self.keypoint_scale == 3:
            return keypoints
        elif self.keypoint_scale == 4:
            return np.array([[[2.0, 2.0, 1.0]]]) * keypoints - np.array(
                [[[1.0, 1.0, 0.0]]])
        raise NotImplementedError("Invalid keypoint_scale: {}".format(
            self.keypoint_scale))

    def __call__(self, image, render=False):
        '''Returns the keypoints in an image for any number of people in it.

        Args:
            image (np.array): 3d array of shape (height, width, 3) - the
                last axis is assumed to be BGR, as this is based on Caffe!
            render (bool): Whether or not to return the rendered image as
                well.

        Returns:
            np.array: keypoints as specified in the openpose
                documentation. Array of shape [n_persons, n_joints, 3]
                where the last axis is x, y, confidence.
        '''
        if not image.flags['C_CONTIGUOUS']:
            raise ValueError("OpenPose can not handle views!")
        result = self.openpose.forward(image, render)
        if render:
            keypoints, rendered_image = result
        else:
            keypoints = result
        keypoints = self.rescale_keypoints(image, keypoints)
        if render:
            return keypoints, rendered_image
        else:
            return keypoints
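# A minimal usage sketch for KeyPointer (PYTHON_OPENPOSE/MODEL_OPENPOSE must
# point at a local OpenPose build and its models; the image path is a placeholder):
import cv2

keypointer = KeyPointer(keypoint_scale=3)  # normalized [0, 1] coordinates
image = cv2.imread("person.jpg")           # BGR, as the class expects
keypoints = keypointer(image)              # shape (n_persons, 25, 3)
keypoints2, rendered = keypointer(image, render=True)
# with keypoint_scale=3, x and y lie in [0, 1]; confidence stays unscaled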
tmp = Settings['tmp']

import mvpose.data.kth_football2 as kth
from mvpose import pose
from mvpose.settings import get_settings
from paf_loader import Loader
from mvpose.evaluation import pcp
from mvpose.plot.limbs import draw_mscoco_human3d
from mvpose.baseline.baseline import estimate
from openpose import OpenPose
from mvpose.baseline.tracking import Track

seq1_zipname = 'player2sequence1.zip'
seq1_dir = 'Sequence 1'
pe = OpenPose(tmp=tmp)

from mvpose.baseline.tracking import tracking
from time import time

Calib = []
poses_per_frame = []
_start = time()
# for frame in range(0, 214):
end_frame = 60
for frame in range(0, end_frame):
    Im, calib, pos2d, pos3d = kth.get(data_root, seq1_zipname, seq1_dir, frame,
import sys
sys.path.append('/usr/local/python')
import cv2
import json
from openpose import OpenPose


def round16(number):
    '''Round a number down to the nearest multiple of 16.'''
    return number // 16 * 16


if __name__ == '__main__':
    # The python wrapper can detect only body keypoints.
    img = cv2.imread("input.png")
    h, w, _ = img.shape
    config = json.load(open('config.json'))
    config['net_resolution'] = '{}x{}'.format(round16(w), round16(h))
    openpose = OpenPose(config)
    arr, out_img = openpose.forward(img, True)
    print(arr)
    cv2.imwrite('output.png', out_img)
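# round16 rounds down, which matters because OpenPose requires net_resolution
# dimensions to be multiples of 16. A quick check (values chosen for illustration,
# and matching the "-1x1072" resolutions used elsewhere in these snippets):
assert round16(1920) == 1920
assert round16(1080) == 1072   # 1080 is not divisible by 16
assert round16(15) == 0        # very small inputs collapse to 0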
def get_from_openpose():
    op = OpenPose()
    # op.get_from_openpose(videosdir='/home/junkado/Desktop/keio/hard/focusright', extension=".mp4")
    op.manual_videofile(
        "/home/junkado/Desktop/keio/hard/allcutvideo/C1234.mp4")
sys.path.insert(0, '../debugging')
from mvpose.data import shelf
from time import time
import shutil
from os.path import isdir, join
from os import makedirs

root = join(Settings['data_root'], 'pak')
tmp = Settings['tmp']

import mvpose.data.kth_football2 as kth
from mvpose.plot.limbs import draw_mscoco_human, draw_mscoco_human2d
from openpose import OpenPose

pe = OpenPose(tmp=tmp)

poses_per_frame = []
start_frame = 300
end_frame = 600
actual_frames = list(range(start_frame, end_frame))
calib = None
_start = time()
Imgs = {}
for t in range(start_frame, end_frame):
    Im, _, calib = shelf.get(root, t)
    Imgs[t] = Im
    predictions = pe.predict(Im, 'cvpr_shelf', t)
    poses_per_frame.append(predictions)
    print('extract poses from ' +\