def main(VideoName):
    """Stream a video through HrNet 2D pose detection, lift to 3D, and display.

    Per frame: detect 2D keypoints with (bboxModel, poseModel), maintain a
    fixed-size temporal window of 2D poses, lift the window to 3D with
    model3D, and draw the latest 3D pose over the frame.

    NOTE(review): later `def main` definitions in this file shadow this one
    at import time — confirm which variant is meant to be live.

    Args:
        VideoName: path (or capture source) accepted by videoInfo().
    """
    cap, cap_length, cap_fps = videoInfo(VideoName)
    window = 30  # temporal receptive field fed to the 3D lifting model
    kpt2Ds = []
    for i in tqdm(range(cap_length)):
        _, frame = cap.read()
        frame, W, H = resize_img(frame)
        try:
            t0 = time.time()
            joint2D = interface2D(bboxModel, poseModel, frame)
            print('HrNet consume {:0.3f} s'.format(time.time() - t0))
        except Exception as e:
            # Best-effort: skip frames where 2D detection fails.
            print(e)
            continue
        if i == 0:
            # Warm-start: pad the whole window with the first frame's pose.
            kpt2Ds = [joint2D] * window
        else:
            # BUG FIX: the original appended without popping once i >= 30,
            # so the window grew without bound (memory leak + ever slower
            # 3D inference). Keep a fixed-size sliding window instead.
            kpt2Ds.append(joint2D)
            kpt2Ds.pop(0)
        joint3D = interface3D(model3D, np.array(kpt2Ds), W, H)
        joint3D_item = joint3D[-1]  # (17, 3) pose for the most recent frame
        draw_3Dimg(joint3D_item, frame, display=1, kpt2D=joint2D)
def main(VideoName):
    """Stream a video through model2D pose detection, lift to 3D, and display.

    Same pipeline shape as the HrNet variant above but using the
    `interface2D(frame, model2D)` detector signature.

    NOTE(review): this redefines `main` — an earlier/later `def main` in
    this file is shadowed; confirm which variant is intended.

    Args:
        VideoName: path (or capture source) accepted by videoInfo().
    """
    cap, cap_length = videoInfo(VideoName)
    window = 30  # temporal receptive field fed to the 3D lifting model
    kpt2Ds = []
    for i in tqdm(range(cap_length)):
        _, frame = cap.read()
        frame, W, H = resize_img(frame)
        try:
            joint2D = interface2D(frame, model2D)
        except Exception as e:
            # Best-effort: skip frames where 2D detection fails.
            print(e)
            continue
        if i == 0:
            # Warm-start: pad the whole window with the first frame's pose.
            kpt2Ds = [joint2D] * window
        else:
            # BUG FIX: the original appended without popping once i >= 30,
            # so the window grew without bound. Keep it fixed-size.
            kpt2Ds.append(joint2D)
            kpt2Ds.pop(0)
        joint3D = interface3D(model3D, np.array(kpt2Ds), W, H)
        joint3D_item = joint3D[-1]  # (17, 3) pose for the most recent frame
        draw_3Dimg(joint3D_item, frame, display=1, kpt2D=joint2D)
def main(VideoName, model_layes):
    """Benchmark PoseNet 2D detection + 3D lifting on up to 301 frames.

    Loads a PoseNet model onto the GPU, runs 2D detection per frame, lifts
    a fixed 30-frame window of 2D keypoints to 3D with model3D, collects
    (3D, 2D) pairs, prints per-stage timings, and persists the result via
    save_pose().

    NOTE(review): `model_layes` is unused in the body — confirm before
    removing it from the signature (kept here for caller compatibility).
    NOTE(review): this redefines `main`, shadowing earlier variants.

    Args:
        VideoName: path (or capture source) accepted by videoInfo().
        model_layes: unused.
    """
    model = posenet.load_model(args.model)
    model = model.cuda()
    output_stride = model.output_stride
    cap, cap_length = videoInfo(VideoName)
    kpt2Ds = []
    pose_3d = []
    for i in range(cap_length):
        # Benchmark cap: only the first 301 frames are processed.
        if i > 300:
            break
        _, frame = cap.read()
        input_image, display_image, output_scale = posenet.process_input(
            frame, 1 / 3.0, output_stride)
        frame, W, H = resize_img(frame)
        time0 = time.time()
        joint2D = get_2d_pose_torch(input_image, output_stride, model)
        time1 = time.time()
        # Kept so the timing print below retains its original four columns;
        # time2 - time1 is effectively zero (dead experiments were removed).
        time2 = time.time()
        if i == 0:
            # Warm-start: pad the whole window with the first frame's pose.
            for _ in range(30):
                kpt2Ds.append(joint2D)
        else:
            # Fixed-size 30-frame sliding window.
            kpt2Ds.append(joint2D)
            kpt2Ds.pop(0)
        joint3D = interface3D(model3D, np.array(kpt2Ds), W, H)
        joint3D_item = joint3D[-1]  # (17, 3) pose for the most recent frame
        time3 = time.time()
        pose_3d.append((joint3D_item, joint2D))
        # 2D detect time, (removed-stage) time, 3D lift time, 2D->3D total.
        print(time1 - time0, time2 - time1, time3 - time2, time3 - time1)
    save_pose(pose_3d)
def VideoPoseJoints(VideoName):
    """Detect 2D keypoints for every frame of a video and lift them to 3D.

    Frames whose 2D detection raises are skipped (the error is printed).
    Each successful 2D pose is drawn for display, then the full sequence
    is lifted with model3D.

    Args:
        VideoName: path (or capture source) accepted by videoInfo().

    Returns:
        The 3D joint sequence produced by interface3D over all frames.
    """
    capture, frame_count = videoInfo(VideoName)
    keypoints_2d = []
    for _ in tqdm(range(frame_count)):
        _, img = capture.read()
        img, width, height = resize_img(img)
        try:
            pose_2d = interface2D(img, model2D)
        except Exception as err:
            print(err)
            continue
        draw_2Dimg(img, pose_2d, 1)
        keypoints_2d.append(pose_2d)
    # width/height come from the last frame processed, as in the original.
    return interface3D(model3D, np.array(keypoints_2d), width, height)
def main(VideoName):
    """Interactive streaming 2D→3D pose estimation; press 'q' to quit.

    Per frame: detect 2D keypoints with (bboxModel, poseModel), maintain a
    fixed-size window of 2D poses, lift to 3D with model3D, and display the
    result. Reports the total elapsed wall time at the end.

    NOTE(review): this redefines `main`, shadowing earlier variants in the
    file — confirm which one is meant to be live.

    Args:
        VideoName: path (or capture source) accepted by videoInfo().
    """
    cap, cap_length = videoInfo(VideoName)
    queueSize = 30  # temporal window for the 3D lifting model
    kpt2Ds = []
    # BUG FIX: the original set t0 inside the per-frame try block, so the
    # final "total" print measured only the LAST frame's 2D detection and
    # raised NameError if the very first detection threw. Start the clock
    # once, before the loop.
    start = time.time()
    for i in tqdm(range(cap_length)):
        _, frame = cap.read()
        frame, W, H = resize_img(frame)
        try:
            joint2D = interface2D(bboxModel, poseModel, frame)
        except Exception as e:
            # Best-effort: skip frames where 2D detection fails.
            print(e)
            continue
        if i == 0:
            # Warm-start: pad the whole window with the first frame's pose.
            kpt2Ds = [joint2D] * queueSize
        else:
            # BUG FIX: the original appended without popping once
            # i >= queueSize, growing the window without bound.
            kpt2Ds.append(joint2D)
            kpt2Ds.pop(0)
        joint3D = interface3D(model3D, np.array(kpt2Ds), W, H)
        joint3D_item = joint3D[-1]  # (17, 3) pose for the most recent frame
        draw_3Dimg(joint3D_item, frame, display=1, kpt2D=joint2D)
        if cv2.waitKey(1) & 0xff == ord('q'):
            cap.release()
            break
    print('total consume {:0.3f} s'.format(time.time() - start))