def upload_file():
    if request.method == 'POST':
        code = request.get_data()
        img = base64.b64decode(code.decode().split(',')[1])
        fstr = IMAGE_FILE_DIR + '/' + time.strftime("%Y%m%d%H%M%S") + '.jpg'
        file = open(fstr, 'wb')
        file.write(img)
        file.close()  # flush to disk before cv2.imread reads the file back
        image = cv2.imread(fstr)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)  # conversion to RGB

        # create pose estimator
        image_size = image.shape
        pose_estimator = PoseEstimator(image_size, SESSION_PATH, PROB_MODEL_PATH)

        # load model
        pose_estimator.initialise()

        # estimation
        pose_2d, visibility, pose_3d = pose_estimator.estimate(image)

        # close model
        pose_estimator.close()

        # draw the 2D pose onto the image and return it as a base64-encoded JPEG
        print(pose_2d)
        draw_limbs(image, pose_2d, visibility)
        image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)  # back to BGR for OpenCV's encoder
        _, buf = cv2.imencode('.jpg', image)
        return (base64.b64encode(buf.tobytes()), 200)
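# A hedged wiring sketch, not part of the handler above: the minimal Flask
# setup and imports that upload_file assumes. The constant values and the
# '/upload' route are placeholders, and the PoseEstimator/draw_limbs imports
# follow the Lifting-from-the-Deep package layout.
from flask import Flask, request
import base64
import time
import cv2
from lifting import PoseEstimator
from lifting.utils import draw_limbs

IMAGE_FILE_DIR = '/tmp/uploads'                # placeholder upload directory
SESSION_PATH = 'path/to/init_session'          # placeholder checkpoint path
PROB_MODEL_PATH = 'path/to/prob_model_params'  # placeholder 3D prior path

app = Flask(__name__)
app.add_url_rule('/upload', view_func=upload_file, methods=['POST'])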
def display_results(in_image, data_2d, joint_visibility, data_3d):
    """Plot 2D and 3D poses for each of the people in the image."""
    plt.figure()
    draw_limbs(in_image, data_2d, joint_visibility)
    plt.imshow(in_image)
    plt.axis('off')

    # Show 3D poses
    for single_3D in data_3d:
        # or plot_pose(Prob3dPose.centre_all(single_3D))
        plot_pose(single_3D)

    plt.show()
def display_results(in_image, data_2d, joint_visibility, data_3d, frame):
    """Plot 2D and 3D poses for each of the people in the image."""
    plt.figure()
    draw_limbs(in_image, data_2d, joint_visibility)
    # plt.imshow(in_image)
    plt.axis('off')

    # Show 3D poses
    for single_3D in data_3d:
        # or plot_pose(Prob3dPose.centre_all(single_3D))
        plot_pose(single_3D)

    pngName = 'png/test_{0}.jpg'.format(str(frame).zfill(12))
    plt.savefig(pngName)
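# A hedged follow-up sketch: stitch the per-frame plots saved by the variant
# above (png/test_*.jpg) into an mp4 with OpenCV. The output path and the
# 30 fps rate are assumptions, not values from the original code.
import glob
import cv2

frames = sorted(glob.glob('png/test_*.jpg'))
if frames:
    h, w = cv2.imread(frames[0]).shape[:2]
    out = cv2.VideoWriter('png/test_result.mp4',
                          cv2.VideoWriter_fourcc(*'mp4v'), 30.0, (w, h))
    for f in frames:
        out.write(cv2.imread(f))
    out.release()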
def display_results(in_image, data_2d, joint_visibility, data_3d):
    """
    FOR VISUALIZATION OF 'LIFTING FROM THE DEEP' OUTPUT FOR ONE FRAME
    Plot 2D and 3D poses for each of the people in the image.
    """
    plt.figure()
    draw_limbs(in_image, data_2d, joint_visibility)
    plt.imshow(in_image)
    plt.axis('off')

    # Show 3D poses
    for single_3D in data_3d:
        # or plot_pose(Prob3dPose.centre_all(single_3D))
        plot_pose(single_3D)

    plt.show()
def display_results(mode, in_image, data_2d, visibilities, data_3d):
    """Plot 2D and 3D poses for each of the people in the image."""
    n_poses = len(data_3d)
    if mode == 'openpose':
        color_im = OpPoseEstimator.draw_humans(in_image, data_2d, imgcopy=False)
    else:
        draw_limbs(in_image, data_2d, visibilities)

    plt.subplot(1, n_poses + 1, 1)
    plt.imshow(in_image)
    plt.axis('off')

    # Show 3D poses, one subplot per person
    for idx, single_3D in enumerate(data_3d):
        plot_pose(Prob3dPose.centre_all(single_3D),
                  visibility_to_3d(visibilities[idx]),
                  n_poses + 1, idx + 2)
def display_results(in_image, data_2d, joint_visibility, data_3d, i):
    """
    Plot 2D and 3D poses for each of the people in the image.

    :param in_image:
    :param data_2d:
    :param joint_visibility:
    :param data_3d:
    :param i:
    :return:
    """
    plt.figure(i)
    draw_limbs(in_image, data_2d, joint_visibility, i)
    plt.imshow(in_image)
    # plt.axis('off')

    # Show 3D poses
    for single_3D in data_3d:
        # or plot_pose(Prob3dPose.centre_all(single_3D))
        plot_pose(single_3D)

    plt.show()
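# A hedged single-image usage sketch for the display_results variants above,
# assuming the Lifting-from-the-Deep entry points (PoseEstimator from
# `lifting`, model paths in SESSION_PATH / PROB_MODEL_PATH); 'test_image.jpg'
# is a placeholder path.
import cv2
from lifting import PoseEstimator

image = cv2.cvtColor(cv2.imread('test_image.jpg'), cv2.COLOR_BGR2RGB)
estimator = PoseEstimator(image.shape, SESSION_PATH, PROB_MODEL_PATH)
estimator.initialise()
pose_2d, visibility, pose_3d = estimator.estimate(image)
estimator.close()
display_results(image, pose_2d, visibility, pose_3d)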
def estimate_video():
    fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
    video_writer1 = cv2.VideoWriter(
        PROJECT_PATH + '/data/videos/test_result_{}.mp4'.format(str(time.time())),
        fourcc, 30.0, (640, 480))
    cap = cv2.VideoCapture(PROJECT_PATH + '/data/videos/test_video.mp4')

    import mpl_toolkits.mplot3d.axes3d as p3  # registers the 3d projection
    fig = plt.figure()
    ax = fig.add_subplot(projection='3d')
    pose_list = []

    def animate(pose):
        """Draw one 3D skeleton (3 x n_joints) on the shared axes."""
        assert pose.ndim == 2
        assert pose.shape[0] == 3
        ax.clear()
        for c in _CONNECTION:
            col = '#%02x%02x%02x' % joint_color(c[0])
            ax.plot([pose[0, c[0]], pose[0, c[1]]],
                    [pose[1, c[0]], pose[1, c[1]]],
                    [pose[2, c[0]], pose[2, c[1]]], c=col)
        for j in range(pose.shape[1]):
            col = '#%02x%02x%02x' % joint_color(j)
            ax.scatter(pose[0, j], pose[1, j], pose[2, j],
                       c=col, marker='o', edgecolor=col)
        # Fixed limits keep the camera steady across frames; per-pose
        # pose.min()/pose.max() (roughly -885.36 to 809.97 here) would
        # make the view jitter.
        ax.set_xlim3d(-1000, 1000)
        ax.set_ylim3d(-1000, 1000)
        ax.set_zlim3d(-1000, 1000)

    if not cap.isOpened():
        print("Error opening video stream or file")

    import matplotlib.animation as manimation
    FFMpegWriter = manimation.writers['ffmpeg']
    metadata = dict(title='Movie Test', artist='Matplotlib',
                    comment='Movie support!')
    writer = FFMpegWriter(fps=30, metadata=metadata)

    # create pose estimator
    pose_estimator = PoseEstimator((480, 640, 3), SESSION_PATH, PROB_MODEL_PATH)

    # load model
    pose_estimator.initialise()

    count = 0
    while cap.isOpened():
        count += 1
        if count == 300:
            break
        print('count:{}'.format(str(count)))
        ret_val, frame = cap.read()
        if not ret_val:  # end of stream; cv2.resize would fail on a None frame
            break
        frame = cv2.resize(frame, (640, 480))
        try:
            # estimation
            start = time.time()
            pose_2d, visibility, pose_3d = pose_estimator.estimate(frame)
            print(time.time() - start)
        except Exception:  # skip frames where no pose is found
            continue
        pose_list.append(pose_3d[0])
        draw_limbs(frame, pose_2d, visibility)
        video_writer1.write(frame)

    # close model and finalise the 2D result video
    pose_estimator.close()
    video_writer1.release()
    cap.release()

    # replay the collected 3D poses into a second video
    with writer.saving(
            fig,
            PROJECT_PATH + '/data/videos/test_result_3d_{}.mp4'.format(str(time.time())),
            100):
        for i in range(len(pose_list)):
            animate(pose_list[i])
            writer.grab_frame()
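# A hedged driver sketch: estimate_video expects the input clip at
# PROJECT_PATH + '/data/videos/test_video.mp4' and writes its two result
# files next to it. Note that manimation.writers['ffmpeg'] needs an ffmpeg
# binary on the PATH.
if __name__ == '__main__':
    estimate_video()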