def test_image(renderer: Renderer):
    print('test image...')

    # stress test: time NUM_ITERATIONS skeleton renders followed by NUM_ITERATIONS model renders
    print('stress test...')
    start = time.perf_counter()
    i = 0
    for _ in range(NUM_ITERATIONS):
        if i > 0 and i % PRINT_EVERY == 0:
            print(f'{i} out of {2 * NUM_ITERATIONS}')
        image = renderer.draw_skeleton()
        i += 1
    for _ in range(NUM_ITERATIONS):
        if i > 0 and i % PRINT_EVERY == 0:
            print(f'{i} out of {2 * NUM_ITERATIONS}')
        image = renderer.draw_model()
        i += 1
    end = time.perf_counter()
    print(f'time={end - start} seconds.')

    # visual check: show one frame of each draw mode
    image = renderer.draw_skeleton()
    view_image_blocking(image)
    image = renderer.draw_model()
    view_image_blocking(image)
    image = renderer.draw_model_silhouette()
    view_image_blocking(image)
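
# The stress loops in this module rely on `time` and on two constants assumed to be
# defined at the top of the module. A minimal, assumed preamble compatible with these
# tests (the concrete values are illustrative, not taken from the project) would be:
#
#     import time
#     NUM_ITERATIONS = 1000   # frames rendered per stress loop
#     PRINT_EVERY = 100       # print progress every PRINT_EVERY frames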
def test_model_rendering():
    raw = read_livecap_model(*config.read_livecap_model_args)
    model = LivecapModel.from_raw_model_data(raw)
    renderer = Renderer('blocking', model, show_axes=True)

    # render the model in its rest pose
    renderer.draw_model(with_texture=True)
    renderer.draw_skeleton()

    # each row is a motion plan (a pose vector for one frame); visualize every 100th frame
    motion = read_motion(config.motion_path)
    for i in range(0, len(motion), 100):
        vector = motion[i]
        vector[:3] *= model.scale  # scale the root translation to the model's units
        model.apply_pose_vector(vector)
        renderer.draw_skeleton()
        renderer.draw_model(with_texture=True)
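
# Note: `vector[:3] *= model.scale` above mutates `motion[i]` in place when `motion` is
# a numpy array, because basic indexing returns a view. A small standalone sketch of
# that behaviour (the helper name and values are illustrative only; uses the module's
# numpy import, `np`):
def demo_inplace_pose_scaling():
    rows = np.ones((2, 6))
    view = rows[0]               # basic indexing returns a view into the first row
    view[:3] *= 2.0
    assert rows[0, 0] == 2.0     # the source row changed through the view

    copied = rows[1].copy()      # copying first leaves the source row untouched
    copied[:3] *= 2.0
    assert rows[1, 0] == 1.0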
def test_video(renderer: Renderer):
    print('test video...')
    start = time.perf_counter()
    i = 0
    for _ in range(NUM_ITERATIONS):
        if i > 0 and i % PRINT_EVERY == 0:
            print(f'{i} out of {2 * NUM_ITERATIONS}')
        image = renderer.draw_skeleton()
        i += 1
    for _ in range(NUM_ITERATIONS):
        if i > 0 and i % PRINT_EVERY == 0:
            print(f'{i} out of {2 * NUM_ITERATIONS}')
        image = renderer.draw_model()
        i += 1
    renderer.close()
    end = time.perf_counter()
    print(f'time={end - start} seconds.')
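
# The progress-printing loop appears with the same shape in both test_image and
# test_video; a small helper like this one (a sketch, not part of the original module)
# could factor it out. `draw` is any zero-argument render call, e.g. renderer.draw_model.
def run_stress_loop(draw, iterations: int, total: int, start_index: int = 0) -> int:
    """Call draw() `iterations` times, printing progress every PRINT_EVERY calls."""
    i = start_index
    for _ in range(iterations):
        if i > 0 and i % PRINT_EVERY == 0:
            print(f'{i} out of {total}')
        draw()
        i += 1
    return i

# Example use inside either stress test:
#     i = run_stress_loop(renderer.draw_skeleton, NUM_ITERATIONS, 2 * NUM_ITERATIONS)
#     run_stress_loop(renderer.draw_model, NUM_ITERATIONS, 2 * NUM_ITERATIONS, start_index=i)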
        model, camera=camera, joint_indices=dataset.joint_indices, **config.scale)

    # initialization
    initial_pose = model.get_initial_pose()
    initial_pose.root_translation = dataset.get_initial_translation()
    model.apply_livecap_pose(initial_pose)

    # validating on the first entry
    entry = dataset[0]

    # draw the frame
    view_image_blocking(entry.frame, 'frame')

    # draw the 3d key points and the model's skeleton
    blocking_renderer.draw_skeleton(entry.kp_3d + entry.kp_3d_translation, show_both=True)

    # project the 3d joints and compare them with vibe's 2d key points
    p_3d = model.get_joints_positions()
    projected = camera.project(p_3d)
    image = np.zeros((camera.image_h, camera.image_w, 3), dtype=np.uint8)
    draw_pixels_on_image(image, entry.kp_2d, Color.red)
    draw_pixels_on_image(image, projected, Color.blue)
    view_image_blocking(image, 'red=vibe, blue=projected 3d points')
    image += renderer.draw_model()
    view_image_blocking(image, 'on top of the rendered model')

    # draw only the face vertices
    face_vertices = model.get_face_vertices()
    projected = camera.project(face_vertices)
    image = np.zeros_like(image)
    draw_pixels_on_image(image, projected, Color.blue)
    view_image_blocking(image, 'blue=projected face vertices')
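
# Rough sketch of the pinhole projection that `camera.project` is assumed to perform in
# the validation above; the real Camera class may use different conventions (extrinsics,
# distortion), so this is only an illustration of the idea, not the project's code.
def project_points_pinhole(points_3d, intrinsics):
    """Project Nx3 camera-space points to Nx2 pixel coordinates with a 3x3 intrinsics matrix."""
    homogeneous = points_3d @ intrinsics.T           # rows: [fx*X + cx*Z, fy*Y + cy*Z, Z]
    return homogeneous[:, :2] / homogeneous[:, 2:]   # divide by depth Z to get pixel coordinates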
def test_blocking(renderer: Renderer):
    print('test blocking...')
    cam, image = renderer.draw_skeleton()
    view_image_blocking(image)
    cam, image = renderer.draw_model()
    view_image_blocking(image)
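
# A possible manual entry point; test_model_rendering builds its own Renderer, while
# test_blocking, test_image and test_video expect an already-constructed Renderer
# (how those renderers are created is an assumption left out of this sketch), so only
# the self-contained test is run here.
if __name__ == '__main__':
    test_model_rendering()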