Example #1
def test_adapter_match():
    raw = read_livecap_model(*config.read_livecap_model_args)
    model = LivecapModel.from_raw_model_data(raw)
    dataset = LiveCapAdapter(config.original_dataset_path, model,
                             config.original_camera_path,
                             get_vibe_to_original_joints())

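    # load the saved camera parameters: focal lengths (fx, fy), principal point (u, v) and image size (h, w)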
    cam_params = np.load(config.intrinsic_params_path)
    fx = cam_params['fx']
    fy = cam_params['fy']
    u = cam_params['u']
    v = cam_params['v']
    h = cam_params['h']
    w = cam_params['w']

    renderer = DebugRenderer(w, h, model, dataset.joint_indices)

    # project points with the original camera and the dataset parameters
    camera = Camera(config.camera_to_world_matrix, h, w, fx, fy, u, v)
    datapoint = dataset[0]
    kp3d = datapoint.kp_3d
    kp3d_translation = datapoint.kp_3d_translation
    p3d = model.get_p3d()[dataset.joint_indices] + kp3d_translation
    renderer.debug_3d(p3d, kp3d, kp3d_translation)
    kp3d = kp3d + kp3d_translation
    kp2d = camera.project(kp3d)
    kp2d_true = datapoint.kp_2d
    p2d = camera.project(p3d)
    frame = datapoint.frame
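    # overlay: red = projected model joints, blue = projected dataset 3d keypoints, green = dataset 2d keypoints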
    draw_pixels_on_image(frame, p2d, 'red')
    draw_pixels_on_image(frame, kp2d, 'blue')
    draw_pixels_on_image(frame, kp2d_true, 'green')
    view_image_blocking(frame)
Example #2
def test_idt():
    for i in sample_indices:
        entry = dataset[i]
        silhouette = entry.silhouette
        view_image_blocking(silhouette, 'silhouette')
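        # compute the image distance transform of the silhouette and inspect its range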
        idt = image_distance_transform(silhouette)
        print('idt stats: ', idt.shape, idt.dtype, idt.max())
        view_image_blocking(idt, 'idt')
Example #3
def test_kp_2d_are_pixels():
    ds = LiveCapDataset(config.livecap_dataset_path)
    indices = [10, 20, 100, 500]
    for i in indices:
        item = ds[i]
        frame = item.frame
        kp_2d = item.vibe.kp_2d
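        # if kp_2d is already in pixel coordinates, the drawn points should line up with the frame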
        draw_pixels_on_image(frame, kp_2d)
        view_image_blocking(frame, "kp_2d on frame")
Example #4
def test_keypoints():
    print(f'the kp indices {dataset.kp_indices.shape}, {dataset.kp_indices}')
    print(f'the joint indices {dataset.joint_indices.shape}, {dataset.joint_indices}')
    for i in sample_indices:
        entry = dataset[i]
        print(f'kp2d shape: {entry.kp_2d.shape}')
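        # draw the 2d keypoints on a blank image with the frame's dimensions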
        image = np.zeros_like(entry.frame)
        draw_pixels_on_image(image, entry.kp_2d)
        view_image_blocking(image)
        print(f'kp3d shape: {entry.kp_3d.shape}')
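        # render the 3d keypoints together with the model's skeleton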
        renderer.draw_skeleton(entry.kp_3d, show_both=True)
Example #5
def test_find_contour_vertices():
    model, dataset, camera, renderer, initial_pose = load_optimization_settings(
        renderer=True)
    model.apply_pose_vector(initial_pose)

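    # find the contour vertices of the posed model, and their normals, with respect to the camera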
    cv, normals = get_contour_vertices(model, camera, 50)
    print(normals.shape)
    print(cv.shape)
    image = np.zeros((camera.image_h, camera.image_w, 3), dtype=np.uint8)
    draw_pixels_on_image(image, cv)
    view_image_blocking(image)
Example #6
def test_image(renderer: Renderer):
    print('test image...')

    print('stress test...')
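    # time NUM_ITERATIONS skeleton draws followed by NUM_ITERATIONS model draws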
    start = time.perf_counter()
    i = 0
    for _ in range(NUM_ITERATIONS):
        if i > 0 and i % PRINT_EVERY == 0:
            print(f'{i} out of {2 * NUM_ITERATIONS}')
        image = renderer.draw_skeleton()
        i += 1
    for _ in range(NUM_ITERATIONS):
        if i > 0 and i % PRINT_EVERY == 0:
            print(f'{i} out of {2 * NUM_ITERATIONS}')
        image = renderer.draw_model()
        i += 1

    end = time.perf_counter()
    print(f'time={(end - start)} seconds.')

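    # render one skeleton, model and silhouette image for visual inspection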
    image = renderer.draw_skeleton()
    view_image_blocking(image)
    image = renderer.draw_model()
    view_image_blocking(image)
    image = renderer.draw_model_silhouette()
    view_image_blocking(image)
Example #7
model, dataset, camera, renderer, initial_pose_vector = load_optimization_settings(
)
blocking_renderer = load_renderer(model,
                                  camera,
                                  dataset,
                                  use_scale=True,
                                  mode='blocking',
                                  filename=None)

# initialization
model.apply_pose_vector(initial_pose_vector)

# validating on the first entry
entry = dataset[0]
# draw the frame
view_image_blocking(entry.frame, 'frame')
# draw the 3d key points and the model's skeleton
blocking_renderer.draw_skeleton(entry.kp_3d + entry.kp_3d_translation,
                                show_both=True)
# project the 3d points and compare the 2d points of vibe
p_3d = model.get_p3d()
projected = camera.project(p_3d)
image = np.zeros((camera.image_h, camera.image_w, 3), dtype=np.uint8)
draw_pixels_on_image(image, entry.kp_2d, 'red')
draw_pixels_on_image(image, projected, 'blue')
view_image_blocking(image, 'red=vibe, blue=projected 3d points')
image += renderer.draw_model()
view_image_blocking(image, 'on top of the rendered model')

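# sweep the third component of the pose vector from 3.4 to 7.0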
for i in range(10):
    initial_pose_vector[2] = 3 + 4 * (i + 1) / 10
Example #8
def test_blocking(renderer: Renderer):
    print('test blocking...')
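    # here the draw calls return a (camera, image) pair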
    cam, image = renderer.draw_skeleton()
    view_image_blocking(image)
    cam, image = renderer.draw_model()
    view_image_blocking(image)