Example #1
def visualize_dataset(dataset_dir):
    from matplotlib import pyplot as plt
    from mpl_toolkits.mplot3d import Axes3D  # NOQA
    from chainercv.utils import read_image
    from pose.visualizations import vis_point, vis_edge
    import numpy as np

    # load the training split and pick a random example
    dataset = GANeratedBaseDataset(dataset_dir, debug=False, mode="train")
    idx = np.random.choice(len(dataset))
    print(idx, len(dataset))
    example = dataset.get_example(idx)
    rgb_joint = example["rgb_joint"]
    rgb_path = example["rgb_path"]
    rgb = read_image(rgb_path)
    fig = plt.figure(figsize=(5, 10))
    ax2 = fig.add_subplot(211)
    ax4 = fig.add_subplot(212, projection="3d")
    color = [COLOR_MAP[k] for k in KEYPOINT_NAMES]
    edge_color = [COLOR_MAP[s, t] for s, t in EDGES]
    # project the 3D joints (z, y, x) into the image plane (v, u) and draw the 2D pose
    rgb_vu = example["rgb_camera"].zyx2vu(rgb_joint)
    vis_point(rgb_vu, img=rgb, color=color, ax=ax2)
    vis_edge(rgb_vu, indices=EDGES, color=edge_color, ax=ax2)

    # draw the 3D pose
    vis_point(rgb_joint, color=color, ax=ax4)
    vis_edge(rgb_joint, indices=EDGES, color=edge_color, ax=ax4)

    # label the 3D axes and set the viewing angle (elevation, azimuth)
    for ax in [ax4]:
        ax.set_xlabel("x")
        ax.set_ylabel("y")
        ax.set_zlabel("z")
        ax.view_init(-65, -90)

    plt.show()
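
A minimal driver for the example above, assuming this module is run as a script and the dataset directory is passed on the command line (the argument name is illustrative):

if __name__ == "__main__":
    import argparse

    # parse the dataset location and visualize one random sample
    parser = argparse.ArgumentParser(
        description="Visualize a random sample from the GANerated dataset")
    parser.add_argument("dataset_dir", type=str, help="path to the dataset root")
    args = parser.parse_args()
    visualize_dataset(args.dataset_dir)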
Example #2
def visualize_dataset(dataset_dir):
    from matplotlib import pyplot as plt
    from mpl_toolkits.mplot3d import Axes3D  # NOQA
    from pose.visualizations import vis_point, vis_edge

    dataset = MSRA15BaseDataset(dataset_dir, debug=True, mode="train")
    example = dataset.get_example(128)
    depth_joint = example["depth_joint"]
    depth_path = example["depth_path"]
    depth = dataset.read_depth(depth_path)
    fig = plt.figure(figsize=(5, 10))
    ax2 = fig.add_subplot(211)
    ax4 = fig.add_subplot(212, projection="3d")
    color = [COLOR_MAP[k] for k in KEYPOINT_NAMES]
    edge_color = [COLOR_MAP[s, t] for s, t in EDGES]
    depth_vu = DEPTH_CAMERA_INTR.zyx2vu(depth_joint)

    vis_point(depth_vu, img=depth, color=color, ax=ax2)
    vis_edge(depth_vu, indices=EDGES, color=edge_color, ax=ax2)

    vis_point(depth_joint, color=color, ax=ax4)
    vis_edge(depth_joint, indices=EDGES, color=edge_color, ax=ax4)

    for ax in [ax4]:
        ax.set_xlabel("x")
        ax.set_ylabel("y")
        ax.set_zlabel("z")
        ax.view_init(-65, -90)

    plt.show()
Example #3
def visualize_dataset(dataset_dir):
    from matplotlib import pyplot as plt
    from mpl_toolkits.mplot3d import Axes3D  # NOQA
    from chainercv.utils import read_image
    from pose.visualizations import vis_point, vis_edge
    import numpy as np

    dataset = MultiViewBaseDataset(dataset_dir, debug=False)
    print(len(dataset))
    i = np.random.choice(len(dataset))
    example = dataset.get_example(i)
    rgb_path = example["rgb_path"]
    rgb_joint = example["rgb_joint"]
    print(rgb_path)
    img = read_image(rgb_path)
    fig = plt.figure(figsize=(8, 8))
    ax1 = fig.add_subplot(211)
    ax3 = fig.add_subplot(212, projection="3d")
    rgb_vu = dataset.rgb_camera.zyx2vu(rgb_joint)
    color = [COLOR_MAP[k] for k in STANDARD_KEYPOINT_NAMES]
    edge_color = [COLOR_MAP[s, t] for s, t in EDGES]
    vis_point(rgb_vu, img=img, color=color, ax=ax1)
    vis_edge(rgb_vu, indices=EDGES, color=edge_color, ax=ax1)

    vis_point(rgb_joint, color=color, ax=ax3)
    vis_edge(rgb_joint, indices=EDGES, color=edge_color, ax=ax3)

    for ax in [ax3]:
        ax.set_xlabel("x")
        ax.set_ylabel("y")
        ax.set_zlabel("z")
        ax.view_init(-65, -90)

    plt.show()
Example #4
def visualize_crop(i):
    # relies on module-level names: dataset, crop, vis_image, vis_pose, vis_point,
    # convert_depth_to_uvd, EDGES, POINT_COLOR, EDGE_COLOR, plt, np, chainercv
    example = dataset.get_example(i)
    rgb_joint_zyx = example["rgb_joint"]
    depth_joint_zyx = example["depth_joint"]
    rgb = chainercv.utils.read_image(example["rgb_path"])
    depth = dataset.read_depth(example["depth_path"])

    # crop the depth map and the RGB image around the hand;
    # return_param=True also returns the crop parameters
    depth_cropped, depth_vu_cropped, depth_camera_cropped, depth_crop_param = crop(
        depth, depth_joint_zyx, dataset.depth_camera, return_param=True)

    rgb_cropped, rgb_vu_cropped, rgb_camera_cropped, rgb_crop_param = crop(
        rgb, rgb_joint_zyx, dataset.rgb_camera, return_param=True)

    fig = plt.figure(figsize=(8, 8))
    ax1 = fig.add_subplot(221)
    ax2 = fig.add_subplot(222)
    ax3 = fig.add_subplot(223, projection="3d")
    ax4 = fig.add_subplot(224, projection="3d")

    vis_image(depth_cropped, ax1)
    vis_pose(depth_vu_cropped,
             EDGES,
             point_color=POINT_COLOR,
             edge_color=EDGE_COLOR,
             ax=ax1)

    vis_image(rgb_cropped, ax2)
    vis_pose(rgb_vu_cropped,
             EDGES,
             point_color=POINT_COLOR,
             edge_color=EDGE_COLOR,
             ax=ax2)

    # plot 3D: back-project a subsampled depth map into a 3D point cloud
    uvd = convert_depth_to_uvd(depth_cropped)
    u, v, d = uvd[:, ::10, ::10]
    u = u.reshape(-1, 1)
    v = v.reshape(-1, 1)
    z = d.reshape(-1, 1)
    vu = np.concatenate([v, u], axis=1)
    zyx = depth_camera_cropped.vu2zyx(vu, z)
    vis_point(zyx, ax=ax3)
    zyx_com = depth_crop_param["zyx_com"]  # crop center of mass (unused in this function)
    vis_pose(depth_joint_zyx,
             indices=EDGES,
             edge_color=EDGE_COLOR,
             point_color=POINT_COLOR,
             ax=ax3)
    vis_pose(rgb_joint_zyx,
             indices=EDGES,
             edge_color=EDGE_COLOR,
             point_color=POINT_COLOR,
             ax=ax4)
    for ax in [ax3, ax4]:
        ax.set_xlabel("x")
        ax.set_ylabel("y")
        ax.set_zlabel("z")
        ax.view_init(-65, -90)
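
convert_depth_to_uvd is not defined in this snippet. A minimal sketch of what such a helper could look like, assuming the depth map is a (1, H, W) array, so channel 0 of the result is the u (column) grid, channel 1 the v (row) grid, and channel 2 the depth value at each pixel:

import numpy as np

def convert_depth_to_uvd(depth):
    # hypothetical helper: depth is assumed to have shape (1, H, W);
    # returns a (3, H, W) array stacking the u grid, the v grid, and the depth values
    _, height, width = depth.shape
    v, u = np.meshgrid(np.arange(height), np.arange(width), indexing="ij")
    return np.stack([u, v, depth[0]], axis=0)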
Example #5
def visualize_dataset(dataset_dir):
    from matplotlib import pyplot as plt
    from mpl_toolkits.mplot3d import Axes3D  # NOQA
    from chainercv.utils import read_image
    from pose.visualizations import vis_point, vis_edge

    dataset = NYUBaseDataset(dataset_dir, debug=True, mode="train")
    example = dataset.get_example(0)
    camera_joint = example["rgb_joint"]
    depth_joint = example["depth_joint"]
    rgb_path = example["rgb_path"]
    depth_path = example["depth_path"]
    logger.info("> read {}".format(rgb_path))
    img = read_image(rgb_path)
    depth = dataset.read_depth(depth_path)
    fig = plt.figure(figsize=(8, 8))
    ax1 = fig.add_subplot(221)
    ax2 = fig.add_subplot(222)
    ax3 = fig.add_subplot(223, projection="3d")
    ax4 = fig.add_subplot(224, projection="3d")
    rgb_vu = RGB_CAMERA_INTR.zyx2vu(camera_joint)
    color = [COLOR_MAP[k] for k in STANDARD_KEYPOINT_NAMES]
    edge_color = [COLOR_MAP[s, t] for s, t in EDGES]
    depth_vu = DEPTH_CAMERA_INTR.zyx2vu(depth_joint)
    vis_point(rgb_vu, img=img, color=color, ax=ax1)
    vis_edge(rgb_vu, indices=EDGES, color=edge_color, ax=ax1)

    vis_point(depth_vu, img=depth, color=color, ax=ax2)
    vis_edge(depth_vu, indices=EDGES, color=edge_color, ax=ax2)

    vis_point(camera_joint, color=color, ax=ax3)
    vis_edge(camera_joint, indices=EDGES, color=edge_color, ax=ax3)

    vis_point(depth_joint, color=color, ax=ax4)
    vis_edge(depth_joint, indices=EDGES, color=edge_color, ax=ax4)

    for ax in [ax3, ax4]:
        ax.set_xlabel("x")
        ax.set_ylabel("y")
        ax.set_zlabel("z")
        ax.view_init(-65, -90)

    plt.show()
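
The camera objects used throughout these examples (RGB_CAMERA_INTR, DEPTH_CAMERA_INTR, example["rgb_camera"], dataset.rgb_camera, ...) are defined elsewhere in the module. A rough sketch of the pinhole-projection interface they appear to expose, assuming joints are stored as (z, y, x) in camera coordinates and pixels as (v, u); the class name and attributes below are illustrative, not the actual library API:

import numpy as np

class PinholeCameraSketch:
    def __init__(self, fx, fy, u0, v0):
        # focal lengths and principal point, in pixels
        self.fx, self.fy, self.u0, self.v0 = fx, fy, u0, v0

    def zyx2vu(self, zyx):
        # zyx: (N, 3) camera-space joints ordered (z, y, x); returns (N, 2) pixels (v, u)
        z, y, x = zyx[:, 0], zyx[:, 1], zyx[:, 2]
        v = self.fy * y / z + self.v0
        u = self.fx * x / z + self.u0
        return np.stack([v, u], axis=1)

    def vu2zyx(self, vu, z):
        # vu: (N, 2) pixel coordinates (v, u), z: (N, 1) depths; inverse of zyx2vu
        v, u = vu[:, 0:1], vu[:, 1:2]
        y = (v - self.v0) * z / self.fy
        x = (u - self.u0) * z / self.fx
        return np.concatenate([z, y, x], axis=1)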