def visualize_crop(i):
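    """Visualize the `crop` transform on example ``i``: cropped depth/RGB
    images with projected 2D joints (top row) and the back-projected point
    cloud with 3D joints (bottom row). Relies on the module-level
    ``dataset``, ``EDGES``, ``POINT_COLOR`` and ``EDGE_COLOR``."""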
    example = dataset.get_example(i)
    rgb_joint_zyx = example["rgb_joint"]
    depth_joint_zyx = example["depth_joint"]
    rgb = chainercv.utils.read_image(example["rgb_path"])
    depth = dataset.read_depth(example["depth_path"])

    depth_cropped, depth_vu_cropped, depth_camera_cropped, depth_crop_param = crop(
        depth, depth_joint_zyx, dataset.depth_camera, return_param=True)

    rgb_cropped, rgb_vu_cropped, rgb_camera_cropped, rgb_crop_param = crop(
        rgb, rgb_joint_zyx, dataset.rgb_camera, return_param=True)

    fig = plt.figure(figsize=(8, 8))
    ax1 = fig.add_subplot(221)
    ax2 = fig.add_subplot(222)
    ax3 = fig.add_subplot(223, projection="3d")
    ax4 = fig.add_subplot(224, projection="3d")

    vis_image(depth_cropped, ax1)
    vis_pose(depth_vu_cropped,
             EDGES,
             point_color=POINT_COLOR,
             edge_color=EDGE_COLOR,
             ax=ax1)

    vis_image(rgb_cropped, ax2)
    vis_pose(rgb_vu_cropped,
             EDGES,
             point_color=POINT_COLOR,
             edge_color=EDGE_COLOR,
             ax=ax2)

    # plot 3D: back-project the cropped depth map into a point cloud,
    # subsampling every 10th pixel to keep the scatter plot light
    uvd = convert_depth_to_uvd(depth_cropped)
    u, v, d = uvd[:, ::10, ::10]
    u = u.reshape(-1, 1)
    v = v.reshape(-1, 1)
    z = d.reshape(-1, 1)
    vu = np.concatenate([v, u], axis=1)
    zyx = depth_camera_cropped.vu2zyx(vu, z)
    vis_point(zyx, ax=ax3)
    zyx_com = depth_crop_param["zyx_com"]
    vis_pose(depth_joint_zyx,
             indices=EDGES,
             edge_color=EDGE_COLOR,
             point_color=POINT_COLOR,
             ax=ax3)
    vis_pose(rgb_joint_zyx,
             indices=EDGES,
             edge_color=EDGE_COLOR,
             point_color=POINT_COLOR,
             ax=ax4)
    for ax in [ax3, ax4]:
        ax.set_xlabel("x")
        ax.set_ylabel("y")
        ax.set_zlabel("z")
        ax.view_init(-65, -90)
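
# `convert_depth_to_uvd` used above is defined elsewhere in this repository.
# The following is a hypothetical sketch of its assumed behavior (the name
# `convert_depth_to_uvd_sketch` and the (1, H, W) depth layout are
# assumptions, not the repository's actual implementation): pair every pixel
# with its (u, v) coordinates so the depth map can be back-projected to 3D.
def convert_depth_to_uvd_sketch(depth):
    _, H, W = depth.shape  # depth is assumed to be a (1, H, W) array
    v, u = np.meshgrid(np.arange(H), np.arange(W), indexing="ij")
    # stack as (u, v, d) channels, giving shape (3, H, W)
    return np.stack([u, v, depth.squeeze(0)], axis=0)
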
def visualize_resize_contain(i):
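    """Visualize the `resize_contain` transform on example ``i``: resized
    depth/RGB images with projected 2D joints, plus the corresponding
    point cloud and 3D joints."""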
    example = dataset.get_example(i)
    rgb_joint_zyx = example["rgb_joint"]
    depth_joint_zyx = example["depth_joint"]
    rgb = chainercv.utils.read_image(example["rgb_path"])
    depth = dataset.read_depth(example["depth_path"])

    depth_resized, depth_vu_resized, depth_camera_resized = resize_contain(
        depth, depth_joint_zyx, dataset.depth_camera, size=(crop2dH, crop2dW))

    rgb_resized, rgb_vu_resized, rgb_camera_resized = resize_contain(
        rgb, rgb_joint_zyx, dataset.rgb_camera, size=(crop2dH, crop2dW))

    fig = plt.figure(figsize=(8, 8))
    ax1 = fig.add_subplot(221)
    ax2 = fig.add_subplot(222)
    ax3 = fig.add_subplot(223, projection="3d")
    ax4 = fig.add_subplot(224, projection="3d")

    vis_image(depth_resized, ax1)
    vis_pose(depth_vu_resized,
             EDGES,
             point_color=POINT_COLOR,
             edge_color=EDGE_COLOR,
             ax=ax1)

    vis_image(rgb_resized, ax2)
    vis_pose(rgb_vu_resized,
             EDGES,
             point_color=POINT_COLOR,
             edge_color=EDGE_COLOR,
             ax=ax2)

    # plot 3D: back-project the resized depth map into a point cloud,
    # subsampling every 10th pixel to keep the scatter plot light
    uvd = convert_depth_to_uvd(depth_resized)
    u, v, d = uvd[:, ::10, ::10]
    u = u.reshape(-1, 1)
    v = v.reshape(-1, 1)
    z = d.reshape(-1, 1)
    uv = np.concatenate([u, v], axis=1)
    xyz = depth_camera_resized.uv2xyz(uv, z)

    ax3.scatter(*xyz.transpose(), alpha=0.4)
    vis_pose(depth_joint_zyx,
             indices=EDGES,
             edge_color=EDGE_COLOR,
             point_color=POINT_COLOR,
             ax=ax3)
    vis_pose(rgb_joint_zyx,
             indices=EDGES,
             edge_color=EDGE_COLOR,
             point_color=POINT_COLOR,
             ax=ax4)
    for ax in [ax3, ax4]:
        ax.set_xlabel("x")
        ax.set_ylabel("y")
        ax.set_zlabel("z")
        ax.view_init(-65, -90)
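
# The camera objects' `vu2zyx` / `uv2xyz` methods are assumed to implement
# the standard pinhole back-projection. A minimal sketch under that
# assumption (fx, fy are focal lengths, (u0, v0) the principal point; the
# actual camera class is defined elsewhere in this repository):
def pinhole_uv2xyz_sketch(uv, z, fx, fy, u0, v0):
    u, v = uv[:, 0], uv[:, 1]
    z = z.ravel()
    # invert the projection u = fx * x / z + u0, v = fy * y / z + v0
    x = (u - u0) * z / fx
    y = (v - v0) * z / fy
    return np.stack([x, y, z], axis=1)
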
def visualize_flip(idx, y_flip=False, x_flip=False):
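    """Visualize the `flip` transform on example ``idx``: the original and
    flipped depth images with projected joints (top row) and the matching
    3D joints (bottom row)."""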
    example = dataset.get_example(idx)
    rgb_joint_zyx = example["rgb_joint"]
    depth_joint_zyx = example["depth_joint"]
    rgb = chainercv.utils.read_image(example["rgb_path"])
    depth = dataset.read_depth(example["depth_path"])
    rgb_vu = dataset.rgb_camera.zyx2vu(rgb_joint_zyx)
    depth_vu = dataset.depth_camera.zyx2vu(depth_joint_zyx)
    rgb_vu = np.expand_dims(rgb_vu, axis=0)
    depth_vu = np.expand_dims(depth_vu, axis=0)

    depth_flipped, depth_joint_zyx_flipped, depth_vu_flipped = flip(
        depth,
        depth_joint_zyx,
        depth_vu,
        dataset.depth_camera,
        x_flip=x_flip,
        y_flip=y_flip,
    )

    fig = plt.figure(figsize=(8, 8))
    ax1 = fig.add_subplot(221)
    ax2 = fig.add_subplot(222)
    ax3 = fig.add_subplot(223, projection="3d")
    ax4 = fig.add_subplot(224, projection="3d")

    vis_pose(depth_vu,
             EDGES,
             img=depth,
             edge_color=EDGE_COLOR,
             point_color=POINT_COLOR,
             ax=ax1)
    debug_vu = np.expand_dims(
        dataset.depth_camera.zyx2vu(depth_joint_zyx_flipped), axis=0)
    vis_pose(debug_vu,
             EDGES,
             img=depth_flipped,
             edge_color=EDGE_COLOR,
             point_color=POINT_COLOR,
             ax=ax2)
    # plot 3D
    vis_pose(depth_joint_zyx,
             indices=EDGES,
             edge_color=EDGE_COLOR,
             point_color=POINT_COLOR,
             ax=ax3)
    vis_pose(depth_joint_zyx_flipped,
             indices=EDGES,
             edge_color=EDGE_COLOR,
             point_color=POINT_COLOR,
             ax=ax4)
    for ax in [ax3, ax4]:
        ax.set_xlabel("x")
        ax.set_ylabel("y")
        ax.set_zlabel("z")
        ax.view_init(-65, -90)
def predict_ppn(model, dataset, hand_param):
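    """Run the PPN model on a random example and plot predicted joints
    against ground truth (drawn with black edges) in 2D and 3D, with the
    detection grid overlaid on the image."""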
    fig = plt.figure(figsize=(10, 5))
    ax = fig.add_subplot(121)
    ax3 = fig.add_subplot(122, projection="3d")
    logger.info("> use ppn")
    idx = np.random.randint(0, len(dataset))
    example = dataset.get_example(idx)
    image = example["rgb"]
    gt_kp_zyx = example["rgb_joint"]
    gt_kp_vu = example["rgb_camera"].zyx2vu(example["rgb_joint"])
    gt_kp_zyx = gt_kp_zyx - gt_kp_zyx[ROOT_IDX]
    scaleH = hand_param["inH"] / model.outsize[0]
    scaleW = hand_param["inW"] / model.outsize[1]

    kp_vu, kp_zyx = get_result_ppn(model, dataset, hand_param, idx)

    color_map = hand_param["color_map"]
    keypoint_names = hand_param["keypoint_names"]
    edges = hand_param["edges"]
    point_color = [color_map[k] for k in keypoint_names]
    edge_color = [color_map[s, t] for s, t in edges]

    vis_pose(kp_vu, edges, image, point_color, edge_color, ax=ax)
    vis_pose(
        kp_zyx,
        edges,
        point_color=point_color,
        edge_color=edge_color,
        ax=ax3,
    )

    vis_pose(gt_kp_vu,
             edges,
             image,
             point_color,
             edge_color=[(0, 0, 0) for _ in edges],
             ax=ax)
    vis_pose(
        gt_kp_zyx,
        edges,
        point_color=point_color,
        edge_color=[(0, 0, 0) for _ in edges],
        ax=ax3,
    )

    # overlay the PPN detection grid: horizontal lines every scaleH pixels,
    # vertical lines every scaleW pixels
    for i in range(model.outsize[0]):
        ax.plot([0, hand_param["inW"]], [i * scaleH, i * scaleH], color='w')
    for i in range(model.outsize[1]):
        ax.plot([i * scaleW, i * scaleW], [0, hand_param["inH"]], color='w')

    ax3.set_xlabel("x")
    ax3.set_ylabel("y")
    ax3.set_zlabel("z")
    ax3.view_init(-80, -90)
    plt.show()
def predict_heatmap(model, dataset, hand_param):
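    """Run the heatmap model on a random example and plot the 2D joints
    decoded from the argmax of each predicted heatmap."""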
    from model_rhd import variable_rodrigues
    fig = plt.figure(figsize=(10, 5))
    ax = fig.add_subplot(121)
    ax3 = fig.add_subplot(122, projection="3d")

    keypoint_names = hand_param["keypoint_names"]
    edges = hand_param["edges"]
    color_map = hand_param["color_map"]

    idx = np.random.randint(0, len(dataset))
    example = dataset.get_example(idx)
    inp = example["rgb"] / 255
    with chainer.using_config('train', False):
        heatmaps = model.pose.forward(np.expand_dims(inp, axis=0))
        # pred_canonical_joint = model.prior(heatmaps).reshape(-1, 3)
        # pred_R = variable_rodrigues(model.rot(heatmaps))
        heatmaps = heatmaps[-1].array.squeeze()
        # pred_canonical_joint = pred_canonical_joint.array
        # pred_R = pred_R.array
        pts2d = []
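        # decode 2D keypoints: take each heatmap's argmax and rescale it
        # from heatmap resolution to the network input resolution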
        for i in range(len(heatmaps)):
            hm = heatmaps[i]
            y, x = np.unravel_index(np.argmax(hm), hm.shape)
            pts2d.append([
                hand_param["inH"] / hm.shape[0] * y,
                hand_param["inW"] / hm.shape[1] * x
            ])
        pts2d = np.array(pts2d)
        point_color = [color_map[k] for k in keypoint_names]
        edge_color = [color_map[(s, t)] for (s, t) in edges]
        vis_pose(pts2d,
                 edges,
                 img=example["rgb"],
                 point_color=point_color,
                 edge_color=edge_color,
                 ax=ax)
        # vis_pose(
        #    pred_canonical_joint, edges,
        #    point_color=point_color,
        #    edge_color=edge_color,
        #    ax=ax3
        # )
    plt.show()
def predict_ganerated(model, dataset, hand_param):
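    """Run the GANerated-style heatmap model on a random example and plot
    the decoded 2D joints."""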
    fig = plt.figure(figsize=(10, 5))
    ax = fig.add_subplot(121)
    ax3 = fig.add_subplot(122, projection="3d")

    keypoint_names = hand_param["keypoint_names"]
    edges = hand_param["edges"]
    color_map = hand_param["color_map"]

    idx = np.random.randint(0, len(dataset))
    example = dataset.get_example(idx)
    inp = example["rgb"] / 255
    with chainer.using_config('train', False):
        heatmaps = model.predict(np.expand_dims(inp, axis=0))
        heatmaps = heatmaps[-1].array.squeeze()
        pts2d = []
        for i in range(len(heatmaps)):
            hm = heatmaps[i]
            y, x = np.unravel_index(np.argmax(hm), hm.shape)
            logger.debug("> heatmap {}: shape {}, argmax ({}, {})".format(
                i, hm.shape, y, x))
            y = hand_param["inH"] / hm.shape[0] * y
            x = hand_param["inW"] / hm.shape[1] * x
            pts2d.append([y, x])
        pts2d = np.array(pts2d)
        point_color = [color_map[k] for k in keypoint_names]
        edge_color = [color_map[(s, t)] for (s, t) in edges]
        vis_pose(pts2d,
                 edges,
                 img=example["rgb"],
                 point_color=point_color,
                 edge_color=edge_color,
                 ax=ax)
        # vis_pose(
        #    pred_canonical_joint, edges,
        #    point_color=point_color,
        #    edge_color=edge_color,
        #    ax=ax3
        # )
    plt.show()
def visualize_dataset(idx):
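    """Plot example ``idx`` as stored in the dataset: depth/RGB images with
    projected 2D joints (top row) and the 3D joints (bottom row)."""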
    example = dataset.get_example(idx)
    print(example.keys())
    rgb_joint_zyx = example["rgb_joint"]
    depth_joint_zyx = example["depth_joint"]
    rgb = chainercv.utils.read_image(example["rgb_path"])
    depth = dataset.read_depth(example["depth_path"])
    rgb_vu = dataset.rgb_camera.zyx2vu(rgb_joint_zyx)
    depth_vu = dataset.depth_camera.zyx2vu(depth_joint_zyx)

    fig = plt.figure(figsize=(8, 8))
    ax1 = fig.add_subplot(221)
    ax2 = fig.add_subplot(222)
    ax3 = fig.add_subplot(223, projection="3d")
    ax4 = fig.add_subplot(224, projection="3d")

    vis_pose(depth_vu,
             EDGES,
             img=depth,
             edge_color=EDGE_COLOR,
             point_color=POINT_COLOR,
             ax=ax1)
    vis_pose(rgb_vu,
             EDGES,
             img=rgb,
             edge_color=EDGE_COLOR,
             point_color=POINT_COLOR,
             ax=ax2)

    vis_pose(depth_joint_zyx,
             indices=EDGES,
             edge_color=EDGE_COLOR,
             point_color=POINT_COLOR,
             ax=ax3)
    vis_pose(rgb_joint_zyx,
             indices=EDGES,
             edge_color=EDGE_COLOR,
             point_color=POINT_COLOR,
             ax=ax4)
    for ax in [ax3, ax4]:
        ax.set_xlabel("x")
        ax.set_ylabel("y")
        ax.set_zlabel("z")
        ax.view_init(-65, -90)
def predict_sample(model, dataset, hand_param):
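    """Run the model on a random (transformed) example, log the loss and
    the mean 3D joint distance, and plot predictions against ground
    truth."""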
    transformed_dataset = TransformDataset(dataset, model.encode)
    idx = np.random.randint(0, len(transformed_dataset))
    image, gt_2dj, gt_3dj = transformed_dataset.get_example(idx)
    example = dataset.get_example(idx)

    vis_vu = gt_2dj * np.array([[hand_param["inH"], hand_param["inW"]]])
    pred_j = model.predict(np.array([image], dtype=np.float32))
    with chainer.using_config('train', False):
        loss = model.forward(
            np.expand_dims(image, axis=0),
            np.expand_dims(gt_3dj, axis=0),
            np.expand_dims(gt_2dj, axis=0),
        )
    pred_j = pred_j.array.reshape(hand_param["n_joints"], -1)
    dim = pred_j.shape[-1]
    if dim == 5:
        pred_3d = pred_j[:, :3]
        pred_2d = pred_j[:, 3:]
        pred_2d = pred_2d * np.array([[hand_param["inH"], hand_param["inW"]]])
    else:
        pred_3d = pred_j
        pred_2d = None
    logger.info("> {}".format(pred_j))
    logger.info("> loss {}".format(loss))
    logger.info("> visualize pred_joint")
    plot_direction = "horizontal"
    if plot_direction == "horizontal":
        space = (1, 2)
        figsize = (10, 5)
    else:
        space = (2, 1)
        figsize = (5, 10)
    z_half = hand_param["cube"][0] / 2
    pred_3d = z_half * pred_3d
    if hand_param["use_rgb"]:
        gt_3dj = example["rgb_joint"]
    else:
        gt_3dj = example["depth_joint"]
    gt_3dj = gt_3dj - calc_com(gt_3dj)
    distance = np.sqrt(np.sum(np.square(pred_3d - gt_3dj), axis=1)).mean()
    logger.info("> mean distance {:0.2f}".format(distance))
    fig = plt.figure(figsize=figsize)
    fig.suptitle("mean distance = {:0.2f}".format(distance))
    ax1 = fig.add_subplot(*space, 1)
    ax1.set_title("result 2D")
    ax2 = fig.add_subplot(*space, 2, projection="3d")
    ax2.set_title("result 3D")
    color_map = hand_param["color_map"]
    keypoint_names = hand_param["keypoint_names"]
    edges = hand_param["edges"]
    color = [color_map[k] for k in keypoint_names]
    pred_color = [color_map[s, t] for s, t in edges]
    gt2_color = [[255, 255, 255] for k in keypoint_names]
    gt3_color = [[50, 50, 50] for k in keypoint_names]
    if hand_param["use_rgb"]:
        image = denormalize_rgb(image)
        chainercv.visualizations.vis_image(image, ax=ax1)
    else:
        image = image.squeeze()
        ax1.imshow(image, cmap="gray")
    vis_pose(vis_vu, edges, point_color=color, edge_color=gt2_color, ax=ax1)
    if pred_2d is not None:
        vis_pose(pred_2d, edges, point_color=color, edge_color=pred_color,
                 ax=ax1)
    vis_pose(gt_3dj, edges, point_color=color, edge_color=gt3_color, ax=ax2)
    if dim != 2:
        vis_pose(pred_3d,
                 edges,
                 point_color=color,
                 edge_color=pred_color,
                 ax=ax2)
    # set layout
    for ax in [ax2]:
        ax.set_xlabel("x")
        ax.set_ylabel("y")
        ax.set_zlabel("z")
        ax.view_init(-65, -90)
    # show
    plt.show()


# Example No. 9
def main():
    args = parse_args()
    logging.basicConfig(level=logging.INFO)

    config = configparser.ConfigParser()

    path = os.path.expanduser(
        os.path.join(args.trained, "result", "config.ini"))
    logger.info("read {}".format(path))
    config.read(path, 'UTF-8')

    logger.info("setup devices")
    chainer.global_config.autotune = True
    chainer.config.cudnn_fast_batch_normalization = True

    dataset_type = config["dataset"]["type"]
    use_rgb = config.getboolean("dataset", "use_rgb")
    use_depth = config.getboolean("dataset", "use_depth")
    assert use_rgb
    assert use_rgb ^ use_depth, "XOR(use_rgb, use_depth) must be True"
    hand_param = select_dataset(config, return_data=["hand_param"])
    model_path = os.path.expanduser(
        os.path.join(args.trained, "result", "bestmodel.npz"))

    logger.info("> restore model")
    model = select_model(config, hand_param)
    logger.info("> model.device = {}".format(model.device))

    logger.info("> restore models")
    chainer.serializers.load_npz(model_path, model)

    fig = plt.figure(figsize=(8, 8))
    ax = fig.add_subplot(111)

    color_map = hand_param["color_map"]
    color = [color_map[k] for k in hand_param["keypoint_names"]]
    edge_color = [color_map[s, t] for s, t in hand_param["edges"]]
    pred_color = [[255, 255, 255] for k in hand_param["keypoint_names"]]

    cap = cv2.VideoCapture(args.camera)
    if not cap.isOpened():
        print('Error opening video stream or file')
        exit(1)

    try:
        while cap.isOpened():
            # Grab the next frame from the camera
            ret_val, image = cap.read()
            if not ret_val:
                break
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            image = image.transpose(2, 0, 1).astype(np.float32)
            _, cH, cW = image.shape
            sz = min(cH, cW)
            image = chainercv.transforms.center_crop(image, (sz, sz))
            image = chainercv.transforms.resize(
                image, (hand_param["inH"], hand_param["inW"]))
            with chainer.using_config('train', False):
                heatmaps = model.pose.forward(
                    np.expand_dims(normalize_rgb(image), axis=0))
                heatmaps = heatmaps[-1].array.squeeze()
                pts2d = []
                for i in range(len(heatmaps)):
                    hm = heatmaps[i]
                    y, x = np.unravel_index(np.argmax(hm), hm.shape)
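                    # the factor 8 below is assumed to be this model's
                    # heatmap-to-input stride (hard coded here)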
                    pts2d.append([8 * y, 8 * x])
                joint2d = np.array(pts2d)
            edges = hand_param["edges"]
            vis_pose(joint2d, edges, image, color, edge_color, ax=ax)
            fig.canvas.draw()
            buf = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
            buf = buf.reshape(fig.canvas.get_width_height()[::-1] + (3, ))
            buf = cv2.cvtColor(buf, cv2.COLOR_RGB2BGR)
            # buf = cv2.resize(buf, (dW, dH))
            ax.clear()
            images = np.hstack((buf, ))

            # Show images
            cv2.namedWindow('RealSense', cv2.WINDOW_AUTOSIZE)
            cv2.imshow('RealSense', images)
            if cv2.waitKey(1) == 27:  # ESC quits
                break
    finally:
        print("Exit")


# Example No. 10
def main():
    args = parse_args()
    logging.basicConfig(level=logging.INFO)

    config = configparser.ConfigParser()

    path = os.path.expanduser(os.path.join(args.trained, "src", "config.ini"))
    logger.info("read {}".format(path))
    config.read(path, 'UTF-8')

    logger.info("setup devices")
    chainer.global_config.autotune = True
    chainer.config.cudnn_fast_batch_normalization = True

    dataset_type = config["dataset"]["type"]
    use_rgb = config.getboolean("dataset", "use_rgb")
    use_depth = config.getboolean("dataset", "use_depth")
    assert use_rgb
    assert use_rgb ^ use_depth, "XOR(use_rgb, use_depth) must be True"
    hand_param = select_dataset(config, return_data=["hand_param"])
    model_path = os.path.expanduser(os.path.join(args.trained,
                                                 "bestmodel.npz"))

    logger.info("> restore model")
    model = create_model(config, hand_param)
    logger.info("> model.device = {}".format(model.device))
    chainer.serializers.load_npz(model_path, model)

    plot_direction = "horizontal"
    if plot_direction == "horizontal":
        space = (1, 2)
        figsize = (10, 5)
    else:
        space = (2, 1)
        figsize = (5, 10)

    fig = plt.figure(figsize=figsize)
    ax1 = fig.add_subplot(*space, 1)
    ax3 = fig.add_subplot(*space, 2, projection="3d")

    color_map = hand_param["color_map"]
    color = [color_map[k] for k in hand_param["keypoint_names"]]
    edge_color = [color_map[s, t] for s, t in hand_param["edges"]]
    pred_color = [[255, 255, 255] for k in hand_param["keypoint_names"]]

    cap = cv2.VideoCapture(args.camera)
    if not cap.isOpened():
        print('Error opening video stream or file')
        exit(1)

    try:
        while cap.isOpened():
            # Grab the next frame from the camera
            ret_val, image = cap.read()
            if not ret_val:
                break
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            image = image.transpose(2, 0, 1).astype(np.float32)
            _, cH, cW = image.shape
            sz = min(cH, cW)
            image = chainercv.transforms.center_crop(image, (sz, sz))
            image = chainercv.transforms.resize(
                image, (hand_param["inH"], hand_param["inW"]))
            pred_j = model.predict(np.expand_dims(normalize_rgb(image),
                                                  axis=0))
            pred_j = pred_j.array.reshape(hand_param["n_joints"], -1)
            dim = pred_j.shape[-1]
            if dim == 5:
                pred_3d = pred_j[:, :3]
                pred_2d = pred_j[:, 3:]
                pred_2d = pred_2d * np.array(
                    [[hand_param["inH"], hand_param["inW"]]])
            else:
                pred_3d = pred_j
                pred_2d = None

            if pred_2d is not None:
                vis_pose(pred_2d,
                         hand_param["edges"],
                         img=image,
                         point_color=color,
                         edge_color=pred_color,
                         ax=ax1)
            if dim != 2:
                vis_pose(pred_3d,
                         hand_param["edges"],
                         point_color=color,
                         edge_color=edge_color,
                         ax=ax3)
            # set layout
            for ax in [ax3]:
                ax.set_xlabel("x")
                ax.set_ylabel("y")
                ax.set_zlabel("z")
                ax.view_init(-65, -90)

            fig.canvas.draw()
            buf = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
            buf = buf.reshape(fig.canvas.get_width_height()[::-1] + (3, ))
            buf = cv2.cvtColor(buf, cv2.COLOR_RGB2BGR)
            # buf = cv2.resize(buf, (dW, dH))
            ax1.clear()
            ax3.clear()

            images = np.hstack((buf, ))

            # Show images
            cv2.namedWindow('RealSense', cv2.WINDOW_AUTOSIZE)
            cv2.imshow('RealSense', images)
            if cv2.waitKey(1) == 27:  # ESC quits
                break
    finally:
        print("Exit")


# Example No. 11
def main():
    args = parse_args()
    logging.basicConfig(level=logging.INFO)

    config = configparser.ConfigParser()

    path = os.path.expanduser(
        os.path.join(args.trained, "result", "config.ini"))
    logger.info("read {}".format(path))
    config.read(path, 'UTF-8')

    logger.info("setup devices")
    chainer.global_config.autotune = True
    chainer.config.cudnn_fast_batch_normalization = True

    dataset_type = config.get("dataset", "type")
    use_rgb = config.getboolean("dataset", "use_rgb")
    use_depth = config.getboolean("dataset", "use_depth")
    assert use_rgb ^ use_depth, "XOR(use_rgb, use_depth) must be True"
    cube = parse_cube(config[dataset_type]["cube"], style="DHW")
    hand_param = select_dataset(config, return_data=["hand_param"])
    model_path = os.path.expanduser(
        os.path.join(args.trained, "result", "bestmodel.npz"))

    logger.info("> restore model")
    model = select_model(config, hand_param)
    print(model)
    logger.info("> model.device = {}".format(model.device))
    chainer.serializers.load_npz(model_path, model)

    plot_direction = "horizontal"
    if plot_direction == "horizontal":
        space = (1, 3)
        figsize = (15, 5)
    else:
        space = (3, 1)
        figsize = (5, 15)

    fig = plt.figure(figsize=figsize)
    ax1 = fig.add_subplot(*space, 1)
    ax2 = fig.add_subplot(*space, 2)
    ax3 = fig.add_subplot(*space, 3, projection="3d")

    color_map = hand_param["color_map"]
    color = [color_map[k] for k in hand_param["keypoint_names"]]
    edge_color = [color_map[s, t] for s, t in hand_param["edges"]]
    pred_color = [[255, 255, 255] for k in hand_param["keypoint_names"]]
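    # NOTE: `pipeline` is assumed to be a RealSense (pyrealsense2) pipeline
    # created and started at module scope before this loop, e.g.:
    #   import pyrealsense2 as rs
    #   pipeline = rs.pipeline()
    #   pipeline.start()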

    try:
        while True:
            # Wait for the next frameset (only the depth frame is used here)
            frames = pipeline.wait_for_frames()
            depth_frame = frames.get_depth_frame()
            #color_frame = frames.get_color_frame()
            if not depth_frame:
                continue
            # if not color_frame:
            #    continue

            # Convert images to numpy arrays
            depth_image = np.asanyarray(depth_frame.get_data())
            #color_image = np.asanyarray(color_frame.get_data())
            logger.info("> depth_image {} {} {}".format(
                depth_image.min(), depth_image.max(), depth_image.dtype))
            dH, dW = depth_image.shape
            #cH, cW, _ = color_image.shape

            size = 448  # hard coded
            dhslice = slice(dH // 2 - size // 2, dH // 2 - size // 2 + size)
            dwslice = slice(dW // 2 - size // 2, dW // 2 - size // 2 + size)
            depth_image = depth_image[dhslice, dwslice]

            inp = chainercv.transforms.center_crop(
                np.expand_dims(depth_image, axis=0),
                (224, 224),
                copy=True,
            ).astype(np.float32)
            #inp = chainercv.transforms.resize(inp, (224, 224))
            _, inpH, inpW = inp.shape
            z_com = inp[0, inpH // 2, inpW // 2]

            logger.info("> com size {} {}".format(z_com,
                                                  hand_param["cube"][0]))

            inp = normalize_depth(
                inp,
                z_com=z_com,
                z_size=hand_param["cube"][0],
            )
            logger.info("> normalized depth {} {} {}".format(
                inp.min(), inp[0, inpH // 2, inpW // 2], inp.max()))

            inp = chainercv.transforms.resize(
                inp, (hand_param["inH"], hand_param["inW"]))

            ax2.imshow(inp.squeeze(), cmap="gray", vmin=-1, vmax=1)
            pred_j = model.predict(
                np.expand_dims(inp, axis=0).astype(np.float32))

            pred_j = pred_j.array.reshape(hand_param["n_joints"], -1)
            dim = pred_j.shape[-1]
            if dim == 5:
                pred_3d = pred_j[:, :3]
                pred_2d = pred_j[:, 3:]
                pred_2d = pred_2d * np.array(
                    [[hand_param["inH"], hand_param["inW"]]])
            else:
                pred_3d = pred_j
                pred_2d = None

            ax1.imshow(np.asarray(depth_image), cmap="gray")
            if pred_2d is not None:
                vis_pose(pred_2d,
                         hand_param["edges"],
                         point_color=color,
                         edge_color=pred_color,
                         ax=ax2)
            if dim != 2:
                vis_pose(pred_3d,
                         hand_param["edges"],
                         point_color=color,
                         edge_color=edge_color,
                         ax=ax3)
            # set layout
            for ax in [ax3]:
                ax.set_xlabel("x")
                ax.set_ylabel("y")
                ax.set_zlabel("z")
                ax.view_init(-65, -90)
            ax2.set_xlim(0, hand_param["inW"])
            ax2.set_ylim(0, hand_param["inH"])
            ax2.invert_yaxis()
            fig.canvas.draw()
            buf = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
            buf = buf.reshape(fig.canvas.get_width_height()[::-1] + (3, ))
            buf = cv2.cvtColor(buf, cv2.COLOR_RGB2BGR)
            # buf = cv2.resize(buf, (dW, dH))
            ax1.clear()
            ax2.clear()
            ax3.clear()

            # Apply colormap on depth image (image must be converted to 8-bit per pixel first)
            # depth_colormap = cv2.applyColorMap(
            #    cv2.convertScaleAbs(depth_image, alpha=0.03),
            #    cv2.COLORMAP_JET
            # )

            # Stack both images horizontally
            # images = np.hstack((color_image, depth_colormap))
            # images = np.hstack((buf, depth_colormap))
            images = np.hstack((buf, ))

            # Show images
            cv2.namedWindow('RealSense', cv2.WINDOW_AUTOSIZE)
            cv2.imshow('RealSense', images)
            if cv2.waitKey(1) == 27:  # ESC quits
                break
    finally:
        # Stop streaming
        pipeline.stop()