print('Frame-to-model icp')
            # NOTE(review): this is the tail of a loop/function whose header is
            # outside the visible chunk; the unindented print above looks like
            # paste damage -- confirm indentation against the full file.
            # ICP here works in world->camera convention, so invert the current
            # estimate, refine it, then invert back.
            T_world_to_cam = np.linalg.inv(T_cam_to_world)
            # Align the (strided-downsampled) model points/normals against the
            # current frame's vertex/normal maps using the camera intrinsics.
            T_world_to_cam = icp(m.points[::down_factor],
                                 m.normals[::down_factor],
                                 vertex_map,
                                 normal_map,
                                 intrinsic,
                                 T_world_to_cam,
                                 debug_association=False)
            T_cam_to_world = np.linalg.inv(T_world_to_cam)
        print('Point-based fusion')
        # Fuse the current frame (vertices, normals, colors) into the global
        # point-based model `m`, stamped with the frame index.
        m.fuse(vertex_map, normal_map, color_map, intrinsic, T_cam_to_world, time=i)

        # A shift is required as gt starts from 1
        T_gt.append(gt_poses[i - 1])
        T_est.append(T_cam_to_world)

    # Show the fused global map; flip_transform presumably re-orients the
    # cloud for Open3D's default camera -- verify in o3d_utility.
    global_pcd = o3d_utility.make_point_cloud(m.points, colors=m.colors, normals=m.normals)
    o3d.visualization.draw_geometries([global_pcd.transform(o3d_utility.flip_transform)])

    # Visualize the trajectories
    T_gt = np.stack(T_gt)
    T_est = np.stack(T_est)
    fig = plt.figure()
    ax = plt.axes(projection='3d')
    # Translation component (last column) of each 4x4 pose matrix.
    pos_gt = T_gt[:, :3, 3]
    pos_est = T_est[:, :3, 3]
    # Ground-truth vs. estimated camera trajectories on the same 3D axes.
    ax.plot3D(pos_gt[:, 0], pos_gt[:, 1], pos_gt[:, 2])
    ax.plot3D(pos_est[:, 0], pos_est[:, 1], pos_est[:, 2])
    plt.show()
# Example #2
if __name__ == '__main__':
    # NOTE(review): this script body continues past the visible chunk (the
    # preprocessing loop below is the first stage); documented as seen.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        'path', help='path to the dataset folder containing rgb/ and depth/')
    args = parser.parse_args()

    # Load intrinsics and gt poses for evaluation
    intrinsic_struct = o3d.io.read_pinhole_camera_intrinsic('intrinsics.json')
    intrinsic = np.array(intrinsic_struct.intrinsic_matrix)
    indices, gt_poses = load_gt_poses(
        os.path.join(args.path, 'livingRoom2.gt.freiburg'))
    # Raw 16-bit depth values are divided by this to obtain metric depth
    # (5000 ticks per meter -- TUM/ICL-NUIM convention, presumably; confirm
    # against the dataset documentation).
    depth_scale = 5000.0

    depth_path = os.path.join(args.path, 'depth')
    normal_path = os.path.join(args.path, 'normal')
    # Output directory for cached normal maps; tolerate re-runs.
    os.makedirs(normal_path, exist_ok=True)

    # Generate normal maps
    # WARNING: please start from index 1, as ground truth poses are provided starting from index 1.
    for i in indices:
        print('Preprocessing frame {:03d}'.format(i))
        # Read the depth PNG and convert to metric units.
        depth = np.asarray(o3d.io.read_image('{}/{}.png'.format(
            depth_path, i))) / depth_scale
        # Back-project every depth pixel into a per-pixel 3D vertex map
        # using the camera intrinsics.
        vertex_map = transforms.unproject(depth, intrinsic)

        # Estimate normals on the flattened point cloud, then reshape back
        # to image layout so normals stay pixel-aligned with vertex_map.
        pcd = o3d_utility.make_point_cloud(vertex_map.reshape((-1, 3)))
        pcd.estimate_normals()

        normal_map = np.asarray(pcd.normals).reshape(vertex_map.shape)
        # Cache the normal map to disk for later pipeline stages.
        np.save('{}/{}.npy'.format(normal_path, i), normal_map)