resolution = config['resolution']  # resolution of grid is 20 cm
offset = config['offset']  # for each frame, we generate grids inside a 1 m x 1 m square
num_frames = config['num_frames']

# specify the output folders
virtual_scan_folder = '../' + config['virtual_scan_folder']
map_file = '../' + config['map_file']

# load poses
pose_file = '../' + config['pose_file']
poses = np.array(utils.load_poses(pose_file))
inv_frame0 = np.linalg.inv(poses[0])

# load calibrations
calib_file = '../' + config['calib_file']
T_cam_velo = utils.load_calib(calib_file)
T_cam_velo = np.asarray(T_cam_velo).reshape((4, 4))
T_velo_cam = np.linalg.inv(T_cam_velo)

# convert KITTI poses from the camera coordinate frame to the LiDAR coordinate frame,
# relative to the first frame
new_poses = []
for pose in poses:
  new_poses.append(T_velo_cam.dot(inv_frame0).dot(pose).dot(T_cam_velo))
poses = np.array(new_poses)

# load LiDAR scans
scan_folder = config['scan_folder']
scan_paths = utils.load_files(scan_folder)

# only use the first num_frames scans
if num_frames >= len(poses) or num_frames <= 0:
  num_frames = len(poses)  # assumption: fall back to using all frames
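These snippets rely on helpers such as utils.load_poses and utils.load_calib that are not shown here. As a point of reference, a minimal sketch of a KITTI-style pose loader could look like the following; load_kitti_poses is a hypothetical stand-in, not part of the original utils module, and it assumes each line of poses.txt holds the 12 row-major entries of a 3x4 pose matrix:

import numpy as np

def load_kitti_poses(pose_file):
  # illustrative stand-in for utils.load_poses: each line of a KITTI poses.txt
  # holds 12 floats (a row-major 3x4 pose), padded here to a homogeneous 4x4
  poses = []
  with open(pose_file) as f:
    for line in f:
      if not line.strip():
        continue
      values = np.array(line.split(), dtype=np.float64)
      pose = np.eye(4)
      pose[:3, :4] = values.reshape(3, 4)
      poses.append(pose)
  return poses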
Example #2
    config = yaml.safe_load(open(config_filename))

    # load setups
    plot_loc_traj = config['plot_loc_traj']
    save_evaluation_results = config['save_evaluation_results']

    # load ground truth files
    pose_file = config['pose_file']
    calib_file = config['calib_file']

    # load ground truth poses
    poses = np.array(load_poses(pose_file))
    inv_frame0 = np.linalg.inv(poses[0])

    # load calibrations
    T_cam_velo = load_calib(calib_file)
    T_cam_velo = np.asarray(T_cam_velo).reshape((4, 4))
    T_velo_cam = np.linalg.inv(T_cam_velo)

    # convert the poses into the LiDAR coordinate system
    new_poses = []
    for pose in poses:
        new_poses.append(T_velo_cam.dot(inv_frame0).dot(pose).dot(T_cam_velo))
    new_poses = np.array(new_poses)
    gt_poses = new_poses

    gt_xy_raw = gt_poses[:, :2, 3]
    gt_yaw = []

    for pose in gt_poses:
        gt_yaw.append(euler_angles_from_rotation_matrix(pose[:3, :3])[2])
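The euler_angles_from_rotation_matrix helper is assumed to return angles in (roll, pitch, yaw) order; under a standard ZYX (yaw-pitch-roll) convention the same yaw can be read directly from the rotation matrix, as in this small sketch (the function name is illustrative, not from the original code):

import numpy as np

def yaw_from_rotation_matrix(R):
    # yaw about the z-axis of a ZYX (yaw-pitch-roll) rotation matrix;
    # equivalent to the third Euler angle used above under that convention
    return np.arctan2(R[1, 0], R[0, 0])

# gt_yaw = [yaw_from_rotation_matrix(pose[:3, :3]) for pose in gt_poses]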
Example #3
def main(config):
    """ This script can be used to create mesh maps using LiDAR scans with GT poses.
  It assumes you have the data in the kitti-like format like:

  data
  └── sequences
      └── 00
          ├── calib.txt
          ├── poses.txt
          └── velodyne
              ├── 000000.bin
              ├── 000001.bin
              └── ...

  A quick example of how to run it:
  $ ./build_gt_map.py /path/to/config.yaml
  """
    # load scans and poses
    scan_folder = config['scan_folder']
    scan_paths = load_files(scan_folder)

    # load poses
    pose_file = config['pose_file']
    poses = load_poses(pose_file)
    inv_frame0 = np.linalg.inv(poses[0])

    # load calibrations
    # Note that if your poses are already in the LiDAR coordinate system, you
    # just need to set T_cam_velo as a 4x4 identity matrix
    calib_file = config['calib_file']
    T_cam_velo = load_calib(calib_file)
    T_cam_velo = np.asarray(T_cam_velo).reshape((4, 4))
    T_velo_cam = np.linalg.inv(T_cam_velo)

    # convert poses into LiDAR coordinate system
    new_poses = []
    for pose in poses:
        new_poses.append(T_velo_cam.dot(inv_frame0).dot(pose).dot(T_cam_velo))
    new_poses = np.array(new_poses)
    gt_poses = new_poses

    # Use the whole sequence if -1 is specified
    n_scans = len(scan_paths) if config['n_scans'] == -1 else config['n_scans']

    # init mesh map
    mesh_file = config['mesh_file']
    if os.path.exists(mesh_file):
        print('The mesh map already exists at:', mesh_file)
        exit()
    global_mesh = o3d.geometry.TriangleMesh()
    cloud_map = o3d.geometry.PointCloud()

    # counter for local map
    count = 1
    local_map_size = config['local_map_size']

    # config for range images
    range_config = config['range_image']

    for idx in tqdm(range(n_scans)):
        # load the point cloud
        current_points = load_vertex(scan_paths[idx])

        # get rid of invalid points
        dist = np.linalg.norm(current_points[:, :3], 2, axis=1)
        current_points = current_points[(dist < range_config['max_range'])
                                        & (dist > range_config['min_range'])]

        # convert into open3d format and preprocess the point cloud
        local_cloud = o3d.geometry.PointCloud()
        local_cloud.points = o3d.utility.Vector3dVector(current_points[:, :3])

        # estimate normals using a range image projection
        local_cloud = compute_normals_range(local_cloud,
                                            range_config['fov_up'],
                                            range_config['fov_down'],
                                            range_config['height'],
                                            range_config['width'],
                                            range_config['max_range'])

        # preprocess point clouds
        local_cloud = preprocess_cloud(local_cloud,
                                       config['voxel_size'],
                                       config['crop_x'],
                                       config['crop_y'],
                                       config['crop_z'],
                                       downsample=True)

        # integrate the local point cloud
        local_cloud.transform(gt_poses[idx])
        cloud_map += local_cloud

        if idx > 0:
            # don't count the frame if the car is standing still
            relative_pose = np.linalg.inv(gt_poses[idx - 1]).dot(gt_poses[idx])
            traj_dist = np.linalg.norm(relative_pose[:3, 3])
            if traj_dist > 0.2:
                count += 1

            # build a local mesh map
            if count % local_map_size == 0:
                # segment the ground
                ground, rest = pcd_ground_seg_open3d(cloud_map, config)

                # build the local poisson mesh
                mesh = run_poisson(ground + rest,
                                   depth=config['depth'],
                                   min_density=config['min_density'])

                # simplify the mesh to save space
                mesh = mesh_simplify(mesh, config)
                mesh.compute_vertex_normals()
                mesh.compute_triangle_normals()

                # integrate the local mesh into global mesh
                global_mesh += mesh

                # re-init cloud map
                cloud_map = o3d.geometry.PointCloud()

    # save the mesh map
    print("Saving mesh to " + mesh_file)
    o3d.utility.set_verbosity_level(o3d.utility.VerbosityLevel.Error)
    o3d.io.write_triangle_mesh(mesh_file, global_mesh)

    # visualize the mesh map
    if config['visualize']:
        o3d.visualization.draw_geometries([global_mesh])
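main() above reads its parameters from a flat config dictionary. The sketch below lists the keys the function itself accesses; every path and numeric value is an illustrative placeholder, not a value from the original project, and pcd_ground_seg_open3d / mesh_simplify receive the whole config and may expect further keys not shown here:

example_config = {
    'scan_folder': 'data/sequences/00/velodyne',
    'pose_file': 'data/sequences/00/poses.txt',
    'calib_file': 'data/sequences/00/calib.txt',
    'mesh_file': 'data/sequences/00/mesh_map.ply',
    'n_scans': -1,                # -1 uses the whole sequence
    'local_map_size': 100,        # number of moving frames per local mesh
    'voxel_size': 0.1,            # downsampling voxel size in meters
    'crop_x': 50, 'crop_y': 50, 'crop_z': 10,  # cropping extents in meters
    'depth': 10,                  # Poisson reconstruction octree depth
    'min_density': 0.1,           # threshold for trimming low-density vertices
    'visualize': True,
    'range_image': {              # spherical projection used for normal estimation
        'max_range': 50.0, 'min_range': 2.0,   # valid range band in meters
        'fov_up': 3.0, 'fov_down': -25.0,      # vertical field of view in degrees
        'height': 64, 'width': 900,            # range image resolution
    },
}
# main(example_config)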
Example #4
    parser.add_argument("img_path",
                        help="Path to image (.jpg, .png)",
                        type=str)
    args = parser.parse_args()

    if args.t is None:
        exit("Transformation was not given")

    if not exists(args.img_path):
        exit(args.img_path + " could not be found")

    yaw, pitch, roll, tx, ty, tz = [None] * 6
    calib = None

    if isfile(args.t) and basename(args.t) == "calib.txt":
        calib = load_calib(args.t)
        # calib.P_rect_20: 3x4 matrix that projects 3D points in the rectified
        # camera coordinate frame to 2D pixel coordinates
        # calib.T_cam2_velo: transform from the Velodyne frame to the rectified
        # cam2 coordinate frame
    elif len(args.t.split(',')) == 6:
        yaw, pitch, roll, tx, ty, tz = [
            float(item) for item in args.t.split(',')
        ]
    else:
        exit(
            "There need to be 6 comma-delimited values in transformation or path to the calib.txt file"
        )

    # Load image
    img = imread(args.img_path)
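The snippet stops before the parsed values are used. For illustration only, one common way to turn the six values into a 4x4 homogeneous transform is a ZYX (yaw-pitch-roll) composition; the angle convention and the helper name below are assumptions, not taken from the original script:

import numpy as np

def transform_from_ypr(yaw, pitch, roll, tx, ty, tz):
    # 4x4 homogeneous transform from yaw-pitch-roll (ZYX) angles in radians
    # plus a translation; the angle convention is an assumption
    cy, sy = np.cos(yaw), np.sin(yaw)
    cp, sp = np.cos(pitch), np.sin(pitch)
    cr, sr = np.cos(roll), np.sin(roll)
    Rz = np.array([[cy, -sy, 0.0], [sy, cy, 0.0], [0.0, 0.0, 1.0]])
    Ry = np.array([[cp, 0.0, sp], [0.0, 1.0, 0.0], [-sp, 0.0, cp]])
    Rx = np.array([[1.0, 0.0, 0.0], [0.0, cr, -sr], [0.0, sr, cr]])
    T = np.eye(4)
    T[:3, :3] = Rz.dot(Ry).dot(Rx)
    T[:3, 3] = [tx, ty, tz]
    return T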