def get_lidar(self,
                  idx: int,
                  log_id: Optional[str] = None,
                  load: bool = True) -> Union[str, np.ndarray]:
        """Get lidar corresponding to frame index idx (in lidar frame).

        Args:
            idx: Lidar frame index
            log_id: ID of log to search (default: current log)
            load: whether to load the data; if False, the path to the lidar file is returned instead

        Returns:
            Either the path to the lidar file at the given index, or the loaded point cloud if load is True
        """
        assert self.lidar_timestamp_list is not None
        assert self._lidar_timestamp_list is not None
        assert self.lidar_list is not None
        assert self._lidar_list is not None

        if log_id is None:
            log_id = self.current_log

        assert idx < len(self._lidar_timestamp_list[log_id])

        if load:
            return load_ply(self._lidar_list[log_id][idx])
        return self._lidar_list[log_id][idx]
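This method belongs to argoverse's ArgoverseTrackingLoader. A minimal usage sketch, assuming a dataset at an illustrative path:

from argoverse.data_loading.argoverse_tracking_loader import ArgoverseTrackingLoader

loader = ArgoverseTrackingLoader("argoverse-tracking/sample")  # illustrative path
pc = loader.get_lidar(0)                    # (N, 3) np.ndarray of points
ply_path = loader.get_lidar(0, load=False)  # str path to the underlying .ply file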
Example #2
def test_get_pc_inside_box():
    """Test get_pc_inside_bbox(pc_raw, bbox), where the bbox format is [p0, p1, p2, h]:

                - -------- -
               /|         /|
              - -------- - . h
              | |        | |
              . p2 --------
              |/         |/
             p0 -------- p1

    Test lidar data:
             x    y    z  intensity  laser_number
        0  0.0  0.0  5.0        4.0          31.0
        1  1.0  0.0  5.0        1.0          14.0
        2  2.0  0.0  5.0        0.0          16.0
        3  3.0  0.0  5.0       20.0          30.0
        4  4.0  0.0  5.0        3.0          29.0
        5  5.0  0.0  5.0        1.0          11.0
        6  6.0  0.0  5.0       31.0          13.0
        7  7.0  0.0  5.0        2.0          28.0
        8  8.0  0.0  5.0        5.0          27.0
        9  9.0  0.0  5.0        6.0          10.0
    """
    bbox = np.array([np.array([[0], [0], [0]]), np.array([[2], [0], [0]]), np.array([[0], [5], [0]]), np.array(10)])

    pc = ply_loader.load_ply(TEST_DATA_LOC / "1/lidar/PC_0.ply")

    pc_inside = eval_utils.get_pc_inside_bbox(pc, bbox)

    assert len(pc_inside) == 3
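Why 3: the box spans [0, 2] along p0->p1 (x), [0, 5] along p0->p2 (y), and [0, 10] in height, while every test point sits at y=0, z=5, so only x in {0, 1, 2} qualifies. A sanity-check sketch of that count, independent of get_pc_inside_bbox (the axis-aligned test works only because this fixture's box edges lie along the coordinate axes):

import numpy as np

# The ten fixture points: x = 0..9, y = 0, z = 5
xyz = np.column_stack([np.arange(10.0), np.zeros(10), np.full(10, 5.0)])
inside = (
    (xyz[:, 0] >= 0) & (xyz[:, 0] <= 2)     # along p0 -> p1
    & (xyz[:, 1] >= 0) & (xyz[:, 1] <= 5)   # along p0 -> p2
    & (xyz[:, 2] >= 0) & (xyz[:, 2] <= 10)  # along the height h
)
assert inside.sum() == 3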
Example #3
def visualize_ground_lidar_pts(log_id: str, dataset_dir: str,
                               experiment_prefix: str):
    """Process a log by drawing the LiDAR returns that are classified as belonging
    to the ground surface in a red to green colormap in the image.

    Args:
        log_id: The ID of a log
        dataset_dir: Where the dataset is stored
        experiment_prefix: Output prefix
    """
    sdb = SynchronizationDB(dataset_dir, collect_single_log_id=log_id)

    city_info_fpath = f"{dataset_dir}/{log_id}/city_info.json"
    city_info = read_json_file(city_info_fpath)
    city_name = city_info["city_name"]
    avm = ArgoverseMap()

    ply_fpaths = sorted(glob.glob(f"{dataset_dir}/{log_id}/lidar/PC_*.ply"))

    for i, ply_fpath in enumerate(ply_fpaths):
        if i % 500 == 0:
            print(f"\tOn file {i} of {log_id}")
        lidar_timestamp_ns = ply_fpath.split("/")[-1].split(".")[0].split(
            "_")[-1]

        pose_fpath = f"{dataset_dir}/{log_id}/poses/city_SE3_egovehicle_{lidar_timestamp_ns}.json"
        if not Path(pose_fpath).exists():
            continue

        pose_data = read_json_file(pose_fpath)
        rotation = np.array(pose_data["rotation"])
        translation = np.array(pose_data["translation"])
        city_to_egovehicle_se3 = SE3(rotation=quat2rotmat(rotation),
                                     translation=translation)

        lidar_pts = load_ply(ply_fpath)

        lidar_timestamp_ns = int(lidar_timestamp_ns)
        draw_ground_pts_in_image(
            sdb,
            lidar_pts,
            city_to_egovehicle_se3,
            avm,
            log_id,
            lidar_timestamp_ns,
            city_name,
            dataset_dir,
            experiment_prefix,
        )

    for camera_name in CAMERA_LIST:
        if "stereo" in camera_name:
            fps = 5
        else:
            fps = 10
        cmd = f"ffmpeg -r {fps} -f image2 -i '{experiment_prefix}_ground_viz/{log_id}/{camera_name}/%*.jpg' {experiment_prefix}_ground_viz/{experiment_prefix}_{log_id}_{camera_name}_{fps}fps.mp4"

        print(cmd)
        run_command(cmd)
Example #4
def load_all_clouds(dataset=DATASET_DIR, log_id=LOG_ID):
    """Load every lidar sweep for a log, keyed by integer timestamp."""
    cloud_dict = {}
    lidar_dir = os.path.join(dataset, log_id, "lidar")
    for file in os.listdir(lidar_dir):
        point_cloud = ply_loader.load_ply(os.path.join(lidar_dir, file))
        timestamp = int(file[3:-4])  # strip the "PC_" prefix and ".ply" suffix
        cloud_dict[timestamp] = point_cloud
    return cloud_dict
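A hypothetical usage sketch, assuming the module-level DATASET_DIR and LOG_ID constants point at a valid log: iterate the sweeps in temporal order by sorting the timestamp keys.

clouds = load_all_clouds()
for timestamp in sorted(clouds):
    pc = clouds[timestamp]  # (N, 3) array for this sweep
    print(timestamp, pc.shape)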
Example #5
def test_dump_point_cloud():
    points = np.array([[3, 4, 5], [2, 4, 1], [1, 5, 2], [5, 2, 1]])
    test_dir = "test_dir"
    timestamp = 0
    log_id = 123
    dump_point_cloud(points, timestamp, log_id, test_dir)
    file_name = "test_dir/123/lidar/PC_0.ply"
    ret_pts = load_ply(file_name)
    assert np.array_equal(points, ret_pts)
Example #6
def test_leave_only_roi_region():
    """Test the leave_only_roi_region function, whose signature is
    (lidar_pts, egovehicle_to_city_se3, ground_removal_method, city_name='MIA').
    """
    pc = ply_loader.load_ply(TEST_DATA_LOC / "1/lidar/PC_0.ply")
    pose_data = read_json_file(TEST_DATA_LOC / "1/poses/city_SE3_egovehicle_0.json")
    rotation = np.array(pose_data["rotation"])
    translation = np.array(pose_data["translation"])
    ego_R = quat2rotmat(rotation)
    ego_t = translation
    egovehicle_to_city_se3 = SE3(rotation=ego_R, translation=ego_t)
    pts = eval_utils.leave_only_roi_region(pc, egovehicle_to_city_se3, ground_removal_method="map")
Example #7
def test_load_ply_by_attrib_real_sweep():
    """ Load an actual LiDAR sweep, with valid attribute specification """
    ply_fpath = os.path.join(
        _TEST_DIR, "test_data",
        "d60558d2-d1aa-34ee-a902-e061e346e02a__PC_315971347819783000.ply")
    pc = load_ply_by_attrib(ply_fpath, attrib_spec="xyzil")
    assert pc.shape == (91083, 5)
    # intensities should be bounded between [0,255]
    assert np.all(np.logical_and(pc[:, 3] >= 0, pc[:, 3] <= 255))

    # laser numbers should be bounded between [0,31]
    assert np.all(np.logical_and(pc[:, 4] >= 0, pc[:, 4] <= 31))

    # make sure it matches the "xyz"-only API
    assert np.allclose(load_ply_by_attrib(ply_fpath, attrib_spec="xyz"),
                       load_ply(ply_fpath))
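The "xyzil" layout is [x, y, z, intensity, laser_number], so slicing the first three columns reproduces the "xyz"-only result; a short sketch of the relationship the final assertion relies on, reusing ply_fpath from the test above:

pc_xyzil = load_ply_by_attrib(ply_fpath, attrib_spec="xyzil")
assert np.allclose(pc_xyzil[:, :3], load_ply(ply_fpath))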
Example #8
def test_load_ply() -> None:
    ply_fpath = _TEST_DIR / "test_data/tracking/1/lidar/PC_0.ply"
    pc = load_ply(ply_fpath)
    pc_gt = np.array([
        [0.0, 0.0, 5.0],
        [1.0, 0.0, 5.0],
        [2.0, 0.0, 5.0],
        [3.0, 0.0, 5.0],
        [4.0, 0.0, 5.0],
        [5.0, 0.0, 5.0],
        [6.0, 0.0, 5.0],
        [7.0, 0.0, 5.0],
        [8.0, 0.0, 5.0],
        [9.0, 0.0, 5.0],
    ])

    assert (pc == pc_gt).all()
Example #9
def project_and_save(lidar_filepath, output_base_path, calib_data, cameras, db,
                     acc_sweeps, ip_basic):

    log = lidar_filepath.parents[1].stem
    lidar_timestamp = lidar_filepath.stem[3:]

    neighbouring_timestamps = get_neighbouring_lidar_timestamps(
        db, log, lidar_timestamp, acc_sweeps)

    pts = load_ply(
        lidar_filepath)  # point cloud, numpy array Nx3 -> N 3D coords

    points_h = point_cloud_to_homogeneous(pts).T

    for camera_name in cameras:
        # Motion compensation needs the camera timestamp that matches this
        # sweep; look it up in the synchronization database for this log.
        cam_timestamp = db.get_closest_cam_channel_timestamp(
            int(lidar_timestamp), camera_name, log)
        if cam_timestamp is None:
            continue
        uv, uv_cam, valid_pts_bool = project_lidar_to_img_motion_compensated(
            points_h,  # these are recorded at lidar_time
            copy.deepcopy(calib_data),
            camera_name,
            int(cam_timestamp),
            int(lidar_timestamp),
            str(lidar_filepath.parents[2]),  # split directory containing the logs
            log,
        )
        uv = uv[valid_pts_bool].astype(np.int32)  # Nx2 projected coords in pixels
        uv_cam = uv_cam.T[valid_pts_bool]  # Nx3 projected coords in meters

        img_width, img_height = get_image_dims_for_camera(camera_name)
        img = np.zeros((img_height, img_width))
        img[uv[:, 1],
            uv[:, 0]] = uv_cam[:, 2]  # image of projected lidar measurements

        lidar_filename = lidar_filepath.stem
        output_dir = output_base_path / camera_name
        output_dir.mkdir(parents=True, exist_ok=True)
        output_path = output_dir / (lidar_filename + ".npz")
        savez_compressed(str(output_path), img)
    return None
Example #10
def project_and_save(lidar_filepath, output_base_path, calib_data, cameras):

    pts = load_ply(lidar_filepath)  # point cloud, numpy array Nx3 -> N 3D coords

    points_h = point_cloud_to_homogeneous(pts).T

    for camera_name in cameras:
        uv, uv_cam, valid_pts_bool = project_lidar_to_img(points_h, calib_data, camera_name)
        uv = uv[valid_pts_bool].astype(np.int32)  # Nx2 projected coords in pixels
        uv_cam = uv_cam.T[valid_pts_bool]  # Nx3 projected coords in meters

        img_width, img_height = get_image_dims_for_camera(camera_name)
        img = np.zeros((img_height, img_width))
        img[uv[:, 1], uv[:, 0]] = uv_cam[:, 2]  # image of projected lidar measurements

        lidar_filename = lidar_filepath.stem
        output_dir = output_base_path / camera_name
        output_dir.mkdir(parents=True, exist_ok=True)
        output_path = output_dir / (lidar_filename + ".npz")
        savez_compressed(str(output_path), img)
    return None
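Since the depth image is passed to savez_compressed positionally, np.load exposes it under the default key "arr_0". A hedged read-back sketch (the path is illustrative):

import numpy as np

data = np.load("output/ring_front_center/PC_315971347819783000.npz")  # illustrative path
depth_img = data["arr_0"]  # (H, W); zero where no lidar return projected
valid = depth_img > 0      # pixels carrying a depth measurement, in meters
print(valid.sum(), "projected lidar points")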
Example #11
    def plot_log_one_at_a_time(self,
                               log_id="",
                               idx=-1,
                               save_video=True,
                               city=""):
        """
        Playback a log in the static context of a map.
        In the far left frame, we show the car moving in the map. In the middle frame, we show
        the car's LiDAR returns (in the map frame). In the far right frame, we show the front camera's
        RGB image.
        """
        avm = ArgoverseMap()
        for city_name, trajs in self.per_city_traj_dict.items():
            if city != "":
                if city != city_name:
                    continue
            if city_name not in ["PIT", "MIA"]:
                logger.info("Unknown city")
                continue

            log_ids = []
            logger.info(f"{city_name} has {len(trajs)} tracks")

            if log_id == "":
                # first iterate over the instance axis
                for traj_idx, (traj, log_id) in enumerate(trajs):
                    log_ids += [log_id]
            else:
                log_ids = [log_id]

            # eliminate the duplicates
            for log_id in set(log_ids):
                logger.info(f"Log: {log_id}")

                ply_fpaths = sorted(
                    glob.glob(f"{self.dataset_dir}/{log_id}/lidar/PC_*.ply"))

                # then iterate over the time axis
                for i, ply_fpath in enumerate(ply_fpaths):
                    if idx != -1:
                        if i != idx:
                            continue
                    if i % 500 == 0:
                        logger.info(f"\tOn file {i} of {log_id}")
                    lidar_timestamp = ply_fpath.split("/")[-1].split(
                        ".")[0].split("_")[-1]
                    lidar_timestamp = int(lidar_timestamp)
                    print("Lidar timestamp = ",
                          lidar_timestamp)  # added by Yike
                    print("Log Egopose Dict = ", self.log_egopose_dict)

                    if lidar_timestamp not in self.log_egopose_dict[log_id]:
                        all_available_timestamps = sorted(
                            self.log_egopose_dict[log_id].keys())
                        diff = (all_available_timestamps[0] -
                                lidar_timestamp) / 1e9
                        logger.info(
                            f"{diff:.2f} sec before first labeled sweep")
                        continue

                    logger.info(f"\tt={lidar_timestamp}")
                    if self.plot_lidar_bev:
                        fig = plt.figure(figsize=(15, 15))
                        plt.title(
                            f"Log {log_id} @t={lidar_timestamp} in {city_name}"
                        )
                        plt.axis("off")
                        # ax_map = fig.add_subplot(131)
                        ax_3d = fig.add_subplot(111)
                        # ax_rgb = fig.add_subplot(133)
                        plt.ion()  # added by Yike

                    # need the ego-track here
                    pose_city_to_ego = self.log_egopose_dict[log_id][
                        lidar_timestamp]
                    xcenter = pose_city_to_ego["translation"][0]
                    ycenter = pose_city_to_ego["translation"][1]
                    ego_center_xyz = np.array(pose_city_to_ego["translation"])

                    city_to_egovehicle_se3 = SE3(
                        rotation=pose_city_to_ego["rotation"],
                        translation=ego_center_xyz)

                    if self.plot_lidar_bev:
                        xmin = xcenter - 80  # 150
                        xmax = xcenter + 80  # 150
                        ymin = ycenter - 80  # 150
                        ymax = ycenter + 80  # 150
                        # ax_map.scatter(xcenter, ycenter, 200, color="g", marker=".", zorder=2)
                        # ax_map.set_xlim([xmin, xmax])
                        # ax_map.set_ylim([ymin, ymax])
                        local_lane_polygons = avm.find_local_lane_polygons(
                            [xmin, xmax, ymin, ymax], city_name)
                        local_das = avm.find_local_driveable_areas(
                            [xmin, xmax, ymin, ymax], city_name)

                    lidar_pts = load_ply(ply_fpath)
                    if self.plot_lidar_in_img:
                        draw_ground_pts_in_image(
                            self.sdb,
                            copy.deepcopy(lidar_pts),
                            city_to_egovehicle_se3,
                            avm,
                            log_id,
                            lidar_timestamp,
                            city_name,
                            self.dataset_dir,
                            self.experiment_prefix,
                            plot_ground=True,
                        )

                    if self.plot_lidar_bev:
                        driveable_area_pts = copy.deepcopy(lidar_pts)
                        driveable_area_pts = city_to_egovehicle_se3.transform_point_cloud(
                            driveable_area_pts)  # put into city coords
                        driveable_area_pts = avm.remove_non_driveable_area_points(
                            driveable_area_pts, city_name)
                        driveable_area_pts = avm.remove_ground_surface(
                            driveable_area_pts, city_name)
                        driveable_area_pts = city_to_egovehicle_se3.inverse_transform_point_cloud(
                            driveable_area_pts
                        )  # put back into ego-vehicle coords
                        self.render_bev_labels_mpl(
                            city_name,
                            ax_3d,
                            "ego_axis",
                            lidar_pts,
                            copy.deepcopy(local_lane_polygons),
                            copy.deepcopy(local_das),
                            log_id,
                            lidar_timestamp,
                            city_to_egovehicle_se3,
                            avm,
                        )

                        fig.tight_layout()
                        if not Path(
                                f"{self.experiment_prefix}_per_log_viz/{log_id}"
                        ).exists():
                            os.makedirs(
                                f"{self.experiment_prefix}_per_log_viz/{log_id}"
                            )

                        plt.savefig(
                            f"{self.experiment_prefix}_per_log_viz/{log_id}/{city_name}_{log_id}_{lidar_timestamp}.png",
                            dpi=400,
                        )
                    # plt.show()
                    # plt.close("all")

                # after all frames are processed, write video with saved images
                if save_video:
                    if self.plot_lidar_bev:
                        fps = 10
                        img_wildcard = f"{self.experiment_prefix}_per_log_viz/{log_id}/{city_name}_{log_id}_%*.png"
                        output_fpath = f"{self.experiment_prefix}_per_log_viz/{log_id}_lidar_roi_nonground.mp4"
                        write_nonsequential_idx_video(img_wildcard,
                                                      output_fpath, fps)

                    if self.plot_lidar_in_img:
                        for camera_name in RING_CAMERA_LIST + STEREO_CAMERA_LIST:
                            image_prefix = (
                                f"{self.experiment_prefix}_per_log_viz/{log_id}/{camera_name}/{camera_name}_%d.jpg"
                            )
                            output_prefix = f"{self.experiment_prefix}_per_log_viz/{log_id}_{camera_name}"
                            write_video(image_prefix, output_prefix)
Example #12
def format_lidar_data(src, dst):
    x = load_ply(src)
    # set the reflectance to 1.0 for every point
    x = np.concatenate([x, np.ones((x.shape[0], 1), dtype=np.float32)], axis=1)
    x = x.reshape(-1)
    x.tofile(dst)
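Round-trip sketch: assuming the source PLY stores float32 coordinates (as Argoverse sweeps do), the flattened [x, y, z, reflectance] records can be read back KITTI-style (the dst path is illustrative):

import numpy as np

pts = np.fromfile("PC_0.bin", dtype=np.float32).reshape(-1, 4)  # illustrative dst path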
Example #13
def eval_tracks(
    path_tracker_output: str,
    path_dataset: _PathLike,
    d_min: float,
    d_max: float,
    out_file: TextIO,
    centroid_method: str,
) -> None:
    """Evaluate tracking output.

    Args:
        path_tracker_output: path to tracker output
        path_dataset: path to dataset
        d_min: minimum distance range
        d_max: maximum distance range
        out_file: output file object
        centroid_method: method for ground truth centroid estimation
    """
    acc = mm.MOTAccumulator(auto_id=True)

    path_track_data = sorted(glob.glob(os.fspath(path_tracker_output) + "/*"))

    log_id = pathlib.Path(path_dataset).name
    logger.info("log_id = %s", log_id)

    city_info_fpath = f"{path_dataset}/city_info.json"
    city_info = read_json_file(city_info_fpath)
    city_name = city_info["city_name"]
    logger.info("city name = %s", city_name)

    ID_gt_all: List[str] = []

    for ind_frame in range(len(path_track_data)):
        if ind_frame % 50 == 0:
            logger.info("%d/%d" % (ind_frame, len(path_track_data)))

        timestamp_lidar = int(path_track_data[ind_frame].split("/")[-1].split("_")[-1].split(".")[0])
        path_gt = os.path.join(
            path_dataset, "per_sweep_annotations_amodal", f"tracked_object_labels_{timestamp_lidar}.json"
        )

        if not os.path.exists(path_gt):
            logger.warning("Missing ", path_gt)
            continue

        gt_data = read_json_file(path_gt)

        pose_data = read_json_file(f"{path_dataset}/poses/city_SE3_egovehicle_{timestamp_lidar}.json")
        rotation = np.array(pose_data["rotation"])
        translation = np.array(pose_data["translation"])
        ego_R = quat2rotmat(rotation)
        ego_t = translation
        egovehicle_to_city_se3 = SE3(rotation=ego_R, translation=ego_t)

        pc_raw0 = load_ply(os.path.join(path_dataset, f"lidar/PC_{timestamp_lidar}.ply"))
        pc_raw_roi = leave_only_roi_region(
            pc_raw0, egovehicle_to_city_se3, ground_removal_method="no", city_name=city_name
        )

        gt: Dict[str, Dict[str, Any]] = {}
        id_gts = []
        for i in range(len(gt_data)):

            if gt_data[i]["label_class"] != "VEHICLE":
                continue

            bbox, orientation = label_to_bbox(gt_data[i])
            pc_segment = get_pc_inside_bbox(pc_raw_roi, bbox)

            center = np.array([gt_data[i]["center"]["x"], gt_data[i]["center"]["y"], gt_data[i]["center"]["z"]])
            if (
                len(pc_segment) >= min_point_num
                and bbox[3] > 0
                and in_distance_range_pose(np.zeros(3), center, d_min, d_max)
            ):
                track_label_uuid = gt_data[i]["track_label_uuid"]
                gt[track_label_uuid] = {}
                if centroid_method == "average":
                    gt[track_label_uuid]["centroid"] = pc_segment.sum(axis=0) / len(pc_segment)
                elif centroid_method == "label_center":
                    gt[track_label_uuid]["centroid"] = center

                else:
                    logger.warning("Not implemented")

                gt[track_label_uuid]["bbox"] = bbox
                gt[track_label_uuid]["orientation"] = orientation

                if track_label_uuid not in ID_gt_all:
                    ID_gt_all.append(track_label_uuid)

                id_gts.append(track_label_uuid)

        tracks: Dict[str, Dict[str, Any]] = {}
        id_tracks: List[str] = []

        track_data = read_json_file(path_track_data[ind_frame])

        for track in track_data:
            key = track["track_label_uuid"]

            if track["label_class"] != "VEHICLE" or track["height"] == 0:
                continue

            center = np.array([track["center"]["x"], track["center"]["y"], track["center"]["z"]])

            if in_distance_range_pose(np.zeros(3), center, d_min, d_max):
                tracks[key] = {}
                tracks[key]["centroid"] = center

                id_tracks.append(key)

        dists: List[List[float]] = []
        for gt_key, gt_value in gt.items():
            gt_track_data: List[float] = []
            dists.append(gt_track_data)
            for track_key, track_value in tracks.items():
                gt_track_data.append(get_distance(gt_value, track_value, "centroid"))

        acc.update(id_gts, id_tracks, dists)

    mh = mm.metrics.create()
    summary = mh.compute(
        acc,
        metrics=[
            "num_frames",
            "mota",
            "motp",
            "idf1",
            "mostly_tracked",
            "mostly_lost",
            "num_false_positives",
            "num_misses",
            "num_switches",
            "num_fragmentations",
        ],
        name="acc",
    )
    logger.info("summary = %s", summary)
    num_tracks = len(ID_gt_all)

    fn = os.path.basename(path_tracker_output)
    num_frames = summary["num_frames"][0]
    mota = summary["mota"][0] * 100
    motp = summary["motp"][0]
    idf1 = summary["idf1"][0]
    most_track = summary["mostly_tracked"][0] / num_tracks
    most_lost = summary["mostly_lost"][0] / num_tracks
    num_fp = summary["num_false_positives"][0]
    num_miss = summary["num_misses"][0]
    num_switch = summary["num_switches"][0]
    num_flag = summary["num_fragmentations"][0]

    out_string = (
        f"{fn} {num_frames} {mota:.2f} {motp:.2f} {idf1:.2f} {most_track:.2f} "
        f"{most_lost:.2f} {num_fp} {num_miss} {num_switch} {num_flag} \n"
    )
    out_file.write(out_string)
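A hypothetical invocation (log ID and paths are illustrative):

with open("tracking_eval.txt", "w") as out_file:
    eval_tracks(
        path_tracker_output="tracker_output/some-log-id",
        path_dataset="argoverse-tracking/val/some-log-id",
        d_min=0.0,
        d_max=100.0,
        out_file=out_file,
        centroid_method="average",
    )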
Example #14
def dump_clipped_3d_cuboids_to_images(
    log_ids: Sequence[str],
    max_num_images_to_render: int,
    data_dir: str,
    experiment_prefix: str,
    motion_compensate: bool = True,
) -> List[str]:
    """
    We bring the 3D points into each camera coordinate system, and do the clipping there in 3D.

    Args:
        log_ids: A list of log IDs
        max_num_images_to_render: maximum numbers of images to render.
        data_dir: path to dataset with the latest data
        experiment_prefix: Output directory
        motion_compensate: Whether to motion compensate when projecting

    Returns:
        saved_img_fpaths
    """
    saved_img_fpaths = []
    dl = SimpleArgoverseTrackingDataLoader(data_dir=data_dir,
                                           labels_dir=data_dir)
    avm = ArgoverseMap()

    for log_id in log_ids:
        save_dir = f"{experiment_prefix}_{log_id}"
        if not Path(save_dir).exists():
            os.makedirs(save_dir)

        city_name = dl.get_city_name(log_id)
        log_calib_data = dl.get_log_calibration_data(log_id)

        flag_done = False
        for cam_idx, camera_name in enumerate(RING_CAMERA_LIST +
                                              STEREO_CAMERA_LIST):
            cam_im_fpaths = dl.get_ordered_log_cam_fpaths(log_id, camera_name)
            for i, im_fpath in enumerate(cam_im_fpaths):
                if i % 50 == 0:
                    logging.info("\tOn file %s of camera %s of %s", i,
                                 camera_name, log_id)

                cam_timestamp = Path(im_fpath).stem.split("_")[-1]
                cam_timestamp = int(cam_timestamp)

                # load PLY file path, e.g. 'PC_315978406032859416.ply'
                ply_fpath = dl.get_closest_lidar_fpath(log_id, cam_timestamp)
                if ply_fpath is None:
                    continue
                lidar_pts = load_ply(ply_fpath)
                save_img_fpath = f"{save_dir}/{camera_name}_{cam_timestamp}.jpg"
                if Path(save_img_fpath).exists():
                    saved_img_fpaths += [save_img_fpath]
                    if max_num_images_to_render != -1 and len(
                            saved_img_fpaths) > max_num_images_to_render:
                        flag_done = True
                        break
                    continue

                city_to_egovehicle_se3 = dl.get_city_to_egovehicle_se3(
                    log_id, cam_timestamp)
                if city_to_egovehicle_se3 is None:
                    continue

                lidar_timestamp = Path(ply_fpath).stem.split("_")[-1]
                lidar_timestamp = int(lidar_timestamp)
                labels = dl.get_labels_at_lidar_timestamp(
                    log_id, lidar_timestamp)
                if labels is None:
                    logging.info("\tLabels missing at t=%s", lidar_timestamp)
                    continue

                # Swap channel order as OpenCV expects it -- BGR not RGB
                # must make a copy to make memory contiguous
                img = imageio.imread(im_fpath)[:, :, ::-1].copy()
                camera_config = get_calibration_config(log_calib_data,
                                                       camera_name)
                planes = generate_frustum_planes(
                    camera_config.intrinsic.copy(), camera_name)
                img = plot_lane_centerlines_in_img(
                    lidar_pts,
                    city_to_egovehicle_se3,
                    img,
                    city_name,
                    avm,
                    camera_config,
                    planes,
                )

                for label_idx, label in enumerate(labels):
                    obj_rec = json_label_dict_to_obj_record(label)
                    if obj_rec.occlusion == 100:
                        continue

                    cuboid_vertices = obj_rec.as_3d_bbox()
                    points_h = point_cloud_to_homogeneous(cuboid_vertices).T
                    if motion_compensate:
                        (
                            uv,
                            uv_cam,
                            valid_pts_bool,
                            K,
                        ) = project_lidar_to_img_motion_compensated(
                            points_h,  # these are recorded at lidar_time
                            copy.deepcopy(log_calib_data),
                            camera_name,
                            cam_timestamp,
                            lidar_timestamp,
                            data_dir,
                            log_id,
                            return_K=True,
                        )
                    else:
                        # project_lidar_to_img
                        (
                            uv,
                            uv_cam,
                            valid_pts_bool,
                            camera_config,
                        ) = project_lidar_to_undistorted_img(
                            points_h, copy.deepcopy(log_calib_data),
                            camera_name)
                    if valid_pts_bool.sum() == 0:
                        continue

                    img = obj_rec.render_clip_frustum_cv2(
                        img,
                        uv_cam.T[:, :3],
                        planes.copy(),
                        copy.deepcopy(camera_config),
                    )

                cv2.imwrite(save_img_fpath, img)
                saved_img_fpaths += [save_img_fpath]
                if max_num_images_to_render != -1 and len(
                        saved_img_fpaths) > max_num_images_to_render:
                    flag_done = True
                    break
            if flag_done:
                break
        category_subdir = "amodal_labels"

        if not Path(f"{experiment_prefix}_{category_subdir}").exists():
            os.makedirs(f"{experiment_prefix}_{category_subdir}")

        for cam_idx, camera_name in enumerate(RING_CAMERA_LIST +
                                              STEREO_CAMERA_LIST):
            # Write the cuboid video -- could also write w/ fps=20,30,40
            if "stereo" in camera_name:
                fps = 5
            else:
                fps = 30
            img_wildcard = f"{save_dir}/{camera_name}_%*.jpg"
            output_fpath = f"{experiment_prefix}_{category_subdir}/{log_id}_{camera_name}_{fps}fps.mp4"
            write_nonsequential_idx_video(img_wildcard, output_fpath, fps)

    return saved_img_fpaths
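A hypothetical invocation (the log ID and paths are illustrative); the returned list holds the file paths of every rendered image:

saved_img_fpaths = dump_clipped_3d_cuboids_to_images(
    log_ids=["some-log-id"],
    max_num_images_to_render=100,
    data_dir="argoverse-tracking/sample",
    experiment_prefix="cuboid_viz",
)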
Example #15
                            only_lidar_time.append(lidar_timestamp_list[j])

            # sort only corresponding lidar files (absolute file location)
            only_lidar = match_and_return(overlap, lidar_list)

            # sort only corresponding stereo left files (absolute file location)
            only_left = match_and_return(overlap, stereo_left_list['stereo_front_left'])
                        
            # sort only corresponding stereo right files (absolute file location)
            only_right = match_and_return(overlap, stereo_left_list['stereo_front_right'])
                        
            for idx in range(len(only_lidar)):
                lidar_timestamp = only_lidar_time[idx]
                print("index: ", idx, "current timestamp: ", lidar_timestamp)
                
                pc = load_ply(only_lidar[idx])
                
                uv = calibL.project_ego_to_image(pc)
                
                fov_inds = (uv[:, 0] < width - 1) & (uv[:, 0] >= 0) & \
                           (uv[:, 1] < height - 1) & (uv[:, 1] >= 0)
                fov_inds = fov_inds & (pc[:, 0] > 1)  # filter out points behind the camera
                
                valid_uv = uv[fov_inds, :]  
                valid_uv = np.round(valid_uv).astype(int)
                
                valid_pc = pc[fov_inds, :]        
                valid_uvd = calibL.project_ego_to_cam(valid_pc)
                
                depth_map = np.zeros((height, width)) - 1
                
Example #16
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Visualize Lidar')
    parser.add_argument(
        '--root_dir',
        type=str,
        default='/Users/mengsli/Downloads/DLRepo/argoverse-tracking/')
    parser.add_argument('--sub_folder', type=str,
                        default='sample/')  #train1, train2 ... val, test
    parser.add_argument('--max_high', type=int, default=1)
    args = parser.parse_args()

    assert os.path.isdir(args.root_dir)
    sub_dir = args.root_dir + '/' + args.sub_folder

    argoverse_loader = ArgoverseTrackingLoader(sub_dir)
    for log_id in argoverse_loader.log_list:

        pseudo_lidar_dir = sub_dir + '/' + log_id + '/' + 'lidar/'  #true lidar for debugging
        #pseudo_lidar_dir = sub_dir + '/' + log_id + '/' + 'pred_lidar/'
        assert os.path.isdir(pseudo_lidar_dir)

        lidar_list = [
            x for x in os.listdir(pseudo_lidar_dir) if x[-3:] == 'ply'
        ]
        lidar_list = sorted(lidar_list)

        for fn in lidar_list:
            lidar = load_ply(pseudo_lidar_dir + '/' + fn)
            fig = draw_lidar(lidar)
            mayavi_wrapper.mlab.show()