Example #1
0
def test_ffmpeg_nonseq_frame_vid_smokescreen():
    """Smoke test: write a video via `ffmpeg` from frames whose integer
    timestamp indices are non-sequential (the `%*` wildcard matches any
    run of digits in the frame filenames).
    """
    img_wildcard = "imgs_%*.jpg"
    output_fpath = "out.mp4"
    fps = 10
    write_nonsequential_idx_video(img_wildcard, output_fpath, fps)
Example #2
0
    def plot_log_one_at_a_time(self,
                               log_id="",
                               idx=-1,
                               save_video=True,
                               city=""):
        """
        Playback a log in the static context of a map.
        In the far left frame, we show the car moving in the map. In the middle frame, we show
        the car's LiDAR returns (in the map frame). In the far right frame, we show the front camera's
        RGB image.

        Args:
            log_id: if non-empty, render only this log; otherwise render all
                logs found in `self.per_city_traj_dict`.
            idx: if not -1, render only the sweep at this index in each log.
            save_video: whether to write the rendered frames out as video(s)
                after all frames of a log are processed.
            city: if non-empty, restrict playback to logs from this city
                ("PIT" or "MIA").
        """
        avm = ArgoverseMap()
        for city_name, trajs in self.per_city_traj_dict.items():
            # Optional city filter: skip every city except the requested one.
            if city != "":
                if city != city_name:
                    continue
            if city_name not in ["PIT", "MIA"]:
                logger.info("Unknown city")
                continue

            log_ids = []
            logger.info(f"{city_name} has {len(trajs)} tracks")

            if log_id == "":
                # first iterate over the instance axis
                # NOTE(review): this loop rebinds the `log_id` parameter;
                # harmless here since the empty-string branch was taken,
                # but worth keeping in mind when editing.
                for traj_idx, (traj, log_id) in enumerate(trajs):
                    log_ids += [log_id]
            else:
                log_ids = [log_id]

            # eliminate the duplicates
            for log_id in set(log_ids):
                logger.info(f"Log: {log_id}")

                # One .ply file per LiDAR sweep; sorted so sweeps are in
                # filename (timestamp) order.
                ply_fpaths = sorted(
                    glob.glob(f"{self.dataset_dir}/{log_id}/lidar/PC_*.ply"))

                # then iterate over the time axis
                for i, ply_fpath in enumerate(ply_fpaths):
                    if idx != -1:
                        if i != idx:
                            continue
                    if i % 500 == 0:
                        logger.info(f"\tOn file {i} of {log_id}")
                    # Filename pattern is .../PC_<nanosecond timestamp>.ply;
                    # extract the integer timestamp from the stem.
                    lidar_timestamp = ply_fpath.split("/")[-1].split(
                        ".")[0].split("_")[-1]
                    lidar_timestamp = int(lidar_timestamp)
                    print("Lidar timestamp = ",
                          lidar_timestamp)  # added by Yike
                    print("Log Egopose Dict = ", self.log_egopose_dict)

                    # Sweeps recorded before the first labeled pose have no
                    # ego-pose entry; report how early they are and skip.
                    if lidar_timestamp not in self.log_egopose_dict[log_id]:
                        all_available_timestamps = sorted(
                            self.log_egopose_dict[log_id].keys())
                        diff = (all_available_timestamps[0] -
                                lidar_timestamp) / 1e9
                        logger.info(
                            f"{diff:.2f} sec before first labeled sweep")
                        continue

                    logger.info(f"\tt={lidar_timestamp}")
                    if self.plot_lidar_bev:
                        fig = plt.figure(figsize=(15, 15))
                        plt.title(
                            f"Log {log_id} @t={lidar_timestamp} in {city_name}"
                        )
                        plt.axis("off")
                        # ax_map = fig.add_subplot(131)
                        ax_3d = fig.add_subplot(111)
                        # ax_rgb = fig.add_subplot(133)
                        plt.ion()  # added by Yike

                    # need the ego-track here
                    pose_city_to_ego = self.log_egopose_dict[log_id][
                        lidar_timestamp]
                    xcenter = pose_city_to_ego["translation"][0]
                    ycenter = pose_city_to_ego["translation"][1]
                    ego_center_xyz = np.array(pose_city_to_ego["translation"])

                    city_to_egovehicle_se3 = SE3(
                        rotation=pose_city_to_ego["rotation"],
                        translation=ego_center_xyz)

                    if self.plot_lidar_bev:
                        # Query map geometry within an 80 m box around the ego
                        # vehicle (in city coordinates).
                        xmin = xcenter - 80  # 150
                        xmax = xcenter + 80  # 150
                        ymin = ycenter - 80  # 150
                        ymax = ycenter + 80  # 150
                        # ax_map.scatter(xcenter, ycenter, 200, color="g", marker=".", zorder=2)
                        # ax_map.set_xlim([xmin, xmax])
                        # ax_map.set_ylim([ymin, ymax])
                        local_lane_polygons = avm.find_local_lane_polygons(
                            [xmin, xmax, ymin, ymax], city_name)
                        local_das = avm.find_local_driveable_areas(
                            [xmin, xmax, ymin, ymax], city_name)

                    lidar_pts = load_ply(ply_fpath)
                    if self.plot_lidar_in_img:
                        # Project ground LiDAR returns into the camera image.
                        draw_ground_pts_in_image(
                            self.sdb,
                            copy.deepcopy(lidar_pts),
                            city_to_egovehicle_se3,
                            avm,
                            log_id,
                            lidar_timestamp,
                            city_name,
                            self.dataset_dir,
                            self.experiment_prefix,
                            plot_ground=True,
                        )

                    if self.plot_lidar_bev:
                        # Filter the sweep to non-ground points on the
                        # driveable area: transform ego -> city, filter,
                        # then transform back city -> ego.
                        driveable_area_pts = copy.deepcopy(lidar_pts)
                        driveable_area_pts = city_to_egovehicle_se3.transform_point_cloud(
                            driveable_area_pts)  # put into city coords
                        driveable_area_pts = avm.remove_non_driveable_area_points(
                            driveable_area_pts, city_name)
                        driveable_area_pts = avm.remove_ground_surface(
                            driveable_area_pts, city_name)
                        driveable_area_pts = city_to_egovehicle_se3.inverse_transform_point_cloud(
                            driveable_area_pts
                        )  # put back into ego-vehicle coords
                        self.render_bev_labels_mpl(
                            city_name,
                            ax_3d,
                            "ego_axis",
                            lidar_pts,
                            copy.deepcopy(local_lane_polygons),
                            copy.deepcopy(local_das),
                            log_id,
                            lidar_timestamp,
                            city_to_egovehicle_se3,
                            avm,
                        )

                        fig.tight_layout()
                        # Frames are written per-log so ffmpeg can later glob
                        # them into a single video.
                        if not Path(
                                f"{self.experiment_prefix}_per_log_viz/{log_id}"
                        ).exists():
                            os.makedirs(
                                f"{self.experiment_prefix}_per_log_viz/{log_id}"
                            )

                        plt.savefig(
                            f"{self.experiment_prefix}_per_log_viz/{log_id}/{city_name}_{log_id}_{lidar_timestamp}.png",
                            dpi=400,
                        )
                    # plt.show()
                    # plt.close("all")

                # after all frames are processed, write video with saved images
                if save_video:
                    if self.plot_lidar_bev:
                        fps = 10
                        # %* matches the integer timestamp in each frame name.
                        img_wildcard = f"{self.experiment_prefix}_per_log_viz/{log_id}/{city_name}_{log_id}_%*.png"
                        output_fpath = f"{self.experiment_prefix}_per_log_viz/{log_id}_lidar_roi_nonground.mp4"
                        write_nonsequential_idx_video(img_wildcard,
                                                      output_fpath, fps)

                    if self.plot_lidar_in_img:
                        # One video per camera for the projected-LiDAR images.
                        for camera_name in RING_CAMERA_LIST + STEREO_CAMERA_LIST:
                            image_prefix = (
                                f"{self.experiment_prefix}_per_log_viz/{log_id}/{camera_name}/{camera_name}_%d.jpg"
                            )
                            output_prefix = f"{self.experiment_prefix}_per_log_viz/{log_id}_{camera_name}"
                            write_video(image_prefix, output_prefix)
Example #3
0
def test_ffmpeg_nonseq_frame_vid_smokescreen() -> None:
    """Smoke test for writing an `ffmpeg` video from frames whose integer
    nanosecond timestamps are non-consecutive.
    """
    frames_per_second = 10
    source_wildcard = "imgs_%*.jpg"
    destination = "out.mp4"
    write_nonsequential_idx_video(source_wildcard, destination, frames_per_second)
Example #4
0
def dump_clipped_3d_cuboids_to_images(
    log_ids: Sequence[str],
    max_num_images_to_render: int,
    data_dir: str,
    experiment_prefix: str,
    motion_compensate: bool = True,
) -> List[str]:
    """
    We bring the 3D points into each camera coordinate system, and do the clipping there in 3D.

    Renders annotated cuboids (and lane centerlines) onto camera images and
    writes each annotated image to disk, then assembles per-camera videos.

    Args:
        log_ids: A list of log IDs
        max_num_images_to_render: maximum numbers of images to render.
            Pass -1 to render all images with no cap.
        data_dir: path to dataset with the latest data
        experiment_prefix: Output directory
        motion_compensate: Whether to motion compensate when projecting

    Returns:
        saved_img_fpaths
    """
    saved_img_fpaths = []
    # Data loader and map are constructed once and shared across all logs.
    dl = SimpleArgoverseTrackingDataLoader(data_dir=data_dir,
                                           labels_dir=data_dir)
    avm = ArgoverseMap()

    for log_id in log_ids:
        save_dir = f"{experiment_prefix}_{log_id}"
        if not Path(save_dir).exists():
            os.makedirs(save_dir)

        city_name = dl.get_city_name(log_id)
        log_calib_data = dl.get_log_calibration_data(log_id)

        # flag_done is set when the global image budget is exhausted, so the
        # inner break can propagate out of the camera loop as well.
        flag_done = False
        for cam_idx, camera_name in enumerate(RING_CAMERA_LIST +
                                              STEREO_CAMERA_LIST):
            cam_im_fpaths = dl.get_ordered_log_cam_fpaths(log_id, camera_name)
            for i, im_fpath in enumerate(cam_im_fpaths):
                if i % 50 == 0:
                    logging.info("\tOn file %s of camera %s of %s", i,
                                 camera_name, log_id)

                # Image filenames end in the integer capture timestamp.
                cam_timestamp = Path(im_fpath).stem.split("_")[-1]
                cam_timestamp = int(cam_timestamp)

                # load PLY file path, e.g. 'PC_315978406032859416.ply'
                ply_fpath = dl.get_closest_lidar_fpath(log_id, cam_timestamp)
                if ply_fpath is None:
                    continue
                lidar_pts = load_ply(ply_fpath)
                save_img_fpath = f"{save_dir}/{camera_name}_{cam_timestamp}.jpg"
                # Already rendered on a prior run: count it toward the budget
                # but skip re-rendering.
                if Path(save_img_fpath).exists():
                    saved_img_fpaths += [save_img_fpath]
                    if max_num_images_to_render != -1 and len(
                            saved_img_fpaths) > max_num_images_to_render:
                        flag_done = True
                        break
                    continue

                city_to_egovehicle_se3 = dl.get_city_to_egovehicle_se3(
                    log_id, cam_timestamp)
                if city_to_egovehicle_se3 is None:
                    continue

                lidar_timestamp = Path(ply_fpath).stem.split("_")[-1]
                lidar_timestamp = int(lidar_timestamp)
                labels = dl.get_labels_at_lidar_timestamp(
                    log_id, lidar_timestamp)
                if labels is None:
                    logging.info("\tLabels missing at t=%s", lidar_timestamp)
                    continue

                # Swap channel order as OpenCV expects it -- BGR not RGB
                # must make a copy to make memory contiguous
                img = imageio.imread(im_fpath)[:, :, ::-1].copy()
                camera_config = get_calibration_config(log_calib_data,
                                                       camera_name)
                # Frustum planes are used to clip cuboids in 3D camera coords.
                planes = generate_frustum_planes(
                    camera_config.intrinsic.copy(), camera_name)
                img = plot_lane_centerlines_in_img(
                    lidar_pts,
                    city_to_egovehicle_se3,
                    img,
                    city_name,
                    avm,
                    camera_config,
                    planes,
                )

                for label_idx, label in enumerate(labels):
                    obj_rec = json_label_dict_to_obj_record(label)
                    # Skip fully-occluded objects.
                    if obj_rec.occlusion == 100:
                        continue

                    cuboid_vertices = obj_rec.as_3d_bbox()
                    points_h = point_cloud_to_homogeneous(cuboid_vertices).T
                    if motion_compensate:
                        # Account for ego-motion between the LiDAR sweep time
                        # and the camera capture time before projecting.
                        (
                            uv,
                            uv_cam,
                            valid_pts_bool,
                            K,
                        ) = project_lidar_to_img_motion_compensated(
                            points_h,  # these are recorded at lidar_time
                            copy.deepcopy(log_calib_data),
                            camera_name,
                            cam_timestamp,
                            lidar_timestamp,
                            data_dir,
                            log_id,
                            return_K=True,
                        )
                    else:
                        # project_lidar_to_img
                        (
                            uv,
                            uv_cam,
                            valid_pts_bool,
                            camera_config,
                        ) = project_lidar_to_undistorted_img(
                            points_h, copy.deepcopy(log_calib_data),
                            camera_name)
                    # No cuboid vertex falls inside the image; nothing to draw.
                    if valid_pts_bool.sum() == 0:
                        continue

                    img = obj_rec.render_clip_frustum_cv2(
                        img,
                        uv_cam.T[:, :3],
                        planes.copy(),
                        copy.deepcopy(camera_config),
                    )

                cv2.imwrite(save_img_fpath, img)
                saved_img_fpaths += [save_img_fpath]
                if max_num_images_to_render != -1 and len(
                        saved_img_fpaths) > max_num_images_to_render:
                    flag_done = True
                    break
            if flag_done:
                break
        category_subdir = "amodal_labels"

        if not Path(f"{experiment_prefix}_{category_subdir}").exists():
            os.makedirs(f"{experiment_prefix}_{category_subdir}")

        for cam_idx, camera_name in enumerate(RING_CAMERA_LIST +
                                              STEREO_CAMERA_LIST):
            # Write the cuboid video -- could also write w/ fps=20,30,40
            # Stereo cameras capture at a lower rate than ring cameras.
            if "stereo" in camera_name:
                fps = 5
            else:
                fps = 30
            img_wildcard = f"{save_dir}/{camera_name}_%*.jpg"
            output_fpath = f"{experiment_prefix}_{category_subdir}/{log_id}_{camera_name}_{fps}fps.mp4"
            write_nonsequential_idx_video(img_wildcard, output_fpath, fps)

    return saved_img_fpaths