Example #1
def get_split_cam_configs_and_sync_data(argoverse_tracking_root_dir, split_dir, camera_list, source_views_indexes):
    """
    Returns a dictionary giving the intrinsics/extrinsics/resolution of given cameras for each sequence.

    Parameters
    ----------
    split_dir: pathlib Path
        argoverse-tracking split root dir, e.g.: path_to_data/argoverse-tracking/train1

    camera_list: list
        list of camera for which to load the parameters. Must be in Argoverse's CAMERA_LIST

    Returns
    -------
    dict
        A dictionary where the key is the log string and the value a dict corresponding to the cameras' parameters
    """

    camera_configs = {}
    synchronized_data = []

    db = SynchronizationDB(str(split_dir))
    valid_logs = list(db.get_valid_logs())  # log_ids found under split_dir

    split = split_dir.stem  # e.g., train1
    for log in valid_logs:
        camera_configs[log] = load_camera_config(str(split_dir / log / VEHICLE_CALIBRATION_INFO_FILENAME),
                                                      camera_list)
        synchronized_data += get_synchronized_data(argoverse_tracking_root_dir, db, split, log, camera_list,
                                                   source_views_indexes)

    return camera_configs, synchronized_data
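
A minimal invocation sketch (all values below are placeholders; the camera names must simply be members of Argoverse's CAMERA_LIST):

from pathlib import Path

root_dir = Path("~/data/argoverse-tracking").expanduser()  # placeholder location
camera_configs, synchronized_data = get_split_cam_configs_and_sync_data(
    root_dir,
    root_dir / "train1",
    ["ring_front_center"],          # subset of CAMERA_LIST
    source_views_indexes=[-1, 1],   # placeholder: the views adjacent to the target view
)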
Example #2
    def __init__(self, data_dir: str, labels_dir: str) -> None:
        """
        Args:
            data_dir: str, representing path to raw Argoverse data
            labels_dir: str, representing path to Argoverse data labels
        """
        self.data_dir = data_dir
        self.labels_dir = labels_dir
        self.sdb = SynchronizationDB(data_dir)
Example #3
    def __init__(self, data_dir: str, labels_dir: str) -> None:
        """
        Args:
            data_dir: str, representing path to raw Argoverse data
            labels_dir: str, representing path to Argoverse data labels (e.g. labels or estimated detections/tracks)
        """
        self.data_dir = data_dir
        self.labels_dir = labels_dir
        self.sdb = SynchronizationDB(data_dir)
Example #4
    def __init__(
        self, dataset_dir: str, experiment_prefix: str, use_existing_files: bool = True, log_id: Optional[str] = None
    ) -> None:
        """We will cache the accumulated trajectories per city, per log, and per frame
        for the tracking benchmark.
        """
        self.plot_lane_tangent_arrows = True
        self.plot_lidar_bev = True
        self.plot_lidar_in_img = False
        self.experiment_prefix = experiment_prefix
        self.dataset_dir = dataset_dir
        self.labels_dir = dataset_dir
        self.sdb = SynchronizationDB(self.dataset_dir)

        if log_id is None:
            tmp_dir = tempfile.gettempdir()
            per_city_traj_dict_fpath = f"{tmp_dir}/per_city_traj_dict_{experiment_prefix}.pkl"
            log_egopose_dict_fpath = f"{tmp_dir}/log_egopose_dict_{experiment_prefix}.pkl"
            log_timestamp_dict_fpath = f"{tmp_dir}/log_timestamp_dict_{experiment_prefix}.pkl"
            if not use_existing_files:
                # write the accumulated data dictionaries to disk
                PerFrameLabelAccumulator(dataset_dir, dataset_dir, experiment_prefix)

            self.per_city_traj_dict = load_pkl_dictionary(per_city_traj_dict_fpath)
            self.log_egopose_dict = load_pkl_dictionary(log_egopose_dict_fpath)
            self.log_timestamp_dict = load_pkl_dictionary(log_timestamp_dict_fpath)
        else:
            pfa = PerFrameLabelAccumulator(dataset_dir, dataset_dir, experiment_prefix, save=False)
            pfa.accumulate_per_log_data(log_id=log_id)
            self.per_city_traj_dict = pfa.per_city_traj_dict
            self.log_egopose_dict = pfa.log_egopose_dict
            self.log_timestamp_dict = pfa.log_timestamp_dict
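
This constructor matches argoverse-api's DatasetOnMapVisualizer; treating that class name as an assumption, a usage sketch with placeholder values is:

viz = DatasetOnMapVisualizer(
    dataset_dir="path/to/argoverse-tracking/val",  # placeholder path
    experiment_prefix="demo",
    use_existing_files=False,  # regenerate the cached pickles via PerFrameLabelAccumulator
)
# or accumulate a single log on the fly, skipping the pickle cache:
viz_single_log = DatasetOnMapVisualizer(
    dataset_dir="path/to/argoverse-tracking/val",
    experiment_prefix="demo",
    log_id="some-log-id",  # placeholder log id
)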
Example #5
def visualize_ground_lidar_pts(log_id: str, dataset_dir: str,
                               experiment_prefix: str):
    """Process a log by drawing the LiDAR returns that are classified as belonging
    to the ground surface in a red to green colormap in the image.

    Args:
        log_id: The ID of a log
        dataset_dir: Where the dataset is stored
        experiment_prefix: Output prefix
    """
    sdb = SynchronizationDB(dataset_dir, collect_single_log_id=log_id)

    city_info_fpath = f"{dataset_dir}/{log_id}/city_info.json"
    city_info = read_json_file(city_info_fpath)
    city_name = city_info["city_name"]
    avm = ArgoverseMap()

    ply_fpaths = sorted(glob.glob(f"{dataset_dir}/{log_id}/lidar/PC_*.ply"))

    for i, ply_fpath in enumerate(ply_fpaths):
        if i % 500 == 0:
            print(f"\tOn file {i} of {log_id}")
        lidar_timestamp_ns = ply_fpath.split("/")[-1].split(".")[0].split(
            "_")[-1]

        pose_fpath = f"{dataset_dir}/{log_id}/poses/city_SE3_egovehicle_{lidar_timestamp_ns}.json"
        if not Path(pose_fpath).exists():
            continue

        pose_data = read_json_file(pose_fpath)
        rotation = np.array(pose_data["rotation"])
        translation = np.array(pose_data["translation"])
        city_to_egovehicle_se3 = SE3(rotation=quat2rotmat(rotation),
                                     translation=translation)

        lidar_pts = load_ply(ply_fpath)

        lidar_timestamp_ns = int(lidar_timestamp_ns)
        draw_ground_pts_in_image(
            sdb,
            lidar_pts,
            city_to_egovehicle_se3,
            avm,
            log_id,
            lidar_timestamp_ns,
            city_name,
            dataset_dir,
            experiment_prefix,
        )

    for camera_name in CAMERA_LIST:
        if "stereo" in camera_name:
            fps = 5
        else:
            fps = 10
        cmd = f"ffmpeg -r {fps} -f image2 -i '{experiment_prefix}_ground_viz/{log_id}/{camera_name}/%*.jpg' {experiment_prefix}_ground_viz/{experiment_prefix}_{log_id}_{camera_name}_{fps}fps.mp4"

        print(cmd)
        run_command(cmd)
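
A hedged invocation sketch (all argument values below are placeholders); note the final step shells out to ffmpeg, which must be available on PATH:

visualize_ground_lidar_pts(
    log_id="some-log-id",  # placeholder log id
    dataset_dir="path/to/argoverse-tracking/sample",  # placeholder path
    experiment_prefix="ground_viz_demo",
)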
Example #6
    def __init__(
        self,
        dataset_dir: str,
        labels_dir: str,
        experiment_prefix: str,
        bboxes_3d: bool = False,
        save: bool = True,
    ) -> None:
        """Initialize PerFrameLabelAccumulator object for use with tracking benchmark data.

        Args:
            dataset_dir (str): Dataset directory.
            labels_dir (str): Labels directory.
            experiment_prefix (str): Prefix for the experiment to use.
            bboxes_3d (bool, optional): whether to use 3d bounding boxes (True) or 2d bounding boxes (False).
        """
        self.bboxes_3d = bboxes_3d

        self.dataset_dir = dataset_dir
        self.labels_dir = labels_dir
        tmp_dir = tempfile.gettempdir()
        per_city_traj_dict_fpath = f"{tmp_dir}/per_city_traj_dict_{experiment_prefix}.pkl"
        log_egopose_dict_fpath = f"{tmp_dir}/log_egopose_dict_{experiment_prefix}.pkl"
        log_timestamp_dict_fpath = f"{tmp_dir}/log_timestamp_dict_{experiment_prefix}.pkl"

        # coordinate system is the map world frame

        self.per_city_traj_dict: Dict[str, List[Tuple[np.ndarray, str]]] = {
            "MIA": [],
            "PIT": [],
        }  # all the trajectories for these 2 cities
        self.log_egopose_dict: Dict[str, Dict[int, Dict[str, np.ndarray]]] = {}
        self.log_timestamp_dict: Dict[str, Dict[int, List[FrameRecord]]] = {}
        self.sdb = SynchronizationDB(self.dataset_dir)

        if save:
            self.accumulate_per_log_data()
            save_pkl_dictionary(per_city_traj_dict_fpath,
                                self.per_city_traj_dict)
            save_pkl_dictionary(log_egopose_dict_fpath, self.log_egopose_dict)
            save_pkl_dictionary(log_timestamp_dict_fpath,
                                self.log_timestamp_dict)
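
As Example #10 below shows, constructing PerFrameLabelAccumulator with save=False defers accumulation so a single log can be processed; save=True (the default) accumulates every log and writes the three pickles to the temp dir. A sketch with placeholder paths:

pfa = PerFrameLabelAccumulator(
    "path/to/argoverse-tracking/val",  # dataset_dir (placeholder)
    "path/to/argoverse-tracking/val",  # labels_dir (placeholder)
    "my_experiment",
    save=False,
)
pfa.accumulate_per_log_data(log_id="some-log-id")  # placeholder log id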
Example #7
    def __init__(self, root_dir: str) -> None:
        # initialize class member
        self.CAMERA_LIST = CAMERA_LIST
        self._log_list: Optional[List[str]] = None
        self._image_list: Optional[Dict[str, Dict[str, List[str]]]] = None
        self._image_list_sync: Optional[Dict[str, Dict[str, List[np.ndarray]]]] = None
        self._lidar_list: Optional[Dict[str, List[str]]] = None
        self._image_timestamp_list: Optional[Dict[str, Dict[str, List[int]]]] = None
        self._timestamp_image_dict: Optional[Dict[str, Dict[str, Dict[int, str]]]] = None
        self._image_timestamp_list_sync: Optional[Dict[str, Dict[str, List[int]]]] = None
        self._lidar_timestamp_list: Optional[Dict[str, List[int]]] = None
        self._timestamp_lidar_dict: Optional[Dict[str, Dict[int, str]]] = None
        self._label_list: Optional[Dict[str, List[str]]] = None
        self._calib: Optional[Dict[str, Dict[str, Calibration]]] = None  # { log_name: { camera_name: Calibration } }
        self._city_name = None
        self.counter: int = 0

        self.image_count: int = 0
        self.lidar_count: int = 0

        self.root_dir: str = root_dir

        self.current_log = self.log_list[self.counter]

        assert self.image_list is not None
        assert self.lidar_list is not None
        assert self.label_list is not None

        # load calibration file
        self.calib_filename: str = os.path.join(
            self.root_dir, self.current_log, "vehicle_calibration_info.json")

        # lidar @10hz, ring camera @30hz, stereo camera @5hz
        self.num_lidar_frame: int = len(self.lidar_timestamp_list)
        self.num_ring_camera_frame: int = len(
            self.image_timestamp_list[RING_CAMERA_LIST[0]])
        self.num_stereo_camera_frame: int = len(
            self.image_timestamp_list[STEREO_CAMERA_LIST[0]])

        self.sync: SynchronizationDB = SynchronizationDB(root_dir)

        assert self.image_list_sync is not None
        assert self.calib is not None
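
This fragment matches the loader that Example #10 below instantiates as ArgoverseTrackingLoader; assuming that name, a usage sketch is:

loader = ArgoverseTrackingLoader("path/to/argoverse-tracking/sample")  # placeholder path
print(loader.current_log, loader.num_lidar_frame, loader.num_ring_camera_frame)
log_data = loader.get(loader.current_log)  # per-log accessor, as used in Example #10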
Example #8
    def __init__(self, dataset_dir: str, experiment_prefix: str,
                 tf_record_prefix: str) -> None:
        """We will cache the accumulated trajectories per city, per log, and per frame
        for the tracking benchmark.
            """
        self.experiment_prefix = experiment_prefix
        self.tf_record_prefix = tf_record_prefix
        self.dataset_dir = dataset_dir
        self.labels_dir = dataset_dir
        self.sdb = SynchronizationDB(self.dataset_dir)

        self.per_city_traj_dict = None
        self.log_egopose_dict = None
        self.log_timestamp_dict = None
        self.timestamps = None
Example #9
def main(argo_tracking_root_dir, output_dir, cameras, acc_sweeps, ip_basic):
    print('Preprocessing data....')
    print("INPUT DIR: ", argo_tracking_root_dir)
    print("OUTPUT DIR: ", output_dir)

    if cameras is None:
        cameras = CAMERA_LIST
    else:
        cameras = validate_camera_option(cameras)

    print(cameras)

    argo_tracking_root_dir = Path(argo_tracking_root_dir).expanduser()
    output_dir = Path(output_dir).expanduser()

    split_namedirs = ["train1", "train2", "train3", "train4", "test", "val"]
    split_dirs = [
        argo_tracking_root_dir / split_namedir
        for split_namedir in split_namedirs
    ]

    for split_dir in split_dirs:

        db = SynchronizationDB(str(split_dir))

        log_dirs = sorted(list(split_dir.iterdir()))
        for log_dir in log_dirs:
            lidar_dir = log_dir / "lidar"
            lidar_filepaths = sorted(lidar_dir.iterdir())
            total = len(lidar_filepaths)

            calib_filepath = str(log_dir / "vehicle_calibration_info.json")
            output_base_path = output_dir / split_dir.stem / log_dir.stem

            with open(calib_filepath, "r") as f:
                calib_data = json.load(f)
            args = (output_base_path, calib_data, cameras, db, acc_sweeps,
                    ip_basic)

            with tqdm(lidar_filepaths,
                      desc=f"{split_dir.stem} | log {log_dir.stem} ",
                      total=total) as progress:
                for lidar_filepath in progress:
                    project_and_save(lidar_filepath, *args)

    print('Preprocessing of LiDAR data finished.')
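
A hedged invocation sketch (paths and option values are placeholders; all six split directories listed above must exist under the root):

main(
    argo_tracking_root_dir="~/data/argoverse-tracking",  # placeholder
    output_dir="~/data/argoverse-preprocessed",          # placeholder
    cameras=None,      # None selects the full CAMERA_LIST
    acc_sweeps=3,      # placeholder value
    ip_basic=False,
)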
Example #10
def generate_vehicle_bev(dataset_dir="", log_id="", output_dir=""):

    argoverse_loader = ArgoverseTrackingLoader(dataset_dir)
    argoverse_data = argoverse_loader.get(log_id)
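    # CAMERA_LIST[7] is stereo_front_left in argoverse-api's camera ordering,
    # matching the 'image_raw_stereo_front_left' lookup below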
    camera = argoverse_loader.CAMERA_LIST[7]
    calib = argoverse_data.get_calibration(camera)
    sdb = SynchronizationDB(dataset_dir, collect_single_log_id=log_id)

    calib_path = f"{dataset_dir}/{log_id}/vehicle_calibration_info.json"
    calib_data = read_json_file(calib_path)
    ind = 0
    for i, camera_data in enumerate(calib_data['camera_data_']):
        if camera_data['key'] == 'image_raw_stereo_front_left':
            ind = i
            break
    rotation = np.array(calib_data['camera_data_'][ind]['value']
                        ['vehicle_SE3_camera_']['rotation']['coefficients'])
    translation = np.array(calib_data['camera_data_'][ind]['value']
                           ['vehicle_SE3_camera_']['translation'])
    egovehicle_SE3_cam = SE3(rotation=quat2rotmat(rotation),
                             translation=translation)

    if not os.path.exists(os.path.join(output_dir, log_id, "car_bev_gt")):
        os.makedirs(os.path.join(output_dir, log_id, "car_bev_gt"))

    lidar_dir = os.path.join(dataset_dir, log_id, "lidar")
    ply_list = os.listdir(lidar_dir)

    pfa = PerFrameLabelAccumulator(dataset_dir,
                                   dataset_dir,
                                   "argoverse_bev_viz",
                                   save=False,
                                   bboxes_3d=True)
    pfa.accumulate_per_log_data(log_id)
    log_timestamp_dict = pfa.log_timestamp_dict
    for ply_name in ply_list:
        lidar_timestamp = ply_name.split('.')[0].split('_')[1]
        lidar_timestamp = int(lidar_timestamp)

        cam_timestamp = sdb.get_closest_cam_channel_timestamp(
            lidar_timestamp, "stereo_front_left", str(log_id))
        image_path = os.path.join(
            output_dir, str(log_id), "car_bev_gt",
            "stereo_front_left_" + str(cam_timestamp) + ".jpg")
        objects = log_timestamp_dict[log_id][lidar_timestamp]
        top_view = np.zeros((256, 256))

        all_occluded = True
        for frame_rec in objects:
            if frame_rec.occlusion_val != IS_OCCLUDED_FLAG:
                all_occluded = False

        if not all_occluded:
            for frame_rec in objects:
                bbox_ego_frame = frame_rec.bbox_ego_frame
                uv = calib.project_ego_to_image(bbox_ego_frame).T
                idx_ = np.all(
                    np.logical_and(
                        np.logical_and(
                            np.logical_and(uv[0, :] >= 0.0,
                                           uv[0, :] < size[1] - 1.0),
                            np.logical_and(uv[1, :] >= 0.0,
                                           uv[1, :] < size[0] - 1.0)),
                        uv[2, :] > 0))
                if not idx_:
                    continue
                bbox_cam_fr = egovehicle_SE3_cam.inverse().transform_point_cloud(
                    bbox_ego_frame)
                X = bbox_cam_fr[:, 0]
                Z = bbox_cam_fr[:, 2]

                if (frame_rec.occlusion_val != IS_OCCLUDED_FLAG
                        and frame_rec.obj_class_str == "VEHICLE"):
                    y_img = (-Z / res).astype(np.int32)
                    x_img = (X / res).astype(np.int32)
                    x_img -= int(np.floor(-20 / res))
                    y_img += int(np.floor(40 / res))
                    box = np.array([x_img[2], y_img[2]])
                    box = np.vstack((box, [x_img[6], y_img[6]]))
                    box = np.vstack((box, [x_img[7], y_img[7]]))
                    box = np.vstack((box, [x_img[3], y_img[3]]))
                    cv2.drawContours(top_view, [box], 0, 255, -1)

        cv2.imwrite(image_path, top_view)
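
The meters-to-pixels conversion above relies on the module-level constant res; a self-contained sketch of the same camera-frame X/Z to BEV-pixel mapping, assuming res = 0.3125 m/pixel (so the 256x256 grid spans X in [-20, 60) m and Z in (-40, 40] m), is:

import numpy as np

res = 0.3125  # assumed meters per BEV pixel; the real value lives in the source module

def cam_xz_to_bev_pixels(X: np.ndarray, Z: np.ndarray) -> np.ndarray:
    """Map camera-frame X (right) / Z (forward) coords, in meters, to BEV pixel (x, y)."""
    x_img = (X / res).astype(np.int32) - int(np.floor(-20 / res))  # shift so X = -20 m lands on column 0
    y_img = (-Z / res).astype(np.int32) + int(np.floor(40 / res))  # negate so larger Z sits higher in the image
    return np.stack([x_img, y_img], axis=1)

# e.g., a point 10 m ahead of and 5 m to the right of the camera:
print(cam_xz_to_bev_pixels(np.array([5.0]), np.array([10.0])))  # -> [[80 96]]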
Example #11
def generate_road_bev(dataset_dir="", log_id="", output_dir=""):

    argoverse_loader = ArgoverseTrackingLoader(dataset_dir)
    argoverse_data = argoverse_loader.get(log_id)
    city_name = argoverse_data.city_name
    avm = ArgoverseMap()
    sdb = SynchronizationDB(dataset_dir, collect_single_log_id=log_id)
    # remove any stale output, then (re)create the output directory
    road_gt_dir = os.path.join(output_dir, log_id, 'road_gt')
    if os.path.exists(road_gt_dir):
        os.system("rm -r " + road_gt_dir)
    os.makedirs(road_gt_dir, exist_ok=True)

    lidar_dir = os.path.join(dataset_dir, log_id, 'lidar')

    # gather every sweep's nanosecond timestamp in chronological order
    lidar_timestamps = sorted(
        int(ply_name.split('.')[0].split('_')[-1])
        for ply_name in os.listdir(lidar_dir))

    calib_path = f"{dataset_dir}/{log_id}/vehicle_calibration_info.json"
    calib_data = read_json_file(calib_path)
    ind = 0
    for i, camera_data in enumerate(calib_data['camera_data_']):
        if camera_data['key'] == 'image_raw_stereo_front_left':
            ind = i
            break
    rotation = np.array(calib_data['camera_data_'][ind]['value']
                        ['vehicle_SE3_camera_']['rotation']['coefficients'])
    translation = np.array(calib_data['camera_data_'][ind]['value']
                           ['vehicle_SE3_camera_']['translation'])
    egovehicle_SE3_cam = SE3(rotation=quat2rotmat(rotation),
                             translation=translation)

    for lidar_timestamp in lidar_timestamps:
        cam_timestamp = sdb.get_closest_cam_channel_timestamp(
            lidar_timestamp, "stereo_front_left", str(log_id))
        occupancy_map = np.zeros((256, 256))
        pose_fpath = os.path.join(
            dataset_dir, log_id, "poses",
            "city_SE3_egovehicle_" + str(lidar_timestamp) + ".json")
        if not Path(pose_fpath).exists():
            continue
        pose_data = read_json_file(pose_fpath)
        rotation = np.array(pose_data["rotation"])
        translation = np.array(pose_data["translation"])
        xcenter = translation[0]
        ycenter = translation[1]
        city_to_egovehicle_se3 = SE3(rotation=quat2rotmat(rotation),
                                     translation=translation)
        ego_car_nearby_lane_ids = avm.get_lane_ids_in_xy_bbox(
            xcenter, ycenter, city_name, 50.0)
        occupancy_map = get_lane_bev(ego_car_nearby_lane_ids, avm, city_name,
                                     city_to_egovehicle_se3,
                                     egovehicle_SE3_cam, occupancy_map, res,
                                     255)
        output_loc = os.path.join(
            output_dir, log_id, 'road_gt',
            'stereo_front_left_' + str(cam_timestamp) + '.png')
        cv2.imwrite(output_loc, occupancy_map)
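
get_lane_bev is not shown in this example; purely as an illustration (not the source repository's implementation), a rasterizer with this signature might look like the sketch below, reusing ArgoverseMap.get_lane_segment_polygon and the same X/Z pixel mapping as Example #10:

import cv2
import numpy as np

def get_lane_bev_sketch(lane_ids, avm, city_name, city_to_egovehicle_se3,
                        egovehicle_SE3_cam, occupancy_map, res, fill_value):
    """Illustrative only: rasterize each nearby lane polygon into the BEV grid."""
    for lane_id in lane_ids:
        polygon_city = avm.get_lane_segment_polygon(lane_id, city_name)  # (N, 3), city frame
        polygon_ego = city_to_egovehicle_se3.inverse_transform_point_cloud(polygon_city)
        polygon_cam = egovehicle_SE3_cam.inverse().transform_point_cloud(polygon_ego)
        x_img = (polygon_cam[:, 0] / res).astype(np.int32) - int(np.floor(-20 / res))
        y_img = (-polygon_cam[:, 2] / res).astype(np.int32) + int(np.floor(40 / res))
        cv2.fillPoly(occupancy_map, [np.stack([x_img, y_img], axis=1)], fill_value)
    return occupancy_map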
Example #12
def draw_ground_pts_in_image(
    sdb: SynchronizationDB,
    lidar_points: np.ndarray,
    city_to_egovehicle_se3: SE3,
    dataset_map: ArgoverseMap,
    log_id: str,
    lidar_timestamp: int,
    city_name: str,
    dataset_dir: str,
    experiment_prefix: str,
    plot_ground: bool = True,
    motion_compensate: bool = False,
    camera: Optional[str] = None,
) -> Union[None, np.ndarray]:
    """Write an image to disk with rendered ground points for every camera.

    Args:
        sdb: instance of SynchronizationDB
        lidar_points: Numpy array of shape (N,3) in egovehicle frame
        city_to_egovehicle_se3: SE3 instance which takes a point in egovehicle frame and brings it into city frame
        dataset_map: Map dataset instance
        log_id: ID of the log
        city_name: A city's name (e.g. 'MIA' or 'PIT')
        motion_compensate: Whether to bring lidar points from world frame @ lidar timestamp, to world frame @ camera
                           timestamp
        camera: camera name, if specified will return image of that specific camera, if None, will save all camera to
                disk and return None

    """
    # put into city coords, then prune away ground and non-RoI points
    lidar_points = city_to_egovehicle_se3.transform_point_cloud(lidar_points)
    lidar_points = dataset_map.remove_non_driveable_area_points(
        lidar_points, city_name)
    _, not_ground_logicals = dataset_map.remove_ground_surface(
        copy.deepcopy(lidar_points), city_name, return_logicals=True)
    lidar_points = lidar_points[np.logical_not(not_ground_logicals)
                                if plot_ground else not_ground_logicals]

    # put back into ego-vehicle coords
    lidar_points = city_to_egovehicle_se3.inverse_transform_point_cloud(
        lidar_points)

    calib_fpath = f"{dataset_dir}/{log_id}/vehicle_calibration_info.json"
    calib_data = read_json_file(calib_fpath)

    # red-to-green colormap, one bin per meter of range; bins repeat every NUM_RANGE_BINS meters
    colors_arr = np.array([
        [color_obj.rgb]
        for color_obj in Color("red").range_to(Color("green"), NUM_RANGE_BINS)
    ]).squeeze()

    for camera_name in RING_CAMERA_LIST + STEREO_CAMERA_LIST:
        im_dir = f"{dataset_dir}/{log_id}/{camera_name}"

        # load images, e.g. 'image_raw_ring_front_center_000000486.jpg'
        cam_timestamp = sdb.get_closest_cam_channel_timestamp(
            lidar_timestamp, camera_name, log_id)
        if cam_timestamp is None:
            continue

        im_fname = f"{camera_name}_{cam_timestamp}.jpg"
        im_fpath = f"{im_dir}/{im_fname}"

        # Swap channel order as OpenCV expects it -- BGR not RGB
        # must make a copy to make memory contiguous
        img = imageio.imread(im_fpath)[:, :, ::-1].copy()
        points_h = point_cloud_to_homogeneous(copy.deepcopy(lidar_points)).T

        if motion_compensate:
            uv, uv_cam, valid_pts_bool = project_lidar_to_img_motion_compensated(
                points_h,  # these are recorded at lidar_time
                copy.deepcopy(calib_data),
                camera_name,
                cam_timestamp,
                lidar_timestamp,
                dataset_dir,
                log_id,
                False,
            )
        else:
            uv, uv_cam, valid_pts_bool = project_lidar_to_img(
                points_h, copy.deepcopy(calib_data), camera_name, False)

        if valid_pts_bool is None or uv is None or uv_cam is None:
            continue

        if valid_pts_bool.sum() == 0:
            continue

        uv = np.round(uv[valid_pts_bool]).astype(np.int32)
        uv_cam = uv_cam.T[valid_pts_bool]
        pt_ranges = np.linalg.norm(uv_cam[:, :3], axis=1)
        rgb_bins = np.round(pt_ranges).astype(np.int32)
        # account for moving past 100 meters, loop around again
        rgb_bins = rgb_bins % NUM_RANGE_BINS
        uv_colors = (255 * colors_arr[rgb_bins]).astype(np.int32)

        img = draw_point_cloud_in_img_cv2(img, uv, np.fliplr(uv_colors))

        if not Path(f"{experiment_prefix}_ground_viz/{log_id}/{camera_name}"
                    ).exists():
            os.makedirs(
                f"{experiment_prefix}_ground_viz/{log_id}/{camera_name}")

        save_dir = f"{experiment_prefix}_ground_viz/{log_id}/{camera_name}"
        cv2.imwrite(f"{save_dir}/{camera_name}_{lidar_timestamp}.jpg", img)
        if camera == camera_name:
            return cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    return None
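
Inside a loop like Example #5's, the camera parameter returns a single rendered image instead of only writing files (variables as in Example #5; "ring_front_center" is just an example choice):

img_rgb = draw_ground_pts_in_image(
    sdb, lidar_pts, city_to_egovehicle_se3, avm, log_id,
    lidar_timestamp_ns, city_name, dataset_dir, experiment_prefix,
    camera="ring_front_center",
)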
Example #13
class PerFrameLabelAccumulator:
    """We will cache the accumulated track label trajectories per city, per log, and per frame.
    In order to plot each frame sequentially, one at a time, we need to aggregate beforehand
    the tracks and cuboids for each frame.

    Attributes:
        bboxes_3d (bool): whether to use 3d bounding boxes (True) or 2d bounding boxes (False).
        dataset_dir (str): Dataset directory.
        labels_dir (str): Labels directory.
        log_egopose_dict (dict): Egopose per log id and timestamp.
        log_timestamp_dict (dict): List of frame records per log id and timestamp.
        per_city_traj_dict (dict): Per city trajectory dictionary.
        sdb (SynchronizationDB): Synchronization DB.
    """
    def __init__(
        self,
        dataset_dir: str,
        labels_dir: str,
        experiment_prefix: str,
        bboxes_3d: bool = False,
        save: bool = True,
    ) -> None:
        """Initialize PerFrameLabelAccumulator object for use with tracking benchmark data.

        Args:
            dataset_dir (str): Dataset directory.
            labels_dir (str): Labels directory.
            experiment_prefix (str): Prefix for the experiment to use.
            bboxes_3d (bool, optional): whether to use 3d bounding boxes (True) or 2d bounding boxes (False).
        """
        self.bboxes_3d = bboxes_3d

        self.dataset_dir = dataset_dir
        self.labels_dir = labels_dir
        tmp_dir = tempfile.gettempdir()
        per_city_traj_dict_fpath = f"{tmp_dir}/per_city_traj_dict_{experiment_prefix}.pkl"
        log_egopose_dict_fpath = f"{tmp_dir}/log_egopose_dict_{experiment_prefix}.pkl"
        log_timestamp_dict_fpath = f"{tmp_dir}/log_timestamp_dict_{experiment_prefix}.pkl"

        # coordinate system is the map world frame

        self.per_city_traj_dict: Dict[str, List[Tuple[np.ndarray, str]]] = {
            "MIA": [],
            "PIT": [],
        }  # all the trajectories for these 2 cities
        self.log_egopose_dict: Dict[str, Dict[int, Dict[str, np.ndarray]]] = {}
        self.log_timestamp_dict: Dict[str, Dict[int, List[FrameRecord]]] = {}
        self.sdb = SynchronizationDB(self.dataset_dir)

        if save:
            self.accumulate_per_log_data()
            save_pkl_dictionary(per_city_traj_dict_fpath,
                                self.per_city_traj_dict)
            save_pkl_dictionary(log_egopose_dict_fpath, self.log_egopose_dict)
            save_pkl_dictionary(log_timestamp_dict_fpath,
                                self.log_timestamp_dict)

    def accumulate_per_log_data(self, log_id: Optional[str] = None) -> None:
        """Loop through all of the logs that we have. Get the labels that pertain to the
        benchmark (i.e. tracking or detection) that we are interested in.

        We use a unique color to describe each trajectory, and then we store the
        instance of the trajectory, along with its color, *PER FRAME* , per log.

        """
        MIAMI_CUBOID_COUNT = 0
        PITT_CUBOID_COUNT = 0

        log_fpaths = glob.glob(f"{self.dataset_dir}/*")
        log_fpaths = [f for f in log_fpaths if os.path.isdir(f)]
        num_benchmark_logs = len(log_fpaths)

        for log_idx, log_fpath in enumerate(log_fpaths):
            log_id_ = log_fpath.split("/")[-1]
            if log_id is not None:
                if log_id_ != log_id:
                    continue
            if log_id_ not in self.sdb.get_valid_logs():
                continue

            city_info_fpath = f"{self.dataset_dir}/{log_id_}/city_info.json"
            city_info = read_json_file(city_info_fpath)
            log_city_name = city_info["city_name"]
            if log_city_name not in self.per_city_traj_dict:
                logger.warning(f"{log_city_name} not listed city")
                continue

            self.log_egopose_dict[log_id_] = {}
            self.log_timestamp_dict[log_id_] = {}

            traj_labels = self.get_log_trajectory_labels(log_id_)
            if traj_labels is None:
                continue  # skip this log since no tracking data

            for traj_idx, traj_label in enumerate(traj_labels):
                if (traj_idx % 500) == 0:
                    logger.info(f"On traj index {traj_idx}")
                traj_city_fr = self.place_trajectory_in_city_frame(
                    traj_label, log_id_)
                # we don't know the city name until here
                if traj_idx == 0:
                    logger.info(
                        f"Log {log_id_} has {len(traj_labels)} trajectories in {log_city_name}"
                    )

                self.per_city_traj_dict[log_city_name].append(
                    (traj_city_fr, log_id_))

        logger.info(f"We looked at {num_benchmark_logs} tracking logs")
        logger.info(
            f"Miami has {MIAMI_CUBOID_COUNT} and Pittsburgh has {PITT_CUBOID_COUNT} cuboids"
        )

    def get_log_trajectory_labels(
            self, log_id: str) -> Optional[List[TrajectoryLabel]]:
        """Create a very large list with all of the trajectory data.

        Treat a single object cuboid label as one step in a trajectory.
        Then we can share the same representation for both.

        Args:
            log_id (str): Log id to load.

        Returns:
            List[TrajectoryLabel]: List of trajectory labels.
        """
        path = f"{self.labels_dir}/{log_id}/track_labels_amodal"
        if Path(path).exists():
            return load_json_track_labels(f"{path}/*.json")
        else:
            return None

    def place_trajectory_in_city_frame(self, traj_label: TrajectoryLabel,
                                       log_id: str) -> np.ndarray:
        """Place trajectory in the city frame
        Args:
            traj_label (TrajectoryLabel): instance of the TrajectoryLabel class.
            log_id (str): Log id.

        Returns:
            -   traj_city_fr: Numpy array of shape (seq_len, NUM_CUBOID_VERTS, 3), storing NUM_CUBOID_VERTS (x,y,z) coords per cuboid.

        """
        seq_len = traj_label.timestamps.shape[0]

        if self.bboxes_3d:
            NUM_CUBOID_VERTS = 8
        else:
            NUM_CUBOID_VERTS = 4

        # store NUM_CUBOID_VERTS (x,y,z) coords per cuboid
        traj_city_fr = np.zeros((seq_len, NUM_CUBOID_VERTS, 3))
        rand_color = (
            float(np.random.rand()),
            float(np.random.rand()),
            float(np.random.rand()),
        )
        logger.info(f"On log {log_id} with {traj_label.track_uuid}")
        for t in range(seq_len):

            obj_label_rec = ObjectLabelRecord(
                quaternion=traj_label.quaternions[t],
                translation=traj_label.translations[t],
                length=traj_label.max_length,
                width=traj_label.max_width,
                height=traj_label.max_height,
                occlusion=traj_label.occlusion[t],
            )

            timestamp = int(traj_label.timestamps[t])

            if self.bboxes_3d:
                bbox_ego_frame = obj_label_rec.as_3d_bbox()
            else:
                bbox_ego_frame = obj_label_rec.as_2d_bbox()

            bbox_city_fr, pose_city_to_ego = self.convert_bbox_to_city_frame(
                timestamp, self.dataset_dir, log_id, bbox_ego_frame)
            if bbox_city_fr is None:
                logger.warning(
                    f"\t {log_id}: Couldnt find the pose for {traj_label.track_uuid}!"
                )
                continue

            self.log_egopose_dict[log_id][timestamp] = pose_city_to_ego

            frame_rec = FrameRecord(
                bbox_city_fr=bbox_city_fr,
                bbox_ego_frame=bbox_ego_frame,
                occlusion_val=obj_label_rec.occlusion,
                color=rand_color,
                track_uuid=traj_label.track_uuid,
                obj_class_str=traj_label.obj_class_str,
            )

            self.log_timestamp_dict[log_id].setdefault(timestamp,
                                                       []).append(frame_rec)

            traj_city_fr[t] = bbox_city_fr

        return traj_city_fr

    def convert_bbox_to_city_frame(
        self,
        lidar_timestamp_ns: int,
        dataset_dir: str,
        log_id: str,
        bbox_ego_frame: np.ndarray,
    ) -> Tuple[np.ndarray, Dict[str, np.ndarray]]:
        """Convert bounding box to city frame.
        Args:
            lidar_timestamp_ns (int): Lidar timestamp.
            dataset_dir (str): representing full path to the log_ids.
            log_id (str): e.g. '3ced8dba-62d0-3930-8f60-ebeea2feabb8'.
            bbox_ego_frame (np.ndarray): Numpy array of shape (4,3), representing bounding box in egovehicle frame

        Returns:
            bbox_city_fr: Numpy array of shape (4,3), representing bounding box in CITY frame
            pose_city_to_ego: dictionary, has two fields: 'translation' and 'rotation'
                        describing the SE(3) for p_city = city_to_egovehicle_se3 * p_egovehicle
        """
        city_to_egovehicle_se3 = get_city_SE3_egovehicle_at_sensor_t(
            lidar_timestamp_ns, dataset_dir, log_id)
        if city_to_egovehicle_se3 is None:
            raise RuntimeError(
                f"Could not get city to egovehicle coordinate transformation at timestamp {lidar_timestamp_ns}"
            )

        bbox_city_fr = city_to_egovehicle_se3.transform_point_cloud(
            bbox_ego_frame)
        pose_city_to_ego = {
            "rotation": city_to_egovehicle_se3.rotation,
            "translation": city_to_egovehicle_se3.translation,
        }
        return bbox_city_fr, pose_city_to_ego
Example #14
class SimpleArgoverseTrackingDataLoader:
    """
    Simple abstraction for retrieving log data, given a path to the dataset.
    """
    def __init__(self, data_dir: str, labels_dir: str) -> None:
        """
        Args:
            data_dir: str, representing path to raw Argoverse data
            labels_dir: str, representing path to Argoverse data labels
        """
        self.data_dir = data_dir
        self.labels_dir = labels_dir
        self.sdb = SynchronizationDB(data_dir)

    def get_city_name(self, log_id: str) -> str:
        """
        Args:
            log_id: str

        Returns:
            city_name: str
        """
        city_info_fpath = f"{self.data_dir}/{log_id}/city_info.json"
        city_info = read_json_file(city_info_fpath)
        city_name = city_info["city_name"]
        assert isinstance(city_name, str)
        return city_name

    def get_log_calibration_data(self, log_id: str) -> Mapping[str, Any]:
        """
        Args:
            log_id: str

        Returns:
            log_calib_data: dictionary
        """
        calib_fpath = f"{self.data_dir}/{log_id}/vehicle_calibration_info.json"
        log_calib_data = read_json_file(calib_fpath)
        assert isinstance(log_calib_data, dict)
        return log_calib_data

    def get_city_to_egovehicle_se3(self, log_id: str,
                                   timestamp: int) -> Optional[SE3]:
        """
        Args:
            log_id: str, unique ID of vehicle log
            timestamp: int, timestamp of sensor observation, in nanoseconds

        Returns:
            city_to_egovehicle_se3: SE3 transformation to bring egovehicle frame point into city frame.
        """
        pose_fpath = f"{self.data_dir}/{log_id}/poses/city_SE3_egovehicle_{timestamp}.json"
        if not Path(pose_fpath).exists():
            return None
        pose_data = read_json_file(pose_fpath)
        rotation = np.array(pose_data["rotation"])
        translation = np.array(pose_data["translation"])
        city_to_egovehicle_se3 = SE3(rotation=quat2rotmat(rotation),
                                     translation=translation)
        return city_to_egovehicle_se3

    def get_closest_im_fpath(self, log_id: str, camera_name: str,
                             lidar_timestamp: int) -> Optional[str]:
        """
        Args:
            log_id: str, unique ID of vehicle log
            camera_name: str
            lidar_timestamp: int, timestamp of LiDAR sweep capture, in nanoseconds

        Returns:
            im_fpath, string representing path to image, or else None.
        """
        cam_timestamp = self.sdb.get_closest_cam_channel_timestamp(
            lidar_timestamp, camera_name, log_id)
        if cam_timestamp is None:
            return None
        im_dir = f"{self.data_dir}/{log_id}/{camera_name}"
        im_fname = f"{camera_name}_{cam_timestamp}.jpg"
        im_fpath = f"{im_dir}/{im_fname}"
        return im_fpath

    def get_closest_lidar_fpath(self, log_id: str,
                                cam_timestamp: int) -> Optional[str]:
        """
        Args:
            log_id: str, unique ID of vehicle log
            cam_timestamp: int, timestamp of image capture, in nanoseconds

        Returns:
            ply_fpath: str, string representing path to PLY file, or else None.
        """
        lidar_timestamp = self.sdb.get_closest_lidar_timestamp(
            cam_timestamp, log_id)
        if lidar_timestamp is None:
            return None
        lidar_dir = f"{self.data_dir}/{log_id}/lidar"
        ply_fname = f"PC_{lidar_timestamp}.ply"
        ply_fpath = f"{lidar_dir}/{ply_fname}"
        return ply_fpath

    def get_ordered_log_ply_fpaths(self, log_id: str) -> List[str]:
        """
        Args:
            log_id: str, unique ID of vehicle log
        Returns:
            ply_fpaths: List of strings, representing paths to ply files in this log
            """
        ply_fpaths = sorted(
            glob.glob(f"{self.data_dir}/{log_id}/lidar/PC_*.ply"))
        return ply_fpaths

    def get_ordered_log_cam_fpaths(self, log_id: str,
                                   camera_name: str) -> List[str]:
        """
        Args:
            log_id: str, unique ID of vehicle log
            camera_name: str

        Returns:
            cam_img_fpaths: List of strings, representing paths to JPEG files in this log,
                for a specific camera
        """
        cam_img_fpaths = sorted(
            glob.glob(
                f"{self.data_dir}/{log_id}/{camera_name}/{camera_name}_*.jpg"))
        return cam_img_fpaths

    def get_labels_at_lidar_timestamp(
            self, log_id: str,
            lidar_timestamp: int) -> Optional[List[Mapping[str, Any]]]:
        """
        Args:
            log_id: str, unique ID of vehicle log
            lidar_timestamp: int, timestamp of LiDAR sweep capture, in nanoseconds

        Returns:
            labels: list of label dictionaries, or None if no labels exist at this timestamp
        """
        timestamp_track_label_fpath = (
            f"{self.labels_dir}/{log_id}/per_sweep_annotations_amodal/tracked_object_labels_{lidar_timestamp}.json"
        )
        if not Path(timestamp_track_label_fpath).exists():
            return None

        labels = read_json_file(timestamp_track_label_fpath)
        assert isinstance(labels, list), labels
        return labels
Example #15
class SimpleArgoverseTrackingDataLoader:
    """
    Simple abstraction for retrieving log data, given a path to the dataset.
    """
    def __init__(self, data_dir: str, labels_dir: str) -> None:
        """
        Args:
            data_dir: str, representing path to raw Argoverse data
            labels_dir: str representing path to Argoverse data labels (e.g. labels or estimated detections/tracks)
        """
        self.data_dir = data_dir
        self.labels_dir = labels_dir
        self.sdb = SynchronizationDB(data_dir)

    def get_city_name(self, log_id: str) -> str:
        """
        Args:
            log_id: str

        Returns:
            city_name: str
        """
        city_info_fpath = f"{self.data_dir}/{log_id}/city_info.json"
        city_name = read_city_name(city_info_fpath)
        assert isinstance(city_name, str)
        return city_name

    def get_log_calibration_data(self, log_id: str) -> Mapping[str, Any]:
        """
        Args:
            log_id: str

        Returns:
            log_calib_data: dictionary
        """
        calib_fpath = f"{self.data_dir}/{log_id}/vehicle_calibration_info.json"
        log_calib_data = read_json_file(calib_fpath)
        assert isinstance(log_calib_data, dict)
        return log_calib_data

    def get_city_to_egovehicle_se3(self, log_id: str,
                                   timestamp: int) -> Optional[SE3]:
        """Deprecated version of get_city_SE3_egovehicle() below, as does not follow standard naming convention
        Args:
            log_id: str, unique ID of vehicle log
            timestamp: int, timestamp of sensor observation, in nanoseconds

        Returns:
            city_SE3_egovehicle: SE3 transformation to bring points in egovehicle frame into city frame.
        """
        return self.get_city_SE3_egovehicle(log_id, timestamp)

    def get_city_SE3_egovehicle(self, log_id: str,
                                timestamp: int) -> Optional[SE3]:
        """
        Args:
            log_id: str, unique ID of vehicle log
            timestamp: int, timestamp of sensor observation, in nanoseconds

        Returns:
            city_SE3_egovehicle: SE3 transformation to bring points in egovehicle frame into city frame.
        """
        return get_city_SE3_egovehicle_at_sensor_t(timestamp, self.data_dir,
                                                   log_id)

    def get_closest_im_fpath(self, log_id: str, camera_name: str,
                             lidar_timestamp: int) -> Optional[str]:
        """
        Args:
            log_id: str, unique ID of vehicle log
            camera_name: str
            lidar_timestamp: int, timestamp of LiDAR sweep capture, in nanoseconds

        Returns:
            im_fpath, string representing path to image, or else None.
        """
        cam_timestamp = self.sdb.get_closest_cam_channel_timestamp(
            lidar_timestamp, camera_name, log_id)
        if cam_timestamp is None:
            return None
        im_dir = f"{self.data_dir}/{log_id}/{camera_name}"
        im_fname = f"{camera_name}_{cam_timestamp}.jpg"
        im_fpath = f"{im_dir}/{im_fname}"
        return im_fpath

    def get_closest_lidar_fpath(self, log_id: str,
                                cam_timestamp: int) -> Optional[str]:
        """
        Args:
            log_id: str, unique ID of vehicle log
            cam_timestamp: int, timestamp of image capture, in nanoseconds

        Returns:
            ply_fpath: str, string representing path to PLY file, or else None.
        """
        lidar_timestamp = self.sdb.get_closest_lidar_timestamp(
            cam_timestamp, log_id)
        if lidar_timestamp is None:
            return None
        lidar_dir = f"{self.data_dir}/{log_id}/lidar"
        ply_fname = f"PC_{lidar_timestamp}.ply"
        ply_fpath = f"{lidar_dir}/{ply_fname}"
        return ply_fpath

    def get_ordered_log_ply_fpaths(self, log_id: str) -> List[str]:
        """
        Args:
            log_id: str, unique ID of vehicle log
        Returns:
            ply_fpaths: List of strings, representing paths to chronologically ordered ply files in this log.
                File paths are strings of the same length ending with a nanosecond timestamp, thus
                sorted() will place them in numerical order.
        """
        ply_fpaths = sorted(
            glob.glob(f"{self.data_dir}/{log_id}/lidar/PC_*.ply"))
        return ply_fpaths

    def get_ordered_log_cam_fpaths(self, log_id: str,
                                   camera_name: str) -> List[str]:
        """
        Args:
            log_id: str, unique ID of vehicle log
            camera_name: str

        Returns:
            cam_img_fpaths: List of strings, representing paths to ordered JPEG files in this log,
                for a specific camera
        """
        cam_img_fpaths = sorted(
            glob.glob(
                f"{self.data_dir}/{log_id}/{camera_name}/{camera_name}_*.jpg"))
        return cam_img_fpaths

    def get_labels_at_lidar_timestamp(
            self, log_id: str,
            lidar_timestamp: int) -> Optional[List[Mapping[str, Any]]]:
        """
        Args:
            log_id: str, unique ID of vehicle log
            lidar_timestamp: int, timestamp of LiDAR sweep capture, in nanoseconds

        Returns:
            labels: list of label dictionaries, or None if no labels exist at this timestamp
        """
        timestamp_track_label_fpath = (
            f"{self.labels_dir}/{log_id}/per_sweep_annotations_amodal/tracked_object_labels_{lidar_timestamp}.json"
        )
        if not Path(timestamp_track_label_fpath).exists():
            return None

        labels = read_json_file(timestamp_track_label_fpath)
        assert isinstance(labels, list), labels
        return labels
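
A minimal end-to-end sketch with this loader (placeholder paths; only methods defined above are used):

from pathlib import Path

loader = SimpleArgoverseTrackingDataLoader(
    data_dir="path/to/argoverse-tracking/val",    # placeholder
    labels_dir="path/to/argoverse-tracking/val",  # placeholder
)
for log_id in loader.sdb.get_valid_logs():
    for ply_fpath in loader.get_ordered_log_ply_fpaths(log_id):
        lidar_timestamp = int(Path(ply_fpath).stem.split("_")[-1])
        im_fpath = loader.get_closest_im_fpath(log_id, "ring_front_center", lidar_timestamp)
        labels = loader.get_labels_at_lidar_timestamp(log_id, lidar_timestamp)
        if im_fpath is None or labels is None:
            continue
        # ... consume the synchronized (image, sweep, labels) triple here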