    def __init__(
        self, dataset_dir: str, experiment_prefix: str, use_existing_files: bool = True, log_id: Optional[str] = None
    ) -> None:
        """Cache the accumulated trajectories per city, per log, and per frame
        for the tracking benchmark.
        """
        self.plot_lane_tangent_arrows = True
        self.plot_lidar_bev = True
        self.plot_lidar_in_img = False
        self.experiment_prefix = experiment_prefix
        self.dataset_dir = dataset_dir
        self.labels_dir = dataset_dir
        self.sdb = SynchronizationDB(self.dataset_dir)

        if log_id is None:
            tmp_dir = tempfile.gettempdir()
            per_city_traj_dict_fpath = f"{tmp_dir}/per_city_traj_dict_{experiment_prefix}.pkl"
            log_egopose_dict_fpath = f"{tmp_dir}/log_egopose_dict_{experiment_prefix}.pkl"
            log_timestamp_dict_fpath = f"{tmp_dir}/log_timestamp_dict_{experiment_prefix}.pkl"
            if not use_existing_files:
                # write the accumulated data dictionaries to disk
                PerFrameLabelAccumulator(dataset_dir, dataset_dir, experiment_prefix)

            self.per_city_traj_dict = load_pkl_dictionary(per_city_traj_dict_fpath)
            self.log_egopose_dict = load_pkl_dictionary(log_egopose_dict_fpath)
            self.log_timestamp_dict = load_pkl_dictionary(log_timestamp_dict_fpath)
        else:
            pfa = PerFrameLabelAccumulator(dataset_dir, dataset_dir, experiment_prefix, save=False)
            pfa.accumulate_per_log_data(log_id=log_id)
            self.per_city_traj_dict = pfa.per_city_traj_dict
            self.log_egopose_dict = pfa.log_egopose_dict
            self.log_timestamp_dict = pfa.log_timestamp_dict
    def set_log_id(self, log_id: Optional[str] = None) -> None:
        """Accumulate labels for a single log and collect its LiDAR sweep timestamps."""
        pfa = PerFrameLabelAccumulator(self.dataset_dir,
                                       self.dataset_dir,
                                       self.experiment_prefix,
                                       save=False)
        pfa.accumulate_per_log_data(log_id=log_id)
        self.per_city_traj_dict = pfa.per_city_traj_dict
        self.log_egopose_dict = pfa.log_egopose_dict
        self.log_timestamp_dict = pfa.log_timestamp_dict

        self.timestamps = []
        fpaths = sorted(
            glob.glob(f"{self.dataset_dir}/{log_id}/per_sweep_annotations_amodal/tracked_object_labels_*.json")
        )
        for fpath in fpaths:
            stamp = fpath.split("/")[-1].split(".")[0].split("_")[-1]
            self.timestamps.append(int(stamp))
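
    # Usage sketch (an assumption, not shown in this snippet): __init__ and set_log_id
    # are assumed to be methods of a map-visualization class, referred to here as
    # DatasetOnMapVisualizer; the dataset path and log id below are hypothetical.
    #
    #     viz = DatasetOnMapVisualizer(
    #         dataset_dir="/path/to/argoverse-tracking/sample",
    #         experiment_prefix="map_viz_demo",
    #         use_existing_files=True,
    #     )
    #     viz.set_log_id(log_id="some-log-uuid")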
def test_traj_label_place_in_city(frame_acc: PerFrameLabelAccumulator) -> None:
    traj_list = frame_acc.get_log_trajectory_labels("1")
    assert traj_list is not None
    city_frame_1_gt = [
        [[2.0, -1.0, -1.0], [2.0, -3.0, -1.0], [4.0, -1.0, -1.0], [4.0, -3.0, -1.0]],
        [[3.0, 1.0, 1.0], [3.0, 3.0, 1.0], [5.0, 1.0, 1.0], [5.0, 3.0, 1.0]],
        [[1.0, 4.0, 1.0], [1.0, 2.0, 1.0], [-1.0, 4.0, 1.0], [-1.0, 2.0, 1.0]],
    ]
    city_frame_0_gt = [
        [[2.0, 1.0, 1.0], [2.0, -1.0, 1.0], [0.0, 1.0, 1.0], [0.0, -1.0, 1.0]],
        [[1.0, 1.0, -1.0], [1.0, -1.0, -1.0], [3.0, 1.0, -1.0], [3.0, -1.0, -1.0]],
        [[1.0, 0.0, 1.0], [1.0, 2.0, 1.0], [3.0, 0.0, 1.0], [3.0, 2.0, 1.0]],
    ]
    for traj in traj_list:
        assert traj.obj_class_str == "VEHICLE"
        city_frame = frame_acc.place_trajectory_in_city_frame(traj, "1")
        if traj.track_uuid == "00000000-0000-0000-0000-000000000000":
            assert np.array_equal(city_frame_0_gt, city_frame)
        elif traj.track_uuid == "00000000-0000-0000-0000-000000000001":
            assert np.array_equal(city_frame_1_gt, city_frame)
@pytest.fixture  # decorator assumed; pytest injects this fixture by name into the test above
def frame_acc() -> PerFrameLabelAccumulator:
    pfa = PerFrameLabelAccumulator(TEST_DATA_LOC,
                                   TEST_DATA_LOC,
                                   "test",
                                   save=False)
    pfa.accumulate_per_log_data()
    pfa.accumulate_per_log_data(log_id="1")
    return pfa
def generate_vehicle_bev(dataset_dir: str = "", log_id: str = "", output_dir: str = "") -> None:
    """Rasterize ground-truth vehicle boxes visible in stereo_front_left into per-sweep BEV images."""
    argoverse_loader = ArgoverseTrackingLoader(dataset_dir)
    argoverse_data = argoverse_loader.get(log_id)
    camera = argoverse_loader.CAMERA_LIST[7]  # stereo_front_left
    calib = argoverse_data.get_calibration(camera)
    sdb = SynchronizationDB(dataset_dir, collect_single_log_id=log_id)

    calib_path = f"{dataset_dir}/{log_id}/vehicle_calibration_info.json"
    calib_data = read_json_file(calib_path)
    ind = 0
    for i, camera_entry in enumerate(calib_data["camera_data_"]):
        if camera_entry["key"] == "image_raw_stereo_front_left":
            ind = i
            break
    rotation = np.array(
        calib_data["camera_data_"][ind]["value"]["vehicle_SE3_camera_"]["rotation"]["coefficients"]
    )
    translation = np.array(calib_data["camera_data_"][ind]["value"]["vehicle_SE3_camera_"]["translation"])
    egovehicle_SE3_cam = SE3(rotation=quat2rotmat(rotation), translation=translation)
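    # egovehicle_SE3_cam maps points from the stereo_front_left camera frame into the
    # ego-vehicle frame; its inverse() is used below to bring ego-frame box corners
    # into the camera frame.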

    os.makedirs(os.path.join(output_dir, log_id, "car_bev_gt"), exist_ok=True)

    lidar_dir = os.path.join(dataset_dir, log_id, "lidar")
    ply_list = os.listdir(lidar_dir)

    pfa = PerFrameLabelAccumulator(dataset_dir,
                                   dataset_dir,
                                   "argoverse_bev_viz",
                                   save=False,
                                   bboxes_3d=True)
    pfa.accumulate_per_log_data(log_id)
    log_timestamp_dict = pfa.log_timestamp_dict
    for ply_name in ply_list:
        lidar_timestamp = int(ply_name.split(".")[0].split("_")[1])

        cam_timestamp = sdb.get_closest_cam_channel_timestamp(
            lidar_timestamp, "stereo_front_left", str(log_id))
        image_path = os.path.join(
            output_dir, str(log_id), "car_bev_gt",
            "stereo_front_left_" + str(cam_timestamp) + ".jpg")
        objects = log_timestamp_dict[log_id][lidar_timestamp]
        top_view = np.zeros((256, 256), dtype=np.uint8)

        all_occluded = True
        for frame_rec in objects:
            if frame_rec.occlusion_val != IS_OCCLUDED_FLAG:
                all_occluded = False

        if not all_occluded:
            for frame_rec in objects:
                bbox_ego_frame = frame_rec.bbox_ego_frame
                uv = calib.project_ego_to_image(bbox_ego_frame).T
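                # Keep only boxes whose corners all project inside the image and lie in
                # front of the camera. `size` is assumed to be the (height, width) of the
                # stereo_front_left image, defined at module level (not shown in this snippet).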
                all_in_view = np.all(
                    (uv[0, :] >= 0.0)
                    & (uv[0, :] < size[1] - 1.0)
                    & (uv[1, :] >= 0.0)
                    & (uv[1, :] < size[0] - 1.0)
                    & (uv[2, :] > 0)
                )
                if not all_in_view:
                    continue
                bbox_cam_fr = egovehicle_SE3_cam.inverse().transform_point_cloud(bbox_ego_frame)
                X = bbox_cam_fr[:, 0]
                Z = bbox_cam_fr[:, 2]

                if (frame_rec.occlusion_val != IS_OCCLUDED_FLAG
                        and frame_rec.obj_class_str == "VEHICLE"):
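                    # `res` is assumed to be the BEV resolution in meters per pixel (a
                    # module-level constant not shown here); the -20 m and 40 m offsets
                    # shift the ego-vehicle origin into the 256x256 top-view grid.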
                    y_img = (-Z / res).astype(np.int32)
                    x_img = (X / res).astype(np.int32)
                    x_img -= int(np.floor(-20 / res))
                    y_img += int(np.floor(40 / res))
                    box = np.array([
                        [x_img[2], y_img[2]],
                        [x_img[6], y_img[6]],
                        [x_img[7], y_img[7]],
                        [x_img[3], y_img[3]],
                    ])
                    cv2.drawContours(top_view, [box], 0, 255, -1)

        cv2.imwrite(image_path, top_view)
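
# A minimal usage sketch: the paths and log id below are hypothetical, and the
# module-level `size` and `res` constants assumed by generate_vehicle_bev must be
# defined before calling it.
if __name__ == "__main__":
    generate_vehicle_bev(
        dataset_dir="/path/to/argoverse-tracking/sample",  # hypothetical path
        log_id="some-log-uuid",                            # hypothetical log id
        output_dir="/path/to/bev_output",                  # hypothetical path
    )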