Example No. 1
def visualize_ground_lidar_pts(log_id: str, dataset_dir: str,
                               experiment_prefix: str):
    """Process a log by drawing the LiDAR returns that are classified as belonging
    to the ground surface in a red to green colormap in the image.

    Args:
        log_id: The ID of a log
        dataset_dir: Where the dataset is stored
        experiment_prefix: Output prefix
    """
    sdb = SynchronizationDB(dataset_dir, collect_single_log_id=log_id)

    city_info_fpath = f"{dataset_dir}/{log_id}/city_info.json"
    city_info = read_json_file(city_info_fpath)
    city_name = city_info["city_name"]
    avm = ArgoverseMap()

    ply_fpaths = sorted(glob.glob(f"{dataset_dir}/{log_id}/lidar/PC_*.ply"))

    for i, ply_fpath in enumerate(ply_fpaths):
        if i % 500 == 0:
            print(f"\tOn file {i} of {log_id}")
        lidar_timestamp_ns = ply_fpath.split("/")[-1].split(".")[0].split(
            "_")[-1]

        pose_fpath = f"{dataset_dir}/{log_id}/poses/city_SE3_egovehicle_{lidar_timestamp_ns}.json"
        if not Path(pose_fpath).exists():
            continue

        pose_data = read_json_file(pose_fpath)
        rotation = np.array(pose_data["rotation"])
        translation = np.array(pose_data["translation"])
        city_to_egovehicle_se3 = SE3(rotation=quat2rotmat(rotation),
                                     translation=translation)

        lidar_pts = load_ply(ply_fpath)

        lidar_timestamp_ns = int(lidar_timestamp_ns)
        draw_ground_pts_in_image(
            sdb,
            lidar_pts,
            city_to_egovehicle_se3,
            avm,
            log_id,
            lidar_timestamp_ns,
            city_name,
            dataset_dir,
            experiment_prefix,
        )

    for camera_name in CAMERA_LIST:
        if "stereo" in camera_name:
            fps = 5
        else:
            fps = 10
        cmd = f"ffmpeg -r {fps} -f image2 -i '{experiment_prefix}_ground_viz/{log_id}/{camera_name}/%*.jpg' {experiment_prefix}_ground_viz/{experiment_prefix}_{log_id}_{camera_name}_{fps}fps.mp4"

        print(cmd)
        run_command(cmd)
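The timestamp parsing above (chained splits on "/", ".", and "_") can be expressed more readably with pathlib; a minimal sketch, assuming only the PC_<nanoseconds>.ply naming visible in the glob pattern above:

from pathlib import Path

def timestamp_from_ply_path(ply_fpath: str) -> int:
    """Extract the nanosecond timestamp from a path like .../lidar/PC_315969629019741000.ply."""
    return int(Path(ply_fpath).stem.split("_")[-1])

assert timestamp_from_ply_path("x/lidar/PC_315969629019741000.ply") == 315969629019741000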
Example No. 2
def test_save_as_json() -> None:
    """Ensure that JSON serialization of a class instance works correctly."""
    bSc = Sim2(R=np.array([[0, 1], [1, 0]]), t=np.array([-5, 5]), s=0.1)
    save_fpath = TEST_DATA_ROOT / "b_Sim2_c.json"
    bSc.save_as_json(save_fpath=save_fpath)

    bSc_dict = read_json_file(save_fpath)
    assert bSc_dict["R"] == [0, 1, 1, 0]
    assert bSc_dict["t"] == [-5, 5]
    assert bSc_dict["s"] == 0.1
Example No. 3
    def accumulate_per_log_data(self, log_id: Optional[str] = None) -> None:
        """Loop through all of the logs that we have. Get the labels that pertain to the
        benchmark (i.e. tracking or detection) that we are interested in.

        We use a unique color to describe each trajectory, and then we store each
        trajectory instance, along with its color, *per frame*, per log.

        """
        MIAMI_CUBOID_COUNT = 0
        PITT_CUBOID_COUNT = 0

        log_fpaths = glob.glob(f"{self.dataset_dir}/*")
        log_fpaths = [f for f in log_fpaths if os.path.isdir(f)]
        num_benchmark_logs = len(log_fpaths)

        for log_idx, log_fpath in enumerate(log_fpaths):
            log_id_ = log_fpath.split("/")[-1]
            if log_id is not None:
                if log_id_ != log_id:
                    continue
            if log_id_ not in self.sdb.get_valid_logs():
                continue

            city_info_fpath = f"{self.dataset_dir}/{log_id_}/city_info.json"
            city_info = read_json_file(city_info_fpath)
            log_city_name = city_info["city_name"]
            if log_city_name not in self.per_city_traj_dict:
                logger.warning(f"{log_city_name} not listed city")
                continue

            self.log_egopose_dict[log_id_] = {}
            self.log_timestamp_dict[log_id_] = {}

            traj_labels = self.get_log_trajectory_labels(log_id_)
            if traj_labels is None:
                continue  # skip this log since no tracking data

            for traj_idx, traj_label in enumerate(traj_labels):
                if (traj_idx % 500) == 0:
                    logger.info(f"On traj index {traj_idx}")
                traj_city_fr = self.place_trajectory_in_city_frame(
                    traj_label, log_id_)
                # we don't know the city name until here
                if traj_idx == 0:
                    logger.info(
                        f"Log {log_id_} has {len(traj_labels)} trajectories in {log_city_name}"
                    )

                self.per_city_traj_dict[log_city_name].append(
                    (traj_city_fr, log_id_))

        logger.info(f"We looked at {num_benchmark_logs} tracking logs")
        logger.info(
            f"Miami has {MIAMI_CUBOID_COUNT} and Pittsburgh has {PITT_CUBOID_COUNT} cuboids"
        )
Example No. 4
def test_read_json_file() -> None:
    """Test reading from a JSON file.

    Load a file that has the following dictionary saved as JSON:
    dict = {'a':1,'b':'2','c':[9,8,7,6,5,'d','c','b','a']}
    """
    fpath = _TEST_DIR / "test_data/json_read_test_file.json"
    json_data = read_json_file(fpath)

    gt_dict = {"a": 1, "b": "2", "c": [9, 8, 7, 6, 5, "d", "c", "b", "a"]}
    assert gt_dict == json_data
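For reference, a minimal sketch of what a read_json_file helper like the one exercised here typically wraps (the actual argoverse implementation may differ):

import json
from pathlib import Path
from typing import Any, Union

def read_json_file_sketch(fpath: Union[str, Path]) -> Any:
    """Load and return the deserialized contents of a JSON file."""
    with open(fpath, "r") as f:
        return json.load(f)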
Example No. 5
    def get_log_calibration_data(self, log_id: str) -> Mapping[str, Any]:
        """
        Args:
            log_id: str

        Returns:
            log_calib_data: dictionary
        """
        calib_fpath = f"{self.data_dir}/{log_id}/vehicle_calibration_info.json"
        log_calib_data = read_json_file(calib_fpath)
        assert isinstance(log_calib_data, dict)
        return log_calib_data
Example No. 6
def test_leave_only_roi_region():
    """Test the leave_only_roi_region function.

    Signature: (lidar_pts, egovehicle_to_city_se3, ground_removal_method, city_name='MIA')
    """
    pc = ply_loader.load_ply(TEST_DATA_LOC / "1/lidar/PC_0.ply")
    pose_data = read_json_file(TEST_DATA_LOC / "1/poses/city_SE3_egovehicle_0.json")
    rotation = np.array(pose_data["rotation"])
    translation = np.array(pose_data["translation"])
    ego_R = quat2rotmat(rotation)
    ego_t = translation
    egovehicle_to_city_se3 = SE3(rotation=ego_R, translation=ego_t)
    pts = eval_utils.leave_only_roi_region(pc, egovehicle_to_city_se3, ground_removal_method="map")
Example No. 7
    def get_city_name(self, log_id: str) -> str:
        """
        Args:
            log_id: str

        Returns:
            city_name: str
        """
        city_info_fpath = f"{self.data_dir}/{log_id}/city_info.json"
        city_info = read_json_file(city_info_fpath)
        city_name = city_info["city_name"]
        assert isinstance(city_name, str)
        return city_name
Example No. 8
    def get_log_calibration_data(self, log_id: str) -> Mapping[str, Any]:
        """Get calibration data.

        Args:
            log_id: Unique ID of vehicle log.

        Returns:
            log_calib_data: calibration dictionary.
        """
        calib_fpath = (
            f"{self.data_dir}/rectified_stereo_images_v1.1/{self.split_name}/{log_id}/"
            f"vehicle_calibration_stereo_info.json"
        )

        log_calib_data = read_json_file(calib_fpath)
        assert isinstance(log_calib_data, dict)
        return log_calib_data
Example No. 9
    def get_city_to_egovehicle_se3(self, log_id: str,
                                   timestamp: int) -> Optional[SE3]:
        """
        Args:
            log_id: str, unique ID of vehicle log
            timestamp: int, timestamp of sensor observation, in nanoseconds

        Returns:
            city_to_egovehicle_se3: SE3 transformation to bring egovehicle frame point into city frame.
        """
        pose_fpath = f"{self.data_dir}/{log_id}/poses/city_SE3_egovehicle_{timestamp}.json"
        if not Path(pose_fpath).exists():
            return None
        pose_data = read_json_file(pose_fpath)
        rotation = np.array(pose_data["rotation"])
        translation = np.array(pose_data["translation"])
        city_to_egovehicle_se3 = SE3(rotation=quat2rotmat(rotation),
                                     translation=translation)
        return city_to_egovehicle_se3
Example No. 10
    def get_labels_at_lidar_timestamp(
            self, log_id: str,
            lidar_timestamp: int) -> Optional[List[Mapping[str, Any]]]:
        """
        Args:
            log_id: str, unique ID of vehicle log
            lidar_timestamp: int, timestamp of LiDAR sweep capture, in nanoseconds

        Returns:
            labels: dictionary
        """
        timestamp_track_label_fpath = (
            f"{self.labels_dir}/{log_id}/per_sweep_annotations_amodal/tracked_object_labels_{lidar_timestamp}.json"
        )
        if not Path(timestamp_track_label_fpath).exists():
            return None

        labels = read_json_file(timestamp_track_label_fpath)
        assert isinstance(labels, list), labels
        return labels
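The evaluation code in the later examples filters these per-sweep labels by "label_class"; a minimal self-contained sketch of that step:

from typing import Any, List, Mapping

def filter_labels_by_class(labels: List[Mapping[str, Any]],
                           category: str = "VEHICLE") -> List[Mapping[str, Any]]:
    """Keep only the annotations whose label_class matches the requested category."""
    return [label for label in labels if label["label_class"] == category]

assert filter_labels_by_class(
    [{"label_class": "VEHICLE"}, {"label_class": "PEDESTRIAN"}]
) == [{"label_class": "VEHICLE"}]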
Example No. 11
def get_city_SE3_egovehicle_at_sensor_t(sensor_timestamp: int, dataset_dir: str, log_id: str) -> Optional[SE3]:
    """Get transformation from ego-vehicle to city coordinates at a given timestamp.

    Args:
        sensor_timestamp: integer representing timestamp when sensor measurement captured, in nanoseconds
        dataset_dir: where the dataset is stored
        log_id: string representing unique log identifier

    Returns:
        SE3 for translating ego-vehicle coordinates to city coordinates if found, else None.
    """
    pose_fpath = f"{dataset_dir}/{log_id}/poses/city_SE3_egovehicle_{sensor_timestamp}.json"
    if not Path(pose_fpath).exists():
        logger.error(f"missing pose {sensor_timestamp}")
        return None

    pose_city_to_ego = read_json_file(pose_fpath)
    rotation = np.array(pose_city_to_ego["rotation"])
    translation = np.array(pose_city_to_ego["translation"])
    city_to_egovehicle_se3 = SE3(rotation=quat2rotmat(rotation), translation=translation)
    return city_to_egovehicle_se3
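A minimal sketch of applying the returned transform to egovehicle points, mirroring the p_city = R @ p_ego + t convention used with SE3.transform_point_cloud elsewhere in these examples:

import numpy as np

def transform_point_cloud(rotation: np.ndarray, translation: np.ndarray,
                          pts_ego: np.ndarray) -> np.ndarray:
    """Apply p_city = R @ p_ego + t row-wise to an (N, 3) point cloud."""
    return pts_ego @ rotation.T + translation

pts_city = transform_point_cloud(np.eye(3), np.array([1.0, 2.0, 3.0]), np.zeros((4, 3)))
assert np.allclose(pts_city, [1.0, 2.0, 3.0])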
Example No. 12
    def build_hallucinated_lane_bbox_index(self) -> Tuple[Mapping[str, Any], Mapping[str, Any]]:
        """
        Populate the pre-computed hallucinated extent of each lane polygon, to allow for fast
        queries.

        Returns:
            city_halluc_bbox_table
            city_halluc_tableidx_to_laneid_map
        """

        city_halluc_bbox_table = {}
        city_halluc_tableidx_to_laneid_map = {}

        for city_name, city_id in self.city_name_to_city_id_dict.items():
            json_fpath = MAP_FILES_ROOT / f"{city_name}_{city_id}_tableidx_to_laneid_map.json"
            city_halluc_tableidx_to_laneid_map[city_name] = read_json_file(json_fpath)

            npy_fpath = MAP_FILES_ROOT / f"{city_name}_{city_id}_halluc_bbox_table.npy"
            city_halluc_bbox_table[city_name] = np.load(npy_fpath)

        return city_halluc_bbox_table, city_halluc_tableidx_to_laneid_map
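A sketch of how the two returned tables line up, with hypothetical contents (JSON object keys are always strings, hence the string index into the laneid map; the bbox row layout is an assumption):

import numpy as np

# hypothetical stand-ins for the two returned tables
halluc_bbox_table = np.array([[0.0, 0.0, 10.0, 5.0]])  # one bbox row per lane (assumed layout)
tableidx_to_laneid_map = {"0": 9600001}                # table row index (as str) -> lane id

row_idx = 0
lane_id = tableidx_to_laneid_map[str(row_idx)]
assert halluc_bbox_table[row_idx].shape == (4,) and lane_id == 9600001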
Example No. 13
def generate_annotation(annotation, argoverse_data, log_id, distance_range=20):
    # iterate over synchronized frames; the image list is only used to find the length
    for idx in range(len(argoverse_data.image_list_sync['ring_front_center'])):
        dic = {}
        dic['context_name'] = str(log_id)
        dic['frame_name'] = str(
            argoverse_data.get_image_sync(idx=idx, camera=camera_name[0],
                                          load=False)).split('/')[-1]
        dic["direction"] = [{"name": "ring_front_center", "object": []},
                            {"name": "ring_front_left", "object": []},
                            {"name": "ring_front_right", "object": []},
                            {"name": "ring_side_left", "object": []},
                            {"name": "ring_side_right", "object": []},
                            {"name": "ring_rear_left", "object": []},
                            {"name": "ring_rear_right", "object": []}]
        dic['lidar_index'] = idx
        for camera in camera_name:
            img = argoverse_data.get_image_sync(idx, camera=camera)
            objects = argoverse_data.get_label_object(idx)
            calib = argoverse_data.get_calibration(camera=camera)
            for obj in objects:
                box3d_pts_3d = obj.as_3d_bbox()
                cx = box3d_pts_3d.mean(axis=0)[0]
                cy = box3d_pts_3d.mean(axis=0)[1]
                dist = np.sqrt(cx ** 2 + cy ** 2)
                if dist > distance_range:
                    continue
                # loaded but unused below; kept from the original
                calib_fpath = os.path.join(args.root_dir, log_id,
                                           'vehicle_calibration_info.json')
                calib_data = read_json_file(calib_fpath)
                uv_cam = calib.project_ego_to_cam(box3d_pts_3d)
                uv, uv_cam_T, valid_pts_bool, camera_config = proj_cam_to_uv(
                    uv_cam, copy.deepcopy(calib.camera_config))
                # use all(valid_pts_bool) instead to require the object to lie
                # completely inside the frame
                if any(valid_pts_bool):
                    objdic = {}
                    objdic['type'] = obj.label_class
                    objdic['3d coordinates'] = box3d_pts_3d.tolist()
                    dic['direction'][camera_name.index(camera)]["object"].append(objdic)
        annotation.append(dic)
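The distance gate above keeps objects whose xy centroid lies within distance_range of the egovehicle; the same check as a small self-contained helper:

import numpy as np

def within_range(box3d_pts_3d: np.ndarray, distance_range: float = 20.0) -> bool:
    """True if the box centroid's xy distance from the egovehicle origin is within range."""
    cx, cy = box3d_pts_3d.mean(axis=0)[:2]
    return bool(np.hypot(cx, cy) <= distance_range)

assert within_range(np.zeros((8, 3))) and not within_range(np.full((8, 3), 100.0))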
Example No. 14
def eval_tracks(
    path_tracker_outputs: List[_PathLike],
    path_datasets: List[_PathLike],
    d_min: float,
    d_max: float,
    out_file: TextIO,
    centroid_method: str,
    category: str = "VEHICLE",
) -> None:
    """Evaluate tracking output.

    Args:
        path_tracker_outputs: list of paths to tracker outputs, one per log
        path_datasets: list of paths to dataset logs, aligned with path_tracker_outputs
        d_min: minimum distance range, in meters
        d_max: maximum distance range, in meters
        out_file: output file object
        centroid_method: method for ground truth centroid estimation
        category: label class to evaluate, such as "VEHICLE" or "PEDESTRIAN"
    """
    acc_c = mm.MOTAccumulator(auto_id=True)
    acc_i = mm.MOTAccumulator(auto_id=True)
    acc_o = mm.MOTAccumulator(auto_id=True)

    ID_gt_all: List[str] = []

    count_all: int = 0

    for path_tracker_output, path_dataset in zip(path_tracker_outputs,
                                                 path_datasets):

        path_track_data = sorted(
            glob.glob(os.fspath(path_tracker_output) + "/*"))

        log_id = pathlib.Path(path_dataset).name
        logger.info("log_id = %s", log_id)

        city_info_fpath = f"{path_dataset}/city_info.json"
        city_info = read_json_file(city_info_fpath)
        city_name = city_info["city_name"]
        logger.info("city name = %s", city_name)

        for ind_frame in range(len(path_track_data)):
            if ind_frame % 50 == 0:
                logger.info("%d/%d" % (ind_frame, len(path_track_data)))

            timestamp_lidar = int(path_track_data[ind_frame].split("/")
                                  [-1].split("_")[-1].split(".")[0])
            path_gt = os.path.join(
                path_dataset, "per_sweep_annotations_amodal",
                f"tracked_object_labels_{timestamp_lidar}.json")

            if not os.path.exists(path_gt):
                logger.warning("Missing ", path_gt)
                continue

            gt_data = read_json_file(path_gt)

            pose_data = read_json_file(
                f"{path_dataset}/poses/city_SE3_egovehicle_{timestamp_lidar}.json"
            )
            rotation = np.array(pose_data["rotation"])
            translation = np.array(pose_data["translation"])
            ego_R = quat2rotmat(rotation)
            ego_t = translation
            egovehicle_to_city_se3 = SE3(rotation=ego_R, translation=ego_t)

            gt: Dict[str, Dict[str, Any]] = {}
            id_gts = []
            for i in range(len(gt_data)):

                if gt_data[i]["label_class"] != category:
                    continue

                bbox, orientation = label_to_bbox(gt_data[i])

                center = np.array([
                    gt_data[i]["center"]["x"], gt_data[i]["center"]["y"],
                    gt_data[i]["center"]["z"]
                ])
                if bbox[3] > 0 and in_distance_range_pose(
                        np.zeros(3), center, d_min, d_max):
                    track_label_uuid = gt_data[i]["track_label_uuid"]
                    gt[track_label_uuid] = {}
                    gt[track_label_uuid]["centroid"] = center

                    gt[track_label_uuid]["bbox"] = bbox
                    gt[track_label_uuid]["orientation"] = orientation
                    gt[track_label_uuid]["width"] = gt_data[i]["width"]
                    gt[track_label_uuid]["length"] = gt_data[i]["length"]
                    gt[track_label_uuid]["height"] = gt_data[i]["height"]

                    if track_label_uuid not in ID_gt_all:
                        ID_gt_all.append(track_label_uuid)

                    id_gts.append(track_label_uuid)

            tracks: Dict[str, Dict[str, Any]] = {}
            id_tracks: List[str] = []

            track_data = read_json_file(path_track_data[ind_frame])

            for track in track_data:
                key = track["track_label_uuid"]

                if track["label_class"] != category or track["height"] == 0:
                    continue

                center = np.array([
                    track["center"]["x"], track["center"]["y"],
                    track["center"]["z"]
                ])
                bbox, orientation = label_to_bbox(track)
                if in_distance_range_pose(np.zeros(3), center, d_min, d_max):
                    tracks[key] = {}
                    tracks[key]["centroid"] = center
                    tracks[key]["bbox"] = bbox
                    tracks[key]["orientation"] = orientation
                    tracks[key]["width"] = track["width"]
                    tracks[key]["length"] = track["length"]
                    tracks[key]["height"] = track["height"]

                    id_tracks.append(key)

            dists_c: List[List[float]] = []
            dists_i: List[List[float]] = []
            dists_o: List[List[float]] = []
            for gt_key, gt_value in gt.items():
                gt_track_data_c: List[float] = []
                gt_track_data_i: List[float] = []
                gt_track_data_o: List[float] = []
                dists_c.append(gt_track_data_c)
                dists_i.append(gt_track_data_i)
                dists_o.append(gt_track_data_o)
                for track_key, track_value in tracks.items():
                    count_all += 1
                    gt_track_data_c.append(
                        get_distance(gt_value, track_value, "centroid"))
                    gt_track_data_i.append(
                        get_distance(gt_value, track_value, "iou"))
                    gt_track_data_o.append(
                        get_distance(gt_value, track_value, "orientation"))

            acc_c.update(id_gts, id_tracks, dists_c)
            acc_i.update(id_gts, id_tracks, dists_i)
            acc_o.update(id_gts, id_tracks, dists_o)
    if count_all == 0:
        # fix for when all hypotheses are empty;
        # py-motmetrics currently doesn't support this, see https://github.com/cheind/py-motmetrics/issues/49
        acc_c.update(id_gts, ["dummy_id"], np.ones(np.shape(id_gts)) * np.inf)
        acc_i.update(id_gts, ["dummy_id"], np.ones(np.shape(id_gts)) * np.inf)
        acc_o.update(id_gts, ["dummy_id"], np.ones(np.shape(id_gts)) * np.inf)

    summary = mh.compute(
        acc_c,
        metrics=[
            "num_frames",
            "mota",
            "motp",
            "idf1",
            "mostly_tracked",
            "mostly_lost",
            "num_false_positives",
            "num_misses",
            "num_switches",
            "num_fragmentations",
        ],
        name="acc",
    )
    logger.info("summary = %s", summary)
    num_tracks = len(ID_gt_all)

    fn = os.path.basename(path_tracker_output)
    num_frames = summary["num_frames"][0]
    mota = summary["mota"][0] * 100
    motp_c = summary["motp"][0]
    idf1 = summary["idf1"][0]
    most_track = summary["mostly_tracked"][0] / num_tracks
    most_lost = summary["mostly_lost"][0] / num_tracks
    num_fp = summary["num_false_positives"][0]
    num_miss = summary["num_misses"][0]
    num_switch = summary["num_switches"][0]
    num_frag = summary["num_fragmentations"][0]

    acc_c.events.loc[acc_c.events.Type != "RAW",
                     "D"] = acc_i.events.loc[acc_c.events.Type != "RAW", "D"]

    sum_motp_i = mh.compute(acc_c, metrics=["motp"], name="acc")
    logger.info("MOTP-I = %s", sum_motp_i)
    num_tracks = len(ID_gt_all)

    fn = os.path.basename(path_tracker_output)
    motp_i = sum_motp_i["motp"][0]

    acc_c.events.loc[acc_c.events.Type != "RAW",
                     "D"] = acc_o.events.loc[acc_c.events.Type != "RAW", "D"]
    sum_motp_o = mh.compute(acc_c, metrics=["motp"], name="acc")
    logger.info("MOTP-O = %s", sum_motp_o)
    num_tracks = len(ID_gt_all)

    fn = os.path.basename(path_tracker_output)
    motp_o = sum_motp_o["motp"][0]

    out_string = (
        f"{fn} {num_frames} {mota:.2f} {motp_c:.2f} {motp_o:.2f} {motp_i:.2f} {idf1:.2f} {most_track:.2f} "
        f"{most_lost:.2f} {num_fp} {num_miss} {num_switch} {num_frag} \n")
    out_file.write(out_string)
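A minimal sketch of the accumulator pattern above, assuming py-motmetrics is importable as mm just as in this example; each update takes the frame's ground-truth ids, hypothesis ids, and a GT-by-hypothesis distance matrix, where np.nan marks a forbidden pairing (`mh` in the example above is presumably created the same way):

import motmetrics as mm
import numpy as np

acc = mm.MOTAccumulator(auto_id=True)
# one frame: two ground-truth ids, one hypothesis id
acc.update(["gt0", "gt1"], ["hyp0"], [[0.2], [np.nan]])
mh = mm.metrics.create()
print(mh.compute(acc, metrics=["num_frames", "mota"], name="acc"))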
Example No. 15
def generate_weak_static(dataset_dir="", log_id="", output_dir=""):

    argoverse_loader = ArgoverseTrackingLoader(dataset_dir)
    argoverse_data = argoverse_loader.get(log_id)
    camera = argoverse_loader.CAMERA_LIST[7]
    calib = argoverse_data.get_calibration(camera)
    ply_fpath = os.path.join(dataset_dir, log_id, 'lidar')
    ply_locs = []
    for idx, ply_name in enumerate(os.listdir(ply_fpath)):
        ply_loc = np.array([idx, int(ply_name.split('.')[0].split('_')[-1])])
        ply_locs.append(ply_loc)
    ply_locs = np.array(ply_locs)
    lidar_timestamps = sorted(ply_locs[:, 1])

    calib_path = f"{dataset_dir}/{log_id}/vehicle_calibration_info.json"
    calib_data = read_json_file(calib_path)
    ind = 0
    for i in range(9):
        if calib_data['camera_data_'][i]['key'] == \
                                    'image_raw_stereo_front_left':
            ind = i
            break
    rotation = np.array(calib_data['camera_data_'][ind]['value']
                        ['vehicle_SE3_camera_']['rotation']['coefficients'])
    translation = np.array(calib_data['camera_data_'][ind]['value']
                           ['vehicle_SE3_camera_']['translation'])
    sparse_road_bev_loc = os.path.join(output_dir, log_id, 'sparse_road_bev')
    os.makedirs(sparse_road_bev_loc, exist_ok=True)
    dense_city_road_pts = []

    for idx in range(len(lidar_timestamps)):

        occupancy_map = np.zeros((256, 256))
        lidar_timestamp = lidar_timestamps[idx]
        try:
            img_loc = argoverse_data.get_image_list_sync(camera=camera)[idx]
        except BaseException:
            continue
        cam_timestamp = int(img_loc.split('.')[0].split('_')[-1])
        segment_loc = os.path.join(
            dataset_dir, log_id, 'segment',
            'stereo_front_left_' + str(cam_timestamp) + '.png')
        segment_img = cv2.imread(segment_loc, 0)

        pc = argoverse_data.get_lidar(idx)
        uv = calib.project_ego_to_image(pc).T
        idx_ = np.where(
            np.logical_and.reduce(
                (uv[0, :] >= 0.0, uv[0, :] < np.shape(segment_img)[1] - 1.0,
                 uv[1, :] >= 0.0, uv[1, :] < np.shape(segment_img)[0] - 1.0,
                 uv[2, :] > 0)))
        idx_ = idx_[0]
        uv1 = uv[:, idx_]
        uv1 = uv1.T

        x = uv1[:, 0].astype(np.int32)
        y = uv1[:, 1].astype(np.int32)
        col = segment_img[y, x]
        # keep pixels whose segmentation class is 13 or 24 (the road classes here)
        filt = np.logical_or(col == 13, col == 24)
        road_uv1 = uv1[filt, :]

        lidar_road_pts = calib.project_image_to_ego(road_uv1)

        cam_road_pts = calib.project_ego_to_cam(lidar_road_pts)
        X = cam_road_pts[:, 0]
        Z = cam_road_pts[:, 2]
        filt2 = np.logical_and(X > -20, X < 20)
        filt2 = np.logical_and(filt2, np.logical_and(Z > 0, Z < 40))
        y_img = (-Z[filt2] / res).astype(np.int32)
        x_img = (X[filt2] / res).astype(np.int32)
        x_img -= int(np.floor(-20 / res))
        y_img += int(np.floor(40 / res))
        occupancy_map[y_img, x_img] = 255
        occ_map_loc = os.path.join(
            sparse_road_bev_loc,
            'stereo_front_left_' + str(cam_timestamp) + '.png')
        cv2.imwrite(occ_map_loc, occupancy_map)
        pose_fpath = os.path.join(
            dataset_dir, log_id, "poses",
            "city_SE3_egovehicle_" + str(lidar_timestamp) + ".json")
        if not Path(pose_fpath).exists():
            print("not a apth")
            continue
        pose_data = read_json_file(pose_fpath)
        rotation = np.array(pose_data["rotation"])
        translation = np.array(pose_data["translation"])
        city_to_egovehicle_se3 = SE3(rotation=quat2rotmat(rotation),
                                     translation=translation)
        sparse_city_road_pts = city_to_egovehicle_se3.transform_point_cloud(
            lidar_road_pts)
        if len(dense_city_road_pts) == 0:
            dense_city_road_pts = sparse_city_road_pts
        else:
            dense_city_road_pts = np.concatenate(
                (dense_city_road_pts, sparse_city_road_pts), axis=0)

    dense_road_bev_loc = os.path.join(output_dir, log_id, 'dense_road_bev')
    os.makedirs(dense_road_bev_loc, exist_ok=True)

    for idx in range(len(lidar_timestamps)):

        occupancy_map = np.zeros((256, 256))
        lidar_timestamp = lidar_timestamps[idx]
        try:
            img_loc = argoverse_data.get_image_list_sync(camera=camera)[idx]
        except BaseException:
            continue
        cam_timestamp = int(img_loc.split('.')[0].split('_')[-1])
        pose_fpath = os.path.join(
            dataset_dir, log_id, "poses",
            "city_SE3_egovehicle_" + str(lidar_timestamp) + ".json")
        if not Path(pose_fpath).exists():
            print("not a path")
            continue
        pose_data = read_json_file(pose_fpath)
        rotation = np.array(pose_data["rotation"])
        translation = np.array(pose_data["translation"])
        city_to_egovehicle_se3 = SE3(rotation=quat2rotmat(rotation),
                                     translation=translation)
        current_ego_frame_road_pts =\
            city_to_egovehicle_se3\
            .inverse_transform_point_cloud(dense_city_road_pts)
        current_cam_frame_road_pts = calib.project_ego_to_cam(
            current_ego_frame_road_pts)
        X = current_cam_frame_road_pts[:, 0]
        Z = current_cam_frame_road_pts[:, 2]
        filt2 = np.logical_and(X > -20, X < 20)
        filt2 = np.logical_and(filt2, np.logical_and(Z > 0, Z < 40))
        y_img = (-Z[filt2] / res).astype(np.int32)
        x_img = (X[filt2] / res).astype(np.int32)
        x_img -= int(np.floor(-20 / res))
        y_img += int(np.floor(40 / res)) - 1

        occupancy_map[y_img, x_img] = 255
        occ_map_loc = os.path.join(
            dense_road_bev_loc,
            'stereo_front_left_' + str(cam_timestamp) + '.png')
        cv2.imwrite(occ_map_loc, occupancy_map)
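The BEV rasterization repeated in both loops above could be factored out; a sketch assuming a 256x256 grid and that `res` is the meters-per-pixel constant defined at module level in the original (40 m / 256 px = 0.15625 would exactly fill the grid):

import numpy as np

def rasterize_bev(cam_pts: np.ndarray, res: float = 0.15625) -> np.ndarray:
    """Rasterize camera-frame points into a 256x256 BEV occupancy grid using the
    same +/-20 m lateral (X) and 0-40 m forward (Z) bounds as the code above."""
    occupancy_map = np.zeros((256, 256))
    X, Z = cam_pts[:, 0], cam_pts[:, 2]
    keep = (X > -20) & (X < 20) & (Z > 0) & (Z < 40)
    y_img = (-Z[keep] / res).astype(np.int32) + int(np.floor(40 / res)) - 1
    x_img = (X[keep] / res).astype(np.int32) - int(np.floor(-20 / res))
    occupancy_map[y_img, x_img] = 255
    return occupancy_map

grid = rasterize_bev(np.array([[0.0, 0.0, 20.0]]))
assert grid.sum() == 255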
Example No. 16
def eval_tracks(
    path_tracker_output_root: _PathLike,
    path_dataset_root: _PathLike,
    d_min: float,
    d_max: float,
    out_file: TextIO,
    centroid_method: str,
    diffatt: Optional[str],
    category: str = "VEHICLE",
) -> None:
    """Evaluate tracking output.

    Args:
        path_tracker_output_root: path to tracker output root, containing log_id subdirs
        path_dataset_root: path to dataset root, containing log_id subdirs
        d_min: minimum allowed distance range for ground truth and predicted objects,
            in meters
        d_max: maximum allowed distance range, as above, in meters
        out_file: output file object
        centroid_method: method for ground truth centroid estimation
        diffatt: difficulty attribute ['easy', 'far', 'fast', 'occ', 'short']. Note that if
            tracking according to a specific difficulty attribute is computed, then all ground
            truth annotations not fulfilling that attribute specification are
            disregarded/dropped out. Since the corresponding track predictions are not dropped
            out, the number of false positives, false negatives, and MOTA will not be accurate.
            However, `mostly tracked` and `mostly lost` will be accurate.
        category: label class to evaluate, such as "VEHICLE" or "PEDESTRIAN"
    """
    acc_c = mm.MOTAccumulator(auto_id=True)
    acc_i = mm.MOTAccumulator(auto_id=True)
    acc_o = mm.MOTAccumulator(auto_id=True)

    ID_gt_all: List[str] = []

    count_all: int = 0
    if diffatt is not None:
        import argoverse.evaluation

        pkl_path = os.path.join(os.path.dirname(argoverse.evaluation.__file__),
                                "dict_att_all.pkl")
        if not os.path.exists(pkl_path):
            # the precomputed difficulty-attribute file is required but missing
            logger.info(pkl_path)
            raise NotImplementedError

        with open(pkl_path, "rb") as pickle_in:
            dict_att_all = pickle.load(pickle_in)

    path_datasets = glob.glob(os.path.join(path_dataset_root, "*"))
    num_total_gt = 0

    for path_dataset in path_datasets:

        log_id = pathlib.Path(path_dataset).name
        if len(log_id) == 0 or log_id.startswith("_"):
            continue

        path_tracker_output = os.path.join(path_tracker_output_root, log_id)

        path_track_data = sorted(
            glob.glob(
                os.path.join(os.fspath(path_tracker_output),
                             "per_sweep_annotations_amodal", "*")))

        logger.info("log_id = %s", log_id)

        for ind_frame in range(len(path_track_data)):
            if ind_frame % 50 == 0:
                logger.info("%d/%d" % (ind_frame, len(path_track_data)))

            timestamp_lidar = int(
                Path(path_track_data[ind_frame]).stem.split("_")[-1])
            path_gt = os.path.join(
                path_dataset,
                "per_sweep_annotations_amodal",
                f"tracked_object_labels_{timestamp_lidar}.json",
            )

            if not os.path.exists(path_gt):
                logger.warning("Missing ", path_gt)
                continue

            gt_data = read_json_file(path_gt)

            gt: Dict[str, Dict[str, Any]] = {}
            id_gts = []
            for i in range(len(gt_data)):

                if gt_data[i]["label_class"] != category:
                    continue

                if diffatt is not None:

                    if diffatt not in dict_att_all["test"][log_id][
                            gt_data[i]["track_label_uuid"]]["difficult_att"]:
                        continue

                bbox, orientation = label_to_bbox(gt_data[i])

                center = np.array([
                    gt_data[i]["center"]["x"],
                    gt_data[i]["center"]["y"],
                    gt_data[i]["center"]["z"],
                ])
                if bbox[3] > 0 and in_distance_range_pose(
                        np.zeros(3), center, d_min, d_max):
                    track_label_uuid = gt_data[i]["track_label_uuid"]
                    gt[track_label_uuid] = {}
                    gt[track_label_uuid]["centroid"] = center

                    gt[track_label_uuid]["bbox"] = bbox
                    gt[track_label_uuid]["orientation"] = orientation
                    gt[track_label_uuid]["width"] = gt_data[i]["width"]
                    gt[track_label_uuid]["length"] = gt_data[i]["length"]
                    gt[track_label_uuid]["height"] = gt_data[i]["height"]

                    if track_label_uuid not in ID_gt_all:
                        ID_gt_all.append(track_label_uuid)

                    id_gts.append(track_label_uuid)
                    num_total_gt += 1

            tracks: Dict[str, Dict[str, Any]] = {}
            id_tracks: List[str] = []

            track_data = read_json_file(path_track_data[ind_frame])

            for track in track_data:
                key = track["track_label_uuid"]

                if track["label_class"] != category or track["height"] == 0:
                    continue

                center = np.array([
                    track["center"]["x"], track["center"]["y"],
                    track["center"]["z"]
                ])
                bbox, orientation = label_to_bbox(track)
                if in_distance_range_pose(np.zeros(3), center, d_min, d_max):
                    tracks[key] = {}
                    tracks[key]["centroid"] = center
                    tracks[key]["bbox"] = bbox
                    tracks[key]["orientation"] = orientation
                    tracks[key]["width"] = track["width"]
                    tracks[key]["length"] = track["length"]
                    tracks[key]["height"] = track["height"]

                    id_tracks.append(key)

            dists_c: List[List[float]] = []
            dists_i: List[List[float]] = []
            dists_o: List[List[float]] = []
            for gt_key, gt_value in gt.items():
                gt_track_data_c: List[float] = []
                gt_track_data_i: List[float] = []
                gt_track_data_o: List[float] = []
                dists_c.append(gt_track_data_c)
                dists_i.append(gt_track_data_i)
                dists_o.append(gt_track_data_o)
                for track_key, track_value in tracks.items():
                    count_all += 1
                    gt_track_data_c.append(
                        get_distance(gt_value, track_value, "centroid"))
                    gt_track_data_i.append(
                        get_distance(gt_value, track_value, "iou"))
                    gt_track_data_o.append(
                        get_distance(gt_value, track_value, "orientation"))

            acc_c.update(id_gts, id_tracks, dists_c)
            acc_i.update(id_gts, id_tracks, dists_i)
            acc_o.update(id_gts, id_tracks, dists_o)
    if count_all == 0:
        # fix for when all hypotheses are empty;
        # py-motmetrics currently doesn't support this, see https://github.com/cheind/py-motmetrics/issues/49
        acc_c.update(id_gts, ["dummy_id"], np.ones(np.shape(id_gts)) * np.inf)
        acc_i.update(id_gts, ["dummy_id"], np.ones(np.shape(id_gts)) * np.inf)
        acc_o.update(id_gts, ["dummy_id"], np.ones(np.shape(id_gts)) * np.inf)

    summary = mh.compute(
        acc_c,
        metrics=[
            "num_frames",
            "mota",
            "motp",
            "idf1",
            "mostly_tracked",
            "mostly_lost",
            "num_false_positives",
            "num_misses",
            "num_switches",
            "num_fragmentations",
        ],
        name="acc",
    )
    logger.info("summary = %s", summary)
    num_tracks = len(ID_gt_all)

    fn = os.path.basename(path_tracker_output)
    num_frames = summary["num_frames"][0]
    mota = summary["mota"][0] * 100
    motp_c = summary["motp"][0]
    idf1 = summary["idf1"][0]
    most_track = summary["mostly_tracked"][0] / num_tracks
    most_lost = summary["mostly_lost"][0] / num_tracks
    num_fp = summary["num_false_positives"][0]
    num_miss = summary["num_misses"][0]
    num_switch = summary["num_switches"][0]
    num_frag = summary["num_fragmentations"][0]

    acc_c.events.loc[acc_c.events.Type != "RAW",
                     "D"] = acc_i.events.loc[acc_c.events.Type != "RAW", "D"]

    sum_motp_i = mh.compute(acc_c, metrics=["motp"], name="acc")
    logger.info("MOTP-I = %s", sum_motp_i)
    num_tracks = len(ID_gt_all)

    fn = os.path.basename(path_tracker_output)
    motp_i = sum_motp_i["motp"][0]

    acc_c.events.loc[acc_c.events.Type != "RAW",
                     "D"] = acc_o.events.loc[acc_c.events.Type != "RAW", "D"]
    sum_motp_o = mh.compute(acc_c, metrics=["motp"], name="acc")
    logger.info("MOTP-O = %s", sum_motp_o)
    num_tracks = len(ID_gt_all)

    fn = os.path.basename(path_tracker_output)
    motp_o = sum_motp_o["motp"][0]

    out_string = (
        f"{fn} {num_frames} {mota:.2f} {motp_c:.2f} {motp_o:.2f} {motp_i:.2f} {idf1:.2f} {most_track:.2f} "
        f"{most_lost:.2f} {num_fp} {num_miss} {num_switch} {num_frag} \n")
    out_file.write(out_string)
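The difficulty-attribute lookup above suggests the pickle layout sketched below: split name -> log id -> track uuid -> {"difficult_att": [...]}, with the tag values produced by make_att_files in Example No. 20; the contents here are hypothetical:

from typing import Any, Dict

# hypothetical miniature of dict_att_all, inferred from the lookups above
dict_att_all: Dict[str, Any] = {
    "test": {
        "some_log_id": {
            "some_track_uuid": {"difficult_att": ["far", "short"]},
        }
    }
}
assert "far" in dict_att_all["test"]["some_log_id"]["some_track_uuid"]["difficult_att"]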
Example No. 17
print("root dir = ", root_dir)
print('updating track_labels_amodal folders...')
list_log_folders = glob.glob(os.path.join(root_dir, "*"))

if len(list_log_folders) == 0:
    print("Not file founded.")
else:

    for ind_log, path_log in enumerate(list_log_folders):
        print('Processing %d/%d' % (ind_log + 1, len(list_log_folders)))
        list_path_label_persweep = glob.glob(
            os.path.join(path_log, "per_sweep_annotations_amodal", "*"))
        list_path_label_persweep.sort()
        dict_track_labels = {}
        for path_label_persweep in list_path_label_persweep:
            data = read_json_file(path_label_persweep)
            for data_obj in data:
                id_obj = data_obj['track_label_uuid']
                if id_obj not in dict_track_labels.keys():
                    dict_track_labels[id_obj] = []
                dict_track_labels[id_obj].append(data_obj)

        path_amodal_labels = os.path.join(path_log, "track_labels_amodal")
        data_amodal = {}

        if os.path.exists(path_amodal_labels):
            shutil.rmtree(path_amodal_labels)

        os.mkdir(path_amodal_labels)
        print("Adding files to ", path_amodal_labels)
        for key in dict_track_labels.keys():
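The grouping idiom above (collect every per-sweep annotation under its track uuid) can also be written with collections.defaultdict; a minimal sketch:

from collections import defaultdict

labels = [
    {"track_label_uuid": "a", "timestamp": 0},
    {"track_label_uuid": "a", "timestamp": 1},
    {"track_label_uuid": "b", "timestamp": 0},
]
dict_track_labels = defaultdict(list)
for data_obj in labels:
    dict_track_labels[data_obj["track_label_uuid"]].append(data_obj)
assert len(dict_track_labels["a"]) == 2 and len(dict_track_labels["b"]) == 1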
Example No. 18
def get_mean():
    # 1. read tracked files from ground truth
    # 2. extract x, y, z, h, w, l, rot -> angle

    path_datasets = glob.glob(os.path.join(args.path_dataset_root, "*"))

    gt_trajectory_map = {
        tracking_name: defaultdict(dict)
        for tracking_name in tracking_names
    }

    # store every detection data to compute mean and variance

    gt_box_data = {tracking_name: [] for tracking_name in tracking_names}

    for path_dataset in path_datasets:

        log_id = pathlib.Path(path_dataset).name
        if len(log_id) == 0 or log_id.startswith("_"):
            continue

        path_track_data = sorted(
            glob.glob(os.path.join(os.fspath(path_dataset),
                                   "per_sweep_annotations_amodal", "*")))

        for ind_frame in range(len(path_track_data)):

            if ind_frame % 50 == 0:
                print("%d/%d" % (ind_frame, len(path_track_data)))

            timestamp_lidar = int(path_track_data[ind_frame].split("/")
                                  [-1].split("_")[-1].split(".")[0])

            path_gt = os.path.join(
                path_dataset, "per_sweep_annotations_amodal",
                f"tracked_object_labels_{timestamp_lidar}.json")

            if not os.path.exists(path_gt):
                continue

            gt_data = read_json_file(path_gt)

            for i in range(len(gt_data)):

                if gt_data[i]["label_class"] not in tracking_names:
                    print('\nignored: ', gt_data[i]["label_class"])

                    continue

                bbox, orientation = label_to_bbox(gt_data[i])

                x, y, z = gt_data[i]["center"]["x"], \
                 gt_data[i]["center"]["y"], \
                 gt_data[i]["center"]["z"]

                center = np.array([
                    gt_data[i]["center"]["x"], gt_data[i]["center"]["y"],
                    gt_data[i]["center"]["z"]
                ])

                w = gt_data[i]['width']
                l = gt_data[i]["length"]
                h = gt_data[i]["height"]

                rotation = [
                    gt_data[i]['rotation']['x'], gt_data[i]['rotation']['y'],
                    gt_data[i]['rotation']['z'], gt_data[i]['rotation']['w']
                ]

                z_angle = rotation_to_positive_z_angle(rotation)

                box_data = np.array([
                    h, w, l,
                    x, y, z,
                    z_angle,
                    0, 0, 0, 0  # x_dot, y_dot, z_dot, angle_dot (filled in below)
                ])
                track_label_uuid = gt_data[i]["track_label_uuid"]
                cat = gt_data[i]["label_class"]
                gt_trajectory_map[cat][track_label_uuid][ind_frame] = box_data

                # compute x_dot, y_dot, z_dot
                # if we can find the same object in the previous frame, get the velocity
                if (track_label_uuid in gt_trajectory_map[cat]
                        and ind_frame - 1
                        in gt_trajectory_map[cat][track_label_uuid]):

                    residual_vel = (
                        box_data[3:7] -
                        gt_trajectory_map[cat][track_label_uuid][ind_frame - 1][3:7])

                    box_data[7:11] = residual_vel

                    gt_trajectory_map[cat][track_label_uuid][
                        ind_frame] = box_data

                    # back fill
                    if gt_trajectory_map[cat][track_label_uuid][ind_frame -
                                                                1][7] == 0:
                        gt_trajectory_map[cat][track_label_uuid][
                            ind_frame - 1][7:11] = residual_vel

                gt_box_data[gt_data[i]["label_class"]].append(box_data)

    gt_box_data = {
        tracking_name: np.stack(gt_box_data[tracking_name], axis=0)
        for tracking_name in tracking_names
    }

    mean = {
        tracking_name: np.mean(gt_box_data[tracking_name], axis=0)
        for tracking_name in tracking_names
    }
    std = {
        tracking_name: np.std(gt_box_data[tracking_name], axis=0)
        for tracking_name in tracking_names
    }
    var = {
        tracking_name: np.var(gt_box_data[tracking_name], axis=0)
        for tracking_name in tracking_names
    }
    print('\nh, w, l, x, y, z, a, x_dot, y_dot, z_dot, a_dot\n')
    print('\nmean: ', mean, '\n', '\nstd: ', std, '\n', '\nvar: ', var)  # Q
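A worked miniature of the residual-velocity update above, using the box layout [h, w, l, x, y, z, angle, x_dot, y_dot, z_dot, angle_dot]:

import numpy as np

prev_box = np.array([1.5, 2.0, 4.5, 10.0, 5.0, 0.0, 0.1, 0, 0, 0, 0], dtype=float)
curr_box = np.array([1.5, 2.0, 4.5, 11.0, 5.5, 0.0, 0.2, 0, 0, 0, 0], dtype=float)

residual_vel = curr_box[3:7] - prev_box[3:7]  # per-frame deltas in x, y, z, angle
curr_box[7:11] = residual_vel
assert np.allclose(curr_box[7:11], [1.0, 0.5, 0.0, 0.1])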
Example No. 19
def generate_road_bev(dataset_dir="", log_id="", output_dir=""):

    argoverse_loader = ArgoverseTrackingLoader(dataset_dir)
    argoverse_data = argoverse_loader.get(log_id)
    city_name = argoverse_data.city_name
    avm = ArgoverseMap()
    sdb = SynchronizationDB(dataset_dir, collect_single_log_id=log_id)
    path = os.path.join(output_dir, log_id, 'road_gt')
    if os.path.exists(path):
        shutil.rmtree(path)  # needs `import shutil`
    os.makedirs(path, exist_ok=True)

    ply_fpath = os.path.join(dataset_dir, log_id, 'lidar')

    ply_locs = []
    for idx, ply_name in enumerate(os.listdir(ply_fpath)):
        ply_loc = np.array([idx, int(ply_name.split('.')[0].split('_')[-1])])
        ply_locs.append(ply_loc)
    ply_locs = np.array(ply_locs)
    lidar_timestamps = sorted(ply_locs[:, 1])

    calib_path = f"{dataset_dir}/{log_id}/vehicle_calibration_info.json"
    calib_data = read_json_file(calib_path)
    ind = 0
    for i in range(9):
        if calib_data['camera_data_'][i]['key'] ==\
                'image_raw_stereo_front_left':
            ind = i
            break
    rotation = np.array(calib_data['camera_data_'][ind]['value']
                        ['vehicle_SE3_camera_']['rotation']['coefficients'])
    translation = np.array(calib_data['camera_data_'][ind]['value']
                           ['vehicle_SE3_camera_']['translation'])
    egovehicle_SE3_cam = SE3(rotation=quat2rotmat(rotation),
                             translation=translation)

    for idx in range(len(lidar_timestamps)):
        lidar_timestamp = lidar_timestamps[idx]
        cam_timestamp = sdb.get_closest_cam_channel_timestamp(
            lidar_timestamp, "stereo_front_left", str(log_id))
        occupancy_map = np.zeros((256, 256))
        pose_fpath = os.path.join(
            dataset_dir, log_id, "poses",
            "city_SE3_egovehicle_" + str(lidar_timestamp) + ".json")
        if not Path(pose_fpath).exists():
            continue
        pose_data = read_json_file(pose_fpath)
        rotation = np.array(pose_data["rotation"])
        translation = np.array(pose_data["translation"])
        xcenter = translation[0]
        ycenter = translation[1]
        city_to_egovehicle_se3 = SE3(rotation=quat2rotmat(rotation),
                                     translation=translation)
        ego_car_nearby_lane_ids = avm.get_lane_ids_in_xy_bbox(
            xcenter, ycenter, city_name, 50.0)
        occupancy_map = get_lane_bev(ego_car_nearby_lane_ids, avm, city_name,
                                     city_to_egovehicle_se3,
                                     egovehicle_SE3_cam, occupancy_map, res,
                                     255)
        output_loc = os.path.join(
            output_dir, log_id, 'road_gt',
            'stereo_front_left_' + str(cam_timestamp) + '.png')
        cv2.imwrite(output_loc, occupancy_map)
Example No. 20
def make_att_files(root_dir: str) -> None:
    """Write a pickle file with difficulty attributes per track."""
    path_output_vis = "vis_output"
    filename_output = "att_file.npy"  # saved via save_pkl_dictionary despite the .npy extension

    if not os.path.exists(path_output_vis):
        os.mkdir(path_output_vis)

    list_folders = ["test"]
    list_name_class = ["VEHICLE", "PEDESTRIAN"]
    count_track = 0
    dict_att_all: Dict[str, Any] = {}

    for name_folder in list_folders:

        dict_att_all[name_folder] = {}
        list_log_folders = glob.glob(os.path.join(root_dir, name_folder, "*"))
        for ind_log, path_log in enumerate(list_log_folders):

            id_log = f"{Path(path_log).name}"
            print("%s %s %d/%d" %
                  (name_folder, id_log, ind_log, len(list_log_folders)))

            if check_track_label_folder:
                list_path_label_persweep = glob.glob(
                    os.path.join(path_log, "per_sweep_annotations_amodal",
                                 "*"))
                list_path_label_persweep.sort()

                dict_track_labels: Dict[str, Any] = {}
                for path_label_persweep in list_path_label_persweep:
                    data = read_json_file(path_label_persweep)
                    for data_obj in data:
                        id_obj = data_obj["track_label_uuid"]

                        if id_obj not in dict_track_labels.keys():
                            dict_track_labels[id_obj] = []
                        dict_track_labels[id_obj].append(data_obj)

                data_amodal: Dict[str, Any] = {}
                for key in dict_track_labels.keys():
                    dict_amodal: Dict[str, Any] = {}
                    data_amodal[key] = dict_amodal
                    data_amodal[key]["label_class"] = dict_track_labels[key][
                        0]["label_class"]
                    data_amodal[key]["uuid"] = dict_track_labels[key][0][
                        "track_label_uuid"]
                    data_amodal[key]["log_id"] = id_log
                    data_amodal[key]["track_label_frames"] = dict_track_labels[
                        key]

            argoverse_loader = ArgoverseTrackingLoader(
                os.path.join(root_dir, name_folder))
            data_log = argoverse_loader.get(id_log)
            list_lidar_timestamp = data_log.lidar_timestamp_list

            dict_tracks: Dict[str, Any] = {}
            for id_track in data_amodal.keys():

                data = data_amodal[id_track]
                if data["label_class"] not in list_name_class:
                    continue

                data_per_frame = data["track_label_frames"]

                dict_per_track: Dict[str, Any] = {}
                dict_tracks[id_track] = dict_per_track
                dict_tracks[id_track]["ind_lidar_min"] = -1
                dict_tracks[id_track]["ind_lidar_max"] = -1
                length_log = len(list_lidar_timestamp)
                dict_tracks[id_track]["list_city_se3"] = [None] * length_log
                dict_tracks[id_track]["list_bbox"] = [None] * length_log
                count_track += 1

                dict_tracks[id_track]["list_center"] = np.full([length_log, 3],
                                                               np.nan)
                dict_tracks[id_track]["list_center_w"] = np.full(
                    [length_log, 3], np.nan)
                dict_tracks[id_track]["list_dist"] = np.full([length_log],
                                                             np.nan)
                dict_tracks[id_track]["exists"] = np.full([length_log], False)

                for box in data_per_frame:

                    if box["timestamp"] in list_lidar_timestamp:
                        ind_lidar = list_lidar_timestamp.index(
                            box["timestamp"])
                    else:
                        continue

                    if dict_tracks[id_track]["ind_lidar_min"] == -1:
                        dict_tracks[id_track]["ind_lidar_min"] = ind_lidar

                    dict_tracks[id_track]["ind_lidar_max"] = max(
                        ind_lidar, dict_tracks[id_track]["ind_lidar_max"])

                    center = np.array([
                        box["center"]["x"], box["center"]["y"],
                        box["center"]["z"]
                    ])
                    city_SE3_egovehicle = argoverse_loader.get_pose(
                        ind_lidar, id_log)
                    if city_SE3_egovehicle is None:
                        print("Pose not found!")
                        continue
                    center_w = city_SE3_egovehicle.transform_point_cloud(
                        center[np.newaxis, :])[0]

                    dict_tracks[id_track]["list_center"][ind_lidar] = center
                    dict_tracks[id_track]["list_center_w"][
                        ind_lidar] = center_w
                    dict_tracks[id_track]["list_dist"][
                        ind_lidar] = np.linalg.norm(center[0:2])
                    dict_tracks[id_track]["exists"][ind_lidar] = True
                    dict_tracks[id_track]["list_city_se3"][
                        ind_lidar] = city_SE3_egovehicle
                    dict_tracks[id_track]["list_bbox"][ind_lidar] = box

                length_track = dict_tracks[id_track][
                    "ind_lidar_max"] - dict_tracks[id_track][
                        "ind_lidar_min"] + 1

                assert not (dict_tracks[id_track]["ind_lidar_max"] == -1
                            and dict_tracks[id_track]["ind_lidar_min"]
                            == -1), "zero-length track"
                dict_tracks[id_track]["length_track"] = length_track

                (
                    dict_tracks[id_track]["list_vel"],
                    dict_tracks[id_track]["list_acc"],
                ) = compute_v_a(dict_tracks[id_track]["list_center_w"])
                dict_tracks[id_track]["num_missing"] = (
                    dict_tracks[id_track]["length_track"] -
                    dict_tracks[id_track]["exists"].sum())
                dict_tracks[id_track]["difficult_att"] = []
                # get scalar velocity per timestamp as 2-norm of (vx, vy)
                vel_abs = np.linalg.norm(
                    dict_tracks[id_track]["list_vel"][:, 0:2], axis=1)
                acc_abs = np.linalg.norm(
                    dict_tracks[id_track]["list_acc"][:, 0:2], axis=1)

                ind_valid = np.nonzero(
                    1 - np.isnan(dict_tracks[id_track]["list_dist"]))[0]
                ind_close = np.nonzero(dict_tracks[id_track]["list_dist"]
                                       [ind_valid] < NEAR_DISTANCE_THRESH)[0]

                if len(ind_close) > 0:
                    ind_close_max = ind_close.max() + 1
                    ind_close_min = ind_close.min()

                # Only compute "fast" and "occluded" tags for near objects
                # The thresholds are not very meaningful for faraway objects, since they are usually pretty short.
                if dict_tracks[id_track]["list_dist"][ind_valid].min(
                ) > NEAR_DISTANCE_THRESH:
                    dict_tracks[id_track]["difficult_att"].append("far")
                else:
                    is_short_len_track1 = dict_tracks[id_track][
                        "length_track"] < SHORT_TRACK_LENGTH_THRESH
                    is_short_len_track2 = dict_tracks[id_track]["exists"].sum(
                    ) < SHORT_TRACK_COUNT_THRESH
                    if is_short_len_track1 or is_short_len_track2:
                        dict_tracks[id_track]["difficult_att"].append("short")
                    else:
                        if (ind_close_max -
                                ind_close_min) - dict_tracks[id_track][
                                    "exists"][ind_close_min:ind_close_max].sum(
                                    ) > MAX_OCCLUSION_PCT:
                            dict_tracks[id_track]["difficult_att"].append(
                                "occ")

                        if np.quantile(vel_abs[ind_valid][ind_close],
                                       0.9) > FAST_TRACK_THRESH:
                            dict_tracks[id_track]["difficult_att"].append(
                                "fast")

                if len(dict_tracks[id_track]["difficult_att"]) == 0:
                    dict_tracks[id_track]["difficult_att"].append("easy")

            if visualize:
                for ind_lidar, timestamp_lidar in enumerate(
                        list_lidar_timestamp):

                    list_bboxes = []
                    list_difficulty_att = []

                    for id_track in dict_tracks.keys():
                        if dict_tracks[id_track]["exists"][ind_lidar]:
                            list_bboxes.append(
                                dict_tracks[id_track]["list_bbox"][ind_lidar])
                            list_difficulty_att.append(
                                dict_tracks[id_track]["difficult_att"])

                    path_lidar = os.path.join(path_log, "lidar",
                                              "PC_%s.ply" % timestamp_lidar)
                    pc = np.asarray(o3d.io.read_point_cloud(path_lidar).points)
                    list_lidar_timestamp = data_log.lidar_timestamp_list
                    save_bev_img(
                        path_output_vis,
                        list_bboxes,
                        list_difficulty_att,
                        "argoverse_%s" % name_folder,
                        id_log,
                        timestamp_lidar,
                        pc,
                    )

            for id_track in dict_tracks.keys():
                list_key = list(dict_tracks[id_track].keys()).copy()
                for key in list_key:
                    if key != "difficult_att":
                        del dict_tracks[id_track][key]

            dict_att_all[name_folder][id_log] = dict_tracks

    save_pkl_dictionary(filename_output, dict_att_all)
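compute_v_a is not shown on this page; a plausible finite-difference stand-in (an assumption, not the library's actual implementation) matching the (T, 3) world-frame centers it receives:

import numpy as np

def compute_v_a_sketch(centers_w: np.ndarray):
    """Per-timestep velocity and acceleration of (T, 3) world-frame centers
    via finite differences; NaN centers propagate to NaN velocities."""
    vel = np.diff(centers_w, axis=0, prepend=centers_w[:1])
    acc = np.diff(vel, axis=0, prepend=vel[:1])
    return vel, acc

vel, acc = compute_v_a_sketch(np.array([[0.0, 0, 0], [1.0, 0, 0], [3.0, 0, 0]]))
assert np.allclose(vel[:, 0], [0.0, 1.0, 2.0])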
Example No. 21
def verify_lane_tangent_vector():
    """Debug low-confidence lane tangent predictions.

    I noticed that the confidence score of the lane direction is
    pretty low (almost zero) in some logs.
    """
    POSE_FILE_DIR = "../debug_lane_tangent"

    # both of these are Pittsburgh logs
    log_ids = [
        "033669d3-3d6b-3d3d-bd93-7985d86653ea",
        "028d5cb1-f74d-366c-85ad-84fde69b0fd3"
    ]

    avm = ArgoverseMap()
    city_name = "PIT"
    for log_id in log_ids:
        print(f"On {log_id}")
        pose_fpaths = glob.glob(
            f"{POSE_FILE_DIR}/{log_id}/poses/city_SE3_egovehicle_*.json")
        num_poses = len(pose_fpaths)
        egovehicle_xy_arr = np.zeros((num_poses, 2))
        for i, pose_fpath in enumerate(pose_fpaths):
            json_data = read_json_file(pose_fpath)
            egovehicle_xy_arr[i, 0] = json_data["translation"][0]
            egovehicle_xy_arr[i, 1] = json_data["translation"][1]

        for i, query_xy_city_coords in enumerate(egovehicle_xy_arr[::10, :]):
            # Hardcoded city-frame queries that previously returned near-zero
            # confidence; uncomment one to reproduce a specific failure case.
            # query_xy_city_coords = np.array([3116.8282170094944, 1817.1269613456188])
            # query_xy_city_coords = np.array([3304.7072308190845, 1993.1670162837597])

            lane_dir_vector, confidence = avm.get_lane_direction(
                query_xy_city_coords, city_name, visualize=False)
            print(f"\t{i}: {confidence}")

            visualize = True
            if visualize:
                fig = plt.figure(figsize=(22.5, 8))
                ax = fig.add_subplot(111)

                dx = lane_dir_vector[0] * 20
                dy = lane_dir_vector[1] * 20
                plt.arrow(query_xy_city_coords[0],
                          query_xy_city_coords[1],
                          dx,
                          dy,
                          color="r",
                          width=0.3,
                          zorder=2)

                query_x, query_y = query_xy_city_coords
                ax.scatter([query_x], [query_y], 100, color="k", marker=".")
                # make another plot now!

                plot_nearby_halluc_lanes(ax, city_name, avm, query_x, query_y)

                ax.axis("equal")
                plt.show()
                plt.close("all")
Example No. 22
def draw_ground_pts_in_image(
    sdb: SynchronizationDB,
    lidar_points: np.ndarray,
    city_to_egovehicle_se3: SE3,
    dataset_map: ArgoverseMap,
    log_id: str,
    lidar_timestamp: int,
    city_name: str,
    dataset_dir: str,
    experiment_prefix: str,
    plot_ground: bool = True,
    motion_compensate: bool = False,
    camera: Optional[str] = None,
) -> Union[None, np.ndarray]:
    """Write an image to disk with rendered ground points for every camera.

    Args:
        sdb: instance of SynchronizationDB
        lidar_points: Numpy array of shape (N,3) in egovehicle frame
        city_to_egovehicle_se3: SE3 instance which takes a point in egovehicle frame and brings it into city frame
        dataset_map: Map dataset instance
        log_id: ID of the log
        city_name: A city's name (e.g. 'MIA' or 'PIT')
        motion_compensate: Whether to bring lidar points from world frame @ lidar timestamp, to world frame @ camera
                           timestamp
        camera: camera name, if specified will return image of that specific camera, if None, will save all camera to
                disk and return None

    """
    # put into city coords, prune to the driveable area, then select the
    # ground (or non-ground) returns
    lidar_points = city_to_egovehicle_se3.transform_point_cloud(lidar_points)
    lidar_points = dataset_map.remove_non_driveable_area_points(
        lidar_points, city_name)
    _, not_ground_logicals = dataset_map.remove_ground_surface(
        copy.deepcopy(lidar_points), city_name, return_logicals=True)
    if plot_ground:
        lidar_points = lidar_points[np.logical_not(not_ground_logicals)]
    else:
        lidar_points = lidar_points[not_ground_logicals]

    # put back into ego-vehicle coords
    lidar_points = city_to_egovehicle_se3.inverse_transform_point_cloud(
        lidar_points)

    calib_fpath = f"{dataset_dir}/{log_id}/vehicle_calibration_info.json"
    calib_data = read_json_file(calib_fpath)

    # color by range: red (near) to green (far), repeating every NUM_RANGE_BINS
    # meters; channel order is flipped to BGR only at draw time below
    colors_arr = np.array([
        [color_obj.rgb]
        for color_obj in Color("red").range_to(Color("green"), NUM_RANGE_BINS)
    ]).squeeze()

    for camera_name in RING_CAMERA_LIST + STEREO_CAMERA_LIST:
        im_dir = f"{dataset_dir}/{log_id}/{camera_name}"

        # load images, e.g. 'image_raw_ring_front_center_000000486.jpg'
        cam_timestamp = sdb.get_closest_cam_channel_timestamp(
            lidar_timestamp, camera_name, log_id)
        if cam_timestamp is None:
            continue

        im_fname = f"{camera_name}_{cam_timestamp}.jpg"
        im_fpath = f"{im_dir}/{im_fname}"

        # Swap channel order as OpenCV expects it -- BGR not RGB
        # must make a copy to make memory contiguous
        img = imageio.imread(im_fpath)[:, :, ::-1].copy()
        points_h = point_cloud_to_homogeneous(copy.deepcopy(lidar_points)).T

        if motion_compensate:
            uv, uv_cam, valid_pts_bool = project_lidar_to_img_motion_compensated(
                points_h,  # these are recorded at lidar_time
                copy.deepcopy(calib_data),
                camera_name,
                cam_timestamp,
                lidar_timestamp,
                dataset_dir,
                log_id,
                False,
            )
        else:
            uv, uv_cam, valid_pts_bool = project_lidar_to_img(
                points_h, copy.deepcopy(calib_data), camera_name, False)

        if valid_pts_bool is None or uv is None or uv_cam is None:
            continue

        if valid_pts_bool.sum() == 0:
            continue

        uv = np.round(uv[valid_pts_bool]).astype(np.int32)
        uv_cam = uv_cam.T[valid_pts_bool]
        pt_ranges = np.linalg.norm(uv_cam[:, :3], axis=1)
        rgb_bins = np.round(pt_ranges).astype(np.int32)
        # account for moving past 100 meters, loop around again
        rgb_bins = rgb_bins % NUM_RANGE_BINS
        uv_colors = (255 * colors_arr[rgb_bins]).astype(np.int32)

        img = draw_point_cloud_in_img_cv2(img, uv, np.fliplr(uv_colors))

        save_dir = f"{experiment_prefix}_ground_viz/{log_id}/{camera_name}"
        os.makedirs(save_dir, exist_ok=True)
        cv2.imwrite(f"{save_dir}/{camera_name}_{lidar_timestamp}.jpg", img)
        if camera == camera_name:
            return cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    return None
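
A minimal sketch of the single-camera path, which returns an RGB frame instead of writing every camera to disk. It assumes sdb, avm, lidar_pts, and city_to_egovehicle_se3 were already built (via SynchronizationDB, ArgoverseMap, load_ply, and SE3); the log ID, timestamp, and paths are hypothetical:

img = draw_ground_pts_in_image(
    sdb,                      # SynchronizationDB for this log
    lidar_pts,                # (N,3) sweep in the egovehicle frame
    city_to_egovehicle_se3,   # pose at the sweep's timestamp
    avm,                      # ArgoverseMap instance
    "some-log-id", 315969629019741000, "PIT",
    "./argoverse-tracking/val", "demo",
    camera="ring_front_center",  # request this camera's frame back as RGB
)
if img is not None:
    import matplotlib.pyplot as plt
    plt.imshow(img)
    plt.show()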
Example No. 23
def eval_tracks(
    path_tracker_output: str,
    path_dataset: _PathLike,
    d_min: float,
    d_max: float,
    out_file: TextIO,
    centroid_method: str,
) -> None:
    """Evaluate tracking output.

    Args:
        path_tracker_output: path to tracker output
        path_dataset: path to dataset
        d_min: minimum distance range
        d_max: maximum distance range
        out_file: output file object
        centroid_method: method for ground truth centroid estimation
    """
    acc = mm.MOTAccumulator(auto_id=True)

    path_track_data = sorted(glob.glob(os.fspath(path_tracker_output) + "/*"))

    log_id = pathlib.Path(path_dataset).name
    logger.info("log_id = %s", log_id)

    city_info_fpath = f"{path_dataset}/city_info.json"
    city_info = read_json_file(city_info_fpath)
    city_name = city_info["city_name"]
    logger.info("city name = %s", city_name)

    ID_gt_all: List[str] = []

    for ind_frame in range(len(path_track_data)):
        if ind_frame % 50 == 0:
            logger.info("%d/%d", ind_frame, len(path_track_data))

        timestamp_lidar = int(path_track_data[ind_frame].split("/")[-1].split("_")[-1].split(".")[0])
        path_gt = os.path.join(
            path_dataset, "per_sweep_annotations_amodal", f"tracked_object_labels_{timestamp_lidar}.json"
        )

        if not os.path.exists(path_gt):
            logger.warning("Missing ", path_gt)
            continue

        gt_data = read_json_file(path_gt)

        pose_data = read_json_file(f"{path_dataset}/poses/city_SE3_egovehicle_{timestamp_lidar}.json")
        rotation = np.array(pose_data["rotation"])
        translation = np.array(pose_data["translation"])
        ego_R = quat2rotmat(rotation)
        ego_t = translation
        egovehicle_to_city_se3 = SE3(rotation=ego_R, translation=ego_t)

        pc_raw0 = load_ply(os.path.join(path_dataset, f"lidar/PC_{timestamp_lidar}.ply"))
        pc_raw_roi = leave_only_roi_region(
            pc_raw0, egovehicle_to_city_se3, ground_removal_method="no", city_name=city_name
        )

        gt: Dict[str, Dict[str, Any]] = {}
        id_gts = []
        for i in range(len(gt_data)):

            if gt_data[i]["label_class"] != "VEHICLE":
                continue

            bbox, orientation = label_to_bbox(gt_data[i])
            pc_segment = get_pc_inside_bbox(pc_raw_roi, bbox)

            center = np.array([gt_data[i]["center"]["x"], gt_data[i]["center"]["y"], gt_data[i]["center"]["z"]])
            if (
                len(pc_segment) >= min_point_num
                and bbox[3] > 0
                and in_distance_range_pose(np.zeros(3), center, d_min, d_max)
            ):
                track_label_uuid = gt_data[i]["track_label_uuid"]
                gt[track_label_uuid] = {}
                if centroid_method == "average":
                    gt[track_label_uuid]["centroid"] = pc_segment.sum(axis=0) / len(pc_segment)
                elif centroid_method == "label_center":
                    gt[track_label_uuid]["centroid"] = center

                else:
                    logger.warning("Not implemented")

                gt[track_label_uuid]["bbox"] = bbox
                gt[track_label_uuid]["orientation"] = orientation

                if track_label_uuid not in ID_gt_all:
                    ID_gt_all.append(track_label_uuid)

                id_gts.append(track_label_uuid)

        tracks: Dict[str, Dict[str, Any]] = {}
        id_tracks: List[str] = []

        track_data = read_json_file(path_track_data[ind_frame])

        for track in track_data:
            key = track["track_label_uuid"]

            if track["label_class"] != "VEHICLE" or track["height"] == 0:
                continue

            center = np.array([track["center"]["x"], track["center"]["y"], track["center"]["z"]])

            if in_distance_range_pose(np.zeros(3), center, d_min, d_max):
                tracks[key] = {}
                tracks[key]["centroid"] = center

                id_tracks.append(key)

        dists: List[List[float]] = []
        for gt_key, gt_value in gt.items():
            gt_track_data: List[float] = []
            dists.append(gt_track_data)
            for track_key, track_value in tracks.items():
                gt_track_data.append(get_distance(gt_value, track_value, "centroid"))

        acc.update(id_gts, id_tracks, dists)

    mh = mm.metrics.create()
    summary = mh.compute(
        acc,
        metrics=[
            "num_frames",
            "mota",
            "motp",
            "idf1",
            "mostly_tracked",
            "mostly_lost",
            "num_false_positives",
            "num_misses",
            "num_switches",
            "num_fragmentations",
        ],
        name="acc",
    )
    logger.info("summary = %s", summary)
    num_tracks = len(ID_gt_all)

    fn = os.path.basename(path_tracker_output)
    num_frames = summary["num_frames"][0]
    mota = summary["mota"][0] * 100
    motp = summary["motp"][0]
    idf1 = summary["idf1"][0]
    most_track = summary["mostly_tracked"][0] / num_tracks
    most_lost = summary["mostly_lost"][0] / num_tracks
    num_fp = summary["num_false_positives"][0]
    num_miss = summary["num_misses"][0]
    num_switch = summary["num_switches"][0]
    num_flag = summary["num_fragmentations"][0]

    out_string = (
        f"{fn} {num_frames} {mota:.2f} {motp:.2f} {idf1:.2f} {most_track:.2f} "
        f"{most_lost:.2f} {num_fp} {num_miss} {num_switch} {num_flag} \n"
    )
    out_file.write(out_string)
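
A minimal driver sketch for evaluating a single log; all paths are hypothetical, and centroid_method must be "average" or "label_center":

with open("tracking_eval.txt", "w") as out_file:
    eval_tracks(
        path_tracker_output="./tracker_output/some-log-id",   # hypothetical
        path_dataset="./argoverse-tracking/val/some-log-id",  # hypothetical
        d_min=0.0,
        d_max=100.0,
        out_file=out_file,
        centroid_method="average",
    )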
Example No. 24
def generate_vehicle_bev(dataset_dir="", log_id="", output_dir=""):
    """Rasterize ground-truth vehicle boxes into a bird's-eye-view image per LiDAR sweep.

    Relies on module-level `res` (meters per BEV pixel) and `size` (camera
    image shape) being defined elsewhere.
    """
    argoverse_loader = ArgoverseTrackingLoader(dataset_dir)
    argoverse_data = argoverse_loader.get(log_id)
    camera = argoverse_loader.CAMERA_LIST[7]
    calib = argoverse_data.get_calibration(camera)
    sdb = SynchronizationDB(dataset_dir, collect_single_log_id=log_id)

    calib_path = f"{dataset_dir}/{log_id}/vehicle_calibration_info.json"
    calib_data = read_json_file(calib_path)
    ind = 0
    for i, cam_entry in enumerate(calib_data["camera_data_"]):
        if cam_entry["key"] == "image_raw_stereo_front_left":
            ind = i
            break
    rotation = np.array(calib_data['camera_data_'][ind]['value']
                        ['vehicle_SE3_camera_']['rotation']['coefficients'])
    translation = np.array(calib_data['camera_data_'][ind]['value']
                           ['vehicle_SE3_camera_']['translation'])
    egovehicle_SE3_cam = SE3(rotation=quat2rotmat(rotation),
                             translation=translation)

    os.makedirs(os.path.join(output_dir, log_id, "car_bev_gt"), exist_ok=True)

    lidar_dir = os.path.join(dataset_dir, log_id, "lidar")
    ply_list = os.listdir(lidar_dir)

    pfa = PerFrameLabelAccumulator(dataset_dir,
                                   dataset_dir,
                                   "argoverse_bev_viz",
                                   save=False,
                                   bboxes_3d=True)
    pfa.accumulate_per_log_data(log_id)
    log_timestamp_dict = pfa.log_timestamp_dict
    for i, ply_name in enumerate(ply_list):
        lidar_timestamp = ply_name.split('.')[0].split('_')[1]
        lidar_timestamp = int(lidar_timestamp)

        cam_timestamp = sdb.get_closest_cam_channel_timestamp(
            lidar_timestamp, "stereo_front_left", str(log_id))
        image_path = os.path.join(
            output_dir, str(log_id), "car_bev_gt",
            "stereo_front_left_" + str(cam_timestamp) + ".jpg")
        objects = log_timestamp_dict[log_id][lidar_timestamp]
        top_view = np.zeros((256, 256))

        all_occluded = True
        for frame_rec in objects:
            if frame_rec.occlusion_val != IS_OCCLUDED_FLAG:
                all_occluded = False

        if not all_occluded:
            for frame_rec in objects:
                bbox_ego_frame = frame_rec.bbox_ego_frame
                uv = calib.project_ego_to_image(bbox_ego_frame).T
                # keep only boxes whose projected corners all land inside the
                # image and in front of the camera
                in_view = np.all(
                    (uv[0, :] >= 0.0) & (uv[0, :] < size[1] - 1.0)
                    & (uv[1, :] >= 0.0) & (uv[1, :] < size[0] - 1.0)
                    & (uv[2, :] > 0)
                )
                if not in_view:
                    continue
                bbox_cam_fr = egovehicle_SE3_cam.inverse().transform_point_cloud(bbox_ego_frame)
                X = bbox_cam_fr[:, 0]
                Z = bbox_cam_fr[:, 2]

                if (frame_rec.occlusion_val != IS_OCCLUDED_FLAG
                        and frame_rec.obj_class_str == "VEHICLE"):
                    y_img = (-Z / res).astype(np.int32)
                    x_img = (X / res).astype(np.int32)
                    x_img -= int(np.floor(-20 / res))
                    y_img += int(np.floor(40 / res))
                    box = np.array([x_img[2], y_img[2]])
                    box = np.vstack((box, [x_img[6], y_img[6]]))
                    box = np.vstack((box, [x_img[7], y_img[7]]))
                    box = np.vstack((box, [x_img[3], y_img[3]]))
                    cv2.drawContours(top_view, [box], 0, 255, -1)

        cv2.imwrite(image_path, top_view)
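
generate_vehicle_bev reads the module-level `res` and `size` globals; a minimal driver sketch, with assumed values and hypothetical paths:

res = 80.0 / 256     # assumed meters per BEV pixel, for a 256 px grid spanning 80 m
size = (2056, 2464)  # assumed (rows, cols) of the stereo_front_left image, for the in-view test

generate_vehicle_bev(
    dataset_dir="./argoverse-tracking/val",  # hypothetical
    log_id="some-log-id",                    # hypothetical
    output_dir="./bev_output",               # hypothetical
)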
Example No. 25
def matching_and_get_diff_stats():
    """Match tracker outputs to ground-truth labels and report residual statistics.

    Reads the dataset root and the tracker output root from the global `args`.
    """

    tracking_names = ["VEHICLE", "PEDESTRIAN"]

    diff = {tracking_name: []
            for tracking_name in tracking_names}  # [h, w, l, x, y, z, a]
    diff_vel = {tracking_name: []
                for tracking_name in tracking_names
                }  # [x_dot, y_dot, z_dot, a_dot]
    match_diff_t_map = {tracking_name: {} for tracking_name in tracking_names}

    # similar to main.py class AB3DMOT update()
    reorder = [3, 4, 5, 6, 2, 1, 0]
    reorder_back = [6, 5, 4, 0, 1, 2, 3]

    # gt_all = {tracking_name: defaultdict(dict) for tracking_name in tracking_names}
    # pr_all = {tracking_name: defaultdict(dict) for tracking_name in tracking_names}

    gt_trajectory_map = {
        tracking_name: defaultdict(dict)
        for tracking_name in tracking_names
    }
    pr_trajectory_map = {
        tracking_name: defaultdict(dict)
        for tracking_name in tracking_names
    }
    gts_ids = list()
    prs_ids = list()
    tmp_prs = list()
    tmp_gts = list()
    path_datasets = glob.glob(os.path.join(args.path_dataset_root, "*"))

    for path_dataset in path_datasets:
        log_id = pathlib.Path(path_dataset).name
        if len(log_id) == 0 or log_id.startswith("_"):
            continue

        print("\npath_dataset: ", path_dataset)

        path_track_data = sorted(
            glob.glob(os.path.join(os.fspath(path_dataset),
                                   "per_sweep_annotations_amodal", "*")))

        # iterate over every per-sweep annotation file in this log
        for ind_frame in range(len(path_track_data)):
            if ind_frame % 50 == 0:
                print("%d/%d" % (ind_frame, len(path_track_data)))

            timestamp_lidar = int(
                path_track_data[ind_frame].split("/")[-1].split("_")[-1].split(".")[0])

            path_gt = os.path.join(
                path_dataset, "per_sweep_annotations_amodal",
                f"tracked_object_labels_{timestamp_lidar}.json")

            if not os.path.exists(path_gt):
                continue

            gt_data = read_json_file(path_gt)

            # collect ground-truth boxes
            for i in range(len(gt_data)):
                if gt_data[i]["label_class"] not in tracking_names:
                    continue

                x = gt_data[i]["center"]["x"]
                y = gt_data[i]["center"]["y"]
                z = gt_data[i]["center"]["z"]
                w = gt_data[i]["width"]
                l = gt_data[i]["length"]
                h = gt_data[i]["height"]

                rotation = [
                    gt_data[i]["rotation"]["x"], gt_data[i]["rotation"]["y"],
                    gt_data[i]["rotation"]["z"], gt_data[i]["rotation"]["w"]
                ]
                z_angle = rotation_to_positive_z_angle(rotation)

                box_data = np.array([h, w, l, x, y, z, z_angle])

                track_label_uuid = gt_data[i]["track_label_uuid"]
                cat = gt_data[i]["label_class"]
                gt_trajectory_map[cat][track_label_uuid][ind_frame] = box_data

                tmp_gts.append(box_data)
                gts_ids.append(track_label_uuid)
    gts = np.stack(tmp_gts, axis=0)

    # now collect the tracker's predictions
    path_datasets_output = glob.glob(os.path.join(args.path_tracker_output, "*"))

    for path_dataset_trck in path_datasets_output:
        log_id = pathlib.Path(path_dataset_trck).name
        if len(log_id) == 0 or log_id.startswith("_"):
            continue

        print("\npath_tracker_output: ", path_dataset_trck)
        print("log_id =", log_id)

        path_track_data = sorted(
            glob.glob(os.path.join(os.fspath(path_dataset_trck),
                                   "per_sweep_annotations_amodal", "*")))

        for ind_frame_track in range(len(path_track_data)):
            if ind_frame_track % 50 == 0:
                print("%d/%d" % (ind_frame_track, len(path_track_data)))

            timestamp_lidar = int(
                path_track_data[ind_frame_track].split("/")[-1].split("_")[-1].split(".")[0])

            path_pr = os.path.join(
                path_dataset_trck, "per_sweep_annotations_amodal",
                f"tracked_object_labels_{timestamp_lidar}.json")

            if not os.path.exists(path_pr):
                print("Missing pr", path_pr)
                continue

            pr_data = read_json_file(path_pr)

            # collect predicted boxes
            for i in range(len(pr_data)):
                if pr_data[i]["label_class"] not in tracking_names:
                    continue

                x = pr_data[i]["center"]["x"]
                y = pr_data[i]["center"]["y"]
                z = pr_data[i]["center"]["z"]
                w = pr_data[i]["width"]
                l = pr_data[i]["length"]
                h = pr_data[i]["height"]

                rotation = [
                    pr_data[i]["rotation"]["x"], pr_data[i]["rotation"]["y"],
                    pr_data[i]["rotation"]["z"], pr_data[i]["rotation"]["w"]
                ]
                z_angle = rotation_to_positive_z_angle(rotation)

                box_data = np.array([h, w, l, x, y, z, z_angle])

                track_label_uuid = pr_data[i]["track_label_uuid"]
                cat = pr_data[i]["label_class"]
                pr_trajectory_map[cat][track_label_uuid][ind_frame_track] = box_data

                tmp_prs.append(box_data)
                prs_ids.append(track_label_uuid)

    prs = np.stack(tmp_prs, axis=0)

    # restrict to a fixed slice of boxes, and slice the id lists identically so
    # that row indices stay aligned with gts_ids / prs_ids
    prs = prs[101:1000, reorder]
    gts = gts[101:1000, reorder]
    prs_ids = prs_ids[101:1000]
    gts_ids = gts_ids[101:1000]

    dets_8corner = [convert_3dbox_to_8corner(det_tmp) for det_tmp in prs]
    gts_8corner = [convert_3dbox_to_8corner(gt_tmp) for gt_tmp in gts]
    print('\n Computing distance matrix...')
    print('\n dets len: ', len(dets_8corner))
    print('\n gts_8corner len: ', len(gts_8corner))

    iou_matrix = np.zeros((len(dets_8corner), len(gts_8corner)),
                          dtype=np.float32)
    for d, det in enumerate(dets_8corner):
        for g, gt in enumerate(gts_8corner):
            iou_matrix[d, g] = iou3d(det, gt)[0]

    # linear_assignment minimizes total cost, so use the negated IoU as distance;
    # a match is accepted only if its 3D IoU exceeds 0.1
    distance_matrix = -iou_matrix
    threshold = -0.1

    print('\n linear_assignment...')
    matched_indices = linear_assignment(distance_matrix)

    prs = prs[:, reorder_back]
    gts = gts[:, reorder_back]

    print('\n linear_assignment...DONE')

    # tally residuals per category; note that the matching itself is not
    # separated by category
    fl = False  # set if the velocity residuals could not be differenced
    for tracking_name in tracking_names:
        for pair_id in range(matched_indices.shape[0]):
            d_ind, g_ind = matched_indices[pair_id][0], matched_indices[pair_id][1]

            # accept the match only if its (negated-IoU) distance clears the threshold
            if distance_matrix[d_ind][g_ind] < threshold:
                diff_value = prs[d_ind] - gts[g_ind]
                diff[tracking_name].append(diff_value)

                gt_track_id = gts_ids[g_ind]

                # note: `ind_frame` still holds the last value from the
                # ground-truth loop above, so all matches are keyed to it
                if ind_frame not in match_diff_t_map[tracking_name]:
                    match_diff_t_map[tracking_name][ind_frame] = {gt_track_id: diff_value}
                else:
                    match_diff_t_map[tracking_name][ind_frame][gt_track_id] = diff_value

                # if the previous frame matched the same gt track, difference
                # the residuals to obtain a velocity residual
                try:
                    if (ind_frame > 0
                            and ind_frame - 1 in match_diff_t_map[tracking_name]
                            and gt_track_id in match_diff_t_map[tracking_name][ind_frame - 1]):
                        diff_vel_value = (diff_value
                                          - match_diff_t_map[tracking_name][ind_frame - 1][gt_track_id])
                        diff_vel[tracking_name].append(diff_vel_value)
                except ValueError:
                    fl = True

    diff = {
        tracking_name: np.stack(diff[tracking_name], axis=0)
        for tracking_name in tracking_names
    }
    mean = {
        tracking_name: np.mean(diff[tracking_name], axis=0)
        for tracking_name in tracking_names
    }
    std = {
        tracking_name: np.std(diff[tracking_name], axis=0)
        for tracking_name in tracking_names
    }
    var = {
        tracking_name: np.var(diff[tracking_name], axis=0)
        for tracking_name in tracking_names
    }

    print('Diff: Global coordinate system')
    print('h, w, l, x, y, z, a\n')
    print('mean: ', mean)
    print('std: ', std)
    print('var: ', var)

    if not fl:
        diff_vel = {
            tracking_name: np.stack(diff_vel[tracking_name], axis=0)
            for tracking_name in tracking_names
        }
        mean_vel = {
            tracking_name: np.mean(diff_vel[tracking_name], axis=0)
            for tracking_name in tracking_names
        }
        std_vel = {
            tracking_name: np.std(diff_vel[tracking_name], axis=0)
            for tracking_name in tracking_names
        }
        var_vel = {
            tracking_name: np.var(diff_vel[tracking_name], axis=0)
            for tracking_name in tracking_names
        }
        print('\nh_dot, w_dot, l_dot, x_dot, y_dot, z_dot, a_dot\n')
        print('mean_vel: ', mean_vel)
        print('std_vel: ', std_vel)
        print('var_vel: ', var_vel)
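
matching_and_get_diff_stats reads a global `args`; a minimal command-line driver sketch, with hypothetical default paths:

if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("--path_dataset_root",
                        default="./argoverse-tracking/val")  # hypothetical
    parser.add_argument("--path_tracker_output",
                        default="./tracker_output")          # hypothetical
    args = parser.parse_args()

    matching_and_get_diff_stats()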