def run_ab3dmot(
        classname: str,
        pose_dir: str,
        dets_dump_dir: str,
        tracks_dump_dir: str,
        min_conf: float = 0.3,
        match_algorithm: str = 'h',  # 'h' = Hungarian
        match_threshold: float = 4,
        match_distance: str = 'iou',
        p: np.ndarray = np.eye(10),
        thr_estimate: float = 0.8,
        thr_prune: float = 0.1,
        ps: float = 0.9) -> None:
    """
    #path to argoverse tracking dataset test set, we will add our predicted labels into per_sweep_annotations_amodal/ 
    #inside this folder

    Filtering occurs in the city frame, not the egovehicle frame.

        Args:
        -   classname: string, either 'VEHICLE' or 'PEDESTRIAN'
        -   pose_dir: string
        -   dets_dump_dir: string
        -   tracks_dump_dir: string
        -   max_age: integer
        -   min_hits: integer

        Returns:
        -   None
    """
    dl = SimpleArgoverseTrackingDataLoader(data_dir=pose_dir,
                                           labels_dir=dets_dump_dir)

    am = ArgoverseMap()
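    # the map API is only used by the (currently commented-out) drivable-area /
    # ROI filtering further below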

    for log_id in tqdm(dl.sdb.get_valid_logs()):

        print(log_id)

        city_name = dl.get_city_name(log_id)

        labels_folder = os.path.join(dets_dump_dir, log_id,
                                     "per_sweep_annotations_amodal")
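        # per-sweep annotation files end in "_<lidar_timestamp>.json"; recover and
        # sort the timestamps from the trailing integer in each filename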
        lis = os.listdir(labels_folder)
        lidar_timestamps = [
            int(file.split(".")[0].split("_")[-1]) for file in lis
        ]
        lidar_timestamps.sort()
        previous_frame_bbox = []

        ab3dmot = AB3DMOT(thr_estimate=thr_estimate,
                          thr_prune=thr_prune,
                          ps=ps)
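        # AB3DMOT keeps one 3-D Kalman filter per track; thr_estimate, thr_prune and
        # ps are track-management parameters of this variant (presumably a confirmation
        # threshold, a pruning threshold, and a per-frame survival probability)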

        print(labels_folder)
        tracked_labels_copy = []

        for j, current_lidar_timestamp in enumerate(lidar_timestamps):

            dets = dl.get_labels_at_lidar_timestamp(log_id,
                                                    current_lidar_timestamp)

            dets_copy = dets
            transforms = []

            city_SE3_egovehicle = dl.get_city_to_egovehicle_se3(
                log_id, current_lidar_timestamp)
            egovehicle_SE3_city = city_SE3_egovehicle.inverse()
            transformed_labels = []
            conf = []

            for l_idx, l in enumerate(dets):

                if l['label_class'] != classname:
                    # will revisit in other tracking pass
                    continue
                if l["score"] < min_conf:
                    # print('Skipping det with confidence ', l["score"])
                    continue

                det_obj = json_label_dict_to_obj_record(l)
                det_corners_egovehicle_fr = det_obj.as_3d_bbox()

                transforms += [city_SE3_egovehicle]
                if city_SE3_egovehicle is None:
                    print('Was None')

                # convert detection from egovehicle frame to city frame
                det_corners_city_fr = city_SE3_egovehicle.transform_point_cloud(
                    det_corners_egovehicle_fr)
                det_ctr_city_fr = np.mean(det_corners_city_fr, axis=0)

                # Check the driveable/roi area
                # da = am.remove_non_driveable_area_points(np.array([det_ctr_city_fr]), city_name=city_name)
                # if len(da) == 0 and l['label_class'] == 'VEHICLE':
                #     continue

                # roi = am.remove_non_roi_points(np.array([det_ctr_city_fr]), city_name=city_name)
                # if len(roi) == 0:
                #     continue

                yaw = yaw_from_bbox_corners(det_corners_city_fr)
                transformed_labels += [[
                    det_ctr_city_fr[0], det_ctr_city_fr[1], det_ctr_city_fr[2],
                    yaw, l["length"], l["width"], l["height"]
                ]]
                conf += [l["score"]]

            if len(transformed_labels) > 0:
                transformed_labels = np.array(transformed_labels)
            else:
                transformed_labels = np.empty((0, 7))

            dets_all = {
                "dets": transformed_labels,
                "info": np.zeros(transformed_labels.shape),
                "conf": conf
            }
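            # each row of `dets` is [x, y, z, yaw, length, width, height] in the city
            # frame; `info` is an all-zeros array of the same shape and `conf` holds
            # the per-detection scores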

            # perform measurement update in the city frame.
            dets_with_object_id = ab3dmot.update(dets_all, match_distance,
                                                 match_threshold,
                                                 match_algorithm, p)
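            # each returned row is [x, y, z, yaw, length, width, height, track_id],
            # still expressed in the city frame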

            tracked_labels = []
            for det in dets_with_object_id:
                # move city frame tracks back to ego-vehicle frame
                xyz_city = np.array(
                    [det[0].item(), det[1].item(),
                     det[2].item()]).reshape(1, 3)
                city_yaw_object = det[3]
                city_se2_object = SE2(rotation=rotmat2d(city_yaw_object),
                                      translation=xyz_city.squeeze()[:2])
                city_se2_egovehicle, city_yaw_ego = get_B_SE2_A(
                    city_SE3_egovehicle)
                ego_se2_city = city_se2_egovehicle.inverse()
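                # compose ego_SE2_city with city_SE2_object to get the track's planar
                # pose (translation + yaw) in the egovehicle frame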
                egovehicle_se2_object = ego_se2_city.right_multiply_with_se2(
                    city_se2_object)

                # transform the track center back into the egovehicle frame
                egovehicle_SE3_city = city_SE3_egovehicle.inverse()
                xyz_ego = egovehicle_SE3_city.transform_point_cloud(
                    xyz_city).squeeze()

                # recover the object's yaw in the egovehicle frame from the
                # composed SE(2) pose
                ego_yaw_obj = se2_to_yaw(egovehicle_se2_object)
                qx, qy, qz, qw = yaw_to_quaternion3d(ego_yaw_obj)
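                # (for a rotation purely about +z, the result is
                # (qx, qy, qz, qw) = (0, 0, sin(yaw/2), cos(yaw/2)))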

                tracked_labels.append({
                    "center": {
                        "x": xyz_ego[0],
                        "y": xyz_ego[1],
                        "z": xyz_ego[2]
                    },
                    "rotation": {
                        "x": qx,
                        "y": qy,
                        "z": qz,
                        "w": qw
                    },
                    "length":
                    det[4],
                    "width":
                    det[5],
                    "height":
                    det[6],
                    "track_label_uuid":
                    uuid_gen.get_uuid(det[7]),
                    "timestamp":
                    current_lidar_timestamp,
                    "label_class":
                    classname
                })

            tracked_labels_copy = copy.deepcopy(tracked_labels)

            label_dir = os.path.join(tracks_dump_dir, log_id,
                                     "per_sweep_annotations_amodal")
            check_mkdir(label_dir)
            json_fname = f"tracked_object_labels_{current_lidar_timestamp}.json"
            json_fpath = os.path.join(label_dir, json_fname)

            if Path(json_fpath).exists():
                # accumulate tracks of another class together
                prev_tracked_labels = read_json_file(json_fpath)
                tracked_labels.extend(prev_tracked_labels)

            save_json_dict(json_fpath, tracked_labels)
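

# Minimal usage sketch (the paths below are placeholders, not taken from the original
# source). Vehicles and pedestrians are tracked in separate passes, one call per class,
# and the per-sweep JSON files accumulate both classes.
if __name__ == "__main__":
    for cls in ("VEHICLE", "PEDESTRIAN"):
        run_ab3dmot(
            classname=cls,
            pose_dir="/path/to/argoverse/test",
            dets_dump_dir="/path/to/detections/test",
            tracks_dump_dir="/path/to/tracks/test",
        )
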
Example 2

def run_tracking(classname, labels_folder, output):
    lis = os.listdir(labels_folder)
    lidar_timestamps = [int(f.split(".")[0]) for f in lis]
    lidar_timestamps.sort()
    ab3dmot = AB3DMOT()
    # print(labels_folder)

    for i, stamp in tqdm(enumerate(lidar_timestamps)):
        with open(os.path.join(labels_folder, f"{stamp}.json")) as dets_file:
            j_file = json.load(dets_file)
            # print(j_file)
            # Process each detection instance
            labels = []
            for det in j_file:
                det["center"]["x"] = float(det["center"]["x"])
                det["center"]["y"] = float(det["center"]["y"])
                det["center"]["z"] = float(det["center"]["z"])
                det["length"] = float(det["length"])
                det["width"] = float(det["width"])
                det["height"] = float(det["height"])
                det_obj = json_label_dict_to_obj_record(det)
                # print(det)
                det_bbox = det_obj.as_3d_bbox()
                yaw = yaw_from_bbox_corners(det_bbox)
                labels.append([
                    det_obj.translation[0], det_obj.translation[1],
                    det_obj.translation[2], yaw, det["length"], det["width"],
                    det["height"]
                ])
            # guard against frames with no detections (keep the expected (N, 7) shape)
            labels = np.array(labels) if len(labels) > 0 else np.empty((0, 7))
            dets_all = {"dets": labels, "info": np.zeros(labels.shape)}
            # print(dets_all)
            dets_with_object_id = ab3dmot.update(dets_all, classname)
            # print(dets_with_object_id)
            tracked_labels = []
            for det in dets_with_object_id:
                qx, qy, qz, qw = yaw_to_quaternion3d(det[3])
                tracked_labels.append({
                    "center": {
                        "x": det[0],
                        "y": det[1],
                        "z": det[2]
                    },
                    "rotation": {
                        "x": qx,
                        "y": qy,
                        "z": qz,
                        "w": qw
                    },
                    "length": det[4],
                    "width": det[5],
                    "height": det[6],
                    "track_label_uuid": det[7],
                    "timestamp": stamp,
                    "label_class": classname
                })

            # print(tracked_labels)
            if not os.path.exists(output):
                os.mkdir(output)
            output_name = os.path.join(output, str(stamp) + ".json")
            if os.path.exists(output_name):
                prev_labels = read_json_file(output_name)
                tracked_labels.extend(prev_labels)
            save_json_dict(output_name, tracked_labels)
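

# Minimal usage sketch for run_tracking (placeholder paths, not from the original
# source): it expects one "<lidar_timestamp>.json" detections file per sweep inside
# `labels_folder` and writes tracked labels for the same timestamps into `output`.
if __name__ == "__main__":
    run_tracking(
        classname="VEHICLE",
        labels_folder="/path/to/detections/one_log",
        output="/path/to/tracks/one_log",
    )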