Code Example #1
File: generate_fake_data.py Project: mgladkova/bamot
def main(base_dir):
    # iterate over the 21 KITTI tracking training sequences (0000-0020)
    for scene in tqdm.trange(21):
        out_fname = base_dir / (str(scene).zfill(4) + ".csv")
        pcl_dir = base_dir / str(scene).zfill(4)
        pcl_dir.mkdir(exist_ok=True, parents=True)
        scenes = []
        track_ids = []
        img_ids = []
        num_poses = []
        num_other_tracks = []
        fnames = []
        gt_poses = get_gt_poses_from_kitti(kitti_path=cfg.KITTI_PATH, scene=scene)
        label_data = get_gt_detection_data_from_kitti(
            kitti_path=cfg.KITTI_PATH, scene=scene, poses=gt_poses
        )
        for track_id, track_data in tqdm.tqdm(
            label_data.items(), position=1, total=len(label_data)
        ):
            for img_id, row_data in track_data.items():
                # synthesize a fake point cloud for this detection and cache it on disk
                pcl = _generate_point_cloud(row_data)
                pcl_fname = pcl_dir / (
                    str(track_id).zfill(3) + str(img_id).zfill(4) + ".npy"
                )

                np.save(pcl_fname, pcl)
                scenes.append(scene)
                track_ids.append(track_id)
                img_ids.append(img_id)
                num_poses.append(max(track_data.keys()) - min(track_data.keys()))
                num_other_tracks.append(
                    len(label_data)
                )  # not correct, but not needed for fake data
                fnames.append(pcl_fname)
        # build the per-scene CSV index referencing every saved point cloud
        df = pd.DataFrame(
            dict(
                track_id=track_ids,
                pointcloud_fname=fnames,
                num_poses=num_poses,
                num_other_tracks=num_other_tracks,
                img_id=img_ids,
                scene=scenes,
            )
        )
        df.to_csv(out_fname, index=False)
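The main function above writes one .npy point cloud per detection and one CSV index per scene. A minimal sketch of how it could be driven from the command line follows; the --base-dir argument name and the argparse/pathlib wiring are assumptions for illustration, not the project's actual CLI.

import argparse
from pathlib import Path

if __name__ == "__main__":
    # hypothetical entry point; the real script may parse its arguments differently
    parser = argparse.ArgumentParser(description="Generate fake point-cloud training data.")
    parser.add_argument("--base-dir", type=Path, required=True,
                        help="directory that will hold the per-scene CSVs and .npy files")
    args = parser.parse_args()
    main(args.base_dir)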
Code Example #2
        flag_class = threading.Event
        process_class = threading.Thread
    # queues and event flags shared between the pipeline's worker threads/processes
    shared_data = queue_class()
    returned_data = queue_class()
    writer_data_2d = queue_class()
    writer_data_3d = queue_class()
    slam_data = queue_class()
    stop_flag = flag_class()
    next_step = flag_class()
    img_shape = get_image_shape(kitti_path, scene)
    image_stream = _get_image_stream(kitti_path,
                                     scene,
                                     stop_flag,
                                     offset=args.offset)
    stereo_cam, T02 = get_cameras_from_kitti(kitti_path, scene)
    gt_poses = get_gt_poses_from_kitti(kitti_path, scene)
    detection_stream = get_detection_stream(
        obj_detections_path,
        scene=scene,
        offset=args.offset,
        object_ids=[int(idx)
                    for idx in args.indeces] if args.indeces else None,
        classes=args.classes,
    )

    slam_process = process_class(target=_fake_slam,
                                 args=[slam_data, gt_poses, args.offset],
                                 name="Fake SLAM")
    write_2d_process = process_class(
        target=_write_2d_detections,
        kwargs={
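The excerpt above presumably falls back to threading primitives when multiprocessing is not requested, and then builds the shared queues, flags, and worker processes from the selected classes. A self-contained sketch of that backend-selection pattern, with a hypothetical use_multiprocessing flag and _worker function, might look like this:

import multiprocessing
import queue
import threading

use_multiprocessing = False  # hypothetical flag; bamot reads this from its CLI arguments

if use_multiprocessing:
    queue_class = multiprocessing.Queue
    flag_class = multiprocessing.Event
    process_class = multiprocessing.Process
else:
    # fall back to in-process threading primitives with the same interface
    queue_class = queue.Queue
    flag_class = threading.Event
    process_class = threading.Thread

def _worker(data_queue, stop_flag):
    # drain the queue until the stop flag is set and no items remain
    while not stop_flag.is_set() or not data_queue.empty():
        try:
            item = data_queue.get(timeout=0.1)
        except queue.Empty:
            continue
        print("processed", item)

shared_data = queue_class()
stop_flag = flag_class()
worker = process_class(target=_worker, args=(shared_data, stop_flag), name="demo-worker")
worker.start()
for i in range(3):
    shared_data.put(i)
stop_flag.set()
worker.join()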
Code Example #3
    def setup(self, stage: Optional[str] = None):
        all_files = list(
            filter(lambda f: f.suffix == ".csv", self._dataset_dir.iterdir()))
        if not all_files:
            raise ValueError(f"No `.csv` files found at `{self._dataset_dir}`")
        # `DataFrame.append` was removed in pandas 2.0, so concatenate the
        # per-scene CSV files in a single call instead
        dataset = pd.concat([pd.read_csv(f, index_col=False) for f in all_files])
        dataset.dropna(inplace=True)
        dataset.reset_index(inplace=True, drop=True)

        target_yaw = []
        target_pos = []
        all_gt_data = {}
        idx_to_remove = []
        for idx, row in enumerate(dataset.itertuples()):
            scene = int(row.scene)
            if scene not in all_gt_data:  # load gt data dynamically
                gt_poses = get_gt_poses_from_kitti(
                    kitti_path=config.KITTI_PATH, scene=scene)
                label_data = get_gt_detection_data_from_kitti(
                    kitti_path=config.KITTI_PATH, scene=scene, poses=gt_poses)
                all_gt_data[scene] = label_data

            img_id = int(row.img_id)
            if self._track_id_mapping:
                track_id = self._track_id_mapping.get(row.track_id)
                if track_id is None:
                    idx_to_remove.append(idx)
                    continue
            else:
                track_id = int(row.track_id)
            gt_track_data = all_gt_data[scene].get(track_id)
            if gt_track_data is None:
                idx_to_remove.append(idx)
                continue
            gt_data = gt_track_data.get(img_id)
            if gt_data is None:
                idx_to_remove.append(idx)
                continue  # no corresponding GT detection
            target_yaw.append(gt_data.rot_angle)
            target_pos.append(gt_data.cam_pos)
        dataset.drop(idx_to_remove, inplace=True)
        dataset["target_yaw"] = target_yaw
        dataset["target_pos"] = target_pos
        size = len(dataset)
        val_size = int(
            size *
            (self._train_val_test_ratio[1] / sum(self._train_val_test_ratio)))
        test_size = int(
            size *
            (self._train_val_test_ratio[2] / sum(self._train_val_test_ratio)))
        train_size = size - val_size - test_size
        # shuffle dataframe first
        dataset = dataset.sample(frac=1, random_state=42)
        self._dataset["train"] = dataset.iloc[:train_size]
        self._dataset["val"] = dataset.iloc[train_size:train_size + val_size]
        self._dataset["test"] = dataset.iloc[-test_size:]