Example #1
    # Constructor of a Lyft(SequenceDataset) wrapper; the full class appears in Example #7.
    def __init__(self, root, split=None):

        self.data = LyftDataset(data_path=root,
                                json_path=os.path.join(
                                    root,
                                    os.path.basename(os.path.normpath(root))),
                                verbose=False)

        self.explorer = LyftDatasetExplorer(self.data)

        super().__init__(root, split)
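# Sketch (added): how the json_path argument above is derived. The root path below is
# hypothetical, purely for illustration; the expression simply appends the root
# folder's own name to the root, e.g. ".../train_data/train_data".
import os

root = "/data/lyft/train_data"  # hypothetical dataset root
json_path = os.path.join(root, os.path.basename(os.path.normpath(root)))
print(json_path)  # -> /data/lyft/train_data/train_data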
Example #2
def render_ann(ann_token, dataLyft3D):
    ann_record = dataLyft3D.get("sample_annotation", ann_token)
    sample_record = dataLyft3D.get("sample", ann_record["sample_token"])

    fig, axes = plt.subplots(1, 1, figsize=(12, 6))

    # Find a camera in which the annotation is visible (this may find nothing).
    boxes, cam = [], None
    cams = [key for key in sample_record["data"].keys() if "CAM" in key]
    for cam in cams:
        _, boxes, _ = dataLyft3D.get_sample_data(
            sample_record["data"][cam],
            box_vis_level=BoxVisibility.ANY,
            selected_anntokens=[ann_token])
        if len(boxes) > 0:
            break  # We found an image that matches. Let's abort.
    cam = sample_record["data"][cam]
    data_path, boxes, camera_intrinsic = dataLyft3D.get_sample_data(
        cam, selected_anntokens=[ann_token])
    im = Image.open(data_path)
    axes.imshow(im)
    axes.set_title(dataLyft3D.get("sample_data", cam)["channel"])
    axes.axis("off")
    axes.set_aspect("equal")
    for box in boxes:
        c = np.array(LyftDatasetExplorer.get_color(box.name)) / 255.0
        render_box(box,
                   axes,
                   view=camera_intrinsic,
                   normalize=True,
                   colors=(c, c, c),
                   im=im)
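# Usage sketch (added, hedged): render the first annotation of the first sample,
# assuming a loaded dataset (e.g. `level5data` from Example #6) and that `render_box`
# is a plotting helper defined elsewhere in the same project.
sample = level5data.get("sample", level5data.scene[0]["first_sample_token"])
render_ann(sample["anns"][0], level5data)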
Example #3
    def __init__(self,
                 data_path: str,
                 json_path: str,
                 verbose: bool = True,
                 map_resolution: float = 0.1):
        """Loads database and creates reverse indexes and shortcuts.
        Args:
            data_path: Path to the tables and data.
            json_path: Path to the folder with json files
            verbose: Whether to print status messages during load.
            map_resolution: Resolution of maps (meters).
        """

        self.data_path = Path(data_path).expanduser().absolute()
        self.json_path = Path(json_path)

        self.table_names = [
            "category",
            "attribute",
            "sensor",
            "calibrated_sensor",
            "ego_pose",
            "log",
            "scene",
            "sample",
            "sample_data",
            "map",
        ]

        start_time = time.time()

        # Explicitly assign tables to help the IDE determine valid class members.
        self.category = self.__load_table__("category")
        self.attribute = self.__load_table__("attribute")

        self.sensor = self.__load_table__("sensor")
        self.calibrated_sensor = self.__load_table__("calibrated_sensor")
        self.ego_pose = self.__load_table__("ego_pose")
        self.log = self.__load_table__("log")
        self.scene = self.__load_table__("scene")
        self.sample = self.__load_table__("sample")
        self.sample_data = self.__load_table__("sample_data")

        self.map = self.__load_table__("map")

        if verbose:
            for table in self.table_names:
                print("{} {},".format(len(getattr(self, table)), table))
            print(
                "Done loading in {:.1f} seconds.\n======".format(time.time() -
                                                                 start_time))

        # Initialize LyftDatasetExplorer class
        self.explorer = LyftDatasetExplorer(self)
        # Make reverse indexes for common lookups.
        self.__make_reverse_index__(verbose)
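# Sketch (added, hedged): __load_table__ and __make_reverse_index__ are not shown above.
# In the lyft_dataset_sdk, loading a table appears to amount to reading
# "<table_name>.json" from json_path; a minimal stand-in could look like this:
import json
from pathlib import Path

def load_table(json_path: Path, table_name: str) -> list:
    """Read one table (a JSON list of records) from the dataset's json folder."""
    with open(str(json_path / "{}.json".format(table_name))) as f:
        return json.load(f)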
Example #4
def get_sample_images(sample_token: str, ax=None, lyftd=level5data):
    record = lyftd.get("sample", sample_token)

    # Separate RADAR from LIDAR and vision.
    radar_data = {}
    nonradar_data = {}

    for channel, token in record["data"].items():
        sd_record = lyftd.get("sample_data", token)
        sensor_modality = sd_record["sensor_modality"]
        if sensor_modality in ["lidar", "camera"]:
            nonradar_data[channel] = token
        else:
            radar_data[channel] = token

    # Project the 3D boxes into each camera image and save one figure per camera.
    for channel, token in nonradar_data.items():
        sd_record = lyftd.get("sample_data", token)
        sensor_modality = sd_record["sensor_modality"]

        if sensor_modality == "camera":
            # Load boxes and image.
            data_path, boxes, camera_intrinsic = lyftd.get_sample_data(token, box_vis_level=BoxVisibility.ANY)

            data = Image.open(data_path)

            # Init axes.
            fig, ax = plt.subplots(1, 1, figsize=(9, 16))

            # Show image.
            ax.imshow(data)

            for box in boxes:
                c = np.array(LyftDatasetExplorer.get_color(box.name)) / 255.0
                box.render(ax, view=camera_intrinsic, normalize=True, colors=(c, c, c))

            # Limit visible range.
            ax.set_xlim(0, data.size[0])
            ax.set_ylim(data.size[1], 0)

            fig.savefig("./temp_figs/{}.png".format(token), dpi=300)
            plt.close()
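# Usage sketch (added, hedged): save one annotated figure per camera for the first
# sample. Note that the `ax` argument above is effectively ignored (a new figure is
# created inside the loop), and "./temp_figs" must exist before saving.
import os
os.makedirs("./temp_figs", exist_ok=True)
get_sample_images(level5data.scene[0]["first_sample_token"])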
Example #5
def render_sample(sample_token, dataLyft3D):

    fig, axes = plt.subplots(1, 1, figsize=(12, 6))

    # Note: despite its name, `sample_token` must be a camera sample_data token here.
    cam = dataLyft3D.get("sample_data", sample_token)
    data_path, boxes, camera_intrinsic = dataLyft3D.get_sample_data(
        sample_token, selected_anntokens=None)
    im = Image.open(data_path)
    axes.imshow(im)
    axes.set_title(cam["channel"])
    axes.axis("off")
    axes.set_aspect("equal")
    for box in boxes:
        c = np.array(LyftDatasetExplorer.get_color(box.name)) / 255.0
        render_box(box,
                   axes,
                   view=camera_intrinsic,
                   normalize=True,
                   colors=(c, c, c),
                   im=im)
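# Usage sketch (added, hedged): render the front camera of the first sample, assuming
# `dataLyft3D` is a loaded LyftDataset instance.
sample = dataLyft3D.get("sample", dataLyft3D.scene[0]["first_sample_token"])
render_sample(sample["data"]["CAM_FRONT"], dataLyft3D)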
Example #6
# os.symlink("/media/pzr/软件/kaggle/3D/train_images", 'images')
# os.symlink("/media/pzr/软件/kaggle/3D/train_maps", 'maps')
# os.symlink("/media/pzr/软件/kaggle/3D/train_lidar", 'lidar')

# Our code will generate data, visualizations and model checkpoints; they will be persisted to disk in this folder.
DATA_PATH = '/media/pzr/软件/kaggle/3D/train_data'
ARTIFACTS_FOLDER = "/media/pzr/软件/kaggle/3D/artifacts/"

# I created the symlinks above, but the map_raster_palo_alto.png file still could not be resolved, so I changed the last line of map.json to: "filename": "test_maps/map_raster_palo_alto.png", "category": "semantic_prior"}]
# In the LyftDataset source code you can see that it reads all the json files inside train_data.
# All code I modified or added is marked with "pzr" in the comments.
level5data = LyftDataset(data_path='.', json_path=DATA_PATH, verbose=True)
os.makedirs(ARTIFACTS_FOLDER, exist_ok=True)

# pzr: this Explorer can print some dataset information.
dataHelper = LyftDatasetExplorer(level5data)
dataHelper.list_categories()

# Category stats
# animal                      n=  186, width= 0.36±0.12, len= 0.73±0.19, height= 0.51±0.16, lw_aspect= 2.16±0.56
# bicycle                     n=20928, width= 0.63±0.24, len= 1.76±0.29, height= 1.44±0.37, lw_aspect= 3.20±1.17
# bus                         n= 8729, width= 2.96±0.24, len=12.34±3.41, height= 3.44±0.31, lw_aspect= 4.17±1.10
# car                         n=534911, width= 1.93±0.16, len= 4.76±0.53, height= 1.72±0.24, lw_aspect= 2.47±0.22
# emergency_vehicle           n=  132, width= 2.45±0.43, len= 6.52±1.44, height= 2.39±0.59, lw_aspect= 2.66±0.28
# motorcycle                  n=  818, width= 0.96±0.20, len= 2.35±0.22, height= 1.59±0.16, lw_aspect= 2.53±0.50
# other_vehicle               n=33376, width= 2.79±0.30, len= 8.20±1.71, height= 3.23±0.50, lw_aspect= 2.93±0.53
# pedestrian                  n=24935, width= 0.77±0.14, len= 0.81±0.17, height= 1.78±0.16, lw_aspect= 1.06±0.20
# truck                       n=14164, width= 2.84±0.32, len=10.24±4.09, height= 3.44±0.62, lw_aspect= 3.56±1.25

dataHelper.list_attributes()
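# Sketch (added, hedged): a quick sanity check that the map.json edit described above
# resolves to a real file, assuming the `map` table is exposed as an attribute as in
# Example #3 and its records carry a "filename" field relative to data_path ('.').
from pathlib import Path
map_filename = level5data.map[0]["filename"]
print(map_filename, Path(map_filename).exists())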
Example #7
class Lyft(SequenceDataset):
    def __init__(self, root, split=None):

        self.data = LyftDataset(data_path=root,
                                json_path=os.path.join(
                                    root,
                                    os.path.basename(os.path.normpath(root))),
                                verbose=False)

        self.explorer = LyftDatasetExplorer(self.data)

        super().__init__(root, split)

    def _get_sequences(self):
        return [(scene["first_sample_token"], "CAM_FRONT")
                for scene in self.data.scene]

    def _get_samples(self, sequence):
        FIRST_TOKEN, CAMERA = sequence
        next_token = FIRST_TOKEN
        samples = []
        while next_token != "":
            sample = self.data.get("sample", next_token)
            next_token = sample["next"]
            image = sample["data"][CAMERA]
            samples.append(image)
        return samples

    def _load_sample(self, seq_i, sample):

        sd_record = self.data.get("sample_data", sample)
        cs_record = self.data.get("calibrated_sensor",
                                  sd_record["calibrated_sensor_token"])
        pose_record = self.data.get("ego_pose", sd_record["ego_pose_token"])

        K = np.array(cs_record["camera_intrinsic"]).astype(np.float32)

        T = Quaternion(pose_record["rotation"]).transformation_matrix
        T[:3, 3] = np.array(pose_record["translation"])
        #print("before", T)
        T = T @ np.array([
            [0, 0, 1, 0],
            [-1, 0, 0, 0],
            [0, -1, 0, 0],
            [0, 0, 0, 1],
        ])
        #print("after", T)

        img, K = self._load_image_from_disk(
            self.data.get_sample_data_path(sample), K)

        return [img, T, K]

    def _load_depth(self, sample):
        sample_data = self.data.get("sample_data", sample)
        LIDAR_TOKEN = self.data.get(
            "sample", sample_data["sample_token"])["data"]["LIDAR_TOP"]
        points, depths, image = self.explorer.map_pointcloud_to_image(
            LIDAR_TOKEN, sample)
        W, H = image.size
        crop, scale = self.calc_crop(H, W)
        dmap = np.zeros(self.imHW, dtype=np.float64)
        coords = points[:2, :] * scale
        coords[1] -= crop
        mask = np.logical_and(coords[1, :] >= 0, coords[1, :] < self.imHW[0])
        coords = coords[:, mask].astype(int)  # np.int was removed in NumPy >= 1.24
        depths = depths[mask]
        dmap[coords[1], coords[0]] = depths
        return dmap
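# Usage sketch (added, hedged): how this class appears intended to be used, assuming
# the project-specific SequenceDataset base class drives the _get_* / _load_* hooks.
# The dataset root below is hypothetical.
ds = Lyft("/data/lyft/train_data")
first_sequence = ds._get_sequences()[0]         # (first_sample_token, "CAM_FRONT")
frame_tokens = ds._get_samples(first_sequence)  # camera sample_data tokens
print(len(frame_tokens), "CAM_FRONT frames in the first scene")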