Example #1
def transform_xyz(xyz: np.ndarray, pose1: SE3, pose2: SE3) -> np.ndarray:
    """Transform points from the `pose1` frame to the `pose2` frame."""
    # bring the points into the shared city frame first
    city_coord = pose1.transform_point_cloud(xyz)
    # then map from the city frame into the `pose2` frame
    return pose2.inverse_transform_point_cloud(city_coord)
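A minimal usage sketch of transform_xyz (the poses and point values below are made up for illustration; SE3 is argoverse.utils.se3.SE3):

import numpy as np

from argoverse.utils.se3 import SE3

# two hypothetical egovehicle poses in the city frame
pose_t0 = SE3(rotation=np.eye(3), translation=np.array([10.0, 0.0, 0.0]))
pose_t1 = SE3(rotation=np.eye(3), translation=np.array([12.0, 0.0, 0.0]))

pts_t0 = np.array([[1.0, 2.0, 0.0]])              # points in the t0 egovehicle frame
pts_t1 = transform_xyz(pts_t0, pose_t0, pose_t1)  # -> [[-1.0, 2.0, 0.0]]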
Example #2
    def render_bev_labels_mpl_tmp(
        self,
        city_name: str,
        ax: plt.Axes,
        axis: str,
        lidar_pts: np.ndarray,
        local_lane_polygons: np.ndarray,
        local_das: np.ndarray,
        city_to_egovehicle_se3: SE3,
        avm: ArgoverseMap,
    ) -> None:
        """Plot nearby lane polygons and nearby driveable areas (da) on the Matplotlib axes.

        Args:
            city_name: The name of a city, e.g. `"PIT"`
            ax: Matplotlib axes
            axis: Either 'ego_axis' or 'city_axis', selecting the reference frame in which to render
            lidar_pts: Numpy array of shape (N,3)
            local_lane_polygons: Polygons representing the local lane set
            local_das: Numpy array of objects of shape (N,) where each object is of shape (M,3)
            city_to_egovehicle_se3: Transformation from egovehicle frame to city frame
            avm: ArgoverseMap instance
        """
        if axis != "city_axis":
            # rendering instead in the egovehicle reference frame
            for da_idx, local_da in enumerate(local_das):
                local_da = city_to_egovehicle_se3.inverse_transform_point_cloud(local_da)
                # rotate by the egovehicle rotation: geometry stays ego-centered but city-axis-aligned
                local_das[da_idx] = rotate_polygon_about_pt(local_da, city_to_egovehicle_se3.rotation, np.zeros(3))

            for lane_idx, local_lane_polygon in enumerate(local_lane_polygons):
                local_lane_polygon = city_to_egovehicle_se3.inverse_transform_point_cloud(local_lane_polygon)
                local_lane_polygons[lane_idx] = rotate_polygon_about_pt(
                    local_lane_polygon, city_to_egovehicle_se3.rotation, np.zeros(3)
                )

        draw_lane_polygons(ax, local_lane_polygons)
        draw_lane_polygons(ax, local_das, color="tab:pink")

        if axis != "city_axis":
            lidar_pts = rotate_polygon_about_pt(lidar_pts, city_to_egovehicle_se3.rotation, np.zeros((3,)))
            draw_point_cloud_bev(ax, lidar_pts)
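The inverse-transform-then-rotate idiom above recenters map geometry at the egovehicle while restoring the city-frame orientation. A plain-NumPy sketch of that composition (rotate_polygon_about_pt is approximated by a matrix product here; all values are illustrative):

import numpy as np

R = np.eye(3)                               # egovehicle rotation in the city frame
t = np.array([100.0, 50.0, 0.0])            # egovehicle translation in the city frame
polygon_city = np.array([[101.0, 51.0, 0.0], [102.0, 50.0, 0.0]])

polygon_ego = (polygon_city - t) @ R        # inverse_transform_point_cloud
polygon_out = polygon_ego @ R.T             # rotate by R about the origin
# polygon_out == polygon_city - t: ego-centered, city-axis-aligned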
Example #3
    def get_future_traj(self, axes: Axes, log_id: str, timestamp_ind: int,
                        city_to_egovehicle_se3: SE3) -> np.ndarray:
        """Collect the next FUTURE_TIME_STEP egovehicle positions, expressed in the current egovehicle frame.

        Returns:
            Numpy array of shape (FUTURE_TIME_STEP, 2) of (x, y) waypoints.
        """
        traj = np.zeros((FUTURE_TIME_STEP, 2))
        for ind, i in enumerate(
                range(timestamp_ind + 1,
                      timestamp_ind + FUTURE_TIME_STEP + 1)):
            timestamp = self.timestamps[i]
            pose_city_to_ego = self.log_egopose_dict[log_id][timestamp]
            relative_pose = city_to_egovehicle_se3.inverse_transform_point_cloud(
                pose_city_to_ego["translation"])
            traj[ind, :] = relative_pose[0:2]

        # optionally render: axes.scatter(traj[:, 0], traj[:, 1], s=5, c='g')
        return traj
Example #4
    def render_ego_past_traj(
        self,
        axes: Axes,
        log_id: str,
        timestamp_ind: int,
        city_to_egovehicle_se3: SE3,
    ) -> None:
        """Render the past EGO_TIME_STEP egovehicle positions, expressed in the current egovehicle frame."""
        # the current pose is the origin of the egovehicle frame
        x = [0]
        y = [0]
        for i in range(timestamp_ind - EGO_TIME_STEP, timestamp_ind):
            timestamp = self.timestamps[i]
            pose_city_to_ego = self.log_egopose_dict[log_id][timestamp]
            relative_pose = city_to_egovehicle_se3.inverse_transform_point_cloud(
                pose_city_to_ego["translation"])
            x.append(relative_pose[0])
            y.append(relative_pose[1])
        axes.scatter(x, y, s=5, c='b', alpha=1, zorder=2)
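A sketch pairing the two trajectory helpers on one set of axes (viz stands in for the visualization-class instance and is a hypothetical name):

viz.render_ego_past_traj(axes, log_id, timestamp_ind, city_to_egovehicle_se3)
future_traj = viz.get_future_traj(axes, log_id, timestamp_ind, city_to_egovehicle_se3)
axes.scatter(future_traj[:, 0], future_traj[:, 1], s=5, c='g', alpha=1, zorder=2)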
Example #5
    def render_bev_labels_mpl(
        self,
        city_name: str,
        ax: plt.Axes,
        axis: str,
        lidar_pts: np.ndarray,
        local_lane_polygons: np.ndarray,
        local_das: np.ndarray,
        log_id: str,
        timestamp: int,
        city_to_egovehicle_se3: SE3,
        avm: ArgoverseMap,
    ) -> None:
        """Plot nearby lane polygons and nearby driveable areas (da) on the Matplotlib axes.

        Args:
            city_name: The name of a city, e.g. `"PIT"`
            ax: Matplotlib axes
            axis: Either 'ego_axis' or 'city_axis', selecting the reference frame in which to render
            lidar_pts: Numpy array of shape (N,3)
            local_lane_polygons: Polygons representing the local lane set
            local_das: Numpy array of objects of shape (N,) where each object is of shape (M,3)
            log_id: The ID of a log
            timestamp: In nanoseconds
            city_to_egovehicle_se3: Transformation from egovehicle frame to city frame
            avm: ArgoverseMap instance
        """
        if axis != "city_axis":
            # rendering instead in the egovehicle reference frame
            for da_idx, local_da in enumerate(local_das):
                local_da = city_to_egovehicle_se3.inverse_transform_point_cloud(
                    local_da)
                local_das[da_idx] = rotate_polygon_about_pt(
                    local_da, city_to_egovehicle_se3.rotation, np.zeros(3))

            for lane_idx, local_lane_polygon in enumerate(local_lane_polygons):
                local_lane_polygon = city_to_egovehicle_se3.inverse_transform_point_cloud(
                    local_lane_polygon)
                local_lane_polygons[lane_idx] = rotate_polygon_about_pt(
                    local_lane_polygon, city_to_egovehicle_se3.rotation,
                    np.zeros(3))

        draw_lane_polygons(ax, local_lane_polygons)
        draw_lane_polygons(ax, local_das, color="tab:pink")

        if axis != "city_axis":
            lidar_pts = rotate_polygon_about_pt(
                lidar_pts, city_to_egovehicle_se3.rotation, np.zeros((3, )))
            draw_point_cloud_bev(ax, lidar_pts)

        objects = self.log_timestamp_dict[log_id][timestamp]

        all_occluded = True
        for frame_rec in objects:
            if frame_rec.occlusion_val != IS_OCCLUDED_FLAG:
                all_occluded = False

        if not all_occluded:
            for i, frame_rec in enumerate(objects):
                bbox_city_fr = frame_rec.bbox_city_fr
                bbox_ego_frame = frame_rec.bbox_ego_frame
                color = frame_rec.color

                if frame_rec.occlusion_val != IS_OCCLUDED_FLAG:
                    bbox_ego_frame = rotate_polygon_about_pt(
                        bbox_ego_frame, city_to_egovehicle_se3.rotation,
                        np.zeros((3, )))
                    if axis == "city_axis":
                        plot_bbox_2D(ax, bbox_city_fr, color)
                        if self.plot_lane_tangent_arrows:
                            bbox_center = np.mean(bbox_city_fr, axis=0)
                            tangent_xy, conf = avm.get_lane_direction(
                                query_xy_city_coords=bbox_center[:2],
                                city_name=city_name)
                            dx = tangent_xy[0] * LANE_TANGENT_VECTOR_SCALING
                            dy = tangent_xy[1] * LANE_TANGENT_VECTOR_SCALING
                            ax.arrow(bbox_center[0],
                                     bbox_center[1],
                                     dx,
                                     dy,
                                     color="r",
                                     width=0.5,
                                     zorder=2)
                    else:
                        plot_bbox_2D(ax, bbox_ego_frame, color)
                        cuboid_lidar_pts, _ = filter_point_cloud_to_bbox_2D_vectorized(
                            bbox_ego_frame[:, :2], copy.deepcopy(lidar_pts))
                        if cuboid_lidar_pts is not None:
                            draw_point_cloud_bev(ax, cuboid_lidar_pts, color)

        else:
            logger.info(f"all occluded at {timestamp}")
            xcenter = city_to_egovehicle_se3.translation[0]
            ycenter = city_to_egovehicle_se3.translation[1]
            ax.text(xcenter - 146,
                    ycenter + 50,
                    "ALL OBJECTS OCCLUDED",
                    fontsize=30)
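A sketch of how this method might be driven for a single frame; viz and the 80 m query window are illustrative, while find_local_lane_polygons and find_local_driveable_areas follow the ArgoverseMap API:

import matplotlib.pyplot as plt

fig = plt.figure(figsize=(10, 10))
ax = fig.add_subplot(111)

xcenter, ycenter = city_to_egovehicle_se3.translation[:2]
query_bbox = (xcenter - 80, xcenter + 80, ycenter - 80, ycenter + 80)
local_lane_polygons = avm.find_local_lane_polygons(query_bbox, city_name)
local_das = avm.find_local_driveable_areas(query_bbox, city_name)

viz.render_bev_labels_mpl(city_name, ax, "city_axis", lidar_pts,
                          local_lane_polygons, local_das, log_id, timestamp,
                          city_to_egovehicle_se3, avm)
ax.axis("equal")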
Example #6
def draw_ground_pts_in_image(
    sdb: SynchronizationDB,
    lidar_points: np.ndarray,
    city_to_egovehicle_se3: SE3,
    dataset_map: ArgoverseMap,
    log_id: str,
    lidar_timestamp: int,
    city_name: str,
    dataset_dir: str,
    experiment_prefix: str,
    plot_ground: bool = True,
    motion_compensate: bool = False,
    camera: Optional[str] = None,
) -> Union[None, np.ndarray]:
    """Write an image to disk with rendered ground points for every camera.

    Args:
        sdb: instance of SynchronizationDB
        lidar_points: Numpy array of shape (N,3) in egovehicle frame
        city_to_egovehicle_se3: SE3 instance which takes a point in egovehicle frame and brings it into city frame
        dataset_map: Map dataset instance
        log_id: ID of the log
        city_name: A city's name (e.g. 'MIA' or 'PIT')
        motion_compensate: Whether to bring lidar points from world frame @ lidar timestamp, to world frame @ camera
                           timestamp
        camera: camera name, if specified will return image of that specific camera, if None, will save all camera to
                disk and return None

    """
    # put into city coords, then prune away non-driveable-area points
    lidar_points = city_to_egovehicle_se3.transform_point_cloud(lidar_points)
    lidar_points = dataset_map.remove_non_driveable_area_points(
        lidar_points, city_name)
    _, not_ground_logicals = dataset_map.remove_ground_surface(
        copy.deepcopy(lidar_points), city_name, return_logicals=True)
    # keep the ground points if plot_ground, otherwise keep the non-ground points
    keep_logicals = np.logical_not(
        not_ground_logicals) if plot_ground else not_ground_logicals
    lidar_points = lidar_points[keep_logicals]

    # put back into ego-vehicle coords
    lidar_points = city_to_egovehicle_se3.inverse_transform_point_cloud(
        lidar_points)

    calib_fpath = f"{dataset_dir}/{log_id}/vehicle_calibration_info.json"
    calib_data = read_json_file(calib_fpath)

    # colormap binned by range: red (near) to green (far), repeating every NUM_RANGE_BINS meters
    colors_arr = np.array([
        [color_obj.rgb]
        for color_obj in Color("red").range_to(Color("green"), NUM_RANGE_BINS)
    ]).squeeze()

    for camera_name in RING_CAMERA_LIST + STEREO_CAMERA_LIST:
        im_dir = f"{dataset_dir}/{log_id}/{camera_name}"

        # find the image captured closest in time to this lidar sweep
        cam_timestamp = sdb.get_closest_cam_channel_timestamp(
            lidar_timestamp, camera_name, log_id)
        if cam_timestamp is None:
            continue

        im_fname = f"{camera_name}_{cam_timestamp}.jpg"
        im_fpath = f"{im_dir}/{im_fname}"

        # Swap channel order as OpenCV expects it -- BGR not RGB
        # must make a copy to make memory contiguous
        img = imageio.imread(im_fpath)[:, :, ::-1].copy()
        points_h = point_cloud_to_homogeneous(copy.deepcopy(lidar_points)).T

        if motion_compensate:
            uv, uv_cam, valid_pts_bool = project_lidar_to_img_motion_compensated(
                points_h,  # these are recorded at lidar_time
                copy.deepcopy(calib_data),
                camera_name,
                cam_timestamp,
                lidar_timestamp,
                dataset_dir,
                log_id,
                False,
            )
        else:
            uv, uv_cam, valid_pts_bool = project_lidar_to_img(
                points_h, copy.deepcopy(calib_data), camera_name, False)

        if valid_pts_bool is None or uv is None or uv_cam is None:
            continue

        if valid_pts_bool.sum() == 0:
            continue

        uv = np.round(uv[valid_pts_bool]).astype(np.int32)
        uv_cam = uv_cam.T[valid_pts_bool]
        pt_ranges = np.linalg.norm(uv_cam[:, :3], axis=1)
        rgb_bins = np.round(pt_ranges).astype(np.int32)
        # account for moving past 100 meters, loop around again
        rgb_bins = rgb_bins % NUM_RANGE_BINS
        uv_colors = (255 * colors_arr[rgb_bins]).astype(np.int32)

        img = draw_point_cloud_in_img_cv2(img, uv, np.fliplr(uv_colors))

        save_dir = f"{experiment_prefix}_ground_viz/{log_id}/{camera_name}"
        os.makedirs(save_dir, exist_ok=True)
        cv2.imwrite(f"{save_dir}/{camera_name}_{lidar_timestamp}.jpg", img)
        if camera == camera_name:
            return cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    return None
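A sketch of a typical invocation (all variable values are illustrative; passing camera returns that camera's rendering as an RGB array):

img = draw_ground_pts_in_image(
    sdb,
    lidar_points,               # (N,3), egovehicle frame
    city_to_egovehicle_se3,
    avm,                        # ArgoverseMap instance
    log_id,
    lidar_timestamp,
    "PIT",
    dataset_dir,
    "ground_demo",
    plot_ground=True,
    camera="ring_front_center",
)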
Example #7
    def render_bev_labels_mpl(
        self,
        city_name: str,
        ax: plt.Axes,
        axis: str,
        # lidar_pts: np.ndarray,
        local_lane_polygons: np.ndarray,
        local_das: np.ndarray,
        log_id: str,
        timestamp: int,
        timestamp_ind: int,
        city_to_egovehicle_se3: SE3,
        avm: ArgoverseMap,
    ) -> None:
        """Plot nearby lane polygons and nearby driveable areas (da) on the Matplotlib axes.

        Args:
            city_name: The name of a city, e.g. `"PIT"`
            ax: Matplotlib axes
            axis: Either 'ego_axis' or 'city_axis', selecting the reference frame in which to render
            local_lane_polygons: Polygons representing the local lane set
            local_das: Numpy array of objects of shape (N,) where each object is of shape (M,3)
            log_id: The ID of a log
            timestamp: In nanoseconds
            timestamp_ind: Index of `timestamp` within `self.timestamps`
            city_to_egovehicle_se3: Transformation from egovehicle frame to city frame
            avm: ArgoverseMap instance
        """
        if axis != "city_axis":
            # rendering instead in the egovehicle reference frame
            for da_idx, local_da in enumerate(local_das):
                local_da = city_to_egovehicle_se3.inverse_transform_point_cloud(
                    local_da)
                # local_das[da_idx] = rotate_polygon_about_pt(local_da, city_to_egovehicle_se3.rotation, np.zeros(3))
                local_das[da_idx] = local_da

            for lane_idx, local_lane_polygon in enumerate(local_lane_polygons):
                local_lane_polygon = city_to_egovehicle_se3.inverse_transform_point_cloud(
                    local_lane_polygon)
                # local_lane_polygons[lane_idx] = rotate_polygon_about_pt(
                #     local_lane_polygon, city_to_egovehicle_se3.rotation, np.zeros(3)
                # )
                local_lane_polygons[lane_idx] = local_lane_polygon

        draw_lane_polygons(ax, local_lane_polygons, color="tab:gray")
        draw_lane_polygons(ax, local_das, color=(1, 0, 1))

        # render the ego vehicle as an approximate 2 m x 4 m footprint in its own frame
        bbox_ego = np.array([[-1.0, -1.0, 0.0], [3.0, -1.0, 0.0],
                             [-1.0, 1.0, 0.0], [3.0, 1.0, 0.0]])
        # bbox_ego = rotate_polygon_about_pt(bbox_ego, city_to_egovehicle_se3.rotation, np.zeros((3,)))
        plot_bbox_2D(ax, bbox_ego, 'g')

        # render surrounding vehicle and pedestrians
        objects = self.log_timestamp_dict[log_id][timestamp]
        hist_objects = {}

        all_occluded = True
        for frame_rec in objects:
            if frame_rec.occlusion_val != IS_OCCLUDED_FLAG:
                all_occluded = False
            hist_objects[frame_rec.track_uuid] = [None] * (SURR_TIME_STEP + 1)
            hist_objects[frame_rec.track_uuid][SURR_TIME_STEP] = frame_rec.bbox_city_fr

        if not all_occluded:
            for ind, i in enumerate(
                    range(timestamp_ind - SURR_TIME_STEP, timestamp_ind)):
                objects = self.log_timestamp_dict[log_id][self.timestamps[i]]
                for frame_rec in objects:
                    if frame_rec.track_uuid in hist_objects:
                        hist_objects[
                            frame_rec.track_uuid][ind] = frame_rec.bbox_city_fr

            # render color with decreasing lightness
            color_lightness = np.linspace(1, 0,
                                          SURR_TIME_STEP + 2)[1:].tolist()
            for track_id in hist_objects:
                for ind_color, bbox_city_fr in enumerate(
                        hist_objects[track_id]):
                    if bbox_city_fr is None:
                        continue
                    bbox_relative = city_to_egovehicle_se3.inverse_transform_point_cloud(
                        bbox_city_fr)

                    x = bbox_relative[:, 0].tolist()
                    y = bbox_relative[:, 1].tolist()
                    # reorder vertices so ax.fill traces the box perimeter in sequence
                    x[1], x[2], x[3] = x[2], x[3], x[1]
                    y[1], y[2], y[3] = y[2], y[3], y[1]
                    ax.fill(x,
                            y,
                            c=(1, 1, color_lightness[ind_color]),
                            alpha=1,
                            zorder=1)

        else:
            logger.info(f"all occluded at {timestamp}")
            xcenter = city_to_egovehicle_se3.translation[0]
            ycenter = city_to_egovehicle_se3.translation[1]
            ax.text(xcenter - 146,
                    ycenter + 50,
                    "ALL OBJECTS OCCLUDED",
                    fontsize=30)
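The history rendering above fades older boxes toward white by raising the blue channel. A self-contained sketch of that color ramp (the SURR_TIME_STEP value is illustrative):

import numpy as np

SURR_TIME_STEP = 5
# drop the first (pure-white) entry, exactly as the method does
color_lightness = np.linspace(1, 0, SURR_TIME_STEP + 2)[1:].tolist()
# index 0 (oldest) is near-white; index SURR_TIME_STEP (current) is pure yellow
colors = [(1, 1, b) for b in color_lightness]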