def draw_rect(selected_corners: np.array, color: Tuple[int, int, int]) -> None:
    """Draw a closed polygon through the given corners.

    Consecutive corners are connected in order, and the loop is closed by
    also drawing the edge from the last corner back to the first. Relies on
    the enclosing scope for ``img``, ``camera_config``, ``linewidth`` and
    ``planes``.

    Args:
        selected_corners: array of corner points to connect in order
        color: RGB 3-tuple used for every edge
    """
    num_corners = len(selected_corners)
    for idx in range(num_corners):
        # idx - 1 is -1 on the first iteration, i.e. the last corner,
        # which closes the polygon just like the original prev-tracking loop.
        start = selected_corners[idx - 1]
        end = selected_corners[idx]
        draw_clipped_line_segment(
            img,
            start.copy(),
            end.copy(),
            camera_config,
            linewidth,
            planes,
            color,
        )
def plot_lane_centerlines_in_img(
    lidar_pts: np.ndarray,
    city_to_egovehicle_se3: SE3,
    img: np.ndarray,
    city_name: str,
    avm: ArgoverseMap,
    camera_config: CameraConfig,
    planes: Iterable[Tuple[np.array, np.array, np.array, np.array, np.array]],
    color: Tuple[int, int, int] = (0, 255, 255),
    linewidth: Number = 10,
) -> np.ndarray:
    """Overlay nearby lane centerlines from the map onto a camera image.

    Args:
        lidar_pts: LiDAR point cloud used to clip centerlines to the visible region
        city_to_egovehicle_se3: SE3 transformation representing egovehicle to city transformation
        img: Array of shape (M,N,3) representing updated image
        city_name: str, string representing city name, i.e. 'PIT' or 'MIA'
        avm: instance of ArgoverseMap
        camera_config: instance of CameraConfig
        planes: five frustum clipping planes
        color: RGB-tuple representing the base centerline color; each centerline
            is drawn in a slightly noised variant of this color
        linewidth: line width used when rasterizing each segment

    Returns:
        img: Array of shape (M,N,3) representing updated image
    """
    R = camera_config.extrinsic[:3, :3]
    t = camera_config.extrinsic[:3, 3]
    cam_SE3_egovehicle = SE3(rotation=R, translation=t)

    query_x, query_y, _ = city_to_egovehicle_se3.translation
    local_centerlines = avm.find_local_lane_centerlines(query_x, query_y, city_name)

    # Keep the caller-provided color as a fixed base. The previous version
    # rebound `color` on every iteration, so the random perturbations
    # accumulated across centerlines (a random walk) and intensities could
    # drift arbitrarily far from the requested color and outside [0, 255].
    base_color = color
    for centerline_city_fr in local_centerlines:
        # Roughly zero-centered noise per channel, clipped to valid range.
        noised_color = [
            int(np.clip(
                intensity + np.random.randint(0, LANE_COLOR_NOISE) - LANE_COLOR_NOISE // 2,
                0,
                255,
            ))
            for intensity in base_color
        ]
        ground_heights = avm.get_ground_height_at_xy(centerline_city_fr, city_name)
        # NaN ground height means the map has no estimate there; drop those waypoints.
        nan_idx = np.isnan(ground_heights)
        centerline_city_fr = centerline_city_fr[~nan_idx]

        centerline_egovehicle_fr = city_to_egovehicle_se3.inverse().transform_point_cloud(
            centerline_city_fr
        )
        centerline_uv_cam = cam_SE3_egovehicle.transform_point_cloud(
            centerline_egovehicle_fr
        )
        # can also clip point cloud to nearest LiDAR point depth
        centerline_uv_cam = clip_point_cloud_to_visible_region(
            centerline_uv_cam, lidar_pts
        )
        for i in range(centerline_uv_cam.shape[0] - 1):
            draw_clipped_line_segment(
                img,
                centerline_uv_cam[i],
                centerline_uv_cam[i + 1],
                camera_config,
                linewidth,
                planes,
                noised_color,
            )
    return img
def render_clip_frustum_cv2(
    self,
    img: np.array,
    corners: np.array,
    planes: List[Tuple[np.array, np.array, np.array, np.array, np.array]],
    camera_config: CameraConfig,
    colors: Tuple[Tuple[int, int, int], Tuple[int, int, int], Tuple[int, int, int]] = (
        BLUE_RGB,
        RED_RGB,
        GREEN_RGB,
    ),
    linewidth: int = 2,
) -> np.ndarray:
    r"""We bring the 3D points into each camera, and do the clipping there.

    Renders box using OpenCV2. Roughly based on
    https://github.com/nutonomy/nuscenes-devkit/blob/master/python-sdk/nuscenes_utils/data_classes.py

    ::

            5------4
            |\\    |\\
            | \\   | \\
            6--\\--7  \\
            \\  \\  \\ \\
        l    \\  1-------0    h
         e    \\ ||   \\ ||   e
          n    \\||    \\||   i
           g    \\2------3    g
            t      width.     h
             h.               t.

    Args:
        img: Numpy array of shape (M,N,3)
        corners: Numpy array of shape (8,3) in camera coordinate frame.
        planes: Iterable of 5 clipping planes. Each plane is defined by 4 points.
        camera_config: CameraConfig object
        colors: tuple of RGB 3-tuples, Colors for front, side & rear.
            defaults are    0. blue (0,0,255) in RGB and (255,0,0) in OpenCV's BGR
                            1. red (255,0,0) in RGB and (0,0,255) in OpenCV's BGR
                            2. green (0,255,0) in RGB and BGR alike.
        linewidth: integer, linewidth for plot

    Returns:
        img: Numpy array of shape (M,N,3), representing updated image
    """

    def draw_rect(selected_corners: np.array, color: Tuple[int, int, int]) -> None:
        # Walk the 4 corners as a closed loop: start from the last corner so
        # the final edge (last -> first) is drawn as well.
        prev = selected_corners[-1]
        for corner in selected_corners:
            # Defensive copies — presumably draw_clipped_line_segment may
            # modify its endpoint arguments in place; TODO confirm.
            draw_clipped_line_segment(
                img,
                prev.copy(),
                corner.copy(),
                camera_config,
                linewidth,
                planes,
                color,
            )
            prev = corner

    # Draw the sides in green
    for i in range(4):
        # between front and back corners
        draw_clipped_line_segment(
            img,
            corners[i],
            corners[i + 4],
            camera_config,
            linewidth,
            planes,
            # [::-1] flips the RGB tuple into OpenCV's BGR channel order.
            colors[2][::-1],
        )

    # Draw front (first 4 corners) in blue
    draw_rect(corners[:4], colors[0][::-1])
    # Draw rear (last 4 corners) in red
    draw_rect(corners[4:], colors[1][::-1])

    # grab the top vertices
    center_top = np.mean(corners[TOP_VERT_INDICES], axis=0)
    # Project the top-face centroid into the image to anchor the class label.
    uv_ct, _, _, _ = proj_cam_to_uv(center_top.reshape(1, 3), camera_config)
    uv_ct = uv_ct.squeeze().astype(np.int32)  # cast to integer
    # Only render the label when the box is close by and its projected anchor
    # lies within the image bounds.
    if label_is_closeby(center_top) and uv_coord_is_valid(uv_ct, img):
        top_left = (uv_ct[0] - BKGRND_RECT_OFFS_LEFT, uv_ct[1] - BKGRND_RECT_OFFS_UP)
        bottom_right = (uv_ct[0] + BKGRND_RECT_OFFS_LEFT, uv_ct[1] + BKGRND_RECT_OFFS_DOWN)
        # Semi-transparent background rectangle behind the text for legibility.
        img = draw_alpha_rectangle(img, top_left, bottom_right, EMERALD_RGB, alpha=BKGRND_RECT_ALPHA)
        add_text_cv2(img, text=str(self.label_class), x=uv_ct[0] - TEXT_OFFS_LEFT, y=uv_ct[1], color=WHITE_BGR)

    # Draw blue line indicating the front half
    center_bottom_forward = np.mean(corners[2:4], axis=0)
    center_bottom = np.mean(corners[[2, 3, 7, 6]], axis=0)
    draw_clipped_line_segment(
        img,
        center_bottom,
        center_bottom_forward,
        camera_config,
        linewidth,
        planes,
        colors[0][::-1],
    )
    return img
def render_clip_frustum_cv2(
    self,
    img: np.array,
    corners: np.array,
    planes: List[Tuple[np.array, np.array, np.array, np.array, np.array]],
    camera_config: CameraConfig,
    colors: Tuple[Tuple[int, int, int], Tuple[int, int, int], Tuple[int, int, int]] = (
        (0, 0, 255),
        (255, 0, 0),
        (0, 255, 0),
    ),
    linewidth: int = 2,
) -> np.ndarray:
    r"""Clip a cuboid against the camera frustum and rasterize it with OpenCV.

    Roughly based on
    https://github.com/nutonomy/nuscenes-devkit/blob/master/python-sdk/nuscenes_utils/data_classes.py

    Corner layout (camera coordinate frame)::

            5------4
            |\\    |\\
            | \\   | \\
            6--\\--7  \\
            \\  \\  \\ \\
        l    \\  1-------0    h
         e    \\ ||   \\ ||   e
          n    \\||    \\||   i
           g    \\2------3    g
            t      width.     h
             h.               t.

    Args:
        img: Numpy array of shape (M,N,3)
        corners: Numpy array of shape (8,3) in camera coordinate frame.
        planes: Iterable of 5 clipping planes, each defined by 4 points.
        camera_config: CameraConfig object
        colors: tuple of three RGB 3-tuples for the front face, rear face and
            side edges respectively; each is reversed into BGR before drawing
            since OpenCV expects BGR channel order.
        linewidth: integer, linewidth for plot

    Returns:
        img: Numpy array of shape (M,N,3), representing updated image
    """
    # Pre-convert the three RGB colors to OpenCV's BGR order.
    front_bgr = colors[0][::-1]
    rear_bgr = colors[1][::-1]
    side_bgr = colors[2][::-1]

    def _draw_closed_quad(quad: np.array, bgr: Tuple[int, int, int]) -> None:
        # Connect corner idx-1 -> idx for every idx; idx 0 pairs with the
        # last corner, so the quadrilateral is closed.
        for idx in range(len(quad)):
            draw_clipped_line_segment(
                img,
                quad[idx - 1].copy(),
                quad[idx].copy(),
                camera_config,
                linewidth,
                planes,
                bgr,
            )

    # Side edges (green by default): connect each front corner to the
    # corresponding back corner.
    for front_idx in range(4):
        draw_clipped_line_segment(
            img,
            corners[front_idx],
            corners[front_idx + 4],
            camera_config,
            linewidth,
            planes,
            side_bgr,
        )

    # Front face (blue by default), then rear face (red by default).
    _draw_closed_quad(corners[:4], front_bgr)
    _draw_closed_quad(corners[4:], rear_bgr)

    # Heading indicator: segment from the bottom-face center to the middle
    # of the bottom-front edge, drawn in the front-face color.
    center_bottom_forward = np.mean(corners[2:4], axis=0)
    center_bottom = np.mean(corners[[2, 3, 7, 6]], axis=0)
    draw_clipped_line_segment(
        img,
        center_bottom,
        center_bottom_forward,
        camera_config,
        linewidth,
        planes,
        front_bgr,
    )
    return img