def test_SE2_chaining_transforms():
    """Test for correctness of SE2 chaining / composing."""
    theta = np.pi
    rotation_matrix = rotation_matrix_from_rotation(theta)
    translation_vector = np.array([0, 1])
    fr2_se2_fr1 = SE2(rotation=rotation_matrix, translation=translation_vector)
    fr1_se2_fr0 = SE2(rotation=rotation_matrix, translation=translation_vector)

    fr2_se2_fr0 = fr2_se2_fr1.right_multiply_with_se2(fr1_se2_fr0)

    pts = np.array([[1, 0], [2, 0], [3, 0]])
    transformed_pts = fr2_se2_fr0.transform_point_cloud(pts.copy())

    assert np.allclose(pts, transformed_pts)
    assert np.allclose(fr2_se2_fr0.transform_matrix, np.eye(3))
def get_ground_height_at_xy(self, point_cloud: np.ndarray, city_name: str) -> np.ndarray:
    """
    Get ground height for each xy location in point_cloud.

    Args:
        point_cloud: Numpy array of shape (k,2) or (k,3)
        city_name: either 'MIA' for Miami or 'PIT' for Pittsburgh

    Returns:
        ground_height_values: Numpy array of shape (k,)
    """
    ground_height_mat, npyimage_to_city_se2_mat = self.get_rasterized_ground_height(city_name)
    city_coords = np.round(point_cloud[:, :2]).astype(np.int64)

    se2_rotation = npyimage_to_city_se2_mat[:2, :2]
    se2_trans = npyimage_to_city_se2_mat[:2, 2]
    npyimage_to_city_se2 = SE2(rotation=se2_rotation, translation=se2_trans)
    npyimage_coords = npyimage_to_city_se2.transform_point_cloud(city_coords)
    npyimage_coords = npyimage_coords.astype(np.int64)

    # index at (x,y) locations, which are (y,x) in the image; out-of-bounds queries stay NaN
    ground_height_values = np.full((npyimage_coords.shape[0]), np.nan)
    ind_valid_pts = (npyimage_coords[:, 1] < ground_height_mat.shape[0]) * (
        npyimage_coords[:, 0] < ground_height_mat.shape[1]
    )
    ground_height_values[ind_valid_pts] = ground_height_mat[
        npyimage_coords[ind_valid_pts, 1], npyimage_coords[ind_valid_pts, 0]
    ]
    return ground_height_values
def get_da_contours(self, city_name: str) -> List[np.ndarray]:
    """
    We threshold the binary driveable area or ROI image and obtain contour lines.
    These contour lines represent the boundary.

    Args:
        city_name: either 'MIA' for Miami or 'PIT' for Pittsburgh

    Returns:
        Drivable area contours
    """
    da_imgray = self.city_rasterized_da_roi_dict[city_name]["da_mat"]
    contours = get_img_contours(da_imgray)

    city_contours: List[np.ndarray] = []
    for i, contour_im_coords in enumerate(contours):
        contour_im_coords = contour_im_coords.squeeze()
        contour_im_coords = contour_im_coords.astype(np.float64)

        npyimage_to_city_se2_mat = self.city_rasterized_da_roi_dict[city_name]["npyimage_to_city_se2"]
        se2_rotation = npyimage_to_city_se2_mat[:2, :2]
        se2_trans = npyimage_to_city_se2_mat[:2, 2]
        npyimage_to_city_se2 = SE2(rotation=se2_rotation, translation=se2_trans)
        contour_city_coords = npyimage_to_city_se2.inverse_transform_point_cloud(contour_im_coords)
        city_contours.append(self.append_height_to_2d_city_pt_cloud(contour_city_coords, city_name))

    return city_contours
def get_ground_height_at_xy(self, point_cloud: np.ndarray, city_name: str) -> np.ndarray:
    """
    Get ground height for each xy location in point_cloud.

    Args:
        point_cloud: Numpy array of shape (k,2) or (k,3)
        city_name: either 'MIA' for Miami or 'PIT' for Pittsburgh

    Returns:
        ground_height_values: Numpy array of shape (k,)
    """
    ground_height_mat, npyimage_to_city_se2_mat = self.get_rasterized_ground_height(city_name)
    city_coords = np.round(point_cloud[:, :2]).astype(np.int64)

    se2_rotation = npyimage_to_city_se2_mat[:2, :2]
    se2_trans = npyimage_to_city_se2_mat[:2, 2]
    npyimage_to_city_se2 = SE2(rotation=se2_rotation, translation=se2_trans)
    npyimage_coords = npyimage_to_city_se2.transform_point_cloud(city_coords)
    npyimage_coords = npyimage_coords.astype(np.int64)

    # index in at (x,y) locations, which are (y,x) in the image
    ground_height_values = ground_height_mat[npyimage_coords[:, 1], npyimage_coords[:, 0]]
    return ground_height_values
def get_da_contours(self, city_name: str) -> List[np.ndarray]:
    """
    We threshold the binary driveable area or ROI image and obtain contour lines.
    These contour lines represent the boundary.

    Args:
        city_name: either 'MIA' for Miami or 'PIT' for Pittsburgh

    Returns:
        Drivable area contours
    """
    da_imgray = self.city_rasterized_da_roi_dict[city_name]["da_mat"]
    contours = get_img_contours(da_imgray)

    # pull out 3x3 matrix parameterizing the SE(2) transformation from city coords -> npy image
    npyimage_T_city = self.city_rasterized_da_roi_dict[city_name]["npyimage_to_city_se2"]
    R = npyimage_T_city[:2, :2]
    t = npyimage_T_city[:2, 2]
    npyimage_SE2_city = SE2(rotation=R, translation=t)
    city_SE2_npyimage = npyimage_SE2_city.inverse()

    city_contours: List[np.ndarray] = []
    for i, contour_im_coords in enumerate(contours):
        contour_im_coords = contour_im_coords.squeeze()
        contour_im_coords = contour_im_coords.astype(np.float64)

        contour_city_coords = city_SE2_npyimage.transform_point_cloud(contour_im_coords)
        city_contours.append(self.append_height_to_2d_city_pt_cloud(contour_city_coords, city_name))

    return city_contours
def get_raster_layer_points_boolean(self, point_cloud: np.ndarray, city_name: str, layer_name: str) -> np.ndarray:
    """
    driveable area is "da"

    Args:
        point_cloud: Numpy array of shape (N,3)
        city_name: either 'MIA' for Miami or 'PIT' for Pittsburgh
        layer_name: indicating layer name, either "roi" or "driveable_area"

    Returns:
        is_layer_boolean_arr: Numpy array of shape (N,) where the ith entry is True if
            the ith point falls within the requested raster layer.
    """
    if layer_name == "roi":
        layer_raster_mat, npyimage_to_city_se2_mat = self.get_rasterized_roi(city_name)
    elif layer_name == "driveable_area":
        layer_raster_mat, npyimage_to_city_se2_mat = self.get_rasterized_driveable_area(city_name)
    else:
        raise ValueError("layer_name should be either roi or driveable_area.")

    city_coords = np.round(point_cloud[:, :2]).astype(np.int64)

    se2_rotation = npyimage_to_city_se2_mat[:2, :2]
    se2_trans = npyimage_to_city_se2_mat[:2, 2]
    npyimage_to_city_se2 = SE2(rotation=se2_rotation, translation=se2_trans)
    npyimage_coords = npyimage_to_city_se2.transform_point_cloud(city_coords)
    npyimage_coords = npyimage_coords.astype(np.int64)

    # index in at (x,y) locations, which are (y,x) in the image
    layer_values = layer_raster_mat[npyimage_coords[:, 1], npyimage_coords[:, 0]]
    is_layer_boolean_arr = layer_values == 1.0
    return is_layer_boolean_arr
def get_B_SE2_A(B_SE3_A: SE3):
    """
    Can take city_SE3_egovehicle -> city_SE2_egovehicle
    Can take egovehicle_SE3_object -> egovehicle_SE2_object

    Doesn't matter if we stretch square by h,w,l since triangles will be similar regardless

    Args:
        - B_SE3_A

    Returns:
        - B_SE2_A
        - B_yaw_A
    """
    x_corners = np.array([1, 1, 1, 1, -1, -1, -1, -1])
    y_corners = np.array([1, -1, -1, 1, 1, -1, -1, 1])
    z_corners = np.array([1, 1, -1, -1, 1, 1, -1, -1])
    corners_A_frame = np.vstack((x_corners, y_corners, z_corners)).T

    corners_B_frame = B_SE3_A.transform_point_cloud(corners_A_frame)

    p1 = corners_B_frame[1]
    p5 = corners_B_frame[5]
    dy = p1[1] - p5[1]
    dx = p1[0] - p5[0]
    # the orientation angle of the car
    B_yaw_A = np.arctan2(dy, dx)

    t = B_SE3_A.transform_matrix[:2, 3]  # get x,y only
    B_SE2_A = SE2(rotation=rotmat2d(B_yaw_A), translation=t)
    return B_SE2_A, B_yaw_A
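# A minimal usage sketch for get_B_SE2_A, assuming the SE3 class used elsewhere in this
# section exposes an SE3(rotation=..., translation=...) constructor. The helper name and
# pose values below are illustrative only: with an identity rotation the recovered yaw
# should be zero, and the SE(2) translation should equal the xy part of the SE(3) translation.
def _demo_get_B_SE2_A() -> None:
    city_SE3_egovehicle = SE3(rotation=np.eye(3), translation=np.array([10.0, -4.0, 0.3]))
    city_SE2_egovehicle, city_yaw_ego = get_B_SE2_A(city_SE3_egovehicle)
    assert np.isclose(city_yaw_ego, 0.0)
    assert np.allclose(city_SE2_egovehicle.translation, np.array([10.0, -4.0]))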
def test_SE2_inverse_transform_point_cloud_identity():
    """Test that inverse transforming by Identity does not affect the pointclouds."""
    transformed_pts = np.array([[0.5, 0], [1, -0.5], [1.5, 0], [2, -1]])
    dst_se2_src = SE2(rotation=np.eye(2), translation=np.zeros(2))
    pts = dst_se2_src.inverse_transform_point_cloud(transformed_pts.copy())
    assert np.allclose(pts, transformed_pts)

    with pytest.raises(ValueError):
        dst_se2_src.transform_point_cloud(np.random.rand(1, 3))
def test_SE2_inverse_transform_point_cloud_pi_radians():
    """Test for validity of inverse transformation by an SE2."""
    transformed_pts = np.array([[1.5, 2.0], [1, 2.5], [0.5, 2], [0, 3.0]])
    theta = np.pi
    rotation_matrix = rotation_matrix_from_rotation(theta)
    translation_vector = np.array([2.0, 2.0])
    dst_se2_src = SE2(rotation=rotation_matrix, translation=translation_vector)
    pts = dst_se2_src.inverse_transform_point_cloud(transformed_pts)
    gt_pts = np.array([[0.5, 0], [1, -0.5], [1.5, 0], [2, -1]])
    assert np.allclose(pts, gt_pts)
def test_SE2_transform_point_cloud_identity():
    """Test that transformation by an Identity SE2 does not change pointclouds."""
    pts = np.array([[0.5, 0], [1, -0.5], [1.5, 0], [2, -1]])
    dst_se2_src = SE2(rotation=np.eye(2), translation=np.zeros(2))
    transformed_pts = dst_se2_src.transform_point_cloud(pts.copy())
    assert np.allclose(transformed_pts, pts)

    with pytest.raises(ValueError):
        dst_se2_src.transform_point_cloud(np.random.rand(1))
    with pytest.raises(ValueError):
        dst_se2_src.transform_point_cloud(np.random.rand(1, 3))
def test_SE2_constructor():
    """Test for construction of an arbitrary SE2."""
    theta = 2 * np.pi / 7.0
    rotation_matrix = rotation_matrix_from_rotation(theta)
    translation_vector = np.array([-86.5, 0.99])
    dst_se2_src = SE2(rotation=rotation_matrix.copy(), translation=translation_vector.copy())

    cos_theta = np.cos(theta)
    sin_theta = np.sin(theta)
    T_mat_gt = np.array(
        [
            [cos_theta, -sin_theta, translation_vector[0]],
            [sin_theta, cos_theta, translation_vector[1]],
            [0, 0, 1.0],
        ]
    )

    assert np.allclose(dst_se2_src.rotation, rotation_matrix)
    assert np.allclose(dst_se2_src.translation, translation_vector)
    assert np.allclose(dst_se2_src.transform_matrix, T_mat_gt)

    with pytest.raises(ValueError):
        SE2(np.array([1]), translation_vector)
    with pytest.raises(ValueError):
        SE2(rotation_matrix, np.array([1, 2, 3]))
def get_ground_height_at_xy(self, point_cloud: np.ndarray, city_name: str) -> np.ndarray:
    """
    Get ground height for each xy location in point_cloud.

    Args:
        point_cloud: Numpy array of shape (k,2) or (k,3)
        city_name: either 'MIA' for Miami or 'PIT' for Pittsburgh

    Returns:
        ground_height_values: Numpy array of shape (k,)
    """
    ground_height_mat, npyimage_to_city_se2_mat = self.get_rasterized_ground_height(city_name)
    city_coords = np.round(point_cloud[:, :2]).astype(np.int64)

    se2_rotation = npyimage_to_city_se2_mat[:2, :2]
    se2_trans = npyimage_to_city_se2_mat[:2, 2]
    npyimage_to_city_se2 = SE2(rotation=se2_rotation, translation=se2_trans)
    npyimage_coords = npyimage_to_city_se2.transform_point_cloud(city_coords)
    npyimage_coords = npyimage_coords.astype(np.int64)

    # index at (x,y) locations, which are (y,x) in the image
    max_y = np.max(npyimage_coords[:, 1])
    max_x = np.max(npyimage_coords[:, 0])
    height_y, height_x = np.shape(ground_height_mat)
    assert np.all(npyimage_coords[:, 1] > 0) and np.all(
        npyimage_coords[:, 0] > 0
    ), "Invalid coordinates, please make sure the query location is in a valid city coordinate"

    if max_x >= height_x or max_y >= height_y:
        # expand the ground height npy image so out-of-bounds queries return NaN
        ground_height_mat_pad = np.full((max_y + 1, max_x + 1), np.nan)
        ground_height_mat_pad[0:height_y, 0:height_x] = ground_height_mat
        ground_height_mat = ground_height_mat_pad

    ground_height_values = ground_height_mat[npyimage_coords[:, 1], npyimage_coords[:, 0]]
    return ground_height_values
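# A minimal sketch of the raster-lookup pattern shared by the map query methods above.
# The helper name and arguments are illustrative, not part of the API. Note that despite
# the "npyimage_to_city" naming of the stored 3x3 matrix, it maps city-frame (x,y) points
# into npy image coordinates (see get_da_contours above), and the raster is then indexed
# as (row=y, col=x).
def _lookup_raster_values_sketch(
    raster_mat: np.ndarray, npyimage_to_city_se2_mat: np.ndarray, city_pts: np.ndarray
) -> np.ndarray:
    npyimage_se2_city = SE2(
        rotation=npyimage_to_city_se2_mat[:2, :2], translation=npyimage_to_city_se2_mat[:2, 2]
    )
    img_coords = npyimage_se2_city.transform_point_cloud(np.round(city_pts[:, :2])).astype(np.int64)
    return raster_mat[img_coords[:, 1], img_coords[:, 0]]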
def test_SE2_inverse():
    """Test for numerical correctness of the inverse functionality."""
    src_pts_gt = np.array([[1, 0], [2, 0]])
    dst_pts_gt = np.array([[-2, -1], [-2, 0]])
    theta = np.pi / 2.0
    rotation_matrix = rotation_matrix_from_rotation(theta)
    translation_vector = np.array([-2, -2])
    dst_se2_src = SE2(rotation=rotation_matrix, translation=translation_vector)
    src_se2_dst = dst_se2_src.inverse()

    dst_pts = dst_se2_src.transform_point_cloud(src_pts_gt.copy())
    src_pts = src_se2_dst.transform_point_cloud(dst_pts_gt.copy())

    assert np.allclose(dst_pts, dst_pts_gt)
    assert np.allclose(src_pts, src_pts_gt)

    gt_inv_mat = np.linalg.inv(dst_se2_src.transform_matrix)
    assert np.allclose(src_se2_dst.transform_matrix, gt_inv_mat)
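# For reference, the closed-form SE(2) inverse checked above against np.linalg.inv is
# [R t; 0 1]^-1 = [R^T, -R^T t; 0 1]. A quick numeric sketch (not part of the test suite;
# it reuses rotmat2d as in the other functions in this section):
def _demo_se2_closed_form_inverse() -> None:
    R = rotmat2d(np.pi / 2.0)
    t = np.array([-2.0, -2.0])
    T_inv = np.eye(3)
    T_inv[:2, :2] = R.T
    T_inv[:2, 2] = -R.T.dot(t)
    assert np.allclose(SE2(rotation=R, translation=t).inverse().transform_matrix, T_inv)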
def test_transform_point_cloud() -> None:
    """Guarantee we can implement the SE(2) interface, w/ scale=1.0

    Sample 1000 random 2d rigid body transformations (R,t) and ensure that 2d points
    are transformed equivalently with SE(2) or Sim(2) w/ unit scale.
    """
    for sample in range(1000):
        # generate random 2x2 rotation matrix
        theta = np.random.rand() * 2 * np.pi
        R = rotmat2d(theta)
        t = np.random.randn(2)

        pts_b = np.random.randn(25, 2)

        aTb = SE2(copy.deepcopy(R), copy.deepcopy(t))
        aSb = Sim2(copy.deepcopy(R), copy.deepcopy(t), s=1.0)

        pts_a = aTb.transform_point_cloud(copy.deepcopy(pts_b))
        pts_a_ = aSb.transform_point_cloud(copy.deepcopy(pts_b))

        assert np.allclose(pts_a, pts_a_, atol=1e-7)
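# The equivalence exercised above follows from the Sim(2) action p_a = s * (R @ p_b) + t,
# which reduces to the SE(2) action when s = 1.0. A small standalone check (a sketch only,
# mirroring the names used in the test above; not part of the test suite):
def _demo_sim2_unit_scale_matches_se2() -> None:
    R = rotmat2d(np.pi / 4)
    t = np.array([1.0, -2.0])
    pts_b = np.array([[3.0, 0.5], [-1.0, 2.0]])
    # apply the SE(2)/Sim(2) action manually, row-wise: p_a = R @ p_b + t (s = 1.0)
    pts_a_manual = pts_b.dot(R.T) + t
    assert np.allclose(SE2(copy.deepcopy(R), copy.deepcopy(t)).transform_point_cloud(pts_b), pts_a_manual)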
def render_global_city_map_bev(
    lane_centerlines: LaneCenterline,
    driveable_areas: np.ndarray,
    city_name: str,
    avm: MapProtocol,
    plot_rasterized_roi: bool = True,
    plot_vectormap_roi: bool = False,
    centerline_color_scheme: str = "constant",
) -> None:
    """
    Assume indegree and outdegree are always less than 7.
    Since 1 pixel-per-meter resolution is very low, we upsample the resolution by some
    constant factor.

    Args:
        lane_centerlines: Lane centerline data
        driveable_areas: Driveable area data
        city_name: The city name (eg. "PIT")
        avm: The map
        plot_rasterized_roi: Whether to show the rasterized ROI
        plot_vectormap_roi: Whether to show the vector map ROI
        centerline_color_scheme: `"indegree"`, `"outdegree"`, or `"constant"`
    """
    UPSAMPLE_FACTOR = 2
    GRID_NUMBER_INTERVAL = 500
    NUM_COLOR_BINS = 7

    warnings.filterwarnings("error")
    im_h, im_w, xmin, xmax, ymin, ymax = _get_int_image_bounds_from_city_coords(driveable_areas, lane_centerlines)
    rendered_image = np.ones((im_h * UPSAMPLE_FACTOR, im_w * UPSAMPLE_FACTOR, 3))
    image_to_city_se2 = SE2(rotation=np.eye(2), translation=np.array([-xmin, -ymin]))
    # define pink before both branches, since it is used by the rasterized ROI and vector map ROI plots
    pink = np.array([180.0, 105.0, 255.0]) / 255

    if plot_rasterized_roi:
        grid_2d_pts = get_mesh_grid_as_point_cloud(xmin + 1, xmax - 1, ymin + 1, ymax - 1)
        roi_2d_pts = avm.remove_non_driveable_area_points(grid_2d_pts, city_name)
        roi_2d_pts = image_to_city_se2.transform_point_cloud(roi_2d_pts)
        roi_2d_pts *= UPSAMPLE_FACTOR
        roi_2d_pts = np.round(roi_2d_pts).astype(np.int32)
        for pt in roi_2d_pts:
            row = pt[1]
            col = pt[0]
            rendered_image[row, col, :] = pink

    if plot_vectormap_roi:
        driveable_areas = copy.deepcopy(driveable_areas)
        for da_idx, da in enumerate(driveable_areas):
            da = image_to_city_se2.transform_point_cloud(da[:, :2])
            rendered_image = draw_polygon_cv2(da * UPSAMPLE_FACTOR, rendered_image, pink)

    for x in range(0, im_w * UPSAMPLE_FACTOR, GRID_NUMBER_INTERVAL):
        for y in range(0, im_h * UPSAMPLE_FACTOR, GRID_NUMBER_INTERVAL):
            coords_str = f"{x}_{y}"
            cv2.putText(
                rendered_image,
                coords_str,
                (x, y),
                cv2.FONT_HERSHEY_SIMPLEX,
                1,
                (255, 0, 0),
                2,
                cv2.LINE_AA,
                bottomLeftOrigin=True,
            )

    # Color the graph
    max_outdegree = 0
    max_indegree = 0

    colors_arr = _get_opencv_green_to_red_colormap(NUM_COLOR_BINS)
    blue = [0, 0, 1][::-1]

    for lane_id, lane_obj in lane_centerlines.items():
        centerline_2d = lane_obj.centerline
        centerline_2d = image_to_city_se2.transform_point_cloud(centerline_2d)
        centerline_2d = np.round(centerline_2d).astype(np.int32)

        preds = lane_obj.predecessors
        succs = lane_obj.successors

        indegree = 0 if preds is None else len(preds)
        outdegree = 0 if succs is None else len(succs)

        if indegree > max_indegree:
            max_indegree = indegree
            print("max indegree", indegree)

        if outdegree > max_outdegree:
            max_outdegree = outdegree
            print("max outdegree", outdegree)

        if centerline_color_scheme == "indegree":
            color = colors_arr[indegree]
        elif centerline_color_scheme == "outdegree":
            color = colors_arr[outdegree]
        elif centerline_color_scheme == "constant":
            color = blue
        else:
            raise ValueError("centerline_color_scheme must be indegree, outdegree, or constant.")

        draw_polyline_cv2(
            centerline_2d * UPSAMPLE_FACTOR,
            rendered_image,
            color,
            im_h * UPSAMPLE_FACTOR,
            im_w * UPSAMPLE_FACTOR,
        )

    # provide colormap in corner
    for i in range(NUM_COLOR_BINS):
        rendered_image[0, i, :] = colors_arr[i]

    rendered_image = np.flipud(rendered_image)
    rendered_image *= 255
    rendered_image = rendered_image.astype(np.uint8)

    cur_datetime = generate_datetime_string()
    img_save_fpath = f"{city_name}_{centerline_color_scheme}_{cur_datetime}.png"
    cv2.imwrite(filename=img_save_fpath, img=rendered_image)
    warnings.resetwarnings()
def run_ab3dmot(
    classname: str,
    pose_dir: str,
    dets_dump_dir: str,
    tracks_dump_dir: str,
    min_conf: float = 0.3,
    match_algorithm: str = 'h',  # hungarian
    match_threshold: float = 4,
    match_distance: str = 'iou',
    p: np.ndarray = np.eye(10),
    thr_estimate: float = 0.8,
    thr_prune: float = 0.1,
    ps: float = 0.9,
) -> None:
    """
    Run tracking for a single class over the Argoverse tracking dataset test set; we add
    our predicted labels into per_sweep_annotations_amodal/ inside the output folder.
    Filtering occurs in the city frame, not the egovehicle frame.

    Args:
        - classname: string, either 'VEHICLE' or 'PEDESTRIAN'
        - pose_dir: string, path to the Argoverse tracking dataset split containing egovehicle poses
        - dets_dump_dir: string, path to the per-sweep detection labels
        - tracks_dump_dir: string, path where tracked object labels will be written
        - min_conf: minimum detection confidence; detections scoring below this are skipped
        - match_algorithm: 'h' for hungarian matching
        - match_threshold: matching threshold passed to the tracker update
        - match_distance: matching distance metric passed to the tracker update, e.g. 'iou'
        - p: covariance matrix passed to the tracker update
        - thr_estimate, thr_prune, ps: AB3DMOT tracker parameters

    Returns:
        - None
    """
    dl = SimpleArgoverseTrackingDataLoader(data_dir=pose_dir, labels_dir=dets_dump_dir)
    am = ArgoverseMap()

    for log_id in tqdm(dl.sdb.get_valid_logs()):
        print(log_id)
        city_name = dl.get_city_name(log_id)

        labels_folder = dets_dump_dir + "/" + log_id + "/per_sweep_annotations_amodal/"
        lis = os.listdir(labels_folder)
        lidar_timestamps = [int(file.split(".")[0].split("_")[-1]) for file in lis]
        lidar_timestamps.sort()
        previous_frame_bbox = []

        ab3dmot = AB3DMOT(thr_estimate=thr_estimate, thr_prune=thr_prune, ps=ps)

        print(labels_folder)
        tracked_labels_copy = []

        for j, current_lidar_timestamp in enumerate(lidar_timestamps):
            dets = dl.get_labels_at_lidar_timestamp(log_id, current_lidar_timestamp)
            dets_copy = dets
            transforms = []

            city_SE3_egovehicle = dl.get_city_to_egovehicle_se3(log_id, current_lidar_timestamp)
            egovehicle_SE3_city = city_SE3_egovehicle.inverse()

            transformed_labels = []
            conf = []
            for l_idx, l in enumerate(dets):
                if l['label_class'] != classname:
                    # will revisit in other tracking pass
                    continue
                if l["score"] < min_conf:
                    # print('Skipping det with confidence ', l["score"])
                    continue

                det_obj = json_label_dict_to_obj_record(l)
                det_corners_egovehicle_fr = det_obj.as_3d_bbox()

                transforms += [city_SE3_egovehicle]
                if city_SE3_egovehicle is None:
                    print('Was None')

                # convert detection from egovehicle frame to city frame
                det_corners_city_fr = city_SE3_egovehicle.transform_point_cloud(det_corners_egovehicle_fr)
                ego_xyz = np.mean(det_corners_city_fr, axis=0)

                # Check the driveable/roi area
                # da = am.remove_non_driveable_area_points(np.array([ego_xyz]), city_name=city_name)
                # if len(da) == 0 and l['label_class'] == 'VEHICLE':
                #     continue
                # roi = am.remove_non_roi_points(np.array([ego_xyz]), city_name=city_name)
                # if len(roi) == 0:
                #     continue

                yaw = yaw_from_bbox_corners(det_corners_city_fr)
                transformed_labels += [
                    [ego_xyz[0], ego_xyz[1], ego_xyz[2], yaw, l["length"], l["width"], l["height"]]
                ]
                conf += [l["score"]]

            if len(transformed_labels) > 0:
                transformed_labels = np.array(transformed_labels)
            else:
                transformed_labels = np.empty((0, 7))

            dets_all = {
                "dets": transformed_labels,
                "info": np.zeros(transformed_labels.shape),
                "conf": conf,
            }

            # perform measurement update in the city frame.
            dets_with_object_id = ab3dmot.update(dets_all, match_distance, match_threshold, match_algorithm, p)

            tracked_labels = []
            for det in dets_with_object_id:
                # move city frame tracks back to ego-vehicle frame
                xyz_city = np.array([det[0].item(), det[1].item(), det[2].item()]).reshape(1, 3)
                city_yaw_object = det[3]
                city_se2_object = SE2(rotation=rotmat2d(city_yaw_object), translation=xyz_city.squeeze()[:2])
                city_se2_egovehicle, city_yaw_ego = get_B_SE2_A(city_SE3_egovehicle)
                ego_se2_city = city_se2_egovehicle.inverse()
                egovehicle_se2_object = ego_se2_city.right_multiply_with_se2(city_se2_object)

                # recreate all 8 points
                # transform them
                # compute yaw from 8 points once more
                egovehicle_SE3_city = city_SE3_egovehicle.inverse()
                xyz_ego = egovehicle_SE3_city.transform_point_cloud(xyz_city).squeeze()

                # update for new yaw
                # transform all 8 points at once, then compute yaw on the fly
                ego_yaw_obj = se2_to_yaw(egovehicle_se2_object)
                qx, qy, qz, qw = yaw_to_quaternion3d(ego_yaw_obj)

                tracked_labels.append(
                    {
                        "center": {"x": xyz_ego[0], "y": xyz_ego[1], "z": xyz_ego[2]},
                        "rotation": {"x": qx, "y": qy, "z": qz, "w": qw},
                        "length": det[4],
                        "width": det[5],
                        "height": det[6],
                        "track_label_uuid": uuid_gen.get_uuid(det[7]),
                        "timestamp": current_lidar_timestamp,
                        "label_class": classname,
                    }
                )

            tracked_labels_copy = copy.deepcopy(tracked_labels)

            label_dir = os.path.join(tracks_dump_dir, log_id, "per_sweep_annotations_amodal")
            check_mkdir(label_dir)
            json_fname = f"tracked_object_labels_{current_lidar_timestamp}.json"
            json_fpath = os.path.join(label_dir, json_fname)
            if Path(json_fpath).exists():
                # accumulate tracks of another class together
                prev_tracked_labels = read_json_file(json_fpath)
                tracked_labels.extend(prev_tracked_labels)

            save_json_dict(json_fpath, tracked_labels)
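# Frame-chaining sketch for the conversion performed above: with the destination_se2_source
# naming convention, egovehicle_se2_object = egovehicle_se2_city * city_se2_object, i.e.
# composing cancels the shared "city" frame. Assuming right_multiply_with_se2 composes
# self @ other (as the usage above implies), the transform matrices multiply accordingly.
# The demo name and numeric values below are illustrative only.
def _demo_se2_frame_chaining() -> None:
    a_se2_b = SE2(rotation=rotmat2d(0.3), translation=np.array([1.0, 2.0]))
    b_se2_c = SE2(rotation=rotmat2d(-0.1), translation=np.array([0.5, 0.0]))
    a_se2_c = a_se2_b.right_multiply_with_se2(b_se2_c)
    assert np.allclose(a_se2_c.transform_matrix, a_se2_b.transform_matrix.dot(b_se2_c.transform_matrix))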