def draw_local_map(avm, city_name, x_min, x_max, y_min, y_max, ax):
    """Draw the local lane map (centerlines + lane polygons) onto an axes.

    Args:
        avm: ArgoverseMap instance providing lane/polygon lookups.
        city_name: The name of a city, e.g. `"PIT"`.
        x_min, x_max, y_min, y_max: Region bounds in city coordinates.
        ax: Matplotlib axes to draw on.
    """
    ax.set_xlim([x_min, x_max])
    ax.set_ylim([y_min, y_max])
    map_range = np.array([x_min, x_max, y_min, y_max])
    # BUG FIX: the original looked up `city_lane_centerlines_dict` twice and
    # kept an unused `city_halluc_bbox_table` local; both removed.
    seq_lane_props = avm.city_lane_centerlines_dict[city_name]
    lane_centerlines = map_Selection(map_range, seq_lane_props)
    for lane_cl in lane_centerlines:
        # BUG FIX: draw on the axes passed by the caller; `plt.plot` targets
        # the implicit current axes, which may not be `ax`.
        ax.plot(lane_cl[:, 0], lane_cl[:, 1], "--", color="grey",
                alpha=1, linewidth=1, zorder=0)
    # Use the built-in map API to plot lane polygons for the same region.
    local_lane_polygons = avm.find_local_lane_polygons(
        [x_min, x_max, y_min, y_max], city_name)
    draw_lane_polygons(ax, local_lane_polygons, color='#02383C')
def render_bev_labels_mpl_tmp(
    self,
    city_name: str,
    ax: plt.Axes,
    axis: str,
    lidar_pts: np.ndarray,
    local_lane_polygons: np.ndarray,
    local_das: np.ndarray,
    city_to_egovehicle_se3: SE3,
    avm: ArgoverseMap,
) -> None:
    """Plot nearby lane polygons and nearby driveable areas (da) on the Matplotlib axes.

    Args:
        city_name: The name of a city, e.g. `"PIT"`
        ax: Matplotlib axes
        axis: string, either 'ego_axis' or 'city_axis' selecting the render frame
        lidar_pts: Numpy array of shape (N,3)
        local_lane_polygons: Polygons representing the local lane set
        local_das: Numpy array of objects of shape (N,) where each object is of shape (M,3)
        city_to_egovehicle_se3: Transformation from egovehicle frame to city frame
        avm: ArgoverseMap instance
    """
    # BUG FIX: the original used `axis is not "city_axis"` — string identity
    # is implementation-dependent and a SyntaxWarning since Python 3.8.
    if axis != "city_axis":
        # Rendering in the egovehicle reference frame: transform city-frame
        # geometry into the egovehicle frame, then undo the ego rotation.
        for da_idx, local_da in enumerate(local_das):
            local_da = city_to_egovehicle_se3.inverse_transform_point_cloud(local_da)
            local_das[da_idx] = rotate_polygon_about_pt(
                local_da, city_to_egovehicle_se3.rotation, np.zeros(3))
        for lane_idx, local_lane_polygon in enumerate(local_lane_polygons):
            local_lane_polygon = city_to_egovehicle_se3.inverse_transform_point_cloud(
                local_lane_polygon)
            local_lane_polygons[lane_idx] = rotate_polygon_about_pt(
                local_lane_polygon, city_to_egovehicle_se3.rotation, np.zeros(3))

    draw_lane_polygons(ax, local_lane_polygons)
    draw_lane_polygons(ax, local_das, color="tab:pink")

    if axis != "city_axis":
        # Rotate the lidar sweep to match the rotated map geometry.
        lidar_pts = rotate_polygon_about_pt(
            lidar_pts, city_to_egovehicle_se3.rotation, np.zeros((3,)))
    draw_point_cloud_bev(ax, lidar_pts)
def render_bev_labels_mpl(
    self,
    city_name: str,
    ax: plt.Axes,
    axis: str,
    lidar_pts: np.ndarray,
    local_lane_polygons: np.ndarray,
    local_das: np.ndarray,
    log_id: str,
    timestamp: int,
    city_to_egovehicle_se3: SE3,
    avm: ArgoverseMap,
) -> None:
    """Plot nearby lane polygons and nearby driveable areas (da) on the Matplotlib axes.

    Also renders the annotated object cuboids (and their lidar returns / lane
    tangent arrows) for the given log and timestamp.

    Args:
        city_name: The name of a city, e.g. `"PIT"`
        ax: Matplotlib axes
        axis: string, either 'ego_axis' or 'city_axis' selecting the render frame
        lidar_pts: Numpy array of shape (N,3)
        local_lane_polygons: Polygons representing the local lane set
        local_das: Numpy array of objects of shape (N,) where each object is of shape (M,3)
        log_id: The ID of a log
        timestamp: In nanoseconds
        city_to_egovehicle_se3: Transformation from egovehicle frame to city frame
        avm: ArgoverseMap instance
    """
    # BUG FIX: `axis is not "city_axis"` / `axis is "city_axis"` compared
    # string identity (implementation-dependent, SyntaxWarning since Python
    # 3.8); replaced with `!=` / `==` equality throughout.
    if axis != "city_axis":
        # Rendering in the egovehicle reference frame: transform city-frame
        # geometry into the egovehicle frame, then undo the ego rotation.
        for da_idx, local_da in enumerate(local_das):
            local_da = city_to_egovehicle_se3.inverse_transform_point_cloud(local_da)
            local_das[da_idx] = rotate_polygon_about_pt(
                local_da, city_to_egovehicle_se3.rotation, np.zeros(3))
        for lane_idx, local_lane_polygon in enumerate(local_lane_polygons):
            local_lane_polygon = city_to_egovehicle_se3.inverse_transform_point_cloud(
                local_lane_polygon)
            local_lane_polygons[lane_idx] = rotate_polygon_about_pt(
                local_lane_polygon, city_to_egovehicle_se3.rotation, np.zeros(3))

    draw_lane_polygons(ax, local_lane_polygons)
    draw_lane_polygons(ax, local_das, color="tab:pink")

    if axis != "city_axis":
        lidar_pts = rotate_polygon_about_pt(
            lidar_pts, city_to_egovehicle_se3.rotation, np.zeros((3,)))
    draw_point_cloud_bev(ax, lidar_pts)

    objects = self.log_timestamp_dict[log_id][timestamp]
    all_occluded = True
    for frame_rec in objects:
        if frame_rec.occlusion_val != IS_OCCLUDED_FLAG:
            all_occluded = False

    if not all_occluded:
        for i, frame_rec in enumerate(objects):
            bbox_city_fr = frame_rec.bbox_city_fr
            bbox_ego_frame = frame_rec.bbox_ego_frame
            color = frame_rec.color
            if frame_rec.occlusion_val != IS_OCCLUDED_FLAG:
                bbox_ego_frame = rotate_polygon_about_pt(
                    bbox_ego_frame, city_to_egovehicle_se3.rotation, np.zeros((3,)))
                if axis == "city_axis":
                    plot_bbox_2D(ax, bbox_city_fr, color)
                    if self.plot_lane_tangent_arrows:
                        # Arrow showing the lane tangent direction at the
                        # object's center, scaled for visibility.
                        bbox_center = np.mean(bbox_city_fr, axis=0)
                        tangent_xy, conf = avm.get_lane_direction(
                            query_xy_city_coords=bbox_center[:2],
                            city_name=city_name)
                        dx = tangent_xy[0] * LANE_TANGENT_VECTOR_SCALING
                        dy = tangent_xy[1] * LANE_TANGENT_VECTOR_SCALING
                        ax.arrow(bbox_center[0], bbox_center[1], dx, dy,
                                 color="r", width=0.5, zorder=2)
                else:
                    plot_bbox_2D(ax, bbox_ego_frame, color)
                    # deepcopy: the filter helper may mutate the point cloud.
                    cuboid_lidar_pts, _ = filter_point_cloud_to_bbox_2D_vectorized(
                        bbox_ego_frame[:, :2], copy.deepcopy(lidar_pts))
                    if cuboid_lidar_pts is not None:
                        draw_point_cloud_bev(ax, cuboid_lidar_pts, color)
    else:
        logger.info(f"all occluded at {timestamp}")
        xcenter = city_to_egovehicle_se3.translation[0]
        ycenter = city_to_egovehicle_se3.translation[1]
        ax.text(xcenter - 146, ycenter + 50, "ALL OBJECTS OCCLUDED", fontsize=30)
def plot_log_one_at_a_time(self, avm=None, log_num: int = 0):
    """Render one forecasting log: per-frame local map, agent and surrounding
    trajectories, optionally saving each frame as a PNG.

    Args:
        avm: ArgoverseMap instance used for local map / driveable-area queries.
        log_num: Index of the log within `self.afl` to render.

    Returns:
        Tuple (past_traj, future_traj, center_lines, surr_past_pos) as
        produced by the trajectory-extraction helpers.
    """
    df = self.afl[log_num].seq_df
    city_name = df["CITY_NAME"].values[0]
    time_step = self.afl[log_num].agent_traj.shape[0]
    self.timestamps = list(sorted(set(df["TIMESTAMP"].values.tolist())))
    self.log_agent_pose = self.afl[log_num].agent_traj
    # IMPROVED: `if not x == 50` replaced with the direct `!=` comparison.
    if self.log_agent_pose.shape[0] != 50:
        # NOTE(review): "tacking" is likely a typo for "tracking" — message
        # kept byte-identical in case anything matches on it.
        sys.exit("The agent's tacking time is less than 5 seconds!")
    for i in range(time_step):
        # Only render frames with a full observed history and future horizon.
        if i < AGENT_TIME_STEP or i >= time_step - FUTURE_TIME_STEP:
            continue
        if self.save_img:
            file_num = self.filenames[log_num]
            shard_ind = ceil(file_num / FRAME_IN_SHARD)
            img_name = "{0}_{1:0>7d}_{2}.png".format(city_name, file_num, i)
            # Skip frames that were already rendered unless overwriting.
            if (not self.overwrite_rendered_file) and \
                    Path(f"{self.dataset_dir}../rendered_image/{shard_ind}/{img_name}").exists():
                continue
        [xcenter, ycenter] = self.afl[log_num].agent_traj[i, :]
        # Figure setting: reuse figure num=1 and clear it each frame.
        fig = plt.figure(num=1, figsize=(4.3, 4.3), facecolor='k')
        plt.clf()
        plt.axis("off")
        ax = fig.add_subplot(111)
        ax.set_xlim([-FIELD_VIEW / 2 + xcenter, FIELD_VIEW / 2 + xcenter])
        ax.set_ylim([-FIELD_VIEW / 2 + ycenter, FIELD_VIEW / 2 + ycenter])
        fig.tight_layout()
        # Draw map within a 120m x 120m window centered on the agent.
        xmin = xcenter - 60
        xmax = xcenter + 60
        ymin = ycenter - 60
        ymax = ycenter + 60
        local_lane_polygons = avm.find_local_lane_polygons(
            [xmin, xmax, ymin, ymax], city_name)
        local_das = avm.find_local_driveable_areas(
            [xmin, xmax, ymin, ymax], city_name)
        # Render and get the log info.
        # IMPROVED: `viz = False; if self.save_img: viz = True` collapsed.
        viz = bool(self.save_img)
        draw_lane_polygons(ax, local_lane_polygons, color="tab:gray")
        draw_lane_polygons(ax, local_das, color=(1, 0, 1))
        if self.convert_tf_record:
            # Build the single-channel map representation for TFRecord export.
            self.get_map_with_one_channel(local_lane_polygons, local_das,
                                          self.log_agent_pose[i, :])
        past_traj = self.get_agent_past_traj(ax, i, viz)
        future_traj = self.get_agent_future_traj(ax, i, viz)
        center_lines = self.get_candidate_centerlines(
            ax, self.log_agent_pose[:(AGENT_TIME_STEP + 1), :],
            avm, city_name, viz)
        surr_past_pos = self.get_surr_past_traj(ax, df, i,
                                                self.log_agent_pose[i, :], viz)
        if self.save_img:
            # NOTE(review): this overwrites the shard index computed from
            # FRAME_IN_SHARD above with a constant — confirm intent.
            shard_ind = 1
            plt.savefig(
                f"{self.dataset_dir}../rendered_image/{shard_ind}/{img_name}",
                dpi=100, facecolor='k', bbox_inches='tight', pad_inches=0)
    # NOTE(review): original indentation was lost; this returns the data of
    # the last rendered frame — confirm placement against the original file.
    return past_traj, future_traj, center_lines, surr_past_pos
def render_bev_labels_mpl(
    self,
    city_name: str,
    ax: plt.Axes,
    axis: str,
    local_lane_polygons: np.ndarray,
    local_das: np.ndarray,
    log_id: str,
    timestamp: int,
    timestamp_ind: int,
    city_to_egovehicle_se3: SE3,
    avm: ArgoverseMap,
) -> None:
    """Plot nearby lane polygons and nearby driveable areas (da) on the Matplotlib axes.

    Also renders the ego vehicle and a fading short history trail for each
    surrounding (non-occluded) object.

    Args:
        city_name: The name of a city, e.g. `"PIT"`
        ax: Matplotlib axes
        axis: string, either 'ego_axis' or 'city_axis' selecting the render frame
        local_lane_polygons: Polygons representing the local lane set
        local_das: Numpy array of objects of shape (N,) where each object is of shape (M,3)
        log_id: The ID of a log
        timestamp: In nanoseconds
        timestamp_ind: Index of `timestamp` within `self.timestamps`
        city_to_egovehicle_se3: Transformation from egovehicle frame to city frame
        avm: ArgoverseMap instance
    """
    # BUG FIX: `axis is not "city_axis"` compared string identity
    # (implementation-dependent, SyntaxWarning since Python 3.8); use `!=`.
    if axis != "city_axis":
        # Rendering in the egovehicle frame: translate city geometry into the
        # egovehicle frame. Unlike the lidar-based variant, the extra rotation
        # about the ego was deliberately disabled here.
        for da_idx, local_da in enumerate(local_das):
            local_das[da_idx] = \
                city_to_egovehicle_se3.inverse_transform_point_cloud(local_da)
        for lane_idx, local_lane_polygon in enumerate(local_lane_polygons):
            local_lane_polygons[lane_idx] = \
                city_to_egovehicle_se3.inverse_transform_point_cloud(local_lane_polygon)

    draw_lane_polygons(ax, local_lane_polygons, color="tab:gray")
    draw_lane_polygons(ax, local_das, color=(1, 0, 1))

    # Render the ego vehicle as a fixed green 2D bbox at the ego-frame origin.
    bbox_ego = np.array([[-1.0, -1.0, 0.0], [3.0, -1.0, 0.0],
                         [-1.0, 1.0, 0.0], [3.0, 1.0, 0.0]])
    plot_bbox_2D(ax, bbox_ego, 'g')

    # Render surrounding vehicles and pedestrians.
    objects = self.log_timestamp_dict[log_id][timestamp]
    hist_objects = {}
    all_occluded = True
    for frame_rec in objects:
        if frame_rec.occlusion_val != IS_OCCLUDED_FLAG:
            all_occluded = False
            # One history slot per past step plus the current frame.
            hist_objects[frame_rec.track_uuid] = [
                None for i in range(0, SURR_TIME_STEP + 1)]
            hist_objects[frame_rec.track_uuid][SURR_TIME_STEP] = frame_rec.bbox_city_fr

    if not all_occluded:
        # Back-fill each track's bboxes over the preceding SURR_TIME_STEP frames.
        for ind, i in enumerate(
                range(timestamp_ind - SURR_TIME_STEP, timestamp_ind)):
            objects = self.log_timestamp_dict[log_id][self.timestamps[i]]
            for frame_rec in objects:
                if frame_rec.track_uuid in hist_objects:
                    hist_objects[frame_rec.track_uuid][ind] = frame_rec.bbox_city_fr

        # render color with decreasing lightness (older frames are lighter)
        color_lightness = np.linspace(1, 0, SURR_TIME_STEP + 2)[1:].tolist()
        for track_id in hist_objects:
            for ind_color, bbox_city_fr in enumerate(hist_objects[track_id]):
                if bbox_city_fr is None:
                    continue
                bbox_relative = city_to_egovehicle_se3.inverse_transform_point_cloud(
                    bbox_city_fr)
                x = bbox_relative[:, 0].tolist()
                y = bbox_relative[:, 1].tolist()
                # Reorder corners 1..3 so ax.fill traces the quad's perimeter.
                x[1], x[2], x[3], y[1], y[2], y[3] = \
                    x[2], x[3], x[1], y[2], y[3], y[1]
                ax.fill(x, y, c=(1, 1, color_lightness[ind_color]),
                        alpha=1, zorder=1)
    else:
        logger.info(f"all occluded at {timestamp}")
        xcenter = city_to_egovehicle_se3.translation[0]
        ycenter = city_to_egovehicle_se3.translation[1]
        ax.text(xcenter - 146, ycenter + 50, "ALL OBJECTS OCCLUDED", fontsize=30)