def __init__(self, vehicle: Vehicle, agent_settings: AgentConfig, **kwargs):
    """Configure a lead/follower car: multicast comms, controller choice,
    Open3D visualization, ground-plane filtering, occupancy map, and the
    obstacle avoid/bypass/recover state-machine hyperparameters."""
    super().__init__(vehicle, agent_settings, **kwargs)
    # Role and multicast identity; the follower looks up the lead car's
    # state in the multicast message log under `car_to_follow`.
    self.is_lead_car = True
    self.name = "car_0"
    self.car_to_follow = "car_1"
    self.udp_multicast = UDPMulticastCommunicator(agent=self,
                                                  mcast_group="224.1.1.1",
                                                  mcast_port=5004,
                                                  threaded=True,
                                                  update_interval=0.025,
                                                  name=self.name)
    self.add_threaded_module(self.udp_multicast)
    # Lead car follows the lane; follower keeps a fixed distance behind
    # the lead car using the state received over UDP.
    if self.is_lead_car:
        self.controller = LaneFollowingPID(agent=self)
    else:
        self.controller = UDP_PID_CONTROLLER(agent=self, distance_to_keep=1)
    self.prev_steerings: deque = deque(maxlen=10)

    # point cloud visualization
    self.vis = o3d.visualization.Visualizer()
    self.vis.create_window(width=500, height=500)
    self.pcd = o3d.geometry.PointCloud()
    self.coordinate_frame = o3d.geometry.TriangleMesh.create_coordinate_frame()
    self.points_added = False

    # pointcloud and ground plane detection
    self.depth2pointcloud = DepthToPointCloudDetector(agent=self)
    self.max_dist = 1.5  # keep only points within this distance (ground filter)
    self.height_threshold = 0.5  # padding above the car's y when filtering
    self.ransac_dist_threshold = 0.01
    self.ransac_n = 3
    self.ransac_itr = 100

    # occupancy map: square grid of (max_dist * scaling_factor) cells per side
    self.scaling_factor = 100
    self.occu_map = np.zeros(
        shape=(math.ceil(self.max_dist * self.scaling_factor),
               math.ceil(self.max_dist * self.scaling_factor)),
        dtype=np.float32)
    # affine offsets used when projecting point-cloud coords into the map
    self.cx = len(self.occu_map) // 2
    self.cz = 0

    # modes of the vehicle
    self.mode = CS249AgentModes.NORMAL
    self.obstacle_is_at = None  # ObstacleEnum side of the obstacle, if any

    # obstacle avoidance hyperparameters (distances are traveled distance
    # from `obstacle_avoid_starting_coord` before switching phase)
    self.avoid_throttle = 0.18
    self.avoid_left_steer = -1
    self.avoid_right_steer = 0.5
    self.obstacle_avoid_starting_coord: Optional[Location] = None
    self.avoid_max_dist = 0.3
    self.bypass_dist_dist = 0.5
    self.recover_max_dist = 0.1
def __init__(self, vehicle: Vehicle, agent_settings: AgentConfig, **kwargs): super().__init__(vehicle, agent_settings, **kwargs) # initialize occupancy grid map content self.occu_map = OccupancyGridMap(agent=self) self.depth_to_pcd = DepthToPointCloudDetector(agent=self) self.ground_plane_detector = GroundPlaneDetector(agent=self) # initialize open3d related content self.vis = o3d.visualization.Visualizer() self.vis.create_window(width=500, height=500) self.pcd = o3d.geometry.PointCloud() self.coordinate_frame = o3d.geometry.TriangleMesh.create_coordinate_frame() self.points_added = False
def __init__(self, vehicle: Vehicle, agent_settings: AgentConfig, **kwargs):
    """Wire up the waypoint-following stack: PID control, mission/behavior/
    local planners, an occupancy grid map, and threaded perception modules."""
    super().__init__(vehicle, agent_settings, **kwargs)
    self.route_file_path = Path(self.agent_settings.waypoint_file_path)
    self.pid_controller = PIDController(agent=self,
                                        steering_boundary=(-1, 1),
                                        throttle_boundary=(0, 1))
    self.mission_planner = WaypointFollowingMissionPlanner(agent=self)
    # initiated right after mission plan
    self.behavior_planner = BehaviorPlanner(agent=self)
    self.local_planner = SimpleWaypointFollowingLocalPlanner(
        agent=self,
        controller=self.pid_controller,
        mission_planner=self.mission_planner,
        behavior_planner=self.behavior_planner,
        closeness_threshold=1)
    self.occupancy_map = OccupancyGridMap(absolute_maximum_map_size=2000,
                                          world_coord_resolution=10,
                                          occu_prob=0.9)  # 1 m = 100 cm
    # point-cloud computation runs in its own thread
    self.add_threaded_module(
        DepthToPointCloudDetector(agent=self,
                                  should_compute_global_pointcloud=True,
                                  threaded=True,
                                  scale_factor=1000))
    # ground-plane detection currently disabled
    # self.gpd = GroundPlaneDetector(self, threaded=True)
    # self.add_threaded_module(self.gpd)
    self.obstacle_detector = ObstacleDetector(self, threaded=True)
    self.add_threaded_module(self.obstacle_detector)
def __init__(self, vehicle: Vehicle, agent_settings: AgentConfig, **kwargs):
    """Configure image-based PID driving, waypoint bookkeeping, ground-plane
    filtering parameters, and a world-scale occupancy `Map`."""
    super().__init__(vehicle, agent_settings, **kwargs)
    self.prev_steerings: deque = deque(maxlen=10)
    # swap in the iOS-specific PID gains that live next to the configured file
    self.agent_settings.pid_config_file_path = (
        Path(self.agent_settings.pid_config_file_path).parent
        / "iOS_pid_config.json").as_posix()
    self.controller = ImageBasedPIDController(agent=self)

    # START LOC
    self.start_loc: Optional[Transform] = None
    self.start_loc_bound: float = 0.2
    self.has_exited_start_loc: bool = False

    # STOP Mid step
    self.ip_addr = "10.0.0.2"

    # Waypoint Following
    self.waypoints: List[Transform] = []
    self.curr_waypoint_index = 0
    self.closeness_threshold = 0.4

    # occupancy grid map
    # point cloud visualization (disabled)
    # self.vis = o3d.visualization.Visualizer()
    # self.vis.create_window(width=500, height=500)
    # self.pcd = o3d.geometry.PointCloud()
    # self.coordinate_frame = o3d.geometry.TriangleMesh.create_coordinate_frame()
    # self.points_added = False

    # pointcloud and ground plane detection
    self.depth2pointcloud = DepthToPointCloudDetector(agent=self)
    self.max_dist = 1.5
    self.height_threshold = 1
    self.ransac_dist_threshold = 0.01
    self.ransac_n = 3
    self.ransac_itr = 100

    self.waypoint_map: Optional[Map] = None
    # NOTE(review): `buffer` is unused below — Map receives the literal 10;
    # confirm whether it was meant to be passed as buffer=buffer.
    buffer = 10
    x_scale = 20
    y_scale = 20
    x_offset = 100
    y_offset = 100
    self.occu_map = Map(
        x_offset=x_offset,
        y_offset=y_offset,
        x_scale=x_scale,
        y_scale=y_scale,
        x_width=2500,
        y_height=2500,
        buffer=10,
        name="occupancy map"
    )
    # BGR image rendering of the occupancy map for cv2.imshow
    self.m = np.zeros(shape=(self.occu_map.map.shape[0],
                             self.occu_map.map.shape[1], 3))
class OccupancyMapAgent(Agent):
    # Agent that converts each front depth frame into a point cloud and
    # renders it live in a non-blocking Open3D window.

    def __init__(self, vehicle: Vehicle, agent_settings: AgentConfig, **kwargs):
        """Set up detectors and the Open3D visualizer window."""
        super().__init__(vehicle, agent_settings, **kwargs)
        # initialize occupancy grid map content
        self.occu_map = OccupancyGridMap(agent=self)
        self.depth_to_pcd = DepthToPointCloudDetector(agent=self)
        self.ground_plane_detector = GroundPlaneDetector(agent=self)
        # initialize open3d related content
        self.vis = o3d.visualization.Visualizer()
        self.vis.create_window(width=500, height=500)
        self.pcd = o3d.geometry.PointCloud()
        self.coordinate_frame = o3d.geometry.TriangleMesh.create_coordinate_frame()
        # geometries are added to the visualizer on first frame, updated after
        self.points_added = False

    def run_step(self, sensors_data: SensorsData, vehicle: Vehicle) -> VehicleControl:
        """Visualize the current depth frame as a point cloud; vehicle
        control is passed through unchanged."""
        super(OccupancyMapAgent, self).run_step(sensors_data, vehicle)
        if self.front_depth_camera.data is not None and self.front_rgb_camera.data is not None:
            depth_img = self.front_depth_camera.data.copy()
            pcd = self.depth_to_pcd.run_in_series(depth_image=depth_img)
            self.non_blocking_pcd_visualization(pcd=pcd,
                                                should_center=True,
                                                should_show_axis=True,
                                                axis_size=1)
        return self.vehicle.control

    def non_blocking_pcd_visualization(self, pcd: o3d.geometry.PointCloud,
                                       should_center=False,
                                       should_show_axis=False,
                                       axis_size: float = 0.1):
        """Render `pcd` without blocking the control loop.

        Args:
            pcd: point cloud to show.
            should_center: subtract the centroid so the cloud stays at origin.
            should_show_axis: also draw a coordinate frame at the centroid.
            axis_size: size of the drawn coordinate frame.
        """
        points = np.asarray(pcd.points)
        colors = np.asarray(pcd.colors)
        if should_center:
            points = points - np.mean(points, axis=0)
        if self.points_added is False:
            # first frame: geometries must be *added* to the visualizer
            self.pcd = o3d.geometry.PointCloud()
            self.pcd.points = o3d.utility.Vector3dVector(points)
            self.pcd.colors = o3d.utility.Vector3dVector(colors)
            if should_show_axis:
                self.coordinate_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(
                    size=axis_size, origin=np.mean(points, axis=0))
                self.vis.add_geometry(self.coordinate_frame)
            self.vis.add_geometry(self.pcd)
            self.points_added = True
        else:
            # subsequent frames: mutate the existing geometries and update
            # print(np.shape(np.vstack((np.asarray(self.pcd.points), points))))
            self.pcd.points = o3d.utility.Vector3dVector(points)
            self.pcd.colors = o3d.utility.Vector3dVector(colors)
            if should_show_axis:
                self.coordinate_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(
                    size=axis_size, origin=np.mean(points, axis=0))
                self.vis.update_geometry(self.coordinate_frame)
            self.vis.update_geometry(self.pcd)
        self.vis.poll_events()
        self.vis.update_renderer()
def __init__(self, **kwargs):
    """Set up the depth-to-point-cloud detector and create the output
    folders for recorded point clouds and their matching RGB frames."""
    super(PointCloudRecordingAgent, self).__init__(**kwargs)
    # kept for reference: previously used ground-plane point-cloud detector
    # self.gp_pointcloud_detector = GroundPlanePointCloudDetector(
    #     agent=self, max_points_to_convert=10000, nb_neighbors=100, std_ratio=1)
    self.depth_to_point_cloud_detector = DepthToPointCloudDetector(
        agent=self)
    self.pointcloud_output_folder_path = \
        self.output_folder_path / "pointcloud_roar_sim" / "pointcloud"
    self.pointcloud_rgb_output_folder_path = \
        self.output_folder_path / "pointcloud_roar_sim" / "rgb"
    # mkdir(exist_ok=True) is already a no-op for existing folders, so the
    # former `.exists() is False` guards were redundant and unidiomatic.
    self.pointcloud_output_folder_path.mkdir(parents=True, exist_ok=True)
    self.pointcloud_rgb_output_folder_path.mkdir(parents=True, exist_ok=True)
class PointCloudRecordingAgent(Agent):
    """Agent that records the per-frame point cloud as .npy files and the
    matching RGB frames as .png files."""

    def __init__(self, **kwargs):
        """Create the detector and the output folder structure."""
        super(PointCloudRecordingAgent, self).__init__(**kwargs)
        # kept for reference: previously used ground-plane point-cloud detector
        # self.gp_pointcloud_detector = GroundPlanePointCloudDetector(
        #     agent=self, max_points_to_convert=10000, nb_neighbors=100, std_ratio=1)
        self.depth_to_point_cloud_detector = DepthToPointCloudDetector(
            agent=self)
        self.pointcloud_output_folder_path = \
            self.output_folder_path / "pointcloud_roar_sim" / "pointcloud"
        self.pointcloud_rgb_output_folder_path = \
            self.output_folder_path / "pointcloud_roar_sim" / "rgb"
        # mkdir(exist_ok=True) is a no-op for existing folders; the former
        # `.exists() is False` guards were redundant.
        self.pointcloud_output_folder_path.mkdir(parents=True, exist_ok=True)
        self.pointcloud_rgb_output_folder_path.mkdir(parents=True, exist_ok=True)

    def run_step(self, sensors_data: SensorsData, vehicle: Vehicle) -> VehicleControl:
        """Save the current point cloud and RGB frame, then emit a neutral
        control. Recording failures are logged, never raised.

        BUG FIX: the original returned from inside `finally`, which silently
        swallows *any* in-flight exception — including KeyboardInterrupt and
        SystemExit. The return now happens after the try/except instead.
        """
        super(PointCloudRecordingAgent, self).run_step(sensors_data, vehicle)
        try:
            if self.front_rgb_camera.data is not None:
                points = self.depth_to_point_cloud_detector.run_in_series()  # (N x 3)
                output_file_path = self.pointcloud_output_folder_path / f"{self.time_counter}.npy"
                np.save(output_file_path, arr=points)
                # cv2.imshow("rgb", self.front_rgb_camera.data)
                # cv2.waitKey(1)
                cv2.imwrite((self.pointcloud_rgb_output_folder_path /
                             f"{self.time_counter}.png").as_posix(),
                            self.front_rgb_camera.data)
        except Exception as e:
            self.logger.error(f"Point cloud RunStep Error: {e}")
        return VehicleControl()
class CS249Agent(Agent):
    """Lead/follower car agent.

    The lead car follows a colored lane via an image-based PID controller;
    the follower tails the lead car's multicast-broadcast pose. Both run a
    small obstacle state machine (NORMAL -> AVOID -> BYPASS -> RECOVER)
    driven by an occupancy map built from the depth camera.
    """

    def __init__(self, vehicle: Vehicle, agent_settings: AgentConfig, **kwargs):
        super().__init__(vehicle, agent_settings, **kwargs)
        # Role and multicast identity; the follower looks up the lead car's
        # state under `car_to_follow` in the multicast message log.
        self.is_lead_car = True
        self.name = "car_0"
        self.car_to_follow = "car_1"
        self.udp_multicast = UDPMulticastCommunicator(agent=self,
                                                      mcast_group="224.1.1.1",
                                                      mcast_port=5004,
                                                      threaded=True,
                                                      update_interval=0.025,
                                                      name=self.name)
        self.add_threaded_module(self.udp_multicast)
        # lead car follows the lane; follower keeps distance to the lead car
        if self.is_lead_car:
            self.controller = LaneFollowingPID(agent=self)
        else:
            self.controller = UDP_PID_CONTROLLER(agent=self, distance_to_keep=1)
        self.prev_steerings: deque = deque(maxlen=10)

        # point cloud visualization
        self.vis = o3d.visualization.Visualizer()
        self.vis.create_window(width=500, height=500)
        self.pcd = o3d.geometry.PointCloud()
        self.coordinate_frame = o3d.geometry.TriangleMesh.create_coordinate_frame()
        self.points_added = False

        # pointcloud and ground plane detection
        self.depth2pointcloud = DepthToPointCloudDetector(agent=self)
        self.max_dist = 1.5  # keep only points within this distance
        self.height_threshold = 0.5  # padding above the car's y coordinate
        self.ransac_dist_threshold = 0.01
        self.ransac_n = 3
        self.ransac_itr = 100

        # occupancy map: square grid of (max_dist * scaling_factor) cells
        self.scaling_factor = 100
        self.occu_map = np.zeros(
            shape=(math.ceil(self.max_dist * self.scaling_factor),
                   math.ceil(self.max_dist * self.scaling_factor)),
            dtype=np.float32)
        # affine offsets for projecting point-cloud coords into the map
        self.cx = len(self.occu_map) // 2
        self.cz = 0

        # modes of the vehicle
        self.mode = CS249AgentModes.NORMAL
        self.obstacle_is_at = None  # ObstacleEnum side of the obstacle

        # obstacle avoidance hyperparameters (distances are traveled distance
        # from `obstacle_avoid_starting_coord` before switching phase)
        self.avoid_throttle = 0.18
        self.avoid_left_steer = -1
        self.avoid_right_steer = 0.5
        self.obstacle_avoid_starting_coord: Optional[Location] = None
        self.avoid_max_dist = 0.3
        self.bypass_dist_dist = 0.5
        self.recover_max_dist = 0.1

    def run_step(self, sensors_data: SensorsData, vehicle: Vehicle) -> VehicleControl:
        """Dispatch one control step to the lead-car or follower behavior."""
        super().run_step(sensors_data=sensors_data, vehicle=vehicle)
        if self.is_lead_car:
            return self.lead_car_step()
        else:
            return self.follower_step()

    def avoid_obstacle(self):
        """Steer away from the obstacle until `avoid_max_dist` has been
        traveled, then transition to OBSTACLE_BYPASS."""
        if self.obstacle_is_at is None:
            self.logger.error("No obstacle to avoid")
            return VehicleControl(brake=True)
        curr_coord = self.vehicle.transform.location.copy()
        if curr_coord.distance(self.obstacle_avoid_starting_coord) < self.avoid_max_dist:
            # steer toward the side opposite the obstacle
            if self.obstacle_is_at == ObstacleEnum.LEFT:
                self.logger.info("Avoiding obstacle --> TURNING RIGHT")
                return VehicleControl(throttle=self.avoid_throttle,
                                      steering=self.avoid_right_steer,
                                      brake=False)
            else:
                self.logger.info("Avoiding obstacle --> TURNING LEFT")
                return VehicleControl(throttle=self.avoid_throttle,
                                      steering=self.avoid_left_steer,
                                      brake=False)
        else:
            self.logger.info("Avoiding obstacle --> DONE! shifting to obstacle bypass")
            self.mode = CS249AgentModes.OBSTACLE_BYPASS
            self.obstacle_avoid_starting_coord = self.vehicle.transform.location.copy()
            return VehicleControl()

    def bypass_obstacle(self):
        """Drive past the obstacle (steering back toward its side) until
        `bypass_dist_dist` has been traveled, then transition to RECOVER."""
        if self.obstacle_is_at is None:
            self.logger.error("No obstacle to bypass")
            return VehicleControl(brake=True)
        curr_coord = self.vehicle.transform.location.copy()
        if curr_coord.distance(self.obstacle_avoid_starting_coord) < self.bypass_dist_dist:
            if self.obstacle_is_at == ObstacleEnum.LEFT:
                self.logger.info("BYPASSING --> TURNING LEFT")
                return VehicleControl(throttle=self.avoid_throttle,
                                      steering=self.avoid_left_steer,
                                      brake=False)
            else:
                self.logger.info("BYPASSING --> TURNING RIGHT")
                return VehicleControl(throttle=self.avoid_throttle,
                                      steering=self.avoid_right_steer,
                                      brake=False)
        else:
            self.logger.info("BYPASSING --> DONE! Shifting to recover mode")
            self.mode = CS249AgentModes.OBSTACLE_RECOVER
            self.obstacle_avoid_starting_coord = self.vehicle.transform.location.copy()
            return VehicleControl()

    def recover_obstacle(self):
        """Straighten back onto the original path until `recover_max_dist`
        has been traveled, then reset to NORMAL."""
        if self.obstacle_is_at is None:
            self.logger.error("No obstacle to recover from")
            return VehicleControl(brake=True)
        curr_coord = self.vehicle.transform.location.copy()
        if curr_coord.distance(self.obstacle_avoid_starting_coord) < self.recover_max_dist:
            if self.obstacle_is_at == ObstacleEnum.LEFT:
                self.logger.info("Recovering --> TURNING RIGHT")
                return VehicleControl(throttle=self.avoid_throttle,
                                      steering=self.avoid_right_steer,
                                      brake=False)
            else:
                self.logger.info("Recovering --> TURNING LEFT")
                return VehicleControl(throttle=self.avoid_throttle,
                                      steering=self.avoid_left_steer,
                                      brake=False)
        else:
            self.logger.info("Recovering --> DONE! Shifting to normal state")
            self.mode = CS249AgentModes.NORMAL
            self.obstacle_is_at = None
            self.obstacle_avoid_starting_coord = None
            return VehicleControl()

    def find_obstacles_via_depth_to_pcd(self, debug=False):
        """Depth frame -> point cloud -> ground-filtered occupancy map ->
        (left, center, right) occupancy tuples."""
        pcd: o3d.geometry.PointCloud = self.depth2pointcloud.run_in_series(
            self.front_depth_camera.data, self.front_rgb_camera.data)
        if debug:
            self.non_blocking_pcd_visualization(pcd=pcd, axis_size=1,
                                                should_show_axis=False)
        pcd = self.filter_ground(
            pcd=pcd,
            max_dist=self.max_dist,
            height_threshold=self.height_threshold,
            ransac_dist_threshold=self.ransac_dist_threshold,
            ransac_n=self.ransac_n,
            ransac_itr=self.ransac_itr)
        occu_map = self.occu_map_from_pcd(pcd=pcd,
                                          scaling_factor=self.scaling_factor,
                                          cx=self.cx,
                                          cz=self.cz)
        left, center, right = self.find_obstacle_l_c_r(occu_map=occu_map,
                                                       debug=debug)
        return left, center, right

    def find_obstacle_l_c_r(
            self,
            occu_map,
            left_occ_thresh=0.5,
            center_occ_thresh=0.5,
            right_occ_thresh=0.5,
            debug=False
    ) -> Tuple[Tuple[bool, float], Tuple[bool, float], Tuple[bool, float]]:
        """Given an occupancy map `occu_map`, find whether `left`, `center`,
        `right` is/are occupied, along with the occupancy probability.

        BUG FIX: the left and right probes previously received each other's
        thresholds (left used right_occ_thresh and vice versa); harmless with
        the equal defaults but wrong for any caller overriding them.

        Args:
            occu_map: occupancy map
            left_occ_thresh: lower ==> more likely to be occupied
            center_occ_thresh: lower ==> more likely to be occupied
            right_occ_thresh: lower ==> more likely to be occupied
            debug: if true, draw the probed regions

        Returns:
            three (occupied?, ratio) tuples for left, center, right.
        """
        backtorgb = cv2.cvtColor(occu_map, cv2.COLOR_GRAY2RGB)
        height, width, channel = backtorgb.shape
        # three probe rectangles ahead of the car: rows select lateral bands,
        # columns select the distance band (30%-50% of map depth)
        left_rec_start = (30 * width // 100, 40 * height // 100)
        left_rec_end = (50 * width // 100, 45 * height // 100)
        mid_rec_start = (30 * width // 100, 48 * height // 100)
        mid_rec_end = (50 * width // 100, 52 * height // 100)
        right_rec_start = (30 * width // 100, 55 * height // 100)
        right_rec_end = (50 * width // 100, 60 * height // 100)
        right = self.is_occupied(m=occu_map, start=right_rec_start,
                                 end=right_rec_end, threshold=right_occ_thresh)
        center = self.is_occupied(m=occu_map, start=mid_rec_start,
                                  end=mid_rec_end, threshold=center_occ_thresh)
        left = self.is_occupied(m=occu_map, start=left_rec_start,
                                end=left_rec_end, threshold=left_occ_thresh)
        if debug:
            backtorgb = cv2.rectangle(backtorgb, left_rec_start, left_rec_end,
                                      (0, 0, 255), 1)
            backtorgb = cv2.rectangle(backtorgb, mid_rec_start, mid_rec_end,
                                      (0, 255, 0), 1)
            backtorgb = cv2.rectangle(backtorgb, right_rec_start, right_rec_end,
                                      (255, 0, 0), 1)
            cv2.imshow("Occupancy Map", backtorgb)
            cv2.waitKey(1)
        return left, center, right

    @staticmethod
    def is_occupied(m, start, end, threshold) -> Tuple[bool, float]:
        """Return whether the area in `m` bounded by `start`/`end` is occupied.

        If (free spots / total area) is less than `threshold`, the area is
        considered occupied.

        Args:
            m: 2D numpy array of occupancy map (free map to be exact)
            start: (x, y) of the bounding box's start corner
            end: (x, y) of the bounding box's end corner
            threshold: free-ratio below which the area counts as occupied

        Returns:
            (occupied?, free ratio)
        """
        cropped = m[start[1]:end[1], start[0]:end[0]]
        area = (end[1] - start[1]) * (end[0] - start[0])
        spots_free = (cropped > 0.8).sum()
        ratio = round(spots_free / area, 3)
        # if spots_free/area < threshold, then this place is occupied
        return ratio < threshold, ratio

    def occu_map_from_pcd(self, pcd: o3d.geometry.PointCloud, scaling_factor,
                          cx, cz) -> np.ndarray:
        """Fold `pcd` into self.occu_map with a log-odds-style update.

        Points are affine-transformed into map indices; the whole map decays
        by 0.05 and observed cells gain 0.9, clipped to [0, 1].

        BUG FIX: `np.clip(points, 0, len(...))` discarded its result (np.clip
        is not in-place), so out-of-range points could index outside the map.
        The x/z index columns are now clipped to the valid index range.

        Args:
            pcd: point cloud
            scaling_factor: scaling for the pcd -> map affine transformation
            cx: x-axis offset of the affine transformation
            cz: z-axis offset of the affine transformation

        Returns:
            the updated occupancy map (also stored on self.occu_map)
        """
        points = np.asarray(pcd.points)
        points *= scaling_factor
        points = points.astype(int)
        points[:, 0] += cx
        points[:, 2] += cz
        points[:, 0] = np.clip(points[:, 0], 0, self.occu_map.shape[0] - 1)
        points[:, 2] = np.clip(points[:, 2], 0, self.occu_map.shape[1] - 1)
        self.occu_map -= 0.05  # decay: cells not re-observed fade out
        self.occu_map[points[:, 0], points[:, 2]] += 0.9  # ground
        self.occu_map = self.occu_map.clip(0, 1)
        # kernel = np.ones((2, 2), np.uint8)
        # self.occu_map = cv2.morphologyEx(self.occu_map, cv2.MORPH_OPEN, kernel)
        # self.occu_map = cv2.dilate(self.occu_map, kernel, iterations=2)
        return self.occu_map

    def filter_ground(self, pcd: o3d.geometry.PointCloud,
                      max_dist: float = 2,
                      height_threshold=0.5,
                      ransac_dist_threshold=0.01,
                      ransac_n=3,
                      ransac_itr=100) -> o3d.geometry.PointCloud:
        """Extract the ground plane from `pcd`.

        Keeps points below (car's y + height_threshold) and closer than
        `max_dist`, then runs RANSAC plane segmentation; assumes the ground
        is the largest plane remaining after that filtering.

        Args:
            pcd: point cloud to be parsed
            max_dist: maximum distance
            height_threshold: additional height padding
            ransac_dist_threshold: RANSAC distance threshold
            ransac_n: RANSAC starting number of points
            ransac_itr: RANSAC number of iterations

        Returns:
            point cloud that only has the ground.
        """
        points = np.asarray(pcd.points)
        colors = np.asarray(pcd.colors)
        points_of_interest = np.where(
            (points[:, 1] < self.vehicle.transform.location.y + height_threshold)
            & (points[:, 2] < max_dist))
        points = points[points_of_interest]
        colors = colors[points_of_interest]
        pcd.points = o3d.utility.Vector3dVector(points)
        pcd.colors = o3d.utility.Vector3dVector(colors)
        plane_model, inliers = pcd.segment_plane(
            distance_threshold=ransac_dist_threshold,
            ransac_n=ransac_n,
            num_iterations=ransac_itr)
        pcd = pcd.select_by_index(inliers)
        return pcd

    def non_blocking_pcd_visualization(self, pcd: o3d.geometry.PointCloud,
                                       should_center=False,
                                       should_show_axis=False,
                                       axis_size: float = 1):
        """Real time point cloud visualization.

        Args:
            pcd: point cloud to be visualized
            should_center: true to always center the point cloud
            should_show_axis: true to show axis
            axis_size: adjust axis size

        Returns:
            None
        """
        points = np.asarray(pcd.points)
        colors = np.asarray(pcd.colors)
        if should_center:
            points = points - np.mean(points, axis=0)
        if self.points_added is False:
            # first frame: geometries must be *added* to the visualizer
            self.pcd = o3d.geometry.PointCloud()
            self.pcd.points = o3d.utility.Vector3dVector(points)
            self.pcd.colors = o3d.utility.Vector3dVector(colors)
            if should_show_axis:
                self.coordinate_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(
                    size=axis_size, origin=np.mean(points, axis=0))
                self.vis.add_geometry(self.coordinate_frame)
            self.vis.add_geometry(self.pcd)
            self.points_added = True
        else:
            self.pcd.points = o3d.utility.Vector3dVector(points)
            self.pcd.colors = o3d.utility.Vector3dVector(colors)
            if should_show_axis:
                self.coordinate_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(
                    size=axis_size, origin=np.mean(points, axis=0))
                self.vis.update_geometry(self.coordinate_frame)
            self.vis.update_geometry(self.pcd)
        self.vis.poll_events()
        self.vis.update_renderer()

    def lead_car_step(self):
        """One control step for the lead car: broadcast state, handle
        obstacles, stop for traffic lights, otherwise follow the lane."""
        if self.time_counter % 10 == 0:
            self.udp_multicast.send_current_state()
        if self.front_depth_camera.data is not None and self.front_rgb_camera.data is not None:
            left, center, right = self.find_obstacles_via_depth_to_pcd(debug=True)
            if left[0] or center[0] or right[0] or self.mode in (
                    CS249AgentModes.OBSTACLE_AVOID,
                    CS249AgentModes.OBSTACLE_RECOVER,
                    CS249AgentModes.OBSTACLE_BYPASS):
                # self.logger.info(f"Braking due to obstacle: left: {left} | center: {center} | right: {right}")
                # return VehicleControl(brake=True)
                self.logger.info("Avoiding obstacle")
                return self.act_on_obstacle(left, center, right)
            if self.is_light_found(rgb_data=self.front_rgb_camera.data.copy(),
                                   debug=True):
                self.logger.info("Braking due to traffic light")
                return VehicleControl(brake=True)
            error = self.find_error()
            if error is None:
                return self.no_line_seen()
            else:
                self.kwargs["lat_error"] = error
                self.vehicle.control = self.controller.run_in_series(next_waypoint=None)
                self.prev_steerings.append(self.vehicle.control.steering)
                return self.vehicle.control
        else:
            # image feed is not available yet
            return VehicleControl()

    def follower_step(self):
        """One control step for the follower: chase the lead car's broadcast
        pose, diverting around obstacles that are not the lead car itself."""
        if self.time_counter % 10 == 0:
            self.udp_multicast.send_current_state()
        # msg payload: location x, y, z; rotation roll, pitch, yaw;
        # velocity x, y, z; acceleration x, y, z
        if self.udp_multicast.msg_log.get(self.car_to_follow, None) is not None:
            control = self.controller.run_in_series(
                next_waypoint=Transform.from_array(
                    self.udp_multicast.msg_log[self.car_to_follow]))
            if self.front_depth_camera.data is not None and self.front_rgb_camera.data is not None:
                left, center, right = self.find_obstacles_via_depth_to_pcd()
                obstacle_detected = left[0] or center[0] or right[0]
                if obstacle_detected and Transform.from_array(
                        self.udp_multicast.msg_log[self.car_to_follow]
                ).location.distance(self.vehicle.transform.location
                                    ) > self.controller.distance_to_keep:
                    # obstacle detected and it is NOT the followed vehicle:
                    # execute the avoid sequence
                    control = self.act_on_obstacle(left, center, right)
                    return control
            return control
        else:
            # self.logger.info("No other cars found")
            return VehicleControl(throttle=0, steering=0)

    def act_on_obstacle(self, left, center, right):
        """Obstacle state machine: start avoidance on first sighting, then
        dispatch to the avoid/bypass/recover phase currently in progress."""
        # if obstacle is detected, deal with obstacle first
        if left[0] and center[0] and right[0]:
            self.logger.info("Can't find feasible path around obstacle")
            return VehicleControl(brake=True)
        avoidance_modes = (CS249AgentModes.OBSTACLE_AVOID,
                           CS249AgentModes.OBSTACLE_BYPASS,
                           CS249AgentModes.OBSTACLE_RECOVER)
        # there is a way around it and no avoidance is in progress: start one
        if (left[0] or center[0] or right[0]) and self.mode not in avoidance_modes:
            self.mode = CS249AgentModes.OBSTACLE_AVOID
            self.obstacle_is_at = ObstacleEnum.LEFT if left[0] else ObstacleEnum.RIGHT
            self.obstacle_avoid_starting_coord = self.vehicle.transform.location.copy()
            return VehicleControl()
        elif self.mode in avoidance_modes:
            if self.mode == CS249AgentModes.OBSTACLE_AVOID:
                return self.avoid_obstacle()
            elif self.mode == CS249AgentModes.OBSTACLE_BYPASS:
                return self.bypass_obstacle()
            elif self.mode == CS249AgentModes.OBSTACLE_RECOVER:
                return self.recover_obstacle()
            else:
                return VehicleControl(brake=True)
        return VehicleControl()

    def is_light_found(self, rgb_data, low=(200, 200, 0), high=(255, 255, 100),
                       n=500, debug=False) -> bool:
        """Find if there's light depending on whether the image has at least
        `n` pixels within the `low`..`high` color range.

        Args:
            rgb_data: BGR image to scan
            low: low range in the format of BGR
            high: high range in the format of BGR
            n: minimum number of pixels to register as light found
            debug: True to show the mask image

        Returns:
            True if light is found, False otherwise
        """
        mask = cv2.inRange(rgb_data, low, high)
        if debug:
            cv2.imshow("traffic light", mask)
            cv2.waitKey(1)  # required for the imshow window to refresh
        return (mask > 0).sum() > n

    def no_line_seen(self):
        """Fallback when the lane is not visible: on a down slope keep the
        previous steering as-is, otherwise replay an adjusted command."""
        neutral = -90
        incline = self.vehicle.transform.rotation.pitch - neutral
        if incline < -10:
            # down slope: execute previous command as-is with downhill PID throttle
            long_control = self.controller.long_pid_control()
            self.vehicle.control.throttle = long_control
            return self.vehicle.control
        else:
            # flat or up slope: execute adjusted previous command
            return self.execute_prev_command()

    def obstacle_found(self, threshold=0.468, debug=False) -> bool:
        """Find obstacle by interpreting the depth image directly: if the
        average depth inside the region of interest is smaller than
        `threshold`, something is probably blocking the view.

        Args:
            threshold: minimum threshold to detect obstacle
            debug: true to show image

        Returns:
            True if obstacle is detected, false otherwise
        """
        if self.front_depth_camera.data is not None:
            depth_data = self.front_depth_camera.data
            roi = depth_data[70 * depth_data.shape[0] // 100:
                             90 * depth_data.shape[0] // 100,
                             30 * depth_data.shape[1] // 100:
                             60 * depth_data.shape[1] // 100]
            dist = np.average(roi)
            if debug:
                cv2.imshow("roi", roi)
                # cv2.imshow("depth", depth_data)
                print("distance to obstacle avg = ", dist)
            return dist < threshold
        else:
            return False

    def find_error(self):
        """Compute the lateral lane error from the RGB image, or None when
        no lane is visible at either probe row."""
        # make rgb and depth into the same shape
        data: np.ndarray = cv2.resize(self.front_rgb_camera.data.copy(),
                                      dsize=(192, 256))
        data = self.rgb2ycbcr(data)
        # probe the lane at two heights; the higher row (y_offset=50) is
        # further away and, when present, takes precedence
        error_at_10 = self.find_error_at(data=data,
                                         y_offset=10,
                                         error_scaling=[(20, 0.1), (40, 0.75),
                                                        (60, 0.8), (80, 0.9),
                                                        (100, 0.95), (200, 1)])
        error_at_50 = self.find_error_at(data=data,
                                         y_offset=50,
                                         error_scaling=[(20, 0.2), (40, 0.4),
                                                        (60, 0.7), (70, 0.7),
                                                        (80, 0.7), (100, 0.8),
                                                        (200, 0.8)])
        if error_at_10 is None and error_at_50 is None:
            return None
        # we only want to follow the furthest thing we see.
        error = 0
        if error_at_10 is not None:
            error = error_at_10
        if error_at_50 is not None:
            error = error_at_50
        return error

    def find_error_at(self, data, y_offset, error_scaling) -> Optional[float]:
        """Scan row (height - y_offset) of the YCbCr image for lane-colored
        pixels; return the scaled offset of their mean x from center, or None
        when no lane pixels are found on that row."""
        y = data.shape[0] - y_offset
        lane_x = []
        # color calibrations for CORY 337; TERRACE values were
        # (0,150,60)-(250,240,140) red and (0,130,0)-(250,200,110) yellow
        mask_red = cv2.inRange(src=data, lowerb=(0, 180, 60),
                               upperb=(250, 240, 140))  # CORY 337 RED
        mask_yellow = cv2.inRange(src=data, lowerb=(0, 140, 0),
                                  upperb=(250, 200, 80))  # CORY 337 YELLOW
        mask = mask_red | mask_yellow
        # morphological open to drop speckle noise
        kernel = np.ones((5, 5), np.uint8)
        mask = cv2.erode(mask, kernel, iterations=1)
        mask = cv2.dilate(mask, kernel, iterations=1)
        cv2.imshow("Lane Mask (Yellow + Red)", mask)
        for x in range(0, data.shape[1], 5):
            if mask[y][x] > 0:
                lane_x.append(x)
        if len(lane_x) == 0:
            return None
        # lane found: error is mean lane x relative to the image center
        avg_x = int(np.average(lane_x))
        center_x = data.shape[1] // 2
        error = avg_x - center_x
        # small errors are damped hard; only big errors matter
        for e, scale in error_scaling:
            if abs(error) <= e:
                error = error * scale
                break
        return error

    def execute_prev_command(self):
        """No lane found: saturate steering toward the recent average
        direction and keep PID longitudinal control."""
        if np.average(self.prev_steerings) < 0:
            self.vehicle.control.steering = -1
        else:
            self.vehicle.control.steering = 1
        # self.logger.info("Cannot see line, executing prev cmd")
        self.prev_steerings.append(self.vehicle.control.steering)
        self.vehicle.control.throttle = self.controller.long_pid_control()
        return self.vehicle.control

    def rgb2ycbcr(self, im):
        """Convert an RGB image (HxWx3) to YCbCr via the standard matrix."""
        xform = np.array([[.299, .587, .114],
                          [-.1687, -.3313, .5],
                          [.5, -.4187, -.0813]])
        ycbcr = im.dot(xform.T)
        ycbcr[:, :, [1, 2]] += 128
        return np.uint8(ycbcr)
class PointcloudRecordingAgent(Agent):
    """Agent that records per-frame point clouds to disk while rasterizing
    ground-filtered points into a 2D occupancy map.

    Each ``run_step`` converts the current RGB-D frame to a point cloud,
    saves it under ``./data/pointcloud``, keeps only ground-plane points,
    and projects them onto the X-Z plane of ``self.occu_map``. The returned
    control is always neutral — this agent only records.
    """

    def __init__(self, vehicle: Vehicle, agent_settings: AgentConfig, **kwargs):
        super().__init__(vehicle, agent_settings, **kwargs)
        # History of recent steering commands, used by execute_prev_command.
        self.prev_steerings: deque = deque(maxlen=10)
        # Swap in the iOS-specific PID gains located next to the configured file.
        self.agent_settings.pid_config_file_path = (
            Path(self.agent_settings.pid_config_file_path).parent / "iOS_pid_config.json"
        ).as_posix()
        self.controller = ImageBasedPIDController(agent=self)

        # START LOC
        self.start_loc: Optional[Transform] = None
        self.start_loc_bound: float = 0.2
        self.has_exited_start_loc: bool = False

        # STOP Mid step
        self.ip_addr = "10.0.0.2"

        # Waypoint Following
        self.waypoints: List[Transform] = []
        self.curr_waypoint_index = 0
        self.closeness_threshold = 0.4

        # occupancy grid map
        # point cloud visualization
        # self.vis = o3d.visualization.Visualizer()
        # self.vis.create_window(width=500, height=500)
        # self.pcd = o3d.geometry.PointCloud()
        # self.coordinate_frame = o3d.geometry.TriangleMesh.create_coordinate_frame()
        # self.points_added = False

        # pointcloud and ground plane detection
        self.depth2pointcloud = DepthToPointCloudDetector(agent=self)
        self.max_dist = 1.5
        self.height_threshold = 1
        self.ransac_dist_threshold = 0.01
        self.ransac_n = 3
        self.ransac_itr = 100

        self.waypoint_map: Optional[Map] = None
        buffer = 10
        x_scale = 20
        y_scale = 20
        x_offset = 100
        y_offset = 100
        self.occu_map = Map(
            x_offset=x_offset, y_offset=y_offset,
            x_scale=x_scale, y_scale=y_scale,
            x_width=2500, y_height=2500,
            buffer=buffer, name="occupancy map"
        )
        # BGR render buffer for visualizing the occupancy map in run_step.
        self.m = np.zeros(shape=(self.occu_map.map.shape[0], self.occu_map.map.shape[1], 3))

    def run_step(self, sensors_data: SensorsData, vehicle: Vehicle) -> VehicleControl:
        """Record this frame's point cloud and update the occupancy map.

        Returns:
            A neutral ``VehicleControl`` — the agent never drives.
        """
        super(PointcloudRecordingAgent, self).run_step(sensors_data, vehicle)
        if self.front_rgb_camera.data is not None and self.front_depth_camera.data is not None:
            self.prev_steerings.append(self.vehicle.control.steering)
            try:
                pcd: o3d.geometry.PointCloud = self.depth2pointcloud.run_in_series(
                    self.front_depth_camera.data, self.front_rgb_camera.data)
                # Persist the raw cloud under a microsecond-resolution timestamp name.
                folder_name = Path("./data/pointcloud")
                folder_name.mkdir(parents=True, exist_ok=True)
                o3d.io.write_point_cloud(
                    (folder_name / f"{datetime.now().strftime('%m_%d_%Y_%H_%M_%S_%f')}.pcd").as_posix(),
                    pcd, print_progress=True)
                # Keep only ground points, then project them onto the X-Z plane.
                pcd = self.filter_ground(pcd)
                points = np.asarray(pcd.points)
                new_points = np.copy(points)
                points = np.vstack([new_points[:, 0], new_points[:, 2]]).T
                self.occu_map.update(points, val=1)
                coord = self.occu_map.world_loc_to_occu_map_coord(loc=self.vehicle.transform.location)
                # White = occupied cells; blue square = current vehicle position.
                self.m[np.where(self.occu_map.map == 1)] = [255, 255, 255]
                self.m[coord[1] - 2:coord[1] + 2, coord[0] - 2:coord[0] + 2] = [0, 0, 255]
                cv2.imshow("m", self.m)
            except Exception as e:
                # Best-effort recording: a single bad frame must not kill the loop.
                print(e)
        return VehicleControl()

    @staticmethod
    def load_data(file_path: str) -> List[Transform]:
        """Load waypoints from a text file of comma-separated ``x,y,z`` lines.

        Args:
            file_path: path to the waypoint file.

        Returns:
            One ``Transform`` per line, in file order.
        """
        waypoints = []
        # Context manager ensures the file handle is closed even if a line
        # fails to parse (the original leaked the handle on error).
        with Path(file_path).open('r') as f:
            for line in f.readlines():
                x, y, z = line.split(",")
                x, y, z = float(x), float(y), float(z)
                l = Location(x=x, y=y, z=z)
                waypoints.append(Transform(location=l))
        return waypoints

    def filter_ground(self, pcd: o3d.geometry.PointCloud, max_dist: float = -1,
                      height_threshold=0.1, ransac_dist_threshold=0.01,
                      ransac_n=3, ransac_itr=100) -> o3d.geometry.PointCloud:
        """
        Find ground from point cloud by first selecting points that are below
        the (car's position + a certain threshold), then doing RANSAC on the
        resulting point cloud.

        Note that this function assumes that the ground will be the largest
        plane seen after filtering out everything above the vehicle.

        NOTE(review): ``max_dist`` and ``height_threshold`` are accepted but
        currently unused — the height cut below is hard-coded to 0.3 and no
        distance filter is applied. Confirm whether the parameters should be
        wired in before relying on them.

        Args:
            pcd: point cloud to be parsed
            max_dist: maximum distance (currently ignored)
            height_threshold: additional height padding (currently ignored)
            ransac_dist_threshold: RANSAC distance threshold
            ransac_n: RANSAC starting number of points
            ransac_itr: RANSAC number of iterations

        Returns:
            point cloud that only has the ground.
        """
        points = np.asarray(pcd.points)
        colors = np.asarray(pcd.colors)
        # height and distance filter
        # 0 -> left and right | 1 -> up and down | 2 = close and far
        points_of_interest = np.where((points[:, 1] < 0.3))
        points = points[points_of_interest]
        colors = colors[points_of_interest]
        pcd.points = o3d.utility.Vector3dVector(points)
        pcd.colors = o3d.utility.Vector3dVector(colors)
        plane_model, inliers = pcd.segment_plane(distance_threshold=ransac_dist_threshold,
                                                 ransac_n=ransac_n,
                                                 num_iterations=ransac_itr)
        pcd: o3d.geometry.PointCloud = pcd.select_by_index(inliers)
        # Down-sample to a 1cm voxel grid to bound the point count.
        pcd = pcd.voxel_down_sample(0.01)
        return pcd

    def waypoint_visualize(self, map_data: np.ndarray,
                           name: str = "waypoint_visualization",
                           car_location: Optional[Location] = None,
                           next_waypoint_location: Optional[Location] = None):
        """Render the waypoint map with optional car (blue) and next-waypoint
        (green) markers in an OpenCV window.

        Args:
            map_data: 2D occupancy array; cells > 0.9 are drawn white.
            name: OpenCV window title.
            car_location: world location of the car, if any.
            next_waypoint_location: world location of the next waypoint, if any.
        """
        m = np.zeros(shape=(map_data.shape[0], map_data.shape[1], 3))
        m[np.where(map_data > 0.9)] = [255, 255, 255]
        point_size = 2
        if car_location is not None:
            coord = self.waypoint_map.world_loc_to_occu_map_coord(car_location)
            m[coord[1] - point_size:coord[1] + point_size,
              coord[0] - point_size:coord[0] + point_size] = [0, 0, 255]
        if next_waypoint_location is not None:
            coord = self.waypoint_map.world_loc_to_occu_map_coord(next_waypoint_location)
            m[coord[1] - point_size:coord[1] + point_size,
              coord[0] - point_size:coord[0] + point_size] = [0, 255, 0]
        cv2.imshow(name, m)
        cv2.waitKey(1)

    """ Lane Following """

    def find_error(self):
        """Estimate lane-following error from the front RGB camera.

        Returns:
            Scaled pixel error, or None when no lane is visible on either
            sampled scanline.
        """
        # make rgb and depth into the same shape
        data: np.ndarray = cv2.resize(self.front_rgb_camera.data.copy(), dsize=(192, 256))
        # cv2.imshow("rgb_mask", cv2.inRange(data, self.rgb_lower_range, self.rgb_upper_range))
        data = self.rgb2ycbcr(data)
        # cv2.imshow("ycbcr_mask", cv2.inRange(data, self.ycbcr_lower_range, self.ycbcr_upper_range))
        # find the lane on two scanlines, 10 and 50 pixels above the bottom
        error_at_10 = self.find_error_at(data=data,
                                         y_offset=10,
                                         error_scaling=[
                                             (20, 0.1),
                                             (40, 0.75),
                                             (60, 0.8),
                                             (80, 0.9),
                                             (100, 0.95),
                                             (200, 1)
                                         ])
        error_at_50 = self.find_error_at(data=data,
                                         y_offset=50,
                                         error_scaling=[
                                             (20, 0.2),
                                             (40, 0.4),
                                             (60, 0.7),
                                             (70, 0.7),
                                             (80, 0.7),
                                             (100, 0.8),
                                             (200, 0.8)
                                         ])
        if error_at_10 is None and error_at_50 is None:
            return None
        # we only want to follow the furthest thing we see.
        error = 0
        if error_at_10 is not None:
            error = error_at_10
        if error_at_50 is not None:
            error = error_at_50
        return error

    def find_error_at(self, data, y_offset, error_scaling) -> Optional[float]:
        """Measure lateral lane offset on one scanline using a blue mask.

        Args:
            data: color image (H x W x 3) — presumably YCbCr from rgb2ycbcr,
                matching the inRange bounds below; TODO confirm
            y_offset: scanline position, measured up from the image bottom
            error_scaling: (abs_error_bound, scale) pairs checked in order

        Returns:
            Scaled pixel offset from the image center, or None when no lane
            pixels are found on the scanline.
        """
        y = data.shape[0] - y_offset
        lane_x = []
        cv2.imshow("data", data)
        # mask_red = cv2.inRange(src=data, lowerb=(0, 150, 60), upperb=(250, 240, 140))  # TERRACE RED
        # mask_yellow = cv2.inRange(src=data, lowerb=(0, 130, 0), upperb=(250, 200, 110))  # TERRACE YELLOW
        # mask_red = cv2.inRange(src=data, lowerb=(0, 180, 60), upperb=(250, 240, 140))  # CORY 337 RED
        # mask_yellow = cv2.inRange(src=data, lowerb=(0, 140, 0), upperb=(250, 200, 80))  # CORY 337 YELLOW
        mask_blue = cv2.inRange(src=data, lowerb=(60, 70, 120), upperb=(170, 130, 255))  # SHUWEI BLUE
        mask = mask_blue
        # mask = mask_red | mask_yellow
        # cv2.imshow("Lane Mask (Red)", mask_red)
        # cv2.imshow("Lane Mask (Yellow)", mask_yellow)
        # Morphological open (erode then dilate) to drop speckle noise.
        kernel = np.ones((5, 5), np.uint8)
        mask = cv2.erode(mask, kernel, iterations=1)
        mask = cv2.dilate(mask, kernel, iterations=1)
        cv2.imshow("Lane Mask (mask_blue)", mask)
        # Sample every 5th column along the chosen scanline.
        for x in range(0, data.shape[1], 5):
            if mask[y][x] > 0:
                lane_x.append(x)
        if len(lane_x) == 0:
            return None
        # if lane is found
        avg_x = int(np.average(lane_x))
        # find error
        center_x = data.shape[1] // 2
        error = avg_x - center_x
        # we want small error to be almost ignored, only big errors matter.
        for e, scale in error_scaling:
            if abs(error) <= e:
                # print(f"Error at {y_offset} -> {error, scale} -> {error * scale}")
                error = error * scale
                break
        return error

    def execute_prev_command(self):
        """Fallback control when no lane is visible.

        Saturates steering to the sign of the recent average steering and
        keeps the longitudinal PID throttle running. NOTE(review): despite
        the comment below, steering is set to full -1/+1, not decayed.
        """
        # no lane found, execute the previous control with a decaying factor
        if np.average(self.prev_steerings) < 0:
            self.vehicle.control.steering = -1
        else:
            self.vehicle.control.steering = 1
        # self.logger.info("Cannot see line, executing prev cmd")
        self.prev_steerings.append(self.vehicle.control.steering)
        self.vehicle.control.throttle = self.controller.long_pid_control()
        # self.logger.info(f"No Lane found, executing discounted prev command: {self.vehicle.control}")
        return self.vehicle.control

    def rgb2ycbcr(self, im):
        """Convert an RGB image (H x W x 3) to YCbCr.

        Uses the ITU-R BT.601 coefficients (0.299 / 0.587 / 0.114) and
        offsets the chroma channels by 128; returns a uint8 array.
        """
        xform = np.array([[.299, .587, .114], [-.1687, -.3313, .5], [.5, -.4187, -.0813]])
        ycbcr = im.dot(xform.T)
        ycbcr[:, :, [1, 2]] += 128
        return np.uint8(ycbcr)

    def no_line_seen(self):
        """Handle a frame with no detected lane, branching on slope.

        On a down slope (pitch more than 10 degrees below the -90 neutral),
        keep throttle from the longitudinal PID and leave steering as-is;
        otherwise fall back to execute_prev_command().
        """
        # did not see the line
        neutral = -90
        incline = self.vehicle.transform.rotation.pitch - neutral
        if incline < -10:
            # is down slope, execute previous command as-is
            # get the PID for downhill
            long_control = self.controller.long_pid_control()
            self.vehicle.control.throttle = long_control
            return self.vehicle.control
        else:
            # is flat or up slope, execute adjusted previous command
            return self.execute_prev_command()

    def non_blocking_pcd_visualization(self, pcd: o3d.geometry.PointCloud,
                                       should_center=False,
                                       should_show_axis=False,
                                       axis_size: float = 1):
        """
        Real time point cloud visualization.

        NOTE(review): this method reads ``self.vis``, ``self.pcd`` and
        ``self.points_added``, which are commented out in this class's
        ``__init__`` — it will raise AttributeError unless those are
        initialized elsewhere; confirm before calling.

        Args:
            pcd: point cloud to be visualized
            should_center: true to always center the point cloud
            should_show_axis: true to show axis
            axis_size: adjust axis size

        Returns:
            None
        """
        points = np.asarray(pcd.points)
        colors = np.asarray(pcd.colors)
        if should_center:
            points = points - np.mean(points, axis=0)
        if self.points_added is False:
            # First frame: create geometry and register it with the visualizer.
            self.pcd = o3d.geometry.PointCloud()
            self.pcd.points = o3d.utility.Vector3dVector(points)
            self.pcd.colors = o3d.utility.Vector3dVector(colors)
            if should_show_axis:
                self.coordinate_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(
                    size=axis_size, origin=np.mean(points, axis=0))
                self.vis.add_geometry(self.coordinate_frame)
            self.vis.add_geometry(self.pcd)
            self.points_added = True
        else:
            # Subsequent frames: update the existing geometry in place.
            # print(np.shape(np.vstack((np.asarray(self.pcd.points), points))))
            self.pcd.points = o3d.utility.Vector3dVector(points)
            self.pcd.colors = o3d.utility.Vector3dVector(colors)
            if should_show_axis:
                self.coordinate_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(
                    size=axis_size, origin=np.mean(points, axis=0))
                self.vis.update_geometry(self.coordinate_frame)
            self.vis.update_geometry(self.pcd)
        self.vis.poll_events()
        self.vis.update_renderer()