def create_camera_setups(track):
    """Creates different camera setups depending on the track.

    Always adds a wide (90 degree) center camera and a narrow (45 degree)
    traffic-light camera at the same mount point. On the cameras-only
    track, additionally adds laterally-offset left/right cameras.

    Returns:
        Dict from camera name to its RGBCameraSetup.
    """
    setups = {}
    center_transform = pylot.utils.Transform(CENTER_CAMERA_LOCATION,
                                             pylot.utils.Rotation())
    setups[CENTER_CAMERA_NAME] = RGBCameraSetup(
        CENTER_CAMERA_NAME, FLAGS.carla_camera_image_width,
        FLAGS.carla_camera_image_height, center_transform, 90)
    # Traffic-light camera shares the center mount, but with a narrow FOV.
    setups[TL_CAMERA_NAME] = RGBCameraSetup(TL_CAMERA_NAME,
                                            FLAGS.carla_camera_image_width,
                                            FLAGS.carla_camera_image_height,
                                            center_transform, 45)
    if track == Track.CAMERAS:
        # No access to lidar on this track; add left and right cameras.
        side_cameras = ((LEFT_CAMERA_NAME, -FLAGS.offset_left_right_cameras),
                        (RIGHT_CAMERA_NAME, FLAGS.offset_left_right_cameras))
        for camera_name, lateral_offset in side_cameras:
            location = CENTER_CAMERA_LOCATION + pylot.utils.Location(
                0, lateral_offset, 0)
            setups[camera_name] = RGBCameraSetup(
                camera_name, FLAGS.carla_camera_image_width,
                FLAGS.carla_camera_image_height,
                pylot.utils.Transform(location, pylot.utils.Rotation()), 90)
    return setups
def log_bounding_boxes(simulator_image, depth_msg, segmented_frame,
                       traffic_lights, tl_color, speed_signs, stop_signs,
                       weather, town):
    """Logs the camera frame and bounding boxes of signs visible in it.

    Writes a PNG of the frame and a JSON file of bounding boxes (for speed
    limits, stop signs, and traffic lights) under ``FLAGS.data_path``, with
    the game time, weather, and town encoded in the file names. Optionally
    also writes/visualizes an annotated copy of the frame.
    """
    # Simulator timestamps are in seconds; convert to milliseconds.
    game_time = int(simulator_image.timestamp * 1000)
    print("Processing game time {} in {} with weather {}".format(
        game_time, town, weather))
    transform = pylot.utils.Transform.from_simulator_transform(
        simulator_image.transform)
    # NOTE(review): mixes FLAGS.frame_width with FLAGS.camera_height —
    # presumably both flags exist in this script; verify they are the
    # intended pair (other scripts use camera_image_width/height).
    camera_setup = RGBCameraSetup("rgb_camera", FLAGS.frame_width,
                                  FLAGS.camera_height, transform,
                                  FLAGS.camera_fov)
    frame = CameraFrame.from_simulator_frame(simulator_image, camera_setup)
    _, world = get_world()
    town_name = world.get_map().name

    # Collect detected obstacles per sign category; empty input lists
    # (or None) yield empty obstacle lists.
    speed_limit_det_obstacles = []
    if speed_signs:
        speed_limit_det_obstacles = \
            pylot.simulation.utils.get_detected_speed_limits(
                speed_signs, depth_msg.frame, segmented_frame)

    traffic_stop_det_obstacles = []
    if stop_signs:
        traffic_stop_det_obstacles = \
            pylot.simulation.utils.get_detected_traffic_stops(
                stop_signs, depth_msg.frame)

    traffic_light_det_obstacles = []
    if traffic_lights:
        traffic_light_det_obstacles = get_traffic_light_obstacles(
            traffic_lights, depth_msg.frame, segmented_frame, tl_color,
            town_name)

    det_obstacles = (speed_limit_det_obstacles + traffic_stop_det_obstacles +
                     traffic_light_det_obstacles)

    # Log the frame.
    file_name = '{}signs-{}_{}_{}.png'.format(FLAGS.data_path, game_time,
                                              weather, town)
    rgb_img = Image.fromarray(frame.as_rgb_numpy_array())
    rgb_img.save(file_name)

    if FLAGS.log_bbox_images:
        # Note: annotation mutates the frame in place before saving.
        frame.annotate_with_bounding_boxes(game_time, det_obstacles)
        file_name = '{}annotated-signs-{}_{}_{}.png'.format(
            FLAGS.data_path, game_time, weather, town)
        rgb_img = Image.fromarray(frame.as_rgb_numpy_array())
        rgb_img.save(file_name)

    # Log the bounding boxes.
    bboxes = [obstacle.get_in_log_format() for obstacle in det_obstacles]
    file_name = '{}bboxes-{}_{}_{}.json'.format(FLAGS.data_path, game_time,
                                                weather, town)
    with open(file_name, 'w') as outfile:
        json.dump(bboxes, outfile)

    if FLAGS.visualize_bboxes:
        frame.annotate_with_bounding_boxes(game_time, det_obstacles)
        frame.visualize('bboxes')
def add_rgb_camera(transform, vehicle_id_stream, name='center_rgb_camera',
                   fov=90):
    """Attaches an RGB camera driver to the ego vehicle.

    Args:
        transform: Transform of the camera relative to the ego vehicle.
        vehicle_id_stream: Stream carrying the ego-vehicle id.
        name: Name given to the camera setup.
        fov: Field of view of the camera, in degrees.

    Returns:
        A tuple of the camera output stream and its RGBCameraSetup.
    """
    setup = RGBCameraSetup(name, FLAGS.carla_camera_image_width,
                           FLAGS.carla_camera_image_height, transform, fov)
    stream = _add_camera_driver(vehicle_id_stream, setup)
    return (stream, setup)
def add_rgb_camera(transform, vehicle_id_stream, release_sensor_stream,
                   name='center_rgb_camera', fov=90):
    """Attaches an RGB camera driver to the ego vehicle.

    Args:
        transform: Transform of the camera relative to the ego vehicle.
        vehicle_id_stream: Stream carrying the ego-vehicle id.
        release_sensor_stream: Stream used to ask the sensor to release
            its readings.
        name: Name given to the camera setup.
        fov: Field of view of the camera, in degrees.

    Returns:
        A tuple of the camera output stream, the notify-reading stream,
        and the RGBCameraSetup.
    """
    # Imported locally to mirror the original's lazy import behavior.
    from pylot.drivers.sensor_setup import RGBCameraSetup
    setup = RGBCameraSetup(name, FLAGS.carla_camera_image_width,
                           FLAGS.carla_camera_image_height, transform, fov)
    camera_stream, notify_reading_stream = _add_camera_driver(
        vehicle_id_stream, release_sensor_stream, setup)
    return (camera_stream, notify_reading_stream, setup)
def create_camera_setups():
    """Creates RGBCameraSetups for the cameras deployed on the car.

    Note: Change this method if your agent requires more cameras, or if
    you want to change camera properties.

    It returns a dict from camera name to
    :py:class:`~pylot.drivers.sensor_setup.RGBCameraSetup`.
    """
    front_transform = pylot.utils.Transform(CENTER_CAMERA_LOCATION,
                                            pylot.utils.Rotation())
    # The center front camera is always present.
    setups = {
        CENTER_CAMERA_NAME:
        RGBCameraSetup(CENTER_CAMERA_NAME, FLAGS.camera_image_width,
                       FLAGS.camera_image_height, front_transform, 90)
    }
    if not FLAGS.simulator_traffic_light_detection:
        # Narrow field-of-view camera, co-located with the center camera,
        # used for traffic light detection.
        setups[TL_CAMERA_NAME] = RGBCameraSetup(TL_CAMERA_NAME,
                                                FLAGS.camera_image_width,
                                                FLAGS.camera_image_height,
                                                front_transform, 45)
    if FLAGS.execution_mode == 'challenge-sensors':
        # Sensors-only track: the OpenDrive map is unavailable, so add a
        # pitched-down camera used for lane detection.
        lane_transform = pylot.utils.Transform(
            LANE_CAMERA_LOCATION, pylot.utils.Rotation(pitch=-15))
        setups[LANE_CAMERA_NAME] = RGBCameraSetup(LANE_CAMERA_NAME, 1280,
                                                  720, lane_transform, 90)
    return setups
def create_camera_setups():
    """Builds the dict of RGB camera setups used by this agent.

    Returns a dict from camera name to its RGBCameraSetup.
    """
    front_transform = pylot.utils.Transform(CENTER_CAMERA_LOCATION,
                                            pylot.utils.Rotation())
    setups = {
        CENTER_CAMERA_NAME:
        RGBCameraSetup(CENTER_CAMERA_NAME, FLAGS.camera_image_width,
                       FLAGS.camera_image_height, front_transform, 90)
    }
    if not FLAGS.carla_traffic_light_detection:
        # Narrow-FOV camera, co-located with the center camera, used for
        # traffic light detection.
        setups[TL_CAMERA_NAME] = RGBCameraSetup(TL_CAMERA_NAME,
                                                FLAGS.camera_image_width,
                                                FLAGS.camera_image_height,
                                                front_transform, 45)
    if FLAGS.execution_mode == 'challenge-sensors':
        # Add camera for lane detection.
        lane_transform = pylot.utils.Transform(
            LANE_CAMERA_LOCATION, pylot.utils.Rotation(pitch=-15))
        setups[LANE_CAMERA_NAME] = RGBCameraSetup(LANE_CAMERA_NAME, 1280,
                                                  720, lane_transform, 90)
    # Optional yawed left/right cameras were previously sketched here; if
    # needed, add RGBCameraSetups at CENTER_CAMERA_LOCATION with
    # Rotation(yaw=-45) / Rotation(yaw=45).
    return setups
def main(argv):
    """Runs a data-gathering pipeline on simulator auto-pilot.

    Attaches camera/IMU/GNSS/lidar sensors to the ego vehicle (gated by
    ``FLAGS.log_*`` flags), logs their output, and synchronizes the control
    loop on the slowest produced stream. Raises ValueError unless
    ``--control=simulator_auto_pilot`` is passed.
    """
    transform = pylot.utils.Transform(CENTER_CAMERA_LOCATION,
                                      pylot.utils.Rotation())

    control_loop_stream = erdos.LoopStream()
    release_sensor_stream = erdos.IngestStream()
    pipeline_finish_notify_stream = erdos.IngestStream()
    # Create an operator that connects to the simulator.
    (
        pose_stream,
        pose_stream_for_control,
        ground_traffic_lights_stream,
        ground_obstacles_stream,
        ground_speed_limit_signs_stream,
        ground_stop_signs_stream,
        vehicle_id_stream,
        open_drive_stream,
        global_trajectory_stream,
    ) = pylot.operator_creator.add_simulator_bridge(
        control_loop_stream, release_sensor_stream,
        pipeline_finish_notify_stream)

    # Add sensors: RGB, depth, and segmented cameras on the same mount.
    rgb_camera_setup = RGBCameraSetup('center_camera',
                                      FLAGS.camera_image_width,
                                      FLAGS.camera_image_height, transform,
                                      FLAGS.camera_fov)
    (center_camera_stream, notify_rgb_stream) = \
        pylot.operator_creator.add_camera_driver(
            rgb_camera_setup, vehicle_id_stream, release_sensor_stream)
    depth_camera_setup = DepthCameraSetup('depth_center_camera',
                                          FLAGS.camera_image_width,
                                          FLAGS.camera_image_height,
                                          transform, FLAGS.camera_fov)
    (depth_camera_stream,
     _) = pylot.operator_creator.add_camera_driver(depth_camera_setup,
                                                   vehicle_id_stream,
                                                   release_sensor_stream)
    seg_camera_setup = SegmentedCameraSetup('seg_center_camera',
                                            FLAGS.camera_image_width,
                                            FLAGS.camera_image_height,
                                            transform, FLAGS.camera_fov)
    (segmented_stream,
     _) = pylot.operator_creator.add_camera_driver(seg_camera_setup,
                                                   vehicle_id_stream,
                                                   release_sensor_stream)

    # Optionally log each camera's frames.
    if FLAGS.log_rgb_camera:
        pylot.operator_creator.add_camera_logging(
            center_camera_stream, 'center_camera_logger_operator', 'center')

    if FLAGS.log_segmented_camera:
        pylot.operator_creator.add_camera_logging(
            segmented_stream, 'center_segmented_camera_logger_operator',
            'segmented')

    if FLAGS.log_depth_camera:
        pylot.operator_creator.add_camera_logging(
            depth_camera_stream, 'depth_camera_logger_operator', 'depth')

    imu_stream = None
    if FLAGS.log_imu:
        # IMU mounted at the vehicle origin (identity transform).
        (imu_stream, _) = pylot.operator_creator.add_imu(
            pylot.utils.Transform(location=pylot.utils.Location(),
                                  rotation=pylot.utils.Rotation()),
            vehicle_id_stream)
        pylot.operator_creator.add_imu_logging(imu_stream)

    gnss_stream = None
    if FLAGS.log_gnss:
        (gnss_stream, _) = pylot.operator_creator.add_gnss(
            pylot.utils.Transform(location=pylot.utils.Location(),
                                  rotation=pylot.utils.Rotation()),
            vehicle_id_stream)
        pylot.operator_creator.add_gnss_logging(gnss_stream)

    if FLAGS.log_pose:
        pylot.operator_creator.add_pose_logging(pose_stream)

    traffic_lights_stream = None
    traffic_light_camera_stream = None
    if FLAGS.log_traffic_lights:
        # Narrow (45 degree) RGB/segmented/depth cameras needed by the
        # perfect traffic light detector.
        tl_camera_setup = RGBCameraSetup('traffic_light_camera',
                                         FLAGS.camera_image_width,
                                         FLAGS.camera_image_height,
                                         transform, 45)
        (traffic_light_camera_stream, _) = \
            pylot.operator_creator.add_camera_driver(
                tl_camera_setup, vehicle_id_stream, release_sensor_stream)
        pylot.operator_creator.add_camera_logging(
            traffic_light_camera_stream,
            'traffic_light_camera_logger_operator', 'traffic-light')
        tl_seg_camera_setup = SegmentedCameraSetup(
            'traffic_light_segmented_camera', FLAGS.camera_image_width,
            FLAGS.camera_image_height, transform, 45)
        (traffic_light_segmented_camera_stream, _) = \
            pylot.operator_creator.add_camera_driver(
                tl_seg_camera_setup, vehicle_id_stream,
                release_sensor_stream)
        tl_depth_camera_setup = DepthCameraSetup('traffic_light_depth_camera',
                                                 FLAGS.camera_image_width,
                                                 FLAGS.camera_image_height,
                                                 transform, 45)
        (traffic_light_depth_camera_stream, _) = \
            pylot.operator_creator.add_camera_driver(
                tl_depth_camera_setup, vehicle_id_stream,
                release_sensor_stream)
        traffic_lights_stream = \
            pylot.operator_creator.add_perfect_traffic_light_detector(
                ground_traffic_lights_stream, traffic_light_camera_stream,
                traffic_light_depth_camera_stream,
                traffic_light_segmented_camera_stream, pose_stream)
        pylot.operator_creator.add_bounding_box_logging(traffic_lights_stream)

    if FLAGS.log_left_right_cameras:
        (left_camera_stream, right_camera_stream, _,
         _) = pylot.operator_creator.add_left_right_cameras(
             transform, vehicle_id_stream, release_sensor_stream)
        pylot.operator_creator.add_camera_logging(
            left_camera_stream, 'left_camera_logger_operator', 'left')
        pylot.operator_creator.add_camera_logging(
            right_camera_stream, 'right_camera_logger_operator', 'right')

    point_cloud_stream = None
    if FLAGS.log_lidar:
        (point_cloud_stream, _,
         _) = pylot.operator_creator.add_lidar(transform, vehicle_id_stream,
                                               release_sensor_stream)
        pylot.operator_creator.add_lidar_logging(point_cloud_stream)

    obstacles_stream = None
    if FLAGS.log_obstacles:
        obstacles_stream = pylot.operator_creator.add_perfect_detector(
            depth_camera_stream, center_camera_stream, segmented_stream,
            pose_stream, ground_obstacles_stream,
            ground_speed_limit_signs_stream, ground_stop_signs_stream)
        pylot.operator_creator.add_bounding_box_logging(obstacles_stream)

    if FLAGS.log_multiple_object_tracker:
        # NOTE(review): obstacles_stream may still be None here unless
        # --log_obstacles is also set — verify the flags are used together.
        pylot.operator_creator.add_multiple_object_tracker_logging(
            obstacles_stream)

    obstacles_tracking_stream = None
    if FLAGS.log_trajectories or FLAGS.log_chauffeur:
        obstacles_tracking_stream = \
            pylot.operator_creator.add_perfect_tracking(
                vehicle_id_stream, ground_obstacles_stream, pose_stream)
        if FLAGS.log_trajectories:
            pylot.operator_creator.add_trajectory_logging(
                obstacles_tracking_stream)

    top_down_segmented_stream = None
    top_down_camera_setup = None
    if FLAGS.log_chauffeur or FLAGS.log_top_down_segmentation:
        # Bird's-eye cameras placed above the ego vehicle.
        top_down_transform = pylot.utils.get_top_down_transform(
            transform, FLAGS.top_down_camera_altitude)
        top_down_seg_cs = SegmentedCameraSetup('top_down_segmented_camera',
                                               FLAGS.camera_image_width,
                                               FLAGS.camera_image_height,
                                               top_down_transform, 90)
        (top_down_segmented_stream, _) = \
            pylot.operator_creator.add_camera_driver(
                top_down_seg_cs, vehicle_id_stream, release_sensor_stream)

        if FLAGS.log_top_down_segmentation:
            pylot.operator_creator.add_camera_logging(
                top_down_segmented_stream,
                'top_down_segmented_logger_operator', 'top-down-segmented')

        if FLAGS.log_chauffeur:
            top_down_camera_setup = RGBCameraSetup('top_down_rgb_camera',
                                                   FLAGS.camera_image_width,
                                                   FLAGS.camera_image_height,
                                                   top_down_transform, 90)
            (top_down_camera_stream,
             _) = pylot.operator_creator.add_camera_driver(
                 top_down_camera_setup, vehicle_id_stream,
                 release_sensor_stream)
            pylot.operator_creator.add_chauffeur_logging(
                vehicle_id_stream, pose_stream, obstacles_tracking_stream,
                top_down_camera_stream, top_down_segmented_stream,
                top_down_camera_setup)

    perfect_lane_stream = None
    if FLAGS.log_lane_detection_camera:
        perfect_lane_stream = pylot.operator_creator.add_perfect_lane_detector(
            pose_stream, open_drive_stream, center_camera_stream)

    # TODO: Hack! We synchronize on a single stream, based on a guesstimate
    # of which stream is slowest. Instead, we should synchronize on all
    # output streams, and we should ensure that even the operators without
    # output streams complete.
    if FLAGS.control == 'simulator_auto_pilot':
        # We insert a synchronizing operator that sends back a command when
        # the low watermark progresses on all input stream.
        stream_to_sync_on = center_camera_stream
        if obstacles_tracking_stream is not None:
            stream_to_sync_on = obstacles_tracking_stream
        if traffic_lights_stream is not None:
            stream_to_sync_on = traffic_lights_stream
        if perfect_lane_stream is not None:
            stream_to_sync_on = perfect_lane_stream
        if obstacles_stream is not None:
            stream_to_sync_on = obstacles_stream
        control_stream = pylot.operator_creator.add_synchronizer(
            vehicle_id_stream, stream_to_sync_on)
        control_loop_stream.set(control_stream)
    else:
        raise ValueError(
            "Must be in auto pilot mode. Pass --control=simulator_auto_pilot")

    control_display_stream = None
    streams_to_send_top_on = []
    if pylot.flags.must_visualize():
        control_display_stream, ingest_streams = \
            pylot.operator_creator.add_visualizer(
                pose_stream=pose_stream,
                camera_stream=center_camera_stream,
                tl_camera_stream=traffic_light_camera_stream,
                depth_stream=depth_camera_stream,
                point_cloud_stream=point_cloud_stream,
                segmentation_stream=segmented_stream,
                imu_stream=imu_stream,
                obstacles_stream=obstacles_stream,
                traffic_lights_stream=traffic_lights_stream,
                tracked_obstacles_stream=obstacles_tracking_stream)
        streams_to_send_top_on += ingest_streams

    # Connect an instance to the simulator to make sure that we can turn the
    # synchronous mode off after the script finishes running.
    client, world = pylot.simulation.utils.get_world(FLAGS.simulator_host,
                                                     FLAGS.simulator_port,
                                                     FLAGS.simulator_timeout)

    # Run the data-flow.
    node_handle = erdos.run_async()

    signal.signal(signal.SIGINT, shutdown)

    # Ask all sensors to release their data.
    release_sensor_stream.send(
        erdos.WatermarkMessage(erdos.Timestamp(is_top=True)))
    for stream in streams_to_send_top_on:
        stream.send(erdos.WatermarkMessage(erdos.Timestamp(is_top=True)))

    try:
        if pylot.flags.must_visualize():
            pylot.utils.run_visualizer_control_loop(control_display_stream)
        node_handle.wait()
    except KeyboardInterrupt:
        # On Ctrl-C: tear down the dataflow and restore async simulator mode.
        node_handle.shutdown()
        pylot.simulation.utils.set_asynchronous_mode(world)
        if pylot.flags.must_visualize():
            import pygame
            pygame.quit()
def driver():
    """Constructs and launches the full Pylot driving pipeline.

    Wires simulator bridge, sensors, perception, prediction, planning, and
    control operators together via erdos streams, then starts the dataflow
    asynchronously.

    Returns:
        A tuple of the erdos node handle and the control display stream
        (``None`` when visualization is disabled).
    """
    transform = pylot.utils.Transform(CENTER_CAMERA_LOCATION,
                                      pylot.utils.Rotation(pitch=-15))

    streams_to_send_top_on = []
    control_loop_stream = erdos.LoopStream()
    time_to_decision_loop_stream = erdos.LoopStream()
    if FLAGS.simulator_mode == 'pseudo-asynchronous':
        # In pseudo-async mode, sensor release is driven by the pipeline
        # itself (loop streams); otherwise the script ingests watermarks.
        release_sensor_stream = erdos.LoopStream()
        pipeline_finish_notify_stream = erdos.LoopStream()
    else:
        release_sensor_stream = erdos.IngestStream()
        pipeline_finish_notify_stream = erdos.IngestStream()
    notify_streams = []

    # Create operator that bridges between pipeline and the simulator.
    (
        pose_stream,
        pose_stream_for_control,
        ground_traffic_lights_stream,
        ground_obstacles_stream,
        ground_speed_limit_signs_stream,
        ground_stop_signs_stream,
        vehicle_id_stream,
        open_drive_stream,
        global_trajectory_stream,
    ) = pylot.operator_creator.add_simulator_bridge(
        control_loop_stream,
        release_sensor_stream,
        pipeline_finish_notify_stream,
    )

    # Add sensors.
    center_camera_setup = RGBCameraSetup('center_camera',
                                         FLAGS.camera_image_width,
                                         FLAGS.camera_image_height, transform,
                                         FLAGS.camera_fov)
    (center_camera_stream, notify_rgb_stream) = \
        pylot.operator_creator.add_camera_driver(
            center_camera_setup, vehicle_id_stream, release_sensor_stream)
    notify_streams.append(notify_rgb_stream)

    if pylot.flags.must_add_depth_camera_sensor():
        depth_camera_setup = DepthCameraSetup('depth_center_camera',
                                              FLAGS.camera_image_width,
                                              FLAGS.camera_image_height,
                                              transform, FLAGS.camera_fov)
        (depth_camera_stream, notify_depth_stream) = \
            pylot.operator_creator.add_camera_driver(
                depth_camera_setup, vehicle_id_stream, release_sensor_stream)
    else:
        depth_camera_stream = None
    if pylot.flags.must_add_segmented_camera_sensor():
        segmented_camera_setup = SegmentedCameraSetup(
            'segmented_center_camera', FLAGS.camera_image_width,
            FLAGS.camera_image_height, transform, FLAGS.camera_fov)
        (ground_segmented_stream, notify_segmented_stream) = \
            pylot.operator_creator.add_camera_driver(
                segmented_camera_setup, vehicle_id_stream,
                release_sensor_stream)
    else:
        ground_segmented_stream = None
    if pylot.flags.must_add_lidar_sensor():
        # Place LiDAR sensor in the same location as the center camera.
        (point_cloud_stream, notify_lidar_stream,
         lidar_setup) = pylot.operator_creator.add_lidar(
             transform, vehicle_id_stream, release_sensor_stream)
    else:
        point_cloud_stream = None
        lidar_setup = None

    # Choose which depth source feeds the obstacle location finder.
    if FLAGS.obstacle_location_finder_sensor == 'lidar':
        depth_stream = point_cloud_stream
        # Camera sensors are slower than the lidar sensor.
        notify_streams.append(notify_lidar_stream)
    elif FLAGS.obstacle_location_finder_sensor == 'depth_camera':
        depth_stream = depth_camera_stream
        notify_streams.append(notify_depth_stream)
    elif FLAGS.obstacle_location_finder_sensor == 'depth_stereo':
        (depth_stream, notify_left_camera_stream,
         notify_right_camera_stream) = pylot.component_creator.add_depth(
             transform, vehicle_id_stream, center_camera_setup,
             depth_camera_stream, release_sensor_stream)
        notify_streams.append(notify_left_camera_stream)
        notify_streams.append(notify_right_camera_stream)
    else:
        raise ValueError(
            'Unknown --obstacle_location_finder_sensor value {}'.format(
                FLAGS.obstacle_location_finder_sensor))

    imu_stream = None
    if pylot.flags.must_add_imu_sensor():
        # IMU mounted at the vehicle origin (identity transform).
        (imu_stream, _) = pylot.operator_creator.add_imu(
            pylot.utils.Transform(location=pylot.utils.Location(),
                                  rotation=pylot.utils.Rotation()),
            vehicle_id_stream)

    gnss_stream = None
    if pylot.flags.must_add_gnss_sensor():
        (gnss_stream, _) = pylot.operator_creator.add_gnss(
            pylot.utils.Transform(location=pylot.utils.Location(),
                                  rotation=pylot.utils.Rotation()),
            vehicle_id_stream)

    if FLAGS.localization:
        # Replace the simulator pose with a localization-derived estimate.
        pose_stream = pylot.operator_creator.add_localization(
            imu_stream, gnss_stream, pose_stream)

    # Perception.
    obstacles_stream, perfect_obstacles_stream = \
        pylot.component_creator.add_obstacle_detection(
            center_camera_stream, center_camera_setup, pose_stream,
            depth_stream, depth_camera_stream, ground_segmented_stream,
            ground_obstacles_stream, ground_speed_limit_signs_stream,
            ground_stop_signs_stream, time_to_decision_loop_stream)
    tl_transform = pylot.utils.Transform(CENTER_CAMERA_LOCATION,
                                         pylot.utils.Rotation())
    traffic_lights_stream, tl_camera_stream = \
        pylot.component_creator.add_traffic_light_detection(
            tl_transform, vehicle_id_stream, release_sensor_stream,
            pose_stream, depth_stream, ground_traffic_lights_stream,
            time_to_decision_loop_stream)

    lane_detection_stream = pylot.component_creator.add_lane_detection(
        center_camera_stream, pose_stream, open_drive_stream)
    if lane_detection_stream is None:
        # Lane detection is disabled; substitute an ingest stream that is
        # closed with a top watermark after the dataflow starts.
        lane_detection_stream = erdos.IngestStream()
        streams_to_send_top_on.append(lane_detection_stream)

    obstacles_tracking_stream = pylot.component_creator.add_obstacle_tracking(
        center_camera_stream, center_camera_setup, obstacles_stream,
        depth_stream, vehicle_id_stream, pose_stream,
        ground_obstacles_stream, time_to_decision_loop_stream)

    segmented_stream = pylot.component_creator.add_segmentation(
        center_camera_stream, ground_segmented_stream)

    if FLAGS.fusion:
        pylot.operator_creator.add_fusion(pose_stream, obstacles_stream,
                                          depth_stream,
                                          ground_obstacles_stream)

    # Prediction.
    prediction_stream, prediction_camera_stream, notify_prediction_stream = \
        pylot.component_creator.add_prediction(
            obstacles_tracking_stream, vehicle_id_stream,
            time_to_decision_loop_stream, transform, release_sensor_stream,
            pose_stream, point_cloud_stream, lidar_setup)
    if prediction_stream is None:
        # Planning consumes detected obstacles directly when prediction
        # is disabled.
        prediction_stream = obstacles_stream
    if notify_prediction_stream:
        notify_streams.append(notify_prediction_stream)

    # Planning.
    goal_location = pylot.utils.Location(float(FLAGS.goal_location[0]),
                                         float(FLAGS.goal_location[1]),
                                         float(FLAGS.goal_location[2]))
    waypoints_stream = pylot.component_creator.add_planning(
        goal_location, pose_stream, prediction_stream, traffic_lights_stream,
        lane_detection_stream, open_drive_stream, global_trajectory_stream,
        time_to_decision_loop_stream)

    if FLAGS.simulator_mode == "pseudo-asynchronous":
        # Add a synchronizer in the pseudo-asynchronous mode.
        (
            waypoints_stream_for_control,
            pose_stream_for_control,
            sensor_ready_stream,
            _pipeline_finish_notify_stream,
        ) = pylot.operator_creator.add_planning_pose_synchronizer(
            waypoints_stream, pose_stream_for_control, pose_stream,
            *notify_streams)
        release_sensor_stream.set(sensor_ready_stream)
        pipeline_finish_notify_stream.set(_pipeline_finish_notify_stream)
    else:
        waypoints_stream_for_control = waypoints_stream
        pose_stream_for_control = pose_stream

    # Control: close the loop back into the simulator bridge.
    control_stream = pylot.component_creator.add_control(
        pose_stream_for_control, waypoints_stream_for_control,
        vehicle_id_stream, perfect_obstacles_stream)
    control_loop_stream.set(control_stream)

    pylot.component_creator.add_evaluation(vehicle_id_stream, pose_stream,
                                           imu_stream)

    time_to_decision_stream = pylot.operator_creator.add_time_to_decision(
        pose_stream, obstacles_stream)
    time_to_decision_loop_stream.set(time_to_decision_stream)

    control_display_stream = None
    if pylot.flags.must_visualize():
        control_display_stream, ingest_streams = \
            pylot.operator_creator.add_visualizer(
                pose_stream, center_camera_stream, tl_camera_stream,
                prediction_camera_stream, depth_camera_stream,
                point_cloud_stream, segmented_stream, imu_stream,
                obstacles_stream, traffic_lights_stream,
                obstacles_tracking_stream, lane_detection_stream,
                prediction_stream, waypoints_stream, control_stream)
        streams_to_send_top_on += ingest_streams

    node_handle = erdos.run_async()

    for stream in streams_to_send_top_on:
        stream.send(erdos.WatermarkMessage(erdos.Timestamp(is_top=True)))
    # If we did not use the pseudo-asynchronous mode, ask the sensors to
    # release their readings whenever.
    if FLAGS.simulator_mode != "pseudo-asynchronous":
        release_sensor_stream.send(
            erdos.WatermarkMessage(erdos.Timestamp(is_top=True)))
        pipeline_finish_notify_stream.send(
            erdos.WatermarkMessage(erdos.Timestamp(is_top=True)))
    return node_handle, control_display_stream
def __init__(self, pose_stream, rgb_camera_stream, tl_camera_stream,
             prediction_camera_stream, depth_camera_stream,
             point_cloud_stream, segmentation_stream, imu_stream,
             obstacles_stream, obstacles_error_stream,
             traffic_lights_stream, tracked_obstacles_stream,
             lane_detection_stream, prediction_stream, waypoints_stream,
             control_stream, display_control_stream, pygame_display, flags):
    """Buffers every visualizable stream and configures the pygame display.

    Each input stream gets a deque and a ``save`` callback that fills it;
    a watermark callback across all buffered streams drives rendering, and
    ``display_control_stream`` switches which view is shown. The
    ``flags.visualize_*`` flags select which views are available.
    """
    visualize_streams = []
    self._pose_msgs = deque()
    pose_stream.add_callback(
        partial(self.save, msg_type="Pose", queue=self._pose_msgs))
    visualize_streams.append(pose_stream)
    self._bgr_msgs = deque()
    rgb_camera_stream.add_callback(
        partial(self.save, msg_type="RGB", queue=self._bgr_msgs))
    visualize_streams.append(rgb_camera_stream)
    self._imu_msgs = deque()
    imu_stream.add_callback(
        partial(self.save, msg_type="IMU", queue=self._imu_msgs))
    visualize_streams.append(imu_stream)
    # NOTE(review): the plain obstacles stream is intentionally disabled
    # below (string block); obstacle *error* messages are buffered instead.
    """
    self._obstacle_msgs = deque()
    obstacles_stream.add_callback(
        partial(self.save, msg_type="Obstacle", queue=self._obstacle_msgs))
    visualize_streams.append(obstacles_stream)
    """
    self._obstacle_error_msgs = deque()
    obstacles_error_stream.add_callback(
        partial(self.save,
                msg_type="ObstacleError",
                queue=self._obstacle_error_msgs))
    visualize_streams.append(obstacles_error_stream)
    self._tracked_obstacle_msgs = deque()
    tracked_obstacles_stream.add_callback(
        partial(self.save,
                msg_type="TrackedObstacle",
                queue=self._tracked_obstacle_msgs))
    visualize_streams.append(tracked_obstacles_stream)
    self._tl_camera_msgs = deque()
    tl_camera_stream.add_callback(
        partial(self.save, msg_type="TLCamera", queue=self._tl_camera_msgs))
    visualize_streams.append(tl_camera_stream)
    self._traffic_light_msgs = deque()
    traffic_lights_stream.add_callback(
        partial(self.save,
                msg_type="TrafficLight",
                queue=self._traffic_light_msgs))
    visualize_streams.append(traffic_lights_stream)
    self._waypoint_msgs = deque()
    waypoints_stream.add_callback(
        partial(self.save, msg_type="Waypoint", queue=self._waypoint_msgs))
    visualize_streams.append(waypoints_stream)
    self._prediction_camera_msgs = deque()
    prediction_camera_stream.add_callback(
        partial(self.save,
                msg_type="PredictionCamera",
                queue=self._prediction_camera_msgs))
    visualize_streams.append(prediction_camera_stream)
    self._prediction_msgs = deque()
    prediction_stream.add_callback(
        partial(self.save, msg_type="Prediction",
                queue=self._prediction_msgs))
    visualize_streams.append(prediction_stream)
    self._point_cloud_msgs = deque()
    point_cloud_stream.add_callback(
        partial(self.save, msg_type="PointCloud",
                queue=self._point_cloud_msgs))
    visualize_streams.append(point_cloud_stream)
    self._lane_detection_msgs = deque()
    lane_detection_stream.add_callback(
        partial(self.save, msg_type="Lanes",
                queue=self._lane_detection_msgs))
    visualize_streams.append(lane_detection_stream)
    self._depth_msgs = deque()
    depth_camera_stream.add_callback(
        partial(self.save, msg_type="Depth", queue=self._depth_msgs))
    visualize_streams.append(depth_camera_stream)
    self._segmentation_msgs = deque()
    segmentation_stream.add_callback(
        partial(self.save,
                msg_type="Segmentation",
                queue=self._segmentation_msgs))
    visualize_streams.append(segmentation_stream)
    self._control_msgs = deque()
    control_stream.add_callback(
        partial(self.save, msg_type="Control", queue=self._control_msgs))
    visualize_streams.append(control_stream)

    # Register a watermark callback on all the streams to be visualized.
    erdos.add_watermark_callback(visualize_streams, [], self.on_watermark)

    # Add a callback on a control stream to figure out what to display.
    display_control_stream.add_callback(self.change_display)
    self._logger = erdos.utils.setup_logging(self.config.name,
                                             self.config.log_file_name)
    self.display = pygame_display

    # Set the font; prefer 'ubuntumono', fall back to any monospace font.
    fonts = [x for x in pygame.font.get_fonts() if 'mono' in x]
    default_font = 'ubuntumono'
    mono = default_font if default_font in fonts else fonts[0]
    mono = pygame.font.match_font(mono)
    self.font = pygame.font.Font(mono, 14)

    # Array of keys to figure out which message to display.
    self.current_display = 0
    self.display_array = []
    self.window_titles = []
    if flags.visualize_rgb_camera:
        self.display_array.append("RGB")
        self.window_titles.append("RGB Camera")
    if flags.visualize_detected_obstacles:
        # include obstacles error here; TODO: separate into flags
        self.display_array.append("ObstacleError")
        self.window_titles.append("Detected obstacles")
        #self.display_array.append("Obstacle")
        #self.window_titles.append("Detected obstacles")
    if flags.visualize_tracked_obstacles:
        self.display_array.append("TrackedObstacle")
        self.window_titles.append("Obstacle tracking")
    if flags.visualize_detected_traffic_lights:
        self.display_array.append("TLCamera")
        self.window_titles.append("Detected traffic lights")
    if flags.visualize_waypoints:
        self.display_array.append("Waypoint")
        self.window_titles.append("Planning")
    if flags.visualize_prediction:
        self.display_array.append("PredictionCamera")
        self.window_titles.append("Prediction")
    if flags.visualize_lidar:
        self.display_array.append("PointCloud")
        self.window_titles.append("LiDAR")
    if flags.visualize_detected_lanes:
        self.display_array.append("Lanes")
        self.window_titles.append("Detected lanes")
    if flags.visualize_depth_camera:
        self.display_array.append("Depth")
        self.window_titles.append("Depth Camera")
    if flags.visualize_segmentation:
        self.display_array.append("Segmentation")
        self.window_titles.append("Segmentation")
    if flags.visualize_world:
        # Planning-world view renders from a virtual bird's-eye camera.
        self._planning_world = World(flags, self._logger)
        top_down_transform = pylot.utils.get_top_down_transform(
            pylot.utils.Transform(pylot.utils.Location(),
                                  pylot.utils.Rotation()),
            flags.top_down_camera_altitude)
        self._bird_eye_camera_setup = RGBCameraSetup(
            'bird_eye_camera', flags.camera_image_width,
            flags.camera_image_height, top_down_transform, 90)
        self.display_array.append("PlanningWorld")
        self.window_titles.append("Planning world")
    else:
        self._planning_world = None
    assert len(self.display_array) == len(self.window_titles), \
        "The display and titles differ."

    # Save the flags.
    self._flags = flags
def main(argv):
    """ Computes ground obstacle detection and segmentation decay."""
    transform = pylot.utils.Transform(CENTER_CAMERA_LOCATION,
                                      pylot.utils.Rotation())

    control_loop_stream = erdos.LoopStream()
    release_sensor_stream = erdos.IngestStream()
    (
        pose_stream,
        pose_stream_for_control,
        ground_traffic_lights_stream,
        ground_obstacles_stream,
        ground_speed_limit_signs_stream,
        ground_stop_signs_stream,
        vehicle_id_stream,
        open_drive_stream,
        global_trajectory_stream,
    ) = pylot.operator_creator.add_simulator_bridge(control_loop_stream,
                                                    release_sensor_stream)

    # Add camera sensors: RGB, depth, and segmented on the same mount.
    rgb_camera_setup = RGBCameraSetup('center_camera',
                                      FLAGS.camera_image_width,
                                      FLAGS.camera_image_height, transform,
                                      FLAGS.camera_fov)
    (center_camera_stream,
     _) = pylot.operator_creator.add_camera_driver(rgb_camera_setup,
                                                   vehicle_id_stream,
                                                   release_sensor_stream)
    depth_camera_setup = DepthCameraSetup('depth_center_camera',
                                          FLAGS.camera_image_width,
                                          FLAGS.camera_image_height,
                                          transform, FLAGS.camera_fov)
    (depth_camera_stream,
     _) = pylot.operator_creator.add_camera_driver(depth_camera_setup,
                                                   vehicle_id_stream,
                                                   release_sensor_stream)
    segmented_camera_setup = SegmentedCameraSetup('segmented_center_camera',
                                                  FLAGS.camera_image_width,
                                                  FLAGS.camera_image_height,
                                                  transform,
                                                  FLAGS.camera_fov)
    (segmented_stream,
     _) = pylot.operator_creator.add_camera_driver(segmented_camera_setup,
                                                   vehicle_id_stream,
                                                   release_sensor_stream)

    map_stream = None
    if FLAGS.compute_detection_decay:
        obstacles_stream = pylot.operator_creator.add_perfect_detector(
            depth_camera_stream, center_camera_stream, segmented_stream,
            pose_stream, ground_obstacles_stream,
            ground_speed_limit_signs_stream, ground_stop_signs_stream)
        map_stream = pylot.operator_creator.add_detection_decay(
            obstacles_stream)

    iou_stream = None
    if FLAGS.compute_segmentation_decay:
        iou_stream = pylot.operator_creator.add_segmentation_decay(
            segmented_stream)

    # TODO: Hack! We synchronize on a single stream, based on a guesstimate
    # of which stream is slowest. Instead, we should synchronize on all
    # output streams, and we should ensure that even the operators without
    # output streams complete.
    if FLAGS.control == 'simulator_auto_pilot':
        # NOTE(review): iou_stream may be None when only detection decay is
        # computed without segmentation decay and map_stream is None —
        # presumably at least one of the compute_* flags is always set.
        stream_to_sync_on = iou_stream
        if map_stream is not None:
            stream_to_sync_on = map_stream
        op_config = erdos.OperatorConfig(name='synchronizer_operator',
                                         flow_watermarks=False)
        (control_stream, ) = erdos.connect(SynchronizerOperator, op_config,
                                           [stream_to_sync_on], FLAGS)
        control_loop_stream.set(control_stream)
    else:
        raise ValueError(
            "Must be in auto pilot mode. Pass --control=simulator_auto_pilot")

    erdos.run_async()

    # Ask all sensors to release their data.
    release_sensor_stream.send(
        erdos.WatermarkMessage(erdos.Timestamp(is_top=True)))
def add_traffic_light_detection(tl_transform,
                                vehicle_id_stream,
                                release_sensor_stream,
                                pose_stream=None,
                                depth_stream=None,
                                ground_traffic_lights_stream=None,
                                time_to_decision_stream=None):
    """Adds traffic light detection operators.

    The traffic light detectors use a camera with a narrow field of view.

    If the `--perfect_traffic_light_detection` flag is set, the method adds
    a perfect traffic light detector operator, and returns a stream of
    perfect traffic lights. Otherwise, if the `--traffic_light_detection`
    flag is set it returns a stream of traffic lights detected using a
    trained model.

    Args:
        tl_transform (:py:class:`~pylot.utils.Transform`): Transform of the
            traffic light camera relative to the ego vehicle.
        vehicle_id_stream (:py:class:`erdos.ReadStream`): A stream on which
            the simulator publishes simulator ego-vehicle id.
        pose_stream (:py:class:`erdos.ReadStream`, optional): A stream on
            which pose info is received.
        depth_stream (:py:class:`erdos.ReadStream`, optional): Stream on
            which point cloud messages or depth frames are received.

    Returns:
        :py:class:`erdos.ReadStream`: Stream on which
        :py:class:`~pylot.perception.messages.ObstaclesMessage` traffic
        light messages are published.
    """
    tl_camera_stream = None
    if FLAGS.traffic_light_detection or FLAGS.perfect_traffic_light_detection:
        logger.debug('Adding traffic light camera...')
        # Only add the TL camera if traffic light detection is enabled.
        tl_camera_setup = RGBCameraSetup('traffic_light_camera',
                                         FLAGS.camera_image_width,
                                         FLAGS.camera_image_height,
                                         tl_transform, 45)
        (tl_camera_stream,
         _) = pylot.operator_creator.add_camera_driver(tl_camera_setup,
                                                       vehicle_id_stream,
                                                       release_sensor_stream)

    traffic_lights_stream = None
    if FLAGS.traffic_light_detection:
        logger.debug('Using traffic light detection...')
        traffic_lights_stream = \
            pylot.operator_creator.add_traffic_light_detector(
                tl_camera_stream, time_to_decision_stream)
        # Adds operator that finds the world locations of the traffic
        # lights.
        traffic_lights_stream = \
            pylot.operator_creator.add_obstacle_location_finder(
                traffic_lights_stream, depth_stream, pose_stream,
                tl_camera_setup)

    if FLAGS.perfect_traffic_light_detection:
        assert (pose_stream is not None
                and ground_traffic_lights_stream is not None)
        logger.debug('Using perfect traffic light detection...')
        # Add segmented and depth cameras with fov 45. These cameras are
        # needed by the perfect traffic light detector.
        tl_depth_camera_setup = DepthCameraSetup('traffic_light_depth_camera',
                                                 FLAGS.camera_image_width,
                                                 FLAGS.camera_image_height,
                                                 tl_transform, 45)
        # Fix: add_camera_driver returns a 2-tuple (camera stream, notify
        # stream) everywhere else in this file; the previous 3-element
        # unpack here would raise ValueError at pipeline-construction time.
        (tl_depth_camera_stream, _) = \
            pylot.operator_creator.add_camera_driver(tl_depth_camera_setup,
                                                     vehicle_id_stream,
                                                     release_sensor_stream)
        segmented_tl_camera_setup = SegmentedCameraSetup(
            'traffic_light_segmented_camera', FLAGS.camera_image_width,
            FLAGS.camera_image_height, tl_transform, 45)
        (tl_segmented_camera_stream, _) = \
            pylot.operator_creator.add_camera_driver(
                segmented_tl_camera_setup, vehicle_id_stream,
                release_sensor_stream)
        traffic_lights_stream = \
            pylot.operator_creator.add_perfect_traffic_light_detector(
                ground_traffic_lights_stream, tl_camera_stream,
                tl_depth_camera_stream, tl_segmented_camera_stream,
                pose_stream)

    if FLAGS.simulator_traffic_light_detection:
        logger.debug('Using ground traffic lights from the simulator...')
        traffic_lights_stream = ground_traffic_lights_stream

    return traffic_lights_stream, tl_camera_stream