def add_depth_camera(transform, vehicle_id_stream, name='center_depth_camera', fov=90):
    """Attaches a depth camera to the ego vehicle.

    Args:
        transform: Transform of the camera relative to the ego vehicle.
        vehicle_id_stream: Stream on which the ego-vehicle id is published.
        name: Name given to the camera sensor.
        fov: Field of view of the camera, in degrees.

    Returns:
        A tuple of the stream carrying depth frames and the camera setup.
    """
    setup = DepthCameraSetup(name, FLAGS.carla_camera_image_width,
                             FLAGS.carla_camera_image_height, transform, fov)
    depth_stream = _add_camera_driver(vehicle_id_stream, setup)
    return (depth_stream, setup)
def on_depth_msg(simulator_image):
    """Sensor callback: converts a simulator depth image into a DepthFrame.

    The converted frame is stashed in the module-level DEPTH_FRAME global
    for consumption elsewhere.
    """
    global DEPTH_FRAME
    cam_transform = pylot.utils.Transform.from_simulator_transform(
        simulator_image.transform)
    setup = DepthCameraSetup("depth_camera", FLAGS.frame_width,
                             FLAGS.frame_height, cam_transform,
                             FLAGS.camera_fov)
    DEPTH_FRAME = DepthFrame.from_simulator_frame(simulator_image, setup)
def on_depth_msg(carla_image):
    """Sensor callback: wraps a CARLA depth image in a DepthFrameMessage.

    Stores the timestamped message in the module-level DEPTH_FRAME global.
    """
    global DEPTH_FRAME
    transform = pylot.utils.Transform.from_carla_transform(
        carla_image.transform)
    # BUG FIX: height previously read FLAGS.camera_height while width read
    # FLAGS.frame_width — a mismatched flag pair. Use FLAGS.frame_height so
    # both dimensions come from the same flag family (matching the
    # from_simulator variant of this callback).
    camera_setup = DepthCameraSetup("depth_camera", FLAGS.frame_width,
                                    FLAGS.frame_height, transform,
                                    FLAGS.camera_fov)
    # Timestamp is converted from seconds to milliseconds.
    DEPTH_FRAME = DepthFrameMessage(
        int(carla_image.timestamp * 1000),
        DepthFrame.from_carla_frame(carla_image, camera_setup))
def add_depth_camera(transform, vehicle_id_stream, release_sensor_stream,
                     name='center_depth_camera', fov=90):
    """Attaches a depth camera whose readings are gated by a release stream.

    Args:
        transform: Transform of the camera relative to the ego vehicle.
        vehicle_id_stream: Stream on which the ego-vehicle id is published.
        release_sensor_stream: Stream used to signal the sensor to release
            its readings.
        name: Name given to the camera sensor.
        fov: Field of view of the camera, in degrees.

    Returns:
        A tuple of the depth-frame stream, the notify-reading stream, and
        the camera setup.
    """
    from pylot.drivers.sensor_setup import DepthCameraSetup
    setup = DepthCameraSetup(name, FLAGS.carla_camera_image_width,
                             FLAGS.carla_camera_image_height, transform, fov)
    camera_stream, notify_stream = _add_camera_driver(
        vehicle_id_stream, release_sensor_stream, setup)
    return (camera_stream, notify_stream, setup)
def main(argv):
    """Builds and runs a simulator data-collection pipeline.

    Wires camera/IMU/GNSS/lidar sensors and their logging operators into an
    ERDOS dataflow, synchronizes the simulator's auto-pilot control loop on
    the slowest stream, then runs the dataflow until interrupted.
    """
    transform = pylot.utils.Transform(CENTER_CAMERA_LOCATION,
                                      pylot.utils.Rotation())
    # Loop stream closed later with the synchronizer's control stream.
    control_loop_stream = erdos.LoopStream()
    release_sensor_stream = erdos.IngestStream()
    pipeline_finish_notify_stream = erdos.IngestStream()
    # Create an operator that connects to the simulator.
    (
        pose_stream,
        pose_stream_for_control,
        ground_traffic_lights_stream,
        ground_obstacles_stream,
        ground_speed_limit_signs_stream,
        ground_stop_signs_stream,
        vehicle_id_stream,
        open_drive_stream,
        global_trajectory_stream,
    ) = pylot.operator_creator.add_simulator_bridge(
        control_loop_stream, release_sensor_stream,
        pipeline_finish_notify_stream)

    # Add sensors: RGB, depth and segmented cameras share one transform.
    rgb_camera_setup = RGBCameraSetup('center_camera',
                                      FLAGS.camera_image_width,
                                      FLAGS.camera_image_height, transform,
                                      FLAGS.camera_fov)
    (center_camera_stream, notify_rgb_stream) = \
        pylot.operator_creator.add_camera_driver(
            rgb_camera_setup, vehicle_id_stream, release_sensor_stream)
    depth_camera_setup = DepthCameraSetup('depth_center_camera',
                                          FLAGS.camera_image_width,
                                          FLAGS.camera_image_height,
                                          transform, FLAGS.camera_fov)
    (depth_camera_stream,
     _) = pylot.operator_creator.add_camera_driver(depth_camera_setup,
                                                   vehicle_id_stream,
                                                   release_sensor_stream)
    seg_camera_setup = SegmentedCameraSetup('seg_center_camera',
                                            FLAGS.camera_image_width,
                                            FLAGS.camera_image_height,
                                            transform, FLAGS.camera_fov)
    (segmented_stream,
     _) = pylot.operator_creator.add_camera_driver(seg_camera_setup,
                                                   vehicle_id_stream,
                                                   release_sensor_stream)

    # Optional logging operators, each gated by its own flag.
    if FLAGS.log_rgb_camera:
        pylot.operator_creator.add_camera_logging(
            center_camera_stream, 'center_camera_logger_operator', 'center')
    if FLAGS.log_segmented_camera:
        pylot.operator_creator.add_camera_logging(
            segmented_stream, 'center_segmented_camera_logger_operator',
            'segmented')
    if FLAGS.log_depth_camera:
        pylot.operator_creator.add_camera_logging(
            depth_camera_stream, 'depth_camera_logger_operator', 'depth')

    imu_stream = None
    if FLAGS.log_imu:
        # IMU mounted at the vehicle origin (identity transform).
        (imu_stream, _) = pylot.operator_creator.add_imu(
            pylot.utils.Transform(location=pylot.utils.Location(),
                                  rotation=pylot.utils.Rotation()),
            vehicle_id_stream)
        pylot.operator_creator.add_imu_logging(imu_stream)
    gnss_stream = None
    if FLAGS.log_gnss:
        (gnss_stream, _) = pylot.operator_creator.add_gnss(
            pylot.utils.Transform(location=pylot.utils.Location(),
                                  rotation=pylot.utils.Rotation()),
            vehicle_id_stream)
        pylot.operator_creator.add_gnss_logging(gnss_stream)
    if FLAGS.log_pose:
        pylot.operator_creator.add_pose_logging(pose_stream)

    traffic_lights_stream = None
    traffic_light_camera_stream = None
    if FLAGS.log_traffic_lights:
        # Traffic-light cameras use a narrower 45-degree field of view.
        tl_camera_setup = RGBCameraSetup('traffic_light_camera',
                                         FLAGS.camera_image_width,
                                         FLAGS.camera_image_height,
                                         transform, 45)
        (traffic_light_camera_stream, _) = \
            pylot.operator_creator.add_camera_driver(
                tl_camera_setup, vehicle_id_stream, release_sensor_stream)
        pylot.operator_creator.add_camera_logging(
            traffic_light_camera_stream,
            'traffic_light_camera_logger_operator', 'traffic-light')
        tl_seg_camera_setup = SegmentedCameraSetup(
            'traffic_light_segmented_camera', FLAGS.camera_image_width,
            FLAGS.camera_image_height, transform, 45)
        (traffic_light_segmented_camera_stream, _) = \
            pylot.operator_creator.add_camera_driver(
                tl_seg_camera_setup, vehicle_id_stream,
                release_sensor_stream)
        tl_depth_camera_setup = DepthCameraSetup('traffic_light_depth_camera',
                                                 FLAGS.camera_image_width,
                                                 FLAGS.camera_image_height,
                                                 transform, 45)
        (traffic_light_depth_camera_stream, _) = \
            pylot.operator_creator.add_camera_driver(
                tl_depth_camera_setup, vehicle_id_stream,
                release_sensor_stream)
        # The perfect detector needs RGB, depth and segmented TL views plus
        # the vehicle pose to localize the ground-truth lights.
        traffic_lights_stream = \
            pylot.operator_creator.add_perfect_traffic_light_detector(
                ground_traffic_lights_stream, traffic_light_camera_stream,
                traffic_light_depth_camera_stream,
                traffic_light_segmented_camera_stream, pose_stream)
        pylot.operator_creator.add_bounding_box_logging(traffic_lights_stream)

    if FLAGS.log_left_right_cameras:
        (left_camera_stream, right_camera_stream, _,
         _) = pylot.operator_creator.add_left_right_cameras(
             transform, vehicle_id_stream, release_sensor_stream)
        pylot.operator_creator.add_camera_logging(
            left_camera_stream, 'left_camera_logger_operator', 'left')
        pylot.operator_creator.add_camera_logging(
            right_camera_stream, 'right_camera_logger_operator', 'right')

    point_cloud_stream = None
    if FLAGS.log_lidar:
        (point_cloud_stream, _,
         _) = pylot.operator_creator.add_lidar(transform, vehicle_id_stream,
                                               release_sensor_stream)
        pylot.operator_creator.add_lidar_logging(point_cloud_stream)

    obstacles_stream = None
    if FLAGS.log_obstacles:
        obstacles_stream = pylot.operator_creator.add_perfect_detector(
            depth_camera_stream, center_camera_stream, segmented_stream,
            pose_stream, ground_obstacles_stream,
            ground_speed_limit_signs_stream, ground_stop_signs_stream)
        pylot.operator_creator.add_bounding_box_logging(obstacles_stream)
    # NOTE(review): if log_multiple_object_tracker is set without
    # log_obstacles, obstacles_stream is None here — confirm the logger
    # tolerates that.
    if FLAGS.log_multiple_object_tracker:
        pylot.operator_creator.add_multiple_object_tracker_logging(
            obstacles_stream)

    obstacles_tracking_stream = None
    if FLAGS.log_trajectories or FLAGS.log_chauffeur:
        obstacles_tracking_stream = \
            pylot.operator_creator.add_perfect_tracking(
                vehicle_id_stream, ground_obstacles_stream, pose_stream)
        if FLAGS.log_trajectories:
            pylot.operator_creator.add_trajectory_logging(
                obstacles_tracking_stream)

    top_down_segmented_stream = None
    top_down_camera_setup = None
    if FLAGS.log_chauffeur or FLAGS.log_top_down_segmentation:
        # Bird's-eye cameras positioned above the ego vehicle.
        top_down_transform = pylot.utils.get_top_down_transform(
            transform, FLAGS.top_down_camera_altitude)
        top_down_seg_cs = SegmentedCameraSetup('top_down_segmented_camera',
                                               FLAGS.camera_image_width,
                                               FLAGS.camera_image_height,
                                               top_down_transform, 90)
        (top_down_segmented_stream, _) = \
            pylot.operator_creator.add_camera_driver(
                top_down_seg_cs, vehicle_id_stream, release_sensor_stream)
        if FLAGS.log_top_down_segmentation:
            pylot.operator_creator.add_camera_logging(
                top_down_segmented_stream,
                'top_down_segmented_logger_operator', 'top-down-segmented')
        if FLAGS.log_chauffeur:
            top_down_camera_setup = RGBCameraSetup('top_down_rgb_camera',
                                                   FLAGS.camera_image_width,
                                                   FLAGS.camera_image_height,
                                                   top_down_transform, 90)
            (top_down_camera_stream,
             _) = pylot.operator_creator.add_camera_driver(
                 top_down_camera_setup, vehicle_id_stream,
                 release_sensor_stream)
            pylot.operator_creator.add_chauffeur_logging(
                vehicle_id_stream, pose_stream, obstacles_tracking_stream,
                top_down_camera_stream, top_down_segmented_stream,
                top_down_camera_setup)

    perfect_lane_stream = None
    if FLAGS.log_lane_detection_camera:
        perfect_lane_stream = pylot.operator_creator.add_perfect_lane_detector(
            pose_stream, open_drive_stream, center_camera_stream)

    # TODO: Hack! We synchronize on a single stream, based on a guesestimate
    # of which stream is slowest. Instead, We should synchronize on all output
    # streams, and we should ensure that even the operators without output
    # streams complete.
    if FLAGS.control == 'simulator_auto_pilot':
        # We insert a synchronizing operator that sends back a command when
        # the low watermark progresses on all input stream.
        # Successive reassignments: the LAST non-None stream in this chain
        # (obstacles has highest priority) is the one synchronized on.
        stream_to_sync_on = center_camera_stream
        if obstacles_tracking_stream is not None:
            stream_to_sync_on = obstacles_tracking_stream
        if traffic_lights_stream is not None:
            stream_to_sync_on = traffic_lights_stream
        if perfect_lane_stream is not None:
            stream_to_sync_on = perfect_lane_stream
        if obstacles_stream is not None:
            stream_to_sync_on = obstacles_stream
        control_stream = pylot.operator_creator.add_synchronizer(
            vehicle_id_stream, stream_to_sync_on)
        control_loop_stream.set(control_stream)
    else:
        raise ValueError(
            "Must be in auto pilot mode. Pass --control=simulator_auto_pilot")

    control_display_stream = None
    streams_to_send_top_on = []
    if pylot.flags.must_visualize():
        control_display_stream, ingest_streams = \
            pylot.operator_creator.add_visualizer(
                pose_stream=pose_stream,
                camera_stream=center_camera_stream,
                tl_camera_stream=traffic_light_camera_stream,
                depth_stream=depth_camera_stream,
                point_cloud_stream=point_cloud_stream,
                segmentation_stream=segmented_stream,
                imu_stream=imu_stream,
                obstacles_stream=obstacles_stream,
                traffic_lights_stream=traffic_lights_stream,
                tracked_obstacles_stream=obstacles_tracking_stream)
        streams_to_send_top_on += ingest_streams

    # Connect an instance to the simulator to make sure that we can turn the
    # synchronous mode off after the script finishes running.
    client, world = pylot.simulation.utils.get_world(FLAGS.simulator_host,
                                                     FLAGS.simulator_port,
                                                     FLAGS.simulator_timeout)

    # Run the data-flow.
    node_handle = erdos.run_async()
    signal.signal(signal.SIGINT, shutdown)

    # Ask all sensors to release their data.
    release_sensor_stream.send(
        erdos.WatermarkMessage(erdos.Timestamp(is_top=True)))
    for stream in streams_to_send_top_on:
        stream.send(erdos.WatermarkMessage(erdos.Timestamp(is_top=True)))

    try:
        if pylot.flags.must_visualize():
            pylot.utils.run_visualizer_control_loop(control_display_stream)
        node_handle.wait()
    except KeyboardInterrupt:
        # Gracefully shut down the dataflow and restore the simulator to
        # asynchronous mode before exiting.
        node_handle.shutdown()
        pylot.simulation.utils.set_asynchronous_mode(world)
        if pylot.flags.must_visualize():
            import pygame
            pygame.quit()
def driver():
    """Assembles the full Pylot driving pipeline as an ERDOS dataflow.

    Adds sensors, perception, prediction, planning and control components,
    closes the control and time-to-decision feedback loops, and starts the
    dataflow asynchronously.

    Returns:
        A tuple of the ERDOS node handle and the visualizer's control
        display stream (None when visualization is disabled).
    """
    # Center camera is pitched down 15 degrees.
    transform = pylot.utils.Transform(CENTER_CAMERA_LOCATION,
                                      pylot.utils.Rotation(pitch=-15))
    streams_to_send_top_on = []
    control_loop_stream = erdos.LoopStream()
    time_to_decision_loop_stream = erdos.LoopStream()
    # In pseudo-asynchronous mode these are fed back from the planning/pose
    # synchronizer; otherwise they are ingest streams driven at the bottom
    # of this function.
    if FLAGS.simulator_mode == 'pseudo-asynchronous':
        release_sensor_stream = erdos.LoopStream()
        pipeline_finish_notify_stream = erdos.LoopStream()
    else:
        release_sensor_stream = erdos.IngestStream()
        pipeline_finish_notify_stream = erdos.IngestStream()
    notify_streams = []

    # Create operator that bridges between pipeline and the simulator.
    (
        pose_stream,
        pose_stream_for_control,
        ground_traffic_lights_stream,
        ground_obstacles_stream,
        ground_speed_limit_signs_stream,
        ground_stop_signs_stream,
        vehicle_id_stream,
        open_drive_stream,
        global_trajectory_stream,
    ) = pylot.operator_creator.add_simulator_bridge(
        control_loop_stream,
        release_sensor_stream,
        pipeline_finish_notify_stream,
    )

    # Add sensors.
    center_camera_setup = RGBCameraSetup('center_camera',
                                         FLAGS.camera_image_width,
                                         FLAGS.camera_image_height,
                                         transform, FLAGS.camera_fov)
    (center_camera_stream, notify_rgb_stream) = \
        pylot.operator_creator.add_camera_driver(
            center_camera_setup, vehicle_id_stream, release_sensor_stream)
    notify_streams.append(notify_rgb_stream)

    if pylot.flags.must_add_depth_camera_sensor():
        depth_camera_setup = DepthCameraSetup('depth_center_camera',
                                              FLAGS.camera_image_width,
                                              FLAGS.camera_image_height,
                                              transform, FLAGS.camera_fov)
        (depth_camera_stream, notify_depth_stream) = \
            pylot.operator_creator.add_camera_driver(
                depth_camera_setup, vehicle_id_stream, release_sensor_stream)
    else:
        depth_camera_stream = None
    if pylot.flags.must_add_segmented_camera_sensor():
        segmented_camera_setup = SegmentedCameraSetup(
            'segmented_center_camera', FLAGS.camera_image_width,
            FLAGS.camera_image_height, transform, FLAGS.camera_fov)
        (ground_segmented_stream, notify_segmented_stream) = \
            pylot.operator_creator.add_camera_driver(
                segmented_camera_setup, vehicle_id_stream,
                release_sensor_stream)
    else:
        ground_segmented_stream = None
    if pylot.flags.must_add_lidar_sensor():
        # Place LiDAR sensor in the same location as the center camera.
        (point_cloud_stream, notify_lidar_stream,
         lidar_setup) = pylot.operator_creator.add_lidar(
             transform, vehicle_id_stream, release_sensor_stream)
    else:
        point_cloud_stream = None
        lidar_setup = None

    # Pick the depth source used to locate obstacles in 3D.
    # NOTE(review): notify_lidar_stream / notify_depth_stream are only bound
    # when the corresponding sensor flag is set; an inconsistent flag combo
    # (e.g. 'lidar' here without the lidar sensor) raises NameError — this
    # presumably relies on the flag validators elsewhere; confirm.
    if FLAGS.obstacle_location_finder_sensor == 'lidar':
        depth_stream = point_cloud_stream
        # Camera sensors are slower than the lidar sensor.
        notify_streams.append(notify_lidar_stream)
    elif FLAGS.obstacle_location_finder_sensor == 'depth_camera':
        depth_stream = depth_camera_stream
        notify_streams.append(notify_depth_stream)
    elif FLAGS.obstacle_location_finder_sensor == 'depth_stereo':
        (depth_stream, notify_left_camera_stream,
         notify_right_camera_stream) = pylot.component_creator.add_depth(
             transform, vehicle_id_stream, center_camera_setup,
             depth_camera_stream, release_sensor_stream)
        notify_streams.append(notify_left_camera_stream)
        notify_streams.append(notify_right_camera_stream)
    else:
        raise ValueError(
            'Unknown --obstacle_location_finder_sensor value {}'.format(
                FLAGS.obstacle_location_finder_sensor))

    imu_stream = None
    if pylot.flags.must_add_imu_sensor():
        (imu_stream, _) = pylot.operator_creator.add_imu(
            pylot.utils.Transform(location=pylot.utils.Location(),
                                  rotation=pylot.utils.Rotation()),
            vehicle_id_stream)
    gnss_stream = None
    if pylot.flags.must_add_gnss_sensor():
        (gnss_stream, _) = pylot.operator_creator.add_gnss(
            pylot.utils.Transform(location=pylot.utils.Location(),
                                  rotation=pylot.utils.Rotation()),
            vehicle_id_stream)

    # Optionally replace the simulator pose with estimated localization.
    if FLAGS.localization:
        pose_stream = pylot.operator_creator.add_localization(
            imu_stream, gnss_stream, pose_stream)

    # Perception components.
    obstacles_stream, perfect_obstacles_stream = \
        pylot.component_creator.add_obstacle_detection(
            center_camera_stream, center_camera_setup, pose_stream,
            depth_stream, depth_camera_stream, ground_segmented_stream,
            ground_obstacles_stream, ground_speed_limit_signs_stream,
            ground_stop_signs_stream, time_to_decision_loop_stream)
    # Traffic-light camera is not pitched down (unlike the center camera).
    tl_transform = pylot.utils.Transform(CENTER_CAMERA_LOCATION,
                                         pylot.utils.Rotation())
    traffic_lights_stream, tl_camera_stream = \
        pylot.component_creator.add_traffic_light_detection(
            tl_transform, vehicle_id_stream, release_sensor_stream,
            pose_stream, depth_stream, ground_traffic_lights_stream,
            time_to_decision_loop_stream)

    lane_detection_stream = pylot.component_creator.add_lane_detection(
        center_camera_stream, pose_stream, open_drive_stream)
    if lane_detection_stream is None:
        # Downstream planners still expect a stream; provide an empty one
        # that is closed with a top watermark after startup.
        lane_detection_stream = erdos.IngestStream()
        streams_to_send_top_on.append(lane_detection_stream)

    obstacles_tracking_stream = pylot.component_creator.add_obstacle_tracking(
        center_camera_stream, center_camera_setup, obstacles_stream,
        depth_stream, vehicle_id_stream, pose_stream,
        ground_obstacles_stream, time_to_decision_loop_stream)

    segmented_stream = pylot.component_creator.add_segmentation(
        center_camera_stream, ground_segmented_stream)

    if FLAGS.fusion:
        pylot.operator_creator.add_fusion(pose_stream, obstacles_stream,
                                          depth_stream,
                                          ground_obstacles_stream)

    prediction_stream, prediction_camera_stream, notify_prediction_stream = \
        pylot.component_creator.add_prediction(
            obstacles_tracking_stream, vehicle_id_stream,
            time_to_decision_loop_stream, transform, release_sensor_stream,
            pose_stream, point_cloud_stream, lidar_setup)
    if prediction_stream is None:
        # Planning falls back to raw detections when prediction is disabled.
        prediction_stream = obstacles_stream
    if notify_prediction_stream:
        notify_streams.append(notify_prediction_stream)

    # Planning towards the goal location given by flags.
    goal_location = pylot.utils.Location(float(FLAGS.goal_location[0]),
                                         float(FLAGS.goal_location[1]),
                                         float(FLAGS.goal_location[2]))
    waypoints_stream = pylot.component_creator.add_planning(
        goal_location, pose_stream, prediction_stream, traffic_lights_stream,
        lane_detection_stream, open_drive_stream, global_trajectory_stream,
        time_to_decision_loop_stream)

    if FLAGS.simulator_mode == "pseudo-asynchronous":
        # Add a synchronizer in the pseudo-asynchronous mode.
        (
            waypoints_stream_for_control,
            pose_stream_for_control,
            sensor_ready_stream,
            _pipeline_finish_notify_stream,
        ) = pylot.operator_creator.add_planning_pose_synchronizer(
            waypoints_stream, pose_stream_for_control, pose_stream,
            *notify_streams)
        # Close the feedback loops declared at the top of this function.
        release_sensor_stream.set(sensor_ready_stream)
        pipeline_finish_notify_stream.set(_pipeline_finish_notify_stream)
    else:
        waypoints_stream_for_control = waypoints_stream
        pose_stream_for_control = pose_stream

    control_stream = pylot.component_creator.add_control(
        pose_stream_for_control, waypoints_stream_for_control,
        vehicle_id_stream, perfect_obstacles_stream)
    # Close the main control feedback loop.
    control_loop_stream.set(control_stream)

    pylot.component_creator.add_evaluation(vehicle_id_stream, pose_stream,
                                           imu_stream)

    time_to_decision_stream = pylot.operator_creator.add_time_to_decision(
        pose_stream, obstacles_stream)
    time_to_decision_loop_stream.set(time_to_decision_stream)

    control_display_stream = None
    if pylot.flags.must_visualize():
        control_display_stream, ingest_streams = \
            pylot.operator_creator.add_visualizer(
                pose_stream, center_camera_stream, tl_camera_stream,
                prediction_camera_stream, depth_camera_stream,
                point_cloud_stream, segmented_stream, imu_stream,
                obstacles_stream, traffic_lights_stream,
                obstacles_tracking_stream, lane_detection_stream,
                prediction_stream, waypoints_stream, control_stream)
        streams_to_send_top_on += ingest_streams

    node_handle = erdos.run_async()

    # Close unused ingest streams so watermarks can progress.
    for stream in streams_to_send_top_on:
        stream.send(erdos.WatermarkMessage(erdos.Timestamp(is_top=True)))
    # If we did not use the pseudo-asynchronous mode, ask the sensors to
    # release their readings whenever.
    if FLAGS.simulator_mode != "pseudo-asynchronous":
        release_sensor_stream.send(
            erdos.WatermarkMessage(erdos.Timestamp(is_top=True)))
        pipeline_finish_notify_stream.send(
            erdos.WatermarkMessage(erdos.Timestamp(is_top=True)))
    return node_handle, control_display_stream
def main(args):
    """
    The main function that orchestrates the setup of the world, connection
    to the scenario and the subsequent logging of the frames for computation
    of the mIoU.

    Args:
        args: The argparse.Namespace instance that is retrieved from parsing
        the arguments.
    """
    # Setup the world.
    world = setup_world(args.host, args.port, args.delta)

    # Retrieve the ego-vehicle from the simulation; poll until the scenario
    # has spawned the 'hero' actor.
    ego_vehicle = None
    while ego_vehicle is None:
        print("Waiting for the scenario to be ready ...")
        sleep(1)
        ego_vehicle = retrieve_actor(world, 'vehicle.*', 'hero')
        world.tick()

    # Transform of the cameras (all three share one mount point).
    camera_transform = carla.Transform(location=carla.Location(1.0, 0.0, 1.8),
                                       rotation=carla.Rotation(0, 0, 0))

    # Connect the RGB camera to the vehicle.
    rgb_camera = spawn_camera('sensor.camera.rgb', camera_transform,
                              ego_vehicle, *args.res.split('x'))
    # Connect the Semantic segmentation camera to the vehicle.
    semantic_camera = spawn_camera('sensor.camera.semantic_segmentation',
                                   camera_transform, ego_vehicle,
                                   *args.res.split('x'))
    # Connect the depth camera to the vehicle.
    depth_camera = spawn_camera('sensor.camera.depth', camera_transform,
                                ego_vehicle, *args.res.split('x'))

    # Open the CSV file for writing; closed by the cleanup function below.
    csv_file = open(args.output, 'w')
    csv_writer = csv.writer(csv_file)

    # Create the cleanup function.
    global CLEANUP_FUNCTION
    CLEANUP_FUNCTION = functools.partial(
        cleanup_function,
        world=world,
        cameras=[rgb_camera, semantic_camera, depth_camera],
        csv_file=csv_file)

    # Create a PyGame surface for debugging purposes.
    width, height = map(int, args.res.split('x'))
    surface = None
    if args.visualize:
        surface = pygame.display.set_mode((width, height),
                                          pygame.HWSURFACE | pygame.DOUBLEBUF)

    # Register a callback function with the camera.
    rgb_camera.listen(process_rgb_images)
    semantic_camera.listen(process_semantic_images)
    depth_camera_setup = DepthCameraSetup(
        "depth_camera", width, height,
        Transform.from_simulator_transform(camera_transform))
    depth_camera.listen(
        functools.partial(process_depth_images,
                          depth_camera_setup=depth_camera_setup,
                          ego_vehicle=ego_vehicle,
                          speed=args.speed,
                          csv=csv_writer,
                          surface=surface,
                          visualize=args.visualize))

    try:
        # Keep the main thread alive so the sensor callbacks can run.
        # BUG FIX: was `while True: pass`, which busy-spins a full CPU core;
        # sleeping is equivalent here since all work happens in callbacks.
        while True:
            sleep(1)
    except KeyboardInterrupt:
        CLEANUP_FUNCTION()
        sys.exit(0)
def main(argv):
    """ Computes ground obstacle detection and segmentation decay."""
    transform = pylot.utils.Transform(CENTER_CAMERA_LOCATION,
                                      pylot.utils.Rotation())
    # Loop stream closed later with the synchronizer's control stream.
    control_loop_stream = erdos.LoopStream()
    release_sensor_stream = erdos.IngestStream()
    (
        pose_stream,
        pose_stream_for_control,
        ground_traffic_lights_stream,
        ground_obstacles_stream,
        ground_speed_limit_signs_stream,
        ground_stop_signs_stream,
        vehicle_id_stream,
        open_drive_stream,
        global_trajectory_stream,
    ) = pylot.operator_creator.add_simulator_bridge(control_loop_stream,
                                                    release_sensor_stream)

    # Add camera sensors: RGB, depth and segmented views from one transform.
    rgb_camera_setup = RGBCameraSetup('center_camera',
                                      FLAGS.camera_image_width,
                                      FLAGS.camera_image_height, transform,
                                      FLAGS.camera_fov)
    (center_camera_stream,
     _) = pylot.operator_creator.add_camera_driver(rgb_camera_setup,
                                                   vehicle_id_stream,
                                                   release_sensor_stream)
    depth_camera_setup = DepthCameraSetup('depth_center_camera',
                                          FLAGS.camera_image_width,
                                          FLAGS.camera_image_height,
                                          transform, FLAGS.camera_fov)
    (depth_camera_stream,
     _) = pylot.operator_creator.add_camera_driver(depth_camera_setup,
                                                   vehicle_id_stream,
                                                   release_sensor_stream)
    segmented_camera_setup = SegmentedCameraSetup('segmented_center_camera',
                                                  FLAGS.camera_image_width,
                                                  FLAGS.camera_image_height,
                                                  transform,
                                                  FLAGS.camera_fov)
    (segmented_stream,
     _) = pylot.operator_creator.add_camera_driver(segmented_camera_setup,
                                                   vehicle_id_stream,
                                                   release_sensor_stream)

    map_stream = None
    if FLAGS.compute_detection_decay:
        obstacles_stream = pylot.operator_creator.add_perfect_detector(
            depth_camera_stream, center_camera_stream, segmented_stream,
            pose_stream, ground_obstacles_stream,
            ground_speed_limit_signs_stream, ground_stop_signs_stream)
        map_stream = pylot.operator_creator.add_detection_decay(
            obstacles_stream)

    iou_stream = None
    if FLAGS.compute_segmentation_decay:
        iou_stream = pylot.operator_creator.add_segmentation_decay(
            segmented_stream)

    # TODO: Hack! We synchronize on a single stream, based on a guesestimated
    # of which stream is slowest. Instead, We should synchronize on all output
    # streams, and we should ensure that even the operators without output
    # streams complete.
    if FLAGS.control == 'simulator_auto_pilot':
        # NOTE(review): if neither --compute_detection_decay nor
        # --compute_segmentation_decay is set, stream_to_sync_on stays None
        # here — presumably the flags are validated elsewhere; confirm.
        stream_to_sync_on = iou_stream
        if map_stream is not None:
            stream_to_sync_on = map_stream
        op_config = erdos.OperatorConfig(name='synchronizer_operator',
                                         flow_watermarks=False)
        (control_stream, ) = erdos.connect(SynchronizerOperator, op_config,
                                           [stream_to_sync_on], FLAGS)
        control_loop_stream.set(control_stream)
    else:
        raise ValueError(
            "Must be in auto pilot mode. Pass --control=simulator_auto_pilot")

    erdos.run_async()

    # Ask all sensors to release their data.
    release_sensor_stream.send(
        erdos.WatermarkMessage(erdos.Timestamp(is_top=True)))
def add_traffic_light_detection(tl_transform, vehicle_id_stream,
                                release_sensor_stream, pose_stream=None,
                                depth_stream=None,
                                ground_traffic_lights_stream=None,
                                time_to_decision_stream=None):
    """Adds traffic light detection operators.

    The traffic light detectors use a camera with a narrow field of view.

    If the `--perfect_traffic_light_detection` flag is set, the method adds
    a perfect traffic light detector operator, and returns a stream of
    perfect traffic lights. Otherwise, if the `--traffic_light_detection`
    flag is set it returns a stream of traffic lights detected using a
    trained model.

    Args:
        tl_transform (:py:class:`~pylot.utils.Transform`): Transform of the
            traffic light camera relative to the ego vehicle.
        vehicle_id_stream (:py:class:`erdos.ReadStream`): A stream on which
            the simulator publishes simulator ego-vehicle id.
        release_sensor_stream (:py:class:`erdos.ReadStream`): Stream used to
            signal the cameras to release their readings.
        pose_stream (:py:class:`erdos.ReadStream`, optional): A stream on
            which pose info is received.
        depth_stream (:py:class:`erdos.ReadStream`, optional): Stream on
            which point cloud messages or depth frames are received.
        ground_traffic_lights_stream (:py:class:`erdos.ReadStream`,
            optional): Stream of ground-truth traffic lights, required by
            the perfect detector.
        time_to_decision_stream (:py:class:`erdos.ReadStream`, optional):
            Stream used by the model-based detector for deadline info.

    Returns:
        :py:class:`erdos.ReadStream`: Stream on which
        :py:class:`~pylot.perception.messages.ObstaclesMessage` traffic
        light messages are published.
    """
    tl_camera_stream = None
    if FLAGS.traffic_light_detection or FLAGS.perfect_traffic_light_detection:
        logger.debug('Adding traffic light camera...')
        # Only add the TL camera if traffic light detection is enabled.
        # Narrow 45-degree FOV so distant lights occupy more pixels.
        tl_camera_setup = RGBCameraSetup('traffic_light_camera',
                                         FLAGS.camera_image_width,
                                         FLAGS.camera_image_height,
                                         tl_transform, 45)
        (tl_camera_stream,
         _) = pylot.operator_creator.add_camera_driver(tl_camera_setup,
                                                       vehicle_id_stream,
                                                       release_sensor_stream)

    traffic_lights_stream = None
    if FLAGS.traffic_light_detection:
        logger.debug('Using traffic light detection...')
        traffic_lights_stream = \
            pylot.operator_creator.add_traffic_light_detector(
                tl_camera_stream, time_to_decision_stream)
        # Adds operator that finds the world locations of the traffic lights.
        traffic_lights_stream = \
            pylot.operator_creator.add_obstacle_location_finder(
                traffic_lights_stream, depth_stream, pose_stream,
                tl_camera_setup)

    if FLAGS.perfect_traffic_light_detection:
        assert (pose_stream is not None
                and ground_traffic_lights_stream is not None)
        logger.debug('Using perfect traffic light detection...')
        # Add segmented and depth cameras with fov 45. These cameras are
        # needed by the perfect traffic light detector.
        tl_depth_camera_setup = DepthCameraSetup('traffic_light_depth_camera',
                                                 FLAGS.camera_image_width,
                                                 FLAGS.camera_image_height,
                                                 tl_transform, 45)
        # BUG FIX: this call previously unpacked three values
        # `(tl_depth_camera_stream, _, _)`, but add_camera_driver returns a
        # two-tuple everywhere else in this function/file — the extra `_`
        # would raise a ValueError at runtime.
        (tl_depth_camera_stream, _) = \
            pylot.operator_creator.add_camera_driver(tl_depth_camera_setup,
                                                     vehicle_id_stream,
                                                     release_sensor_stream)
        segmented_tl_camera_setup = SegmentedCameraSetup(
            'traffic_light_segmented_camera', FLAGS.camera_image_width,
            FLAGS.camera_image_height, tl_transform, 45)
        (tl_segmented_camera_stream, _) = \
            pylot.operator_creator.add_camera_driver(
                segmented_tl_camera_setup, vehicle_id_stream,
                release_sensor_stream)
        traffic_lights_stream = \
            pylot.operator_creator.add_perfect_traffic_light_detector(
                ground_traffic_lights_stream, tl_camera_stream,
                tl_depth_camera_stream, tl_segmented_camera_stream,
                pose_stream)

    if FLAGS.simulator_traffic_light_detection:
        logger.debug('Using ground traffic lights from the simulator...')
        traffic_lights_stream = ground_traffic_lights_stream

    return traffic_lights_stream, tl_camera_stream