def spawn_camera(camera_bp,
                 transform,
                 ego_vehicle,
                 width='1280',
                 height='720'):
    """ Spawns the camera with the provided blueprint, and attaches it to the
    given ego_vehicle at the transform.

    Args:
        camera_bp: The name of the camera blueprint to be spawned.
        transform: The transform at which to attach the camera.
        ego_vehicle: The vehicle to which the camera should be attached.
        width: The width of the image to be retrieved from the camera.
        height: The height of the image to be retrieved from the camera.

    Returns:
       A tuple of the spawned camera actor and the corresponding
       SegmentedCameraSetup.
    """
    _world = ego_vehicle.get_world()
    camera_blueprint = _world.get_blueprint_library().find(camera_bp)
    camera_blueprint.set_attribute('image_size_x', width)
    camera_blueprint.set_attribute('image_size_y', height)
    camera = _world.spawn_actor(camera_blueprint,
                                transform,
                                attach_to=ego_vehicle)
    camera_setup = SegmentedCameraSetup(
        "segmented_camera", width, height,
        pylot.utils.Transform.from_carla_transform(transform))

    _world.tick()
    return camera, camera_setup
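

# Usage sketch (not part of the snippet above): the host, port, blueprint id,
# and ego-vehicle lookup below are illustrative assumptions for how
# spawn_camera might be called against a running CARLA server.
import carla

client = carla.Client('localhost', 2000)
world = client.get_world()
# Assumes an ego vehicle has already been spawned in the world.
ego_vehicle = world.get_actors().filter('vehicle.*')[0]
camera_transform = carla.Transform(carla.Location(x=1.5, z=2.4))
camera, camera_setup = spawn_camera('sensor.camera.semantic_segmentation',
                                    camera_transform, ego_vehicle)
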
def add_prediction(obstacles_tracking_stream,
                   vehicle_id_stream,
                   camera_transform,
                   release_sensor_stream,
                   pose_stream=None,
                   point_cloud_stream=None,
                   lidar_setup=None):
    """Adds prediction operators.

    Args:
        obstacles_tracking_stream (:py:class:`erdos.ReadStream`):
            Stream on which
            :py:class:`~pylot.perception.messages.ObstacleTrajectoriesMessage`
            messages are received.
        vehicle_id_stream (:py:class:`erdos.ReadStream`): Stream on which
            the simulator publishes the ego-vehicle's id.
        camera_transform (:py:class:`~pylot.utils.Transform`): Transform of the
            center camera relative to the ego vehicle.
        release_sensor_stream (:py:class:`erdos.ReadStream`): Stream on which
            sensors are notified to release their readings.
        pose_stream (:py:class:`erdos.ReadStream`, optional): Stream on which
            pose info is received.
        point_cloud_stream (:py:class:`erdos.ReadStream`, optional): Stream on
            which point cloud messages are received.
        lidar_setup (:py:class:`~pylot.drivers.sensor_setup.LidarSetup`,
            optional): The setup of the LiDAR sensor; required when r2p2
            prediction is used.

    Returns:
        :py:class:`erdos.ReadStream`: Stream on which
        :py:class:`~pylot.prediction.messages.PredictionMessage` messages are
        published.
    """
    prediction_stream = None
    top_down_segmented_camera_stream = None
    notify_reading_stream = None
    if FLAGS.prediction:
        if FLAGS.prediction_type == 'linear':
            prediction_stream = pylot.operator_creator.add_linear_prediction(
                obstacles_tracking_stream)
        elif FLAGS.prediction_type == 'r2p2':
            assert point_cloud_stream is not None
            assert lidar_setup is not None
            prediction_stream = pylot.operator_creator.add_r2p2_prediction(
                point_cloud_stream, obstacles_tracking_stream, lidar_setup)
        else:
            raise ValueError('Unexpected prediction_type {}'.format(
                FLAGS.prediction_type))
        if FLAGS.evaluate_prediction:
            assert pose_stream is not None
            pylot.operator_creator.add_prediction_evaluation(
                pose_stream, obstacles_tracking_stream, prediction_stream)
        if FLAGS.visualize_prediction:
            # Add bird's eye camera.
            top_down_transform = pylot.utils.get_top_down_transform(
                camera_transform, FLAGS.top_down_camera_altitude)
            top_down_seg_camera_setup = SegmentedCameraSetup(
                'top_down_segmented_camera', FLAGS.camera_image_width,
                FLAGS.camera_image_height, top_down_transform, 90)
            (top_down_segmented_camera_stream,
             notify_reading_stream) = pylot.operator_creator.add_camera_driver(
                 top_down_seg_camera_setup, vehicle_id_stream,
                 release_sensor_stream)
    return (prediction_stream, top_down_segmented_camera_stream,
            notify_reading_stream)
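
# Usage sketch (assumption): the streams and lidar_setup below are expected to
# come from the simulator bridge, the obstacle tracker, and add_lidar, as
# wired up in the driver examples later in this file.
# (prediction_stream, top_down_segmented_camera_stream,
#  notify_reading_stream) = add_prediction(
#      obstacles_tracking_stream, vehicle_id_stream, camera_transform,
#      release_sensor_stream, pose_stream=pose_stream,
#      point_cloud_stream=point_cloud_stream, lidar_setup=lidar_setup)
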
def on_segmented_msg(simulator_image):
    global SEGMENTED_FRAME
    transform = pylot.utils.Transform.from_simulator_transform(
        simulator_image.transform)
    camera_setup = SegmentedCameraSetup("segmented_camera", FLAGS.frame_width,
                                        FLAGS.camera_height, transform,
                                        FLAGS.camera_fov)
    SEGMENTED_FRAME = SegmentedFrame(simulator_image, 'carla', camera_setup)
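
# Usage sketch (assumption): this callback is meant to be registered on a
# CARLA semantic-segmentation sensor, e.g. one created with spawn_camera
# above, via the sensor's listen() method:
# camera, _ = spawn_camera('sensor.camera.semantic_segmentation',
#                          camera_transform, ego_vehicle)
# camera.listen(on_segmented_msg)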
Example #4
def add_segmented_camera(transform,
                         vehicle_id_stream,
                         name='center_segmented_camera',
                         fov=90):
    segmented_camera_setup = SegmentedCameraSetup(
        name, FLAGS.carla_camera_image_width, FLAGS.carla_camera_image_height,
        transform, fov)
    ground_segmented_camera_stream = _add_camera_driver(
        vehicle_id_stream, segmented_camera_setup)
    return (ground_segmented_camera_stream, segmented_camera_setup)
Example #5
def add_segmented_camera(transform,
                         vehicle_id_stream,
                         release_sensor_stream,
                         name='center_segmented_camera',
                         fov=90):
    from pylot.drivers.sensor_setup import SegmentedCameraSetup
    segmented_camera_setup = SegmentedCameraSetup(
        name, FLAGS.carla_camera_image_width, FLAGS.carla_camera_image_height,
        transform, fov)
    ground_segmented_camera_stream, notify_reading_stream = _add_camera_driver(
        vehicle_id_stream, release_sensor_stream, segmented_camera_setup)
    return (ground_segmented_camera_stream, notify_reading_stream,
            segmented_camera_setup)
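
# Usage sketch (assumption): the transform and streams below normally come
# from the pipeline setup shown in the driver examples later in this file.
# (ground_segmented_stream, notify_segmented_stream,
#  seg_camera_setup) = add_segmented_camera(
#      pylot.utils.Transform(CENTER_CAMERA_LOCATION, pylot.utils.Rotation()),
#      vehicle_id_stream, release_sensor_stream)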
Example #6
def main(argv):
    transform = pylot.utils.Transform(CENTER_CAMERA_LOCATION,
                                      pylot.utils.Rotation())

    control_loop_stream = erdos.LoopStream()
    release_sensor_stream = erdos.IngestStream()
    pipeline_finish_notify_stream = erdos.IngestStream()
    # Create an operator that connects to the simulator.
    (
        pose_stream,
        pose_stream_for_control,
        ground_traffic_lights_stream,
        ground_obstacles_stream,
        ground_speed_limit_signs_stream,
        ground_stop_signs_stream,
        vehicle_id_stream,
        open_drive_stream,
        global_trajectory_stream,
    ) = pylot.operator_creator.add_simulator_bridge(
        control_loop_stream, release_sensor_stream,
        pipeline_finish_notify_stream)

    # Add sensors.
    rgb_camera_setup = RGBCameraSetup('center_camera',
                                      FLAGS.camera_image_width,
                                      FLAGS.camera_image_height, transform,
                                      FLAGS.camera_fov)
    (center_camera_stream, notify_rgb_stream) = \
        pylot.operator_creator.add_camera_driver(
            rgb_camera_setup, vehicle_id_stream, release_sensor_stream)
    depth_camera_setup = DepthCameraSetup('depth_center_camera',
                                          FLAGS.camera_image_width,
                                          FLAGS.camera_image_height, transform,
                                          FLAGS.camera_fov)
    (depth_camera_stream,
     _) = pylot.operator_creator.add_camera_driver(depth_camera_setup,
                                                   vehicle_id_stream,
                                                   release_sensor_stream)
    seg_camera_setup = SegmentedCameraSetup('seg_center_camera',
                                            FLAGS.camera_image_width,
                                            FLAGS.camera_image_height,
                                            transform, FLAGS.camera_fov)
    (segmented_stream,
     _) = pylot.operator_creator.add_camera_driver(seg_camera_setup,
                                                   vehicle_id_stream,
                                                   release_sensor_stream)

    if FLAGS.log_rgb_camera:
        pylot.operator_creator.add_camera_logging(
            center_camera_stream, 'center_camera_logger_operator', 'center')

    if FLAGS.log_segmented_camera:
        pylot.operator_creator.add_camera_logging(
            segmented_stream, 'center_segmented_camera_logger_operator',
            'segmented')

    if FLAGS.log_depth_camera:
        pylot.operator_creator.add_camera_logging(
            depth_camera_stream, 'depth_camera_logger_operator', 'depth')

    imu_stream = None
    if FLAGS.log_imu:
        (imu_stream, _) = pylot.operator_creator.add_imu(
            pylot.utils.Transform(location=pylot.utils.Location(),
                                  rotation=pylot.utils.Rotation()),
            vehicle_id_stream)
        pylot.operator_creator.add_imu_logging(imu_stream)

    gnss_stream = None
    if FLAGS.log_gnss:
        (gnss_stream, _) = pylot.operator_creator.add_gnss(
            pylot.utils.Transform(location=pylot.utils.Location(),
                                  rotation=pylot.utils.Rotation()),
            vehicle_id_stream)
        pylot.operator_creator.add_gnss_logging(gnss_stream)

    if FLAGS.log_pose:
        pylot.operator_creator.add_pose_logging(pose_stream)

    traffic_lights_stream = None
    traffic_light_camera_stream = None
    if FLAGS.log_traffic_lights:
        tl_camera_setup = RGBCameraSetup('traffic_light_camera',
                                         FLAGS.camera_image_width,
                                         FLAGS.camera_image_height, transform,
                                         45)
        (traffic_light_camera_stream, _) = \
            pylot.operator_creator.add_camera_driver(
                tl_camera_setup, vehicle_id_stream, release_sensor_stream)
        pylot.operator_creator.add_camera_logging(
            traffic_light_camera_stream,
            'traffic_light_camera_logger_operator', 'traffic-light')

        tl_seg_camera_setup = SegmentedCameraSetup(
            'traffic_light_segmented_camera', FLAGS.camera_image_width,
            FLAGS.camera_image_height, transform, 45)
        (traffic_light_segmented_camera_stream, _) = \
            pylot.operator_creator.add_camera_driver(
                tl_seg_camera_setup,
                vehicle_id_stream,
                release_sensor_stream)

        tl_depth_camera_setup = DepthCameraSetup('traffic_light_depth_camera',
                                                 FLAGS.camera_image_width,
                                                 FLAGS.camera_image_height,
                                                 transform, 45)
        (traffic_light_depth_camera_stream, _) = \
            pylot.operator_creator.add_camera_driver(
                tl_depth_camera_setup, vehicle_id_stream,
                release_sensor_stream)

        traffic_lights_stream = \
            pylot.operator_creator.add_perfect_traffic_light_detector(
                ground_traffic_lights_stream,
                traffic_light_camera_stream,
                traffic_light_depth_camera_stream,
                traffic_light_segmented_camera_stream,
                pose_stream)
        pylot.operator_creator.add_bounding_box_logging(traffic_lights_stream)

    if FLAGS.log_left_right_cameras:
        (left_camera_stream, right_camera_stream, _,
         _) = pylot.operator_creator.add_left_right_cameras(
             transform, vehicle_id_stream, release_sensor_stream)
        pylot.operator_creator.add_camera_logging(
            left_camera_stream, 'left_camera_logger_operator', 'left')
        pylot.operator_creator.add_camera_logging(
            right_camera_stream, 'right_camera_logger_operator', 'right')

    point_cloud_stream = None
    if FLAGS.log_lidar:
        (point_cloud_stream, _,
         _) = pylot.operator_creator.add_lidar(transform, vehicle_id_stream,
                                               release_sensor_stream)
        pylot.operator_creator.add_lidar_logging(point_cloud_stream)

    obstacles_stream = None
    if FLAGS.log_obstacles:
        obstacles_stream = pylot.operator_creator.add_perfect_detector(
            depth_camera_stream, center_camera_stream, segmented_stream,
            pose_stream, ground_obstacles_stream,
            ground_speed_limit_signs_stream, ground_stop_signs_stream)
        pylot.operator_creator.add_bounding_box_logging(obstacles_stream)

    if FLAGS.log_multiple_object_tracker:
        pylot.operator_creator.add_multiple_object_tracker_logging(
            obstacles_stream)

    obstacles_tracking_stream = None
    if FLAGS.log_trajectories or FLAGS.log_chauffeur:
        obstacles_tracking_stream = \
            pylot.operator_creator.add_perfect_tracking(
                vehicle_id_stream,
                ground_obstacles_stream,
                pose_stream)
        if FLAGS.log_trajectories:
            pylot.operator_creator.add_trajectory_logging(
                obstacles_tracking_stream)

    top_down_segmented_stream = None
    top_down_camera_setup = None
    if FLAGS.log_chauffeur or FLAGS.log_top_down_segmentation:
        top_down_transform = pylot.utils.get_top_down_transform(
            transform, FLAGS.top_down_camera_altitude)
        top_down_seg_cs = SegmentedCameraSetup('top_down_segmented_camera',
                                               FLAGS.camera_image_width,
                                               FLAGS.camera_image_height,
                                               top_down_transform, 90)
        (top_down_segmented_stream, _) = \
            pylot.operator_creator.add_camera_driver(
                top_down_seg_cs,
                vehicle_id_stream,
                release_sensor_stream)

        if FLAGS.log_top_down_segmentation:
            pylot.operator_creator.add_camera_logging(
                top_down_segmented_stream,
                'top_down_segmented_logger_operator', 'top-down-segmented')

        if FLAGS.log_chauffeur:
            top_down_camera_setup = RGBCameraSetup('top_down_rgb_camera',
                                                   FLAGS.camera_image_width,
                                                   FLAGS.camera_image_height,
                                                   top_down_transform, 90)
            (top_down_camera_stream,
             _) = pylot.operator_creator.add_camera_driver(
                 top_down_camera_setup, vehicle_id_stream,
                 release_sensor_stream)
            pylot.operator_creator.add_chauffeur_logging(
                vehicle_id_stream, pose_stream, obstacles_tracking_stream,
                top_down_camera_stream, top_down_segmented_stream,
                top_down_camera_setup)

    perfect_lane_stream = None
    if FLAGS.log_lane_detection_camera:
        perfect_lane_stream = pylot.operator_creator.add_perfect_lane_detector(
            pose_stream, open_drive_stream, center_camera_stream)

    # TODO: Hack! We synchronize on a single stream, based on a guesstimate
    # of which stream is slowest. Instead, we should synchronize on all output
    # streams, and we should ensure that even the operators without output
    # streams complete.
    if FLAGS.control == 'simulator_auto_pilot':
        # We insert a synchronizing operator that sends back a command when
        # the low watermark progresses on all input streams.
        stream_to_sync_on = center_camera_stream
        if obstacles_tracking_stream is not None:
            stream_to_sync_on = obstacles_tracking_stream
        if traffic_lights_stream is not None:
            stream_to_sync_on = traffic_lights_stream
        if perfect_lane_stream is not None:
            stream_to_sync_on = perfect_lane_stream
        if obstacles_stream is not None:
            stream_to_sync_on = obstacles_stream
        control_stream = pylot.operator_creator.add_synchronizer(
            vehicle_id_stream, stream_to_sync_on)
        control_loop_stream.set(control_stream)
    else:
        raise ValueError(
            "Must be in auto pilot mode. Pass --control=simulator_auto_pilot")

    control_display_stream = None
    streams_to_send_top_on = []
    if pylot.flags.must_visualize():
        control_display_stream, ingest_streams = \
            pylot.operator_creator.add_visualizer(
                pose_stream=pose_stream,
                camera_stream=center_camera_stream,
                tl_camera_stream=traffic_light_camera_stream,
                depth_stream=depth_camera_stream,
                point_cloud_stream=point_cloud_stream,
                segmentation_stream=segmented_stream,
                imu_stream=imu_stream,
                obstacles_stream=obstacles_stream,
                traffic_lights_stream=traffic_lights_stream,
                tracked_obstacles_stream=obstacles_tracking_stream)
        streams_to_send_top_on += ingest_streams

    # Connect a client to the simulator to make sure that we can turn
    # synchronous mode off after the script finishes running.
    client, world = pylot.simulation.utils.get_world(FLAGS.simulator_host,
                                                     FLAGS.simulator_port,
                                                     FLAGS.simulator_timeout)

    # Run the data-flow.
    node_handle = erdos.run_async()

    signal.signal(signal.SIGINT, shutdown)

    # Ask all sensors to release their data.
    release_sensor_stream.send(
        erdos.WatermarkMessage(erdos.Timestamp(is_top=True)))
    for stream in streams_to_send_top_on:
        stream.send(erdos.WatermarkMessage(erdos.Timestamp(is_top=True)))

    try:
        if pylot.flags.must_visualize():
            pylot.utils.run_visualizer_control_loop(control_display_stream)
        node_handle.wait()
    except KeyboardInterrupt:
        node_handle.shutdown()
        pylot.simulation.utils.set_asynchronous_mode(world)
        if pylot.flags.must_visualize():
            import pygame
            pygame.quit()
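

# Entry-point sketch (an assumption, not shown above): pylot-style scripts
# that use absl FLAGS such as this one are typically launched through absl's
# app.run so that the flags are parsed before main(argv) executes.
if __name__ == '__main__':
    from absl import app
    app.run(main)
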
Example #7
def driver():
    transform = pylot.utils.Transform(CENTER_CAMERA_LOCATION,
                                      pylot.utils.Rotation(pitch=-15))
    streams_to_send_top_on = []
    control_loop_stream = erdos.LoopStream()
    time_to_decision_loop_stream = erdos.LoopStream()
    if FLAGS.simulator_mode == 'pseudo-asynchronous':
        release_sensor_stream = erdos.LoopStream()
        pipeline_finish_notify_stream = erdos.LoopStream()
    else:
        release_sensor_stream = erdos.IngestStream()
        pipeline_finish_notify_stream = erdos.IngestStream()
    notify_streams = []

    # Create an operator that bridges between the pipeline and the simulator.
    (
        pose_stream,
        pose_stream_for_control,
        ground_traffic_lights_stream,
        ground_obstacles_stream,
        ground_speed_limit_signs_stream,
        ground_stop_signs_stream,
        vehicle_id_stream,
        open_drive_stream,
        global_trajectory_stream,
    ) = pylot.operator_creator.add_simulator_bridge(
        control_loop_stream,
        release_sensor_stream,
        pipeline_finish_notify_stream,
    )

    # Add sensors.
    center_camera_setup = RGBCameraSetup('center_camera',
                                         FLAGS.camera_image_width,
                                         FLAGS.camera_image_height, transform,
                                         FLAGS.camera_fov)
    (center_camera_stream, notify_rgb_stream) = \
        pylot.operator_creator.add_camera_driver(
            center_camera_setup, vehicle_id_stream, release_sensor_stream)
    notify_streams.append(notify_rgb_stream)
    if pylot.flags.must_add_depth_camera_sensor():
        depth_camera_setup = DepthCameraSetup('depth_center_camera',
                                              FLAGS.camera_image_width,
                                              FLAGS.camera_image_height,
                                              transform, FLAGS.camera_fov)
        (depth_camera_stream, notify_depth_stream) = \
            pylot.operator_creator.add_camera_driver(
                depth_camera_setup, vehicle_id_stream, release_sensor_stream)
    else:
        depth_camera_stream = None
    if pylot.flags.must_add_segmented_camera_sensor():
        segmented_camera_setup = SegmentedCameraSetup(
            'segmented_center_camera', FLAGS.camera_image_width,
            FLAGS.camera_image_height, transform, FLAGS.camera_fov)
        (ground_segmented_stream, notify_segmented_stream) = \
            pylot.operator_creator.add_camera_driver(
                segmented_camera_setup, vehicle_id_stream,
                release_sensor_stream)
    else:
        ground_segmented_stream = None

    if pylot.flags.must_add_lidar_sensor():
        # Place LiDAR sensor in the same location as the center camera.
        (point_cloud_stream, notify_lidar_stream,
         lidar_setup) = pylot.operator_creator.add_lidar(
             transform, vehicle_id_stream, release_sensor_stream)
    else:
        point_cloud_stream = None
        lidar_setup = None

    if FLAGS.obstacle_location_finder_sensor == 'lidar':
        depth_stream = point_cloud_stream
        # Camera sensors are slower than the lidar sensor.
        notify_streams.append(notify_lidar_stream)
    elif FLAGS.obstacle_location_finder_sensor == 'depth_camera':
        depth_stream = depth_camera_stream
        notify_streams.append(notify_depth_stream)
    elif FLAGS.obstacle_location_finder_sensor == 'depth_stereo':
        (depth_stream, notify_left_camera_stream,
         notify_right_camera_stream) = pylot.component_creator.add_depth(
             transform, vehicle_id_stream, center_camera_setup,
             depth_camera_stream, release_sensor_stream)
        notify_streams.append(notify_left_camera_stream)
        notify_streams.append(notify_right_camera_stream)
    else:
        raise ValueError(
            'Unknown --obstacle_location_finder_sensor value {}'.format(
                FLAGS.obstacle_location_finder_sensor))

    imu_stream = None
    if pylot.flags.must_add_imu_sensor():
        (imu_stream, _) = pylot.operator_creator.add_imu(
            pylot.utils.Transform(location=pylot.utils.Location(),
                                  rotation=pylot.utils.Rotation()),
            vehicle_id_stream)

    gnss_stream = None
    if pylot.flags.must_add_gnss_sensor():
        (gnss_stream, _) = pylot.operator_creator.add_gnss(
            pylot.utils.Transform(location=pylot.utils.Location(),
                                  rotation=pylot.utils.Rotation()),
            vehicle_id_stream)

    if FLAGS.localization:
        pose_stream = pylot.operator_creator.add_localization(
            imu_stream, gnss_stream, pose_stream)

    obstacles_stream, perfect_obstacles_stream = \
        pylot.component_creator.add_obstacle_detection(
            center_camera_stream, center_camera_setup, pose_stream,
            depth_stream, depth_camera_stream, ground_segmented_stream,
            ground_obstacles_stream, ground_speed_limit_signs_stream,
            ground_stop_signs_stream, time_to_decision_loop_stream)
    tl_transform = pylot.utils.Transform(CENTER_CAMERA_LOCATION,
                                         pylot.utils.Rotation())
    traffic_lights_stream, tl_camera_stream = \
        pylot.component_creator.add_traffic_light_detection(
            tl_transform, vehicle_id_stream, release_sensor_stream,
            pose_stream, depth_stream, ground_traffic_lights_stream,
            time_to_decision_loop_stream)

    lane_detection_stream = pylot.component_creator.add_lane_detection(
        center_camera_stream, pose_stream, open_drive_stream)
    if lane_detection_stream is None:
        lane_detection_stream = erdos.IngestStream()
        streams_to_send_top_on.append(lane_detection_stream)

    obstacles_tracking_stream = pylot.component_creator.add_obstacle_tracking(
        center_camera_stream, center_camera_setup, obstacles_stream,
        depth_stream, vehicle_id_stream, pose_stream, ground_obstacles_stream,
        time_to_decision_loop_stream)

    segmented_stream = pylot.component_creator.add_segmentation(
        center_camera_stream, ground_segmented_stream)

    if FLAGS.fusion:
        pylot.operator_creator.add_fusion(pose_stream, obstacles_stream,
                                          depth_stream,
                                          ground_obstacles_stream)

    prediction_stream, prediction_camera_stream, notify_prediction_stream = \
        pylot.component_creator.add_prediction(
            obstacles_tracking_stream, vehicle_id_stream,
            time_to_decision_loop_stream, transform, release_sensor_stream,
            pose_stream, point_cloud_stream, lidar_setup)
    if prediction_stream is None:
        prediction_stream = obstacles_stream
    if notify_prediction_stream:
        notify_streams.append(notify_prediction_stream)

    goal_location = pylot.utils.Location(float(FLAGS.goal_location[0]),
                                         float(FLAGS.goal_location[1]),
                                         float(FLAGS.goal_location[2]))
    waypoints_stream = pylot.component_creator.add_planning(
        goal_location, pose_stream, prediction_stream, traffic_lights_stream,
        lane_detection_stream, open_drive_stream, global_trajectory_stream,
        time_to_decision_loop_stream)

    if FLAGS.simulator_mode == "pseudo-asynchronous":
        # Add a synchronizer in the pseudo-asynchronous mode.
        (
            waypoints_stream_for_control,
            pose_stream_for_control,
            sensor_ready_stream,
            _pipeline_finish_notify_stream,
        ) = pylot.operator_creator.add_planning_pose_synchronizer(
            waypoints_stream, pose_stream_for_control, pose_stream,
            *notify_streams)
        release_sensor_stream.set(sensor_ready_stream)
        pipeline_finish_notify_stream.set(_pipeline_finish_notify_stream)
    else:
        waypoints_stream_for_control = waypoints_stream
        pose_stream_for_control = pose_stream

    control_stream = pylot.component_creator.add_control(
        pose_stream_for_control, waypoints_stream_for_control,
        vehicle_id_stream, perfect_obstacles_stream)
    control_loop_stream.set(control_stream)

    pylot.component_creator.add_evaluation(vehicle_id_stream, pose_stream,
                                           imu_stream)

    time_to_decision_stream = pylot.operator_creator.add_time_to_decision(
        pose_stream, obstacles_stream)
    time_to_decision_loop_stream.set(time_to_decision_stream)

    control_display_stream = None
    if pylot.flags.must_visualize():
        control_display_stream, ingest_streams = \
            pylot.operator_creator.add_visualizer(
                pose_stream, center_camera_stream, tl_camera_stream,
                prediction_camera_stream, depth_camera_stream,
                point_cloud_stream, segmented_stream, imu_stream,
                obstacles_stream, traffic_lights_stream,
                obstacles_tracking_stream, lane_detection_stream,
                prediction_stream, waypoints_stream, control_stream)
        streams_to_send_top_on += ingest_streams

    node_handle = erdos.run_async()

    for stream in streams_to_send_top_on:
        stream.send(erdos.WatermarkMessage(erdos.Timestamp(is_top=True)))
    # If we did not use the pseudo-asynchronous mode, ask the sensors to
    # release their readings without waiting for the rest of the pipeline.
    if FLAGS.simulator_mode != "pseudo-asynchronous":
        release_sensor_stream.send(
            erdos.WatermarkMessage(erdos.Timestamp(is_top=True)))
        pipeline_finish_notify_stream.send(
            erdos.WatermarkMessage(erdos.Timestamp(is_top=True)))

    return node_handle, control_display_stream
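
# Caller sketch (assumption, mirroring the shutdown pattern in the previous
# example): run the dataflow that driver() builds and block until it finishes.
# node_handle, control_display_stream = driver()
# try:
#     if pylot.flags.must_visualize():
#         pylot.utils.run_visualizer_control_loop(control_display_stream)
#     node_handle.wait()
# except KeyboardInterrupt:
#     node_handle.shutdown()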
Example #8
def main(argv):
    """ Computes ground obstacle detection and segmentation decay."""
    transform = pylot.utils.Transform(CENTER_CAMERA_LOCATION,
                                      pylot.utils.Rotation())

    control_loop_stream = erdos.LoopStream()
    release_sensor_stream = erdos.IngestStream()
    (
        pose_stream,
        pose_stream_for_control,
        ground_traffic_lights_stream,
        ground_obstacles_stream,
        ground_speed_limit_signs_stream,
        ground_stop_signs_stream,
        vehicle_id_stream,
        open_drive_stream,
        global_trajectory_stream,
    ) = pylot.operator_creator.add_simulator_bridge(control_loop_stream,
                                                    release_sensor_stream)

    # Add camera sensors.
    rgb_camera_setup = RGBCameraSetup('center_camera',
                                      FLAGS.camera_image_width,
                                      FLAGS.camera_image_height, transform,
                                      FLAGS.camera_fov)
    (center_camera_stream,
     _) = pylot.operator_creator.add_camera_driver(rgb_camera_setup,
                                                   vehicle_id_stream,
                                                   release_sensor_stream)

    depth_camera_setup = DepthCameraSetup('depth_center_camera',
                                          FLAGS.camera_image_width,
                                          FLAGS.camera_image_height, transform,
                                          FLAGS.camera_fov)
    (depth_camera_stream,
     _) = pylot.operator_creator.add_camera_driver(depth_camera_setup,
                                                   vehicle_id_stream,
                                                   release_sensor_stream)

    segmented_camera_setup = SegmentedCameraSetup('segmented_center_camera',
                                                  FLAGS.camera_image_width,
                                                  FLAGS.camera_image_height,
                                                  transform, FLAGS.camera_fov)
    (segmented_stream,
     _) = pylot.operator_creator.add_camera_driver(segmented_camera_setup,
                                                   vehicle_id_stream,
                                                   release_sensor_stream)

    map_stream = None
    if FLAGS.compute_detection_decay:
        obstacles_stream = pylot.operator_creator.add_perfect_detector(
            depth_camera_stream, center_camera_stream, segmented_stream,
            pose_stream, ground_obstacles_stream,
            ground_speed_limit_signs_stream, ground_stop_signs_stream)
        map_stream = pylot.operator_creator.add_detection_decay(
            obstacles_stream)

    iou_stream = None
    if FLAGS.compute_segmentation_decay:
        iou_stream = pylot.operator_creator.add_segmentation_decay(
            segmented_stream)

    # TODO: Hack! We synchronize on a single stream, based on a guesstimate
    # of which stream is slowest. Instead, we should synchronize on all output
    # streams, and we should ensure that even the operators without output
    # streams complete.
    if FLAGS.control == 'simulator_auto_pilot':
        stream_to_sync_on = iou_stream
        if map_stream is not None:
            stream_to_sync_on = map_stream
        op_config = erdos.OperatorConfig(name='synchronizer_operator',
                                         flow_watermarks=False)
        (control_stream, ) = erdos.connect(SynchronizerOperator, op_config,
                                           [stream_to_sync_on], FLAGS)
        control_loop_stream.set(control_stream)
    else:
        raise ValueError(
            "Must be in auto pilot mode. Pass --control=simulator_auto_pilot")

    erdos.run_async()

    # Ask all sensors to release their data.
    release_sensor_stream.send(
        erdos.WatermarkMessage(erdos.Timestamp(is_top=True)))
Example #9
def add_traffic_light_detection(tl_transform,
                                vehicle_id_stream,
                                release_sensor_stream,
                                pose_stream=None,
                                depth_stream=None,
                                ground_traffic_lights_stream=None,
                                time_to_decision_stream=None):
    """Adds traffic light detection operators.

    The traffic light detectors use a camera with a narrow field of view.

    If the `--perfect_traffic_light_detection` flag is set, the method adds a
    perfect traffic light detector operator, and returns a stream of perfect
    traffic lights. Otherwise, if the `--traffic_light_detection` flag is
    set it returns a stream of traffic lights detected using a trained model.

    Args:
        tl_transform (:py:class:`~pylot.utils.Transform`): Transform of the
            traffic light camera relative to the ego vehicle.
        vehicle_id_stream (:py:class:`erdos.ReadStream`): A stream on which
            the simulator publishes the ego-vehicle's id.
        release_sensor_stream (:py:class:`erdos.ReadStream`): Stream on which
            sensors are notified to release their readings.
        pose_stream (:py:class:`erdos.ReadStream`, optional): A stream on
            which pose info is received.
        depth_stream (:py:class:`erdos.ReadStream`, optional): Stream on
            which point cloud messages or depth frames are received.
        ground_traffic_lights_stream (:py:class:`erdos.ReadStream`, optional):
            Stream of ground-truth traffic lights; required when perfect
            traffic light detection is enabled.
        time_to_decision_stream (:py:class:`erdos.ReadStream`, optional):
            Stream on which time-to-decision messages are received.

    Returns:
        :py:class:`erdos.ReadStream`: Stream on which
        :py:class:`~pylot.perception.messages.ObstaclesMessage` traffic light
        messages are published.
    """
    tl_camera_stream = None
    if FLAGS.traffic_light_detection or FLAGS.perfect_traffic_light_detection:
        logger.debug('Adding traffic light camera...')
        # Only add the TL camera if traffic light detection is enabled.
        tl_camera_setup = RGBCameraSetup('traffic_light_camera',
                                         FLAGS.camera_image_width,
                                         FLAGS.camera_image_height,
                                         tl_transform, 45)
        (tl_camera_stream,
         _) = pylot.operator_creator.add_camera_driver(tl_camera_setup,
                                                       vehicle_id_stream,
                                                       release_sensor_stream)

    traffic_lights_stream = None
    if FLAGS.traffic_light_detection:
        logger.debug('Using traffic light detection...')
        traffic_lights_stream = \
            pylot.operator_creator.add_traffic_light_detector(
                tl_camera_stream, time_to_decision_stream)
        # Adds operator that finds the world locations of the traffic lights.
        traffic_lights_stream = \
            pylot.operator_creator.add_obstacle_location_finder(
                traffic_lights_stream, depth_stream, pose_stream,
                tl_camera_setup)

    if FLAGS.perfect_traffic_light_detection:
        assert (pose_stream is not None
                and ground_traffic_lights_stream is not None)
        logger.debug('Using perfect traffic light detection...')
        # Add segmented and depth cameras with fov 45. These cameras are needed
        # by the perfect traffic light detector.
        tl_depth_camera_setup = DepthCameraSetup('traffic_light_depth_camera',
                                                 FLAGS.camera_image_width,
                                                 FLAGS.camera_image_height,
                                                 tl_transform, 45)
        (tl_depth_camera_stream,
         _) = pylot.operator_creator.add_camera_driver(tl_depth_camera_setup,
                                                       vehicle_id_stream,
                                                       release_sensor_stream)

        segmented_tl_camera_setup = SegmentedCameraSetup(
            'traffic_light_segmented_camera', FLAGS.camera_image_width,
            FLAGS.camera_image_height, tl_transform, 45)
        (tl_segmented_camera_stream, _) = \
            pylot.operator_creator.add_camera_driver(
                segmented_tl_camera_setup, vehicle_id_stream,
                release_sensor_stream)

        traffic_lights_stream = \
            pylot.operator_creator.add_perfect_traffic_light_detector(
                ground_traffic_lights_stream, tl_camera_stream,
                tl_depth_camera_stream, tl_segmented_camera_stream,
                pose_stream)

    if FLAGS.simulator_traffic_light_detection:
        logger.debug('Using ground traffic lights from the simulator...')
        traffic_lights_stream = ground_traffic_lights_stream

    return traffic_lights_stream, tl_camera_stream
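
# Usage sketch (assumption): mirrors how the driver example above wires this
# component; the stream variables come from the simulator bridge and the
# time-to-decision loop.
# traffic_lights_stream, tl_camera_stream = add_traffic_light_detection(
#     tl_transform, vehicle_id_stream, release_sensor_stream,
#     pose_stream=pose_stream, depth_stream=depth_stream,
#     ground_traffic_lights_stream=ground_traffic_lights_stream,
#     time_to_decision_stream=time_to_decision_loop_stream)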