Example #1
def create_data_flow():
    """Creates a data-flow which process input data, and outputs commands.

    Returns:
        A tuple of streams. The first N - 1 streams are input streams on
        which the agent can send input data. The last stream is the control
        stream on which the agent receives control commands.
    """
    can_bus_stream = erdos.IngestStream()
    open_drive_stream = erdos.IngestStream()
    global_trajectory_stream = erdos.IngestStream()
    # Currently, we do not use the scene layout information.
    # scene_layout_stream = erdos.IngestStream()
    ground_obstacles_stream = erdos.IngestStream()
    traffic_lights_stream = erdos.IngestStream()

    # Add waypoint planner.
    waypoints_stream = pylot.operator_creator.add_waypoint_planning(
        can_bus_stream, open_drive_stream, global_trajectory_stream,
        ground_obstacles_stream, traffic_lights_stream, None)
    control_stream = pylot.operator_creator.add_pid_agent(
        can_bus_stream, waypoints_stream)
    extract_control_stream = erdos.ExtractStream(control_stream)

    return (can_bus_stream, global_trajectory_stream, ground_obstacles_stream,
            traffic_lights_stream, open_drive_stream, extract_control_stream)
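The function above only wires up the graph; the agent that calls it still has to start the dataflow, push data on the ingest streams, and read commands back. A rough driving loop might look like the sketch below; the payloads are placeholders rather than the real Pylot message classes, and whether a watermark per timestamp is required depends on the downstream operators.

# Hypothetical usage sketch for the dataflow built above.
(can_bus_stream, global_trajectory_stream, ground_obstacles_stream,
 traffic_lights_stream, open_drive_stream, control_stream) = create_data_flow()
erdos.run_async()

for t in range(10):
    timestamp = erdos.Timestamp(coordinates=[t])
    # Placeholder payloads; a real agent would send CanBus, obstacle, and
    # traffic-light messages built from its sensor readings.
    can_bus_stream.send(erdos.Message(timestamp, None))
    ground_obstacles_stream.send(erdos.Message(timestamp, []))
    traffic_lights_stream.send(erdos.Message(timestamp, []))
    for stream in (can_bus_stream, ground_obstacles_stream,
                   traffic_lights_stream):
        # Close the timestamp so downstream operators can make progress.
        stream.send(erdos.WatermarkMessage(timestamp))
    command = control_stream.read()
    print("Received control command: {}".format(command))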
Example #2
def create_data_flow():
    """ Create the challenge data-flow graph."""
    track = get_track()
    time_to_decision_loop_stream = erdos.LoopStream()
    camera_setups = create_camera_setups(track)
    camera_streams = {}
    for name in camera_setups:
        camera_streams[name] = erdos.IngestStream()
    pose_stream = erdos.IngestStream()
    global_trajectory_stream = erdos.IngestStream()
    open_drive_stream = erdos.IngestStream()

    if (track == Track.ALL_SENSORS
            or track == Track.ALL_SENSORS_HDMAP_WAYPOINTS):
        point_cloud_stream = erdos.IngestStream()
    else:
        point_cloud_stream = pylot.operator_creator.add_depth_estimation(
            camera_streams[LEFT_CAMERA_NAME],
            camera_streams[RIGHT_CAMERA_NAME],
            camera_setups[CENTER_CAMERA_NAME])

    obstacles_stream = pylot.operator_creator.add_obstacle_detection(
        camera_streams[CENTER_CAMERA_NAME], time_to_decision_loop_stream)[0]
    # Adds an operator that finds the world locations of the obstacles.
    obstacles_stream = pylot.operator_creator.add_obstacle_location_finder(
        obstacles_stream, point_cloud_stream, pose_stream,
        camera_streams[CENTER_CAMERA_NAME], camera_setups[CENTER_CAMERA_NAME])

    traffic_lights_stream = pylot.operator_creator.add_traffic_light_detector(
        camera_streams[TL_CAMERA_NAME])
    # Adds an operator that finds the world locations of the traffic lights.
    traffic_lights_stream = \
        pylot.operator_creator.add_obstacle_location_finder(
            traffic_lights_stream, point_cloud_stream, pose_stream,
            camera_streams[TL_CAMERA_NAME], camera_setups[TL_CAMERA_NAME])

    waypoints_stream = pylot.operator_creator.add_waypoint_planning(
        pose_stream, open_drive_stream, global_trajectory_stream,
        obstacles_stream, traffic_lights_stream, None)

    if FLAGS.visualize_rgb_camera:
        pylot.operator_creator.add_camera_visualizer(
            camera_streams[CENTER_CAMERA_NAME], CENTER_CAMERA_NAME)

    control_stream = pylot.operator_creator.add_pid_agent(
        pose_stream, waypoints_stream)
    extract_control_stream = erdos.ExtractStream(control_stream)

    time_to_decision_stream = pylot.operator_creator.add_time_to_decision(
        pose_stream, obstacles_stream)
    time_to_decision_loop_stream.set(time_to_decision_stream)

    return (camera_streams, pose_stream, global_trajectory_stream,
            open_drive_stream, point_cloud_stream, extract_control_stream)
Example #3
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-p', '--payload', default=8, type=int)
    parser.add_argument('-s', '--scenario', default="dataflow")
    parser.add_argument('-n', '--name', default="test")
    parser.add_argument('-t', '--transport', default="tcp")
    parser.add_argument('-g', '--graph-file')
    parser.add_argument('-o', '--operators', action='store_true')
    args = parser.parse_args()

    # Creating a Zenoh.net session (only needed in the non-operator branch below).
    # session = zenoh.net.open({})
    # Creates and runs the dataflow graph.

    if args.operators:
        (count_stream, ) = erdos.connect(ZenohRecvOp,
                                         erdos.OperatorConfig(), [],
                                         size=args.payload)
        erdos.connect(CallbackOp,
                      erdos.OperatorConfig(), [count_stream],
                      size=args.payload,
                      scenario=args.scenario,
                      name=args.name,
                      transport=args.transport)
        if args.graph_file is not None:
            erdos.run(args.graph_file)
        else:
            erdos.run()
    else:
        # Creating Zenoh.net session.
        session = zenoh.net.open({})

        ingest_stream = erdos.IngestStream()

        erdos.connect(CallbackOp,
                      erdos.OperatorConfig(), [ingest_stream],
                      size=args.payload,
                      scenario=args.scenario,
                      name=args.name,
                      transport=args.transport)

        if args.graph_file is not None:
            erdos.run_async(args.graph_file)
        else:
            erdos.run_async()

        def listener(sample):
            msg = pickle.loads(sample.payload)
            ingest_stream.send(msg)

        # Creating subscriber info
        sub_info = SubInfo(Reliability.Reliable, SubMode.Push)
        # declaring subscriber
        sub = session.declare_subscriber(f'/thr/test/{args.payload}', sub_info,
                                         listener)

        while True:
            time.sleep(60)
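The CallbackOp (and ZenohRecvOp) operators used above are not shown. As a rough idea of the shape such an erdos operator takes (a sketch, not the actual benchmark code), a CallbackOp that registers a callback on its input stream and logs each message could look like this; the keyword arguments passed to erdos.connect end up in the operator's constructor.

class CallbackOp(erdos.Operator):
    """Hypothetical sketch: logs every message received on its input stream."""
    def __init__(self, read_stream, size=8, scenario="dataflow", name="test",
                 transport="tcp"):
        read_stream.add_callback(self.on_msg)
        self._size = size
        self._scenario = scenario
        self._name = name
        self._transport = transport

    @staticmethod
    def connect(read_stream):
        # No output streams.
        return []

    def on_msg(self, msg):
        print("erdos-{},{},{},{},{}".format(self._transport, self._scenario,
                                            self._name, self._size,
                                            msg.timestamp.coordinates[0]))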
Example #4
def create_data_flow():
    """ Create the challenge data-flow graph."""
    track = get_track()
    camera_setups = create_camera_setups(track)
    camera_streams = {}
    for name in camera_setups:
        camera_streams[name] = erdos.IngestStream()
    can_bus_stream = erdos.IngestStream()
    global_trajectory_stream = erdos.IngestStream()
    open_drive_stream = erdos.IngestStream()
    if track != Track.ALL_SENSORS_HDMAP_WAYPOINTS:
        # We do not have access to the open drive map. Send top watermark.
        open_drive_stream.send(
            erdos.WatermarkMessage(erdos.Timestamp(coordinates=[sys.maxsize])))

    if (track == Track.ALL_SENSORS
            or track == Track.ALL_SENSORS_HDMAP_WAYPOINTS):
        point_cloud_stream = erdos.IngestStream()
    else:
        point_cloud_stream = pylot.operator_creator.add_depth_estimation(
            camera_streams[LEFT_CAMERA_NAME],
            camera_streams[RIGHT_CAMERA_NAME],
            camera_setups[CENTER_CAMERA_NAME])

    obstacles_stream = pylot.operator_creator.add_obstacle_detection(
        camera_streams[CENTER_CAMERA_NAME])[0]
    traffic_lights_stream = pylot.operator_creator.add_traffic_light_detector(
        camera_streams[TL_CAMERA_NAME])

    waypoints_stream = pylot.operator_creator.add_waypoint_planning(
        can_bus_stream, open_drive_stream, global_trajectory_stream, None)

    if FLAGS.visualize_rgb_camera:
        pylot.operator_creator.add_camera_visualizer(
            camera_streams[CENTER_CAMERA_NAME], CENTER_CAMERA_NAME)

    control_stream = pylot.operator_creator.add_pylot_agent(
        can_bus_stream, waypoints_stream, traffic_lights_stream,
        obstacles_stream, point_cloud_stream, open_drive_stream,
        camera_setups[CENTER_CAMERA_NAME])
    extract_control_stream = erdos.ExtractStream(control_stream)
    return (camera_streams, can_bus_stream, global_trajectory_stream,
            open_drive_stream, point_cloud_stream, extract_control_stream)
Example #5
def main():
    # Create the first dataflow.
    ingest_stream = erdos.IngestStream()
    (map_stream, ) = erdos.connect(map.Map,
                                   erdos.OperatorConfig(name="Double"),
                                   [ingest_stream],
                                   function=double)
    extract_stream = erdos.ExtractStream(map_stream)
    node_handle = erdos.run_async()

    for t in range(5):
        send_msg = erdos.Message(erdos.Timestamp(coordinates=[t]), t)
        ingest_stream.send(send_msg)
        print("IngestStream: sent {send_msg}".format(send_msg=send_msg))
        recv_msg = extract_stream.read()
        print("ExtractStream: received {recv_msg}".format(recv_msg=recv_msg))

        time.sleep(1)

    node_handle.shutdown()
    erdos.reset()

    # Create a new dataflow.
    ingest_stream = erdos.IngestStream()
    (map_stream, ) = erdos.connect(map.Map,
                                   erdos.OperatorConfig(name="Square"),
                                   [ingest_stream],
                                   function=square)
    extract_stream = erdos.ExtractStream(map_stream)
    node_handle = erdos.run_async()

    for t in range(5):
        send_msg = erdos.Message(erdos.Timestamp(coordinates=[t]), t)
        ingest_stream.send(send_msg)
        print("IngestStream: sent {send_msg}".format(send_msg=send_msg))
        recv_msg = extract_stream.read()
        print("ExtractStream: received {recv_msg}".format(recv_msg=recv_msg))

        time.sleep(1)

    node_handle.shutdown()
Example #6
def main():
    ingest_stream = erdos.IngestStream()
    (s, ) = erdos.connect(NoopOp, erdos.OperatorConfig(), [ingest_stream])
    extract_stream = erdos.ExtractStream(s)

    erdos.run_async()

    timestamp = erdos.Timestamp(is_top=True)
    send_msg = erdos.WatermarkMessage(timestamp)
    print("IngestStream: sending {send_msg}".format(send_msg=send_msg))
    ingest_stream.send(send_msg)
    assert ingest_stream.is_closed()

    recv_msg = extract_stream.read()
    print("ExtractStream: received {recv_msg}".format(recv_msg=recv_msg))
    assert recv_msg.is_top
    assert extract_stream.is_closed()
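NoopOp is not shown above. For the asserts to hold, it only needs to declare an output stream and let the default OperatorConfig behaviour flow watermarks from input to output (the default is implied by Example #12, which disables it explicitly). A minimal sketch under that assumption:

class NoopOp(erdos.Operator):
    """Hypothetical sketch: one input, one output, never sends data.
    Watermarks are flowed automatically by the default OperatorConfig, so
    the top watermark sent on the ingest stream reaches the extract stream
    and closes it."""
    def __init__(self, read_stream, write_stream):
        pass

    @staticmethod
    def connect(read_stream):
        return [erdos.WriteStream()]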
Example #7
def main():
    ingest_stream = erdos.IngestStream()
    (square_stream, ) = erdos.connect(Map,
                                      erdos.OperatorConfig(), [ingest_stream],
                                      function=square_msg)
    extract_stream = erdos.ExtractStream(square_stream)

    erdos.run_async()

    count = 0
    while True:
        timestamp = erdos.Timestamp(coordinates=[count])
        send_msg = erdos.Message(timestamp, count)
        print("IngestStream: sending {send_msg}".format(send_msg=send_msg))
        ingest_stream.send(send_msg)
        recv_msg = extract_stream.read()
        print("ExtractStream: received {recv_msg}".format(recv_msg=recv_msg))

        count += 1
        time.sleep(1)
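Neither the Map operator nor square_msg is shown above. A self-contained sketch consistent with how they are connected (assuming the function kwarg receives the whole message, as the name square_msg suggests) could be:

class Map(erdos.Operator):
    """Hypothetical sketch: applies `function` to every message and forwards
    the result on its output stream."""
    def __init__(self, read_stream, write_stream, function):
        read_stream.add_callback(self.on_msg, [write_stream])
        self._function = function

    @staticmethod
    def connect(read_stream):
        return [erdos.WriteStream()]

    def on_msg(self, msg, write_stream):
        write_stream.send(self._function(msg))


def square_msg(msg):
    # Returns a new message carrying the squared payload at the same timestamp.
    return erdos.Message(msg.timestamp, msg.data * msg.data)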
Example #8
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-p', '--payload', default=8, type=int)
    parser.add_argument('-s', '--scenario', default="dataflow")
    parser.add_argument('-n', '--name', default="test")
    parser.add_argument('-i', '--interval', default=1, type=int)
    parser.add_argument('-t', '--transport', default="tcp")
    parser.add_argument('-g', '--graph-file')

    args = parser.parse_args()
    """Creates and runs the dataflow graph."""
    ingest_stream = erdos.IngestStream()
    (pong_stream, ) = erdos.connect(PongOp, erdos.OperatorConfig(),
                                    [ingest_stream])
    extract_stream = erdos.ExtractStream(pong_stream)

    if args.graph_file is not None:
        erdos.run_async(args.graph_file)
    else:
        erdos.run_async()

    count = 0
    payload = '0' * args.payload

    while True:
        msg = erdos.Message(erdos.Timestamp(coordinates=[count]), payload)
        t0 = datetime.datetime.now()
        ingest_stream.send(msg)
        #print("PingOp: sending {msg}".format(msg=msg))
        count += 1
        data = extract_stream.read()
        t1 = datetime.datetime.now()
        #print("PingOp: received {msg}".format(msg=data))
        t = t1 - t0
        print(
            f"erdos-{args.transport},{args.scenario},latency,{args.name},{args.payload},{data.timestamp.coordinates[0]},{t.microseconds}"
        )
        time.sleep(args.interval)
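PongOp is not shown above; the latency measured in the loop is the round trip through it. A plausible minimal echo operator (a sketch, not the actual benchmark operator) is:

class PongOp(erdos.Operator):
    """Hypothetical sketch: echoes every message back so the sender can
    measure round-trip latency."""
    def __init__(self, read_stream, write_stream):
        read_stream.add_callback(self.on_msg, [write_stream])

    @staticmethod
    def connect(read_stream):
        return [erdos.WriteStream()]

    def on_msg(self, msg, write_stream):
        write_stream.send(erdos.Message(msg.timestamp, msg.data))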
Example #9
def driver():
    transform = pylot.utils.Transform(CENTER_CAMERA_LOCATION,
                                      pylot.utils.Rotation())

    control_loop_stream = erdos.LoopStream()
    time_to_decision_loop_stream = erdos.LoopStream()
    if FLAGS.carla_mode == 'pseudo-asynchronous':
        release_sensor_stream = erdos.LoopStream()
    else:
        release_sensor_stream = erdos.IngestStream()
    notify_streams = []

    # Create carla operator.
    (
        pose_stream,
        pose_stream_for_control,
        ground_traffic_lights_stream,
        ground_obstacles_stream,
        ground_speed_limit_signs_stream,
        ground_stop_signs_stream,
        vehicle_id_stream,
        open_drive_stream,
        global_trajectory_stream,
    ) = pylot.operator_creator.add_carla_bridge(control_loop_stream,
                                                release_sensor_stream)

    # Add sensors.
    (center_camera_stream, notify_rgb_stream,
     rgb_camera_setup) = pylot.operator_creator.add_rgb_camera(
         transform, vehicle_id_stream, release_sensor_stream)
    notify_streams.append(notify_rgb_stream)
    if pylot.flags.must_add_depth_camera_sensor():
        (depth_camera_stream, notify_depth_stream,
         depth_camera_setup) = pylot.operator_creator.add_depth_camera(
             transform, vehicle_id_stream, release_sensor_stream)
    else:
        depth_camera_stream = None
    if pylot.flags.must_add_segmented_camera_sensor():
        (ground_segmented_stream, notify_segmented_stream,
         _) = pylot.operator_creator.add_segmented_camera(
             transform, vehicle_id_stream, release_sensor_stream)
    else:
        ground_segmented_stream = None

    if pylot.flags.must_add_lidar_sensor():
        # Place Lidar sensor in the same location as the center camera.
        (point_cloud_stream, notify_lidar_stream,
         lidar_setup) = pylot.operator_creator.add_lidar(
             transform, vehicle_id_stream, release_sensor_stream)
    else:
        point_cloud_stream = None

    if FLAGS.obstacle_location_finder_sensor == 'lidar':
        depth_stream = point_cloud_stream
        # Camera sensors are slower than the lidar sensor.
        notify_streams.append(notify_lidar_stream)
    elif FLAGS.obstacle_location_finder_sensor == 'depth_camera':
        depth_stream = depth_camera_stream
        notify_streams.append(notify_depth_stream)
    else:
        raise ValueError(
            'Unknown --obstacle_location_finder_sensor value {}'.format(
                FLAGS.obstacle_location_finder_sensor))

    imu_stream = None
    if pylot.flags.must_add_imu_sensor():
        (imu_stream,
         _) = pylot.operator_creator.add_imu(transform, vehicle_id_stream)

    obstacles_stream = \
        pylot.component_creator.add_obstacle_detection(
            center_camera_stream, rgb_camera_setup, pose_stream,
            depth_stream, depth_camera_stream, ground_segmented_stream,
            ground_obstacles_stream, ground_speed_limit_signs_stream,
            ground_stop_signs_stream, time_to_decision_loop_stream)
    traffic_lights_stream = \
        pylot.component_creator.add_traffic_light_detection(
            transform, vehicle_id_stream, release_sensor_stream, pose_stream,
            depth_stream, ground_traffic_lights_stream)

    lane_detection_stream = pylot.component_creator.add_lane_detection(
        center_camera_stream, pose_stream)

    obstacles_tracking_stream = pylot.component_creator.add_obstacle_tracking(
        center_camera_stream, rgb_camera_setup, obstacles_stream, depth_stream,
        vehicle_id_stream, pose_stream, ground_obstacles_stream,
        time_to_decision_loop_stream)

    segmented_stream = pylot.component_creator.add_segmentation(
        center_camera_stream, ground_segmented_stream)

    depth_stream = pylot.component_creator.add_depth(transform,
                                                     vehicle_id_stream,
                                                     rgb_camera_setup,
                                                     depth_camera_stream)

    if FLAGS.fusion:
        pylot.operator_creator.add_fusion(pose_stream, obstacles_stream,
                                          depth_stream,
                                          ground_obstacles_stream)

    prediction_stream = pylot.component_creator.add_prediction(
        obstacles_tracking_stream, vehicle_id_stream, transform,
        release_sensor_stream, pose_stream, point_cloud_stream, lidar_setup)

    # Add planning operators.
    goal_location = pylot.utils.Location(float(FLAGS.goal_location[0]),
                                         float(FLAGS.goal_location[1]),
                                         float(FLAGS.goal_location[2]))
    waypoints_stream = pylot.component_creator.add_planning(
        goal_location, pose_stream, prediction_stream, center_camera_stream,
        obstacles_stream, traffic_lights_stream, open_drive_stream,
        global_trajectory_stream, time_to_decision_loop_stream)

    # Add a synchronizer in the pseudo-asynchronous mode.
    if FLAGS.carla_mode == "pseudo-asynchronous":
        (
            waypoints_stream_for_control,
            pose_stream_for_control,
            sensor_ready_stream,
        ) = pylot.operator_creator.add_planning_pose_synchronizer(
            waypoints_stream, pose_stream_for_control, pose_stream,
            *notify_streams)
        release_sensor_stream.set(sensor_ready_stream)
    else:
        waypoints_stream_for_control = waypoints_stream
        pose_stream_for_control = pose_stream

    # Add the behaviour planning and control operator.
    control_stream = pylot.component_creator.add_control(
        pose_stream_for_control, waypoints_stream_for_control)
    control_loop_stream.set(control_stream)

    if FLAGS.evaluation:
        # Add the collision sensor.
        collision_stream = pylot.operator_creator.add_collision_sensor(
            vehicle_id_stream)

        # Add the lane invasion sensor.
        lane_invasion_stream = pylot.operator_creator.add_lane_invasion_sensor(
            vehicle_id_stream)

        # Add the traffic light invasion sensor.
        traffic_light_invasion_stream = \
            pylot.operator_creator.add_traffic_light_invasion_sensor(
                vehicle_id_stream, pose_stream)

        # Add the evaluation logger.
        pylot.operator_creator.add_eval_metric_logging(
            collision_stream, lane_invasion_stream,
            traffic_light_invasion_stream, imu_stream, pose_stream,
            obstacles_stream)

        # Add control evaluation logging operator.
        pylot.operator_creator.add_control_evaluation(
            pose_stream_for_control, waypoints_stream_for_control)

    time_to_decision_stream = pylot.operator_creator.add_time_to_decision(
        pose_stream, obstacles_stream)
    time_to_decision_loop_stream.set(time_to_decision_stream)

    pylot.operator_creator.add_sensor_visualizers(center_camera_stream,
                                                  depth_camera_stream,
                                                  point_cloud_stream,
                                                  ground_segmented_stream,
                                                  imu_stream, pose_stream)
    erdos.run_async()

    # If we did not use the pseudo-asynchronous mode, ask the sensors to
    # release their readings whenever they are ready.
    if FLAGS.carla_mode != "pseudo-asynchronous":
        release_sensor_stream.send(
            erdos.WatermarkMessage(erdos.Timestamp(is_top=True)))
Example #10
def main(argv):
    transform = pylot.utils.Transform(CENTER_CAMERA_LOCATION,
                                      pylot.utils.Rotation())

    control_loop_stream = erdos.LoopStream()
    release_sensor_stream = erdos.IngestStream()
    pipeline_finish_notify_stream = erdos.IngestStream()
    # Create an operator that connects to the simulator.
    (
        pose_stream,
        pose_stream_for_control,
        ground_traffic_lights_stream,
        ground_obstacles_stream,
        ground_speed_limit_signs_stream,
        ground_stop_signs_stream,
        vehicle_id_stream,
        open_drive_stream,
        global_trajectory_stream,
    ) = pylot.operator_creator.add_simulator_bridge(
        control_loop_stream, release_sensor_stream,
        pipeline_finish_notify_stream)

    # Add sensors.
    (center_camera_stream, notify_rgb_stream,
     rgb_camera_setup) = pylot.operator_creator.add_rgb_camera(
         transform, vehicle_id_stream, release_sensor_stream)
    (depth_camera_stream, _,
     depth_camera_setup) = pylot.operator_creator.add_depth_camera(
         transform, vehicle_id_stream, release_sensor_stream)
    (segmented_stream, _,
     _) = pylot.operator_creator.add_segmented_camera(transform,
                                                      vehicle_id_stream,
                                                      release_sensor_stream)

    if FLAGS.log_rgb_camera:
        pylot.operator_creator.add_camera_logging(
            center_camera_stream, 'center_camera_logger_operator', 'center-')

    if FLAGS.log_segmented_camera:
        pylot.operator_creator.add_camera_logging(
            segmented_stream, 'center_segmented_camera_logger_operator',
            'segmented-')

    if FLAGS.log_depth_camera:
        pylot.operator_creator.add_camera_logging(
            depth_camera_stream, 'depth_camera_logger_operator', 'depth-')

    imu_stream = None
    if FLAGS.log_imu:
        (imu_stream,
         _) = pylot.operator_creator.add_imu(transform, vehicle_id_stream)
        pylot.operator_creator.add_imu_logging(imu_stream)

    traffic_lights_stream = None
    traffic_light_camera_stream = None
    if FLAGS.log_traffic_lights:
        (traffic_light_camera_stream, _,
         traffic_light_camera_setup) = pylot.operator_creator.add_rgb_camera(
             transform, vehicle_id_stream, release_sensor_stream,
             'traffic_light_camera', 45)
        pylot.operator_creator.add_camera_logging(
            traffic_light_camera_stream,
            'traffic_light_camera_logger_operator', 'traffic-light-')
        (traffic_light_segmented_camera_stream, _, _) = \
            pylot.operator_creator.add_segmented_camera(
                transform,
                vehicle_id_stream,
                release_sensor_stream,
                'traffic_light_segmented_camera',
                45)
        (traffic_light_depth_camera_stream, _, _) = \
            pylot.operator_creator.add_depth_camera(
                transform, vehicle_id_stream, release_sensor_stream,
                'traffic_light_depth_camera', 45)
        traffic_lights_stream = \
            pylot.operator_creator.add_perfect_traffic_light_detector(
                ground_traffic_lights_stream,
                traffic_light_camera_stream,
                traffic_light_depth_camera_stream,
                traffic_light_segmented_camera_stream,
                pose_stream)
        pylot.operator_creator.add_bounding_box_logging(traffic_lights_stream)

    if FLAGS.log_left_right_cameras:
        (left_camera_stream, right_camera_stream, _,
         _) = pylot.operator_creator.add_left_right_cameras(
             transform, vehicle_id_stream, release_sensor_stream)
        pylot.operator_creator.add_camera_logging(
            left_camera_stream, 'left_camera_logger_operator', 'left-')
        pylot.operator_creator.add_camera_logging(
            right_camera_stream, 'right_camera_logger_operator', 'right-')

    point_cloud_stream = None
    if FLAGS.log_lidar:
        (point_cloud_stream, _,
         _) = pylot.operator_creator.add_lidar(transform, vehicle_id_stream,
                                               release_sensor_stream)
        pylot.operator_creator.add_lidar_logging(point_cloud_stream)

    obstacles_stream = None
    if FLAGS.log_obstacles:
        obstacles_stream = pylot.operator_creator.add_perfect_detector(
            depth_camera_stream, center_camera_stream, segmented_stream,
            pose_stream, ground_obstacles_stream,
            ground_speed_limit_signs_stream, ground_stop_signs_stream)
        pylot.operator_creator.add_bounding_box_logging(obstacles_stream)

    if FLAGS.log_multiple_object_tracker:
        pylot.operator_creator.add_multiple_object_tracker_logging(
            obstacles_stream)

    obstacles_tracking_stream = None
    if FLAGS.log_trajectories or FLAGS.log_chauffeur:
        obstacles_tracking_stream = \
            pylot.operator_creator.add_perfect_tracking(
                vehicle_id_stream,
                ground_obstacles_stream,
                pose_stream)
        if FLAGS.log_trajectories:
            pylot.operator_creator.add_trajectory_logging(
                obstacles_tracking_stream)

    top_down_segmented_stream = None
    top_down_camera_setup = None
    if FLAGS.log_chauffeur or FLAGS.log_top_down_segmentation:
        top_down_transform = pylot.utils.get_top_down_transform(
            transform, FLAGS.top_down_camera_altitude)
        (top_down_segmented_stream, _, _) = \
            pylot.operator_creator.add_segmented_camera(
                top_down_transform,
                vehicle_id_stream,
                release_sensor_stream,
                name='top_down_segmented_camera',
                fov=90)

        if FLAGS.log_top_down_segmentation:
            pylot.operator_creator.add_camera_logging(
                top_down_segmented_stream,
                'top_down_segmented_logger_operator', 'top-down-segmented-')

        if FLAGS.log_chauffeur:
            (top_down_camera_stream,
             top_down_camera_setup) = \
                pylot.operator_creator.add_rgb_camera(
                    top_down_transform,
                    vehicle_id_stream,
                    name='top_down_rgb_camera',
                    fov=90)
            pylot.operator_creator.add_chauffeur_logging(
                vehicle_id_stream, pose_stream, obstacles_tracking_stream,
                top_down_camera_stream, top_down_segmented_stream,
                top_down_camera_setup)

    # TODO: Hack! We synchronize on a single stream, based on a guesstimate
    # of which stream is slowest. Instead, we should synchronize on all output
    # streams, and we should ensure that even the operators without output
    # streams complete.
    if FLAGS.control == 'simulator_auto_pilot':
        # We insert a synchronizing operator that sends back a command when
        # the low watermark progresses on all input streams.
        stream_to_sync_on = center_camera_stream
        if obstacles_tracking_stream is not None:
            stream_to_sync_on = obstacles_tracking_stream
        if traffic_lights_stream is not None:
            stream_to_sync_on = traffic_lights_stream
        if obstacles_stream is not None:
            stream_to_sync_on = obstacles_stream
        control_stream = pylot.operator_creator.add_synchronizer(
            vehicle_id_stream, stream_to_sync_on)
        control_loop_stream.set(control_stream)
    else:
        raise ValueError(
            "Must be in auto pilot mode. Pass --control=simulator_auto_pilot")

    control_display_stream = None
    streams_to_send_top_on = []
    if pylot.flags.must_visualize():
        control_display_stream, ingest_streams = \
            pylot.operator_creator.add_visualizer(
                pose_stream=pose_stream,
                camera_stream=center_camera_stream,
                tl_camera_stream=traffic_light_camera_stream,
                depth_stream=depth_camera_stream,
                point_cloud_stream=point_cloud_stream,
                segmentation_stream=segmented_stream,
                imu_stream=imu_stream,
                obstacles_stream=obstacles_stream,
                traffic_lights_stream=traffic_lights_stream,
                tracked_obstacles_stream=obstacles_tracking_stream)
        streams_to_send_top_on += ingest_streams

    # Connect an instance to the simulator to make sure that we can turn the
    # synchronous mode off after the script finishes running.
    client, world = pylot.simulation.utils.get_world(FLAGS.simulator_host,
                                                     FLAGS.simulator_port,
                                                     FLAGS.simulator_timeout)

    # Run the data-flow.
    node_handle = erdos.run_async()

    signal.signal(signal.SIGINT, shutdown)

    # Ask all sensors to release their data.
    release_sensor_stream.send(
        erdos.WatermarkMessage(erdos.Timestamp(is_top=True)))
    for stream in streams_to_send_top_on:
        stream.send(erdos.WatermarkMessage(erdos.Timestamp(is_top=True)))

    try:
        if pylot.flags.must_visualize():
            pylot.utils.run_visualizer_control_loop(control_display_stream)
        node_handle.wait()
    except KeyboardInterrupt:
        node_handle.shutdown()
        pylot.simulation.utils.set_asynchronous_mode(world)
        if pylot.flags.must_visualize():
            import pygame
            pygame.quit()
Example #11
def create_data_flow():
    """Creates a dataflow graph of operators.

    This is the place to add other operators (e.g., other detectors,
    behavior planning).
    """
    streams_to_send_top_on = []
    camera_setups = create_camera_setups()
    # Creates a dataflow stream for each camera.
    camera_streams = {}
    for name in camera_setups:
        camera_streams[name] = erdos.IngestStream()
    # Creates a stream on which the agent sends the high-level route the
    # agent must follow in the challenge.
    global_trajectory_stream = erdos.IngestStream()
    # Creates a stream on which the agent sends the OpenDRIVE map data it
    # receives when it executes in the MAP track.
    open_drive_stream = erdos.IngestStream()
    # Creates a stream on which the agent sends point cloud messages it
    # receives from the LiDAR sensor.
    point_cloud_stream = erdos.IngestStream()
    imu_stream = erdos.IngestStream()
    gnss_stream = erdos.IngestStream()
    route_stream = erdos.IngestStream()
    time_to_decision_loop_stream = erdos.LoopStream()

    if FLAGS.localization:
        # Pylot localization is enabled. Add the localization operator to
        # the dataflow. The operator receives GNSS and IMU messages, and
        # uses an Extended Kalman Filter to compute poses.
        pose_stream = pylot.operator_creator.add_localization(
            imu_stream, gnss_stream, route_stream)
    else:
        # The agent either directly forwards the poses it receives from
        # the challenge, which are noisy, or the perfect poses if the
        # --perfect_localization flag is set.
        pose_stream = erdos.IngestStream()

    # Stream on which the obstacles are sent when the agent is using perfect
    # detection.
    perfect_obstacles_stream = erdos.IngestStream()
    if FLAGS.simulator_obstacle_detection:
        # Execute with perfect perception. In this configuration, the agent
        # directly gets the location of the other agents from the simulator.
        # This configuration is meant for testing and debugging.
        obstacles_stream = perfect_obstacles_stream
    elif any('efficientdet' in model
             for model in FLAGS.obstacle_detection_model_names):
        # Add an operator that runs EfficientDet to detect obstacles.
        obstacles_stream = pylot.operator_creator.\
            add_efficientdet_obstacle_detection(
                camera_streams[CENTER_CAMERA_NAME],
                time_to_decision_loop_stream)[0]
        if not (FLAGS.evaluate_obstacle_detection
                or FLAGS.evaluate_obstacle_tracking):
            streams_to_send_top_on.append(perfect_obstacles_stream)
    else:
        # Add an operator that uses one of the Tensorflow model zoo
        # detection models. By default, we use FasterRCNN.
        obstacles_stream = pylot.operator_creator.add_obstacle_detection(
            camera_streams[CENTER_CAMERA_NAME],
            time_to_decision_loop_stream)[0]
        if not (FLAGS.evaluate_obstacle_detection
                or FLAGS.evaluate_obstacle_tracking):
            streams_to_send_top_on.append(perfect_obstacles_stream)

    # Stream on which the traffic lights are sent when the agent is
    # using perfect traffic light detection.
    perfect_traffic_lights_stream = erdos.IngestStream()
    if FLAGS.simulator_traffic_light_detection:
        # In this debug configuration, the agent is using perfectly located
        # traffic lights it receives directly from the simulator. Therefore,
        # there's no need for a traffic light detector.
        traffic_lights_stream = perfect_traffic_lights_stream
        camera_streams[TL_CAMERA_NAME] = erdos.IngestStream()
        streams_to_send_top_on.append(camera_streams[TL_CAMERA_NAME])
    else:
        # Adds a traffic light detector operator, which uses the camera with
        # the small fov.
        traffic_lights_stream = \
            pylot.operator_creator.add_traffic_light_detector(
                camera_streams[TL_CAMERA_NAME], time_to_decision_loop_stream)
        # Adds an operator that finds the world location of the traffic lights.
        # The operator synchronizes LiDAR point cloud readings with camera
        # frames, and uses them to compute the depth to traffic light bounding
        # boxes.
        traffic_lights_stream = \
            pylot.operator_creator.add_obstacle_location_finder(
                traffic_lights_stream, point_cloud_stream, pose_stream,
                camera_setups[TL_CAMERA_NAME])
        # We do not send perfectly located traffic lights in this
        # configuration. Therefore, ensure that the stream is "closed"
        # (i.e., send a top watermark)
        streams_to_send_top_on.append(perfect_traffic_lights_stream)

    vehicle_id_stream = erdos.IngestStream()
    if not (FLAGS.perfect_obstacle_tracking or FLAGS.perfect_localization):
        # The vehicle_id_stream is only used when perfect localization
        # or perfect obstacle tracking is enabled.
        streams_to_send_top_on.append(vehicle_id_stream)

    # Adds an operator for tracking detected agents. The operator uses the
    # frames from the center camera, and the bounding boxes found by the
    # obstacle detector operator.
    obstacles_tracking_stream = pylot.component_creator.add_obstacle_tracking(
        camera_streams[CENTER_CAMERA_NAME],
        camera_setups[CENTER_CAMERA_NAME],
        obstacles_stream,
        depth_stream=point_cloud_stream,
        vehicle_id_stream=vehicle_id_stream,
        pose_stream=pose_stream,
        ground_obstacles_stream=perfect_obstacles_stream,
        time_to_decision_stream=time_to_decision_loop_stream)

    if FLAGS.execution_mode == 'challenge-sensors':
        # The agent is running in the sensors-only track. Therefore, we need
        # to add a lane detector because the agent does not have access to
        # the OpenDrive map.
        lanes_stream = pylot.operator_creator.add_lanenet_detection(
            camera_streams[LANE_CAMERA_NAME])
    else:
        # The lanes stream is not used when running in the Map track.
        # We add the stream to the list of streams that are not used, and
        # must be manually "closed" (i.e., send a top watermark).
        lanes_stream = erdos.IngestStream()
        streams_to_send_top_on.append(lanes_stream)

    # The agent uses a linear predictor to compute future trajectories
    # of the other agents.
    prediction_stream, _, _ = pylot.component_creator.add_prediction(
        obstacles_tracking_stream,
        vehicle_id_stream,
        time_to_decision_loop_stream,
        pose_stream=pose_stream)

    # Adds a planner to the agent. The planner receives the pose of
    # the ego-vehicle, detected traffic lights, predictions for other
    # agents, the route the agent must follow, and the open drive data if
    # the agent is executing in the Map track, or detected lanes if it is
    # executing in the Sensors-only track.
    waypoints_stream = pylot.component_creator.add_planning(
        None, pose_stream, prediction_stream, traffic_lights_stream,
        lanes_stream, open_drive_stream, global_trajectory_stream,
        time_to_decision_loop_stream)

    if pylot.flags.must_visualize():
        # Adds a visualization dataflow operator if any of the
        # --visualize_* flags is enabled. The operator creates a pygame window
        # in which the different sensors, detections can be visualized.
        control_display_stream, ingest_streams = \
            pylot.operator_creator.add_visualizer(
                pose_stream=pose_stream,
                camera_stream=camera_streams[CENTER_CAMERA_NAME],
                tl_camera_stream=camera_streams[TL_CAMERA_NAME],
                point_cloud_stream=point_cloud_stream,
                obstacles_stream=obstacles_stream,
                traffic_lights_stream=traffic_lights_stream,
                tracked_obstacles_stream=obstacles_tracking_stream,
                waypoints_stream=waypoints_stream,
                lane_detection_stream=lanes_stream,
                prediction_stream=prediction_stream)
        streams_to_send_top_on += ingest_streams
    else:
        control_display_stream = None

    # Adds a controller which tries to follow the waypoints computed
    # by the planner.
    control_stream = pylot.component_creator.add_control(
        pose_stream, waypoints_stream)
    # The controller returns a stream of commands (i.e., throttle, steer)
    # from which the agent can read the command it must return to the
    # challenge.
    extract_control_stream = erdos.ExtractStream(control_stream)

    pylot.component_creator.add_evaluation(vehicle_id_stream, pose_stream,
                                           imu_stream)

    # Operator that computes how much time each component gets to execute.
    # This is needed in Pylot, but can be ignored when running in challenge
    # mode.
    time_to_decision_stream = pylot.operator_creator.add_time_to_decision(
        pose_stream, obstacles_stream)
    time_to_decision_loop_stream.set(time_to_decision_stream)

    return (camera_streams, pose_stream, route_stream,
            global_trajectory_stream, open_drive_stream, point_cloud_stream,
            imu_stream, gnss_stream, extract_control_stream,
            control_display_stream, perfect_obstacles_stream,
            perfect_traffic_lights_stream, vehicle_id_stream,
            streams_to_send_top_on)
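create_data_flow() returns streams_to_send_top_on precisely so the caller can close the inputs that are never fed in the chosen configuration; without a top watermark on them, downstream operators would block forever. After starting the dataflow, the agent typically does something along these lines (the unpacking mirrors the return statement above):

(camera_streams, pose_stream, route_stream, global_trajectory_stream,
 open_drive_stream, point_cloud_stream, imu_stream, gnss_stream,
 extract_control_stream, control_display_stream, perfect_obstacles_stream,
 perfect_traffic_lights_stream, vehicle_id_stream,
 streams_to_send_top_on) = create_data_flow()
erdos.run_async()
# Close every stream this configuration will never send data on.
for stream in streams_to_send_top_on:
    stream.send(erdos.WatermarkMessage(erdos.Timestamp(is_top=True)))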
Example #12
def driver():
    transform = pylot.utils.Transform(CENTER_CAMERA_LOCATION,
                                      pylot.utils.Rotation(pitch=-15))
    streams_to_send_top_on = []
    control_loop_stream = erdos.LoopStream()
    # time_to_decision_loop_stream = erdos.LoopStream()
    if FLAGS.carla_mode == 'pseudo-asynchronous':
        release_sensor_stream = erdos.LoopStream()
        pipeline_finish_notify_stream = erdos.LoopStream()
    else:
        release_sensor_stream = erdos.IngestStream()
        pipeline_finish_notify_stream = erdos.IngestStream()
    notify_streams = []

    # Create operator that bridges between pipeline and CARLA.
    (
        pose_stream,
        pose_stream_for_control,
        ground_traffic_lights_stream,
        ground_obstacles_stream,
        ground_speed_limit_signs_stream,
        ground_stop_signs_stream,
        vehicle_id_stream,
        open_drive_stream,
        global_trajectory_stream,
    ) = pylot.operator_creator.add_carla_bridge(
        control_loop_stream,
        release_sensor_stream,
        pipeline_finish_notify_stream,
    )

    ######################
    # Add sensors.
    ######################
    (center_camera_stream, notify_rgb_stream,
     center_camera_setup) = pylot.operator_creator.add_rgb_camera(
         transform, vehicle_id_stream, release_sensor_stream)
    notify_streams.append(notify_rgb_stream)
    if pylot.flags.must_add_depth_camera_sensor():
        (depth_camera_stream, notify_depth_stream,
         depth_camera_setup) = pylot.operator_creator.add_depth_camera(
             transform, vehicle_id_stream, release_sensor_stream)
    else:
        depth_camera_stream = None
    if pylot.flags.must_add_segmented_camera_sensor():
        (ground_segmented_stream, notify_segmented_stream,
         _) = pylot.operator_creator.add_segmented_camera(
             transform, vehicle_id_stream, release_sensor_stream)
    else:
        ground_segmented_stream = None

    if pylot.flags.must_add_lidar_sensor():
        # Place LiDAR sensor in the same location as the center camera.
        (point_cloud_stream, notify_lidar_stream,
         lidar_setup) = pylot.operator_creator.add_lidar(
             transform, vehicle_id_stream, release_sensor_stream)
    else:
        point_cloud_stream = None
        lidar_setup = None

    if FLAGS.obstacle_location_finder_sensor == 'lidar':
        depth_stream = point_cloud_stream
        # Camera sensors are slower than the lidar sensor.
        notify_streams.append(notify_lidar_stream)
    elif FLAGS.obstacle_location_finder_sensor == 'depth_camera':
        depth_stream = depth_camera_stream
        notify_streams.append(notify_depth_stream)
    else:
        raise ValueError(
            'Unknown --obstacle_location_finder_sensor value {}'.format(
                FLAGS.obstacle_location_finder_sensor))

    imu_stream = None
    if pylot.flags.must_add_imu_sensor():
        (imu_stream, _) = pylot.operator_creator.add_imu(
            pylot.utils.Transform(location=pylot.utils.Location(),
                                  rotation=pylot.utils.Rotation()),
            vehicle_id_stream)

    gnss_stream = None
    if pylot.flags.must_add_gnss_sensor():
        (gnss_stream, _) = pylot.operator_creator.add_gnss(
            pylot.utils.Transform(location=pylot.utils.Location(),
                                  rotation=pylot.utils.Rotation()),
            vehicle_id_stream)

    if FLAGS.localization:
        pose_stream = pylot.operator_creator.add_localization(
            imu_stream, gnss_stream, pose_stream)

    ######################
    # Add detection.
    ######################

    # obstacles_stream, perfect_obstacles_stream = \
    #     pylot.component_creator.add_obstacle_detection(
    #         center_camera_stream, center_camera_setup, pose_stream,
    #         depth_stream, depth_camera_stream, ground_segmented_stream,
    #         ground_obstacles_stream, ground_speed_limit_signs_stream,
    #         ground_stop_signs_stream, time_to_decision_loop_stream)
    # tl_transform = pylot.utils.Transform(CENTER_CAMERA_LOCATION,
    #                                      pylot.utils.Rotation())
    # traffic_lights_stream, tl_camera_stream = \
    #     pylot.component_creator.add_traffic_light_detection(
    #         tl_transform, vehicle_id_stream, release_sensor_stream,
    #         pose_stream, depth_stream, ground_traffic_lights_stream)
    #
    # lane_detection_stream = pylot.component_creator.add_lane_detection(
    #     center_camera_stream, pose_stream, open_drive_stream)
    # if lane_detection_stream is None:
    #     lane_detection_stream = erdos.IngestStream()
    #     streams_to_send_top_on.append(lane_detection_stream)
    #
    # obstacles_tracking_stream = pylot.component_creator.add_obstacle_tracking(
    #     center_camera_stream, center_camera_setup, obstacles_stream,
    #     depth_stream, vehicle_id_stream, pose_stream, ground_obstacles_stream,
    #     time_to_decision_loop_stream)
    #
    # segmented_stream = pylot.component_creator.add_segmentation(
    #     center_camera_stream, ground_segmented_stream)
    #
    # depth_stream = pylot.component_creator.add_depth(transform,
    #                                                  vehicle_id_stream,
    #                                                  center_camera_setup,
    #                                                  depth_camera_stream)

    ######################
    # Add prediction?.
    ######################

    # if FLAGS.fusion:
    #     pylot.operator_creator.add_fusion(pose_stream, obstacles_stream,
    #                                       depth_stream,
    #                                       ground_obstacles_stream)
    #
    # prediction_stream, prediction_camera_stream, notify_prediction_stream = \
    #     pylot.component_creator.add_prediction(
    #         obstacles_tracking_stream, vehicle_id_stream, transform,
    #         release_sensor_stream, pose_stream, point_cloud_stream,
    #         lidar_setup)
    # if prediction_stream is None:
    #     prediction_stream = obstacles_stream
    # if notify_prediction_stream:
    #     notify_streams.append(notify_prediction_stream)

    ######################
    # Add planning?.
    ######################

    # goal_location = pylot.utils.Location(float(FLAGS.goal_location[0]),
    #                                      float(FLAGS.goal_location[1]),
    #                                      float(FLAGS.goal_location[2]))
    # waypoints_stream = pylot.component_creator.add_planning(
    #     goal_location, pose_stream, prediction_stream, traffic_lights_stream,
    #     lane_detection_stream, open_drive_stream, global_trajectory_stream,
    #     time_to_decision_loop_stream)
    #
    # if FLAGS.carla_mode == "pseudo-asynchronous":
    #     # Add a synchronizer in the pseudo-asynchronous mode.
    #     (
    #         waypoints_stream_for_control,
    #         pose_stream_for_control,
    #         sensor_ready_stream,
    #         _pipeline_finish_notify_stream,
    #     ) = pylot.operator_creator.add_planning_pose_synchronizer(
    #         waypoints_stream, pose_stream_for_control, pose_stream,
    #         *notify_streams)
    #     release_sensor_stream.set(sensor_ready_stream)
    #     pipeline_finish_notify_stream.set(_pipeline_finish_notify_stream)
    # else:
    #     waypoints_stream_for_control = waypoints_stream
    #     pose_stream_for_control = pose_stream

    ######################
    # Add control.
    ######################

    # control_stream = pylot.component_creator.add_control(
    #     pose_stream_for_control, waypoints_stream_for_control,
    #     vehicle_id_stream, perfect_obstacles_stream)

    from test_operator import TestOperator
    op_config = erdos.OperatorConfig(name='test_operator',
                                     flow_watermarks=False,
                                     log_file_name=FLAGS.log_file_name,
                                     csv_log_file_name=FLAGS.csv_log_file_name,
                                     profile_file_name=FLAGS.profile_file_name)
    [control_stream] = erdos.connect(TestOperator, op_config,
                                     [center_camera_stream], FLAGS)
    control_loop_stream.set(control_stream)

    # add_evaluation_operators(vehicle_id_stream, pose_stream, imu_stream,
    #                          pose_stream_for_control,
    #                          waypoints_stream_for_control)

    ######################
    # Add ?.
    ######################
    # time_to_decision_stream = pylot.operator_creator.add_time_to_decision(
    #     pose_stream, obstacles_stream)
    # time_to_decision_loop_stream.set(time_to_decision_stream)

    control_display_stream = None
    ###########################################################################
    # if pylot.flags.must_visualize():
    #     control_display_stream, ingest_streams = \
    #         pylot.operator_creator.add_visualizer(
    #             pose_stream, center_camera_stream, tl_camera_stream,
    #             prediction_camera_stream, depth_camera_stream,
    #             point_cloud_stream, segmented_stream, imu_stream,
    #             obstacles_stream, traffic_lights_stream,
    #             obstacles_tracking_stream, lane_detection_stream,
    #             prediction_stream, waypoints_stream, control_stream)
    #     streams_to_send_top_on += ingest_streams
    if pylot.flags.must_visualize():
        control_display_stream, ingest_streams = \
            pylot.operator_creator.add_visualizer(
                pose_stream, center_camera_stream, None,
                None, depth_camera_stream,
                point_cloud_stream, None, imu_stream,
                None, None,
                None, None,
                None, None, None)
        streams_to_send_top_on += ingest_streams
    ############################################################################

    node_handle = erdos.run_async()

    for stream in streams_to_send_top_on:
        stream.send(erdos.WatermarkMessage(erdos.Timestamp(is_top=True)))
    # If we did not use the pseudo-asynchronous mode, ask the sensors to
    # release their readings whenever they are ready.
    if FLAGS.carla_mode != "pseudo-asynchronous":
        release_sensor_stream.send(
            erdos.WatermarkMessage(erdos.Timestamp(is_top=True)))
        pipeline_finish_notify_stream.send(
            erdos.WatermarkMessage(erdos.Timestamp(is_top=True)))

    return node_handle, control_display_stream
Example #13
def driver():
    transform = pylot.utils.Transform(CENTER_CAMERA_LOCATION,
                                      pylot.utils.Rotation(pitch=-15))
    streams_to_send_top_on = []
    control_loop_stream = erdos.LoopStream()
    time_to_decision_loop_stream = erdos.LoopStream()
    if FLAGS.simulator_mode == 'pseudo-asynchronous':
        release_sensor_stream = erdos.LoopStream()
        pipeline_finish_notify_stream = erdos.LoopStream()
    else:
        release_sensor_stream = erdos.IngestStream()
        pipeline_finish_notify_stream = erdos.IngestStream()
    notify_streams = []

    # Create operator that bridges between pipeline and the simulator.
    (
        pose_stream,
        pose_stream_for_control,
        ground_traffic_lights_stream,
        ground_obstacles_stream,
        ground_speed_limit_signs_stream,
        ground_stop_signs_stream,
        vehicle_id_stream,
        open_drive_stream,
        global_trajectory_stream,
    ) = pylot.operator_creator.add_simulator_bridge(
        control_loop_stream,
        release_sensor_stream,
        pipeline_finish_notify_stream,
    )

    # Add sensors.
    (center_camera_stream, notify_rgb_stream,
     center_camera_setup) = pylot.operator_creator.add_rgb_camera(
         transform, vehicle_id_stream, release_sensor_stream)
    notify_streams.append(notify_rgb_stream)
    if pylot.flags.must_add_depth_camera_sensor():
        (depth_camera_stream, notify_depth_stream,
         depth_camera_setup) = pylot.operator_creator.add_depth_camera(
             transform, vehicle_id_stream, release_sensor_stream)
    else:
        depth_camera_stream = None
    if pylot.flags.must_add_segmented_camera_sensor():
        (ground_segmented_stream, notify_segmented_stream,
         _) = pylot.operator_creator.add_segmented_camera(
             transform, vehicle_id_stream, release_sensor_stream)
    else:
        ground_segmented_stream = None

    if pylot.flags.must_add_lidar_sensor():
        # Place LiDAR sensor in the same location as the center camera.
        (point_cloud_stream, notify_lidar_stream,
         lidar_setup) = pylot.operator_creator.add_lidar(
             transform, vehicle_id_stream, release_sensor_stream)
    else:
        point_cloud_stream = None
        lidar_setup = None

    if FLAGS.obstacle_location_finder_sensor == 'lidar':
        depth_stream = point_cloud_stream
        # Camera sensors are slower than the lidar sensor.
        notify_streams.append(notify_lidar_stream)
    elif FLAGS.obstacle_location_finder_sensor == 'depth_camera':
        depth_stream = depth_camera_stream
        notify_streams.append(notify_depth_stream)
    else:
        raise ValueError(
            'Unknown --obstacle_location_finder_sensor value {}'.format(
                FLAGS.obstacle_location_finder_sensor))

    imu_stream = None
    if pylot.flags.must_add_imu_sensor():
        (imu_stream, _) = pylot.operator_creator.add_imu(
            pylot.utils.Transform(location=pylot.utils.Location(),
                                  rotation=pylot.utils.Rotation()),
            vehicle_id_stream)

    gnss_stream = None
    if pylot.flags.must_add_gnss_sensor():
        (gnss_stream, _) = pylot.operator_creator.add_gnss(
            pylot.utils.Transform(location=pylot.utils.Location(),
                                  rotation=pylot.utils.Rotation()),
            vehicle_id_stream)

    if FLAGS.localization:
        pose_stream = pylot.operator_creator.add_localization(
            imu_stream, gnss_stream, pose_stream)

    obstacles_stream, perfect_obstacles_stream, obstacles_error_stream = \
        pylot.component_creator.add_obstacle_detection(
            center_camera_stream, center_camera_setup, pose_stream,
            depth_stream, depth_camera_stream, ground_segmented_stream,
            ground_obstacles_stream, ground_speed_limit_signs_stream,
            ground_stop_signs_stream, time_to_decision_loop_stream)

    tl_transform = pylot.utils.Transform(CENTER_CAMERA_LOCATION,
                                         pylot.utils.Rotation())
    traffic_lights_stream, tl_camera_stream = \
        pylot.component_creator.add_traffic_light_detection(
            tl_transform, vehicle_id_stream, release_sensor_stream,
            pose_stream, depth_stream, ground_traffic_lights_stream)

    lane_detection_stream = pylot.component_creator.add_lane_detection(
        center_camera_stream, pose_stream, open_drive_stream)
    if lane_detection_stream is None:
        lane_detection_stream = erdos.IngestStream()
        streams_to_send_top_on.append(lane_detection_stream)

    obstacles_tracking_stream = pylot.component_creator.add_obstacle_tracking(
        center_camera_stream, center_camera_setup, obstacles_stream,
        depth_stream, vehicle_id_stream, pose_stream, ground_obstacles_stream,
        time_to_decision_loop_stream)

    segmented_stream = pylot.component_creator.add_segmentation(
        center_camera_stream, ground_segmented_stream)

    depth_stream = pylot.component_creator.add_depth(transform,
                                                     vehicle_id_stream,
                                                     center_camera_setup,
                                                     depth_camera_stream)

    if FLAGS.fusion:
        pylot.operator_creator.add_fusion(pose_stream, obstacles_stream,
                                          depth_stream,
                                          ground_obstacles_stream)

    prediction_stream, prediction_camera_stream, notify_prediction_stream = \
        pylot.component_creator.add_prediction(
            obstacles_tracking_stream, vehicle_id_stream, transform,
            release_sensor_stream, pose_stream, point_cloud_stream,
            lidar_setup)
    if prediction_stream is None:
        prediction_stream = obstacles_stream
    if notify_prediction_stream:
        notify_streams.append(notify_prediction_stream)

    goal_location = pylot.utils.Location(float(FLAGS.goal_location[0]),
                                         float(FLAGS.goal_location[1]),
                                         float(FLAGS.goal_location[2]))
    waypoints_stream = pylot.component_creator.add_planning(
        goal_location, pose_stream, prediction_stream, traffic_lights_stream,
        lane_detection_stream, open_drive_stream, global_trajectory_stream,
        time_to_decision_loop_stream)

    if FLAGS.simulator_mode == "pseudo-asynchronous":
        # Add a synchronizer in the pseudo-asynchronous mode.
        (
            waypoints_stream_for_control,
            pose_stream_for_control,
            sensor_ready_stream,
            _pipeline_finish_notify_stream,
        ) = pylot.operator_creator.add_planning_pose_synchronizer(
            waypoints_stream, pose_stream_for_control, pose_stream,
            *notify_streams)
        release_sensor_stream.set(sensor_ready_stream)
        pipeline_finish_notify_stream.set(_pipeline_finish_notify_stream)
    else:
        waypoints_stream_for_control = waypoints_stream
        pose_stream_for_control = pose_stream

    control_stream = pylot.component_creator.add_control(
        pose_stream_for_control, waypoints_stream_for_control,
        vehicle_id_stream, perfect_obstacles_stream)
    control_loop_stream.set(control_stream)

    add_evaluation_operators(vehicle_id_stream, pose_stream, imu_stream,
                             pose_stream_for_control,
                             waypoints_stream_for_control)

    time_to_decision_stream = pylot.operator_creator.add_time_to_decision(
        pose_stream, obstacles_stream)
    time_to_decision_loop_stream.set(time_to_decision_stream)

    control_display_stream = None
    if pylot.flags.must_visualize():
        control_display_stream, ingest_streams = \
            pylot.operator_creator.add_visualizer(
                pose_stream, center_camera_stream, tl_camera_stream,
                prediction_camera_stream, depth_camera_stream,
                point_cloud_stream, segmented_stream, imu_stream,
                obstacles_stream, obstacles_error_stream, traffic_lights_stream,
                obstacles_tracking_stream, lane_detection_stream,
                prediction_stream, waypoints_stream, control_stream)
        streams_to_send_top_on += ingest_streams

    node_handle = erdos.run_async()

    for stream in streams_to_send_top_on:
        stream.send(erdos.WatermarkMessage(erdos.Timestamp(is_top=True)))
    # If we did not use the pseudo-asynchronous mode, ask the sensors to
    # release their readings whenever they are ready.
    if FLAGS.simulator_mode != "pseudo-asynchronous":
        release_sensor_stream.send(
            erdos.WatermarkMessage(erdos.Timestamp(is_top=True)))
        pipeline_finish_notify_stream.send(
            erdos.WatermarkMessage(erdos.Timestamp(is_top=True)))

    return node_handle, control_display_stream
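A thin entry point typically builds the data-flow above and blocks until the run is interrupted. The sketch below is illustrative only: it assumes the builder above is exposed as driver(), that flags are parsed with absl, and that the handle returned by erdos.run_async() offers shutdown(); handling of the visualizer's control_display_stream is omitted.

import signal

from absl import app


def main(argv):
    # driver() is assumed to be the data-flow builder defined above.
    node_handle, control_display_stream = driver()
    try:
        # Block until the user interrupts the run (e.g. with Ctrl-C).
        signal.pause()
    finally:
        # Assumed API: the node handle can stop all operators.
        node_handle.shutdown()


if __name__ == '__main__':
    app.run(main)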
Exemple #14
0
def main(argv):
    """ Computes ground obstacle detection and segmentation decay."""
    transform = pylot.utils.Transform(CENTER_CAMERA_LOCATION,
                                      pylot.utils.Rotation())

    control_loop_stream = erdos.LoopStream()
    release_sensor_stream = erdos.IngestStream()
    # Create carla operator.
    (
        pose_stream,
        pose_stream_for_control,
        ground_traffic_lights_stream,
        ground_obstacles_stream,
        ground_speed_limit_signs_stream,
        ground_stop_signs_stream,
        vehicle_id_stream,
        open_drive_stream,
        global_trajectory_stream,
    ) = pylot.operator_creator.add_carla_bridge(control_loop_stream,
                                                release_sensor_stream)

    # Add camera sensors.
    (center_camera_stream, notify_rgb_stream,
     rgb_camera_setup) = pylot.operator_creator.add_rgb_camera(
         transform, vehicle_id_stream, release_sensor_stream)
    (depth_camera_stream, notify_depth_stream,
     depth_camera_setup) = pylot.operator_creator.add_depth_camera(
         transform, vehicle_id_stream, release_sensor_stream)
    (segmented_stream, _,
     _) = pylot.operator_creator.add_segmented_camera(transform,
                                                      vehicle_id_stream,
                                                      release_sensor_stream)

    map_stream = None
    if FLAGS.compute_detection_decay:
        obstacles_stream = pylot.operator_creator.add_perfect_detector(
            depth_camera_stream, center_camera_stream, segmented_stream,
            pose_stream, ground_obstacles_stream,
            ground_speed_limit_signs_stream, ground_stop_signs_stream)
        map_stream = pylot.operator_creator.add_detection_decay(
            obstacles_stream)

    iou_stream = None
    if FLAGS.compute_segmentation_decay:
        iou_stream = pylot.operator_creator.add_segmentation_decay(
            segmented_stream)

    # TODO: Hack! We synchronize on a single stream, based on a guesstimate
    # of which stream is slowest. Instead, we should synchronize on all
    # output streams, and we should ensure that even the operators without
    # output streams complete.
    if FLAGS.control_agent == 'carla_auto_pilot':
        stream_to_sync_on = iou_stream
        if map_stream is not None:
            stream_to_sync_on = map_stream
        op_config = erdos.OperatorConfig(name='synchronizer_operator',
                                         flow_watermarks=False)
        (control_stream, ) = erdos.connect(SynchronizerOperator, op_config,
                                           [stream_to_sync_on], FLAGS)
        control_loop_stream.set(control_stream)
    else:
        raise ValueError(
            "Must be in auto pilot mode. Pass --control_agent=carla_auto_pilot"
        )

    erdos.run_async()

    # Ask all sensors to release their data.
    release_sensor_stream.send(
        erdos.WatermarkMessage(erdos.Timestamp(is_top=True)))
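The example above connects a SynchronizerOperator but never shows it. The sketch below is a minimal illustration of what such an operator can look like; it assumes the ERDOS operator API (erdos.Operator, erdos.add_watermark_callback, erdos.WriteStream) and pylot.control.messages.ControlMessage(steer, throttle, brake, hand_brake, reverse, timestamp), so verify the signatures against your ERDOS/Pylot versions.

import erdos

from pylot.control.messages import ControlMessage


class SynchronizerOperator(erdos.Operator):
    """Releases the next simulator tick by sending an empty control command
    whenever a watermark arrives on the stream it waits on."""
    def __init__(self, wait_stream, control_stream, flags):
        erdos.add_watermark_callback([wait_stream], [control_stream],
                                     self.on_watermark)
        self._flags = flags

    @staticmethod
    def connect(wait_stream):
        # Single output: the stream fed back into control_loop_stream.
        control_stream = erdos.WriteStream()
        return [control_stream]

    def on_watermark(self, timestamp, control_stream):
        # The auto pilot does the actual driving; this zeroed command only
        # advances the simulation in lock step with the pipeline.
        control_stream.send(ControlMessage(0, 0, 0, False, False, timestamp))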
Exemple #15
0
def add_visualizer(pose_stream=None,
                   camera_stream=None,
                   tl_camera_stream=None,
                   prediction_camera_stream=None,
                   depth_stream=None,
                   point_cloud_stream=None,
                   segmentation_stream=None,
                   imu_stream=None,
                   obstacles_stream=None,
                   traffic_lights_stream=None,
                   tracked_obstacles_stream=None,
                   lane_detection_stream=None,
                   prediction_stream=None,
                   waypoints_stream=None,
                   control_stream=None,
                   name='visualizer_operator'):
    from pylot.debug.visualizer_operator import VisualizerOperator
    import pygame
    pygame.init()
    pygame_display = pygame.display.set_mode(
        (FLAGS.camera_image_width, FLAGS.camera_image_height),
        pygame.HWSURFACE | pygame.DOUBLEBUF)
    pygame.display.set_caption("Pylot")
    streams_to_send_top_on = []
    if pose_stream is None:
        pose_stream = erdos.IngestStream()
        streams_to_send_top_on.append(pose_stream)
    if (camera_stream is None
            or not (FLAGS.visualize_rgb_camera
                    or FLAGS.visualize_detected_obstacles
                    or FLAGS.visualize_detected_traffic_lights
                    or FLAGS.visualize_tracked_obstacles
                    or FLAGS.visualize_waypoints)):
        camera_stream = erdos.IngestStream()
        streams_to_send_top_on.append(camera_stream)
    if depth_stream is None or not FLAGS.visualize_depth_camera:
        depth_stream = erdos.IngestStream()
        streams_to_send_top_on.append(depth_stream)
    if point_cloud_stream is None or not FLAGS.visualize_lidar:
        point_cloud_stream = erdos.IngestStream()
        streams_to_send_top_on.append(point_cloud_stream)
    if segmentation_stream is None or not FLAGS.visualize_segmentation:
        segmentation_stream = erdos.IngestStream()
        streams_to_send_top_on.append(segmentation_stream)
    if imu_stream is None or not FLAGS.visualize_imu:
        imu_stream = erdos.IngestStream()
        streams_to_send_top_on.append(imu_stream)
    if obstacles_stream is None or not FLAGS.visualize_detected_obstacles:
        obstacles_stream = erdos.IngestStream()
        streams_to_send_top_on.append(obstacles_stream)
    if tl_camera_stream is None or not FLAGS.visualize_detected_traffic_lights:
        tl_camera_stream = erdos.IngestStream()
        streams_to_send_top_on.append(tl_camera_stream)
    if (traffic_lights_stream is None
            or not (FLAGS.visualize_detected_traffic_lights
                    or FLAGS.visualize_world)):
        traffic_lights_stream = erdos.IngestStream()
        streams_to_send_top_on.append(traffic_lights_stream)
    if (tracked_obstacles_stream is None
            or not FLAGS.visualize_tracked_obstacles):
        tracked_obstacles_stream = erdos.IngestStream()
        streams_to_send_top_on.append(tracked_obstacles_stream)
    if lane_detection_stream is None or not FLAGS.visualize_detected_lanes:
        lane_detection_stream = erdos.IngestStream()
        streams_to_send_top_on.append(lane_detection_stream)
    if prediction_camera_stream is None or not FLAGS.visualize_prediction:
        prediction_camera_stream = erdos.IngestStream()
        streams_to_send_top_on.append(prediction_camera_stream)
    if (prediction_stream is None
            or not (FLAGS.visualize_prediction or FLAGS.visualize_world)):
        prediction_stream = erdos.IngestStream()
        streams_to_send_top_on.append(prediction_stream)
    if waypoints_stream is None or not (FLAGS.visualize_waypoints
                                        or FLAGS.visualize_world):
        waypoints_stream = erdos.IngestStream()
        streams_to_send_top_on.append(waypoints_stream)
    if control_stream is None:
        control_stream = erdos.IngestStream()
        streams_to_send_top_on.append(control_stream)

    control_display_stream = erdos.IngestStream()
    op_config = erdos.OperatorConfig(name=name,
                                     log_file_name=FLAGS.log_file_name,
                                     csv_log_file_name=FLAGS.csv_log_file_name,
                                     profile_file_name=FLAGS.profile_file_name)
    erdos.connect(VisualizerOperator, op_config, [
        pose_stream, camera_stream, tl_camera_stream, prediction_camera_stream,
        depth_stream, point_cloud_stream, segmentation_stream, imu_stream,
        obstacles_stream, traffic_lights_stream, tracked_obstacles_stream,
        lane_detection_stream, prediction_stream, waypoints_stream,
        control_stream, control_display_stream
    ], pygame_display, FLAGS)
    return control_display_stream, streams_to_send_top_on
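A hypothetical caller only passes the streams it wants rendered; every omitted input is replaced by a dummy IngestStream that must be closed with a top watermark once the graph is running, as in the sketch below (which reuses only calls already shown in this file).

def run_pose_only_visualizer(pose_stream):
    # Render only the ego-vehicle pose; all other inputs stay dummies.
    control_display_stream, streams_to_send_top_on = add_visualizer(
        pose_stream=pose_stream)
    erdos.run_async()
    # Close the dummy ingest streams so the visualizer's watermarks can flow.
    for stream in streams_to_send_top_on:
        stream.send(erdos.WatermarkMessage(erdos.Timestamp(is_top=True)))
    return control_display_stream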
Exemple #16
0
def create_data_flow():
    left_camera_transform = pylot.utils.Transform(LEFT_CAMERA_LOCATION,
                                                  pylot.utils.Rotation())
    right_camera_transform = pylot.utils.Transform(RIGHT_CAMERA_LOCATION,
                                                   pylot.utils.Rotation())
    velodyne_transform = pylot.utils.Transform(VELODYNE_LOCATION,
                                               pylot.utils.Rotation())

    streams_to_send_top_on = []
    time_to_decision_loop_stream = erdos.LoopStream()

    (left_camera_stream, left_camera_setup) = add_grasshopper3_camera(
        left_camera_transform,
        name='left_grasshopper',
        topic_name='/pg_0/image_color')

    (right_camera_stream, right_camera_setup) = add_grasshopper3_camera(
        right_camera_transform,
        name='right_grasshopper',
        topic_name='/pg_1/image_color')

    (point_cloud_stream,
     lidar_setup) = add_velodyne_lidar(velodyne_transform,
                                       topic_name='/points_raw')

    pose_stream = add_localization()

    if FLAGS.obstacle_detection:
        obstacles_streams, _ = pylot.operator_creator.add_obstacle_detection(
            left_camera_stream, time_to_decision_loop_stream)
        obstacles_stream = obstacles_streams[0]
        # Adds an operator that finds the world locations of the obstacles.
        obstacles_stream = pylot.operator_creator.add_obstacle_location_finder(
            obstacles_stream, point_cloud_stream, pose_stream,
            left_camera_stream, left_camera_setup)
    else:
        obstacles_stream = erdos.IngestStream()

    if FLAGS.traffic_light_detection:
        # The right camera is more likely to contain the traffic lights.
        traffic_lights_stream = \
            pylot.operator_creator.add_traffic_light_detector(
                right_camera_stream)
        # Adds operator that finds the world locations of the traffic lights.
        traffic_lights_stream = \
            pylot.operator_creator.add_obstacle_location_finder(
                traffic_lights_stream, point_cloud_stream, pose_stream,
                right_camera_stream, right_camera_setup)
    else:
        traffic_lights_stream = erdos.IngestStream()

    if FLAGS.lane_detection:
        lane_detection_stream = pylot.component_creator.add_lane_detection(
            left_camera_stream)
    else:
        lane_detection_stream = erdos.IngestStream()
        streams_to_send_top_on.append(lane_detection_stream)

    if FLAGS.obstacle_tracking:
        obstacles_wo_history_tracking_stream = \
            pylot.operator_creator.add_obstacle_tracking(
                obstacles_stream,
                left_camera_stream,
                time_to_decision_loop_stream)
        obstacles_tracking_stream = \
            pylot.operator_creator.add_obstacle_location_history(
                obstacles_wo_history_tracking_stream, point_cloud_stream,
                pose_stream, left_camera_stream, left_camera_setup)
    else:
        obstacles_tracking_stream = erdos.IngestStream()

    if FLAGS.prediction:
        prediction_stream = pylot.operator_creator.add_linear_prediction(
            obstacles_tracking_stream)
    else:
        prediction_stream = obstacles_stream

    open_drive_stream = erdos.IngestStream()
    global_trajectory_stream = erdos.IngestStream()

    waypoints_stream = pylot.component_creator.add_planning(
        None, pose_stream, prediction_stream, traffic_lights_stream,
        lane_detection_stream, open_drive_stream, global_trajectory_stream,
        time_to_decision_loop_stream)

    if FLAGS.control == 'pid':
        control_stream = pylot.operator_creator.add_pid_control(
            pose_stream, waypoints_stream)
    else:
        raise ValueError('Only PID control is currently supported')

    if FLAGS.drive_by_wire:
        add_drive_by_wire_operator(control_stream)

    control_display_stream = None
    if pylot.flags.must_visualize():
        control_display_stream, ingest_streams = \
            pylot.operator_creator.add_visualizer(
                pose_stream, camera_stream=left_camera_stream,
                tl_camera_stream=right_camera_stream,
                point_cloud_stream=point_cloud_stream,
                obstacles_stream=obstacles_stream,
                traffic_lights_stream=traffic_lights_stream,
                tracked_obstacles_stream=obstacles_tracking_stream,
                lane_detection_stream=lane_detection_stream,
                waypoints_stream=waypoints_stream)
        streams_to_send_top_on += ingest_streams

    time_to_decision_stream = pylot.operator_creator.add_time_to_decision(
        pose_stream, obstacles_stream)
    time_to_decision_loop_stream.set(time_to_decision_stream)

    return (obstacles_stream, traffic_lights_stream, obstacles_tracking_stream,
            open_drive_stream, global_trajectory_stream,
            control_display_stream, streams_to_send_top_on)
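A hypothetical main() for the data-flow above has to launch the graph and then close every ingest stream that nothing feeds, otherwise the planner's watermarks never progress. The sketch below only covers the streams visible in create_data_flow(); when detection, tracking, or traffic light detection are disabled, the corresponding ingest streams returned above would need to be fed or closed as well.

def main(argv):
    (obstacles_stream, traffic_lights_stream, obstacles_tracking_stream,
     open_drive_stream, global_trajectory_stream, control_display_stream,
     streams_to_send_top_on) = create_data_flow()
    erdos.run_async()
    # In this sketch the HD map and global route streams are simply closed
    # with top watermarks.
    open_drive_stream.send(
        erdos.WatermarkMessage(erdos.Timestamp(is_top=True)))
    global_trajectory_stream.send(
        erdos.WatermarkMessage(erdos.Timestamp(is_top=True)))
    for stream in streams_to_send_top_on:
        stream.send(erdos.WatermarkMessage(erdos.Timestamp(is_top=True)))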
Exemple #17
0
def driver():
    ingest_stream = erdos.IngestStream()
    (square_stream, ) = erdos.connect(SquareOp, [ingest_stream])
    extract_stream = erdos.ExtractStream(square_stream)

    return ingest_stream, extract_stream
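A caller drives the graph above through the ingest/extract pair. The round trip below is a sketch, assuming erdos.Message, erdos.Timestamp(coordinates=...), and ExtractStream.read() behave as in the public ERDOS Python API; check them against your version.

def run_square_pipeline(values):
    ingest_stream, extract_stream = driver()
    erdos.run_async()
    results = []
    for t, value in enumerate(values):
        timestamp = erdos.Timestamp(coordinates=[t])
        # Push one value into the graph and close its timestamp.
        ingest_stream.send(erdos.Message(timestamp, value))
        ingest_stream.send(erdos.WatermarkMessage(timestamp))
        # Read until the squared value arrives; watermarks may be forwarded
        # on the extract stream as well.
        msg = extract_stream.read()
        while isinstance(msg, erdos.WatermarkMessage):
            msg = extract_stream.read()
        results.append(msg.data)
    return results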
Exemple #18
0
def create_data_flow():
    left_camera_transform = pylot.utils.Transform(LEFT_CAMERA_LOCATION,
                                                  pylot.utils.Rotation())
    right_camera_transform = pylot.utils.Transform(RIGHT_CAMERA_LOCATION,
                                                   pylot.utils.Rotation())
    velodyne_transform = pylot.utils.Transform(VELODYNE_LOCATION,
                                               pylot.utils.Rotation())

    (left_camera_stream, left_camera_setup) = add_grasshopper3_camera(
        left_camera_transform,
        name='left_grasshopper',
        topic_name='/pg_0/image_color')

    (right_camera_stream, right_camera_setup) = add_grasshopper3_camera(
        right_camera_transform,
        name='right_grasshopper',
        topic_name='/pg_1/image_color')

    (point_cloud_stream,
     lidar_setup) = add_velodyne_lidar(velodyne_transform,
                                       topic_name='/points_raw')

    can_bus_stream = add_localization()

    if FLAGS.obstacle_detection:
        obstacles_streams = pylot.operator_creator.add_obstacle_detection(
            left_camera_stream)
        obstacles_stream = obstacles_streams[0]
        # Adds an operator that finds the world locations of the obstacles.
        obstacles_stream = pylot.operator_creator.add_obstacle_location_finder(
            obstacles_stream, point_cloud_stream, can_bus_stream,
            left_camera_stream, left_camera_setup)
    else:
        obstacles_stream = erdos.IngestStream()

    if FLAGS.traffic_light_detection:
        # The right camera is more likely to contain the traffic lights.
        traffic_lights_stream = pylot.operator_creator.add_traffic_light_detector(
            right_camera_stream)
        # Adds operator that finds the world locations of the traffic lights.
        traffic_lights_stream = \
            pylot.operator_creator.add_obstacle_location_finder(
                traffic_lights_stream, point_cloud_stream, can_bus_stream,
                right_camera_stream, right_camera_setup)
    else:
        traffic_lights_stream = erdos.IngestStream()

    if FLAGS.lane_detection:
        lane_detection = pylot.operator_creator.add_canny_edge_lane_detection(
            left_camera_stream)

    if FLAGS.obstacle_tracking:
        obstacles_tracking_stream = pylot.operator_creator.add_obstacle_tracking(
            obstacles_stream, left_camera_stream)
    else:
        obstacles_tracking_stream = erdos.IngestStream()

    if FLAGS.prediction:
        prediction_stream = pylot.operator_creator.add_linear_prediction(
            obstacles_tracking_stream)
    else:
        assert FLAGS.planning_type != 'frenet_optimal_trajectory', \
            "FLAGS.prediction must be true if using frenet optimal trajectory"
        assert FLAGS.planning_type != 'rrt_star', \
            "FLAGS.prediction must be true if using rrt star"

    open_drive_stream = erdos.IngestStream()
    global_trajectory_stream = erdos.IngestStream()

    if FLAGS.planning_type == 'waypoint':
        waypoints_stream = pylot.operator_creator.add_waypoint_planning(
            can_bus_stream, open_drive_stream, global_trajectory_stream,
            obstacles_stream, traffic_lights_stream, None)
    elif FLAGS.planning_type == 'frenet_optimal_trajectory':
        waypoints_stream = pylot.operator_creator.add_fot_planning(
            can_bus_stream, prediction_stream, global_trajectory_stream,
            open_drive_stream, None)
    elif FLAGS.planning_type == 'rrt_star':
        waypoints_stream = pylot.operator_creator.add_rrt_star_planning(
            can_bus_stream, prediction_stream, global_trajectory_stream,
            open_drive_stream, None)
    else:
        raise ValueError('Unsupported planning type {}'.format(
            FLAGS.planning_type))

    if FLAGS.control_agent == 'pid':
        control_stream = pylot.operator_creator.add_pid_agent(
            can_bus_stream, waypoints_stream)
    else:
        raise ValueError('Only PID control is currently supported')

    if FLAGS.drive_by_wire:
        add_drive_by_wire_operator(control_stream)

    # Add visualizers.
    if FLAGS.visualize_rgb_camera:
        pylot.operator_creator.add_camera_visualizer(
            left_camera_stream, 'left_grasshopper3_camera')
    if FLAGS.visualize_waypoints:
        pylot.operator_creator.add_waypoint_visualizer(waypoints_stream,
                                                       left_camera_stream,
                                                       can_bus_stream)

    return (obstacles_stream, traffic_lights_stream, obstacles_tracking_stream,
            open_drive_stream, global_trajectory_stream)
Exemple #19
0
def main(argv):
    transform = pylot.utils.Transform(CENTER_CAMERA_LOCATION,
                                      pylot.utils.Rotation())

    control_loop_stream = erdos.LoopStream()
    release_sensor_stream = erdos.IngestStream()
    # Create carla operator.
    (
        pose_stream,
        pose_stream_for_control,
        ground_traffic_lights_stream,
        ground_obstacles_stream,
        ground_speed_limit_signs_stream,
        ground_stop_signs_stream,
        vehicle_id_stream,
        open_drive_stream,
        global_trajectory_stream,
    ) = pylot.operator_creator.add_carla_bridge(control_loop_stream,
                                                release_sensor_stream)

    # Add sensors.
    (center_camera_stream, notify_rgb_stream,
     rgb_camera_setup) = pylot.operator_creator.add_rgb_camera(
         transform, vehicle_id_stream, release_sensor_stream)
    (depth_camera_stream, _,
     depth_camera_setup) = pylot.operator_creator.add_depth_camera(
         transform, vehicle_id_stream, release_sensor_stream)
    (segmented_stream, _,
     _) = pylot.operator_creator.add_segmented_camera(transform,
                                                      vehicle_id_stream,
                                                      release_sensor_stream)

    if FLAGS.log_rgb_camera:
        pylot.operator_creator.add_camera_logging(
            center_camera_stream, 'center_camera_logger_operator',
            'carla-center-')

    if FLAGS.log_segmented_camera:
        pylot.operator_creator.add_camera_logging(
            segmented_stream, 'center_segmented_camera_logger_operator',
            'carla-segmented-')

    if FLAGS.log_depth_camera:
        pylot.operator_creator.add_camera_logging(
            depth_camera_stream, 'depth_camera_logger_operator',
            'carla-depth-')

    imu_stream = None
    if FLAGS.log_imu:
        (imu_stream,
         _) = pylot.operator_creator.add_imu(transform, vehicle_id_stream)
        pylot.operator_creator.add_imu_logging(imu_stream)

    traffic_lights_stream = None
    if FLAGS.log_traffic_lights:
        (traffic_light_camera_stream, _,
         traffic_light_camera_setup) = pylot.operator_creator.add_rgb_camera(
             transform, vehicle_id_stream, release_sensor_stream,
             'traffic_light_camera', 45)
        pylot.operator_creator.add_camera_logging(
            traffic_light_camera_stream,
            'traffic_light_camera_logger_operator', 'carla-traffic-light-')
        (traffic_light_segmented_camera_stream, _, _) = \
            pylot.operator_creator.add_segmented_camera(
                transform,
                vehicle_id_stream,
                release_sensor_stream,
                'traffic_light_segmented_camera',
                45)
        (traffic_light_depth_camera_stream, _, _) = \
            pylot.operator_creator.add_depth_camera(
                transform, vehicle_id_stream, release_sensor_stream,
                'traffic_light_depth_camera', 45)
        traffic_lights_stream = \
            pylot.operator_creator.add_perfect_traffic_light_detector(
                ground_traffic_lights_stream,
                traffic_light_camera_stream,
                traffic_light_depth_camera_stream,
                traffic_light_segmented_camera_stream,
                pose_stream)
        pylot.operator_creator.add_bounding_box_logging(traffic_lights_stream)

    if FLAGS.log_left_right_cameras:
        (left_camera_stream, right_camera_stream, _,
         _) = pylot.operator_creator.add_left_right_cameras(
             transform, vehicle_id_stream, release_sensor_stream)
        pylot.operator_creator.add_camera_logging(
            left_camera_stream, 'left_camera_logger_operator', 'carla-left-')
        pylot.operator_creator.add_camera_logging(
            right_camera_stream, 'right_camera_logger_operator',
            'carla-right-')

    point_cloud_stream = None
    if FLAGS.log_lidar:
        (point_cloud_stream, _,
         _) = pylot.operator_creator.add_lidar(transform, vehicle_id_stream,
                                               release_sensor_stream)
        pylot.operator_creator.add_lidar_logging(point_cloud_stream)

    obstacles_stream = None
    if FLAGS.log_obstacles:
        obstacles_stream = pylot.operator_creator.add_perfect_detector(
            depth_camera_stream, center_camera_stream, segmented_stream,
            pose_stream, ground_obstacles_stream,
            ground_speed_limit_signs_stream, ground_stop_signs_stream)
        pylot.operator_creator.add_bounding_box_logging(obstacles_stream)

    if FLAGS.log_multiple_object_tracker:
        pylot.operator_creator.add_multiple_object_tracker_logging(
            obstacles_stream)

    obstacles_tracking_stream = None
    if FLAGS.log_trajectories or FLAGS.log_chauffeur:
        obstacles_tracking_stream = \
            pylot.operator_creator.add_perfect_tracking(
                ground_obstacles_stream,
                pose_stream)
        if FLAGS.log_trajectories:
            pylot.operator_creator.add_trajectory_logging(
                obstacles_tracking_stream)

    top_down_segmented_stream = None
    top_down_camera_setup = None
    if FLAGS.log_chauffeur or FLAGS.log_top_down_segmentation:
        top_down_transform = pylot.utils.get_top_down_transform(
            transform, FLAGS.top_down_lateral_view)
        (top_down_segmented_stream, _, _) = \
            pylot.operator_creator.add_segmented_camera(
                top_down_transform,
                vehicle_id_stream,
                release_sensor_stream,
                name='top_down_segmented_camera',
                fov=90)

        if FLAGS.log_top_down_segmentation:
            pylot.operator_creator.add_camera_logging(
                top_down_segmented_stream,
                'top_down_segmented_logger_operator',
                'carla-top-down-segmented-')

        if FLAGS.log_chauffeur:
            (top_down_camera_stream, _,
             top_down_camera_setup) = \
                pylot.operator_creator.add_rgb_camera(
                    top_down_transform,
                    vehicle_id_stream,
                    release_sensor_stream,
                    name='top_down_rgb_camera',
                    fov=90)
            pylot.operator_creator.add_chauffeur_logging(
                vehicle_id_stream, pose_stream, obstacles_tracking_stream,
                top_down_camera_stream, top_down_segmented_stream,
                top_down_camera_setup)

    pylot.operator_creator.add_sensor_visualizers(center_camera_stream,
                                                  depth_camera_stream,
                                                  point_cloud_stream,
                                                  segmented_stream, imu_stream,
                                                  pose_stream)

    # TODO: Hack! We synchronize on a single stream, based on a guesstimate
    # of which stream is slowest. Instead, we should synchronize on all
    # output streams, and we should ensure that even the operators without
    # output streams complete.
    if FLAGS.control_agent == 'carla_auto_pilot':
        # We insert a synchronizing operator that sends back a command when
        # the low watermark progresses on all input stream.
        stream_to_sync_on = center_camera_stream
        if obstacles_tracking_stream is not None:
            stream_to_sync_on = obstacles_tracking_stream
        if traffic_lights_stream is not None:
            stream_to_sync_on = traffic_lights_stream
        if obstacles_stream is not None:
            stream_to_sync_on = obstacles_stream
        control_stream = pylot.operator_creator.add_synchronizer(
            stream_to_sync_on)
        control_loop_stream.set(control_stream)
    else:
        raise ValueError(
            "Must be in auto pilot mode. Pass --control_agent=carla_auto_pilot"
        )

    # Run the data-flow.
    erdos.run_async()

    # Ask all sensors to release their data.
    release_sensor_stream.send(
        erdos.WatermarkMessage(erdos.Timestamp(is_top=True)))
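All of the FLAGS.* switches used in these scripts are absl flags defined by Pylot's flag modules (or by the script itself). Purely as an illustration of that mechanism, a few of the logging flags above could be declared as follows; the defaults and help strings here are assumptions, not Pylot's authoritative definitions.

from absl import flags

FLAGS = flags.FLAGS

# Illustrative declarations only; the real definitions live in Pylot's flag
# modules and the scripts themselves.
flags.DEFINE_bool('log_rgb_camera', False,
                  'True to log the center RGB camera frames.')
flags.DEFINE_bool('log_obstacles', False,
                  'True to run the perfect detector and log bounding boxes.')
flags.DEFINE_string('control_agent', 'carla_auto_pilot',
                    'Agent that closes the control loop.')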
Exemple #20
0
def create_data_flow():
    """ Create the challenge data-flow graph."""
    streams_to_send_top_on = []
    time_to_decision_loop_stream = erdos.LoopStream()
    camera_setups = create_camera_setups()
    camera_streams = {}
    for name in camera_setups:
        camera_streams[name] = erdos.IngestStream()
    global_trajectory_stream = erdos.IngestStream()
    open_drive_stream = erdos.IngestStream()
    point_cloud_stream = erdos.IngestStream()
    imu_stream = erdos.IngestStream()
    gnss_stream = erdos.IngestStream()
    route_stream = erdos.IngestStream()

    if FLAGS.localization:
        pose_stream = pylot.operator_creator.add_localization(
            imu_stream, gnss_stream, route_stream)
    else:
        pose_stream = erdos.IngestStream()

    ground_obstacles_stream = erdos.IngestStream()
    ground_traffic_lights_stream = erdos.IngestStream()
    vehicle_id_stream = erdos.IngestStream()
    if not (FLAGS.perfect_obstacle_tracking or FLAGS.carla_localization):
        streams_to_send_top_on.append(vehicle_id_stream)

    if FLAGS.carla_obstacle_detection:
        obstacles_stream = ground_obstacles_stream
    elif any('efficientdet' in model
             for model in FLAGS.obstacle_detection_model_names):
        obstacles_stream = pylot.operator_creator.\
            add_efficientdet_obstacle_detection(
                camera_streams[CENTER_CAMERA_NAME],
                time_to_decision_loop_stream)[0]
        streams_to_send_top_on.append(ground_obstacles_stream)
    else:
        obstacles_stream = pylot.operator_creator.add_obstacle_detection(
            camera_streams[CENTER_CAMERA_NAME],
            time_to_decision_loop_stream)[0]
        streams_to_send_top_on.append(ground_obstacles_stream)

    if FLAGS.carla_traffic_light_detection:
        traffic_lights_stream = ground_traffic_lights_stream
        camera_streams[TL_CAMERA_NAME] = erdos.IngestStream()
        streams_to_send_top_on.append(camera_streams[TL_CAMERA_NAME])
    else:
        traffic_lights_stream = \
            pylot.operator_creator.add_traffic_light_detector(
                camera_streams[TL_CAMERA_NAME])
        # Adds an operator that finds the world location of the traffic lights.
        traffic_lights_stream = \
            pylot.operator_creator.add_obstacle_location_finder(
                traffic_lights_stream, point_cloud_stream, pose_stream,
                camera_setups[TL_CAMERA_NAME])
        streams_to_send_top_on.append(ground_traffic_lights_stream)

    obstacles_tracking_stream = pylot.component_creator.add_obstacle_tracking(
        camera_streams[CENTER_CAMERA_NAME],
        camera_setups[CENTER_CAMERA_NAME],
        obstacles_stream,
        depth_stream=point_cloud_stream,
        vehicle_id_stream=vehicle_id_stream,
        pose_stream=pose_stream,
        ground_obstacles_stream=ground_obstacles_stream,
        time_to_decision_stream=time_to_decision_loop_stream)

    if FLAGS.execution_mode == 'challenge-sensors':
        lanes_stream = pylot.operator_creator.add_lanenet_detection(
            camera_streams[LANE_CAMERA_NAME])
    else:
        lanes_stream = erdos.IngestStream()
        streams_to_send_top_on.append(lanes_stream)

    prediction_stream = pylot.operator_creator.add_linear_prediction(
        obstacles_tracking_stream)

    waypoints_stream = pylot.component_creator.add_planning(
        None, pose_stream, prediction_stream, traffic_lights_stream,
        lanes_stream, open_drive_stream, global_trajectory_stream,
        time_to_decision_loop_stream)

    if pylot.flags.must_visualize():
        control_display_stream, ingest_streams = \
            pylot.operator_creator.add_visualizer(
                pose_stream=pose_stream,
                camera_stream=camera_streams[CENTER_CAMERA_NAME],
                tl_camera_stream=camera_streams[TL_CAMERA_NAME],
                point_cloud_stream=point_cloud_stream,
                obstacles_stream=obstacles_stream,
                traffic_lights_stream=traffic_lights_stream,
                tracked_obstacles_stream=obstacles_tracking_stream,
                waypoints_stream=waypoints_stream,
                lane_detection_stream=lanes_stream,
                prediction_stream=prediction_stream)
        streams_to_send_top_on += ingest_streams
    else:
        control_display_stream = None

    control_stream = pylot.operator_creator.add_pid_control(
        pose_stream, waypoints_stream)
    extract_control_stream = erdos.ExtractStream(control_stream)

    time_to_decision_stream = pylot.operator_creator.add_time_to_decision(
        pose_stream, obstacles_stream)
    time_to_decision_loop_stream.set(time_to_decision_stream)

    return (camera_streams, pose_stream, route_stream,
            global_trajectory_stream, open_drive_stream, point_cloud_stream,
            imu_stream, gnss_stream, extract_control_stream,
            control_display_stream, ground_obstacles_stream,
            ground_traffic_lights_stream, vehicle_id_stream,
            streams_to_send_top_on)
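At run time, a challenge agent feeds the ingest streams returned above once per simulator tick and then blocks on the extracted control stream. The helper below is only a simplified sketch: it assumes the camera frames and the pose have already been wrapped in ERDOS/Pylot message types, ignores the lidar, IMU, GNSS, and ground-truth ingest streams, and assumes erdos.Timestamp(coordinates=...) and ExtractStream.read() as in the public ERDOS Python API.

def tick(game_time, camera_frame_msgs, pose_msg, camera_streams, pose_stream,
         extract_control_stream):
    timestamp = erdos.Timestamp(coordinates=[game_time])
    # Push one frame per camera and close the timestamp with a watermark.
    for name, frame_msg in camera_frame_msgs.items():
        camera_streams[name].send(frame_msg)
        camera_streams[name].send(erdos.WatermarkMessage(timestamp))
    pose_stream.send(pose_msg)
    pose_stream.send(erdos.WatermarkMessage(timestamp))
    # Block until the PID controller emits the command for this timestamp;
    # watermarks may be interleaved on the extract stream.
    control_msg = extract_control_stream.read()
    while isinstance(control_msg, erdos.WatermarkMessage):
        control_msg = extract_control_stream.read()
    return control_msg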
Exemple #21
0
def driver():
    csv_logger = erdos.utils.setup_csv_logging("head-csv", FLAGS.csv_log_file_name)
    csv_logger.info('TIME,SIMTIME,CAMERA,GROUNDID,LABEL,X,Y,Z,ERROR')

    streams_to_send_top_on = []
    control_loop_stream = erdos.LoopStream()
    time_to_decision_loop_stream = erdos.LoopStream()
    if FLAGS.simulator_mode == 'pseudo-asynchronous':
        release_sensor_stream = erdos.LoopStream()
        pipeline_finish_notify_stream = erdos.LoopStream()
    else:
        release_sensor_stream = erdos.IngestStream()
        pipeline_finish_notify_stream = erdos.IngestStream()
    notify_streams = []

    # Create operator that bridges between pipeline and the simulator.
    (
        pose_stream,
        pose_stream_for_control,
        ground_traffic_lights_stream,
        ground_obstacles_stream,
        ground_speed_limit_signs_stream,
        ground_stop_signs_stream,
        vehicle_id_stream,
        open_drive_stream,
        global_trajectory_stream,
    ) = pylot.operator_creator.add_simulator_bridge(
        control_loop_stream,
        release_sensor_stream,
        pipeline_finish_notify_stream,
    )

    # Add sensors.
    front_transform = pylot.utils.Transform(FRONT_CAMERA_LOCATION,
                                            pylot.utils.Rotation(pitch=-5))
    # TODO: Is yaw=180 the correct rotation for the back camera?
    back_transform = pylot.utils.Transform(
        BACK_CAMERA_LOCATION, pylot.utils.Rotation(pitch=0, yaw=180))

    #camera_names = ["FRONT", "BACK"]
    #transforms = [front_transform, back_transform]
    camera_names = ["FRONT"]
    transforms = [front_transform]
    # add_cameras is a project-specific helper that creates one RGB, depth,
    # and segmented camera plus a lidar per transform and returns the streams
    # as parallel lists (a hypothetical sketch of it appears after this
    # driver).
    (rgb_camera_streams, rgb_camera_setups, depth_camera_streams,
     segmented_camera_streams, point_cloud_streams, lidar_setups,
     depth_streams) = add_cameras(vehicle_id_stream, release_sensor_stream,
                                  notify_streams, transforms)

    # Add a bird's-eye camera.
    top_down_transform = pylot.utils.get_top_down_transform(
        pylot.utils.Transform(pylot.utils.Location(), pylot.utils.Rotation()),
        FLAGS.top_down_camera_altitude)
    (bird_camera_stream, notify_bird_stream, bird_setup) = \
        pylot.operator_creator.add_bird_camera(
            top_down_transform, vehicle_id_stream, release_sensor_stream)
    notify_streams.append(notify_bird_stream)

    imu_stream = None
    if pylot.flags.must_add_imu_sensor():
        (imu_stream, _) = pylot.operator_creator.add_imu(
            pylot.utils.Transform(location=pylot.utils.Location(),
                                  rotation=pylot.utils.Rotation()),
            vehicle_id_stream)

    gnss_stream = None
    if pylot.flags.must_add_gnss_sensor():
        (gnss_stream, _) = pylot.operator_creator.add_gnss(
            pylot.utils.Transform(location=pylot.utils.Location(),
                                  rotation=pylot.utils.Rotation()),
            vehicle_id_stream)

    # ------------------------------------------------------------------------

    # IGNORE: not used in this configuration; the pose stream from the
    # simulator bridge is used directly.
    if FLAGS.localization:
        pose_stream = pylot.operator_creator.add_localization(
            imu_stream, gnss_stream, pose_stream)

    # for each camera
    obstacles_streams = []
    perfect_obstacles_streams = []
    obstacles_error_streams = []
    for i in range(len(transforms)):
        transform = transforms[i]
        rgb_camera_stream = rgb_camera_streams[i]
        rgb_camera_setup = rgb_camera_setups[i]
        depth_camera_stream = depth_camera_streams[i]
        ground_segmented_stream = segmented_camera_streams[i]
        point_cloud_stream = point_cloud_streams[i]
        lidar_setup = lidar_setups[i]
        depth_stream = depth_streams[i]
        eval_name = camera_names[i] + "-eval"

        obstacles_stream, perfect_obstacles_stream, obstacles_error_stream = \
            pylot.component_creator.add_obstacle_detection(
                rgb_camera_stream, rgb_camera_setup, pose_stream,
                depth_stream, depth_camera_stream, ground_segmented_stream,
                ground_obstacles_stream, ground_speed_limit_signs_stream,
                ground_stop_signs_stream, time_to_decision_loop_stream, eval_name)

        print("CREATED COMPONENTS: ", transform.location)

        obstacles_streams.append(obstacles_stream)
        perfect_obstacles_streams.append(perfect_obstacles_stream)
        obstacles_error_streams.append(obstacles_error_stream)

    # ------------------------------------------------------------------------
    # Add all of the other operators (mostly irrelevant here); just use the
    # front camera for all other components.
    center_camera_stream = rgb_camera_streams[0]
    center_camera_setup = rgb_camera_setups[0]
    obstacles_stream = obstacles_streams[0]
    ground_segmented_stream = segmented_camera_streams[0]
    point_cloud_stream = point_cloud_streams[0]
    depth_stream = depth_streams[0]

    tl_transform = pylot.utils.Transform(FRONT_CAMERA_LOCATION,
                                         pylot.utils.Rotation())
    # With the current flags, traffic_lights_stream is the ground traffic
    # lights stream and tl_camera_stream is None.
    traffic_lights_stream, tl_camera_stream = \
        pylot.component_creator.add_traffic_light_detection(
            tl_transform, vehicle_id_stream, release_sensor_stream,
            pose_stream, depth_stream, ground_traffic_lights_stream)

    # Expected to be None with the current flags (no lane detector added).
    lane_detection_stream = pylot.component_creator.add_lane_detection(
        center_camera_stream, pose_stream, open_drive_stream)
    if lane_detection_stream is None:
        lane_detection_stream = erdos.IngestStream()
        streams_to_send_top_on.append(lane_detection_stream)

    # Expected to be None with the current flags (no obstacle tracker added).
    obstacles_tracking_stream = pylot.component_creator.add_obstacle_tracking(
        center_camera_stream, center_camera_setup, obstacles_stream,
        depth_stream, vehicle_id_stream, pose_stream, ground_obstacles_stream,
        time_to_decision_loop_stream)

    # Expected to be None with the current flags (no segmentation operator
    # added).
    segmented_stream = pylot.component_creator.add_segmentation(
        center_camera_stream, ground_segmented_stream)

    # Expected to be None with the current flags (no depth estimation added).
    depth_stream = pylot.component_creator.add_depth(front_transform,
                                                     vehicle_id_stream,
                                                     center_camera_setup,
                                                     depth_camera_stream)

    # IGNORE: fusion is not enabled in this configuration.
    if FLAGS.fusion:
        pylot.operator_creator.add_fusion(pose_stream, obstacles_stream,
                                          depth_stream,
                                          ground_obstacles_stream)
    # All three return values are expected to be None with the current flags.
    prediction_stream, prediction_camera_stream, notify_prediction_stream = \
        pylot.component_creator.add_prediction(
            obstacles_tracking_stream, vehicle_id_stream, front_transform,
            release_sensor_stream, pose_stream, point_cloud_stream,
            lidar_setup)
    if prediction_stream is None:
        prediction_stream = obstacles_stream
    if notify_prediction_stream:
        notify_streams.append(notify_prediction_stream)

    goal_location = pylot.utils.Location(float(FLAGS.goal_location[0]),
                                         float(FLAGS.goal_location[1]),
                                         float(FLAGS.goal_location[2]))

    waypoints_stream = pylot.component_creator.add_planning(
        goal_location, pose_stream, prediction_stream, traffic_lights_stream,
        lane_detection_stream, open_drive_stream, global_trajectory_stream,
        time_to_decision_loop_stream)

    if FLAGS.simulator_mode == "pseudo-asynchronous":
        # Add a synchronizer in the pseudo-asynchronous mode.
        (
            waypoints_stream_for_control,
            pose_stream_for_control,
            sensor_ready_stream,
            _pipeline_finish_notify_stream,
        ) = pylot.operator_creator.add_planning_pose_synchronizer(
            waypoints_stream, pose_stream_for_control, pose_stream,
            *notify_streams)
        release_sensor_stream.set(sensor_ready_stream)
        pipeline_finish_notify_stream.set(_pipeline_finish_notify_stream)
    else:
        waypoints_stream_for_control = waypoints_stream
        pose_stream_for_control = pose_stream


    # Synchronizing on the front camera's perfect obstacles stream -- is
    # that the right stream to synchronize on?
    control_stream = pylot.component_creator.add_control(
        pose_stream_for_control, waypoints_stream_for_control,
        vehicle_id_stream, perfect_obstacles_streams[0])
    control_loop_stream.set(control_stream)

    add_evaluation_operators(vehicle_id_stream, pose_stream, imu_stream,
                             pose_stream_for_control,
                             waypoints_stream_for_control)

    time_to_decision_stream = pylot.operator_creator.add_time_to_decision(
        pose_stream, obstacles_streams[0])
    time_to_decision_loop_stream.set(time_to_decision_stream)

    control_display_stream = None
    if pylot.flags.must_visualize():
        control_display_stream, ingest_streams = \
            pylot.operator_creator.add_visualizer(
                pose_stream, rgb_camera_streams, bird_camera_stream, tl_camera_stream,
                prediction_camera_stream, depth_camera_streams,
                point_cloud_streams, segmented_stream, imu_stream,
                obstacles_streams, obstacles_error_streams, perfect_obstacles_streams,
                traffic_lights_stream, obstacles_tracking_stream, lane_detection_stream,
                prediction_stream, waypoints_stream, control_stream)
        streams_to_send_top_on += ingest_streams

    node_handle = erdos.run_async()

    for stream in streams_to_send_top_on:
        stream.send(erdos.WatermarkMessage(erdos.Timestamp(is_top=True)))
    # If we did not use the pseudo-asynchronous mode, ask the sensors to
    # release their readings whenever they are ready.
    if FLAGS.simulator_mode != "pseudo-asynchronous":
        release_sensor_stream.send(
            erdos.WatermarkMessage(erdos.Timestamp(is_top=True)))
        pipeline_finish_notify_stream.send(
            erdos.WatermarkMessage(erdos.Timestamp(is_top=True)))

    return node_handle, control_display_stream
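The driver above calls a project-specific add_cameras helper whose body is not shown. The sketch below is purely hypothetical: it reconstructs a plausible version from the helpers used elsewhere in this file (add_rgb_camera, add_depth_camera, add_segmented_camera, add_lidar, and component_creator.add_depth), so the unpacking order and default sensor names are assumptions, and multiple cameras would additionally need distinct sensor names.

def add_cameras(vehicle_id_stream, release_sensor_stream, notify_streams,
                transforms):
    """Hypothetical reconstruction: one RGB/depth/segmented camera and one
    lidar per transform, returned as parallel lists."""
    rgb_camera_streams, rgb_camera_setups = [], []
    depth_camera_streams, segmented_camera_streams = [], []
    point_cloud_streams, lidar_setups, depth_streams = [], [], []
    for transform in transforms:
        (rgb_stream, notify_rgb_stream,
         rgb_setup) = pylot.operator_creator.add_rgb_camera(
             transform, vehicle_id_stream, release_sensor_stream)
        notify_streams.append(notify_rgb_stream)
        (depth_camera_stream, notify_depth_stream,
         _) = pylot.operator_creator.add_depth_camera(
             transform, vehicle_id_stream, release_sensor_stream)
        notify_streams.append(notify_depth_stream)
        (segmented_stream, _,
         _) = pylot.operator_creator.add_segmented_camera(
             transform, vehicle_id_stream, release_sensor_stream)
        (point_cloud_stream, _,
         lidar_setup) = pylot.operator_creator.add_lidar(
             transform, vehicle_id_stream, release_sensor_stream)
        # Depth is estimated (or passed through) per camera.
        depth_stream = pylot.component_creator.add_depth(
            transform, vehicle_id_stream, rgb_setup, depth_camera_stream)
        rgb_camera_streams.append(rgb_stream)
        rgb_camera_setups.append(rgb_setup)
        depth_camera_streams.append(depth_camera_stream)
        segmented_camera_streams.append(segmented_stream)
        point_cloud_streams.append(point_cloud_stream)
        lidar_setups.append(lidar_setup)
        depth_streams.append(depth_stream)
    return (rgb_camera_streams, rgb_camera_setups, depth_camera_streams,
            segmented_camera_streams, point_cloud_streams, lidar_setups,
            depth_streams)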