Example #1
    def on_pose_update(self, data):
        self._counter += 1
        if self._counter % self._modulo_to_send != 0:
            return
        loc = Location(data.pose.position.x, data.pose.position.y,
                       data.pose.position.z)
        quaternion = [
            data.pose.orientation.x, data.pose.orientation.y,
            data.pose.orientation.z, data.pose.orientation.w
        ]
        roll, pitch, yaw = euler_from_quaternion(quaternion)
        rotation = Rotation(np.degrees(pitch), np.degrees(yaw),
                            np.degrees(roll))
        timestamp = erdos.Timestamp(coordinates=[self._msg_cnt])
        pose = Pose(Transform(loc, rotation), self._forward_speed)
        self._logger.debug('NDT localization {}'.format(pose))
        self._pose_stream.send(erdos.Message(timestamp, pose))
        self._pose_stream.send(erdos.WatermarkMessage(timestamp))
        self._msg_cnt += 1
Example #2
    def on_point_cloud(self, data):
        self._counter += 1
        if self._counter % self._modulo_to_send != 0:
            return
        timestamp = erdos.Timestamp(coordinates=[self._msg_cnt])
        points = []
        # Iterate with a name other than `data` to avoid shadowing the
        # argument passed to pc2.read_points.
        for point in pc2.read_points(data,
                                     field_names=('x', 'y', 'z'),
                                     skip_nans=True):
            points.append([point[0], point[1], point[2]])
        points = np.array(points)
        point_cloud = pylot.perception.point_cloud.PointCloud(
            points, self._lidar_setup)
        msg = PointCloudMessage(timestamp, point_cloud)
        self._point_cloud_stream.send(msg)
        watermark_msg = erdos.WatermarkMessage(timestamp)
        self._point_cloud_stream.send(watermark_msg)
        self._logger.debug('@{}: sent message'.format(timestamp))
        self._msg_cnt += 1
Example #3
def main():
    ingest_stream = erdos.streams.IngestStream()
    square_stream = ingest_stream.map(lambda x: x * x)

    extract_stream = erdos.streams.ExtractStream(square_stream)

    erdos.run_async()

    count = 0
    while True:
        timestamp = erdos.Timestamp(coordinates=[count])
        send_msg = erdos.Message(timestamp, count)
        print("IngestStream: sending {send_msg}".format(send_msg=send_msg))
        ingest_stream.send(send_msg)
        recv_msg = extract_stream.read()
        print("ExtractStream: received {recv_msg}".format(recv_msg=recv_msg))

        count += 1
        time.sleep(1)
Example #4
def main():
    ingest_stream = erdos.IngestStream()
    (s, ) = erdos.connect(NoopOp, erdos.OperatorConfig(), [ingest_stream])
    extract_stream = erdos.ExtractStream(s)

    handle = erdos.run_async()

    timestamp = erdos.Timestamp(is_top=True)
    send_msg = erdos.WatermarkMessage(timestamp)
    print("IngestStream: sending {send_msg}".format(send_msg=send_msg))
    ingest_stream.send(send_msg)
    assert ingest_stream.is_closed()

    recv_msg = extract_stream.read()
    print("ExtractStream: received {recv_msg}".format(recv_msg=recv_msg))
    assert recv_msg.is_top
    assert extract_stream.is_closed()

    handle.shutdown()
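
Example #4 connects a NoopOp whose definition is not part of this listing. Below is a minimal sketch of what such an operator could look like, assuming the erdos.Operator subclassing API used by the other snippets; the exact add_callback calling convention is an assumption, not taken from the source.

import erdos


class NoopOp(erdos.Operator):
    """Hypothetical pass-through operator: forwards every message unchanged."""
    def __init__(self, read_stream, write_stream):
        # Register a callback that re-sends each received message downstream.
        read_stream.add_callback(self.on_msg, [write_stream])

    @staticmethod
    def connect(read_stream):
        # Declare one output stream for the single input stream.
        return [erdos.WriteStream()]

    def on_msg(self, msg, write_stream):
        write_stream.send(msg)

Because erdos.OperatorConfig() presumably leaves watermark flowing enabled by default (Example #22 disables it explicitly with flow_watermarks=False), the top watermark sent on the IngestStream propagates through the operator, which is why extract_stream.is_closed() holds after the read.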
Example #5
def create_data_flow():
    """ Create the challenge data-flow graph."""
    track = get_track()
    camera_setups = create_camera_setups(track)
    camera_streams = {}
    for name in camera_setups:
        camera_streams[name] = erdos.IngestStream()
    can_bus_stream = erdos.IngestStream()
    global_trajectory_stream = erdos.IngestStream()
    open_drive_stream = erdos.IngestStream()
    if track != Track.ALL_SENSORS_HDMAP_WAYPOINTS:
        # We do not have access to the open drive map. Send top watermark.
        open_drive_stream.send(
            erdos.WatermarkMessage(erdos.Timestamp(coordinates=[sys.maxsize])))

    if (track == Track.ALL_SENSORS
            or track == Track.ALL_SENSORS_HDMAP_WAYPOINTS):
        point_cloud_stream = erdos.IngestStream()
    else:
        point_cloud_stream = pylot.operator_creator.add_depth_estimation(
            camera_streams[LEFT_CAMERA_NAME],
            camera_streams[RIGHT_CAMERA_NAME],
            camera_setups[CENTER_CAMERA_NAME])

    obstacles_stream = pylot.operator_creator.add_obstacle_detection(
        camera_streams[CENTER_CAMERA_NAME])[0]
    traffic_lights_stream = pylot.operator_creator.add_traffic_light_detector(
        camera_streams[TL_CAMERA_NAME])

    waypoints_stream = pylot.operator_creator.add_waypoint_planning(
        can_bus_stream, open_drive_stream, global_trajectory_stream, None)

    if FLAGS.visualize_rgb_camera:
        pylot.operator_creator.add_camera_visualizer(
            camera_streams[CENTER_CAMERA_NAME], CENTER_CAMERA_NAME)

    control_stream = pylot.operator_creator.add_pylot_agent(
        can_bus_stream, waypoints_stream, traffic_lights_stream,
        obstacles_stream, point_cloud_stream, open_drive_stream,
        camera_setups[CENTER_CAMERA_NAME])
    extract_control_stream = erdos.ExtractStream(control_stream)
    return (camera_streams, can_bus_stream, global_trajectory_stream,
            open_drive_stream, point_cloud_stream, extract_control_stream)
Example #6
    def setup(self, path_to_conf_file):
        super(ERDOSAgent, self).setup(path_to_conf_file)
        self._camera_setups = create_camera_setups()
        self._lidar_setup = create_lidar_setup()

        self._sent_open_drive = False

        # Create the dataflow of AV components. Change the method
        # to add your operators.
        (self._camera_streams, self._pose_stream, self._route_stream,
         self._global_trajectory_stream, self._open_drive_stream,
         self._point_cloud_stream, self._imu_stream, self._gnss_stream,
         self._control_stream, self._control_display_stream,
         self._perfect_obstacles_stream, self._perfect_traffic_lights_stream,
         self._vehicle_id_stream, streams_to_send_top_on) = create_data_flow()
        # Execute the dataflow.
        self._node_handle = erdos.run_async()
        # Close the streams that are not used (i.e., send top watermark).
        for stream in streams_to_send_top_on:
            stream.send(erdos.WatermarkMessage(erdos.Timestamp(is_top=True)))
Example #7
def main():
    ingest_stream = erdos.IngestStream()
    (square_stream, ) = erdos.connect(Map,
                                      erdos.OperatorConfig(), [ingest_stream],
                                      function=square_msg)
    extract_stream = erdos.ExtractStream(square_stream)

    erdos.run_async()

    count = 0
    while True:
        timestamp = erdos.Timestamp(coordinates=[count])
        send_msg = erdos.Message(timestamp, count)
        print("IngestStream: sending {send_msg}".format(send_msg=send_msg))
        ingest_stream.send(send_msg)
        recv_msg = extract_stream.read()
        print("ExtractStream: received {recv_msg}".format(recv_msg=recv_msg))

        count += 1
        time.sleep(1)
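
Example #7 relies on a square_msg function that the listing omits. A possible sketch, assuming the Map helper hands the whole erdos.Message to the function and forwards whatever it returns (both the function body and that calling convention are assumptions):

def square_msg(msg):
    # Hypothetical mapping function: squares the payload, keeps the timestamp.
    return erdos.Message(msg.timestamp, msg.data * msg.data)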
Example #8
    def process_imu(self, imu_msg):
        """ Invoked when an IMU message is received from the simulator.
        Sends IMU measurements to downstream operators.

        Args:
            imu_msg: carla.IMUMeasurement
        """
        with self._lock:
            game_time = int(imu_msg.timestamp * 1000)
            timestamp = erdos.Timestamp(coordinates=[game_time])
            watermark_msg = erdos.WatermarkMessage(timestamp)

            msg = IMUMessage(Transform.from_carla_transform(imu_msg.transform),
                             Vector3D.from_carla_vector(imu_msg.accelerometer),
                             Vector3D.from_carla_vector(imu_msg.gyroscope),
                             imu_msg.compass, timestamp)
            self._imu_stream.send(msg)
            # Note: The operator is set not to automatically propagate
            # watermark messages received on input streams. Thus, we can
            # issue watermarks only after the Carla callback is invoked.
            self._imu_stream.send(watermark_msg)
Example #9
    def process_gnss(self, gnss_msg):
        """Invoked when a GNSS message is received from the simulator.

        Sends GNSS measurements to downstream operators.

        Args:
            gnss_msg (carla.GnssMeasurement): GNSS reading.
        """
        game_time = int(gnss_msg.timestamp * 1000)
        timestamp = erdos.Timestamp(coordinates=[game_time])
        watermark_msg = erdos.WatermarkMessage(timestamp)
        with erdos.profile(self.config.name + '.process_gnss',
                           self,
                           event_data={'timestamp': str(timestamp)}):
            with self._lock:
                msg = GNSSMessage(
                    timestamp,
                    Transform.from_carla_transform(gnss_msg.transform),
                    gnss_msg.altitude, gnss_msg.latitude, gnss_msg.longitude)
                self._gnss_stream.send(msg)
                self._gnss_stream.send(watermark_msg)
Example #10
    def process_lane_invasion(self, lane_invasion_event):
        """Invoked when a lane invasion event is received from the simulation.

        Args:
            lane_invasion_event (:py:class:`carla.LaneInvasionEvent`): A lane-
                invasion event that contains the lane marking which was
                invaded by the ego-vehicle.
        """
        game_time = int(lane_invasion_event.timestamp * 1000)
        self._logger.debug(
            "@[{}]: Received a lane-invasion event from the simulator".format(
                game_time))

        # Create the lane markings that were invaded.
        lane_markings = []
        for lane_marking in lane_invasion_event.crossed_lane_markings:
            lane_marking = LaneMarking.from_simulator_lane_marking(
                lane_marking)
            lane_markings.append(lane_marking)

        # Find the type of the lane that was invaded.
        closest_wp = self._map.get_waypoint(
            self._vehicle.get_transform().location,
            project_to_road=False,
            lane_type=carla.LaneType.Any)
        lane_type = LaneType.NONE
        if closest_wp:
            lane_type = LaneType(closest_wp.lane_type)

        # Create a LaneInvasionMessage.
        timestamp = erdos.Timestamp(coordinates=[game_time])
        msg = LaneInvasionMessage(lane_markings, lane_type, timestamp)

        # Send the LaneInvasionMessage
        self._lane_invasion_stream.send(msg)
        # TODO(ionel): This code will fail if process_lane_invasion is
        # called twice for the same timestamp.
        self._lane_invasion_stream.send(erdos.WatermarkMessage(timestamp))
Example #11
    def send_actor_data(self, msg):
        """ Callback function that gets called when the world is ticked.
        This function sends a WatermarkMessage to the downstream operators as
        a signal that they need to release data to the rest of the pipeline.

        Args:
            msg: Data received from the simulation at a tick.
        """
        # Ensure that the callback executes serially.
        with self._lock:
            game_time = int(msg.elapsed_seconds * 1000)
            self._logger.info(
                'The world is at the timestamp {}'.format(game_time))
            timestamp = erdos.Timestamp(coordinates=[game_time])
            watermark_msg = erdos.WatermarkMessage(timestamp)
            with erdos.profile(self.config.name + '.send_actor_data',
                               self,
                               event_data={'timestamp': str(timestamp)}):
                if (self._flags.carla_localization_frequency == -1
                        or self._next_localization_sensor_reading is None or
                        game_time == self._next_localization_sensor_reading):
                    if self._flags.carla_mode == 'pseudo-asynchronous':
                        self._update_next_localization_pseudo_async_ticks(
                            game_time)
                    self.__send_hero_vehicle_data(self.pose_stream, timestamp,
                                                  watermark_msg)
                    self.__send_ground_actors_data(timestamp, watermark_msg)
                    self.__update_spectactor_pose()

                if self._flags.carla_mode == "pseudo-asynchronous" and (
                        self._flags.carla_control_frequency == -1
                        or self._next_control_sensor_reading is None
                        or game_time == self._next_control_sensor_reading):
                    self._update_next_control_pseudo_asynchronous_ticks(
                        game_time)
                    self.__send_hero_vehicle_data(self.pose_stream_for_control,
                                                  timestamp, watermark_msg)
                    self.__update_spectactor_pose()
Example #12
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-p', '--payload', default=8, type=int)
    parser.add_argument('-s', '--scenario', default="dataflow")
    parser.add_argument('-n', '--name', default="test")
    parser.add_argument('-i', '--interval', default=1, type=int)
    parser.add_argument('-t', '--transport', default="tcp")
    parser.add_argument('-g', '--graph-file')

    args = parser.parse_args()
    """Creates and runs the dataflow graph."""
    ingest_stream = erdos.IngestStream()
    (pong_stream, ) = erdos.connect(PongOp, erdos.OperatorConfig(),
                                    [ingest_stream])
    extract_stream = erdos.ExtractStream(pong_stream)

    if args.graph_file is not None:
        erdos.run_async(args.graph_file)
    else:
        erdos.run_async()

    count = 0
    payload = '0' * args.payload

    while True:
        msg = erdos.Message(erdos.Timestamp(coordinates=[count]), payload)
        t0 = datetime.datetime.now()
        ingest_stream.send(msg)
        #print("PingOp: sending {msg}".format(msg=msg))
        count += 1
        data = extract_stream.read()
        t1 = datetime.datetime.now()
        #print("PingOp: received {msg}".format(msg=data))
        t = t1 - t0
        print(
            f"erdos-{args.transport},{args.scenario},latency,{args.name},{args.payload},{data.timestamp.coordinates[0]},{t.microseconds}"
        )
        time.sleep(args.interval)
Example #13
    def process_images(self, carla_image):
        """ Invoked when an image is received from the simulator.

        Args:
            carla_image: a carla.Image.
        """
        # Ensure that the code executes serially
        with self._lock:
            game_time = int(carla_image.timestamp * 1000)
            timestamp = erdos.Timestamp(coordinates=[game_time])
            watermark_msg = erdos.WatermarkMessage(timestamp)

            msg = None
            if self._camera_setup.camera_type == 'sensor.camera.rgb':
                msg = FrameMessage(
                    timestamp,
                    CameraFrame.from_carla_frame(carla_image,
                                                 self._camera_setup))
            elif self._camera_setup.camera_type == 'sensor.camera.depth':
                # Include the transform relative to the vehicle.
                # carla_image.transform returns the world transform, but we do
                # not use it directly.
                msg = DepthFrameMessage(
                    timestamp,
                    DepthFrame.from_carla_frame(carla_image,
                                                self._camera_setup))
            elif self._camera_setup.camera_type == \
                 'sensor.camera.semantic_segmentation':
                msg = SegmentedFrameMessage(
                    timestamp,
                    SegmentedFrame.from_carla_image(carla_image,
                                                    self._camera_setup))
            # Send the message containing the frame.
            self._camera_stream.send(msg)
            # Note: The operator is set not to automatically propagate
            # watermark messages received on input streams. Thus, we can
            # issue watermarks only after the Carla callback is invoked.
            self._camera_stream.send(watermark_msg)
Example #14
    def process_imu(self, imu_msg):
        """Invoked when an IMU measurement is received from the simulator.

        Sends IMU measurements to downstream operators.
        """
        game_time = int(imu_msg.timestamp * 1000)
        timestamp = erdos.Timestamp(coordinates=[game_time])
        watermark_msg = erdos.WatermarkMessage(timestamp)
        with erdos.profile(self.config.name + '.process_imu',
                           self,
                           event_data={'timestamp': str(timestamp)}):
            with self._lock:
                msg = IMUMessage(
                    timestamp,
                    Transform.from_simulator_transform(imu_msg.transform),
                    Vector3D.from_simulator_vector(imu_msg.accelerometer),
                    Vector3D.from_simulator_vector(imu_msg.gyroscope),
                    imu_msg.compass)
                self._imu_stream.send(msg)
                # Note: The operator is set not to automatically propagate
                # watermarks received on input streams. Thus, we can issue
                # watermarks only after the simulator callback is invoked.
                self._imu_stream.send(watermark_msg)
Example #15
    def process_collision(self, collision_event):
        """ Invoked when a collision event is received from the simulation.

        The collision event contains the impulse, location and the object with
        which the ego-vehicle collided.
        """
        game_time = int(collision_event.timestamp * 1000)
        self._logger.debug(
            "@[{}]: Received a collision event from the simulator.".format(
                game_time))

        # Create a CollisionMessage.
        timestamp = erdos.Timestamp(coordinates=[game_time])
        msg = CollisionMessage(
            collision_event.other_actor,
            Vector3D.from_simulator_vector(collision_event.normal_impulse),
            timestamp)

        # Send the CollisionMessage.
        self._collision_stream.send(msg)
        # TODO(ionel): This code will fail if process_collision is called twice
        # for the same timestamp (i.e., if the vehicle collides with two other
        # actors)
        self._collision_stream.send(erdos.WatermarkMessage(timestamp))
Example #16
    def process_point_clouds(self, carla_pc):
        """ Invoked when a pointcloud is received from the simulator.

        Args:
            carla_pc: a carla.SensorData object.
        """
        # Ensure that the code executes serially
        with self._lock:
            game_time = int(carla_pc.timestamp * 1000)
            timestamp = erdos.Timestamp(coordinates=[game_time])
            watermark_msg = erdos.WatermarkMessage(timestamp)

            # Include the transform relative to the vehicle.
            # carla_pc.transform returns the world transform, but we do not
            # use it directly.
            msg = PointCloudMessage(
                PointCloud.from_carla_point_cloud(
                    carla_pc, self._lidar_setup.get_transform()), timestamp)

            self._lidar_stream.send(msg)
            # Note: The operator is set not to automatically propagate
            # watermark messages received on input streams. Thus, we can
            # issue watermarks only after the Carla callback is invoked.
            self._lidar_stream.send(watermark_msg)
Example #17
def driver():
    transform = pylot.utils.Transform(CENTER_CAMERA_LOCATION,
                                      pylot.utils.Rotation())

    control_loop_stream = erdos.LoopStream()
    time_to_decision_loop_stream = erdos.LoopStream()
    if FLAGS.carla_mode == 'pseudo-asynchronous':
        release_sensor_stream = erdos.LoopStream()
    else:
        release_sensor_stream = erdos.IngestStream()
    notify_streams = []

    # Create carla operator.
    (
        pose_stream,
        pose_stream_for_control,
        ground_traffic_lights_stream,
        ground_obstacles_stream,
        ground_speed_limit_signs_stream,
        ground_stop_signs_stream,
        vehicle_id_stream,
        open_drive_stream,
        global_trajectory_stream,
    ) = pylot.operator_creator.add_carla_bridge(control_loop_stream,
                                                release_sensor_stream)

    # Add sensors.
    (center_camera_stream, notify_rgb_stream,
     rgb_camera_setup) = pylot.operator_creator.add_rgb_camera(
         transform, vehicle_id_stream, release_sensor_stream)
    notify_streams.append(notify_rgb_stream)
    if pylot.flags.must_add_depth_camera_sensor():
        (depth_camera_stream, notify_depth_stream,
         depth_camera_setup) = pylot.operator_creator.add_depth_camera(
             transform, vehicle_id_stream, release_sensor_stream)
    else:
        depth_camera_stream = None
    if pylot.flags.must_add_segmented_camera_sensor():
        (ground_segmented_stream, notify_segmented_stream,
         _) = pylot.operator_creator.add_segmented_camera(
             transform, vehicle_id_stream, release_sensor_stream)
    else:
        ground_segmented_stream = None

    if pylot.flags.must_add_lidar_sensor():
        # Place Lidar sensor in the same location as the center camera.
        (point_cloud_stream, notify_lidar_stream,
         lidar_setup) = pylot.operator_creator.add_lidar(
             transform, vehicle_id_stream, release_sensor_stream)
    else:
        point_cloud_stream = None
        # Also define lidar_setup; it is passed to add_prediction below.
        lidar_setup = None

    if FLAGS.obstacle_location_finder_sensor == 'lidar':
        depth_stream = point_cloud_stream
        # Camera sensors are slower than the lidar sensor.
        notify_streams.append(notify_lidar_stream)
    elif FLAGS.obstacle_location_finder_sensor == 'depth_camera':
        depth_stream = depth_camera_stream
        notify_streams.append(notify_depth_stream)
    else:
        raise ValueError(
            'Unknown --obstacle_location_finder_sensor value {}'.format(
                FLAGS.obstacle_location_finder_sensor))

    imu_stream = None
    if pylot.flags.must_add_imu_sensor():
        (imu_stream,
         _) = pylot.operator_creator.add_imu(transform, vehicle_id_stream)

    obstacles_stream = \
        pylot.component_creator.add_obstacle_detection(
            center_camera_stream, rgb_camera_setup, pose_stream,
            depth_stream, depth_camera_stream, ground_segmented_stream,
            ground_obstacles_stream, ground_speed_limit_signs_stream,
            ground_stop_signs_stream, time_to_decision_loop_stream)
    traffic_lights_stream = \
        pylot.component_creator.add_traffic_light_detection(
            transform, vehicle_id_stream, release_sensor_stream, pose_stream,
            depth_stream, ground_traffic_lights_stream)

    lane_detection_stream = pylot.component_creator.add_lane_detection(
        center_camera_stream, pose_stream)

    obstacles_tracking_stream = pylot.component_creator.add_obstacle_tracking(
        center_camera_stream, rgb_camera_setup, obstacles_stream, depth_stream,
        vehicle_id_stream, pose_stream, ground_obstacles_stream,
        time_to_decision_loop_stream)

    segmented_stream = pylot.component_creator.add_segmentation(
        center_camera_stream, ground_segmented_stream)

    depth_stream = pylot.component_creator.add_depth(transform,
                                                     vehicle_id_stream,
                                                     rgb_camera_setup,
                                                     depth_camera_stream)

    if FLAGS.fusion:
        pylot.operator_creator.add_fusion(pose_stream, obstacles_stream,
                                          depth_stream,
                                          ground_obstacles_stream)

    prediction_stream = pylot.component_creator.add_prediction(
        obstacles_tracking_stream, vehicle_id_stream, transform,
        release_sensor_stream, pose_stream, point_cloud_stream, lidar_setup)

    # Add planning operators.
    goal_location = pylot.utils.Location(float(FLAGS.goal_location[0]),
                                         float(FLAGS.goal_location[1]),
                                         float(FLAGS.goal_location[2]))
    waypoints_stream = pylot.component_creator.add_planning(
        goal_location, pose_stream, prediction_stream, center_camera_stream,
        obstacles_stream, traffic_lights_stream, open_drive_stream,
        global_trajectory_stream, time_to_decision_loop_stream)

    # Add a synchronizer in the pseudo-asynchronous mode.
    if FLAGS.carla_mode == "pseudo-asynchronous":
        (
            waypoints_stream_for_control,
            pose_stream_for_control,
            sensor_ready_stream,
        ) = pylot.operator_creator.add_planning_pose_synchronizer(
            waypoints_stream, pose_stream_for_control, pose_stream,
            *notify_streams)
        release_sensor_stream.set(sensor_ready_stream)
    else:
        waypoints_stream_for_control = waypoints_stream
        pose_stream_for_control = pose_stream

    # Add the behaviour planning and control operator.
    control_stream = pylot.component_creator.add_control(
        pose_stream_for_control, waypoints_stream_for_control)
    control_loop_stream.set(control_stream)

    if FLAGS.evaluation:
        # Add the collision sensor.
        collision_stream = pylot.operator_creator.add_collision_sensor(
            vehicle_id_stream)

        # Add the lane invasion sensor.
        lane_invasion_stream = pylot.operator_creator.add_lane_invasion_sensor(
            vehicle_id_stream)

        # Add the traffic light invasion sensor.
        traffic_light_invasion_stream = \
            pylot.operator_creator.add_traffic_light_invasion_sensor(
                vehicle_id_stream, pose_stream)

        # Add the evaluation logger.
        pylot.operator_creator.add_eval_metric_logging(
            collision_stream, lane_invasion_stream,
            traffic_light_invasion_stream, imu_stream, pose_stream,
            obstacles_stream)

        # Add control evaluation logging operator.
        pylot.operator_creator.add_control_evaluation(
            pose_stream_for_control, waypoints_stream_for_control)

    time_to_decision_stream = pylot.operator_creator.add_time_to_decision(
        pose_stream, obstacles_stream)
    time_to_decision_loop_stream.set(time_to_decision_stream)

    pylot.operator_creator.add_sensor_visualizers(center_camera_stream,
                                                  depth_camera_stream,
                                                  point_cloud_stream,
                                                  ground_segmented_stream,
                                                  imu_stream, pose_stream)
    erdos.run_async()

    # If we did not use the pseudo-asynchronous mode, ask the sensors to
    # release their readings whenever.
    if FLAGS.carla_mode != "pseudo-asynchronous":
        release_sensor_stream.send(
            erdos.WatermarkMessage(erdos.Timestamp(is_top=True)))
Example #18
def main(argv):
    transform = pylot.utils.Transform(CENTER_CAMERA_LOCATION,
                                      pylot.utils.Rotation())

    control_loop_stream = erdos.LoopStream()
    release_sensor_stream = erdos.IngestStream()
    pipeline_finish_notify_stream = erdos.IngestStream()
    # Create an operator that connects to the simulator.
    (
        pose_stream,
        pose_stream_for_control,
        ground_traffic_lights_stream,
        ground_obstacles_stream,
        ground_speed_limit_signs_stream,
        ground_stop_signs_stream,
        vehicle_id_stream,
        open_drive_stream,
        global_trajectory_stream,
    ) = pylot.operator_creator.add_simulator_bridge(
        control_loop_stream, release_sensor_stream,
        pipeline_finish_notify_stream)

    # Add sensors.
    (center_camera_stream, notify_rgb_stream,
     rgb_camera_setup) = pylot.operator_creator.add_rgb_camera(
         transform, vehicle_id_stream, release_sensor_stream)
    (depth_camera_stream, _,
     depth_camera_setup) = pylot.operator_creator.add_depth_camera(
         transform, vehicle_id_stream, release_sensor_stream)
    (segmented_stream, _,
     _) = pylot.operator_creator.add_segmented_camera(transform,
                                                      vehicle_id_stream,
                                                      release_sensor_stream)

    if FLAGS.log_rgb_camera:
        pylot.operator_creator.add_camera_logging(
            center_camera_stream, 'center_camera_logger_operator', 'center-')

    if FLAGS.log_segmented_camera:
        pylot.operator_creator.add_camera_logging(
            segmented_stream, 'center_segmented_camera_logger_operator',
            'segmented-')

    if FLAGS.log_depth_camera:
        pylot.operator_creator.add_camera_logging(
            depth_camera_stream, 'depth_camera_logger_operator', 'depth-')

    imu_stream = None
    if FLAGS.log_imu:
        (imu_stream,
         _) = pylot.operator_creator.add_imu(transform, vehicle_id_stream)
        pylot.operator_creator.add_imu_logging(imu_stream)

    traffic_lights_stream = None
    traffic_light_camera_stream = None
    if FLAGS.log_traffic_lights:
        (traffic_light_camera_stream, _,
         traffic_light_camera_setup) = pylot.operator_creator.add_rgb_camera(
             transform, vehicle_id_stream, release_sensor_stream,
             'traffic_light_camera', 45)
        pylot.operator_creator.add_camera_logging(
            traffic_light_camera_stream,
            'traffic_light_camera_logger_operator', 'traffic-light-')
        (traffic_light_segmented_camera_stream, _, _) = \
            pylot.operator_creator.add_segmented_camera(
                transform,
                vehicle_id_stream,
                release_sensor_stream,
                'traffic_light_segmented_camera',
                45)
        (traffic_light_depth_camera_stream, _, _) = \
            pylot.operator_creator.add_depth_camera(
                transform, vehicle_id_stream, release_sensor_stream,
                'traffic_light_depth_camera', 45)
        traffic_lights_stream = \
            pylot.operator_creator.add_perfect_traffic_light_detector(
                ground_traffic_lights_stream,
                traffic_light_camera_stream,
                traffic_light_depth_camera_stream,
                traffic_light_segmented_camera_stream,
                pose_stream)
        pylot.operator_creator.add_bounding_box_logging(traffic_lights_stream)

    if FLAGS.log_left_right_cameras:
        (left_camera_stream, right_camera_stream, _,
         _) = pylot.operator_creator.add_left_right_cameras(
             transform, vehicle_id_stream, release_sensor_stream)
        pylot.operator_creator.add_camera_logging(
            left_camera_stream, 'left_camera_logger_operator', 'left-')
        pylot.operator_creator.add_camera_logging(
            right_camera_stream, 'right_camera_logger_operator', 'right-')

    point_cloud_stream = None
    if FLAGS.log_lidar:
        (point_cloud_stream, _,
         _) = pylot.operator_creator.add_lidar(transform, vehicle_id_stream,
                                               release_sensor_stream)
        pylot.operator_creator.add_lidar_logging(point_cloud_stream)

    obstacles_stream = None
    if FLAGS.log_obstacles:
        obstacles_stream = pylot.operator_creator.add_perfect_detector(
            depth_camera_stream, center_camera_stream, segmented_stream,
            pose_stream, ground_obstacles_stream,
            ground_speed_limit_signs_stream, ground_stop_signs_stream)
        pylot.operator_creator.add_bounding_box_logging(obstacles_stream)

    if FLAGS.log_multiple_object_tracker:
        pylot.operator_creator.add_multiple_object_tracker_logging(
            obstacles_stream)

    obstacles_tracking_stream = None
    if FLAGS.log_trajectories or FLAGS.log_chauffeur:
        obstacles_tracking_stream = \
            pylot.operator_creator.add_perfect_tracking(
                vehicle_id_stream,
                ground_obstacles_stream,
                pose_stream)
        if FLAGS.log_trajectories:
            pylot.operator_creator.add_trajectory_logging(
                obstacles_tracking_stream)

    top_down_segmented_stream = None
    top_down_camera_setup = None
    if FLAGS.log_chauffeur or FLAGS.log_top_down_segmentation:
        top_down_transform = pylot.utils.get_top_down_transform(
            transform, FLAGS.top_down_camera_altitude)
        (top_down_segmented_stream, _, _) = \
            pylot.operator_creator.add_segmented_camera(
                top_down_transform,
                vehicle_id_stream,
                release_sensor_stream,
                name='top_down_segmented_camera',
                fov=90)

        if FLAGS.log_top_down_segmentation:
            pylot.operator_creator.add_camera_logging(
                top_down_segmented_stream,
                'top_down_segmented_logger_operator', 'top-down-segmented-')

        if FLAGS.log_chauffeur:
            (top_down_camera_stream,
             top_down_camera_setup) = \
                pylot.operator_creator.add_rgb_camera(
                    top_down_transform,
                    vehicle_id_stream,
                    name='top_down_rgb_camera',
                    fov=90)
            pylot.operator_creator.add_chauffeur_logging(
                vehicle_id_stream, pose_stream, obstacles_tracking_stream,
                top_down_camera_stream, top_down_segmented_stream,
                top_down_camera_setup)

    # TODO: Hack! We synchronize on a single stream, based on a guesstimate
    # of which stream is slowest. Instead, we should synchronize on all output
    # streams, and we should ensure that even the operators without output
    # streams complete.
    if FLAGS.control == 'simulator_auto_pilot':
        # We insert a synchronizing operator that sends back a command when
        # the low watermark progresses on all input streams.
        stream_to_sync_on = center_camera_stream
        if obstacles_tracking_stream is not None:
            stream_to_sync_on = obstacles_tracking_stream
        if traffic_lights_stream is not None:
            stream_to_sync_on = traffic_lights_stream
        if obstacles_stream is not None:
            stream_to_sync_on = obstacles_stream
        control_stream = pylot.operator_creator.add_synchronizer(
            vehicle_id_stream, stream_to_sync_on)
        control_loop_stream.set(control_stream)
    else:
        raise ValueError(
            "Must be in auto pilot mode. Pass --control=simulator_auto_pilot")

    control_display_stream = None
    streams_to_send_top_on = []
    if pylot.flags.must_visualize():
        control_display_stream, ingest_streams = \
            pylot.operator_creator.add_visualizer(
                pose_stream=pose_stream,
                camera_stream=center_camera_stream,
                tl_camera_stream=traffic_light_camera_stream,
                depth_stream=depth_camera_stream,
                point_cloud_stream=point_cloud_stream,
                segmentation_stream=segmented_stream,
                imu_stream=imu_stream,
                obstacles_stream=obstacles_stream,
                traffic_lights_stream=traffic_lights_stream,
                tracked_obstacles_stream=obstacles_tracking_stream)
        streams_to_send_top_on += ingest_streams

    # Connect an instance to the simulator to make sure that we can turn the
    # synchronous mode off after the script finishes running.
    client, world = pylot.simulation.utils.get_world(FLAGS.simulator_host,
                                                     FLAGS.simulator_port,
                                                     FLAGS.simulator_timeout)

    # Run the data-flow.
    node_handle = erdos.run_async()

    signal.signal(signal.SIGINT, shutdown)

    # Ask all sensors to release their data.
    release_sensor_stream.send(
        erdos.WatermarkMessage(erdos.Timestamp(is_top=True)))
    for stream in streams_to_send_top_on:
        stream.send(erdos.WatermarkMessage(erdos.Timestamp(is_top=True)))

    try:
        if pylot.flags.must_visualize():
            pylot.utils.run_visualizer_control_loop(control_display_stream)
        node_handle.wait()
    except KeyboardInterrupt:
        node_handle.shutdown()
        pylot.simulation.utils.set_asynchronous_mode(world)
        if pylot.flags.must_visualize():
            import pygame
            pygame.quit()
Example #19
    def run_step(self, input_data, timestamp):
        start_time = time.time()
        game_time = int(timestamp * 1000)
        erdos_timestamp = erdos.Timestamp(coordinates=[game_time])

        # Parse the sensor data the agent receives from the leaderboard.
        speed_data = None
        imu_data = None
        gnss_data = None
        for key, val in input_data.items():
            # val is a tuple of (timestamp, data).
            if key in self._camera_streams:
                # The data is for one of the cameras. Transform it to a Pylot
                # CameraFrame, and send it on the camera's corresponding
                # stream.
                self._camera_streams[key].send(
                    pylot.perception.messages.FrameMessage(
                        erdos_timestamp,
                        CameraFrame(val[1][:, :, :3], 'BGR',
                                    self._camera_setups[key])))
                self._camera_streams[key].send(
                    erdos.WatermarkMessage(erdos_timestamp))
            elif key == 'imu':
                imu_data = val[1]
            elif key == 'speed':
                speed_data = val[1]
            elif key == 'gnss':
                gnss_data = val[1]
            elif key == 'opendrive':
                if not self._open_drive_stream.is_closed():
                    # The data is only sent once because it does not change
                    # throughout the duration of a scenario.
                    self._open_drive_stream.send(
                        erdos.Message(erdos_timestamp, val[1]['opendrive']))
                    # Inform the operators that read the open drive stream that
                    # they will not receive any other messages on this stream.
                    self._open_drive_stream.send(
                        erdos.WatermarkMessage(erdos.Timestamp(is_top=True)))
            elif key == 'LIDAR':
                # Send the LiDAR point cloud.
                self.send_lidar_msg(self._point_cloud_stream, val[1],
                                    erdos_timestamp, self._lidar_setup)
            else:
                self.logger.warning("Sensor {} not used".format(key))

        # Send the route the vehicle must follow.
        self.send_global_trajectory_msg(self._global_trajectory_stream,
                                        erdos_timestamp)

        # The following two methods are only relevant when the agent
        # is using perfect perception.
        self.send_vehicle_id_msg(self._vehicle_id_stream)
        self.send_perfect_detections(self._perfect_obstacles_stream,
                                     self._perfect_traffic_lights_stream,
                                     erdos_timestamp, CENTER_CAMERA_LOCATION)

        # Send localization data.
        self.send_localization(erdos_timestamp, imu_data, gnss_data,
                               speed_data)

        # Ensure that the open drive stream is closed when the agent
        # is not running on the MAP track.
        if not self._open_drive_stream.is_closed() and self.track != Track.MAP:
            # We do not have access to the open drive map. Send top watermark.
            self._open_drive_stream.send(
                erdos.WatermarkMessage(erdos.Timestamp(is_top=True)))

        sensor_send_runtime = (time.time() - start_time) * 1000
        self.csv_logger.info('{},{},sensor_send_runtime,{:.4f}'.format(
            pylot.utils.time_epoch_ms(), game_time, sensor_send_runtime))

        process_visualization_events(self._control_display_stream)

        # Return the control command received on the control stream.
        command = read_control_command(self._control_stream)
        e2e_runtime = (time.time() - start_time) * 1000
        self.csv_logger.info('{},{},e2e_runtime,{:.4f}'.format(
            pylot.utils.time_epoch_ms(), game_time, e2e_runtime))
        if FLAGS.simulator_mode == 'synchronous':
            return command
        elif FLAGS.simulator_mode == 'pseudo-asynchronous':
            return command, int(e2e_runtime - sensor_send_runtime)
        else:
            raise ValueError('Unexpected simulator_mode {}'.format(
                FLAGS.simulator_mode))
Example #20
def main(argv):
    (obstacles_stream, traffic_lights_stream, obstacles_tracking_stream,
     open_drive_stream, global_trajectory_stream, control_display_stream,
     streams_to_send_top_on) = create_data_flow()
    # Run the data-flow.
    node_handle = erdos.run_async()
    signal.signal(signal.SIGINT, shutdown)

    # Send waypoints.
    waypoints = Waypoints.read_from_csv_file(FLAGS.waypoints_csv_file,
                                             FLAGS.target_speed)
    global_trajectory_stream.send(
        WaypointsMessage(
            erdos.Timestamp(coordinates=[0]), waypoints,
            pylot.planning.utils.BehaviorPlannerState.FOLLOW_WAYPOINTS))

    # Send top watermark on all streams that require it.
    top_msg = erdos.WatermarkMessage(erdos.Timestamp(is_top=True))
    open_drive_stream.send(top_msg)
    global_trajectory_stream.send(top_msg)
    for stream in streams_to_send_top_on:
        stream.send(top_msg)

    time_to_sleep = 1.0 / FLAGS.sensor_frequency
    count = 0
    try:
        while True:
            timestamp = erdos.Timestamp(coordinates=[count])
            if not FLAGS.obstacle_detection:
                obstacles_stream.send(ObstaclesMessage(timestamp, []))
                obstacles_stream.send(erdos.WatermarkMessage(timestamp))
            if not FLAGS.traffic_light_detection:
                traffic_lights_stream.send(TrafficLightsMessage(timestamp, []))
                traffic_lights_stream.send(erdos.WatermarkMessage(timestamp))
            if not FLAGS.obstacle_tracking:
                obstacles_tracking_stream.send(
                    ObstacleTrajectoriesMessage(timestamp, []))
                obstacles_tracking_stream.send(
                    erdos.WatermarkMessage(timestamp))
            count += 1
            if pylot.flags.must_visualize():
                import pygame
                from pygame.locals import K_n
                events = pygame.event.get()
                for event in events:
                    if event.type == pygame.KEYUP:
                        if event.key == K_n:
                            control_display_stream.send(
                                erdos.Message(erdos.Timestamp(coordinates=[0]),
                                              event.key))
                    elif event.type == pygame.QUIT:
                        raise KeyboardInterrupt
                    elif event.type == pygame.KEYDOWN:
                        if (event.key == pygame.K_c
                                and pygame.key.get_mods() & pygame.KMOD_CTRL):
                            raise KeyboardInterrupt

            # NOTE: We should offset sleep time by the time it takes to send
            # the messages.
            time.sleep(time_to_sleep)
    except KeyboardInterrupt:
        node_handle.shutdown()
        if pylot.flags.must_visualize():
            import pygame
            pygame.quit()
Example #21
    def run(self, read_stream: ReadStream, write_stream: WriteStream):
        msg = erdos.Message(erdos.Timestamp(coordinates=[0]), 0)
        print("LoopOp: sending {msg}".format(msg=msg))
        write_stream.send(msg)
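
Example #21 shows only the operator's run method; the snippet does not show how LoopOp is driven. A rough wiring sketch, assuming the LoopStream feedback pattern used in Examples #17 and #22 (the surrounding main function is not from the listing, and LoopOp presumably also registers a callback on read_stream so the message keeps circulating):

def main():
    # Feed the operator's output back into its own input via a LoopStream.
    loop_stream = erdos.LoopStream()
    (stream, ) = erdos.connect(LoopOp, erdos.OperatorConfig(), [loop_stream])
    loop_stream.set(stream)
    erdos.run()  # blocks while the dataflow executes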
Example #22
def driver():
    transform = pylot.utils.Transform(CENTER_CAMERA_LOCATION,
                                      pylot.utils.Rotation(pitch=-15))
    streams_to_send_top_on = []
    control_loop_stream = erdos.LoopStream()
    # time_to_decision_loop_stream = erdos.LoopStream()
    if FLAGS.carla_mode == 'pseudo-asynchronous':
        release_sensor_stream = erdos.LoopStream()
        pipeline_finish_notify_stream = erdos.LoopStream()
    else:
        release_sensor_stream = erdos.IngestStream()
        pipeline_finish_notify_stream = erdos.IngestStream()
    notify_streams = []

    # Create operator that bridges between pipeline and CARLA.
    (
        pose_stream,
        pose_stream_for_control,
        ground_traffic_lights_stream,
        ground_obstacles_stream,
        ground_speed_limit_signs_stream,
        ground_stop_signs_stream,
        vehicle_id_stream,
        open_drive_stream,
        global_trajectory_stream,
    ) = pylot.operator_creator.add_carla_bridge(
        control_loop_stream,
        release_sensor_stream,
        pipeline_finish_notify_stream,
    )

    ######################
    # Add sensors.
    ######################
    (center_camera_stream, notify_rgb_stream,
     center_camera_setup) = pylot.operator_creator.add_rgb_camera(
         transform, vehicle_id_stream, release_sensor_stream)
    notify_streams.append(notify_rgb_stream)
    if pylot.flags.must_add_depth_camera_sensor():
        (depth_camera_stream, notify_depth_stream,
         depth_camera_setup) = pylot.operator_creator.add_depth_camera(
             transform, vehicle_id_stream, release_sensor_stream)
    else:
        depth_camera_stream = None
    if pylot.flags.must_add_segmented_camera_sensor():
        (ground_segmented_stream, notify_segmented_stream,
         _) = pylot.operator_creator.add_segmented_camera(
             transform, vehicle_id_stream, release_sensor_stream)
    else:
        ground_segmented_stream = None

    if pylot.flags.must_add_lidar_sensor():
        # Place LiDAR sensor in the same location as the center camera.
        (point_cloud_stream, notify_lidar_stream,
         lidar_setup) = pylot.operator_creator.add_lidar(
             transform, vehicle_id_stream, release_sensor_stream)
    else:
        point_cloud_stream = None
        lidar_setup = None

    if FLAGS.obstacle_location_finder_sensor == 'lidar':
        depth_stream = point_cloud_stream
        # Camera sensors are slower than the lidar sensor.
        notify_streams.append(notify_lidar_stream)
    elif FLAGS.obstacle_location_finder_sensor == 'depth_camera':
        depth_stream = depth_camera_stream
        notify_streams.append(notify_depth_stream)
    else:
        raise ValueError(
            'Unknown --obstacle_location_finder_sensor value {}'.format(
                FLAGS.obstacle_location_finder_sensor))

    imu_stream = None
    if pylot.flags.must_add_imu_sensor():
        (imu_stream, _) = pylot.operator_creator.add_imu(
            pylot.utils.Transform(location=pylot.utils.Location(),
                                  rotation=pylot.utils.Rotation()),
            vehicle_id_stream)

    gnss_stream = None
    if pylot.flags.must_add_gnss_sensor():
        (gnss_stream, _) = pylot.operator_creator.add_gnss(
            pylot.utils.Transform(location=pylot.utils.Location(),
                                  rotation=pylot.utils.Rotation()),
            vehicle_id_stream)

    if FLAGS.localization:
        pose_stream = pylot.operator_creator.add_localization(
            imu_stream, gnss_stream, pose_stream)

    ######################
    # Add detection.
    ######################

    # obstacles_stream, perfect_obstacles_stream = \
    #     pylot.component_creator.add_obstacle_detection(
    #         center_camera_stream, center_camera_setup, pose_stream,
    #         depth_stream, depth_camera_stream, ground_segmented_stream,
    #         ground_obstacles_stream, ground_speed_limit_signs_stream,
    #         ground_stop_signs_stream, time_to_decision_loop_stream)
    # tl_transform = pylot.utils.Transform(CENTER_CAMERA_LOCATION,
    #                                      pylot.utils.Rotation())
    # traffic_lights_stream, tl_camera_stream = \
    #     pylot.component_creator.add_traffic_light_detection(
    #         tl_transform, vehicle_id_stream, release_sensor_stream,
    #         pose_stream, depth_stream, ground_traffic_lights_stream)
    #
    # lane_detection_stream = pylot.component_creator.add_lane_detection(
    #     center_camera_stream, pose_stream, open_drive_stream)
    # if lane_detection_stream is None:
    #     lane_detection_stream = erdos.IngestStream()
    #     streams_to_send_top_on.append(lane_detection_stream)
    #
    # obstacles_tracking_stream = pylot.component_creator.add_obstacle_tracking(
    #     center_camera_stream, center_camera_setup, obstacles_stream,
    #     depth_stream, vehicle_id_stream, pose_stream, ground_obstacles_stream,
    #     time_to_decision_loop_stream)
    #
    # segmented_stream = pylot.component_creator.add_segmentation(
    #     center_camera_stream, ground_segmented_stream)
    #
    # depth_stream = pylot.component_creator.add_depth(transform,
    #                                                  vehicle_id_stream,
    #                                                  center_camera_setup,
    #                                                  depth_camera_stream)

    ######################
    # Add prediction?.
    ######################

    # if FLAGS.fusion:
    #     pylot.operator_creator.add_fusion(pose_stream, obstacles_stream,
    #                                       depth_stream,
    #                                       ground_obstacles_stream)
    #
    # prediction_stream, prediction_camera_stream, notify_prediction_stream = \
    #     pylot.component_creator.add_prediction(
    #         obstacles_tracking_stream, vehicle_id_stream, transform,
    #         release_sensor_stream, pose_stream, point_cloud_stream,
    #         lidar_setup)
    # if prediction_stream is None:
    #     prediction_stream = obstacles_stream
    # if notify_prediction_stream:
    #     notify_streams.append(notify_prediction_stream)

    ######################
    # Add planning?.
    ######################

    # goal_location = pylot.utils.Location(float(FLAGS.goal_location[0]),
    #                                      float(FLAGS.goal_location[1]),
    #                                      float(FLAGS.goal_location[2]))
    # waypoints_stream = pylot.component_creator.add_planning(
    #     goal_location, pose_stream, prediction_stream, traffic_lights_stream,
    #     lane_detection_stream, open_drive_stream, global_trajectory_stream,
    #     time_to_decision_loop_stream)
    #
    # if FLAGS.carla_mode == "pseudo-asynchronous":
    #     # Add a synchronizer in the pseudo-asynchronous mode.
    #     (
    #         waypoints_stream_for_control,
    #         pose_stream_for_control,
    #         sensor_ready_stream,
    #         _pipeline_finish_notify_stream,
    #     ) = pylot.operator_creator.add_planning_pose_synchronizer(
    #         waypoints_stream, pose_stream_for_control, pose_stream,
    #         *notify_streams)
    #     release_sensor_stream.set(sensor_ready_stream)
    #     pipeline_finish_notify_stream.set(_pipeline_finish_notify_stream)
    # else:
    #     waypoints_stream_for_control = waypoints_stream
    #     pose_stream_for_control = pose_stream

    ######################
    # Add control.
    ######################

    # control_stream = pylot.component_creator.add_control(
    #     pose_stream_for_control, waypoints_stream_for_control,
    #     vehicle_id_stream, perfect_obstacles_stream)

    from test_operator import TestOperator
    op_config = erdos.OperatorConfig(name='test_operator',
                                     flow_watermarks=False,
                                     log_file_name=FLAGS.log_file_name,
                                     csv_log_file_name=FLAGS.csv_log_file_name,
                                     profile_file_name=FLAGS.profile_file_name)
    [control_stream] = erdos.connect(TestOperator, op_config,
                                     [center_camera_stream], FLAGS)
    control_loop_stream.set(control_stream)

    # add_evaluation_operators(vehicle_id_stream, pose_stream, imu_stream,
    #                          pose_stream_for_control,
    #                          waypoints_stream_for_control)

    ######################
    # Add ?.
    ######################
    # time_to_decision_stream = pylot.operator_creator.add_time_to_decision(
    #     pose_stream, obstacles_stream)
    # time_to_decision_loop_stream.set(time_to_decision_stream)

    control_display_stream = None
    ###########################################################################
    # if pylot.flags.must_visualize():
    #     control_display_stream, ingest_streams = \
    #         pylot.operator_creator.add_visualizer(
    #             pose_stream, center_camera_stream, tl_camera_stream,
    #             prediction_camera_stream, depth_camera_stream,
    #             point_cloud_stream, segmented_stream, imu_stream,
    #             obstacles_stream, traffic_lights_stream,
    #             obstacles_tracking_stream, lane_detection_stream,
    #             prediction_stream, waypoints_stream, control_stream)
    #     streams_to_send_top_on += ingest_streams
    if pylot.flags.must_visualize():
        control_display_stream, ingest_streams = \
            pylot.operator_creator.add_visualizer(
                pose_stream, center_camera_stream, None,
                None, depth_camera_stream,
                point_cloud_stream, None, imu_stream,
                None, None,
                None, None,
                None, None, None)
        streams_to_send_top_on += ingest_streams
    ############################################################################

    node_handle = erdos.run_async()

    for stream in streams_to_send_top_on:
        stream.send(erdos.WatermarkMessage(erdos.Timestamp(is_top=True)))
    # If we did not use the pseudo-asynchronous mode, ask the sensors to
    # release their readings whenever.
    if FLAGS.carla_mode != "pseudo-asynchronous":
        release_sensor_stream.send(
            erdos.WatermarkMessage(erdos.Timestamp(is_top=True)))
        pipeline_finish_notify_stream.send(
            erdos.WatermarkMessage(erdos.Timestamp(is_top=True)))

    return node_handle, control_display_stream
Example #23
def driver():
    transform = pylot.utils.Transform(CENTER_CAMERA_LOCATION,
                                      pylot.utils.Rotation(pitch=-15))
    streams_to_send_top_on = []
    control_loop_stream = erdos.LoopStream()
    time_to_decision_loop_stream = erdos.LoopStream()
    if FLAGS.simulator_mode == 'pseudo-asynchronous':
        release_sensor_stream = erdos.LoopStream()
        pipeline_finish_notify_stream = erdos.LoopStream()
    else:
        release_sensor_stream = erdos.IngestStream()
        pipeline_finish_notify_stream = erdos.IngestStream()
    notify_streams = []

    # Create operator that bridges between pipeline and the simulator.
    (
        pose_stream,
        pose_stream_for_control,
        ground_traffic_lights_stream,
        ground_obstacles_stream,
        ground_speed_limit_signs_stream,
        ground_stop_signs_stream,
        vehicle_id_stream,
        open_drive_stream,
        global_trajectory_stream,
    ) = pylot.operator_creator.add_simulator_bridge(
        control_loop_stream,
        release_sensor_stream,
        pipeline_finish_notify_stream,
    )

    # Add sensors.
    (center_camera_stream, notify_rgb_stream,
     center_camera_setup) = pylot.operator_creator.add_rgb_camera(
         transform, vehicle_id_stream, release_sensor_stream)
    notify_streams.append(notify_rgb_stream)
    if pylot.flags.must_add_depth_camera_sensor():
        (depth_camera_stream, notify_depth_stream,
         depth_camera_setup) = pylot.operator_creator.add_depth_camera(
             transform, vehicle_id_stream, release_sensor_stream)
    else:
        depth_camera_stream = None
    if pylot.flags.must_add_segmented_camera_sensor():
        (ground_segmented_stream, notify_segmented_stream,
         _) = pylot.operator_creator.add_segmented_camera(
             transform, vehicle_id_stream, release_sensor_stream)
    else:
        ground_segmented_stream = None

    if pylot.flags.must_add_lidar_sensor():
        # Place LiDAR sensor in the same location as the center camera.
        (point_cloud_stream, notify_lidar_stream,
         lidar_setup) = pylot.operator_creator.add_lidar(
             transform, vehicle_id_stream, release_sensor_stream)
    else:
        point_cloud_stream = None
        lidar_setup = None

    if FLAGS.obstacle_location_finder_sensor == 'lidar':
        depth_stream = point_cloud_stream
        # Camera sensors are slower than the lidar sensor.
        notify_streams.append(notify_lidar_stream)
    elif FLAGS.obstacle_location_finder_sensor == 'depth_camera':
        depth_stream = depth_camera_stream
        notify_streams.append(notify_depth_stream)
    else:
        raise ValueError(
            'Unknown --obstacle_location_finder_sensor value {}'.format(
                FLAGS.obstacle_location_finder_sensor))

    imu_stream = None
    if pylot.flags.must_add_imu_sensor():
        (imu_stream, _) = pylot.operator_creator.add_imu(
            pylot.utils.Transform(location=pylot.utils.Location(),
                                  rotation=pylot.utils.Rotation()),
            vehicle_id_stream)

    gnss_stream = None
    if pylot.flags.must_add_gnss_sensor():
        (gnss_stream, _) = pylot.operator_creator.add_gnss(
            pylot.utils.Transform(location=pylot.utils.Location(),
                                  rotation=pylot.utils.Rotation()),
            vehicle_id_stream)

    if FLAGS.localization:
        pose_stream = pylot.operator_creator.add_localization(
            imu_stream, gnss_stream, pose_stream)

    obstacles_stream, perfect_obstacles_stream, obstacles_error_stream = \
        pylot.component_creator.add_obstacle_detection(
            center_camera_stream, center_camera_setup, pose_stream,
            depth_stream, depth_camera_stream, ground_segmented_stream,
            ground_obstacles_stream, ground_speed_limit_signs_stream,
            ground_stop_signs_stream, time_to_decision_loop_stream)

    tl_transform = pylot.utils.Transform(CENTER_CAMERA_LOCATION,
                                         pylot.utils.Rotation())
    traffic_lights_stream, tl_camera_stream = \
        pylot.component_creator.add_traffic_light_detection(
            tl_transform, vehicle_id_stream, release_sensor_stream,
            pose_stream, depth_stream, ground_traffic_lights_stream)

    lane_detection_stream = pylot.component_creator.add_lane_detection(
        center_camera_stream, pose_stream, open_drive_stream)
    if lane_detection_stream is None:
        lane_detection_stream = erdos.IngestStream()
        streams_to_send_top_on.append(lane_detection_stream)

    obstacles_tracking_stream = pylot.component_creator.add_obstacle_tracking(
        center_camera_stream, center_camera_setup, obstacles_stream,
        depth_stream, vehicle_id_stream, pose_stream, ground_obstacles_stream,
        time_to_decision_loop_stream)

    segmented_stream = pylot.component_creator.add_segmentation(
        center_camera_stream, ground_segmented_stream)

    depth_stream = pylot.component_creator.add_depth(transform,
                                                     vehicle_id_stream,
                                                     center_camera_setup,
                                                     depth_camera_stream)

    if FLAGS.fusion:
        pylot.operator_creator.add_fusion(pose_stream, obstacles_stream,
                                          depth_stream,
                                          ground_obstacles_stream)

    prediction_stream, prediction_camera_stream, notify_prediction_stream = \
        pylot.component_creator.add_prediction(
            obstacles_tracking_stream, vehicle_id_stream, transform,
            release_sensor_stream, pose_stream, point_cloud_stream,
            lidar_setup)
    if prediction_stream is None:
        prediction_stream = obstacles_stream
    if notify_prediction_stream:
        notify_streams.append(notify_prediction_stream)

    goal_location = pylot.utils.Location(float(FLAGS.goal_location[0]),
                                         float(FLAGS.goal_location[1]),
                                         float(FLAGS.goal_location[2]))
    waypoints_stream = pylot.component_creator.add_planning(
        goal_location, pose_stream, prediction_stream, traffic_lights_stream,
        lane_detection_stream, open_drive_stream, global_trajectory_stream,
        time_to_decision_loop_stream)

    if FLAGS.simulator_mode == "pseudo-asynchronous":
        # Add a synchronizer in the pseudo-asynchronous mode.
        (
            waypoints_stream_for_control,
            pose_stream_for_control,
            sensor_ready_stream,
            _pipeline_finish_notify_stream,
        ) = pylot.operator_creator.add_planning_pose_synchronizer(
            waypoints_stream, pose_stream_for_control, pose_stream,
            *notify_streams)
        release_sensor_stream.set(sensor_ready_stream)
        pipeline_finish_notify_stream.set(_pipeline_finish_notify_stream)
    else:
        waypoints_stream_for_control = waypoints_stream
        pose_stream_for_control = pose_stream

    control_stream = pylot.component_creator.add_control(
        pose_stream_for_control, waypoints_stream_for_control,
        vehicle_id_stream, perfect_obstacles_stream)
    control_loop_stream.set(control_stream)

    add_evaluation_operators(vehicle_id_stream, pose_stream, imu_stream,
                             pose_stream_for_control,
                             waypoints_stream_for_control)

    time_to_decision_stream = pylot.operator_creator.add_time_to_decision(
        pose_stream, obstacles_stream)
    time_to_decision_loop_stream.set(time_to_decision_stream)

    control_display_stream = None
    if pylot.flags.must_visualize():
        control_display_stream, ingest_streams = \
            pylot.operator_creator.add_visualizer(
                pose_stream, center_camera_stream, tl_camera_stream,
                prediction_camera_stream, depth_camera_stream,
                point_cloud_stream, segmented_stream, imu_stream,
                obstacles_stream, obstacles_error_stream, traffic_lights_stream,
                obstacles_tracking_stream, lane_detection_stream,
                prediction_stream, waypoints_stream, control_stream)
        streams_to_send_top_on += ingest_streams

    node_handle = erdos.run_async()

    for stream in streams_to_send_top_on:
        stream.send(erdos.WatermarkMessage(erdos.Timestamp(is_top=True)))
    # If we did not use the pseudo-asynchronous mode, ask the sensors to
    # release their readings whenever they are ready.
    if FLAGS.simulator_mode != "pseudo-asynchronous":
        release_sensor_stream.send(
            erdos.WatermarkMessage(erdos.Timestamp(is_top=True)))
        pipeline_finish_notify_stream.send(
            erdos.WatermarkMessage(erdos.Timestamp(is_top=True)))

    return node_handle, control_display_stream
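
The loop over streams_to_send_top_on just before the return is the closing idiom these drivers rely on: every ingest stream that will never carry data receives a watermark at the top timestamp. The same idiom as a small helper; the function name is ours:

import erdos


def close_unused_ingest_streams(streams):
    # A top watermark tells every downstream operator that no message
    # will ever arrive on these streams, so nothing blocks waiting on them.
    for stream in streams:
        stream.send(erdos.WatermarkMessage(erdos.Timestamp(is_top=True)))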
Beispiel #24
0
 def destroy(self):
     self._logger.warn('destroying {}'.format(self.config.name))
     # Sending top watermark because the operator is not flowing
     # watermarks.
     self._obstacles_stream.send(
         erdos.WatermarkMessage(erdos.Timestamp(is_top=True)))
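
Downstream operators observe that closing watermark like any other. A hypothetical sketch of how a consumer of the obstacle stream could detect the shutdown; the callback name and the is_top attribute on the timestamp are assumptions about the Python API:

def on_obstacles_watermark(timestamp):
    # Hypothetical watermark callback (registered via add_watermark_callback).
    # A top timestamp means the upstream operator closed the stream for good.
    if timestamp.is_top:
        print('obstacle stream closed by upstream operator')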
Beispiel #25
0
 def run(self):
     top_timestamp = erdos.Timestamp(coordinates=[sys.maxsize])
     self.write_stream.send(erdos.WatermarkMessage(top_timestamp))
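
This snippet assumes import sys; a watermark at coordinates=[sys.maxsize] plays the same role as the explicit top timestamp used in the neighbouring examples. A sketch of the equivalent with the explicit flag; the helper name is ours:

import erdos


def close_write_stream(write_stream):
    # Same intent as above, expressed with the explicit top timestamp.
    write_stream.send(erdos.WatermarkMessage(erdos.Timestamp(is_top=True)))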
Beispiel #26
0
    def __init__(self,
                 control_stream,
                 can_bus_stream,
                 ground_traffic_lights_stream,
                 ground_obstacles_stream,
                 ground_speed_limit_signs_stream,
                 ground_stop_signs_stream,
                 vehicle_id_stream,
                 open_drive_stream,
                 global_trajectory_stream,
                 name,
                 flags,
                 log_file_name=None,
                 csv_file_name=None):
        """ Initializes the CarlaOperator with the given name.

        Args:
            name: The unique name of the operator.
            flags: A handle to the global flags instance to retrieve the
                configuration.
            log_file_name: The file to log the required information to.
            csv_file_name: The file to log info to in csv format.
        """
        # Register callback on control stream.
        control_stream.add_callback(self.on_control_msg)
        self.can_bus_stream = can_bus_stream
        self.ground_traffic_lights_stream = ground_traffic_lights_stream
        self.ground_obstacles_stream = ground_obstacles_stream
        self.ground_speed_limit_signs_stream = ground_speed_limit_signs_stream
        self.ground_stop_signs_stream = ground_stop_signs_stream
        self.vehicle_id_stream = vehicle_id_stream
        self.open_drive_stream = open_drive_stream
        self.global_trajectory_stream = global_trajectory_stream

        self._name = name
        self._flags = flags
        self._logger = erdos.utils.setup_logging(name, log_file_name)
        self._csv_logger = erdos.utils.setup_csv_logging(
            name + '-csv', csv_file_name)
        # Connect to CARLA and retrieve the world running.
        self._client, self._world = get_world(self._flags.carla_host,
                                              self._flags.carla_port,
                                              self._flags.carla_timeout)
        if self._client is None or self._world is None:
            raise ValueError('There was an issue connecting to the simulator.')

        if self._flags.carla_version == '0.9.5':
            # TODO (Sukrit) :: ERDOS provides no way to retrieve handles to the
            # class objects to do garbage collection. Hence, objects from
            # previous runs of the simulation may persist. We need to clean
            # them up right now. In the future, move this logic to a separate
            # destroy function.
            reset_world(self._world)
        else:
            self._world = self._client.load_world('Town{:02d}'.format(
                self._flags.carla_town))
        # Send open drive string.
        top_timestamp = erdos.Timestamp(coordinates=[sys.maxsize])
        self.open_drive_stream.send(
            erdos.Message(top_timestamp,
                          self._world.get_map().to_opendrive()))
        top_watermark = erdos.WatermarkMessage(top_timestamp)
        self.open_drive_stream.send(top_watermark)
        self.global_trajectory_stream.send(top_watermark)
        # Set the weather.
        weather = get_weathers()[self._flags.carla_weather]
        self._logger.info('Setting the weather to {}'.format(
            self._flags.carla_weather))
        self._world.set_weather(weather)
        # Turn on the synchronous mode so we can control the simulation.
        if self._flags.carla_synchronous_mode:
            set_synchronous_mode(self._world, self._flags.carla_fps)

        # Spawn the required number of vehicles.
        self._vehicles = self._spawn_vehicles(self._flags.carla_num_vehicles)

        # Spawn the vehicle that the pipeline has to drive and send it to
        # the downstream operators.
        self._driving_vehicle = self._spawn_driving_vehicle()

        if (self._flags.carla_version == '0.9.6'
                or self._flags.carla_version == '0.9.7'):
            # People do not move in CARLA versions older than 0.9.6.
            (self._people, ped_control_ids) = self._spawn_people(
                self._flags.carla_num_people)

        # Tick once to ensure that the actors are spawned before the data-flow
        # starts.
        self._tick_at = time.time()
        self._tick_simulator()

        # Start people
        if (self._flags.carla_version == '0.9.6'
                or self._flags.carla_version == '0.9.7'):
            self._start_people(ped_control_ids)
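
The open_drive_stream and global_trajectory_stream sends near the top of this constructor follow a one-shot pattern: publish a single static value at the top timestamp, then close the stream immediately so nothing downstream waits for further updates. The pattern as a compact helper; the function name is ours:

import sys

import erdos


def send_static_value(stream, value):
    # Publish one value that never changes, then close the stream.
    top_timestamp = erdos.Timestamp(coordinates=[sys.maxsize])
    stream.send(erdos.Message(top_timestamp, value))
    stream.send(erdos.WatermarkMessage(top_timestamp))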
Beispiel #27
0
 def run(self):
     msg = erdos.Message(erdos.Timestamp(coordinates=[0]), 0)
     print("LoopOp: sending {msg}".format(msg=msg))
     self.write_stream.send(msg)
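
A run() like this only starts a cycle once the operator's output is wired back into its own input, which is what the erdos.LoopStream() / set() calls in the driver examples above do. A minimal, self-contained sketch of that wiring with a hypothetical echo operator; every name below is ours:

import erdos


class EchoOp(erdos.Operator):
    """Hypothetical operator that forwards every message it reads."""
    def __init__(self, read_stream, write_stream):
        read_stream.add_callback(self.on_msg, [write_stream])

    @staticmethod
    def connect(read_stream):
        return [erdos.WriteStream()]

    def on_msg(self, msg, write_stream):
        write_stream.send(msg)


loop_stream = erdos.LoopStream()
(echo_stream, ) = erdos.connect(EchoOp, erdos.OperatorConfig(), [loop_stream])
loop_stream.set(echo_stream)  # closes the cycle, like control_loop_stream.set()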
Beispiel #28
0
    def run(self):
        # Initialize all the publishers.
        self.enable_pub = rospy.Publisher(ENABLE_TOPIC, Empty, queue_size=10)
        self.disable_pub = rospy.Publisher(DISABLE_TOPIC, Empty, queue_size=10)
        self.throttle_pub = rospy.Publisher(THROTTLE_TOPIC,
                                            ThrottleCmd,
                                            queue_size=10)
        self.brake_pub = rospy.Publisher(BRAKE_TOPIC, BrakeCmd, queue_size=10)
        self.steering_pub = rospy.Publisher(STEERING_TOPIC,
                                            SteeringCmd,
                                            queue_size=10)
        rospy.init_node(self.config.name, anonymous=True, disable_signals=True)

        # Enable the ADAS (the publish call is left commented out here).
        # self.enable_pub.publish(Empty())

        # Pull from the control stream and publish messages continuously.
        r = rospy.Rate(ROS_FREQUENCY)
        last_control_message = ControlMessage(
            steer=0,
            throttle=0,
            brake=0,
            hand_brake=False,
            reverse=False,
            timestamp=erdos.Timestamp(coordinates=[0]))
        while not rospy.is_shutdown():
            control_message = self._control_stream.try_read()
            if control_message is None or isinstance(control_message,
                                                     erdos.WatermarkMessage):
                control_message = last_control_message
            else:
                last_control_message = control_message

            # Send all the commands from a single ControlMessage one after
            # the other.
            steer_angle = control_message.steer * STEERING_ANGLE_MAX
            self._logger.debug("The steering angle is {}".format(steer_angle))
            steer_message = SteeringCmd(enable=True,
                                        clear=True,
                                        ignore=False,
                                        quiet=False,
                                        count=0,
                                        steering_wheel_angle_cmd=steer_angle,
                                        steering_wheel_angle_velocity=0.0)
            self.steering_pub.publish(steer_message)

            throttle_message = ThrottleCmd(
                enable=True,
                ignore=False,
                count=0,
                pedal_cmd_type=ThrottleCmd.CMD_PERCENT,
                pedal_cmd=control_message.throttle)
            self.throttle_pub.publish(throttle_message)

            brake_message = BrakeCmd(enable=True,
                                     ignore=False,
                                     count=0,
                                     pedal_cmd_type=BrakeCmd.CMD_PERCENT,
                                     pedal_cmd=control_message.brake)
            self.brake_pub.publish(brake_message)

            # Sleep so the loop runs at the configured ROS frequency.
            r.sleep()
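
The body of this loop repeats one small pattern: take the newest data message if there is one, otherwise keep reusing the last command. A sketch of that pattern pulled out into a helper; the function name is ours:

import erdos


def latest_or_fallback(stream, fallback):
    # try_read() is non-blocking; watermarks carry no actuation data,
    # so both cases fall back to the previous control message.
    msg = stream.try_read()
    if msg is None or isinstance(msg, erdos.WatermarkMessage):
        return fallback
    return msg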
Beispiel #29
0
    def run_step(self, input_data, timestamp):
        game_time = int(timestamp * 1000)
        self._logger.debug("Current game time {}".format(game_time))
        erdos_timestamp = erdos.Timestamp(coordinates=[game_time])

        if not self._sent_open_drive and self.track != Track.MAP:
            # We do not have access to the open drive map. Send top watermark.
            self._sent_open_drive = True
            self._open_drive_stream.send(
                erdos.WatermarkMessage(erdos.Timestamp(is_top=True)))
        # Send the waypoints.
        self.send_waypoints_msg(erdos_timestamp)

        speed_data = None
        imu_data = None
        gnss_data = None
        carla_pc = None
        for key, val in input_data.items():
            # val is a tuple of (data timestamp, data).
            # print("Sensor {} at {}".format(key, val[0]))
            if key in self._camera_streams:
                self._camera_streams[key].send(
                    pylot.perception.messages.FrameMessage(
                        erdos_timestamp,
                        CameraFrame(val[1][:, :, :3], 'BGR',
                                    self._camera_setups[key])))
                self._camera_streams[key].send(
                    erdos.WatermarkMessage(erdos_timestamp))
            elif key == 'imu':
                imu_data = val[1]
            elif key == 'speed':
                speed_data = val[1]
            elif key == 'gnss':
                gnss_data = val[1]
            elif key == 'opendrive':
                self.send_opendrive_map_msg(val[1], erdos_timestamp)
            elif key == 'LIDAR':
                carla_pc = val[1]
            else:
                self._logger.warning("Sensor {} not used".format(key))

        self.send_vehicle_id_msg()
        self.send_ground_data(erdos_timestamp)

        if FLAGS.localization:
            self.send_imu_msg(imu_data, erdos_timestamp)
            self.send_gnss_msg(gnss_data, erdos_timestamp)
            self.send_initial_pose_msg(erdos_timestamp)
        elif FLAGS.carla_localization:
            self.send_perfect_pose_msg(erdos_timestamp)
        else:
            self.send_pose_msg(speed_data, imu_data, gnss_data,
                               erdos_timestamp)

        if using_lidar():
            self.send_lidar_msg(carla_pc, self._lidar_transform,
                                erdos_timestamp)

        if pylot.flags.must_visualize():
            import pygame
            from pygame.locals import K_n
            events = pygame.event.get()
            for event in events:
                if event.type == pygame.KEYUP:
                    if event.key == K_n:
                        self._control_display_stream.send(
                            erdos.Message(erdos.Timestamp(coordinates=[0]),
                                          event.key))

        # Wait until the control is set.
        while True:
            control_msg = self._control_stream.read()
            if not isinstance(control_msg, erdos.WatermarkMessage):
                output_control = carla.VehicleControl()
                output_control.throttle = control_msg.throttle
                output_control.brake = control_msg.brake
                output_control.steer = control_msg.steer
                output_control.reverse = control_msg.reverse
                output_control.hand_brake = control_msg.hand_brake
                output_control.manual_gear_shift = False
                return output_control
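
The final while-loop blocks until a data (non-watermark) message arrives on the control stream. The same behaviour, extracted into a helper for readability; the function name is ours:

import erdos


def read_next_data_message(stream):
    # read() blocks; skip watermarks until a real control message arrives.
    while True:
        msg = stream.read()
        if not isinstance(msg, erdos.WatermarkMessage):
            return msg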
Beispiel #30
0
def main(argv):
    """ Computes ground obstacle detection and segmentation decay."""
    transform = pylot.utils.Transform(CENTER_CAMERA_LOCATION,
                                      pylot.utils.Rotation())

    control_loop_stream = erdos.LoopStream()
    release_sensor_stream = erdos.IngestStream()
    # Create carla operator.
    (
        pose_stream,
        pose_stream_for_control,
        ground_traffic_lights_stream,
        ground_obstacles_stream,
        ground_speed_limit_signs_stream,
        ground_stop_signs_stream,
        vehicle_id_stream,
        open_drive_stream,
        global_trajectory_stream,
    ) = pylot.operator_creator.add_carla_bridge(control_loop_stream,
                                                release_sensor_stream)

    # Add camera sensors.
    (center_camera_stream, notify_rgb_stream,
     rgb_camera_setup) = pylot.operator_creator.add_rgb_camera(
         transform, vehicle_id_stream, release_sensor_stream)
    (depth_camera_stream, notify_depth_stream,
     depth_camera_setup) = pylot.operator_creator.add_depth_camera(
         transform, vehicle_id_stream, release_sensor_stream)
    (segmented_stream, _,
     _) = pylot.operator_creator.add_segmented_camera(transform,
                                                      vehicle_id_stream,
                                                      release_sensor_stream)

    map_stream = None
    if FLAGS.compute_detection_decay:
        obstacles_stream = pylot.operator_creator.add_perfect_detector(
            depth_camera_stream, center_camera_stream, segmented_stream,
            pose_stream, ground_obstacles_stream,
            ground_speed_limit_signs_stream, ground_stop_signs_stream)
        map_stream = pylot.operator_creator.add_detection_decay(
            obstacles_stream)

    iou_stream = None
    if FLAGS.compute_segmentation_decay:
        iou_stream = pylot.operator_creator.add_segmentation_decay(
            segmented_stream)

    # TODO: Hack! We synchronize on a single stream, based on a guesstimate
    # of which stream is slowest. Instead, we should synchronize on all output
    # streams, and we should ensure that even the operators without output
    # streams complete.
    if FLAGS.control_agent == 'carla_auto_pilot':
        stream_to_sync_on = iou_stream
        if map_stream is not None:
            stream_to_sync_on = map_stream
        op_config = erdos.OperatorConfig(name='synchronizer_operator',
                                         flow_watermarks=False)
        (control_stream, ) = erdos.connect(SynchronizerOperator, op_config,
                                           [stream_to_sync_on], FLAGS)
        control_loop_stream.set(control_stream)
    else:
        raise ValueError(
            "Must be in auto pilot mode. Pass --control_agent=carla_auto_pilot"
        )

    erdos.run_async()

    # Ask all sensors to release their data.
    release_sensor_stream.send(
        erdos.WatermarkMessage(erdos.Timestamp(is_top=True)))