Example #1
    def __init__(self, ground_vehicle_id_stream: erdos.ReadStream,
                 pose_stream: erdos.ReadStream,
                 traffic_light_invasion_stream: erdos.WriteStream, flags):
        # Save the streams.
        self._vehicle_id_stream = ground_vehicle_id_stream
        self._pose_stream = pose_stream
        self._traffic_light_invasion_stream = traffic_light_invasion_stream

        # Register a callback on the pose stream to check whether the ego
        # vehicle is running a red light (a traffic light invasion).
        pose_stream.add_callback(self.on_pose_update)

        # The hero vehicle object we obtain from the simulator.
        self._vehicle = None

        # A list of all the traffic lights and their corresponding affected
        # waypoints.
        self._traffic_lights = []
        self._last_red_light_id = None
        self._world, self._map = None, None

        # Initialize a logger.
        self._flags = flags
        self._logger = erdos.utils.setup_logging(self.config.name,
                                                 self.config.log_file_name)

        # Distance from the traffic light at which to trigger the check.
        self.DISTANCE_LIGHT = 10
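All of the examples on this page follow the same ERDOS operator pattern: the constructor only registers callbacks on its read streams, saves the write streams and flags, and sets up logging. For reference, a minimal, hypothetical operator built the same way might look as follows; the class name, the connect() body, and the callback body are illustrative assumptions, not pylot code.

import erdos


class DistanceCheckOperator(erdos.Operator):
    """Illustrative skeleton of an operator; not part of pylot."""
    def __init__(self, pose_stream: erdos.ReadStream,
                 output_stream: erdos.WriteStream, flags):
        # Register a callback invoked for every pose message; the write
        # stream is passed to the callback because it is listed here.
        pose_stream.add_callback(self.on_pose_update, [output_stream])
        self._flags = flags
        self._logger = erdos.utils.setup_logging(self.config.name,
                                                 self.config.log_file_name)

    @staticmethod
    def connect(pose_stream: erdos.ReadStream):
        # Declare the streams this operator writes to.
        return [erdos.WriteStream()]

    def on_pose_update(self, msg, output_stream: erdos.WriteStream):
        # A real operator would run its check here; this sketch simply
        # forwards the pose data with the same timestamp.
        output_stream.send(erdos.Message(msg.timestamp, msg.data))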
Example #2
    def __init__(self, vehicle_id_stream: erdos.ReadStream,
                 pose_stream: erdos.ReadStream,
                 obstacle_tracking_stream: erdos.ReadStream,
                 top_down_camera_stream: erdos.ReadStream,
                 top_down_segmentation_stream: erdos.ReadStream, flags,
                 top_down_camera_setup):
        """ Initializes the operator with the given parameters.

        Args:
            flags: A handle to the global flags instance to retrieve the
                configuration.
            top_down_camera_setup: The setup of the top down camera.
        """
        vehicle_id_stream.add_callback(self.on_ground_vehicle_id_update)
        pose_stream.add_callback(self.on_pose_update)
        obstacle_tracking_stream.add_callback(self.on_tracking_update)
        top_down_camera_stream.add_callback(self.on_top_down_camera_update)
        top_down_segmentation_stream.add_callback(
            self.on_top_down_segmentation_update)
        self._flags = flags
        self._buffer_length = 10
        self._ground_vehicle_id = None
        self._waypoints = None
        # Holds history of global transforms at each timestep.
        self._global_transforms = deque(maxlen=self._buffer_length)
        self._current_transform = None
        self._previous_transform = None
        self._top_down_camera_setup = top_down_camera_setup
        self._data_path = os.path.join(self._flags.data_path, 'chauffeur')
        os.makedirs(self._data_path, exist_ok=True)
Example #3
 def __init__(self, tracking_stream: ReadStream,
              linear_prediction_stream: WriteStream, flags):
     tracking_stream.add_callback(self.generate_predicted_trajectories,
                                  [linear_prediction_stream])
     self._logger = erdos.utils.setup_logging(self.config.name,
                                              self.config.log_file_name)
     self._flags = flags
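In the example above the callback is registered together with a list of write streams, so erdos hands those streams to the callback after the message itself. A sketch of the matching callback signature, with an illustrative body (the actual extrapolation logic is elided):

    def generate_predicted_trajectories(self, msg,
                                        linear_prediction_stream: WriteStream):
        # msg.data carries the tracked trajectories for one timestamp; a
        # linear predictor would extrapolate each of them into the future.
        predictions = []  # extrapolation elided in this sketch
        linear_prediction_stream.send(erdos.Message(msg.timestamp, predictions))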
Example #4
    def __init__(self, collision_stream: erdos.ReadStream,
                 lane_invasion_stream: erdos.ReadStream,
                 traffic_light_invasion_stream: erdos.ReadStream,
                 imu_stream: erdos.ReadStream, pose_stream: erdos.ReadStream,
                 flags):
        # Save the streams.
        self._collision_stream = collision_stream
        self._lane_invasion_stream = lane_invasion_stream
        self._traffic_light_invasion_stream = traffic_light_invasion_stream
        self._imu_stream = imu_stream
        self._pose_stream = pose_stream

        # Register callbacks.
        collision_stream.add_callback(self.on_collision_update)
        lane_invasion_stream.add_callback(self.on_lane_invasion_update)
        traffic_light_invasion_stream.add_callback(
            self.on_traffic_light_invasion_update)
        imu_stream.add_callback(self.on_imu_update)
        pose_stream.add_callback(self.on_pose_update)

        # Initialize a logger.
        self._flags = flags
        self._logger = erdos.utils.setup_logging(self.config.name,
                                                 self.config.log_file_name)
        self._csv_logger = erdos.utils.setup_csv_logging(
            self.config.name + '-csv', self.config.csv_log_file_name)

        # Save the last acceleration and timestamp.
        self._last_lateral_acc, self._last_longitudinal_acc = None, None
        self._last_timestamp = None
Example #5
 def __init__(self, pose_stream: ReadStream, open_drive_stream: ReadStream,
              detected_lane_stream: WriteStream, flags):
     pose_stream.add_callback(self.on_position_update,
                              [detected_lane_stream])
     self._flags = flags
     self._logger = erdos.utils.setup_logging(self.config.name,
                                              self.config.log_file_name)
Example #6
    def __init__(self, camera_stream: erdos.ReadStream,
                 detected_lanes_stream: erdos.WriteStream, flags):
        camera_stream.add_callback(self.on_camera_frame,
                                   [detected_lanes_stream])
        self._flags = flags
        self._logger = erdos.utils.setup_logging(self.config.name,
                                                 self.config.log_file_name)
        pylot.utils.set_tf_loglevel(logging.ERROR)
        self._input_tensor = tf.placeholder(dtype=tf.float32,
                                            shape=[1, 256, 512, 3],
                                            name='input_tensor')
        net = lanenet.LaneNet(phase='test')
        self._binary_seg_ret, self._instance_seg_ret = net.inference(
            input_tensor=self._input_tensor, name='LaneNet')
        self._gpu_options = tf.GPUOptions(
            allow_growth=True,
            visible_device_list=str(self._flags.lane_detection_gpu_index),
            per_process_gpu_memory_fraction=(
                flags.lane_detection_gpu_memory_fraction),
            allocator_type='BFC')
        self._tf_session = tf.Session(config=tf.ConfigProto(
            gpu_options=self._gpu_options, allow_soft_placement=True))
        with tf.variable_scope(name_or_scope='moving_avg'):
            variable_averages = tf.train.ExponentialMovingAverage(0.9995)
            variables_to_restore = variable_averages.variables_to_restore()

        self._postprocessor = lanenet_postprocess.LaneNetPostProcessor()
        saver = tf.train.Saver(variables_to_restore)
        with self._tf_session.as_default():
            saver.restore(sess=self._tf_session,
                          save_path=flags.lanenet_detection_model_path)
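The constructor above only builds the TF1 graph and restores the LaneNet checkpoint; the session is used for inference later. A sketch of what a per-frame inference call against that session could look like, assuming the frame has already been resized to the 256x512 resolution declared by the placeholder (the helper name and the preprocessing are assumptions):

    def _detect_lanes(self, frame_rgb):
        # frame_rgb: float32 array of shape [256, 512, 3], preprocessed the
        # way the LaneNet model expects.
        binary_seg, instance_seg = self._tf_session.run(
            [self._binary_seg_ret, self._instance_seg_ret],
            feed_dict={self._input_tensor: [frame_rgb]})
        # self._postprocessor would then cluster the instance embeddings
        # into lane curves; its exact API is left out of this sketch.
        return binary_seg[0], instance_seg[0]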
Example #7
 def __init__(self, obstacles_stream: erdos.ReadStream,
              finished_indicator_stream: erdos.WriteStream, flags):
     obstacles_stream.add_callback(self.on_obstacles_msg)
     self._logger = erdos.utils.setup_logging(self.config.name,
                                              self.config.log_file_name)
     self._flags = flags
     self._msg_cnt = 0
Example #8
 def __init__(self, camera_stream: erdos.ReadStream, flags,
              filename_prefix: str):
     camera_stream.add_callback(self.on_frame)
     self._logger = erdos.utils.setup_logging(self.config.name,
                                              self.config.log_file_name)
     self._flags = flags
     self._frame_cnt = 0
     self._filename_prefix = filename_prefix
Example #9
 def __init__(self, lidar_stream: erdos.ReadStream,
              finished_indicator_stream: erdos.WriteStream, flags,
              filename_prefix: str):
     lidar_stream.add_callback(self.on_lidar_frame)
     self._logger = erdos.utils.setup_logging(self.config.name,
                                              self.config.log_file_name)
     self._flags = flags
     self._pc_msg_cnt = 0
     self._filename_prefix = filename_prefix
Example #10
 def __init__(self, obstacle_tracking_stream: erdos.ReadStream,
              finished_indicator_stream: erdos.WriteStream, flags):
     obstacle_tracking_stream.add_callback(self.on_trajectories_msg)
     self._logger = erdos.utils.setup_logging(self.config.name,
                                              self.config.log_file_name)
     self._flags = flags
     self._msg_cnt = 0
     self._data_path = os.path.join(self._flags.data_path, 'trajectories')
     os.makedirs(self._data_path, exist_ok=True)
Example #11
 def __init__(self, imu_stream: erdos.ReadStream,
              finished_indicator_stream: erdos.WriteStream, flags):
     imu_stream.add_callback(self.on_imu_update)
     self._logger = erdos.utils.setup_logging(self.config.name,
                                              self.config.log_file_name)
     self._flags = flags
     self._msg_cnt = 0
     self._data_path = os.path.join(self._flags.data_path, 'imu')
     os.makedirs(self._data_path, exist_ok=True)
Example #12
 def __init__(self, pose_stream: erdos.ReadStream,
              obstacles_stream: erdos.ReadStream,
              time_to_decision_stream: erdos.WriteStream, flags):
     pose_stream.add_callback(self.on_pose_update,
                              [time_to_decision_stream])
     obstacles_stream.add_callback(self.on_obstacles_update)
     self._logger = erdos.utils.setup_logging(self.config.name,
                                              self.config.log_file_name)
     self._last_obstacles_msg = None
Example #13
 def __init__(self, lidar_stream: erdos.ReadStream,
              finished_indicator_stream: erdos.WriteStream, flags,
              filename_prefix: str):
     lidar_stream.add_callback(self.on_lidar_frame)
     self._logger = erdos.utils.setup_logging(self.config.name,
                                              self.config.log_file_name)
     self._flags = flags
     self._pc_msg_cnt = 0
     self._filename_prefix = filename_prefix
     self._data_path = os.path.join(self._flags.data_path, filename_prefix)
     os.makedirs(self._data_path, exist_ok=True)
Example #14
 def __init__(self, obstacles_stream: erdos.ReadStream,
              finished_indicator_stream: erdos.WriteStream, flags):
     # Register a callback on obstacles data stream.
     obstacles_stream.add_callback(self.on_obstacles_msg)
     self._logger = erdos.utils.setup_logging(self.config.name,
                                              self.config.log_file_name)
     self._flags = flags
     self._msg_cnt = 0
     self._data_path = os.path.join(self._flags.data_path,
                                    'multiple_object_tracker')
     os.makedirs(self._data_path, exist_ok=True)
Example #15
 def __init__(self, pose_stream: ReadStream, open_drive_stream: ReadStream,
              center_camera_stream: ReadStream,
              detected_lane_stream: WriteStream, flags):
     pose_stream.add_callback(self.on_pose_update)
     center_camera_stream.add_callback(self.on_bgr_camera_update)
     erdos.add_watermark_callback([pose_stream, center_camera_stream],
                                  [detected_lane_stream],
                                  self.on_position_update)
     self._flags = flags
     self._logger = erdos.utils.setup_logging(self.config.name,
                                              self.config.log_file_name)
     self._bgr_msgs = deque()
     self._pose_msgs = deque()
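This example shows the buffering pattern used throughout these snippets to synchronize streams: the per-stream callbacks only append messages to deques, and the watermark callback, which erdos invokes with the timestamp and the declared write streams once a watermark for that timestamp has arrived on every input stream, pops one message per stream and processes them together. A sketch of what the three callbacks could look like (the lane-extraction body is an assumption):

    def on_pose_update(self, msg):
        # Buffer the pose; it is consumed when the watermark arrives.
        self._pose_msgs.append(msg)

    def on_bgr_camera_update(self, msg):
        self._bgr_msgs.append(msg)

    def on_position_update(self, timestamp, detected_lane_stream):
        # One message per input stream is guaranteed to be buffered by now.
        pose_msg = self._pose_msgs.popleft()
        bgr_msg = self._bgr_msgs.popleft()
        lanes = []  # lane extraction elided in this sketch
        detected_lane_stream.send(erdos.Message(timestamp, lanes))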
Example #16
    def __init__(self, camera_stream: erdos.ReadStream,
                 time_to_decision_stream: erdos.ReadStream,
                 traffic_lights_stream: erdos.WriteStream, flags):
        # Register a callback on the camera input stream.
        camera_stream.add_callback(self.on_frame, [traffic_lights_stream])
        time_to_decision_stream.add_callback(self.on_time_to_decision_update)
        self._logger = erdos.utils.setup_logging(self.config.name,
                                                 self.config.log_file_name)
        self._flags = flags
        self._traffic_lights_stream = traffic_lights_stream
        self._detection_graph = tf.Graph()
        # Load the model from the model file.
        pylot.utils.set_tf_loglevel(logging.ERROR)
        with self._detection_graph.as_default():
            od_graph_def = tf.GraphDef()
            with tf.gfile.GFile(self._flags.traffic_light_det_model_path,
                                'rb') as fid:
                serialized_graph = fid.read()
                od_graph_def.ParseFromString(serialized_graph)
                tf.import_graph_def(od_graph_def, name='')

        self._gpu_options = tf.GPUOptions(
            allow_growth=True,
            visible_device_list=str(self._flags.traffic_light_det_gpu_index),
            per_process_gpu_memory_fraction=(
                flags.traffic_light_det_gpu_memory_fraction))
        # Create a TensorFlow session.
        self._tf_session = tf.Session(
            graph=self._detection_graph,
            config=tf.ConfigProto(gpu_options=self._gpu_options))
        # Get the tensors we're interested in.
        self._image_tensor = self._detection_graph.get_tensor_by_name(
            'image_tensor:0')
        self._detection_boxes = self._detection_graph.get_tensor_by_name(
            'detection_boxes:0')
        self._detection_scores = self._detection_graph.get_tensor_by_name(
            'detection_scores:0')
        self._detection_classes = self._detection_graph.get_tensor_by_name(
            'detection_classes:0')
        self._num_detections = self._detection_graph.get_tensor_by_name(
            'num_detections:0')
        self._labels = {
            1: TrafficLightColor.GREEN,
            2: TrafficLightColor.YELLOW,
            3: TrafficLightColor.RED,
            4: TrafficLightColor.OFF
        }
        # Unique bounding box id. Incremented for each bounding box.
        self._unique_id = 0
        # Serve some junk image to load up the model.
        self.__run_model(np.zeros((108, 192, 3)))
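The constructor warms the network up through self.__run_model, which is not shown on this page. A sketch of what such a helper typically does with the tensors fetched above, following standard TensorFlow object-detection API usage (the confidence threshold and the returned tuple format are assumptions):

    def __run_model(self, image_np):
        # The object-detection graph expects a batched image tensor.
        (boxes, scores, classes, num) = self._tf_session.run(
            [
                self._detection_boxes, self._detection_scores,
                self._detection_classes, self._num_detections
            ],
            feed_dict={self._image_tensor: np.expand_dims(image_np, axis=0)})
        # Map class ids to traffic light colors and keep confident detections.
        results = []
        for i in range(int(num[0])):
            if scores[0][i] >= 0.5:  # assumed confidence threshold
                results.append((self._labels[int(classes[0][i])],
                                float(scores[0][i]), boxes[0][i]))
        return results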
Example #17
    def __init__(self, camera_stream: erdos.ReadStream,
                 time_to_decision_stream: erdos.ReadStream,
                 obstacles_stream: erdos.WriteStream, model_names, model_paths,
                 flags):
        camera_stream.add_callback(self.on_msg_camera_stream)
        time_to_decision_stream.add_callback(self.on_time_to_decision_update)
        erdos.add_watermark_callback([camera_stream], [obstacles_stream],
                                     self.on_watermark)
        self._flags = flags
        self._logger = erdos.utils.setup_logging(self.config.name,
                                                 self.config.log_file_name)
        self._csv_logger = erdos.utils.setup_csv_logging(
            self.config.name + '-csv', self.config.csv_log_file_name)

        # Load the COCO labels.
        self._coco_labels = load_coco_labels(self._flags.path_coco_labels)
        self._bbox_colors = load_coco_bbox_colors(self._coco_labels)

        assert len(model_names) == len(
            model_paths), 'Model names and paths do not have the same length'
        self._models = {}
        self._tf_session = None
        self._signitures = {
            'image_files': 'image_files:0',
            'image_arrays': 'image_arrays:0',
            'prediction': 'detections:0',
        }
        for index, model_path in enumerate(model_paths):
            model_name = model_names[index]
            self._models[model_name] = self.load_serving_model(
                model_name, model_path,
                flags.obstacle_detection_gpu_memory_fraction)
            if index == 0:
                # Use the first model by default.
                self._model_name, self._tf_session = self._models[model_name]
                # Serve some junk image to load up the model.
                inputs = np.zeros((108, 192, 3))
                self._tf_session.run(
                    self._signitures['prediction'],
                    feed_dict={self._signitures['image_arrays']: [inputs]})[0]
        self._unique_id = 0
        self._frame_msgs = deque()
        self._ttd_msgs = deque()
        self._last_ttd = 400
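Keeping several serving models in self._models lets the operator trade accuracy for latency at runtime. The snippet below sketches two small helpers for switching the active model and running it on a single image; the helper names are assumptions, but the unpacking and the session call mirror lines already present in the constructor:

    def _switch_model(self, model_name):
        # Each entry of self._models holds the pair returned by
        # load_serving_model: (model name, TF session with the model loaded).
        self._model_name, self._tf_session = self._models[model_name]

    def _detect(self, image_np):
        # Run the currently selected model on one image array.
        return self._tf_session.run(
            self._signitures['prediction'],
            feed_dict={self._signitures['image_arrays']: [image_np]})[0]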
Example #18
 def __init__(self, prediction_stream: erdos.ReadStream,
              ground_truth_stream: erdos.ReadStream,
              finished_indicator_stream: erdos.WriteStream,
              evaluate_timely: bool, matching_policy: str, frame_gap: int,
              scoring_module, flags):
     prediction_stream.add_callback(self.on_prediction)
     ground_truth_stream.add_callback(self.on_ground_truth)
     erdos.add_watermark_callback([prediction_stream, ground_truth_stream],
                                  [finished_indicator_stream],
                                  self.on_watermark)
     self._logger = erdos.utils.setup_logging(self.config.name,
                                              self.config.log_file_name)
     self._last_notification = None
     # Buffer of predictions.
     self._predictions = []
      # Buffer of ground truth data.
     self._ground_truths = []
     # Storing pairs of (game time, ground/output time).
     self._prediction_start_end_times = []
     self._frame_gap = frame_gap
     self._evaluate_timely = evaluate_timely
     self._matching_policy = matching_policy
     # The start time of the most recent inference that completed before
     # the timestamp of the watermark.
     self._start_time_best_inference = None
      # Index into _prediction_start_end_times pointing at the inference
      # with the next unprocessed start time. We need to maintain this
      # index because the list might contain entries whose start time is
      # beyond the current watermark.
     self._start_time_frontier = 0
     # Buffer storing start times and ground times of predictions we have
     # to compute accuracy for. The buffer is used to ensure that these
     # accuracies are computed only once the ground data is available.
     self._accuracy_compute_buffer = []
     # See interface below this function
     self._scoring_module = scoring_module
     self.scoring_modules = {
         "start": scoring_module(flags),
         "end": scoring_module(flags)
     }
     self._csv_logger = erdos.utils.setup_csv_logging(
         self.config.name + '-csv', self.config.csv_log_file_name)
Example #19
    def __init__(self, vehicle_id_stream: ReadStream,
                 ground_obstacles_stream: ReadStream, pose_stream: ReadStream,
                 ground_tracking_stream: WriteStream, flags):
        self._vehicle_id_stream = vehicle_id_stream
        ground_obstacles_stream.add_callback(self.on_obstacles_update)
        pose_stream.add_callback(self.on_pose_update)
        erdos.add_watermark_callback([ground_obstacles_stream, pose_stream],
                                     [ground_tracking_stream],
                                     self.on_watermark)
        self._logger = erdos.utils.setup_logging(self.config.name,
                                                 self.config.log_file_name)
        self._flags = flags
        # Queues of incoming data.
        self._obstacles_raw_msgs = deque()
        self._pose_msgs = deque()

        # Processed data. Key is actor id, value is deque containing the past
        # trajectory of the corresponding actor. Trajectory is stored in world
        # coordinates, for ease of transformation.
        self._obstacles = defaultdict(
            lambda: deque(maxlen=self._flags.tracking_num_steps))
Example #20
    def __init__(self, point_cloud_stream: erdos.ReadStream,
                 tracking_stream: erdos.ReadStream,
                 time_to_decision_stream: erdos.ReadStream,
                 prediction_stream: erdos.WriteStream, flags, lidar_setup):
        print("WARNING: R2P2 predicts only vehicle trajectories")
        self._logger = erdos.utils.setup_logging(self.config.name,
                                                 self.config.log_file_name)
        self._csv_logger = erdos.utils.setup_csv_logging(
            self.config.name + '-csv', self.config.csv_log_file_name)
        self._flags = flags

        self._device = torch.device('cuda')
        self._r2p2_model = R2P2().to(self._device)
        state_dict = torch.load(flags.r2p2_model_path)
        self._r2p2_model.load_state_dict(state_dict)

        point_cloud_stream.add_callback(self.on_point_cloud_update)
        tracking_stream.add_callback(self.on_trajectory_update)
        time_to_decision_stream.add_callback(self.on_time_to_decision_update)
        erdos.add_watermark_callback([point_cloud_stream, tracking_stream],
                                     [prediction_stream], self.on_watermark)

        self._lidar_setup = lidar_setup

        self._point_cloud_msgs = deque()
        self._tracking_msgs = deque()
Example #21
    def __init__(self, imu_stream: ReadStream, gnss_stream: ReadStream,
                 ground_pose_stream: ReadStream, pose_stream: WriteStream,
                 flags):
        # Queue of saved messages.
        self._imu_updates = deque()
        self._gnss_updates = deque()
        self._ground_pose_updates = None

        # Register callbacks on both the IMU and GNSS streams.
        imu_stream.add_callback(
            partial(self.save, msg_type="IMU", queue=self._imu_updates))

        gnss_stream.add_callback(
            partial(self.save, msg_type="GNSS", queue=self._gnss_updates))

        # Register the ground pose stream.
        self._ground_pose_updates = deque()
        ground_pose_stream.add_callback(
            partial(self.save,
                    msg_type="pose",
                    queue=self._ground_pose_updates))
        erdos.add_watermark_callback(
            [imu_stream, gnss_stream, ground_pose_stream], [],
            self.on_watermark)

        # Save the output stream.
        self._pose_stream = pose_stream

        # Initialize a logger.
        self._flags = flags
        self._logger = erdos.utils.setup_logging(self.config.name,
                                                 self.config.log_file_name)

        # Gravity vector.
        self._g = np.array([0, 0, -9.81])

        # Previous timestamp values.
        self._last_pose_estimate = None
        self._last_timestamp = None

        # NOTE: At the start of the simulation, the vehicle drops down from
        # the sky, during which the IMU values screw up the calculations.
        # This boolean flag ensures that the prediction is started only
        # once the values have stabilized.
        self._is_started = False

        # Constants required for the Kalman filtering.
        var_imu_f, var_imu_w, var_gnss = 0.5, 0.5, 0.1
        self.__Q = np.identity(6)
        self.__Q[0:3, 0:3] = self.__Q[0:3, 0:3] * var_imu_f
        self.__Q[3:6, 3:6] = self.__Q[3:6, 3:6] * var_imu_w

        self.__F = np.identity(9)

        self.__L = np.zeros([9, 6])
        self.__L[3:9, :] = np.identity(6)

        self.__R_GNSS = np.identity(3) * var_gnss

        self._last_covariance = np.zeros((9, 9))
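The constructor above only sets up the constant matrices of an error-state Kalman filter: __Q is the IMU noise covariance, __F and __L define the linearized motion model, __R_GNSS is the GNSS measurement noise, and _last_covariance is the 9x9 error-state covariance. As a generic illustration of how such matrices are usually combined in the prediction step (the method name, the delta_t scaling, and the handling of __F's time-dependent entries are assumptions, not the operator's actual code):

    def _propagate_covariance(self, delta_t):
        # Standard error-state EKF covariance propagation:
        #     P <- F P F^T + L (delta_t^2 * Q) L^T
        # A real implementation would first fill in the delta_t-dependent
        # blocks of F from the latest state estimate.
        Q = self.__Q * (delta_t ** 2)
        self._last_covariance = (
            self.__F @ self._last_covariance @ self.__F.T +
            self.__L @ Q @ self.__L.T)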
Example #22
    def __init__(self, pose_stream: ReadStream, waypoints_stream: ReadStream,
                 flags):
        # Register callbacks to retrieve pose and waypoints messages.
        pose_stream.add_callback(self.on_pose_update)
        waypoints_stream.add_callback(self.on_waypoint_update)

        # Add a watermark callback on pose stream and waypoints stream.
        erdos.add_watermark_callback([pose_stream, waypoints_stream], [],
                                     self.on_watermark)

        # Initialize state.
        self._logger = erdos.utils.setup_logging(self.config.name,
                                                 self.config.log_file_name)
        self._csv_logger = erdos.utils.setup_csv_logging(
            self.config.name + '-csv', self.config.csv_log_file_name)
        self._flags = flags
        self._waypoints_msgs = deque()
        self._pose_messages = deque()

        # Keep track of the last 2 waypoints that the ego vehicle was
        # supposed to reach.
        self.last_waypoints = deque(maxlen=2)
Example #23
 def __init__(self, ground_traffic_lights_stream: ReadStream,
              tl_camera_stream: ReadStream, depth_camera_stream: ReadStream,
              segmented_camera_stream: ReadStream, pose_stream: ReadStream,
              traffic_lights_stream: WriteStream, flags):
     ground_traffic_lights_stream.add_callback(self.on_traffic_light_update)
     tl_camera_stream.add_callback(self.on_bgr_camera_update)
     depth_camera_stream.add_callback(self.on_depth_camera_update)
     segmented_camera_stream.add_callback(self.on_segmented_frame)
     pose_stream.add_callback(self.on_pose_update)
     erdos.add_watermark_callback([
         ground_traffic_lights_stream, tl_camera_stream,
         depth_camera_stream, segmented_camera_stream, pose_stream
     ], [traffic_lights_stream], self.on_watermark)
     self._logger = erdos.utils.setup_logging(self.config.name,
                                              self.config.log_file_name)
     self._flags = flags
     self._traffic_lights = deque()
     self._bgr_msgs = deque()
     self._depth_frame_msgs = deque()
     self._segmented_msgs = deque()
     self._pose_msgs = deque()
     self._frame_cnt = 0
Example #24
 def __init__(self, pose_stream: ReadStream, waypoints_stream: ReadStream,
              control_stream: WriteStream, flags):
     pose_stream.add_callback(self.on_pose_update)
     waypoints_stream.add_callback(self.on_waypoints_update)
     erdos.add_watermark_callback([pose_stream, waypoints_stream],
                                  [control_stream], self.on_watermark)
     self._flags = flags
     self._logger = erdos.utils.setup_logging(self.config.name,
                                              self.config.log_file_name)
     pid_use_real_time = False
     if self._flags.execution_mode == 'real-world':
         # The PID is executing on a real car. Use the real time delta
         # between two control commands.
         pid_use_real_time = True
     if self._flags.simulator_control_frequency == -1:
         dt = 1.0 / self._flags.simulator_fps
     else:
         dt = 1.0 / self._flags.simulator_control_frequency
     self._pid = PIDLongitudinalController(flags.pid_p, flags.pid_d,
                                           flags.pid_i, dt,
                                           pid_use_real_time)
     # Queues in which received messages are stored.
     self._waypoint_msgs = deque()
     self._pose_msgs = deque()
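The dt computed above is the sampling period handed to the PID controller. As a generic illustration of a discrete longitudinal PID over the speed error (this is not pylot's PIDLongitudinalController, only the textbook update such a controller performs each step):

class SimplePID:
    """Textbook discrete PID over the speed error; illustrative only."""
    def __init__(self, k_p, k_i, k_d, dt):
        self._k_p, self._k_i, self._k_d, self._dt = k_p, k_i, k_d, dt
        self._integral = 0.0
        self._last_error = 0.0

    def run_step(self, target_speed, current_speed):
        error = target_speed - current_speed
        self._integral += error * self._dt
        derivative = (error - self._last_error) / self._dt
        self._last_error = error
        # Positive output maps to throttle, negative to brake.
        return (self._k_p * error + self._k_i * self._integral +
                self._k_d * derivative)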
Example #25
 def __init__(self, obstacles_stream: erdos.ReadStream,
              depth_stream: erdos.ReadStream, pose_stream: erdos.ReadStream,
              obstacles_output_stream: erdos.WriteStream, flags,
              camera_setup):
     obstacles_stream.add_callback(self.on_obstacles_update)
     depth_stream.add_callback(self.on_depth_update)
     pose_stream.add_callback(self.on_pose_update)
     erdos.add_watermark_callback(
         [obstacles_stream, depth_stream, pose_stream],
         [obstacles_output_stream], self.on_watermark)
     self._flags = flags
     self._camera_setup = camera_setup
     self._logger = erdos.utils.setup_logging(self.config.name,
                                              self.config.log_file_name)
     # Queues in which received messages are stored.
     self._obstacles_msgs = deque()
     self._depth_msgs = deque()
     self._pose_msgs = deque()
Example #26
 def __init__(self, pose_stream: ReadStream, tracking_stream: ReadStream,
              prediction_stream: ReadStream, flags):
     pose_stream.add_callback(self._on_pose_update)
     tracking_stream.add_callback(self._on_tracking_update)
     prediction_stream.add_callback(self._on_prediction_update)
     erdos.add_watermark_callback(
         [pose_stream, tracking_stream, prediction_stream], [],
         self.on_watermark)
     self._flags = flags
     self._logger = erdos.utils.setup_logging(self.config.name,
                                              self.config.log_file_name)
     self._csv_logger = erdos.utils.setup_csv_logging(
         self.config.name + '-csv', self.config.csv_log_file_name)
     # Message buffers.
     self._prediction_msgs = deque()
     self._tracking_msgs = deque()
     self._pose_msgs = deque()
     # Accumulated list of predictions, from oldest to newest.
     self._predictions = deque(
         maxlen=self._flags.prediction_num_future_steps)
Example #27
 def __init__(self, obstacle_tracking_stream: erdos.ReadStream, flags):
     obstacle_tracking_stream.add_callback(self.on_trajectories_msg)
     self._logger = erdos.utils.setup_logging(self.config.name,
                                              self.config.log_file_name)
     self._flags = flags
     self._msg_cnt = 0
Example #28
    def __init__(
            self, control_stream: ReadStream,
            release_sensor_stream: ReadStream,
            pipeline_finish_notify_stream: ReadStream,
            pose_stream: WriteStream, pose_stream_for_control: WriteStream,
            ground_traffic_lights_stream: WriteStream,
            ground_obstacles_stream: WriteStream,
            ground_speed_limit_signs_stream: WriteStream,
            ground_stop_signs_stream: WriteStream,
            vehicle_id_stream: WriteStream, open_drive_stream: WriteStream,
            global_trajectory_stream: WriteStream, flags):
        if flags.random_seed:
            random.seed(flags.random_seed)
        # Register callback on control stream.
        control_stream.add_callback(self.on_control_msg)
        erdos.add_watermark_callback([release_sensor_stream], [],
                                     self.on_sensor_ready)
        if flags.simulator_mode == "pseudo-asynchronous":
            erdos.add_watermark_callback([pipeline_finish_notify_stream], [],
                                         self.on_pipeline_finish)
        self.pose_stream = pose_stream
        self.pose_stream_for_control = pose_stream_for_control
        self.ground_traffic_lights_stream = ground_traffic_lights_stream
        self.ground_obstacles_stream = ground_obstacles_stream
        self.ground_speed_limit_signs_stream = ground_speed_limit_signs_stream
        self.ground_stop_signs_stream = ground_stop_signs_stream
        self.vehicle_id_stream = vehicle_id_stream
        self.open_drive_stream = open_drive_stream
        self.global_trajectory_stream = global_trajectory_stream

        self._flags = flags
        self._logger = erdos.utils.setup_logging(self.config.name,
                                                 self.config.log_file_name)
        self._csv_logger = erdos.utils.setup_csv_logging(
            self.config.name + '-csv', self.config.csv_log_file_name)
        # Connect to simulator and retrieve the world running.
        self._client, self._world = pylot.simulation.utils.get_world(
            self._flags.simulator_host, self._flags.simulator_port,
            self._flags.simulator_timeout)
        self._simulator_version = self._client.get_client_version()

        if not self._flags.scenario_runner and \
                self._flags.control != "manual":
            # Load the appropriate town.
            self._initialize_world()

        # Save the spectator handle so that we don't have to repeatedly get
        # the handle (which is slow).
        self._spectator = self._world.get_spectator()

        if pylot.simulation.utils.check_simulator_version(
                self._simulator_version, required_minor=9, required_patch=8):
            # Any simulator version after 0.9.7.
            # Create a traffic manager so that the auto pilot works.
            self._traffic_manager = self._client.get_trafficmanager(
                self._flags.carla_traffic_manager_port)
            self._traffic_manager.set_synchronous_mode(
                self._flags.simulator_mode == 'synchronous')

        if self._flags.scenario_runner:
            # Tick until 4.0 seconds of game time so that all synchronous
            # scenario runs start at exactly the same game time.
            pylot.simulation.utils.set_synchronous_mode(self._world, 1000)
            self._tick_simulator_until(4000)

        pylot.simulation.utils.set_simulation_mode(self._world, self._flags)

        if self._flags.scenario_runner or self._flags.control == "manual":
            # Wait until the ego vehicle is spawned by the scenario runner.
            self._logger.info("Waiting for the scenario to be ready ...")
            self._ego_vehicle = pylot.simulation.utils.wait_for_ego_vehicle(
                self._world)
            self._logger.info("Found ego vehicle")
        else:
            # Spawn ego vehicle, people and vehicles.
            (self._ego_vehicle, self._vehicle_ids,
             self._people) = pylot.simulation.utils.spawn_actors(
                 self._client, self._world, self._simulator_version,
                 self._flags.simulator_spawn_point_index,
                 self._flags.control == 'simulator_auto_pilot',
                 self._flags.simulator_num_people,
                 self._flags.simulator_num_vehicles, self._logger)

        pylot.simulation.utils.set_vehicle_physics(
            self._ego_vehicle, self._flags.simulator_vehicle_moi,
            self._flags.simulator_vehicle_mass)

        # Lock used to ensure that simulator callbacks are not executed
        # concurrently.
        self._lock = threading.Lock()

        # Dictionary that stores the processing times when sensors are ready
        # to release data. This info is used to calculate the real processing
        # time of our pipeline without including simulator-induced sensor
        # delays.
        self._next_localization_sensor_reading = None
        self._next_control_sensor_reading = None
        self._simulator_in_sync = False
        self._tick_events = []
        self._control_msgs = {}
Example #29
    def __init__(self, depth_camera_stream: ReadStream,
                 center_camera_stream: ReadStream,
                 segmented_camera_stream: ReadStream, pose_stream: ReadStream,
                 ground_obstacles_stream: ReadStream,
                 ground_speed_limit_signs_stream: ReadStream,
                 ground_stop_signs_stream: ReadStream,
                 obstacles_stream: WriteStream, flags):
        depth_camera_stream.add_callback(self.on_depth_camera_update)
        center_camera_stream.add_callback(self.on_bgr_camera_update)
        segmented_camera_stream.add_callback(self.on_segmented_frame)
        pose_stream.add_callback(self.on_pose_update)
        ground_obstacles_stream.add_callback(self.on_obstacles_update)
        ground_speed_limit_signs_stream.add_callback(
            self.on_speed_limit_signs_update)
        ground_stop_signs_stream.add_callback(self.on_stop_signs_update)
        # Register a completion watermark callback. The callback is invoked
        # after all the messages with a given timestamp have been received.
        erdos.add_watermark_callback([
            depth_camera_stream, center_camera_stream, segmented_camera_stream,
            pose_stream, ground_obstacles_stream,
            ground_speed_limit_signs_stream, ground_stop_signs_stream
        ], [obstacles_stream], self.on_watermark)

        self._logger = erdos.utils.setup_logging(self.config.name,
                                                 self.config.log_file_name)
        self._flags = flags
        # Queues of incoming data.
        self._bgr_msgs = deque()
        self._pose_msgs = deque()
        self._depth_frame_msgs = deque()
        self._obstacles = deque()
        self._segmented_msgs = deque()
        self._speed_limit_signs = deque()
        self._stop_signs = deque()
        self._frame_cnt = 0
Example #30
 def __init__(self, pose_stream: erdos.ReadStream,
              prediction_stream: erdos.ReadStream,
              static_obstacles_stream: erdos.ReadStream,
              lanes_stream: erdos.ReadStream,
              route_stream: erdos.ReadStream,
              open_drive_stream: erdos.ReadStream,
              time_to_decision_stream: erdos.ReadStream,
              waypoints_stream: erdos.WriteStream, flags):
     pose_stream.add_callback(self.on_pose_update)
     prediction_stream.add_callback(self.on_prediction_update)
     static_obstacles_stream.add_callback(self.on_static_obstacles_update)
     lanes_stream.add_callback(self.on_lanes_update)
     route_stream.add_callback(self.on_route)
     open_drive_stream.add_callback(self.on_opendrive_map)
     time_to_decision_stream.add_callback(self.on_time_to_decision)
     erdos.add_watermark_callback([
         pose_stream, prediction_stream, static_obstacles_stream,
         lanes_stream, time_to_decision_stream, route_stream
     ], [waypoints_stream], self.on_watermark)
     self._logger = erdos.utils.setup_logging(self.config.name,
                                              self.config.log_file_name)
     self._flags = flags
      # We do not yet know the vehicle's location.
     self._ego_transform = None
     self._map = None
     self._world = World(flags, self._logger)
     if self._flags.planning_type == 'waypoint':
         # Use the FOT planner for overtaking.
         from pylot.planning.frenet_optimal_trajectory.fot_planner \
             import FOTPlanner
         self._planner = FOTPlanner(self._world, self._flags, self._logger)
     elif self._flags.planning_type == 'frenet_optimal_trajectory':
         from pylot.planning.frenet_optimal_trajectory.fot_planner \
             import FOTPlanner
         self._planner = FOTPlanner(self._world, self._flags, self._logger)
     elif self._flags.planning_type == 'hybrid_astar':
         from pylot.planning.hybrid_astar.hybrid_astar_planner \
             import HybridAStarPlanner
         self._planner = HybridAStarPlanner(self._world, self._flags,
                                            self._logger)
     elif self._flags.planning_type == 'rrt_star':
         from pylot.planning.rrt_star.rrt_star_planner import RRTStarPlanner
         self._planner = RRTStarPlanner(self._world, self._flags,
                                        self._logger)
     else:
         raise ValueError('Unexpected planning type: {}'.format(
             self._flags.planning_type))
     self._state = BehaviorPlannerState.FOLLOW_WAYPOINTS
     self._pose_msgs = deque()
     self._prediction_msgs = deque()
     self._static_obstacles_msgs = deque()
     self._lanes_msgs = deque()
     self._ttd_msgs = deque()