Example 1
    def on_watermark(self, timestamp, obstacles_stream):
        """Invoked when all input streams have received a watermark.

        Args:
            timestamp (:py:class:`erdos.timestamp.Timestamp`): The timestamp of
                the watermark.
        """
        self._logger.debug('@{}: received watermark'.format(timestamp))
        depth_msg = self._depth_frame_msgs.popleft()
        bgr_msg = self._bgr_msgs.popleft()
        segmented_msg = self._segmented_msgs.popleft()
        pose_msg = self._pose_msgs.popleft()
        obstacles_msg = self._obstacles.popleft()
        speed_limit_signs_msg = self._speed_limit_signs.popleft()
        stop_signs_msg = self._stop_signs.popleft()
        self._frame_cnt += 1
        if (hasattr(self._flags, 'log_every_nth_message')
                and self._frame_cnt % self._flags.log_every_nth_message != 0):
            # There's no point in running the perfect detector if we are
            # collecting data and only logging every nth frame.
            obstacles_stream.send(ObstaclesMessage(timestamp, []))
            return
        vehicle_transform = pose_msg.data.transform

        # The camera setup sent with the image is relative to the car; we
        # need to transform it so that it is relative to the world.
        depth_msg.frame.camera_setup.set_transform(
            vehicle_transform * depth_msg.frame.camera_setup.transform)

        det_obstacles = self.__get_obstacles(obstacles_msg.obstacles,
                                             vehicle_transform,
                                             depth_msg.frame,
                                             segmented_msg.frame)

        det_speed_limits = pylot.simulation.utils.get_detected_speed_limits(
            speed_limit_signs_msg.speed_signs, depth_msg.frame,
            segmented_msg.frame)

        det_stop_signs = pylot.simulation.utils.get_detected_traffic_stops(
            stop_signs_msg.stop_signs, depth_msg.frame)

        det_obstacles = det_obstacles + det_speed_limits + det_stop_signs

        # Send the detected obstacles.
        obstacles_stream.send(ObstaclesMessage(timestamp, det_obstacles))

        if (self._flags.visualize_detected_obstacles
                or self._flags.log_detector_output):
            bgr_msg.frame.annotate_with_bounding_boxes(bgr_msg.timestamp,
                                                       det_obstacles,
                                                       vehicle_transform)
            if self._flags.visualize_detected_obstacles:
                bgr_msg.frame.visualize(
                    self.config.name,
                    pygame_display=pylot.utils.PYGAME_DISPLAY)
            if self._flags.log_detector_output:
                bgr_msg.frame.save(bgr_msg.timestamp.coordinates[0],
                                   self._flags.data_path, 'perfect-detector')
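The deques popped at the top of on_watermark are filled by per-stream callbacks that are not part of this excerpt. A minimal sketch of the buffer-and-pop synchronization pattern these operators rely on (class and callback names here are illustrative, not part of the Pylot API):

    from collections import deque

    class SyncedPerceptionOperator:
        """Illustrative skeleton of the buffer-and-pop-on-watermark pattern."""
        def __init__(self):
            # One queue per input stream.
            self._bgr_msgs = deque()
            self._depth_frame_msgs = deque()

        def on_bgr_msg(self, msg):
            # Per-stream callbacks only buffer; processing happens on the
            # watermark.
            self._bgr_msgs.append(msg)

        def on_depth_msg(self, msg):
            self._depth_frame_msgs.append(msg)

        def on_watermark(self, timestamp, obstacles_stream):
            # The watermark guarantees every input stream has delivered its
            # message for `timestamp`, so the queue heads are aligned.
            bgr_msg = self._bgr_msgs.popleft()
            depth_msg = self._depth_frame_msgs.popleft()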
Example 2
    def on_watermark(self, timestamp, obstacles_stream):
        self._logger.debug('@{}: received watermark'.format(timestamp))
        depth_msg = self._depth_frame_msgs.popleft()
        bgr_msg = self._bgr_msgs.popleft()
        segmented_msg = self._segmented_msgs.popleft()
        can_bus_msg = self._can_bus_msgs.popleft()
        obstacles_msg = self._obstacles.popleft()
        speed_limit_signs_msg = self._speed_limit_signs.popleft()
        stop_signs_msg = self._stop_signs.popleft()
        self._frame_cnt += 1
        if (hasattr(self._flags, 'log_every_nth_message')
                and self._frame_cnt % self._flags.log_every_nth_message != 0):
            # There's no point in running the perfect detector if we are
            # collecting data and only logging every nth frame.
            obstacles_stream.send(ObstaclesMessage([], timestamp))
            return
        vehicle_transform = can_bus_msg.data.transform

        # The camera setup sent with the image is relative to the car; we
        # need to transform it so that it is relative to the world.
        depth_msg.frame.camera_setup.set_transform(
            vehicle_transform * depth_msg.frame.camera_setup.transform)

        det_obstacles = self.__get_obstacles(obstacles_msg.obstacles,
                                             vehicle_transform,
                                             depth_msg.frame,
                                             segmented_msg.frame)

        det_speed_limits = pylot.simulation.utils.get_detected_speed_limits(
            speed_limit_signs_msg.speed_signs, depth_msg.frame,
            segmented_msg.frame)

        det_stop_signs = pylot.simulation.utils.get_detected_traffic_stops(
            stop_signs_msg.stop_signs, depth_msg.frame)

        det_obstacles = det_obstacles + det_speed_limits + det_stop_signs

        # Send the detected obstacles.
        obstacles_stream.send(ObstaclesMessage(det_obstacles, timestamp))

        if (self._flags.visualize_detected_obstacles
                or self._flags.log_detector_output):
            bgr_msg.frame.annotate_with_bounding_boxes(bgr_msg.timestamp,
                                                       det_obstacles)
            if self._flags.visualize_detected_obstacles:
                bgr_msg.frame.visualize(self._name)
            if self._flags.log_detector_output:
                bgr_msg.frame.save(bgr_msg.timestamp.coordinates[0],
                                   self._flags.data_path, 'perfect-detector')
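Note that the examples in this list come from different versions of Pylot: the ObstaclesMessage constructor's argument order is reversed between Example 1 and Example 2.

    # Example 1 (and most examples below): timestamp first.
    obstacles_stream.send(ObstaclesMessage(timestamp, det_obstacles))
    # Example 2 (older API): obstacles first, timestamp second.
    obstacles_stream.send(ObstaclesMessage(det_obstacles, timestamp))

Check which signature the pylot.perception.messages version you target expects before copying a call.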
Example 3
    def __send_ground_actors_data(self, timestamp: Timestamp):
        # Get all the actors in the simulation.
        actor_list = self._world.get_actors()

        (vehicles, people, traffic_lights, speed_limits, traffic_stops
         ) = pylot.simulation.utils.extract_data_in_pylot_format(actor_list)

        # Send ground people and vehicles.
        self.ground_obstacles_stream.send(
            ObstaclesMessage(timestamp, vehicles + people))
        self.ground_obstacles_stream.send(erdos.WatermarkMessage(timestamp))
        # Send ground traffic lights.
        self.ground_traffic_lights_stream.send(
            TrafficLightsMessage(timestamp, traffic_lights))
        self.ground_traffic_lights_stream.send(
            erdos.WatermarkMessage(timestamp))
        # Send ground speed signs.
        self.ground_speed_limit_signs_stream.send(
            SpeedSignsMessage(timestamp, speed_limits))
        self.ground_speed_limit_signs_stream.send(
            erdos.WatermarkMessage(timestamp))
        # Send stop signs.
        self.ground_stop_signs_stream.send(
            StopSignsMessage(timestamp, traffic_stops))
        self.ground_stop_signs_stream.send(erdos.WatermarkMessage(timestamp))
Example 4
    def on_watermark(self, timestamp, obstacle_tracking_stream):
        self._logger.debug('@{}: received watermark'.format(timestamp))
        frame_msg = self._frame_msgs.popleft()
        camera_frame = frame_msg.frame
        tracked_obstacles = []
        self._watermark_msg_count += 1
        if len(self._obstacles_msgs) > 0:
            obstacles_msg = self._obstacles_msgs.popleft()
            assert frame_msg.timestamp == obstacles_msg.timestamp
            detected_obstacles = []
            for obstacle in obstacles_msg.obstacles:
                if obstacle.is_vehicle() or obstacle.is_person():
                    detected_obstacles.append(obstacle)
            if (self._watermark_msg_count %
                    self._flags.track_every_nth_detection == 0):
                # Reinitialize the tracker with new detections.
                self._logger.debug(
                    'Restarting trackers at frame {}'.format(timestamp))
                self._tracker.reinitialize(camera_frame, detected_obstacles)

        self._logger.debug('Processing frame {}'.format(timestamp))
        ok, tracked_obstacles = self._tracker.track(camera_frame)
        if not ok:
            self._logger.error(
                'Tracker failed at timestamp {}'.format(timestamp))
        obstacle_tracking_stream.send(
            ObstaclesMessage(timestamp, tracked_obstacles, 0))
        obstacle_tracking_stream.send(erdos.WatermarkMessage(timestamp))
Example 5
    def on_watermark(self, timestamp, obstacles_output_stream):
        """Invoked when all input streams have received a watermark.

        Args:
            timestamp (:py:class:`erdos.timestamp.Timestamp`): The timestamp of
                the watermark.
        """
        self._logger.debug('@{}: received watermark'.format(timestamp))
        obstacles_msg = self._obstacles_msgs.popleft()
        depth_msg = self._depth_msgs.popleft()
        vehicle_transform = self._pose_msgs.popleft().data.transform
        frame_msg = self._frame_msgs.popleft()

        obstacles_with_location = get_obstacle_locations(
            obstacles_msg.obstacles, depth_msg, vehicle_transform,
            self._camera_setup, self._logger)

        self._logger.info('@{}: {}'.format(timestamp, obstacles_with_location))

        if self._flags.visualize_obstacles_with_distance:
            frame_msg.frame.annotate_with_bounding_boxes(
                timestamp, obstacles_with_location, vehicle_transform)
            frame_msg.frame.visualize(
                self.config.name, pygame_display=pylot.utils.PYGAME_DISPLAY)

        obstacles_output_stream.send(
            ObstaclesMessage(timestamp, obstacles_with_location))
Example 6
def main(argv):
    (obstacles_stream, traffic_lights_stream, obstacles_tracking_stream,
     open_drive_stream, global_trajectory_stream) = create_data_flow()
    # Run the data-flow.
    erdos.run_async()

    top_timestamp = erdos.Timestamp(coordinates=[sys.maxsize])
    open_drive_stream.send(erdos.WatermarkMessage(top_timestamp))

    waypoints = [[waypoint] for waypoint in read_waypoints()]
    global_trajectory_stream.send(
        erdos.Message(erdos.Timestamp(coordinates=[0]), waypoints))
    global_trajectory_stream.send(erdos.WatermarkMessage(top_timestamp))

    time_to_sleep = 1.0 / FLAGS.sensor_frequency
    count = 0
    while True:
        timestamp = erdos.Timestamp(coordinates=[count])
        if not FLAGS.obstacle_detection:
            obstacles_stream.send(ObstaclesMessage(timestamp, []))
            obstacles_stream.send(erdos.WatermarkMessage(timestamp))
        if not FLAGS.traffic_light_detection:
            traffic_lights_stream.send(TrafficLightsMessage(timestamp, []))
            traffic_lights_stream.send(erdos.WatermarkMessage(timestamp))
        if not FLAGS.obstacle_tracking:
            obstacles_tracking_stream.send(
                ObstacleTrajectoriesMessage(timestamp, []))
            obstacles_tracking_stream.send(erdos.WatermarkMessage(timestamp))
        count += 1
        # NOTE: We should offset sleep time by the time it takes to send the
        # messages.
        time.sleep(time_to_sleep)
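The closing NOTE suggests compensating for the time spent sending. A minimal sketch of that adjustment (not part of the original source):

    while True:
        iteration_start = time.time()
        # ... construct and send the messages exactly as above ...
        count += 1
        # Offset the sleep by the time this iteration has already used.
        elapsed = time.time() - iteration_start
        time.sleep(max(0.0, time_to_sleep - elapsed))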
Example 7
    def on_watermark(self, timestamp, obstacle_tracking_stream):
        self._logger.debug('@{}: received watermark'.format(timestamp))
        frame_msg = self._frame_msgs.popleft()
        camera_frame = frame_msg.frame
        tracked_obstacles = []
        if len(self._obstacles_msgs) > 0:
            obstacles_msg = self._obstacles_msgs.popleft()
            assert frame_msg.timestamp == obstacles_msg.timestamp
            self._logger.debug(
                'Restarting trackers at frame {}'.format(timestamp))
            detected_obstacles = []
            for obstacle in obstacles_msg.obstacles:
                if (obstacle.label in VEHICLE_LABELS
                        or obstacle.label == 'person'):
                    detected_obstacles.append(obstacle)
            self._tracker.reinitialize(camera_frame, detected_obstacles)

        self._logger.debug('Processing frame {}'.format(timestamp))
        ok, tracked_obstacles = self._tracker.track(camera_frame)
        if not ok:
            self._logger.error(
                'Tracker failed at timestamp {}'.format(timestamp))

        obstacle_tracking_stream.send(
            ObstaclesMessage(timestamp, tracked_obstacles, 0))

        if self._flags.visualize_tracker_output:
            # Tracked obstacles have no label, draw white bbox.
            camera_frame.annotate_with_bounding_boxes(timestamp,
                                                      tracked_obstacles)
            camera_frame.visualize(self.config.name,
                                   pygame_display=pylot.utils.PYGAME_DISPLAY)
Example 8
    def send_perfect_detections(self, perfect_obstacles_stream,
                                perfect_traffic_lights_stream, timestamp,
                                tl_camera_location):
        """Send perfect detections for agents and traffic lights.

        This method first connects to the simulator to extract all the
        agents and traffic lights in a scenario. Next, it transforms them into
        the types Pylot expects, and sends them on the streams for perfect
        detections.

        Note: This is only used when running with a perfect perception
            component.
        """
        if not (FLAGS.simulator_obstacle_detection
                or FLAGS.simulator_traffic_light_detection
                or FLAGS.evaluate_obstacle_detection
                or FLAGS.evaluate_obstacle_tracking):
            return
        from pylot.simulation.utils import extract_data_in_pylot_format
        actor_list = self._world.get_actors()
        (vehicles, people, traffic_lights, _,
         _) = extract_data_in_pylot_format(actor_list)
        if (FLAGS.simulator_obstacle_detection
                or FLAGS.evaluate_obstacle_detection
                or FLAGS.evaluate_obstacle_tracking):
            perfect_obstacles_stream.send(
                ObstaclesMessage(timestamp, vehicles + people))
            perfect_obstacles_stream.send(erdos.WatermarkMessage(timestamp))
        if FLAGS.simulator_traffic_light_detection:
            vec_transform = pylot.utils.Transform.from_simulator_transform(
                self._ego_vehicle.get_transform())
            tl_camera_transform = pylot.utils.Transform(
                tl_camera_location, pylot.utils.Rotation())
            visible_tls = []
            if self._town_name is None:
                self._town_name = self._world.get_map().name
            for tl in traffic_lights:
                if tl.is_traffic_light_visible(
                        vec_transform * tl_camera_transform,
                        self._town_name,
                        distance_threshold=FLAGS.
                        static_obstacle_distance_threshold):
                    if self._town_name not in ['Town01', 'Town02']:
                        delta_y = -5
                        if self._town_name == 'Town04':
                            delta_y = -2
                        # Move the traffic light location to the road.
                        tl.transform = tl.transform * pylot.utils.Transform(
                            pylot.utils.Location(delta_y, 0, 5),
                            pylot.utils.Rotation())
                    visible_tls.append(tl)
            perfect_traffic_lights_stream.send(
                TrafficLightsMessage(timestamp, visible_tls))
            perfect_traffic_lights_stream.send(
                erdos.WatermarkMessage(timestamp))
Example 9
    def on_watermark(self, timestamp, traffic_lights_stream):
        self._logger.debug('@{}: received watermark'.format(timestamp))
        traffic_light_msg = self._traffic_lights.popleft()
        bgr_msg = self._bgr_msgs.popleft()
        depth_msg = self._depth_frame_msgs.popleft()
        segmented_msg = self._segmented_msgs.popleft()
        can_bus_msg = self._can_bus_msgs.popleft()
        vehicle_transform = can_bus_msg.data.transform
        self._frame_cnt += 1
        if (hasattr(self._flags, 'log_every_nth_message')
                and self._frame_cnt % self._flags.log_every_nth_message != 0):
            # There's no point in running the perfect detector if we are
            # collecting data and only logging every nth frame.
            traffic_lights_stream.send(ObstaclesMessage([], timestamp))
            return

        # The camera setup sent with the image is relative to the car; we
        # need to transform it so that it is relative to the world in order
        # to detect traffic lights.
        depth_msg.frame.camera_setup.set_transform(
            vehicle_transform * depth_msg.frame.camera_setup.transform)

        det_traffic_lights = get_traffic_lights_obstacles(
            traffic_light_msg.traffic_lights, depth_msg.frame,
            segmented_msg.frame, self._town_name)

        if (self._flags.visualize_detected_traffic_lights
                or self._flags.log_detector_output):
            bgr_msg.frame.annotate_with_bounding_boxes(bgr_msg.timestamp,
                                                       det_traffic_lights)
            if self._flags.visualize_detected_traffic_lights:
                bgr_msg.frame.visualize(self._name)
            if self._flags.log_detector_output:
                bgr_msg.frame.save(bgr_msg.timestamp.coordinates[0],
                                   self._flags.data_path, 'perfect-detector')

        # Send the detected traffic lights.
        traffic_lights_stream.send(
            ObstaclesMessage(det_traffic_lights, timestamp))
Example 10
    def on_frame(self, msg, traffic_lights_stream):
        """ Invoked when the operator receives a message on the data stream."""
        self._logger.debug('@{}: {} received message'.format(
            msg.timestamp, self._name))
        start_time = time.time()
        assert msg.frame.encoding == 'BGR', 'Expects BGR frames'
        # Expand dimensions since the model expects images to have
        # shape: [1, None, None, 3]
        image_np_expanded = np.expand_dims(msg.frame.as_rgb_numpy_array(),
                                           axis=0)
        (boxes, scores, classes, num) = self._tf_session.run(
            [
                self._detection_boxes, self._detection_scores,
                self._detection_classes, self._num_detections
            ],
            feed_dict={self._image_tensor: image_np_expanded})

        num_detections = int(num[0])
        labels = [self._labels[label] for label in classes[0][:num_detections]]
        boxes = boxes[0][:num_detections]
        scores = scores[0][:num_detections]

        traffic_lights = self.__convert_to_detected_tl(
            boxes, scores, labels, msg.frame.camera_setup.height,
            msg.frame.camera_setup.width)

        self._logger.debug('@{}: {} detected traffic lights {}'.format(
            msg.timestamp, self._name, traffic_lights))

        if (self._flags.visualize_detected_traffic_lights
                or self._flags.log_traffic_light_detector_output):
            msg.frame.annotate_with_bounding_boxes(msg.timestamp,
                                                   traffic_lights,
                                                   self._bbox_colors)
            if self._flags.visualize_detected_traffic_lights:
                msg.frame.visualize(self._name)
            if self._flags.log_traffic_light_detector_output:
                msg.frame.save(msg.timestamp.coordinates[0],
                               self._flags.data_path,
                               'tl-detector-{}'.format(self._name))

        # Get runtime in ms.
        runtime = (time.time() - start_time) * 1000
        self._csv_logger.info('{},{},"{}",{}'.format(time_epoch_ms(),
                                                     self._name, msg.timestamp,
                                                     runtime))

        traffic_lights_stream.send(
            ObstaclesMessage(traffic_lights, msg.timestamp, runtime))
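Several examples pass a third argument to ObstaclesMessage; judging from how it is computed above, it carries the operator's runtime in milliseconds, with 0 used as a placeholder when nothing is measured.

    # Example 10 (older argument order) reports a measured runtime in ms:
    ObstaclesMessage(traffic_lights, msg.timestamp, runtime)
    # Examples 7 and 15 (newer order) send a placeholder of 0 ms:
    ObstaclesMessage(timestamp, tracked_obstacles, 0)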
Example 11
    def __publish_ground_actors_data(self, timestamp, watermark_msg):
        # Get all the actors in the simulation.
        actor_list = self._world.get_actors()

        (vehicles, people, traffic_lights, speed_limits,
         traffic_stops) = extract_data_in_pylot_format(actor_list)

        obstacles_msg = ObstaclesMessage(timestamp, vehicles + people)
        self._ground_obstacles_stream.send(obstacles_msg)
        self._ground_obstacles_stream.send(watermark_msg)
        traffic_lights_msg = TrafficLightsMessage(timestamp, traffic_lights)
        self._ground_traffic_lights_stream.send(traffic_lights_msg)
        self._ground_traffic_lights_stream.send(watermark_msg)
        speed_limit_signs_msg = SpeedSignsMessage(timestamp, speed_limits)
        self._ground_speed_limit_signs_stream.send(speed_limit_signs_msg)
        self._ground_speed_limit_signs_stream.send(watermark_msg)
        stop_signs_msg = StopSignsMessage(timestamp, traffic_stops)
        self._ground_stop_signs_stream.send(stop_signs_msg)
        self._ground_stop_signs_stream.send(watermark_msg)
Example 12
    def on_watermark(self, timestamp, obstacles_output_stream):
        """Invoked when all input streams have received a watermark.

        Args:
            timestamp (:py:class:`erdos.timestamp.Timestamp`): The timestamp of
                the watermark.
        """
        self._logger.debug('@{}: received watermark'.format(timestamp))
        if timestamp.is_top:
            return
        obstacles_msg = self._obstacles_msgs.popleft()
        depth_msg = self._depth_msgs.popleft()
        vehicle_transform = self._pose_msgs.popleft().data.transform
        obstacles_with_location = get_obstacle_locations(
            obstacles_msg.obstacles, depth_msg, vehicle_transform,
            self._camera_setup, self._logger)
        self._logger.debug('@{}: {}'.format(timestamp,
                                            obstacles_with_location))
        obstacles_output_stream.send(
            ObstaclesMessage(timestamp, obstacles_with_location))
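The is_top guard above skips the final "top" watermark that is sent to signal a stream has closed (Example 23 below constructs one explicitly); handling it like a regular watermark would pop from queues that hold no message for it.

    # From Example 23: the top watermark the guard protects against.
    top_msg = erdos.WatermarkMessage(erdos.Timestamp(is_top=True))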
Example 13
    def send_ground_data(self, timestamp):
        if not (FLAGS.carla_obstacle_detection
                or FLAGS.carla_traffic_light_detection):
            return
        actor_list = self._world.get_actors()
        (vehicles, people, traffic_lights, _,
         _) = pylot.simulation.utils.extract_data_in_pylot_format(actor_list)
        if FLAGS.carla_obstacle_detection:
            self._ground_obstacles_stream.send(
                ObstaclesMessage(timestamp, vehicles + people))
            self._ground_obstacles_stream.send(
                erdos.WatermarkMessage(timestamp))
        if FLAGS.carla_traffic_light_detection:
            vec_transform = pylot.utils.Transform.from_carla_transform(
                self._ego_vehicle.get_transform())
            tl_camera_transform = pylot.utils.Transform(
                CENTER_CAMERA_LOCATION, pylot.utils.Rotation())
            visible_tls = []
            if self._town_name is None:
                self._town_name = self._world.get_map().name
            for tl in traffic_lights:
                if tl.is_traffic_light_visible(
                        vec_transform * tl_camera_transform,
                        self._town_name,
                        distance_threshold=FLAGS.
                        static_obstacle_distance_threshold):
                    if self._town_name not in ['Town01', 'Town02']:
                        delta_y = -5
                        if self._town_name == 'Town04':
                            delta_y = -2
                        # Move the traffic light location to the road.
                        tl.transform = tl.transform * pylot.utils.Transform(
                            pylot.utils.Location(delta_y, 0, 5),
                            pylot.utils.Rotation())
                    visible_tls.append(tl)
            self._ground_traffic_lights_stream.send(
                TrafficLightsMessage(timestamp, visible_tls))
            self._ground_traffic_lights_stream.send(
                erdos.WatermarkMessage(timestamp))
Example 14
    def on_watermark(self, timestamp, obstacle_tracking_stream):
        self._logger.debug('@{}: received watermark'.format(timestamp))
        if timestamp.is_top:
            return
        frame_msg = self._frame_msgs.popleft()
        camera_frame = frame_msg.frame
        tracked_obstacles = []
        detector_runtime = 0
        reinit_runtime = 0
        # Check if the most recent obstacle message has this timestamp.
        # If it doesn't, then the detector might have skipped sending
        # an obstacle message.
        if (len(self._obstacles_msgs) > 0
                and self._obstacles_msgs[0].timestamp == timestamp):
            obstacles_msg = self._obstacles_msgs.popleft()
            self._detection_update_count += 1
            if (self._detection_update_count %
                    self._flags.track_every_nth_detection == 0):
                # Reinitialize the tracker with new detections.
                self._logger.debug(
                    'Restarting trackers at frame {}'.format(timestamp))
                detected_obstacles = []
                for obstacle in obstacles_msg.obstacles:
                    if obstacle.is_vehicle() or obstacle.is_person():
                        detected_obstacles.append(obstacle)
                reinit_runtime, _ = self._reinit_tracker(
                    camera_frame, detected_obstacles)
                detector_runtime = obstacles_msg.runtime
        tracker_runtime, (ok, tracked_obstacles) = \
            self._run_tracker(camera_frame)
        assert ok, 'Tracker failed at timestamp {}'.format(timestamp)
        tracker_runtime = tracker_runtime + reinit_runtime
        tracker_delay = self.__compute_tracker_delay(
            timestamp.coordinates[0], detector_runtime, tracker_runtime)
        obstacle_tracking_stream.send(
            ObstaclesMessage(timestamp, tracked_obstacles, tracker_delay))
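__compute_tracker_delay is not included in this excerpt. A purely hypothetical stand-in that is consistent with how it is called (both runtimes are in milliseconds):

    def __compute_tracker_delay(self, game_time, detector_runtime,
                                tracker_runtime):
        # Hypothetical sketch: report the combined latency after which
        # the tracked obstacles for this frame become available.
        return detector_runtime + tracker_runtime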
Example 15
    def on_frame_msg(self, msg, obstacle_tracking_stream):
        """Invoked when a FrameMessage is received on the camera stream."""
        self._logger.debug('@{}: {} received frame'.format(
            msg.timestamp, self.config.name))
        assert msg.frame.encoding == 'BGR', 'Expects BGR frames'
        image_np = msg.frame.as_bgr_numpy_array()
        results = self.run_model(image_np)
        obstacles = []
        for res in results:
            track_id = res['tracking_id']
            bbox = res['bbox']
            score = res['score']
            label_id = res['class'] - 1
            if label_id > 80:
                continue
            label = self.trained_dataset.class_name[label_id]
            if label in ['Pedestrian', 'pedestrian']:
                label = 'person'
            elif label == 'Car':
                label = 'car'
            elif label == 'Cyclist':
                label = 'bicycle'
            if label in OBSTACLE_LABELS:
                bounding_box_2D = BoundingBox2D(bbox[0], bbox[2], bbox[1],
                                                bbox[3])
                bounding_box_3D = None
                if 'dim' in res and 'loc' in res and 'rot_y' in res:
                    bounding_box_3D = BoundingBox3D.from_dimensions(
                        res['dim'], res['loc'], res['rot_y'])
                obstacles.append(
                    Obstacle(bounding_box_3D,
                             score,
                             label,
                             track_id,
                             bounding_box_2D=bounding_box_2D))
        obstacle_tracking_stream.send(
            ObstaclesMessage(msg.timestamp, obstacles, 0))
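The if/elif chain that normalizes dataset labels could equivalently be written as a lookup table; an illustrative alternative:

    # Map raw dataset labels to the labels Pylot expects; fall back to
    # the original label when there is no mapping.
    LABEL_MAP = {
        'Pedestrian': 'person',
        'pedestrian': 'person',
        'Car': 'car',
        'Cyclist': 'bicycle',
    }
    label = LABEL_MAP.get(label, label)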
Example 16
    def on_msg_camera_stream(self, msg, obstacles_stream):
        """Invoked whenever a frame message is received on the stream.

        Args:
            msg (:py:class:`~pylot.perception.messages.FrameMessage`): Message
                received.
            obstacles_stream (:py:class:`erdos.WriteStream`): Stream on which
                the operator sends
                :py:class:`~pylot.perception.messages.ObstaclesMessage`
                messages.
        """
        self._logger.debug('@{}: {} received message'.format(
            msg.timestamp, self.config.name))
        start_time = time.time()
        # The models expect BGR images.
        assert msg.frame.encoding == 'BGR', 'Expects BGR frames'
        num_detections, res_boxes, res_scores, res_classes = self.__run_model(
            msg.frame.frame)
        obstacles = []
        for i in range(0, num_detections):
            if res_classes[i] in self._coco_labels:
                if (res_scores[i] >=
                        self._flags.obstacle_detection_min_score_threshold):
                    if (self._coco_labels[res_classes[i]] in OBSTACLE_LABELS):
                        obstacles.append(
                            Obstacle(BoundingBox2D(
                                int(res_boxes[i][1] *
                                    msg.frame.camera_setup.width),
                                int(res_boxes[i][3] *
                                    msg.frame.camera_setup.width),
                                int(res_boxes[i][0] *
                                    msg.frame.camera_setup.height),
                                int(res_boxes[i][2] *
                                    msg.frame.camera_setup.height)),
                                     res_scores[i],
                                     self._coco_labels[res_classes[i]],
                                     id=self._unique_id))
                        self._unique_id += 1
                    else:
                        self._logger.warning(
                            'Ignoring non essential detection {}'.format(
                                self._coco_labels[res_classes[i]]))
            else:
                self._logger.warning('Filtering unknown class: {}'.format(
                    res_classes[i]))

        self._logger.debug('@{}: {} obstacles: {}'.format(
            msg.timestamp, self.config.name, obstacles))

        # Get runtime in ms.
        runtime = (time.time() - start_time) * 1000
        # Send out obstacles.
        obstacles_stream.send(
            ObstaclesMessage(msg.timestamp, obstacles, runtime))
        obstacles_stream.send(erdos.WatermarkMessage(msg.timestamp))

        if self._flags.log_detector_output:
            msg.frame.annotate_with_bounding_boxes(msg.timestamp, obstacles,
                                                   None, self._bbox_colors)
            msg.frame.save(msg.timestamp.coordinates[0], self._flags.data_path,
                           'detector-{}'.format(self.config.name))
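The box arithmetic above converts the detector's normalized [ymin, xmin, ymax, xmax] output into pixel coordinates in BoundingBox2D's (x_min, x_max, y_min, y_max) order. A small helper (illustrative, not part of Pylot) makes the intent explicit:

    def to_bounding_box_2d(box, width, height):
        # box holds (ymin, xmin, ymax, xmax), each normalized to [0, 1].
        ymin, xmin, ymax, xmax = box
        return BoundingBox2D(int(xmin * width), int(xmax * width),
                             int(ymin * height), int(ymax * height))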
Example 17
    def on_msg_camera_stream(self, msg, obstacles_stream):
        """Invoked whenever a frame message is received on the stream.

        Args:
            msg (:py:class:`~pylot.perception.messages.FrameMessage`): Message
                received.
            obstacles_stream (:py:class:`erdos.WriteStream`): Stream on which
                the operator sends
                :py:class:`~pylot.perception.messages.ObstaclesMessage`
                messages.
        """
        operator_time_total_start = time.time()
        self._logger.debug('@{}: {} received message'.format(
            msg.timestamp, self.config.name))
        inputs = msg.frame.as_rgb_numpy_array()

        results = []
        start_time = time.time()
        if MODIFIED_AUTOML:
            (boxes_np, scores_np, classes_np,
             num_detections_np) = self._tf_session.run(
                 self._detections_batch,
                 feed_dict={self._image_placeholder: inputs})
            num_detections = num_detections_np[0]
            boxes = boxes_np[0][:num_detections]
            scores = scores_np[0][:num_detections]
            classes = classes_np[0][:num_detections]
            results = zip(boxes, scores, classes)
        else:
            outputs_np = self._tf_session.run(
                self._detections_batch,
                feed_dict={self._image_placeholder: inputs})[0]
            for _, x, y, width, height, score, _class in outputs_np:
                results.append(((y, x, y + height, x + width), score, _class))
        obstacles = []
        for (ymin, xmin, ymax, xmax), score, _class in results:
            if np.isclose(ymin, ymax) or np.isclose(xmin, xmax):
                continue
            if MODIFIED_AUTOML:
                # The alternate NMS implementation screws up the class labels.
                _class = int(_class) + 1
            if _class in self._coco_labels:
                if (score >= self._flags.obstacle_detection_min_score_threshold
                        and self._coco_labels[_class]
                        in self._important_labels):
                    camera_setup = msg.frame.camera_setup
                    width, height = camera_setup.width, camera_setup.height
                    xmin, xmax = max(0, int(xmin)), min(int(xmax), width)
                    ymin, ymax = max(0, int(ymin)), min(int(ymax), height)
                    if xmin < xmax and ymin < ymax:
                        obstacles.append(
                            DetectedObstacle(BoundingBox2D(
                                xmin, xmax, ymin, ymax),
                                             score,
                                             self._coco_labels[_class],
                                             id=self._unique_id))
                        self._unique_id += 1
                        self._csv_logger.info(
                            "{},{},detection,{},{:4f}".format(
                                pylot.utils.time_epoch_ms(), msg.timestamp,
                                self._coco_labels[_class], score))
            else:
                self._logger.debug(
                    'Filtering unknown class: {}'.format(_class))

        if (self._flags.visualize_detected_obstacles
                or self._flags.log_detector_output):
            msg.frame.annotate_with_bounding_boxes(msg.timestamp, obstacles,
                                                   None, self._bbox_colors)
            if self._flags.visualize_detected_obstacles:
                msg.frame.visualize(self.config.name,
                                    pygame_display=pylot.utils.PYGAME_DISPLAY)
            if self._flags.log_detector_output:
                msg.frame.save(msg.timestamp.coordinates[0],
                               self._flags.data_path,
                               'detector-{}'.format(self.config.name))
        end_time = time.time()
        obstacles_stream.send(ObstaclesMessage(msg.timestamp, obstacles, 0))
        obstacles_stream.send(erdos.WatermarkMessage(msg.timestamp))
        operator_time_total_end = time.time()
        self._logger.debug("@{}: runtime of the detector: {}".format(
            msg.timestamp, (end_time - start_time) * 1000))
        self._logger.debug("@{}: total time spent: {}".format(
            msg.timestamp,
            (operator_time_total_end - operator_time_total_start) * 1000))
Example 18
    def on_watermark(self, timestamp, obstacles_stream):
        """Invoked whenever a frame message is received on the stream.

        Args:
            msg (:py:class:`~pylot.perception.messages.FrameMessage`): Message
                received.
            obstacles_stream (:py:class:`erdos.WriteStream`): Stream on which
                the operator sends
                :py:class:`~pylot.perception.messages.ObstaclesMessage`
                messages.
        """
        if timestamp.is_top:
            return
        start_time = time.time()
        ttd_msg = self._ttd_msgs.popleft()
        frame_msg = self._frame_msgs.popleft()
        ttd = ttd_msg.data
        self.update_model_choice(ttd)
        frame = frame_msg.frame
        inputs = frame.as_rgb_numpy_array()
        detector_start_time = time.time()
        outputs_np = self._tf_session.run(
            self._signitures['prediction'],
            feed_dict={self._signitures['image_arrays']: [inputs]})[0]
        detector_end_time = time.time()
        self._logger.debug("@{}: detector runtime {}".format(
            timestamp, (detector_end_time - detector_start_time) * 1000))
        obstacles = []
        camera_setup = frame.camera_setup
        for _, ymin, xmin, ymax, xmax, score, _class in outputs_np:
            xmin, ymin, xmax, ymax = int(xmin), int(ymin), int(xmax), int(ymax)
            if _class in self._coco_labels:
                if (score >= self._flags.obstacle_detection_min_score_threshold
                        and self._coco_labels[_class] in OBSTACLE_LABELS):
                    xmin, xmax = max(0, xmin), min(xmax, camera_setup.width)
                    ymin, ymax = max(0, ymin), min(ymax, camera_setup.height)
                    if xmin < xmax and ymin < ymax:
                        obstacles.append(
                            Obstacle(BoundingBox2D(xmin, xmax, ymin, ymax),
                                     score,
                                     self._coco_labels[_class],
                                     id=self._unique_id))
                        self._unique_id += 1
                        self._csv_logger.info(
                            "{},{},detection,{},{:4f}".format(
                                pylot.utils.time_epoch_ms(),
                                timestamp.coordinates[0],
                                self._coco_labels[_class], score))
            else:
                self._logger.debug(
                    'Filtering unknown class: {}'.format(_class))

        if self._flags.log_detector_output:
            frame.annotate_with_bounding_boxes(timestamp, obstacles, None,
                                               self._bbox_colors)
            frame.save(timestamp.coordinates[0], self._flags.data_path,
                       'detector-{}'.format(self.config.name))
        obstacles_stream.send(ObstaclesMessage(timestamp, obstacles, 0))

        operator_time_total_end = time.time()
        self._logger.debug("@{}: total time spent: {}".format(
            timestamp, (operator_time_total_end - start_time) * 1000))
Example 19
    def on_msg_camera_stream(self, msg, obstacles_stream):
        """ Invoked when the operator receives a message on the data stream."""
        self._logger.debug('@{}: {} received message'.format(
            msg.timestamp, self._name))
        start_time = time.time()
        # The models expect BGR images.
        assert msg.frame.encoding == 'BGR', 'Expects BGR frames'
        # Expand dimensions since the model expects images to have
        # shape: [1, None, None, 3]
        image_np_expanded = np.expand_dims(msg.frame.frame, axis=0)
        (boxes, scores, classes, num_detections) = self._tf_session.run(
            [
                self._detection_boxes, self._detection_scores,
                self._detection_classes, self._num_detections
            ],
            feed_dict={self._image_tensor: image_np_expanded})

        num_detections = int(num_detections[0])
        res_classes = classes[0][:num_detections]
        res_boxes = boxes[0][:num_detections]
        res_scores = scores[0][:num_detections]

        obstacles = []
        for i in range(0, num_detections):
            if (res_classes[i] in self._coco_labels and res_scores[i] >=
                    self._flags.obstacle_detection_min_score_threshold):
                obstacles.append(
                    DetectedObstacle(
                        BoundingBox2D(
                            int(res_boxes[i][1] *
                                msg.frame.camera_setup.width),
                            int(res_boxes[i][3] *
                                msg.frame.camera_setup.width),
                            int(res_boxes[i][0] *
                                msg.frame.camera_setup.height),
                            int(res_boxes[i][2] *
                                msg.frame.camera_setup.height)), res_scores[i],
                        self._coco_labels[res_classes[i]]))
            else:
                self._logger.warning(
                    'Filtering low-score or unknown class: {}'.format(
                        res_classes[i]))

        self._logger.debug('@{}: {} obstacles: {}'.format(
            msg.timestamp, self._name, obstacles))

        if (self._flags.visualize_detected_obstacles
                or self._flags.log_detector_output):
            msg.frame.annotate_with_bounding_boxes(msg.timestamp, obstacles,
                                                   self._bbox_colors)
            if self._flags.visualize_detected_obstacles:
                msg.frame.visualize(self._name)
            if self._flags.log_detector_output:
                msg.frame.save(msg.timestamp.coordinates[0],
                               self._flags.data_path,
                               'detector-{}'.format(self._name))

        # Get runtime in ms.
        runtime = (time.time() - start_time) * 1000
        self._csv_logger.info('{},{},"{}",{}'.format(time_epoch_ms(),
                                                     self._name, msg.timestamp,
                                                     runtime))
        # Send out obstacles.
        obstacles_stream.send(
            ObstaclesMessage(obstacles, msg.timestamp, runtime))
Example 20
    def on_watermark(self, timestamp, obstacles_stream):
        """Invoked whenever a frame message is received on the stream.

        Args:
            msg (:py:class:`~pylot.perception.messages.FrameMessage`): Message
                received.
            obstacles_stream (:py:class:`erdos.WriteStream`): Stream on which
                the operator sends
                :py:class:`~pylot.perception.messages.ObstaclesMessage`
                messages.
        """
        start_time = time.time()
        #ttd_msg = self._ttd_msgs.popleft()
        frame_msg = self._frame_msgs.popleft()
        #ttd, detection_deadline = ttd_msg.data
        #self.update_model_choice(detection_deadline)
        frame = frame_msg.frame
        inputs = frame.as_rgb_numpy_array()
        detector_start_time = time.time()
        outputs_np = self._driver.serve_images([inputs])[0]
        detector_end_time = time.time()
        self._logger.debug("@{}: detector runtime {}".format(
            timestamp, (detector_end_time - detector_start_time) * 1000))
        obstacles = []
        camera_setup = frame.camera_setup
        for _, y, x, height, width, score, _class in outputs_np:
            xmin = int(x)
            ymin = int(y)
            xmax = int(x + width)
            ymax = int(y + height)
            if _class in self._coco_labels:
                if (score >= self._flags.obstacle_detection_min_score_threshold
                        and self._coco_labels[_class]
                        in self._important_labels):
                    xmin, xmax = max(0, xmin), min(xmax, camera_setup.width)
                    ymin, ymax = max(0, ymin), min(ymax, camera_setup.height)
                    if xmin < xmax and ymin < ymax:
                        obstacles.append(
                            DetectedObstacle(BoundingBox2D(
                                xmin, xmax, ymin, ymax),
                                             score,
                                             self._coco_labels[_class],
                                             id=self._unique_id))
                        self._unique_id += 1
                        self._csv_logger.info(
                            "{},{},detection,{},{:4f}".format(
                                pylot.utils.time_epoch_ms(),
                                timestamp.coordinates[0],
                                self._coco_labels[_class], score))
            else:
                self._logger.debug(
                    'Filtering unknown class: {}'.format(_class))

        if (self._flags.visualize_detected_obstacles
                or self._flags.log_detector_output):
            frame.annotate_with_bounding_boxes(timestamp, obstacles, None,
                                               self._bbox_colors)
            if self._flags.visualize_detected_obstacles:
                frame.visualize(self.config.name,
                                pygame_display=pylot.utils.PYGAME_DISPLAY)
            if self._flags.log_detector_output:
                frame.save(timestamp.coordinates[0], self._flags.data_path,
                           'detector-{}'.format(self.config.name))
        obstacles_stream.send(ObstaclesMessage(timestamp, obstacles, 0))
        obstacles_stream.send(erdos.WatermarkMessage(timestamp))
        operator_time_total_end = time.time()
        self._logger.debug("@{}: total time spent: {}".format(
            timestamp, (operator_time_total_end - start_time) * 1000))
Example 21
    def on_msg_camera_stream(self, msg, obstacles_stream):
        """Invoked whenever a frame message is received on the stream.

        Args:
            msg (:py:class:`~pylot.perception.messages.FrameMessage`): Message
                received.
            obstacles_stream (:py:class:`erdos.WriteStream`): Stream on which
                the operator sends
                :py:class:`~pylot.perception.messages.ObstaclesMessage`
                messages.
        """
        self._logger.debug('@{}: {} received message'.format(
            msg.timestamp, self.config.name))
        start_time = time.time()
        # The models expect BGR images.
        assert msg.frame.encoding == 'BGR', 'Expects BGR frames'
        # Expand dimensions since the model expects images to have
        # shape: [1, None, None, 3]
        image_np_expanded = np.expand_dims(msg.frame.frame, axis=0)
        (boxes, scores, classes, num_detections) = self._tf_session.run(
            [
                self._detection_boxes, self._detection_scores,
                self._detection_classes, self._num_detections
            ],
            feed_dict={self._image_tensor: image_np_expanded})

        num_detections = int(num_detections[0])
        res_classes = [int(cls) for cls in classes[0][:num_detections]]
        res_boxes = boxes[0][:num_detections]
        res_scores = scores[0][:num_detections]

        obstacles = []
        for i in range(0, num_detections):
            if res_classes[i] in self._coco_labels:
                if (res_scores[i] >=
                        self._flags.obstacle_detection_min_score_threshold
                        and self._coco_labels[
                            res_classes[i]] in self._important_labels):
                    obstacles.append(
                        DetectedObstacle(BoundingBox2D(
                            int(res_boxes[i][1] *
                                msg.frame.camera_setup.width),
                            int(res_boxes[i][3] *
                                msg.frame.camera_setup.width),
                            int(res_boxes[i][0] *
                                msg.frame.camera_setup.height),
                            int(res_boxes[i][2] *
                                msg.frame.camera_setup.height)),
                                         res_scores[i],
                                         self._coco_labels[res_classes[i]],
                                         id=self._unique_id))
                    self._unique_id += 1
            else:
                self._logger.warning('Filtering unknown class: {}'.format(
                    res_classes[i]))

        self._logger.debug('@{}: {} obstacles: {}'.format(
            msg.timestamp, self.config.name, obstacles))

        if (self._flags.visualize_detected_obstacles
                or self._flags.log_detector_output):
            msg.frame.annotate_with_bounding_boxes(msg.timestamp, obstacles,
                                                   None, self._bbox_colors)
            if self._flags.visualize_detected_obstacles:
                msg.frame.visualize(self.config.name)
            if self._flags.log_detector_output:
                msg.frame.save(msg.timestamp.coordinates[0],
                               self._flags.data_path,
                               'detector-{}'.format(self.config.name))

        # Get runtime in ms.
        runtime = (time.time() - start_time) * 1000
        # Send out obstacles.
        obstacles_stream.send(
            ObstaclesMessage(msg.timestamp, obstacles, runtime))
Example 22
    def on_watermark(self, timestamp, obstacles_error_stream):
        """Invoked when all input streams have received a watermark.

        Args:
            timestamp (:py:class:`erdos.timestamp.Timestamp`): The timestamp of
                the watermark.
        """
        assert len(timestamp.coordinates) == 1
        game_time = timestamp.coordinates[0]
        if not self._last_notification:
            self._last_notification = game_time
            return
        else:
            self._sim_interval = (game_time - self._last_notification)
            self._last_notification = game_time

        sim_time = timestamp.coordinates[0]
        while len(self._detector_start_end_times) > 0:
            (end_time, start_time) = self._detector_start_end_times[0]

            if end_time <= game_time:
                heapq.heappop(self._detector_start_end_times)

                (ego_transform, ground_obstacles) = \
                    self.__get_ground_obstacles_at(end_time)
                obstacles = self.__get_obstacles_at(start_time)

                if len(obstacles) > 0 or len(ground_obstacles) > 0:
                    errs = pylot.perception.detection.utils.get_errors(
                        ground_obstacles, obstacles, ego_transform)
                    self._logger.debug(
                        '@{}: computed detection errors'.format(timestamp))

                    for (ground_ob, det_ob, err_val) in errs:
                        det_id = "NONE"
                        if det_ob is not None:
                            det_ob.vis_error = err_val
                            det_id = det_ob.id

                        # Compute the ground obstacle's location relative
                        # to the ego vehicle.
                        ego_point = ego_transform.location.as_numpy_array()
                        ego_loc = ego_transform.inverse_transform_points(
                            ego_point.reshape(1, 3)).reshape(3, )
                        ob_point = \
                            ground_ob.transform.location.as_numpy_array()
                        ob_actual_loc = \
                            ego_transform.inverse_transform_points(
                                ob_point.reshape(1, 3)).reshape(3, )
                        relative_dist = ob_actual_loc - ego_loc

                        self._csv_logger.info(
                            '{},{},{},{},{},{:.4f},{:.4f},{:.4f},{:.4f}'.
                            format(time_epoch_ms(), sim_time,
                                   self.config.name, ground_ob.id,
                                   ground_ob.label, relative_dist[0],
                                   relative_dist[1], relative_dist[2],
                                   err_val))

                self._logger.debug('Computing accuracy for {} {}'.format(
                    end_time, start_time))

                obstacles_error_stream.send(
                    ObstaclesMessage(timestamp, obstacles))
                obstacles_error_stream.send(erdos.WatermarkMessage(timestamp))
            else:
                # The remaining entries require newer ground obstacles.
                break

        self.__garbage_collect_obstacles()
Example 23
def main(argv):
    (obstacles_stream, traffic_lights_stream, obstacles_tracking_stream,
     open_drive_stream, global_trajectory_stream, control_display_stream,
     streams_to_send_top_on) = create_data_flow()
    # Run the data-flow.
    node_handle = erdos.run_async()
    signal.signal(signal.SIGINT, shutdown)

    # Send waypoints.
    waypoints = Waypoints.read_from_csv_file(FLAGS.waypoints_csv_file,
                                             FLAGS.target_speed)
    global_trajectory_stream.send(
        WaypointsMessage(
            erdos.Timestamp(coordinates=[0]), waypoints,
            pylot.planning.utils.BehaviorPlannerState.FOLLOW_WAYPOINTS))

    # Send top watermark on all streams that require it.
    top_msg = erdos.WatermarkMessage(erdos.Timestamp(is_top=True))
    open_drive_stream.send(top_msg)
    global_trajectory_stream.send(top_msg)
    for stream in streams_to_send_top_on:
        stream.send(top_msg)

    time_to_sleep = 1.0 / FLAGS.sensor_frequency
    count = 0
    try:
        while True:
            timestamp = erdos.Timestamp(coordinates=[count])
            if not FLAGS.obstacle_detection:
                obstacles_stream.send(ObstaclesMessage(timestamp, []))
                obstacles_stream.send(erdos.WatermarkMessage(timestamp))
            if not FLAGS.traffic_light_detection:
                traffic_lights_stream.send(TrafficLightsMessage(timestamp, []))
                traffic_lights_stream.send(erdos.WatermarkMessage(timestamp))
            if not FLAGS.obstacle_tracking:
                obstacles_tracking_stream.send(
                    ObstacleTrajectoriesMessage(timestamp, []))
                obstacles_tracking_stream.send(
                    erdos.WatermarkMessage(timestamp))
            count += 1
            if pylot.flags.must_visualize():
                import pygame
                from pygame.locals import K_n
                events = pygame.event.get()
                for event in events:
                    if event.type == pygame.KEYUP:
                        if event.key == K_n:
                            control_display_stream.send(
                                erdos.Message(erdos.Timestamp(coordinates=[0]),
                                              event.key))
                    elif event.type == pygame.QUIT:
                        raise KeyboardInterrupt
                    elif event.type == pygame.KEYDOWN:
                        if (event.key == pygame.K_c
                                and pygame.key.get_mods() & pygame.KMOD_CTRL):
                            raise KeyboardInterrupt

            # NOTE: We should offset sleep time by the time it takes to send
            # the messages.
            time.sleep(time_to_sleep)
    except KeyboardInterrupt:
        node_handle.shutdown()
        if pylot.flags.must_visualize():
            import pygame
            pygame.quit()