def on_watermark(self, timestamp, ground_tracking_stream):
    """Invoked when all input streams have received a watermark.

    Converts the past frames of every currently visible obstacle into the
    ego perspective given by the latest CanBus reading, and emits the
    resulting trajectories on the ground tracking stream.
    """
    self._logger.debug('@{}: received watermark'.format(timestamp))
    obstacles_msg = self._obstacles_raw_msgs.popleft()
    # Use the most recent can_bus message to convert the past frames
    # of vehicles and people to our current perspective.
    ego_transform = self._can_bus_msgs.popleft().data.transform
    obstacle_trajectories = []
    # Only consider obstacles which still exist at the most recent
    # timestamp.
    for obstacle in obstacles_msg.obstacles:
        history = self._obstacles[obstacle.id]
        history.append(obstacle)
        trajectory = []
        # Walk this obstacle's past frames, oldest first.
        for past_obstacle in history:
            # Center of the obstacle's bounding box, in relation to the
            # CanBus measurement.
            center_transform = (past_obstacle.transform *
                                past_obstacle.bounding_box.transform)
            ego_location = ego_transform.inverse_transform_points(
                [center_transform.location])[0]
            trajectory.append(
                pylot.utils.Transform(location=ego_location,
                                      rotation=pylot.utils.Rotation()))
        obstacle_trajectories.append(
            ObstacleTrajectory(obstacle.label, obstacle.id, trajectory))
    ground_tracking_stream.send(
        ObstacleTrajectoriesMessage(timestamp, obstacle_trajectories))
def get_predictions(self, prediction_msg, ego_transform):
    """Extracts obstacle predictions out of the message.

    This method is useful to build obstacle predictions when the operator
    directly receives detections instead of predictions. The method
    assumes that the obstacles are static.

    Args:
        prediction_msg: Either an ``ObstaclesMessage`` holding raw
            detections, or a ``PredictionMessage`` holding predictions.
        ego_transform: Transform of the ego vehicle, used to express each
            obstacle's transform in ego-centric coordinates.

    Returns:
        A list of :py:class:`ObstaclePrediction` objects.

    Raises:
        ValueError: If ``prediction_msg`` is of an unexpected type.
    """
    # Note: `predictions` is assigned on every branch, so no redundant
    # initialization is needed before the isinstance checks.
    if isinstance(prediction_msg, ObstaclesMessage):
        # Transform the obstacle into a prediction.
        self._logger.debug(
            'Planner received obstacles instead of predictions.')
        predictions = []
        for obstacle in prediction_msg.obstacles:
            # An empty trajectory: the obstacle is assumed static.
            obstacle_trajectory = ObstacleTrajectory(obstacle, [])
            prediction = ObstaclePrediction(
                obstacle_trajectory, obstacle.transform, 1.0,
                [ego_transform.inverse_transform() * obstacle.transform])
            predictions.append(prediction)
    elif isinstance(prediction_msg, PredictionMessage):
        predictions = prediction_msg.predictions
    else:
        raise ValueError('Unexpected obstacles msg type {}'.format(
            type(prediction_msg)))
    return predictions
def on_watermark(self, timestamp, tracked_obstacles_stream):
    """Invoked when all input streams have received a watermark.

    Args:
        timestamp (:py:class:`erdos.timestamp.Timestamp`): The timestamp of
            the watermark.
    """
    self._logger.debug('@{}: received watermark'.format(timestamp))
    if timestamp.is_top:
        tracked_obstacles_stream.send(erdos.WatermarkMessage(timestamp))
        return
    obstacles_msg = self._obstacles_msgs.popleft()
    depth_msg = self._depth_msgs.popleft()
    vehicle_transform = self._pose_msgs.popleft().data.transform
    obstacles_with_location = get_obstacle_locations(
        obstacles_msg.obstacles, depth_msg, vehicle_transform,
        self._camera_setup, self._logger)
    ids_cur_timestamp = []
    obstacle_trajectories = []
    for obstacle in obstacles_with_location:
        # Ignore obstacles that are far away.
        if (vehicle_transform.location.distance(
                obstacle.transform.location) >
                self._flags.dynamic_obstacle_distance_threshold):
            continue
        ids_cur_timestamp.append(obstacle.id)
        self._obstacle_history[obstacle.id].append(obstacle)
        # Transform obstacle location from global world coordinates to
        # ego-centric coordinates.
        # FIX: use a distinct loop variable (`past_obstacle`) instead of
        # re-binding `obstacle`; the original shadowing only worked
        # because the current obstacle is the last history entry.
        cur_obstacle_trajectory = []
        for past_obstacle in self._obstacle_history[obstacle.id]:
            new_location = \
                vehicle_transform.inverse_transform_locations(
                    [past_obstacle.transform.location])[0]
            cur_obstacle_trajectory.append(
                pylot.utils.Transform(new_location,
                                      pylot.utils.Rotation()))
        # The trajectory is relative to the current location.
        obstacle_trajectories.append(
            ObstacleTrajectory(obstacle, cur_obstacle_trajectory))
    tracked_obstacles_stream.send(
        ObstacleTrajectoriesMessage(timestamp, obstacle_trajectories))
    tracked_obstacles_stream.send(erdos.WatermarkMessage(timestamp))
    self._log_obstacles(timestamp, obstacles_with_location)
    self._timestamp_history.append(timestamp)
    self._timestamp_to_id[timestamp] = ids_cur_timestamp
    # Garbage-collect the oldest tracking step once the window is full.
    if len(self._timestamp_history) >= self._flags.tracking_num_steps:
        gc_timestamp = self._timestamp_history.popleft()
        for obstacle_id in self._timestamp_to_id[gc_timestamp]:
            self._obstacle_history[obstacle_id].popleft()
            if len(self._obstacle_history[obstacle_id]) == 0:
                del self._obstacle_history[obstacle_id]
        del self._timestamp_to_id[gc_timestamp]
def on_watermark(self, timestamp, tracked_obstacles_stream):
    """Invoked when all input streams have received a watermark.

    Args:
        timestamp (:py:class:`erdos.timestamp.Timestamp`): The timestamp of
            the watermark.
    """
    self._logger.debug('@{}: received watermark'.format(timestamp))
    obstacles_msg = self._obstacles_msgs.popleft()
    depth_msg = self._depth_msgs.popleft()
    vehicle_transform = self._pose_msgs.popleft().data.transform
    obstacles_with_location = get_obstacle_locations(
        obstacles_msg.obstacles, depth_msg, vehicle_transform,
        self._camera_setup, self._logger)
    ids_cur_timestamp = []
    obstacle_trajectories = []
    for obstacle in obstacles_with_location:
        ids_cur_timestamp.append(obstacle.id)
        self._obstacle_history[obstacle.id].append(obstacle)
        # Transform obstacle location from global world coordinates to
        # ego-centric coordinates.
        # FIX: use a distinct loop variable (`past_obstacle`) instead of
        # re-binding `obstacle`; the original shadowing only worked
        # because the current obstacle is the last history entry.
        cur_obstacle_trajectory = []
        for past_obstacle in self._obstacle_history[obstacle.id]:
            new_location = \
                vehicle_transform.inverse_transform_locations(
                    [past_obstacle.transform.location])[0]
            cur_obstacle_trajectory.append(
                pylot.utils.Transform(new_location,
                                      pylot.utils.Rotation()))
        obstacle_trajectories.append(
            ObstacleTrajectory(obstacle.label, obstacle.id,
                               obstacle.bounding_box,
                               cur_obstacle_trajectory))
    tracked_obstacles_stream.send(
        ObstacleTrajectoriesMessage(timestamp, obstacle_trajectories))
    # Log the world-frame location of every tracked obstacle.
    for obstacle in obstacles_with_location:
        obstacle_location = obstacle.transform.location
        x = obstacle_location.x
        y = obstacle_location.y
        z = obstacle_location.z
        self._csv_logger.debug('{},{},obstacle,{},{}'.format(
            pylot.utils.time_epoch_ms(), timestamp.coordinates[0],
            "[{} {}]".format(obstacle.id, obstacle.label),
            "[{:.4f} {:.4f} {:.4f}]".format(x, y, z)))
    self._timestamp_history.append(timestamp)
    self._timestamp_to_id[timestamp] = ids_cur_timestamp
    # Garbage-collect the oldest tracking step once the window is full.
    if len(self._timestamp_history) >= self._flags.tracking_num_steps:
        gc_timestamp = self._timestamp_history.popleft()
        for obstacle_id in self._timestamp_to_id[gc_timestamp]:
            self._obstacle_history[obstacle_id].popleft()
            if len(self._obstacle_history[obstacle_id]) == 0:
                del self._obstacle_history[obstacle_id]
        del self._timestamp_to_id[gc_timestamp]
def on_watermark(self, timestamp):
    """Invoked when all input streams have received a watermark.

    Args:
        timestamp (:py:class:`erdos.timestamp.Timestamp`): The timestamp of
            the watermark.
    """
    tracking_msg = self._tracking_msgs.popleft()
    prediction_msg = self._prediction_msgs.popleft()
    vehicle_transform = self._pose_msgs.popleft().data.transform
    # TODO: The evaluator assumes that the obstacle tracker assigns the
    # same ids to the obstacles as they have in the CARLA simulation.
    # Start calculating metrics when we've taken sufficiently many steps.
    if len(self._predictions) == self._flags.prediction_num_future_steps:
        # Convert the tracking message to a dictionary with trajectories
        # in world coordinates, for speedup when calculating metrics.
        ground_trajectories_dict = {}
        for obstacle in tracking_msg.obstacle_trajectories:
            world_trajectory = [
                vehicle_transform * past_transform
                for past_transform in obstacle.trajectory
            ]
            ground_trajectories_dict[obstacle.id] = ObstacleTrajectory(
                obstacle.label, obstacle.id, obstacle.bounding_box,
                world_trajectory)
        # Evaluate the prediction corresponding to the current set of
        # ground truth past trajectories.
        self._calculate_metrics(timestamp, ground_trajectories_dict,
                                self._predictions[0].predictions)
    # Convert the prediction to world coordinates and append it to the
    # queue.
    obstacle_predictions_list = []
    for obstacle in prediction_msg.predictions:
        world_trajectory = [
            vehicle_transform * past_transform
            for past_transform in obstacle.trajectory
        ]
        obstacle_predictions_list.append(
            ObstaclePrediction(
                obstacle.label,
                obstacle.id,
                # Current transform of the obstacle: the last value of
                # its trajectory.
                obstacle.trajectory[-1],
                obstacle.bounding_box,
                1.0,  # probability
                world_trajectory))
    self._predictions.append(
        PredictionMessage(timestamp, obstacle_predictions_list))
def on_watermark(self, timestamp, tracked_obstacles_stream):
    """Invoked when all input streams have received a watermark.

    Args:
        timestamp (:py:class:`erdos.timestamp.Timestamp`): The timestamp of
            the watermark.
    """
    self._logger.debug('@{}: received watermark'.format(timestamp))
    obstacles_msg = self._obstacles_msgs.popleft()
    depth_msg = self._depth_msgs.popleft()
    vehicle_transform = self._pose_msgs.popleft().data.transform
    frame_msg = self._frame_msgs.popleft()
    obstacles_with_location = get_obstacle_locations(
        obstacles_msg.obstacles, depth_msg, vehicle_transform,
        self._camera_setup, self._logger)
    ids_cur_timestamp = []
    obstacle_trajectories = []
    for obstacle in obstacles_with_location:
        ids_cur_timestamp.append(obstacle.id)
        self._obstacle_history[obstacle.id].append(obstacle)
        # Transform obstacle location from global world coordinates to
        # ego-centric coordinates.
        # FIX: use a distinct loop variable (`past_obstacle`) instead of
        # re-binding `obstacle`; the original shadowing only worked
        # because the current obstacle is the last history entry.
        cur_obstacle_trajectory = []
        for past_obstacle in self._obstacle_history[obstacle.id]:
            new_location = \
                vehicle_transform.inverse_transform_locations(
                    [past_obstacle.transform.location])[0]
            cur_obstacle_trajectory.append(
                pylot.utils.Transform(new_location,
                                      pylot.utils.Rotation()))
        obstacle_trajectories.append(
            ObstacleTrajectory(obstacle.label, obstacle.id,
                               obstacle.bounding_box,
                               cur_obstacle_trajectory))
    tracked_obstacles_stream.send(
        ObstacleTrajectoriesMessage(timestamp, obstacle_trajectories))
    self._timestamp_history.append(timestamp)
    self._timestamp_to_id[timestamp] = ids_cur_timestamp
    # Garbage-collect the oldest tracking step once the window is full.
    if len(self._timestamp_history) >= self._flags.tracking_num_steps:
        gc_timestamp = self._timestamp_history.popleft()
        for obstacle_id in self._timestamp_to_id[gc_timestamp]:
            self._obstacle_history[obstacle_id].popleft()
            if len(self._obstacle_history[obstacle_id]) == 0:
                del self._obstacle_history[obstacle_id]
        del self._timestamp_to_id[gc_timestamp]
    if self._flags.visualize_obstacles_with_distance:
        frame_msg.frame.annotate_with_bounding_boxes(
            timestamp, obstacles_with_location, vehicle_transform)
        frame_msg.frame.visualize(
            self.config.name, pygame_display=pylot.utils.PYGAME_DISPLAY)
def on_watermark(self, timestamp, tracked_obstacles_stream):
    """Invoked when all input streams have received a watermark.

    Args:
        timestamp (:py:class:`erdos.timestamp.Timestamp`): The timestamp of
            the watermark.
    """
    self._logger.debug('@{}: received watermark'.format(timestamp))
    obstacles_msg = self._obstacles_msgs.popleft()
    depth_msg = self._depth_msgs.popleft()
    vehicle_transform = self._can_bus_msgs.popleft().data.transform
    frame_msg = self._frame_msgs.popleft()
    obstacles_with_location = get_obstacle_locations(
        obstacles_msg.obstacles, depth_msg, vehicle_transform,
        self._camera_setup, self._logger)
    ids_cur_timestamp = []
    obstacle_trajectories = []
    for obstacle in obstacles_with_location:
        ids_cur_timestamp.append(obstacle.id)
        history = self._obstacle_history[obstacle.id]
        history.append(obstacle)
        # The trajectory is the sequence of world-frame transforms seen so
        # far for this obstacle id.
        past_transforms = [past.transform for past in history]
        obstacle_trajectories.append(
            ObstacleTrajectory(obstacle.label, obstacle.id,
                               obstacle.bounding_box, past_transforms))
    tracked_obstacles_stream.send(
        ObstacleTrajectoriesMessage(timestamp, obstacle_trajectories))
    self._timestamp_history.append(timestamp)
    self._timestamp_to_id[timestamp] = ids_cur_timestamp
    # Drop the oldest step of history once the tracking window is full.
    if len(self._timestamp_history) >= self._flags.tracking_num_steps:
        gc_timestamp = self._timestamp_history.popleft()
        for obstacle_id in self._timestamp_to_id[gc_timestamp]:
            self._obstacle_history[obstacle_id].popleft()
            if len(self._obstacle_history[obstacle_id]) == 0:
                del self._obstacle_history[obstacle_id]
        del self._timestamp_to_id[gc_timestamp]
    if self._flags.visualize_obstacles_with_distance:
        frame_msg.frame.annotate_with_bounding_boxes(
            timestamp, obstacles_with_location, vehicle_transform)
        frame_msg.frame.visualize(self.config.name)
def get_predictions(self, prediction_msg, ego_transform):
    """Extracts obstacle predictions out of the message.

    Builds static obstacle predictions when the operator directly receives
    detections instead of predictions; passes real predictions through.

    Args:
        prediction_msg: Either an ``ObstaclesMessage`` holding raw
            detections, or a ``PredictionMessage`` holding predictions.
        ego_transform: Transform of the ego vehicle, used to express each
            obstacle's transform in ego-centric coordinates.

    Returns:
        A list of :py:class:`ObstaclePrediction` objects.

    Raises:
        ValueError: If ``prediction_msg`` is of an unexpected type.
    """
    # Note: `predictions` is assigned on every branch, so no redundant
    # initialization is needed before the isinstance checks.
    if isinstance(prediction_msg, ObstaclesMessage):
        # Transform the obstacle into a prediction.
        predictions = []
        for obstacle in prediction_msg.obstacles:
            # An empty trajectory: the obstacle is assumed static.
            obstacle_trajectory = ObstacleTrajectory(obstacle, [])
            prediction = ObstaclePrediction(
                obstacle_trajectory, obstacle.transform, 1.0,
                [ego_transform.inverse_transform() * obstacle.transform])
            predictions.append(prediction)
    elif isinstance(prediction_msg, PredictionMessage):
        predictions = prediction_msg.predictions
    else:
        raise ValueError('Unexpected obstacles msg type {}'.format(
            type(prediction_msg)))
    return predictions
def on_watermark(self, timestamp: Timestamp,
                 ground_tracking_stream: WriteStream):
    """Invoked when all input streams have received a watermark.

    Emits ego-relative ground-truth trajectories for every obstacle that
    is still visible and within the configured distance threshold.
    """
    self._logger.debug('@{}: received watermark'.format(timestamp))
    if timestamp.is_top:
        return
    obstacles_msg = self._obstacles_raw_msgs.popleft()
    # Use the most recent pose message to convert the past frames
    # of vehicles and people to our current perspective.
    ego_transform = self._pose_msgs.popleft().data.transform
    obstacle_trajectories = []
    # Only consider obstacles which still exist at the most recent
    # timestamp.
    for obstacle in obstacles_msg.obstacles:
        if obstacle.id == self._vehicle_id and not \
                self._flags.prediction_ego_agent:
            # If we are not performing ego-agent prediction, do not
            # track the ego-vehicle.
            continue
        distance = ego_transform.location.distance(
            obstacle.transform.location)
        if distance > self._flags.dynamic_obstacle_distance_threshold:
            # Ignore the obstacle if it is too far away.
            continue
        history = self._obstacles[obstacle.id]
        history.append(obstacle)
        # Rebase every past frame of this obstacle onto the current Pose
        # measurement: transform of the center of the bounding box.
        trajectory = []
        for past_obstacle in history:
            center_transform = (past_obstacle.transform *
                                past_obstacle.bounding_box.transform)
            trajectory.append(
                ego_transform.inverse_transform() * center_transform)
        obstacle_trajectories.append(
            ObstacleTrajectory(obstacle, trajectory))
    ground_tracking_stream.send(
        ObstacleTrajectoriesMessage(timestamp, obstacle_trajectories))
def on_notification(self, timestamp):
    """Invoked when a complete set of input messages is available.

    Compares the oldest buffered prediction against the current ground
    truth once enough future steps have been buffered, then buffers the
    newest prediction in world coordinates.
    """
    tracking_msg = self._tracking_msgs.popleft()
    prediction_msg = self._prediction_msgs.popleft()
    vehicle_transform = self._can_bus_msgs.popleft().data.transform
    # Start calculating metrics when we've taken sufficiently many steps.
    if len(self._predictions) == self._flags.prediction_num_future_steps:
        # Convert the tracking message to a dictionary with trajectories
        # in world coordinates, for speedup when calculating metrics.
        ground_trajectories_dict = {}
        for obstacle in tracking_msg.obstacle_trajectories:
            world_trajectory = [
                vehicle_transform * past_transform
                for past_transform in obstacle.trajectory
            ]
            ground_trajectories_dict[obstacle.id] = ObstacleTrajectory(
                obstacle.label, obstacle.id, world_trajectory)
        # Evaluate the prediction corresponding to the current set of
        # ground truth past trajectories.
        self.calculate_metrics(ground_trajectories_dict,
                               self._predictions[0].predictions)
    # Convert the prediction to world coordinates and append it to the
    # queue.
    obstacle_predictions_list = []
    for obstacle in prediction_msg.predictions:
        world_trajectory = [
            vehicle_transform * past_transform
            for past_transform in obstacle.trajectory
        ]
        obstacle_predictions_list.append(
            ObstaclePrediction(
                obstacle.label,
                obstacle.id,
                1.0,  # probability
                world_trajectory))
    self._predictions.append(
        PredictionMessage(timestamp, obstacle_predictions_list))