Example no. 1
    def get_predictions(self, prediction_msg, ego_transform):
        """Extracts obstacle predictions out of the message.

        This method is useful to build obstacle predictions when
        the operator directly receives detections instead of predictions.
        The method assumes that the obstacles are static.
        """
        predictions = []
        if isinstance(prediction_msg, ObstaclesMessage):
            # Transform the obstacle into a prediction.
            self._logger.debug(
                'Planner received obstacles instead of predictions.')
            predictions = []
            for obstacle in prediction_msg.obstacles:
                obstacle_trajectory = ObstacleTrajectory(obstacle, [])
                prediction = ObstaclePrediction(
                    obstacle_trajectory, obstacle.transform, 1.0,
                    [ego_transform.inverse_transform() * obstacle.transform])
                predictions.append(prediction)
        elif isinstance(prediction_msg, PredictionMessage):
            predictions = prediction_msg.predictions
        else:
            raise ValueError('Unexpected obstacles msg type {}'.format(
                type(prediction_msg)))
        return predictions
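
The key step above is ego_transform.inverse_transform() * obstacle.transform, which re-expresses the obstacle's world-frame pose in the ego vehicle's frame. A minimal numpy sketch of the same composition, using plain 4x4 homogeneous matrices and an illustrative pose_matrix helper instead of Pylot's Transform class:

    import numpy as np

    def pose_matrix(x, y, z, yaw):
        """Builds a 4x4 homogeneous transform from a location and a yaw angle."""
        c, s = np.cos(yaw), np.sin(yaw)
        m = np.eye(4)
        m[:3, :3] = [[c, -s, 0], [s, c, 0], [0, 0, 1]]
        m[:3, 3] = [x, y, z]
        return m

    # World-frame poses of the ego vehicle and of a static obstacle.
    ego_world = pose_matrix(10.0, 5.0, 0.0, np.pi / 2)
    obstacle_world = pose_matrix(12.0, 9.0, 0.0, 0.0)

    # inverse(ego) * obstacle gives the obstacle's pose in the ego frame,
    # mirroring ego_transform.inverse_transform() * obstacle.transform.
    obstacle_in_ego = np.linalg.inv(ego_world) @ obstacle_world
    print(obstacle_in_ego[:3, 3])  # obstacle location relative to the ego vehicle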
Example no. 2
    def _postprocess_predictions(self, prediction_array, vehicle_trajectories,
                                 vehicle_ego_transforms):
        # The prediction_array consists of predictions with respect to each
        # vehicle. Transform each predicted trajectory to be relative to the
        # ego vehicle, then convert it into an ObstaclePrediction.
        obstacle_predictions_list = []
        num_predictions = len(vehicle_trajectories)

        for idx in range(num_predictions):
            cur_prediction = prediction_array[idx]

            obstacle_transform = vehicle_trajectories[idx].obstacle.transform
            predictions = []
            # Because R2P2 only predicts (x,y) coordinates, we assume the
            # vehicle stays at the same height as its last location.
            for t in range(self._flags.prediction_num_future_steps):
                cur_point = vehicle_ego_transforms[idx].transform_points(
                    np.array([[
                        cur_prediction[t][0], cur_prediction[t][1],
                        vehicle_ego_transforms[idx].location.z
                    ]]))[0]
                # R2P2 does not predict vehicle orientation, so we use our
                # estimated orientation of the vehicle at its latest location.
                predictions.append(
                    Transform(location=Location(cur_point[0], cur_point[1],
                                                cur_point[2]),
                              rotation=vehicle_ego_transforms[idx].rotation))

            # Probability is currently a filler value because we take just
            # one sample from the distribution.
            obstacle_predictions_list.append(
                ObstaclePrediction(vehicle_trajectories[idx],
                                   obstacle_transform, 1.0, predictions))
        return obstacle_predictions_list
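
R2P2 emits only (x, y) coordinates, so the loop above reuses the obstacle's last known height before calling transform_points. A minimal numpy sketch of that lift and the batched point transform, assuming the pose is given as a 4x4 homogeneous matrix rather than a Pylot Transform:

    import numpy as np

    # (x, y) waypoints predicted by the network for one obstacle.
    pred_xy = np.array([[1.0, 0.5], [2.1, 1.0], [3.3, 1.4]])
    z_last = 0.2          # height of the obstacle at its last observed location
    pose = np.eye(4)      # pose the predictions are expressed in (identity here)

    # Lift to 3D by reusing the last known height, then apply the pose to the
    # whole batch of points, mirroring transform_points in the code above.
    pred_xyz = np.hstack([pred_xy, np.full((len(pred_xy), 1), z_last)])
    homogeneous = np.hstack([pred_xyz, np.ones((len(pred_xyz), 1))])
    world_points = (pose @ homogeneous.T).T[:, :3]
    print(world_points)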
Example no. 3
    def on_watermark(self, timestamp):
        """Invoked when all input streams have received a watermark.

        Args:
            timestamp (:py:class:`erdos.timestamp.Timestamp`): The timestamp of
                the watermark.
        """
        tracking_msg = self._tracking_msgs.popleft()
        prediction_msg = self._prediction_msgs.popleft()
        vehicle_transform = self._pose_msgs.popleft().data.transform

        # TODO: The evaluator assumes that the obstacle tracker assigns the
        # same ids to the obstacles as they have in the CARLA simulation.

        # Start calculating metrics when we've taken sufficiently many steps.
        if len(self._predictions) == self._flags.prediction_num_future_steps:
            # Convert the tracking message to a dictionary with trajectories
            # in world coordinates, for speedup when calculating metrics.
            ground_trajectories_dict = {}
            for obstacle in tracking_msg.obstacle_trajectories:
                cur_trajectory = []
                for past_transform in obstacle.trajectory:
                    world_coord = vehicle_transform * past_transform
                    cur_trajectory.append(world_coord)

                ground_trajectories_dict[obstacle.id] = \
                    ObstacleTrajectory(obstacle.label,
                                       obstacle.id,
                                       obstacle.bounding_box,
                                       cur_trajectory)
            # Evaluate the prediction corresponding to the current set of
            # ground truth past trajectories.
            self._calculate_metrics(timestamp, ground_trajectories_dict,
                                    self._predictions[0].predictions)

        # Convert the prediction to world coordinates and append it to the
        # queue.
        obstacle_predictions_list = []
        for obstacle in prediction_msg.predictions:
            cur_trajectory = []
            for past_transform in obstacle.trajectory:
                world_coord = vehicle_transform * past_transform
                cur_trajectory.append(world_coord)
            # Get the current transform of the obstacle, which is the last
            # trajectory value.
            cur_transform = obstacle.trajectory[-1]
            obstacle_predictions_list.append(
                ObstaclePrediction(
                    obstacle.label,
                    obstacle.id,
                    cur_transform,
                    obstacle.bounding_box,
                    1.0,  # probability
                    cur_trajectory))
        self._predictions.append(
            PredictionMessage(timestamp, obstacle_predictions_list))
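
_calculate_metrics is not shown in this excerpt; evaluators of this kind typically report the average and final displacement error (ADE/FDE) between each predicted trajectory and the matching ground-truth one. A minimal sketch under that assumption, using plain (x, y) arrays in place of Pylot transforms:

    import numpy as np

    def displacement_errors(predicted, ground_truth):
        """ADE and FDE between two trajectories given as (num_steps, 2) arrays."""
        errors = np.linalg.norm(predicted - ground_truth, axis=1)
        return errors.mean(), errors[-1]

    predicted = np.array([[0.0, 0.0], [1.0, 0.1], [2.0, 0.3]])
    ground_truth = np.array([[0.0, 0.0], [1.1, 0.0], [2.2, 0.1]])
    ade, fde = displacement_errors(predicted, ground_truth)
    print('ADE={:.3f} FDE={:.3f}'.format(ade, fde))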
Example no. 4
    def generate_predicted_trajectories(self, msg: Message,
                                        linear_prediction_stream: WriteStream):
        self._logger.debug('@{}: received trajectories message'.format(
            msg.timestamp))
        obstacle_predictions_list = []

        nearby_obstacle_trajectories, nearby_obstacles_ego_transforms = \
            msg.get_nearby_obstacles_info(self._flags.prediction_radius)
        num_predictions = len(nearby_obstacle_trajectories)

        self._logger.info(
            '@{}: Getting linear predictions for {} obstacles'.format(
                msg.timestamp, num_predictions))

        for idx in range(len(nearby_obstacle_trajectories)):
            obstacle_trajectory = nearby_obstacle_trajectories[idx]
            # Time step matrices used in regression.
            num_steps = min(self._flags.prediction_num_past_steps,
                            len(obstacle_trajectory.trajectory))
            ts = np.zeros((num_steps, 2))
            future_ts = np.zeros((self._flags.prediction_num_future_steps, 2))
            for t in range(num_steps):
                ts[t][0] = -t
                ts[t][1] = 1
            for i in range(self._flags.prediction_num_future_steps):
                future_ts[i][0] = i + 1
                future_ts[i][1] = 1

            xy = np.zeros((num_steps, 2))
            for t in range(num_steps):
                # t-th most recent step
                transform = obstacle_trajectory.trajectory[-(t + 1)]
                xy[t][0] = transform.location.x
                xy[t][1] = transform.location.y
            linear_model_params = np.linalg.lstsq(ts, xy, rcond=None)[0]
            # Predict future steps and convert to list of locations.
            predict_array = np.matmul(future_ts, linear_model_params)
            predictions = []
            for t in range(self._flags.prediction_num_future_steps):
                # Linear prediction does not predict vehicle orientation, so we
                # use our estimated orientation of the vehicle at its latest
                # location.
                predictions.append(
                    Transform(location=Location(x=predict_array[t][0],
                                                y=predict_array[t][1]),
                              rotation=nearby_obstacles_ego_transforms[idx].
                              rotation))
            obstacle_predictions_list.append(
                ObstaclePrediction(obstacle_trajectory,
                                   obstacle_trajectory.obstacle.transform, 1.0,
                                   predictions))
        linear_prediction_stream.send(
            PredictionMessage(msg.timestamp, obstacle_predictions_list))
        linear_prediction_stream.send(erdos.WatermarkMessage(msg.timestamp))
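
The regression above fits x(t) and y(t) as straight lines through the last num_steps locations (design-matrix rows [-t, 1] for the t-th most recent point) and evaluates the fit at future steps [t + 1, 1]. A standalone sketch of the same least-squares extrapolation on toy data:

    import numpy as np

    past_xy = np.array([[0.0, 0.0], [1.0, 0.5], [2.0, 1.0], [3.0, 1.5]])  # oldest to newest
    num_past, num_future = len(past_xy), 3

    # Design matrix over past steps: row t is [-t, 1] for the t-th most recent point.
    ts = np.stack([-np.arange(num_past), np.ones(num_past)], axis=1)
    xy = past_xy[::-1]  # most recent first, matching ts
    params = np.linalg.lstsq(ts, xy, rcond=None)[0]

    # Evaluate the fitted lines at future steps 1..num_future.
    future_ts = np.stack(
        [np.arange(1, num_future + 1), np.ones(num_future)], axis=1)
    print(future_ts @ params)  # extrapolated (x, y) waypoints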
Example no. 5
    def get_predictions(self, prediction_msg, ego_transform):
        predictions = []
        if isinstance(prediction_msg, ObstaclesMessage):
            # Transform the obstacle into a prediction.
            predictions = []
            for obstacle in prediction_msg.obstacles:
                obstacle_trajectory = ObstacleTrajectory(obstacle, [])
                prediction = ObstaclePrediction(
                    obstacle_trajectory, obstacle.transform, 1.0,
                    [ego_transform.inverse_transform() * obstacle.transform])
                predictions.append(prediction)
        elif isinstance(prediction_msg, PredictionMessage):
            predictions = prediction_msg.predictions
        else:
            raise ValueError('Unexpected obstacles msg type {}'.format(
                type(prediction_msg)))
        return predictions
Example no. 6
    def generate_predicted_trajectories(self, msg, linear_prediction_stream):
        self._logger.debug('@{}: received trajectories message'.format(
            msg.timestamp))
        obstacle_predictions_list = []

        for obstacle in msg.obstacle_trajectories:
            # Time step matrices used in regression.
            num_steps = min(self._flags.prediction_num_past_steps,
                            len(obstacle.trajectory))
            ts = np.zeros((num_steps, 2))
            future_ts = np.zeros((self._flags.prediction_num_future_steps, 2))
            for t in range(num_steps):
                ts[t][0] = -t
                ts[t][1] = 1
            for i in range(self._flags.prediction_num_future_steps):
                future_ts[i][0] = i + 1
                future_ts[i][1] = 1

            xy = np.zeros((num_steps, 2))
            for t in range(num_steps):
                # t-th most recent step
                transform = obstacle.trajectory[-(t + 1)]
                xy[t][0] = transform.location.x
                xy[t][1] = transform.location.y
            linear_model_params = np.linalg.lstsq(ts, xy, rcond=None)[0]
            # Predict future steps and convert to list of locations.
            predict_array = np.matmul(future_ts, linear_model_params)
            predictions = []
            for t in range(self._flags.prediction_num_future_steps):
                predictions.append(
                    Transform(location=Location(x=predict_array[t][0],
                                                y=predict_array[t][1]),
                              rotation=Rotation()))
            # Get the current transform of the obstacle, which is the last
            # trajectory value.
            cur_transform = obstacle.trajectory[-1]
            obstacle_predictions_list.append(
                ObstaclePrediction(
                    obstacle.label,
                    obstacle.id,
                    cur_transform,
                    obstacle.bounding_box,
                    1.0,  # probability
                    predictions))
        linear_prediction_stream.send(
            PredictionMessage(msg.timestamp, obstacle_predictions_list))
Example no. 7
    def on_notification(self, timestamp):
        tracking_msg = self._tracking_msgs.popleft()
        prediction_msg = self._prediction_msgs.popleft()
        vehicle_transform = self._can_bus_msgs.popleft().data.transform

        # Start calculating metrics when we've taken sufficiently many steps.
        if len(self._predictions) == self._flags.prediction_num_future_steps:
            # Convert the tracking message to a dictionary with trajectories
            # in world coordinates, for speedup when calculating metrics.
            ground_trajectories_dict = {}
            for obstacle in tracking_msg.obstacle_trajectories:
                cur_trajectory = []
                for past_transform in obstacle.trajectory:
                    world_coord = vehicle_transform * past_transform
                    cur_trajectory.append(world_coord)

                ground_trajectories_dict[obstacle.id] = \
                    ObstacleTrajectory(obstacle.label,
                                       obstacle.id,
                                       cur_trajectory)
            # Evaluate the prediction corresponding to the current set of
            # ground truth past trajectories.
            self.calculate_metrics(ground_trajectories_dict,
                                   self._predictions[0].predictions)

        # Convert the prediction to world coordinates and append it to the
        # queue.
        obstacle_predictions_list = []
        for obstacle in prediction_msg.predictions:
            cur_trajectory = []
            for past_transform in obstacle.trajectory:
                world_coord = vehicle_transform * past_transform
                cur_trajectory.append(world_coord)
            obstacle_predictions_list.append(
                ObstaclePrediction(
                    obstacle.label,
                    obstacle.id,
                    1.0,  # probability
                    cur_trajectory))
        self._predictions.append(
            PredictionMessage(timestamp, obstacle_predictions_list))
Example no. 8
    def on_watermark(self, timestamp, prediction_stream):
        """Invoked when all input streams have received a watermark."""
        self._logger.debug('@{}: received watermark'.format(timestamp))

        can_bus_msg = self._can_bus_msgs.popleft()
        point_cloud_msg = self._point_cloud_msgs.popleft()
        tracking_msg = self._tracking_msgs.popleft()

        binned_lidar = self.get_occupancy_grid(
            point_cloud_msg.point_cloud.points)

        self._vehicle_transform = can_bus_msg.data.transform

        # Get the ego-vehicle track and bounding box.
        ego_vehicle_trajectory = None
        for vehicle in tracking_msg.obstacle_trajectories:
            if vehicle.id == self.vehicle_id:
                ego_vehicle_trajectory = vehicle.trajectory
                ego_vehicle_bbox = vehicle.bounding_box
                break
        assert ego_vehicle_trajectory is not None, \
            'ego-vehicle trajectory not found'
        ego_vehicle_trajectory = np.stack(
            [[point.location.x, point.location.y]
             for point in ego_vehicle_trajectory])

        # If we haven't seen enough past locations yet, pad the start
        # of the trajectory with copies of the earliest location.
        num_past_locations = ego_vehicle_trajectory.shape[0]
        if num_past_locations < self._flags.prediction_num_past_steps:
            initial_copies = np.repeat(
                [np.array(ego_vehicle_trajectory[0])],
                self._flags.prediction_num_past_steps - num_past_locations,
                axis=0)
            ego_vehicle_trajectory = np.vstack(
                (initial_copies, ego_vehicle_trajectory))
        ego_vehicle_trajectory = np.expand_dims(ego_vehicle_trajectory,
                                                axis=0)

        # Sample a noise vector z from a standard normal and move the model
        # inputs to the target device.
        z = torch.tensor(
            np.random.normal(
                size=(1, self._flags.prediction_num_future_steps,
                      2))).to(torch.float32).to(self._device)
        ego_vehicle_trajectory = torch.tensor(ego_vehicle_trajectory).to(
            torch.float32).to(self._device)
        binned_lidar = torch.tensor(binned_lidar).to(torch.float32).to(
            self._device)

        prediction_array, _ = self._r2p2_model.forward(
            z, ego_vehicle_trajectory, binned_lidar)
        prediction_array = prediction_array.cpu().detach().numpy()[0]

        # Convert the prediction array to a list of Transforms. R2P2 does not
        # predict orientation, so a default Rotation is used.
        predictions = []
        for t in range(self._flags.prediction_num_future_steps):
            predictions.append(
                Transform(location=Location(x=prediction_array[t][0],
                                            y=prediction_array[t][1]),
                          rotation=Rotation()))

        obstacle_predictions_list = []
        # Probability is currently a filler value (1.0) because we take just
        # one sample from the distribution.
        obstacle_predictions_list.append(
            ObstaclePrediction('vehicle',
                               self.vehicle_id,
                               self._vehicle_transform,
                               ego_vehicle_bbox,
                               1.0,
                               predictions))
        prediction_stream.send(
            PredictionMessage(timestamp, obstacle_predictions_list))
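
get_occupancy_grid is not shown in this excerpt; a common way to build the LiDAR input for an R2P2-style model is to bin the point cloud's (x, y) coordinates into a fixed-size grid centered on the vehicle. A minimal sketch under that assumption (the grid size and cell resolution below are illustrative, not Pylot's actual values):

    import numpy as np

    def occupancy_grid(points, grid_size=100, cell_m=0.5):
        """Bins (N, 3) points into a grid_size x grid_size count grid whose
        cells are cell_m meters per side, centered on the vehicle."""
        half = grid_size * cell_m / 2.0
        grid, _, _ = np.histogram2d(points[:, 0], points[:, 1],
                                    bins=grid_size,
                                    range=[[-half, half], [-half, half]])
        return grid

    toy_points = np.random.uniform(-25.0, 25.0, size=(1000, 3))  # toy point cloud
    print(occupancy_grid(toy_points).shape)  # (100, 100)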