Example #1
    def on_watermark(self, timestamp: erdos.Timestamp,
                     prediction_stream: erdos.WriteStream):
        self._logger.debug('@{}: received watermark'.format(timestamp))
        if timestamp.is_top:
            return
        point_cloud_msg = self._point_cloud_msgs.popleft()
        tracking_msg = self._tracking_msgs.popleft()

        start_time = time.time()
        nearby_trajectories, nearby_vehicle_ego_transforms, \
            nearby_trajectories_tensor, binned_lidars_tensor = \
            self._preprocess_input(tracking_msg, point_cloud_msg)

        num_predictions = len(nearby_trajectories)
        self._logger.info(
            '@{}: Getting R2P2 predictions for {} vehicles'.format(
                timestamp, num_predictions))

        if num_predictions == 0:
            prediction_stream.send(PredictionMessage(timestamp, []))
            return

        # Run the forward pass.
        z = torch.tensor(
            np.random.normal(size=(num_predictions,
                                   self._flags.prediction_num_future_steps,
                                   2))).to(torch.float32).to(self._device)
        model_start_time = time.time()
        prediction_array, _ = self._r2p2_model.forward(
            z, nearby_trajectories_tensor, binned_lidars_tensor)
        model_runtime = (time.time() - model_start_time) * 1000
        self._csv_logger.debug("{},{},{},{:.4f}".format(
            time_epoch_ms(), timestamp.coordinates[0],
            'r2p2-modelonly-runtime', model_runtime))
        prediction_array = prediction_array.cpu().detach().numpy()

        obstacle_predictions_list = self._postprocess_predictions(
            prediction_array, nearby_trajectories,
            nearby_vehicle_ego_transforms)
        runtime = (time.time() - start_time) * 1000
        self._csv_logger.debug("{},{},{},{:.4f}".format(
            time_epoch_ms(), timestamp.coordinates[0], 'r2p2-runtime',
            runtime))
        prediction_stream.send(
            PredictionMessage(timestamp, obstacle_predictions_list))
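
The deques popped at the top of this callback (self._point_cloud_msgs, self._tracking_msgs) are filled elsewhere in the operator, which this example does not show. A minimal sketch of the usual ERDOS buffering pattern, with an assumed constructor signature and callback names, that would pair with the on_watermark above:

from collections import deque

import erdos


class R2P2PredictorOperatorSketch(erdos.Operator):
    """Illustrative buffering side of the operator; not from the source."""

    def __init__(self, point_cloud_stream, tracking_stream, prediction_stream,
                 flags):
        # One deque per input stream. on_watermark pops one message from each,
        # so the fronts of the deques always refer to the same timestamp.
        self._point_cloud_msgs = deque()
        self._tracking_msgs = deque()
        point_cloud_stream.add_callback(self.on_point_cloud_update)
        tracking_stream.add_callback(self.on_trajectory_update)
        # Invoke on_watermark (as defined above) once both inputs have sent a
        # watermark for a timestamp.
        erdos.add_watermark_callback([point_cloud_stream, tracking_stream],
                                     [prediction_stream], self.on_watermark)
        self._flags = flags

    def on_point_cloud_update(self, msg):
        self._point_cloud_msgs.append(msg)

    def on_trajectory_update(self, msg):
        self._tracking_msgs.append(msg)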
Example #2
    def on_watermark(self, timestamp):
        """Invoked when all input streams have received a watermark.

        Args:
            timestamp (:py:class:`erdos.timestamp.Timestamp`): The timestamp of
                the watermark.
        """
        tracking_msg = self._tracking_msgs.popleft()
        prediction_msg = self._prediction_msgs.popleft()
        vehicle_transform = self._pose_msgs.popleft().data.transform

        # TODO: The evaluator assumes that the obstacle tracker assigns the
        # same ids to the obstacles as they have in the CARLA simulation.

        # Start calculating metrics when we've taken sufficiently many steps.
        if len(self._predictions) == self._flags.prediction_num_future_steps:
            # Convert the tracking message to a dictionary with trajectories
            # in world coordinates, for speedup when calculating metrics.
            ground_trajectories_dict = {}
            for obstacle in tracking_msg.obstacle_trajectories:
                cur_trajectory = []
                for past_transform in obstacle.trajectory:
                    world_coord = vehicle_transform * past_transform
                    cur_trajectory.append(world_coord)

                ground_trajectories_dict[obstacle.id] = \
                    ObstacleTrajectory(obstacle.label,
                                       obstacle.id,
                                       obstacle.bounding_box,
                                       cur_trajectory)
            # Evaluate the prediction corresponding to the current set of
            # ground truth past trajectories.
            self._calculate_metrics(timestamp, ground_trajectories_dict,
                                    self._predictions[0].predictions)

        # Convert the prediction to world coordinates and append it to the
        # queue.
        obstacle_predictions_list = []
        for obstacle in prediction_msg.predictions:
            cur_trajectory = []
            for past_transform in obstacle.trajectory:
                world_coord = vehicle_transform * past_transform
                cur_trajectory.append(world_coord)
            # Get the current transform of the obstacle, which is the last
            # trajectory value.
            cur_transform = obstacle.trajectory[-1]
            obstacle_predictions_list.append(
                ObstaclePrediction(
                    obstacle.label,
                    obstacle.id,
                    cur_transform,
                    obstacle.bounding_box,
                    1.0,  # probability
                    cur_trajectory))
        self._predictions.append(
            PredictionMessage(timestamp, obstacle_predictions_list))
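
The _calculate_metrics helper is not shown here. Evaluators of this kind score a prediction only after prediction_num_future_steps further watermarks have arrived, so the queue above always holds the prediction whose horizon has just been fully observed. A self-contained sketch of a displacement-error metric such a helper might compute; the function name and the ADE/FDE choice are assumptions, not taken from the source:

from collections import deque

import numpy as np


def displacement_errors(predicted_xy, ground_truth_xy):
    """Return (ADE, FDE): mean and final Euclidean error between two paths."""
    predicted_xy = np.asarray(predicted_xy, dtype=float)
    ground_truth_xy = np.asarray(ground_truth_xy, dtype=float)
    per_step = np.linalg.norm(predicted_xy - ground_truth_xy, axis=1)
    return float(per_step.mean()), float(per_step[-1])


# A prediction issued N watermarks ago is compared against the trajectory that
# has actually been observed since then; a deque holds the pending predictions.
pending_predictions = deque()
pending_predictions.append([(0.0, 0.0), (1.0, 0.0), (2.0, 0.0)])
observed_trajectory = [(0.0, 0.1), (1.1, 0.0), (2.0, 0.2)]
print(displacement_errors(pending_predictions.popleft(), observed_trajectory))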
Example #3
    def generate_predicted_trajectories(self, msg: Message,
                                        linear_prediction_stream: WriteStream):
        self._logger.debug('@{}: received trajectories message'.format(
            msg.timestamp))
        obstacle_predictions_list = []

        nearby_obstacle_trajectories, nearby_obstacles_ego_transforms = \
            msg.get_nearby_obstacles_info(self._flags.prediction_radius)
        num_predictions = len(nearby_obstacle_trajectories)

        self._logger.info(
            '@{}: Getting linear predictions for {} obstacles'.format(
                msg.timestamp, num_predictions))

        for idx in range(len(nearby_obstacle_trajectories)):
            obstacle_trajectory = nearby_obstacle_trajectories[idx]
            # Time step matrices used in regression.
            num_steps = min(self._flags.prediction_num_past_steps,
                            len(obstacle_trajectory.trajectory))
            ts = np.zeros((num_steps, 2))
            future_ts = np.zeros((self._flags.prediction_num_future_steps, 2))
            for t in range(num_steps):
                ts[t][0] = -t
                ts[t][1] = 1
            for i in range(self._flags.prediction_num_future_steps):
                future_ts[i][0] = i + 1
                future_ts[i][1] = 1

            xy = np.zeros((num_steps, 2))
            for t in range(num_steps):
                # t-th most recent step
                transform = obstacle_trajectory.trajectory[-(t + 1)]
                xy[t][0] = transform.location.x
                xy[t][1] = transform.location.y
            linear_model_params = np.linalg.lstsq(ts, xy, rcond=None)[0]
            # Predict future steps and convert to list of locations.
            predict_array = np.matmul(future_ts, linear_model_params)
            predictions = []
            for t in range(self._flags.prediction_num_future_steps):
                # Linear prediction does not predict vehicle orientation, so we
                # use our estimated orientation of the vehicle at its latest
                # location.
                predictions.append(
                    Transform(location=Location(x=predict_array[t][0],
                                                y=predict_array[t][1]),
                              rotation=nearby_obstacles_ego_transforms[idx].
                              rotation))
            obstacle_predictions_list.append(
                ObstaclePrediction(obstacle_trajectory,
                                   obstacle_trajectory.obstacle.transform, 1.0,
                                   predictions))
        linear_prediction_stream.send(
            PredictionMessage(msg.timestamp, obstacle_predictions_list))
        linear_prediction_stream.send(erdos.WatermarkMessage(msg.timestamp))
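
Both linear predictors in this listing fit the past (x, y) positions against the design matrix ts, whose rows are [-t, 1] (t steps in the past plus an intercept column), and then extrapolate by multiplying future_ts, whose rows are [i + 1, 1], with the fitted parameters. A standalone numpy sketch of that fit-and-extrapolate step on illustrative data:

import numpy as np

# Five past (x, y) positions, most recent first, moving +1 m per step in x.
past_xy = np.array([[4., 0.], [3., 0.], [2., 0.], [1., 0.], [0., 0.]])
num_past, num_future = past_xy.shape[0], 3

# Row t of the design matrix is [-t, 1]: "t steps ago" plus an intercept.
ts = np.column_stack((-np.arange(num_past), np.ones(num_past)))
# Row i of the future design matrix is [i + 1, 1]: "i + 1 steps ahead".
future_ts = np.column_stack((np.arange(1, num_future + 1), np.ones(num_future)))

# Least squares yields a (2, 2) matrix: a (slope, intercept) pair per axis.
linear_model_params = np.linalg.lstsq(ts, past_xy, rcond=None)[0]
future_xy = np.matmul(future_ts, linear_model_params)
print(future_xy)  # approximately [[5, 0], [6, 0], [7, 0]]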
Example #4
    def generate_predicted_trajectories(self, msg, linear_prediction_stream):
        self._logger.debug('@{}: received trajectories message'.format(
            msg.timestamp))
        obstacle_predictions_list = []

        for obstacle in msg.obstacle_trajectories:
            # Time step matrices used in regression.
            num_steps = min(self._flags.prediction_num_past_steps,
                            len(obstacle.trajectory))
            ts = np.zeros((num_steps, 2))
            future_ts = np.zeros((self._flags.prediction_num_future_steps, 2))
            for t in range(num_steps):
                ts[t][0] = -t
                ts[t][1] = 1
            for i in range(self._flags.prediction_num_future_steps):
                future_ts[i][0] = i + 1
                future_ts[i][1] = 1

            xy = np.zeros((num_steps, 2))
            for t in range(num_steps):
                # t-th most recent step
                transform = obstacle.trajectory[-(t + 1)]
                xy[t][0] = transform.location.x
                xy[t][1] = transform.location.y
            linear_model_params = np.linalg.lstsq(ts, xy, rcond=None)[0]
            # Predict future steps and convert to list of locations.
            predict_array = np.matmul(future_ts, linear_model_params)
            predictions = []
            for t in range(self._flags.prediction_num_future_steps):
                predictions.append(
                    Transform(location=Location(x=predict_array[t][0],
                                                y=predict_array[t][1]),
                              rotation=Rotation()))
            # Get the current transform of the obstacle, which is the last
            # trajectory value.
            cur_transform = obstacle.trajectory[-1]
            obstacle_predictions_list.append(
                ObstaclePrediction(
                    obstacle.label,
                    obstacle.id,
                    cur_transform,
                    obstacle.bounding_box,
                    1.0,  # probability
                    predictions))
        linear_prediction_stream.send(
            PredictionMessage(msg.timestamp, obstacle_predictions_list))
Example #5
    def on_notification(self, timestamp):
        tracking_msg = self._tracking_msgs.popleft()
        prediction_msg = self._prediction_msgs.popleft()
        vehicle_transform = self._can_bus_msgs.popleft().data.transform

        # Start calculating metrics when we've taken sufficiently many steps.
        if len(self._predictions) == self._flags.prediction_num_future_steps:
            # Convert the tracking message to a dictionary with trajectories
            # in world coordinates, for speedup when calculating metrics.
            ground_trajectories_dict = {}
            for obstacle in tracking_msg.obstacle_trajectories:
                cur_trajectory = []
                for past_transform in obstacle.trajectory:
                    world_coord = vehicle_transform * past_transform
                    cur_trajectory.append(world_coord)

                ground_trajectories_dict[obstacle.id] = \
                    ObstacleTrajectory(obstacle.label,
                                       obstacle.id,
                                       cur_trajectory)
            # Evaluate the prediction corresponding to the current set of
            # ground truth past trajectories.
            self.calculate_metrics(ground_trajectories_dict,
                                   self._predictions[0].predictions)

        # Convert the prediction to world coordinates and append it to the
        # queue.
        obstacle_predictions_list = []
        for obstacle in prediction_msg.predictions:
            cur_trajectory = []
            for past_transform in obstacle.trajectory:
                world_coord = vehicle_transform * past_transform
                cur_trajectory.append(world_coord)
            obstacle_predictions_list.append(
                ObstaclePrediction(
                    obstacle.label,
                    obstacle.id,
                    1.0,  # probability
                    cur_trajectory))
        self._predictions.append(
            PredictionMessage(timestamp, obstacle_predictions_list))
Example #6
    def on_watermark(self, timestamp, prediction_stream):
        self._logger.debug('@{}: received watermark'.format(timestamp))

        can_bus_msg = self._can_bus_msgs.popleft()
        point_cloud_msg = self._point_cloud_msgs.popleft()
        tracking_msg = self._tracking_msgs.popleft()

        binned_lidar = self.get_occupancy_grid(
            point_cloud_msg.point_cloud.points)

        self._vehicle_transform = can_bus_msg.data.transform

        # Get the ego-vehicle track and bounding box.
        ego_vehicle_trajectory = None
        for vehicle in tracking_msg.obstacle_trajectories:
            if vehicle.id == self.vehicle_id:
                ego_vehicle_trajectory = vehicle.trajectory
                ego_vehicle_bbox = vehicle.bounding_box
                break
        assert ego_vehicle_trajectory is not None, "ego-vehicle trajectory not found"
        ego_vehicle_trajectory = np.stack(
            [[point.location.x, point.location.y] for
              point in ego_vehicle_trajectory])

        # If we haven't seen enough past locations yet, pad the start
        # of the trajectory with copies of the earliest location.
        num_past_locations = ego_vehicle_trajectory.shape[0]
        if num_past_locations < self._flags.prediction_num_past_steps:
            initial_copies = np.repeat([np.array(ego_vehicle_trajectory[0])],
                                       self._flags.prediction_num_past_steps - num_past_locations,
                                       axis=0)
            ego_vehicle_trajectory = np.vstack(
                (initial_copies, ego_vehicle_trajectory))
        ego_vehicle_trajectory = np.expand_dims(ego_vehicle_trajectory, axis=0)

        z = torch.tensor(np.random.normal(
            size=(1, self._flags.prediction_num_future_steps, 2))).to(
            torch.float32).to(self._device)
        ego_vehicle_trajectory = torch.tensor(ego_vehicle_trajectory).to(
            torch.float32).to(self._device)
        binned_lidar = torch.tensor(binned_lidar).to(torch.float32).to(
            self._device)

        prediction_array, _ = self._r2p2_model.forward(z,
                                  ego_vehicle_trajectory,
                                  binned_lidar)
        prediction_array = prediction_array.cpu().detach().numpy()[0]

        # Convert the prediction array into a list of predicted transforms.
        predictions = []
        for t in range(self._flags.prediction_num_future_steps):
            predictions.append(
                Transform(location=Location(x=prediction_array[t][0],
                                            y=prediction_array[t][1]),
                          rotation=Rotation()))

        obstacle_predictions_list = []
        obstacle_predictions_list.append(
            ObstaclePrediction('vehicle',
                               self.vehicle_id,
                               self._vehicle_transform,
                               ego_vehicle_bbox,
                               # Probability; currently a filler value because
                               # we are taking just one sample from the
                               # distribution.
                               1.0,
                               predictions))
        prediction_stream.send(
            PredictionMessage(timestamp, obstacle_predictions_list))
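
get_occupancy_grid is not shown in this example; R2P2 consumes the LiDAR sweep as a binned, bird's-eye-view grid. A plausible stand-in that bins points into two height channels with numpy; the grid size, cell size, and height split below are illustrative assumptions, not values from the source:

import numpy as np


def binned_lidar_grid(points, grid_size=100, cell_m=0.5, z_split=0.0):
    """Illustrative stand-in for get_occupancy_grid; not the source code.

    Bins LiDAR points around the ego-vehicle into a grid_size x grid_size
    bird's-eye-view grid with two channels: counts below and above z_split.
    """
    points = np.asarray(points, dtype=float)  # (N, 3) array of x, y, z
    half = grid_size * cell_m / 2.0
    edges = np.linspace(-half, half, grid_size + 1)
    channels = []
    for mask in (points[:, 2] < z_split, points[:, 2] >= z_split):
        counts, _, _ = np.histogram2d(points[mask, 0], points[mask, 1],
                                      bins=(edges, edges))
        channels.append(counts)
    # Shape (1, grid_size, grid_size, 2): a batch of one, two height channels.
    return np.stack(channels, axis=-1)[np.newaxis, ...]


grid = binned_lidar_grid(np.random.uniform(-20.0, 20.0, size=(1000, 3)))
print(grid.shape)  # (1, 100, 100, 2)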