# Assumed module-level imports; project-specific symbols (EncoderRNN,
# DecoderRNN, ModelUtils, baseline_utils, config, parse_arguments, device,
# use_cuda, best_loss) are expected to come from the surrounding baseline module.
import os
import pickle as pkl
from typing import Any, Union

import numpy as np
import torch


def infer_map(
    test_loader: torch.utils.data.DataLoader,
    encoder: EncoderRNN,
    decoder: DecoderRNN,
    start_idx: int,
    forecasted_save_dir: str,
    model_utils: ModelUtils,
):
    """Infer function for map-based LSTM baselines and save the forecasted trajectories.

    Args:
        test_loader: DataLoader for the test set
        encoder: Encoder network instance
        decoder: Decoder network instance
        start_idx: start index for the current joblib batch
        forecasted_save_dir: Directory where forecasted trajectories are to be saved
        model_utils: ModelUtils instance

    """
    args = parse_arguments()
    global best_loss
    forecasted_trajectories = {}
    for i, (_input, target, helpers) in enumerate(test_loader):

        _input = _input.to(device)

        batch_helpers = list(zip(*helpers))

        helpers_dict = {}
        for k, v in config.LSTM_HELPER_DICT_IDX.items():
            helpers_dict[k] = batch_helpers[v]

        # Set to eval mode
        encoder.eval()
        decoder.eval()

        # Encoder
        batch_size = _input.shape[0]
        input_length = _input.shape[1]

        # Iterate over every element in the batch
        for batch_idx in range(batch_size):
            num_candidates = len(
                helpers_dict["CANDIDATE_CENTERLINES"][batch_idx])
            curr_centroids = helpers_dict["CENTROIDS"][batch_idx]
            seq_id = int(helpers_dict["SEQ_PATHS"][batch_idx])
            abs_outputs = []

            # Predict using every centerline candidate for the current trajectory
            for candidate_idx in range(num_candidates):
                curr_centerline = helpers_dict["CANDIDATE_CENTERLINES"][
                    batch_idx][candidate_idx]
                curr_nt_dist = helpers_dict["CANDIDATE_NT_DISTANCES"][
                    batch_idx][candidate_idx]

                _input = torch.FloatTensor(
                    np.expand_dims(curr_nt_dist[:args.obs_len].astype(float),
                                   0)).to(device)

                # Initialize encoder hidden state
                encoder_hidden = model_utils.init_hidden(
                    1, encoder.module.hidden_size
                    if use_cuda else encoder.hidden_size)

                # Encode observed trajectory
                for ei in range(input_length):
                    encoder_input = _input[:, ei, :]
                    encoder_hidden = encoder(encoder_input, encoder_hidden)

                # Initialize decoder input with the last observed coordinates
                decoder_input = encoder_input[:, :2]

                # Initialize decoder hidden state as encoder hidden state
                decoder_hidden = encoder_hidden

                decoder_outputs = torch.zeros((1, args.pred_len, 2)).to(device)

                # Decode the hidden state into the future trajectory
                for di in range(args.pred_len):
                    decoder_output, decoder_hidden = decoder(
                        decoder_input, decoder_hidden)
                    decoder_outputs[:, di, :] = decoder_output

                    # Use own predictions as inputs at next step
                    decoder_input = decoder_output

                # Get absolute trajectory
                abs_helpers = {}
                abs_helpers["REFERENCE"] = np.expand_dims(
                    np.array(helpers_dict["CANDIDATE_DELTA_REFERENCES"]
                             [batch_idx][candidate_idx]),
                    0,
                )
                abs_helpers["CENTERLINE"] = np.expand_dims(curr_centerline, 0)

                abs_input, abs_output = baseline_utils.get_abs_traj(
                    _input.clone().cpu().numpy(),
                    decoder_outputs.detach().clone().cpu().numpy(),
                    args,
                    abs_helpers,
                )

                # abs_output has shape (1, pred_len, 2); keep the (pred_len, 2) trajectory
                abs_outputs.append(abs_output[0])
            forecasted_trajectories[seq_id] = abs_outputs

    os.makedirs(forecasted_save_dir, exist_ok=True)
    with open(os.path.join(forecasted_save_dir, f"{start_idx}.pkl"),
              "wb") as f:
        pkl.dump(forecasted_trajectories, f)
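# Note on the hidden-size lookup above: `encoder.module.hidden_size if use_cuda
# else encoder.hidden_size` only works if the encoder was wrapped in
# nn.DataParallel on GPU, which hides the wrapped module behind `.module`.
# Minimal sketch of that pattern with a hypothetical TinyEncoder (not the
# project's EncoderRNN); the zero (h, c) pair stands in for what
# ModelUtils.init_hidden is assumed to return.
import torch
import torch.nn as nn


class TinyEncoder(nn.Module):
    def __init__(self, input_size: int = 2, hidden_size: int = 16):
        super().__init__()
        self.hidden_size = hidden_size
        self.lstm = nn.LSTMCell(input_size, hidden_size)

    def forward(self, x, hidden):
        # Return the new (h, c) pair, mirroring how encoder_hidden is threaded above.
        return self.lstm(x, hidden)


use_cuda = torch.cuda.is_available()
encoder = TinyEncoder()
if use_cuda:
    encoder = nn.DataParallel(encoder).cuda()

hidden_size = encoder.module.hidden_size if use_cuda else encoder.hidden_size

# Zero-initialized (h, c) state for a single candidate trajectory (batch size 1).
encoder_hidden = (torch.zeros(1, hidden_size), torch.zeros(1, hidden_size))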
Example #2
    def infer_and_save_traj_absolute(
        self,
        grid_search: Any,
        train_output: np.ndarray,
        test_input: np.ndarray,
        test_references: Union[np.ndarray, None],
        test_seq_ids: np.ndarray,
        test_translation: Union[np.ndarray, None],
        test_rotation: Union[np.ndarray, None],
        start_idx: int,
        args: Any,
        num_features: int,
        horizon: int,
        save_dir: str,
    ) -> None:
        """Non-map baselines inference. This function does the inference based on the given model, and saves the forecasted trajectories.

        Args:
            grid_search: GridSearchCV object
            train_output: Train output
            test_input: Test input
            test_references: References for reverting the delta transformation of trajectories
            test_seq_ids: csv names of the sequences
            test_translation: Translations used for normalizing trajectories
            test_rotation: Rotations used for normalizing trajectories
            start_idx: start index for the current joblib batch
            args: Arguments passed to the baseline script
            num_features: Number of features
            horizon: Prediction horizon
            save_dir: Directory where forecasted trajectories are to be saved

        """
        test_num_tracks = test_input.shape[0]
        print(f"Absolute Inference currently at index {start_idx} ...")

        forecasted_trajectories = {}

        # Preprocess and get neighbors
        pipeline_steps = grid_search.best_estimator_.named_steps.keys()
        preprocessed_test_input = np.copy(test_input)
        for step in pipeline_steps:
            curr_step = grid_search.best_estimator_.named_steps[step]

            # Get neighbors
            if step == "regressor":
                neigh_idx = curr_step.kneighbors(
                    preprocessed_test_input,
                    return_distance=False,
                    n_neighbors=args.n_neigh,
                )

            # Preprocess
            else:
                if curr_step is not None:
                    preprocessed_test_input = curr_step.transform(
                        preprocessed_test_input)

        # Predict for each trajectory
        for i in range(test_num_tracks):

            curr_test_input = np.repeat(test_input[i],
                                        repeats=args.n_neigh,
                                        axis=0)
            neigh_output = train_output[neigh_idx][
                i, :, :horizon, :]  # num_neigh x curr_pred_len x 2

            abs_helpers = {}
            if args.use_delta:
                abs_helpers["REFERENCE"] = np.array(
                    [test_references[i] for _ in range(args.n_neigh)])
            if args.normalize:
                abs_helpers["TRANSLATION"] = np.array(
                    [test_translation[i] for _ in range(args.n_neigh)])
                abs_helpers["ROTATION"] = np.array(
                    [test_rotation[i] for _ in range(args.n_neigh)])

            # Convert trajectory to map frame
            abs_input, abs_output = baseline_utils.get_abs_traj(
                curr_test_input.copy().reshape(
                    (-1, args.obs_len, num_features), order="F"),
                neigh_output.copy(),
                args,
                helpers=abs_helpers,
            )
            forecasted_trajectories[test_seq_ids[i]] = abs_output
        with open(f"{save_dir}/{start_idx}.pkl", "wb") as f:
            pkl.dump(forecasted_trajectories, f)
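# The step loop above assumes grid_search.best_estimator_ is a scikit-learn
# Pipeline whose final step, named "regressor", is a k-nearest-neighbors model
# fit on flattened observed trajectories, so kneighbors() returns row indices
# into train_output. Minimal self-contained sketch of that assumed setup
# (K, obs_flat, and train_output are illustrative names, not the project's):
import numpy as np
from sklearn.neighbors import KNeighborsRegressor
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler

K = 6                                    # plays the role of args.n_neigh
rng = np.random.default_rng(0)
obs_flat = rng.random((100, 2 * 20))     # [num_tracks, 2 * obs_len]
train_output = rng.random((100, 30, 2))  # [num_tracks, pred_len, 2]

pipeline = Pipeline([
    ("scaler", StandardScaler()),
    ("regressor", KNeighborsRegressor(n_neighbors=K)),
])
pipeline.fit(obs_flat, train_output.reshape(100, -1))

# Mirror the loop above: preprocess first, then query the "regressor" step for
# neighbor indices instead of calling predict().
query = obs_flat[:5]
preprocessed = pipeline.named_steps["scaler"].transform(query)
neigh_idx = pipeline.named_steps["regressor"].kneighbors(
    preprocessed, n_neighbors=K, return_distance=False)
neigh_trajs = train_output[neigh_idx]    # (5, K, 30, 2): neighbors' future paths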
def infer_absolute(
    test_loader: torch.utils.data.DataLoader,
    encoder: EncoderRNN,
    decoder: DecoderRNN,
    start_idx: int,
    forecasted_save_dir: str,
    model_utils: ModelUtils,
):
    """Infer function for non-map LSTM baselines and save the forecasted trajectories.

    Args:
        test_loader: DataLoader for the test set
        encoder: Encoder network instance
        decoder: Decoder network instance
        start_idx: start index for the current joblib batch
        forecasted_save_dir: Directory where forecasted trajectories are to be saved
        model_utils: ModelUtils instance

    """
    args = parse_arguments()
    forecasted_trajectories = {}

    for i, (_input, target, helpers) in enumerate(test_loader):

        _input = _input.to(device)

        batch_helpers = list(zip(*helpers))

        helpers_dict = {}
        for k, v in config.LSTM_HELPER_DICT_IDX.items():
            helpers_dict[k] = batch_helpers[v]

        # Set to eval mode
        encoder.eval()
        decoder.eval()

        # Encoder
        batch_size = _input.shape[0]
        input_length = _input.shape[1]
        input_shape = _input.shape[2]

        # Initialize encoder hidden state
        encoder_hidden = model_utils.init_hidden(
            batch_size,
            encoder.module.hidden_size if use_cuda else encoder.hidden_size)

        # Encode observed trajectory
        for ei in range(input_length):
            encoder_input = _input[:, ei, :]
            encoder_hidden = encoder(encoder_input, encoder_hidden)

        # Initialize decoder input with the last observed coordinates
        decoder_input = encoder_input[:, :2]

        # Initialize decoder hidden state as encoder hidden state
        decoder_hidden = encoder_hidden

        decoder_outputs = torch.zeros(
            (batch_size, args.pred_len, 2)).to(device)

        # Decode the hidden state into the future trajectory
        for di in range(args.pred_len):
            decoder_output, decoder_hidden = decoder(decoder_input,
                                                     decoder_hidden)
            decoder_outputs[:, di, :] = decoder_output

            # Use own predictions as inputs at next step
            decoder_input = decoder_output

        # Get absolute trajectory
        abs_helpers = {}
        abs_helpers["REFERENCE"] = np.array(helpers_dict["DELTA_REFERENCE"])
        abs_helpers["TRANSLATION"] = np.array(helpers_dict["TRANSLATION"])
        abs_helpers["ROTATION"] = np.array(helpers_dict["ROTATION"])
        abs_inputs, abs_outputs = baseline_utils.get_abs_traj(
            _input.clone().cpu().numpy(),
            decoder_outputs.detach().clone().cpu().numpy(),
            args,
            abs_helpers,
        )

        for batch_idx in range(abs_outputs.shape[0]):
            seq_id = int(helpers_dict["SEQ_PATHS"][batch_idx])
            forecasted_trajectories[seq_id] = [abs_outputs[batch_idx]]

    os.makedirs(forecasted_save_dir, exist_ok=True)
    with open(os.path.join(forecasted_save_dir, f"{start_idx}.pkl"),
              "wb") as f:
        pkl.dump(forecasted_trajectories, f)
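# The decode loop above rolls the decoder out autoregressively: each predicted
# (x, y) is fed back in as the next decoder_input. Minimal isolated sketch with
# a hypothetical TinyDecoder (not the project's DecoderRNN):
import torch
import torch.nn as nn


class TinyDecoder(nn.Module):
    def __init__(self, hidden_size: int = 16):
        super().__init__()
        self.lstm = nn.LSTMCell(2, hidden_size)
        self.linear = nn.Linear(hidden_size, 2)

    def forward(self, x, hidden):
        hidden = self.lstm(x, hidden)
        return self.linear(hidden[0]), hidden


pred_len, hidden_size = 30, 16
decoder = TinyDecoder(hidden_size)
decoder_input = torch.zeros(4, 2)        # last observed (x, y) for 4 tracks
decoder_hidden = (torch.zeros(4, hidden_size), torch.zeros(4, hidden_size))
decoder_outputs = torch.zeros(4, pred_len, 2)

for di in range(pred_len):
    decoder_output, decoder_hidden = decoder(decoder_input, decoder_hidden)
    decoder_outputs[:, di, :] = decoder_output
    decoder_input = decoder_output       # feed the prediction back in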
Example #4
    def infer_and_save_traj_map(
        self,
        grid_search: Any,
        train_output: np.ndarray,
        test_nt: np.ndarray,
        test_centerlines: np.ndarray,
        test_references: np.ndarray,
        test_seq_ids: np.ndarray,
        start_idx: int,
        args: Any,
        num_features: int,
        horizon: int,
        save_dir: str,
    ) -> None:
        """Map-based baselines inference. This function does the inference based on the given model, and saves the forecasted trajectories.

        Args:
            grid_search: GridSearchCV object
            train_output: Train output of shape [num_tracks, pred_len, num_features]
            test_nt: Candidate trajectories in centerline curvilinear coordinates
            test_centerlines: Candidate centerlines
            test_references: References for reverting the delta transformation of candidate trajectories
            test_seq_ids: csv names of the sequences
            start_idx: start index for the current joblib batch
            args: Arguments passed to the baseline script
            num_features: Number of features
            horizon: Prediction horizon
            save_dir: Directory where forecasted trajectories are to be saved

        """
        test_num_tracks = test_nt.shape[0]
        print(f"Map-based Inference currently at index {start_idx} ...")

        forecasted_trajectories = {}

        # Predict for each trajectory
        for i in range(test_num_tracks):

            # Helpers for current track
            test_nt_i = test_nt[i]
            test_cl_i = test_centerlines[i]
            test_references_i = test_references[i]

            curr_forecasted_trajectories = []

            # Predict using each of candidate centerlines
            for (
                    test_nt_i_curr_candidate,
                    test_cl_i_curr_candidate,
                    test_ref_i_curr_candidate,
            ) in zip(test_nt_i, test_cl_i, test_references_i):
                test_input = test_nt_i_curr_candidate[:args.obs_len, :].reshape(
                    (1, 2 * args.obs_len), order="F")

                # Preprocess and get neighbors
                pipeline_steps = grid_search.best_estimator_.named_steps.keys()
                preprocessed_test_input = np.copy(test_input)
                for step in pipeline_steps:
                    curr_step = grid_search.best_estimator_.named_steps[step]

                    # Get neighbors
                    if step == "regressor":
                        neigh_idx = curr_step.kneighbors(
                            preprocessed_test_input,
                            return_distance=False,
                            n_neighbors=args.n_neigh,
                        )

                    # Preprocess
                    else:
                        if curr_step is not None:
                            preprocessed_test_input = curr_step.transform(
                                preprocessed_test_input)

                y_pred = train_output[neigh_idx][
                    0, :, :horizon, :]  # num_neighbors x curr_pred_len x 2
                test_input = np.repeat(test_input,
                                       repeats=args.n_neigh,
                                       axis=0)

                abs_helpers = {}
                abs_helpers["CENTERLINE"] = [
                    test_cl_i_curr_candidate for i in range(args.n_neigh)
                ]
                if args.use_delta:

                    abs_helpers["REFERENCE"] = np.array([
                        test_ref_i_curr_candidate for i in range(args.n_neigh)
                    ])

                # Convert trajectory to map frame
                abs_input, abs_output = baseline_utils.get_abs_traj(
                    test_input.copy().reshape((-1, args.obs_len, num_features),
                                              order="F"),
                    y_pred.copy(),
                    args,
                    helpers=abs_helpers,
                )
                curr_forecasted_trajectories.extend(abs_output)
            forecasted_trajectories[
                test_seq_ids[i]] = curr_forecasted_trajectories
        with open(f"{save_dir}/{start_idx}.pkl", "wb") as f:
            pkl.dump(forecasted_trajectories, f)
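# Each infer function above writes one pickle per joblib batch, named
# <start_idx>.pkl and mapping a sequence id to its list of forecasted
# (pred_len, 2) trajectories. Hedged sketch (merge_forecasts is not part of the
# original code) of how those per-batch files could be merged for evaluation:
import glob
import os
import pickle as pkl


def merge_forecasts(save_dir: str) -> dict:
    """Merge the per-batch <start_idx>.pkl files into one forecast dict."""
    merged = {}
    for path in sorted(glob.glob(os.path.join(save_dir, "*.pkl"))):
        with open(path, "rb") as f:
            merged.update(pkl.load(f))
    return merged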