Example #1
import numpy as np

from argoverse.data_loading.argoverse_forecasting_loader import ArgoverseForecastingLoader
from argoverse.evaluation.competition_util import generate_forecasting_h5


def converter_csv_to_argo(input_path: str, output_path: str):
    """Convert a directory of Argoverse forecasting CSVs into a submission h5 file."""
    afl = ArgoverseForecastingLoader(input_path)
    output_all = {}
    for counter, data in enumerate(afl, start=1):
        print(f"\r{counter}/{len(afl)}", end="")
        seq_id = int(data.current_seq.name[:-4])  # strip the ".csv" extension
        # keep everything after the 20 observed steps and add a leading guess axis
        output_all[seq_id] = np.expand_dims(data.agent_traj[20:, :], 0)
    generate_forecasting_h5(output_all, output_path)  # this might take a while
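A minimal usage sketch (both paths are hypothetical placeholders, not from the original):

# point input_path at a directory of Argoverse forecasting CSVs;
# the submission h5 file is written into output_path
converter_csv_to_argo(input_path="data/forecasting/val/data",
                      output_path="submissions")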
Example #2
    def run(self, global_step, global_epoch, example, results, write=False):
        """
        Helper function to generate the result h5 file for argoverse forecasting challenge

        Args:
            data: a dictionary of trajectory, with the key being the sequence ID. For each sequence, the
                  trajectory should be stored in a (9,30,2) np.ndarray
            output_path: path to the output directory to store the output h5 file
            filename: to be used as the name of the file
            probabilities (optional) : normalized probability for each trajectory

        Returns:

        """
        batch_features = example
        preds = np.concatenate(
            [x[0:1].detach().cpu().numpy() for x in results["reg"]], axis=0)
        cls = np.concatenate(
            [x[0:1].detach().cpu().numpy() for x in results["cls"]], axis=0)

        for i, file_id in enumerate(batch_features['file_id']):
            seq_id = int(file_id.stem)
            pred_trajs = preds[i]
            agent_cls = cls[i]
            # drop duplicate trajectories, preserve original order, keep the top 6
            order = np.sort(np.unique(pred_trajs, axis=0, return_index=True)[1])
            pred_trajs = pred_trajs[order][:6]
            agent_cls = agent_cls[order][:6]
            self._traj_all[seq_id] = pred_trajs
            self._cls_all[seq_id] = agent_cls

        if write:  # each process writes its own pickle
            output_pkl_path = Path(
                self._output_dir) / f'res_{self.args.local_rank}.pkl'
            with output_pkl_path.open('wb') as f:
                pickle.dump({'traj': self._traj_all, 'cls': self._cls_all}, f)
            dist.barrier()  # make sure every process has finished writing its file
            if self.args.main_process:
                trajs = {}
                cls = {}
                for i in range(self.args.world_size):
                    source_pkl_path = Path(self._output_dir) / f'res_{i}.pkl'
                    with source_pkl_path.open('rb') as f2:
                        res = pickle.load(f2)
                        trajs.update(res['traj'])
                        cls.update(res['cls'])
                generate_forecasting_h5(trajs,
                                        self._output_dir,
                                        filename='res_mgpu',
                                        probabilities=cls)
                print(f'------dump finished, num of samples: {len(cls)}--------')
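As the loop above shows, the helper consumes a dict keyed by integer sequence ID, with each value holding K guessed trajectories over a 30-step horizon (trimmed to K=6 above), plus optional per-guess probabilities. A toy sketch of that format (the sequence ID and arrays are fabricated):

import numpy as np
from argoverse.evaluation.competition_util import generate_forecasting_h5

# toy data: one sequence, 6 guesses of a 30-step (x, y) trajectory
trajectories = {12345: np.zeros((6, 30, 2))}   # seq_id -> (K, 30, 2)
probabilities = {12345: [1.0 / 6] * 6}         # optional, normalized per sequence
generate_forecasting_h5(trajectories, "out_dir", filename="res",
                        probabilities=probabilities)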
Example #3
        if cfg.add_mlp_agent > 0:
            agents = data["normal_agents_history"].to(device)
            # note: the dataset key keeps its original "agents_availabilty" spelling
            agents_availability = data["agents_availabilty"].to(device)
            entry = [[
                *model_args, history, agents, agents_availability, {}, True
            ], history]
        else:
            entry = [[*model_args, history, None, None, {}, True], history]

        output, conf = model(entry)

        output = output.reshape(
            (data["history_positions"].shape[0], 1, 30, 2)).cpu()
        for i in range(output.shape[0]):
            saved = []
            for j in range(output.shape[1]):
                # rotate back by the ego yaw and add the centroid to return the
                # agent-centric prediction to world coordinates
                rot_output = transform_points(
                    output[i][j], yaw_as_rotation33(ego_yaw[i])) + centroids[i]
                saved.append(rot_output)
            forecasted_trajectories[seq_id[i]] = saved

print("here")
output_path = "/work/vita/ayromlou/argo_code/new_multi_modal_code_sadegh/results1"

if os.path.exists(output_path):
    os.rmdir(output_path)
os.mkdir(output_path)

generate_forecasting_h5(forecasted_trajectories,
                        output_path)  # this might take a while
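Not part of the original snippet: a quick sanity check that the h5 file actually landed in the results directory.

import h5py

# list the datasets inside whatever h5 file(s) the helper produced
for name in os.listdir(output_path):
    if name.endswith(".h5"):
        with h5py.File(os.path.join(output_path, name), "r") as hf:
            print(name, list(hf.keys()))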
Example #4
            for i in range(out.size(0)):
                seq_id = int(norm_centers_ls[seq_index][0])
                norm_center = norm_centers_ls[seq_index][1]

                # outputs are per-step displacements: integrate with cumsum to get
                # positions, then undo the per-sequence center normalization
                pred_y = out[i].view((-1, 2)).cumsum(axis=0).cpu().numpy()
                pred_y += norm_center.reshape(-1, 2)
                pred_y = np.array([pred_y])  # add the leading guess axis -> (1, 30, 2)
                assert pred_y.shape == (1, 30, 2)
                forecasted_trajectories[seq_id] = pred_y
                seq_index += 1

    from argoverse.evaluation.competition_util import generate_forecasting_h5
    generate_forecasting_h5(forecasted_trajectories,
                            output_path,
                            filename=FILENAME)

#%%
# this might take a while
# metric_results = get_displacement_errors_and_miss_rate(
#     forecasted_trajectories, gt_trajectories, max_n_guesses, horizon, miss_threshold
# )
# return metric_results
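The commented block above corresponds to the argoverse metric helper. A sketch of the call, assuming ground-truth trajectories are available (gt_trajectories is a placeholder name taken from the comment, not defined in this snippet):

from argoverse.evaluation.eval_forecasting import get_displacement_errors_and_miss_rate

max_n_guesses, horizon, miss_threshold = 6, 30, 2.0  # K, timesteps, meters
metric_results = get_displacement_errors_and_miss_rate(
    forecasted_trajectories,  # seq_id -> (K, 30, 2) predictions (K=1 above)
    gt_trajectories,          # seq_id -> (30, 2) ground truth (placeholder)
    max_n_guesses, horizon, miss_threshold)
print(metric_results)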

Example #5
    def save_trajectory(self, output_dict, save_path):
        """Write the h5 submission file from a seq_id -> trajectory dict."""
        generate_forecasting_h5(output_dict, save_path)
        print("done")
Example #6
def main():
    # Import all settings for experiment.
    args = parser.parse_args()
    model = import_module(args.model)
    config, _, collate_fn, net, loss, post_process, opt = model.get_model()

    # load pretrain model
    ckpt_path = args.weight
    if not os.path.isabs(ckpt_path):
        ckpt_path = os.path.join(config["save_dir"], ckpt_path)
    ckpt = torch.load(ckpt_path, map_location=lambda storage, loc: storage)
    load_pretrain(net, ckpt["state_dict"])
    net.eval()

    # Data loader for evaluation
    dataset = ArgoTestDataset(args.split, config, train=False)
    data_loader = DataLoader(
        dataset,
        batch_size=config["val_batch_size"],
        num_workers=config["val_workers"],
        collate_fn=collate_fn,
        shuffle=True,
        pin_memory=True,
    )

    # begin inference
    preds = {}
    gts = {}
    cities = {}
    for ii, data in tqdm(enumerate(data_loader)):
        data = dict(data)
        with torch.no_grad():
            output = net(data)
            results = [x[0:1].detach().cpu().numpy() for x in output["reg"]]
        for i, (argo_idx, pred_traj) in enumerate(zip(data["argo_id"], results)):
            preds[argo_idx] = pred_traj.squeeze()
            cities[argo_idx] = data["city"][i]
            gts[argo_idx] = data["gt_preds"][i][0] if "gt_preds" in data else None

    # save for further visualization
    res = dict(
        preds=preds,
        gts=gts,
        cities=cities,
    )
    # torch.save(res,f"{config['save_dir']}/results.pkl")

    # evaluate or submit
    if args.split == "val":
        # for val set: compute metric
        from argoverse.evaluation.eval_forecasting import compute_forecasting_metrics

        # Max #guesses (K): 6; 30-step horizon; standard 2 m miss threshold
        _ = compute_forecasting_metrics(preds, gts, cities, 6, 30, 2.0)
        # Max #guesses (K): 1
        _ = compute_forecasting_metrics(preds, gts, cities, 1, 30, 2.0)
    else:
        # for test set: save as h5 for submission to the evaluation server
        from argoverse.evaluation.competition_util import generate_forecasting_h5
        # the second argument is an output directory; the helper writes
        # <save_dir>/submit.h5 there
        generate_forecasting_h5(
            preds, config["save_dir"], filename="submit")  # this might take a while
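For reference, here is the validation-branch metric call with each positional argument spelled out. This is a sketch outside the original function (preds, gts, and cities are the dicts built in main()); the 6/30/2.0 values are the standard Argoverse settings.

from argoverse.evaluation.eval_forecasting import compute_forecasting_metrics

max_n_guesses, horizon, miss_threshold = 6, 30, 2.0  # K, timesteps, meters
metrics = compute_forecasting_metrics(
    preds,          # seq_id -> (K, 30, 2) forecasts
    gts,            # seq_id -> (30, 2) ground truth
    cities,         # seq_id -> city name
    max_n_guesses,
    horizon,
    miss_threshold,
)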