Example No. 1
def compute_on_dataset(model, data_loader, device, timer=None, show=False):
    '''
        Get predictions by model inference.
            - output keys: ['box3d_lidar', 'scores', 'label_preds', 'metadata'];
            - returns detections: dict keyed by image_id (token), e.g. 3769 entries
              for the KITTI val split, with detections[image_id] = output.
    '''
    model.eval()
    cpu_device = torch.device("cpu")

    results_dict = {}

    for i, batch in enumerate(data_loader):
        # The progress bar is initialised only after the first batch has been processed.
        if i == 1:
            prog_bar = torchie.ProgressBar(len(data_loader.dataset) - 1)
        example = example_to_device(batch, device=device)
        with torch.no_grad():
            outputs = model(example, return_loss=False,
                            rescale=not show)  # list of length batch_size (e.g. 8)
            for output in outputs:  # output.keys(): ['box3d_lidar', 'scores', 'label_preds', 'metadata']
                token = output["metadata"][
                    "token"]  # token should be the image_id
                for k, v in output.items():
                    if k not in [
                            "metadata",
                    ]:
                        output[k] = v.to(cpu_device)
                results_dict.update({
                    token: output,
                })
                if i >= 1:
                    prog_bar.update()

    return results_dict
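
Because the model and data loader above come from the surrounding project, here is a minimal, self-contained sketch of the same collection pattern (no_grad inference, moving outputs to CPU, keying results by the metadata token). DummyDetector and the toy batches are illustrative stand-ins, not part of Det3D:

import torch

class DummyDetector(torch.nn.Module):
    # Returns one output dict per sample, mimicking the structure documented above.
    def forward(self, example):
        return [{"box3d_lidar": torch.zeros(1, 7),
                 "scores": torch.ones(1),
                 "label_preds": torch.zeros(1, dtype=torch.long),
                 "metadata": {"token": t}} for t in example["tokens"]]

model = DummyDetector().eval()
cpu_device = torch.device("cpu")
results_dict = {}
batches = [{"tokens": [0, 1]}, {"tokens": [2, 3]}]
with torch.no_grad():
    for batch in batches:
        for output in model(batch):
            token = output["metadata"]["token"]
            for k, v in output.items():
                if k != "metadata":
                    output[k] = v.to(cpu_device)
            results_dict[token] = output
print(sorted(results_dict))  # [0, 1, 2, 3]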
Example No. 2
def compute_on_dataset(model, data_loader, device, timer=None, show=False):
    model.eval()
    cpu_device = torch.device("cpu")

    results_dict = {}
    prog_bar = torchie.ProgressBar(len(data_loader.dataset))
    for i, batch in enumerate(data_loader):
        # example = example_convert_to_torch(batch, device=device)
        example = example_to_device(batch, device=device)
        with torch.no_grad():
            outputs = model(example, return_loss=False, rescale=not show)
            for output in outputs:
                token = output["metadata"]["token"]
                for k, v in output.items():
                    if k not in [
                        "metadata",
                    ]:
                        output[k] = v.to(cpu_device)
                results_dict.update(
                    {token: output,}
                )
                prog_bar.update()

    return results_dict
Example No. 3
def visualize(model, data_loader, device, timer=None, show=False):
    model.eval()

    for i, batch in enumerate(data_loader):
        example = example_to_device(batch, device=device)
        with torch.no_grad():

            ax1, ax2 = init_plot()

            draw_pointcloud(ax1, example)

            plot_image(ax2, example)

            draw_annotations(ax1, example, len(batch['metadata']))

            outputs = model(example, return_loss=False, rescale=not show)

            draw_predictions(ax1, outputs)

            plt.show(block=True)

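
The plotting helpers used above (init_plot, draw_pointcloud, plot_image, draw_annotations, draw_predictions) belong to the surrounding project. As an assumption about the layout only, a two-panel figure like the one below is a minimal stand-in for what init_plot could set up; the real helper may differ:

import matplotlib.pyplot as plt

def init_plot():
    # One axis for the point cloud and boxes, one for the camera image.
    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 6))
    ax1.set_title("point cloud + boxes")
    ax2.set_title("camera image")
    return ax1, ax2

ax1, ax2 = init_plot()
plt.show(block=True)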
Example No. 4
def test_v2(dataloader,
            model,
            device="cuda",
            distributed=False,
            eval_id=None,
            vis_id=None):
    '''
        Example:
            python test_v2.py --eval_id 6 8 --vis_id 6
    '''
    # prepare model
    if distributed:
        model = model.module
    model.eval()

    # prepare samples
    kitti_dataset = dataloader.dataset  # det3d.datasets.kitti.kitti.KittiDataset
    samples = []
    valid_ids = get_dataset_ids('val')
    for id in eval_id:
        index = valid_ids.index(id)
        samples.append(kitti_dataset[index])
    batch_samples = collate_kitti(samples)
    example = example_to_device(batch_samples, device=torch.device(device))

    # evaluation
    results_dict = {}
    with torch.no_grad():
        # outputs: predicted results in lidar coord.
        outputs = model(example, return_loss=False, rescale=True)
        for output in outputs:
            token = output["metadata"]["token"]
            for k, v in output.items():
                if k not in [
                        "metadata",
                ]:
                    output[k] = v.to(torch.device("cpu"))
            results_dict.update({
                token: output,
            })

        # pred_annos: convert predictions in lidar to cam coord.
        res_dir = os.path.join("./", "sample_eval_results")
        os.makedirs(res_dir, exist_ok=True)
        pred_annos = kitti_dataset.convert_detection_to_kitti_annos(
            results_dict, partial=True)

        # save predicted results to txt files.
        for dt in pred_annos:
            with open(
                    os.path.join(res_dir,
                                 "%06d.txt" % int(dt["metadata"]["token"])),
                    "w") as fout:
                lines = kitti.annos_to_kitti_label(dt)
                for line in lines:
                    fout.write(line + "\n")

    # visualization part
    if vis_id is not None:
        assert vis_id in eval_id
        from det3d.visualization.kitti_data_vis.kitti.kitti_object import show_lidar_with_boxes_rect
        import numpy as np

        index = eval_id.index(vis_id)
        pred_box_loc = pred_annos[index]['location']
        pred_box_dim = pred_annos[index]['dimensions']
        pred_box_ry = pred_annos[index]['rotation_y'].reshape(-1, 1)
        pred_boxes = np.concatenate(
            (pred_box_loc, pred_box_dim[:, [1, 2, 0]], pred_box_ry), axis=1)
        pred_scores = pred_annos[index]['score']

        index = valid_ids.index(vis_id)
        show_lidar_with_boxes_rect(
            sample_id=vis_id,
            pred_boxes3d=pred_boxes,
            pred_scores=pred_scores,
        )
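
For reference, a small shape check of the box assembly above with dummy values; the semantic meaning of the reordered dimension columns depends on the KITTI annotation convention and is not asserted here:

import numpy as np

loc = np.zeros((2, 3))                              # x, y, z
dim = np.arange(6, dtype=float).reshape(2, 3)       # dummy dimension columns [d0, d1, d2]
ry = np.zeros((2, 1))                               # rotation_y
boxes = np.concatenate((loc, dim[:, [1, 2, 0]], ry), axis=1)
print(boxes.shape)    # (2, 7)
print(boxes[0, 3:6])  # [1. 2. 0.] -> columns reordered to [d1, d2, d0]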