Example #1
File: test.py Project: zzdxlee/Det3D
def test(
    dataloader,
    model,
    save_dir="",
    device="cuda",
    distributed=False,
):
    # Unwrap the raw model from its DistributedDataParallel wrapper.
    if distributed:
        model = model.module

    dataset = dataloader.dataset

    device = torch.device(device)
    num_devices = get_world_size()

    # Run inference over the full dataloader on this process, then wait
    # for every other rank to finish.
    detections = compute_on_dataset(model, dataloader, device)
    synchronize()

    # Merge the per-GPU detections; only the main process receives the
    # merged dict, all other ranks get None and return early below.
    predictions = _accumulate_predictions_from_multiple_gpus(detections)

    if not is_main_process():
        return

    # Dataset-specific evaluation (e.g. KITTI metrics) on the main process.
    return dataset.evaluation(predictions, str(save_dir))
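For context, here is a minimal sketch of how such an evaluation entry point is typically driven under torch.distributed. The builder helpers (build_model, build_dataloader), the config object, and the save directory are placeholders introduced for illustration only; they are not part of the Det3D API shown above.

import torch
import torch.distributed as dist

def run_eval(cfg, local_rank, distributed=False):
    # Hypothetical builders; a real project constructs these from its config.
    model = build_model(cfg).to(f"cuda:{local_rank}")
    dataloader = build_dataloader(cfg, split="val")

    if distributed:
        # One process per GPU; wrap the model so gradients/buffers stay in sync.
        dist.init_process_group(backend="nccl")
        model = torch.nn.parallel.DistributedDataParallel(
            model, device_ids=[local_rank]
        )

    model.eval()
    with torch.no_grad():
        # Every rank calls test(); only the main process gets metrics back.
        results = test(
            dataloader, model, save_dir="work_dirs/eval", distributed=distributed
        )
    return results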
Example #2
def test(
    dataloader, model, save_dir="", device="cuda", distributed=False,
):
    # Debugging variant: instead of running inference, evaluate predictions
    # that were previously saved to disk.
    if distributed:
        model = model.module

    dataset = dataloader.dataset

    device = torch.device(device)
    num_devices = get_world_size()

    # Original flow, kept for reference: compute detections once and cache them.
    # detections = compute_on_dataset(model, dataloader, device)
    # torch.save(detections, "/home/jty/pcdet/final_predictions_debug.pkl")
    detections = torch.load("/home/jty/pcdet/final_predictions_debug.pkl")

    import pickle
    # with open(
    #         '/home/jty/pcdet/pvrcnn/OpenLidarPerceptron/output/cfgs/kitti_models/pvrcnn_deecamp/default/eval/epoch_24/val/default/result.pkl',
    #         'rb') as input_file:
    #     dt = pickle.load(input_file)

    # Load externally produced detections and evaluate them directly.
    with open('/home/jty/Downloads/result.pkl', 'rb') as input_file:
        dt = pickle.load(input_file)
    synchronize()

    # predictions = _accumulate_predictions_from_multiple_gpus(dt)
    # predictions = {}
    if not is_main_process():
        return

    return dataset.evaluation(dt, str(save_dir))
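The second example illustrates a common debugging pattern: cache the expensive inference output once, then iterate on the evaluation code against the cached file. A generic sketch of that pattern follows; the cache path is a placeholder, and compute_on_dataset is assumed to be the same helper used in the examples above.

import os
import torch

def cached_inference(model, dataloader, device, cache_path="detections_cache.pkl"):
    # Reuse previously computed detections when the cache file exists;
    # otherwise run inference once and save the result for later runs.
    if os.path.exists(cache_path):
        return torch.load(cache_path)
    detections = compute_on_dataset(model, dataloader, device)
    torch.save(detections, cache_path)
    return detections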
Example #3
def _accumulate_predictions_from_multiple_gpus(predictions_per_gpu):
    # Gather every process's prediction dict into a list on all ranks.
    all_predictions = all_gather(predictions_per_gpu)
    if not is_main_process():
        return

    # Merge the per-GPU dicts (each keyed by sample index) into one dict.
    predictions = {}
    for p in all_predictions:
        predictions.update(p)

    return predictions
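To make the merge step concrete, here is a small illustration of the data shapes involved, assuming each rank produces a dict keyed by dataset sample index; the keys and payloads below are made up for illustration.

# Suppose rank 0 and rank 1 each evaluated a disjoint shard of the dataset.
rank0_preds = {0: {"boxes": "...", "scores": "..."},
               2: {"boxes": "...", "scores": "..."}}
rank1_preds = {1: {"boxes": "...", "scores": "..."},
               3: {"boxes": "...", "scores": "..."}}

# all_gather() would hand every process [rank0_preds, rank1_preds];
# the main process then folds them into a single dict covering all samples.
merged = {}
for p in [rank0_preds, rank1_preds]:
    merged.update(p)

assert sorted(merged) == [0, 1, 2, 3]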