Example #1
import logging
import os

import torch

# Timer, get_time_str, get_world_size, synchronize, is_main_process,
# compute_on_dataset, _accumulate_predictions_from_multiple_gpus, evaluate,
# and bcolors are helpers defined elsewhere in the surrounding fcos_core
# project.


def inference(
        model,
        data_loader,
        dataset_name,
        iou_types=("bbox", ),
        box_only=False,
        device="cuda",
        expected_results=(),
        expected_results_sigma_tol=4,
        output_folder=None,
):
    # convert to a torch.device for efficiency
    device = torch.device(device)
    num_devices = get_world_size()
    logger = logging.getLogger("fcos_core.inference")
    dataset = data_loader.dataset
    logger.info("Start evaluation on {} dataset({} images).".format(
        dataset_name, len(dataset)))
    total_timer = Timer()
    inference_timer = Timer()
    total_timer.tic()
    predictions = compute_on_dataset(model, data_loader, device,
                                     inference_timer)
    # wait for all processes to complete before measuring the time
    synchronize()
    total_time = total_timer.toc()
    total_time_str = get_time_str(total_time)
    logger.info(
        "Total run time: {} ({} s / img per device, on {} devices)".format(
            total_time_str, total_time * num_devices / len(dataset),
            num_devices))
    total_infer_time = get_time_str(inference_timer.total_time)
    logger.info(
        "Model inference time: {} ({} s / img per device, on {} devices)".
        format(
            total_infer_time,
            inference_timer.total_time * num_devices / len(dataset),
            num_devices,
        ))
    print(f"{bcolors.WARNING}{0}{bcolors.ENDC}".format(predictions))
    predictions = _accumulate_predictions_from_multiple_gpus(predictions)
    if not is_main_process():
        return

    print(f"{bcolors.WARNING}Inference: predictions{bcolors.ENDC}")
    print(f"{bcolors.WARNING}{0}{bcolors.ENDC}".format(predictions))
    if output_folder:
        torch.save(predictions, os.path.join(output_folder, "predictions.pth"))

    extra_args = dict(
        box_only=box_only,
        iou_types=iou_types,
        expected_results=expected_results,
        expected_results_sigma_tol=expected_results_sigma_tol,
    )

    return evaluate(dataset=dataset,
                    predictions=predictions,
                    output_folder=output_folder,
                    **extra_args)
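
Both examples color their debug prints with a bcolors helper that the
snippet does not define. A minimal sketch of the usual ANSI-escape class
behind names like bcolors.WARNING and bcolors.ENDC (the definition in the
source repo may differ):

class bcolors:
    # ANSI escape sequences for colored terminal output
    WARNING = "\033[93m"  # yellow
    ENDC = "\033[0m"      # reset to the default color

print(f"{bcolors.WARNING}highlighted{bcolors.ENDC}")
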
Example #2
def inference(
    model,
    data_loader,
    dataset_name,
    iou_types=("bbox", ),
    box_only=False,
    device="cuda",
    expected_results=(),
    expected_results_sigma_tol=4,
    output_folder=None,
    start_iter=0,
    break_iter=0,
    speed_only=False,
    benchmark=False,
    cfg=None,
):
    # convert to a torch.device for efficiency
    device = torch.device(device)
    num_devices = get_world_size()
    logger = logging.getLogger("fcos_core.inference")
    dataset = data_loader.dataset
    logger.info("Start evaluation on {} dataset({} images).".format(
        dataset_name, len(dataset)))
    total_timer = Timer()
    inference_timer = Timer()
    # break_iter == 0 means "run over the whole dataset"
    if break_iter == 0:
        break_iter = len(dataset)
    if benchmark:
        # per-stage timers, filled in by compute_on_dataset
        timers = [Timer() for _ in range(11)]
    else:
        timers = None
    total_timer.tic()
    predictions = compute_on_dataset(model,
                                     data_loader,
                                     device,
                                     inference_timer,
                                     start_iter=start_iter,
                                     break_iter=break_iter,
                                     speed_only=speed_only,
                                     benchmark=benchmark,
                                     timers=timers)
    # wait for all processes to complete before measuring the time
    synchronize()
    total_time = total_timer.toc()
    total_time_str = get_time_str(total_time)
    logger.info(
        "Total run time: {} ({} s / img per device, on {} devices)".format(
            total_time_str,
            total_time * num_devices / (break_iter - start_iter), num_devices))
    total_infer_time = get_time_str(inference_timer.total_time)
    logger.info(
        "Model inference time: {} ({} s / img per device, fps {}, on {} devices)"
        .format(
            total_infer_time,
            inference_timer.total_time * num_devices /
            (break_iter - start_iter),
            (break_iter - start_iter) /
            (inference_timer.total_time * num_devices),
            num_devices,
        ))
    if benchmark:
        for i, timer in enumerate(timers):
            logger.info("timer {}: {} s".format(
                i,
                timer.total_time * num_devices / (break_iter - start_iter),
            ))
        return

    # benchmark already returned above, so only speed_only matters here
    if speed_only:
        return

    predictions = _accumulate_predictions_from_multiple_gpus(predictions)
    if not is_main_process():
        return

    # if output_folder:
    #     torch.save(predictions, os.path.join(output_folder, "predictions.pth"))

    extra_args = dict(
        box_only=box_only,
        iou_types=iou_types,
        expected_results=expected_results,
        expected_results_sigma_tol=expected_results_sigma_tol,
    )
    return evaluate(dataset=dataset,
                    predictions=predictions,
                    output_folder=output_folder,
                    **extra_args)
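
A hypothetical call site for the extended signature in Example #2. The
model, val_loader, and dataset name below are illustrative placeholders,
not values from the source:

# Full evaluation over the whole dataset (break_iter=0 expands to len(dataset)):
results = inference(model, val_loader, dataset_name="coco_2017_val",
                    output_folder="./inference_out")

# Timing-only run over iterations 100..600: speed_only skips accuracy
# evaluation, so inference() returns None after logging the throughput.
inference(model, val_loader, dataset_name="coco_2017_val",
          start_iter=100, break_iter=600, speed_only=True)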