import time

import torch
from pycocotools.coco import COCO

# CocoEvaluator, get_coco_api_from_dataset and utils are assumed to come from
# torchvision's detection reference utilities (coco_eval.py, coco_utils.py,
# utils.py); _get_iou_types is sketched at the end of this file.
from coco_eval import CocoEvaluator
from coco_utils import get_coco_api_from_dataset
import utils


def evaluate_from_results_file(gt_data_loader, results_from_file):
    coco = get_coco_api_from_dataset(gt_data_loader.dataset)
    iou_types = ["bbox"]  # NOTE(rjbruin): hardcoded to only do bboxes
    coco_evaluator = CocoEvaluator(coco, iou_types)
    coco_evaluator.put_results(results_from_file)
    coco_evaluator.synchronize_between_processes()

    # accumulate predictions from all images
    coco_evaluator.accumulate()
    coco_evaluator.summarize()
    return coco_evaluator
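
# Usage sketch (hedged): evaluate detections that were saved to disk earlier.
# Assumptions: the results were produced by perform_eval_inference below and
# serialized with json; "results.json" and val_data_loader are illustrative
# names, not part of this module.
#
#   import json
#   with open("results.json") as f:
#       results_from_file = json.load(f)
#   evaluator = evaluate_from_results_file(val_data_loader, results_from_file)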


@torch.no_grad()
def perform_eval_inference(model, data_loader, device):
    n_threads = torch.get_num_threads()
    # FIXME remove this and make paste_masks_in_image run on the GPU
    torch.set_num_threads(1)
    cpu_device = torch.device("cpu")
    model.eval()
    metric_logger = utils.MetricLogger(delimiter="  ")
    header = 'Test:'

    coco = get_coco_api_from_dataset(data_loader.dataset)
    iou_types = _get_iou_types(model)
    coco_evaluator = CocoEvaluator(coco, iou_types)

    results_for_file = {iou_type: [] for iou_type in coco_evaluator.iou_types}
    for image, targets in metric_logger.log_every(data_loader, 100, header):
        image = list(img.to(device) for img in image)

        if torch.cuda.is_available():
            torch.cuda.synchronize()
        model_time = time.time()
        outputs = model(image)
        model_time = time.time() - model_time

        # Normalize targets to a list and move targets/outputs to the CPU,
        # converting tensors to NumPy so the results can be serialized later.
        if isinstance(targets, dict):
            targets = [targets]
        targets = [{
            k: v.to(cpu_device).detach().numpy()
            for k, v in t.items()
        } for t in targets]
        outputs = [{
            k: v.to(cpu_device).detach().numpy()
            for k, v in o.items()
        } for o in outputs]

        res = {
            target["image_id"].item(): output
            for target, output in zip(targets, outputs)
        }
        evaluator_time = time.time()
        batch_results_for_file = coco_evaluator.update_inference(res)
        evaluator_time = time.time() - evaluator_time
        metric_logger.update(model_time=model_time,
                             evaluator_time=evaluator_time)

        for iou_type in coco_evaluator.iou_types:
            results_for_file[iou_type].extend(batch_results_for_file[iou_type])

    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    # NOTE: coco_evaluator.synchronize_between_processes() is not called here;
    # accumulation and summarization happen later, e.g. in
    # evaluate_from_results_file above.

    torch.set_num_threads(n_threads)
    return results_for_file
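
# Usage sketch (hedged): run inference once and persist the per-IoU-type
# results so they can be evaluated later without re-running the model. json
# and the file name are illustrative; the exact entry format is whatever this
# project's customized CocoEvaluator.update_inference() emits.
#
#   import json
#   results = perform_eval_inference(model, val_data_loader, torch.device("cuda"))
#   with open("results.json", "w") as f:
#       json.dump(results, f)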


def evaluate_preloaded(gt_from_file, results_from_file):
    # Use dataset object loaded from file instead of from dataset
    coco = COCO()
    coco.dataset = gt_from_file
    coco.createIndex()

    iou_types = ["bbox"]  # NOTE(rjbruin): hardcoded to only do bboxes
    coco_evaluator = CocoEvaluator(coco, iou_types)
    coco_evaluator.put_results(results_from_file)
    coco_evaluator.synchronize_between_processes()

    # accumulate predictions from all images
    coco_evaluator.accumulate()
    coco_evaluator.summarize()
    return coco_evaluator
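
# Usage sketch (hedged): here even the ground truth comes from disk as a dict
# in COCO annotation format (with "images", "annotations" and "categories"
# keys), so no dataset object or model is needed. File names are illustrative.
#
#   import json
#   with open("instances_val.json") as f:
#       gt_from_file = json.load(f)
#   with open("results.json") as f:
#       results_from_file = json.load(f)
#   evaluator = evaluate_preloaded(gt_from_file, results_from_file)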


@torch.no_grad()
def evaluate(model, data_loader, device):
    n_threads = torch.get_num_threads()
    # FIXME remove this and make paste_masks_in_image run on the GPU
    torch.set_num_threads(1)
    cpu_device = torch.device("cpu")
    model.eval()
    metric_logger = utils.MetricLogger(delimiter="  ")
    header = 'Test:'

    coco = get_coco_api_from_dataset(data_loader.dataset)
    iou_types = _get_iou_types(model)
    coco_evaluator = CocoEvaluator(coco, iou_types)

    for image, targets in metric_logger.log_every(data_loader, 100, header):
        image = list(img.to(device) for img in image)
        targets = [{k: v.to(device) for k, v in t.items()} for t in targets]

        if torch.cuda.is_available():
            torch.cuda.synchronize()
        model_time = time.time()
        outputs = model(image)

        outputs = [{k: v.to(cpu_device)
                    for k, v in t.items()} for t in outputs]
        model_time = time.time() - model_time

        res = {
            target["image_id"].item(): output
            for target, output in zip(targets, outputs)
        }
        evaluator_time = time.time()
        coco_evaluator.update(res)
        evaluator_time = time.time() - evaluator_time
        metric_logger.update(model_time=model_time,
                             evaluator_time=evaluator_time)

    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    coco_evaluator.synchronize_between_processes()

    # accumulate predictions from all images
    coco_evaluator.accumulate()
    coco_evaluator.summarize()
    torch.set_num_threads(n_threads)
    return coco_evaluator
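

# _get_iou_types is called above but not defined in this snippet. Below is a
# plausible definition matching the helper of the same name in torchvision's
# detection reference scripts (references/detection/engine.py); treat it as an
# assumption about the original code, not a verbatim copy.
import torchvision


def _get_iou_types(model):
    # Unwrap DistributedDataParallel to inspect the underlying model class.
    model_without_ddp = model
    if isinstance(model, torch.nn.parallel.DistributedDataParallel):
        model_without_ddp = model.module
    iou_types = ["bbox"]
    if isinstance(model_without_ddp, torchvision.models.detection.MaskRCNN):
        iou_types.append("segm")
    if isinstance(model_without_ddp, torchvision.models.detection.KeypointRCNN):
        iou_types.append("keypoints")
    return iou_types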