Example #1
import logging
import os

import torch

from maskrcnn_benchmark.utils.comm import all_gather, is_main_process

# The remaining helpers (get_evaluator, get_tag, COCOResults, evaluate_coco,
# evaluate_box_proposals, check_expected_results, the prepare_for_coco_*
# functions and _accumulate_predictions_from_multiple_gpus) are defined
# elsewhere in the surrounding module.


def check_completed_tags():
    # The evaluator is only valid on the master rank - all other ranks have
    # nothing. So assemble the dict of completed runs on the master rank only.
    if is_main_process():
        evaluator = get_evaluator()

        # loop over all (epoch, result) pairs that have finished
        all_results = {}
        for t, r in evaluator.finished_tasks().items():
            # Note: one indirection due to possibility of multiple test datasets
            # we only care about the first
            map_results = r  # [0]
            bbox_map = map_results.results["bbox"]["AP"]
            segm_map = map_results.results["segm"]["AP"]
            all_results[t] = (bbox_map, segm_map)

        return all_results
    
    return {}
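
# Note: check_completed_tags() is safe to call from every rank; non-master
# ranks simply get back an empty dict, so callers do not need their own
# is_main_process() guard.
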
def do_coco_evaluation(
    dataset,
    predictions,
    box_only,
    output_folder,
    iou_types,
    expected_results,
    expected_results_sigma_tol,
):
    logger = logging.getLogger("maskrcnn_benchmark.inference")

    # box_only takes a different path here: the fast parallel method is not
    # available, so fall back to (effectively) the old code path.
    if box_only:
        predictions = _accumulate_predictions_from_multiple_gpus(predictions)
        if not is_main_process():
            return

        logger.info("Evaluating bbox proposals")
        areas = {"all": "", "small": "s", "medium": "m", "large": "l"}
        res = COCOResults("box_proposal")
        for limit in [100, 1000]:
            for area, suffix in areas.items():
                stats = evaluate_box_proposals(
                    predictions, dataset, area=area, limit=limit
                )
                key = "AR{}@{:d}".format(suffix, limit)
                res.results["box_proposal"][key] = stats["ar"].item()
        logger.info(res)
        check_expected_results(res, expected_results, expected_results_sigma_tol)
        if output_folder:
            torch.save(res, os.path.join(output_folder, "box_proposals.pth"))
        return
    logger.info("Preparing results for COCO format")
    coco_results = {}
    if "bbox" in iou_types:
        logger.info("Preparing bbox results")
        coco_results["bbox"] = prepare_for_coco_detection(predictions, dataset)
    if "segm" in iou_types:
        logger.info("Preparing segm results")
        coco_results["segm"] = prepare_for_coco_segmentation(predictions, dataset)
    if "keypoints" in iou_types:
        logger.info("Preparing keypoints results")
        coco_results["keypoints"] = prepare_for_coco_keypoint(predictions, dataset)

    # Gather all prepared predictions of each type from all ranks
    if "bbox" in iou_types:
        temp_bbox_list = all_gather(coco_results["bbox"])
    if "segm" in iou_types:
        temp_segm_list = all_gather(coco_results["segm"])
    if "keypoints" in iou_types:
        temp_keypoints_list = all_gather(coco_results["keypoints"])

    # Only main process will call COCO
    if not is_main_process():
        return

    # Unpack the gathered results into a single List[Entry]
    if "bbox" in iou_types:
        coco_results["bbox"] = [i for j in temp_bbox_list for i in j]
    if "segm" in iou_types:
        coco_results["segm"] = [i for j in temp_segm_list for i in j]
    if "keypoints" in iou_types:
        coco_results["keypoints"] = [i for j in temp_keypoints_list for i in j]

    # Submit to async evaluator
    get_evaluator().submit_task(get_tag(),
                                evaluate_coco,
                                dataset,
                                coco_results,
                                iou_types,
                                output_folder)
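    # submit_task is fire-and-forget from this rank's point of view: the tag
    # from get_tag() is the key under which evaluator.finished_tasks() will
    # later expose the evaluate_coco result (see check_completed_tags above).
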
    # Note: none of these are possible now, since evaluation completes
    # asynchronously:
    # logger.info(results)
    # check_expected_results(results, expected_results, expected_results_sigma_tol)
    # if output_folder:
    #     torch.save(results, os.path.join(output_folder, "coco_results.pth"))

    # Note: no evaluation results are available yet - the relevant future is
    # held inside the hidden AsyncEvaluator object
    return None, coco_results
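
# End-to-end usage sketch (an assumption, not part of the original example;
# `dataset`, `predictions` and the output folder are placeholders):
#
#     # every rank takes part in the all_gather calls above, but only the
#     # master rank submits the asynchronous evaluate_coco task
#     do_coco_evaluation(
#         dataset,
#         predictions,
#         box_only=False,
#         output_folder="/tmp/coco_eval",
#         iou_types=("bbox", "segm"),
#         expected_results=[],
#         expected_results_sigma_tol=0,
#     )
#
#     # later (e.g. once per epoch) poll for any finished evaluations
#     for tag, (bbox_map, segm_map) in check_completed_tags().items():
#         print("{}: bbox AP={:.4f}  segm AP={:.4f}".format(tag, bbox_map, segm_map))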