Example 1
def run_test(cfg, model, test_data_loader, distributed, logger):
    if distributed:
        model = model.module
    torch.cuda.empty_cache()
    device = torch.device(cfg.MODEL.DEVICE)
    model.eval()

    test_result = []
    logger.info('START TEST with size: ' + str(len(test_data_loader)))
    for iteration, (fg_imgs, fg_txts, bg_imgs, bg_txts) in enumerate(tqdm(test_data_loader)):
        for fg_img, fg_txt, bg_img, bg_txt in zip(fg_imgs, fg_txts, bg_imgs, bg_txts):
            fg_img['entities'] = fg_img['entities'].to(device)
            fg_img['relations'] = fg_img['relations'].to(device)
            fg_img['graph'] = fg_img['graph'].to(device)
            fg_txt['entities'] = fg_txt['entities'].to(device)
            fg_txt['relations'] = fg_txt['relations'].to(device)
            fg_txt['graph'] = fg_txt['graph'].to(device)
            bg_img['entities'] = bg_img['entities'].to(device)
            bg_img['relations'] = bg_img['relations'].to(device)
            bg_img['graph'] = bg_img['graph'].to(device)
            bg_txt['entities'] = bg_txt['entities'].to(device)
            bg_txt['relations'] = bg_txt['relations'].to(device)
            bg_txt['graph'] = bg_txt['graph'].to(device)

        synchronize()
        test_output = model(fg_imgs, fg_txts, bg_imgs, bg_txts, is_test=True)
        gathered_result = all_gather(to_cpu(test_output).cpu())
        test_result.append(gathered_result)
    return test_result
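Example 1 (like Examples 4 and 7 below) calls a synchronize() helper before gathering, but its definition is not included in the listing. A minimal sketch of such a barrier wrapper, assuming plain torch.distributed (the helper in the original code base may differ), is:

import torch.distributed as dist

def synchronize():
    # Barrier across all ranks; a no-op outside a distributed run.
    if not dist.is_available() or not dist.is_initialized():
        return
    if dist.get_world_size() == 1:
        return
    dist.barrier()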
Example 2
def finalize_use_feature(self):
    pair_df_per_gpu = self.pair_df
    all_pair_df = all_gather(pair_df_per_gpu)
    if not is_main_process():
        return
    pd.concat(all_pair_df).to_pickle(self.output_folder +
                                     "/all_pair_df.pickle")
Example 3
def _accumulate_predictions_from_multiple_gpus(predictions_per_gpu):
    all_predictions = all_gather(predictions_per_gpu)
    if not is_main_process():
        return
    # merge the list of dicts
    predictions = {}
    for p in all_predictions:
        predictions.update(p)

    return predictions
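Every example here relies on an all_gather utility that collects a picklable Python object from each rank into a list with one entry per process; its implementation is not reproduced in the listing. A minimal sketch built on torch.distributed.all_gather_object (PyTorch 1.8+) follows; the original code base may instead serialize objects into byte tensors by hand, so treat this as an assumption rather than the project's actual helper:

import torch.distributed as dist

def all_gather(data):
    # Collect `data` from every rank into a list of length world_size.
    if not dist.is_available() or not dist.is_initialized():
        return [data]
    world_size = dist.get_world_size()
    if world_size == 1:
        return [data]
    output = [None] * world_size
    dist.all_gather_object(output, data)  # pickles `data` under the hood
    return output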
Example 4
def run_val(cfg, model, listener, val_data_loaders, distributed, logger):
    if distributed:
        model = model.module
    torch.cuda.empty_cache()
    iou_types = ("bbox", )
    if cfg.MODEL.MASK_ON:
        iou_types = iou_types + ("segm", )
    if cfg.MODEL.KEYPOINT_ON:
        iou_types = iou_types + ("keypoints", )
    if cfg.MODEL.RELATION_ON:
        iou_types = iou_types + ("relations", )
    if cfg.MODEL.ATTRIBUTE_ON:
        iou_types = iou_types + ("attributes", )

    dataset_names = cfg.DATASETS.VAL
    val_result = []
    for dataset_name, val_data_loader in zip(dataset_names, val_data_loaders):

        dataset_result = listener_inference(
            cfg,
            model,
            listener,
            val_data_loader,
            dataset_name=dataset_name,
            iou_types=iou_types,
            box_only=False if cfg.MODEL.RETINANET_ON else cfg.MODEL.RPN_ONLY,
            device=cfg.MODEL.DEVICE,
            expected_results=cfg.TEST.EXPECTED_RESULTS,
            expected_results_sigma_tol=cfg.TEST.EXPECTED_RESULTS_SIGMA_TOL,
            output_folder=None,
            logger=logger,
        )
        synchronize()
        if type(dataset_result) is not tuple:
            dataset_result = (dataset_result, )
        val_result.append(dataset_result)

    organized_result = [[val_result[i][j] for i in range(len(val_result))]
                        for j in range(len(val_result[0]))]
    final_result = []
    for i in range(len(organized_result)):
        # support for multi gpu distributed testing
        gathered_result = all_gather(torch.tensor(organized_result[i]).cpu())
        gathered_result = [t.view(-1) for t in gathered_result]
        gathered_result = torch.cat(gathered_result, dim=-1).view(-1)
        valid_result = gathered_result[gathered_result >= 0]
        final_result.append(float(valid_result.mean()))
        del gathered_result, valid_result
        torch.cuda.empty_cache()
    return tuple(final_result)
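Examples 4 and 7 repeat the same reduction idiom: gather per-rank scalar results with all_gather, drop negative placeholders, and average what remains. A hypothetical standalone helper capturing that idiom (the name reduce_scalar_list is an assumption, not part of the original code) could look like:

import torch

def reduce_scalar_list(values):
    # Gather a list of per-rank floats and return the mean of the
    # non-negative entries across all processes.
    gathered = all_gather(torch.tensor(values, dtype=torch.float32).cpu())
    gathered = torch.cat([t.view(-1) for t in gathered], dim=-1)
    valid = gathered[gathered >= 0]
    return float(valid.mean())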
Example 5
def _accumulate_predictions_from_multiple_gpus(predictions_per_gpu):
    all_predictions = all_gather(predictions_per_gpu)
    if not is_main_process():
        return
    # merge the list of dicts
    predictions = {}
    for p in all_predictions:
        predictions.update(p)
    # convert a dict where the key is the index in a list
    image_ids = list(sorted(predictions.keys()))
    if len(image_ids) != image_ids[-1] + 1:
        logger = logging.getLogger("maskrcnn_benchmark.inference")
        logger.warning(
            "Number of images that were gathered from multiple processes is not "
            "a contiguous set. Some images might be missing from the evaluation"
        )

    # convert to a list
    predictions = [predictions[i] for i in image_ids]
    return predictions
Example 6
def _accumulate_predictions_from_multiple_gpus(predictions_per_gpu,
                                               return_dict=False,
                                               only_gather=False):
    if _dict_to_list is None:
        return
    if get_world_size() == 1:
        return predictions_per_gpu
    all_predictions = all_gather(predictions_per_gpu)
    if only_gather:
        return all_predictions
    if not is_main_process():
        return
    # merge the list of dicts
    predictions = {}
    for p in all_predictions:
        predictions.update(p)

    if return_dict:
        return predictions

    return _dict_to_list(predictions)
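Example 6 delegates the final conversion to a _dict_to_list helper that the listing does not show. Based on the inline logic of Example 5, a plausible sketch (an assumption, not the original helper) is:

import logging

def _dict_to_list(predictions):
    # Order predictions by image id and warn if the ids are not contiguous,
    # mirroring the inline conversion in Example 5.
    image_ids = sorted(predictions.keys())
    if len(image_ids) != image_ids[-1] + 1:
        logging.getLogger("maskrcnn_benchmark.inference").warning(
            "Number of images that were gathered from multiple processes is not "
            "a contiguous set. Some images might be missing from the evaluation"
        )
    return [predictions[i] for i in image_ids]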
Example 7
def run_val(cfg, model, val_data_loader, distributed, logger):
    if distributed:
        model = model.module
    torch.cuda.empty_cache()
    device = torch.device(cfg.MODEL.DEVICE)
    model.eval()

    val_result = []
    logger.info('START VALIDATION with size: ' + str(len(val_data_loader)))
    for iteration, (fg_imgs, fg_txts, bg_imgs, bg_txts) in enumerate(tqdm(val_data_loader)):
        for fg_img, fg_txt, bg_img, bg_txt in zip(fg_imgs, fg_txts, bg_imgs, bg_txts):
            fg_img['entities'] = fg_img['entities'].to(device)
            fg_img['relations'] = fg_img['relations'].to(device)
            fg_img['graph'] = fg_img['graph'].to(device)
            fg_txt['entities'] = fg_txt['entities'].to(device)
            fg_txt['relations'] = fg_txt['relations'].to(device)
            fg_txt['graph'] = fg_txt['graph'].to(device)
            bg_img['entities'] = bg_img['entities'].to(device)
            bg_img['relations'] = bg_img['relations'].to(device)
            bg_img['graph'] = bg_img['graph'].to(device)
            bg_txt['entities'] = bg_txt['entities'].to(device)
            bg_txt['relations'] = bg_txt['relations'].to(device)
            bg_txt['graph'] = bg_txt['graph'].to(device)

        loss_list = model(fg_imgs, fg_txts, bg_imgs, bg_txts)

        losses = sum(loss_list)
        
        synchronize()
        val_result.append(float(losses))
    # support for multi gpu distributed testing
    gathered_result = all_gather(torch.tensor(val_result).cpu())
    gathered_result = [t.view(-1) for t in gathered_result]
    gathered_result = torch.cat(gathered_result, dim=-1).view(-1)
    valid_result = gathered_result[gathered_result >= 0]
    val_result = float(valid_result.mean())
    del gathered_result, valid_result
    torch.cuda.empty_cache()
    return val_result
Example 8
def finalize_extract_feature(self):
    features_df_per_gpu = self.features_df
    all_features_df = all_gather(features_df_per_gpu)
    if not is_main_process():
        return
    pd.concat(all_features_df).to_pickle(self.df_file)
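Examples 2 and 8 highlight that the gathered payload does not have to be a tensor: any picklable object, here a pandas DataFrame, can travel through all_gather, and only the main process performs the final concat and pickle. A small usage sketch under that assumption (the frame contents are made up for illustration):

import pandas as pd

# Hypothetical per-rank frame; in the examples above it is built during inference.
per_gpu_df = pd.DataFrame({"image_id": [0, 1], "score": [0.1, 0.9]})
all_dfs = all_gather(per_gpu_df)  # one DataFrame per rank
if is_main_process():
    pd.concat(all_dfs).to_pickle("all_pair_df.pickle")  # same pattern as finalize_use_feature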
Example 9
def do_coco_evaluation(
    dataset,
    predictions,
    box_only,
    output_folder,
    iou_types,
    expected_results,
    expected_results_sigma_tol,
):
    logger = logging.getLogger("maskrcnn_benchmark.inference")

    # Different path here: the fast parallel method is not available, so fall
    # back to (effectively) the old path.
    if box_only:
        predictions = _accumulate_predictions_from_multiple_gpus(predictions)
        if not is_main_process():
            return

        logger.info("Evaluating bbox proposals")
        areas = {"all": "", "small": "s", "medium": "m", "large": "l"}
        res = COCOResults("box_proposal")
        for limit in [100, 1000]:
            for area, suffix in areas.items():
                stats = evaluate_box_proposals(predictions,
                                               dataset,
                                               area=area,
                                               limit=limit)
                key = "AR{}@{:d}".format(suffix, limit)
                res.results["box_proposal"][key] = stats["ar"].item()
        logger.info(res)
        check_expected_results(res, expected_results,
                               expected_results_sigma_tol)
        if output_folder:
            torch.save(res, os.path.join(output_folder, "box_proposals.pth"))
        return
    logger.info("Preparing results for COCO format")
    coco_results = {}
    if "bbox" in iou_types:
        logger.info("Preparing bbox results")
        coco_results["bbox"] = prepare_for_coco_detection(predictions, dataset)
    if "segm" in iou_types:
        logger.info("Preparing segm results")
        coco_results["segm"] = prepare_for_coco_segmentation(
            predictions, dataset)
    if 'keypoints' in iou_types:
        logger.info('Preparing keypoints results')
        coco_results['keypoints'] = prepare_for_coco_keypoint(
            predictions, dataset)

    # Gather all prepared predictions of each type from all ranks
    if "bbox" in iou_types:
        temp_bbox_list = all_gather(coco_results["bbox"])
    if "segm" in iou_types:
        temp_segm_list = all_gather(coco_results["segm"])
    if "keypoints" in iou_types:
        temp_keypoints_list = all_gather(coco_results["keypoints"])

    # Only main process will call COCO
    if not is_main_process():
        return

    # Unpack the gathered results into a single List[Entry]
    if "bbox" in iou_types:
        coco_results["bbox"] = [i for j in temp_bbox_list for i in j]
    if "segm" in iou_types:
        coco_results["segm"] = [i for j in temp_segm_list for i in j]
    if "keypoints" in iou_types:
        coco_results["keypoints"] = [i for j in temp_keypoints_list for i in j]

    results = evaluate_coco(dataset, coco_results, iou_types, output_folder)
    # Submit to async evaluator
    # get_evaluator().submit_task(get_tag(),
    #                            evaluate_coco,
    #                            dataset,
    #                            coco_results,
    #                            iou_types,
    #                            output_folder)
    # Note: None of these are possible now
    # logger.info(results)
    check_expected_results(results, expected_results,
                           expected_results_sigma_tol)
    # if output_folder:
    #     torch.save(results, os.path.join(output_folder, "coco_results.pth"))

    # Note: results is now empty, the relevant future is held in the hidden
    # AsyncEvaluator object
    return results, coco_results