Code Example #1
def run_test(
        model,
        im_dir=None,
        dataset_name=None,
        test_detection=None,
        word_embeddings=None,
        test_image_id_list=None,
        prior_mask=None,
        action_dic_inv=None,
        output_file=None,
        output_dict_file=None,
        object_thres=0.4,
        human_thres=0.6,
        prior_flag=1,
        device=torch.device("cuda"),
        cfg=None,
):

    logger = logging.getLogger("DRG.inference")
    logger.info("Start evaluation on {} dataset({} images).".format(
        dataset_name, len(test_image_id_list)))
    total_timer = Timer()
    inference_timer = Timer()
    total_timer.tic()

    np.random.seed(cfg.TEST.RNG_SEED)
    detection = []
    detect_app_dict = {}

    for count, image_id in enumerate(tqdm(test_image_id_list)):
        detect_app_dict[image_id] = []

        im_detect(model, im_dir, image_id, test_detection, word_embeddings,
                  prior_mask, action_dic_inv, object_thres, human_thres,
                  prior_flag, detection, detect_app_dict, device, cfg)
        break  # only the first image is processed; likely a leftover debug early-exit
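
This excerpt stops after the first image and never reports timing or writes its detections; the sibling variants below (Code Examples #3, #5, #10) finish with an epilogue along the following lines. This is a sketch mirroring Code Example #5, not code from this file:

    # Sketch of the missing epilogue (mirrors Code Example #5):
    synchronize()
    total_time = total_timer.toc()
    logger.info("Total run time: {}".format(get_time_str(total_time)))
    with open(output_dict_file, "wb") as f:
        pickle.dump(detect_app_dict, f)
    with open(output_file, "wb") as f:
        pickle.dump(detection, f)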
Code Example #2
    def __init__(self, proposal_matcher, box_coder,
                 generate_labels_func,
                 sigmoid_focal_loss,
                 mask_activation,
                 num_prototypes,
                 mask_to_train,
                 bbox_reg_beta=0.11,
                 regress_norm=1.0,
                 mask_norm=1.0,
                 mask_with_logits=False):
        super(YolactLossComputation, self).__init__(
                proposal_matcher, box_coder,
                generate_labels_func,
                sigmoid_focal_loss,
                bbox_reg_beta,
                regress_norm
        )
        if mask_with_logits:
            self.mask_activation = None
        else:
            self.mask_activation = mask_activation
        self.mask_with_logits = mask_with_logits
        self.num_prototypes = num_prototypes
        # don't copy masks because it is slow
        # self.copied_fields = ['labels', 'masks']
        self.mask_norm = mask_norm
        self.mask_to_train = mask_to_train

        if DEBUG:
            self.timer = Timer()
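
The constructor only stores the mask options; here is a hedged sketch of how they would typically be consumed when computing the mask loss. The method name and the BCE loss choice are assumptions, not code from this project:

import torch.nn.functional as F

def _mask_loss(self, mask_logits, mask_targets):
    # Hypothetical helper, not part of the excerpt above.
    if self.mask_with_logits:
        # Fused sigmoid + BCE is numerically stabler on raw logits.
        loss = F.binary_cross_entropy_with_logits(mask_logits, mask_targets)
    else:
        # Apply the stored activation (e.g. torch.sigmoid) first.
        loss = F.binary_cross_entropy(self.mask_activation(mask_logits),
                                      mask_targets)
    return loss / self.mask_norm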
Code Example #3
File: test_net_HICO_app.py, Project: vt-vl-lab/DRG
def run_test(model,
             dataset_name=None,
             test_detection=None,
             word_embeddings=None,
             output_file=None,
             output_file_human=None,
             output_file_object=None,
             object_thres=0.4,
             human_thres=0.6,
             device=None,
             cfg=None,
             opt=None):
    logger = logging.getLogger("DRG.inference")
    logger.info("Start evaluation on {} dataset.".format(dataset_name))
    total_timer = Timer()
    inference_timer = Timer()
    total_timer.tic()

    ROOT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
    DATA_DIR = os.path.abspath(os.path.join(ROOT_DIR, 'Data'))

    image_list = glob.glob(
        os.path.join(DATA_DIR, 'hico_20160224_det', 'images', 'test2015',
                     '*.jpg'))
    np.random.seed(cfg.TEST.RNG_SEED)
    detection = {}
    detection_human = {}
    detection_object = {}

    for idx, line in enumerate(tqdm(image_list)):

        image_id = int(line[-9:-4])  # last five digits before '.jpg', e.g. '..._00000123.jpg' -> 123

        if image_id in test_detection:
            im_detect(model, image_id, test_detection, word_embeddings,
                      object_thres, human_thres, detection, detection_human,
                      detection_object, device, opt)

    # wait for all processes to complete before measuring the time
    synchronize()
    total_time = total_timer.toc()
    total_time_str = get_time_str(total_time)

    num_devices = 1
    logger.info(
        "Total run time: {} ({} s / img per device, on {} devices)".format(
            total_time_str, total_time * num_devices / len(image_list),
            num_devices))
    total_infer_time = get_time_str(inference_timer.total_time)
    logger.info(
        "Model inference time: {} ({} s / img per device, on {} devices)".
        format(
            total_infer_time,
            inference_timer.total_time * num_devices / len(image_list),
            num_devices,
        ))

    pickle.dump(detection, open(output_file, "wb"))
    pickle.dump(detection_human, open(output_file_human, "wb"))
    pickle.dump(detection_object, open(output_file_object, "wb"))
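
The slice int(line[-9:-4]) keeps only the last five digits before '.jpg', which works as long as ids stay below 100000. A more defensive variant, assuming HICO-DET filenames of the form HICO_test2015_XXXXXXXX.jpg:

import os
import re

def parse_hico_image_id(path):
    # 'HICO_test2015_00000123.jpg' -> 123
    stem = os.path.splitext(os.path.basename(path))[0]
    match = re.search(r"(\d+)$", stem)
    if match is None:
        raise ValueError("no numeric image id in {!r}".format(path))
    return int(match.group(1))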
Code Example #4
File: inference_ens.py, Project: SYangDong/tse-t
def inference(
        model,
        data_loader,
        postprocessor,
        semi_loss,
        dataset_name,
        iou_types=("bbox",),
        box_only=False,
        device="cuda",
        expected_results=(),
        expected_results_sigma_tol=4,
        output_folder=None,
        anchor_strides=None
):
    # convert to a torch.device for efficiency
    device = torch.device(device)
    num_devices = get_world_size()
    logger = logging.getLogger("maskrcnn_benchmark.inference")
    dataset = data_loader.dataset
    logger.info("Start evaluation on {} dataset({} images).".format(dataset_name, len(dataset)))
    total_timer = Timer()
    inference_timer = Timer()
    total_timer.tic()
    predictions = compute_on_dataset(model, data_loader, postprocessor,
                                     semi_loss, anchor_strides, device,
                                     inference_timer)
    # wait for all processes to complete before measuring the time
    synchronize()
    total_time = total_timer.toc()
    total_time_str = get_time_str(total_time)
    logger.info(
        "Total run time: {} ({} s / img per device, on {} devices)".format(
            total_time_str, total_time * num_devices / len(dataset), num_devices
        )
    )
    total_infer_time = get_time_str(inference_timer.total_time)
    logger.info(
        "Model inference time: {} ({} s / img per device, on {} devices)".format(
            total_infer_time,
            inference_timer.total_time * num_devices / len(dataset),
            num_devices,
        )
    )

    predictions = _accumulate_predictions_from_multiple_gpus(predictions)
    if not is_main_process():
        return

    if output_folder:
        torch.save(predictions, os.path.join(output_folder, "predictions.pth"))

    extra_args = dict(
        box_only=box_only,
        iou_types=iou_types,
        expected_results=expected_results,
        expected_results_sigma_tol=expected_results_sigma_tol,
    )

    return evaluate(dataset=dataset,
                    predictions=predictions,
                    output_folder=output_folder,
                    **extra_args)
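
A hedged call-site sketch for this variant; postprocessor and semi_loss are tse-t-specific objects built elsewhere in that project, and the dataset name and output folder are placeholders:

results = inference(
    model,
    data_loader_val,
    postprocessor,
    semi_loss,
    "voc_2007_test",                         # placeholder dataset name
    output_folder="inference/voc_2007_test",
)
# Only the main process receives evaluation results; other ranks get None
# because of the is_main_process() early return.
if results is not None:
    print(results)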
Code Example #5
def run_test(
        model,
        dataset_name=None,
        im_dir=None,
        test_detection=None,
        word_embeddings=None,
        test_image_id_list=None,
        prior_mask=None,
        action_dic_inv=None,
        output_file=None,
        output_dict_file=None,
        object_thres=0.4,
        human_thres=0.6,
        prior_flag=1,
        device=torch.device("cuda"),
        cfg=None,
):

    logger = logging.getLogger("DRG.inference")
    logger.info("Start evaluation on {} dataset({} images).".format(
        dataset_name, len(test_image_id_list)))
    total_timer = Timer()
    inference_timer = Timer()
    total_timer.tic()

    np.random.seed(cfg.TEST.RNG_SEED)
    detection = []
    detect_human_centric_dict = {}

    for count, image_id in enumerate(tqdm(test_image_id_list)):
        detect_human_centric_dict[image_id] = []
        im_detect(model, im_dir, image_id, test_detection, word_embeddings,
                  prior_mask, action_dic_inv, object_thres, human_thres,
                  prior_flag, detection, detect_human_centric_dict, device,
                  cfg)

    pickle.dump(detect_human_centric_dict, open(output_dict_file, "wb"))
    # wait for all processes to complete before measuring the time
    synchronize()
    total_time = total_timer.toc()
    total_time_str = get_time_str(total_time)

    num_devices = 1
    logger.info(
        "Total run time: {} ({} s / img per device, on {} devices)".format(
            total_time_str, total_time * num_devices / len(test_image_id_list),
            num_devices))
    total_infer_time = get_time_str(inference_timer.total_time)
    logger.info(
        "Model inference time: {} ({} s / img per device, on {} devices)".
        format(
            total_infer_time,
            inference_timer.total_time * num_devices / len(test_image_id_list),
            num_devices,
        ))

    pickle.dump(detection, open(output_file, "wb"))
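
The pickle.dump(..., open(..., "wb")) idiom used throughout these examples leaves closing the file handle to the garbage collector; an equivalent, more idiomatic form:

with open(output_dict_file, "wb") as f:
    pickle.dump(detect_human_centric_dict, f)
with open(output_file, "wb") as f:
    pickle.dump(detection, f)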
Code Example #6
def inference(cfg,
              model,
              data_loader,
              dataset_name,
              iou_types=("bbox", ),
              box_only=False,
              device="cuda",
              draw_preds=False,
              is_target_task=False,
              icwt_21_objs=False,
              compute_average_recall_RPN=False,
              is_train=True,
              result_dir=None,
              evaluate_segmentation=True,
              eval_segm_with_gt_bboxes=False):
    # convert to a torch.device for efficiency
    device = torch.device(device)
    num_devices = get_world_size()
    logger = logging.getLogger("maskrcnn_benchmark.inference")
    dataset = data_loader.dataset
    logger.info("Start evaluation on {} dataset({} images).".format(
        dataset_name, len(dataset)))
    total_timer = Timer()
    inference_timer = Timer()
    total_timer.tic()
    res = compute_predictions(
        cfg,
        dataset,
        model,
        build_transform(cfg),
        icwt_21_objs,
        compute_average_recall_RPN=not is_train,
        is_train=is_train,
        result_dir=result_dir,
        evaluate_segmentation=evaluate_segmentation,
        eval_segm_with_gt_bboxes=eval_segm_with_gt_bboxes)

    synchronize()
    total_time = total_timer.toc()
    total_time_str = get_time_str(total_time)
    logger.info(
        "Total run time: {} ({} s / img per device, on {} devices)".format(
            total_time_str, total_time * num_devices / len(dataset),
            num_devices))

    total_infer_time = get_time_str(inference_timer.total_time)
    logger.info(
        "Model inference time: {} ({} s / img per device, on {} devices)".
        format(
            total_infer_time,
            inference_timer.total_time * num_devices / len(dataset),
            num_devices,
        ))

    return total_time
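
Every example here leans on Timer and get_time_str from maskrcnn_benchmark's utilities. For readers without that repository, a minimal stand-in with the same surface (the real Timer differs in detail, e.g. its toc() can return a per-call average):

import datetime
import time

class Timer:
    def __init__(self):
        self.total_time = 0.0
        self._start = None

    def tic(self):
        self._start = time.time()

    def toc(self):
        self.total_time += time.time() - self._start
        return self.total_time

def get_time_str(seconds):
    # 3661 -> '1:01:01'
    return str(datetime.timedelta(seconds=int(seconds)))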
Code Example #7
def inference(
        model,
        data_loader,
        dataset_name,
        device="cuda"
):
    # convert to a torch.device for efficiency
    device = torch.device(device)
    num_devices = get_world_size()
    logger = logging.getLogger("maskrcnn_benchmark.inference")
    dataset = data_loader.dataset
    logger.info("Start evaluation on {} dataset({} images).".format(dataset_name, len(dataset)))
    total_timer = Timer()
    inference_timer = Timer()
    total_timer.tic()
    predictions = compute_on_dataset(model, data_loader, device, inference_timer)
    # wait for all processes to complete before measuring the time
    synchronize()
    total_time = total_timer.toc()
    total_time_str = get_time_str(total_time)
    logger.info(
        "Total run time: {} ({} s / img per device, on {} devices)".format(
            total_time_str, total_time * num_devices / len(dataset), num_devices
        )
    )
    total_infer_time = get_time_str(inference_timer.total_time)
    logger.info(
        "Model inference time: {} ({} s / img per device, on {} devices)".format(
            total_infer_time,
            inference_timer.total_time * num_devices / len(dataset),
            num_devices,
        )
    )

    predictions = _accumulate_predictions_from_multiple_gpus(predictions)
    if not is_main_process():
        return

    results =  do_voc_evaluation(dataset=dataset,
                                 predictions=predictions,
                                 logger=logger)
    return results
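
compute_on_dataset, which most of these wrappers delegate to, is not included in the excerpts; its usual shape in maskrcnn_benchmark-style code is roughly the following (a simplified sketch: no test-time augmentation, outputs moved to CPU):

import torch

def compute_on_dataset(model, data_loader, device, timer=None):
    model.eval()
    results_dict = {}
    with torch.no_grad():
        for images, targets, image_ids in data_loader:
            if timer:
                timer.tic()
            output = model(images.to(device))
            if timer:
                timer.toc()
            output = [o.to(torch.device("cpu")) for o in output]
            results_dict.update(
                {img_id: result for img_id, result in zip(image_ids, output)})
    return results_dict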
Code Example #8
def run_test(cfg, model, distributed):
    if distributed:
        model = model.module
    torch.cuda.empty_cache()  # TODO check if it helps
    iou_types = ("bbox", )
    if cfg.MODEL.MASK_ON:
        iou_types = iou_types + ("segm", )
    if cfg.MODEL.KEYPOINT_ON:
        iou_types = iou_types + ("keypoints", )
    # output_folders = [None] * len(cfg.DATASETS.TEST)
    # dataset_names = cfg.DATASETS.TEST
    dataset_names = cfg.DATASETS.NAS_VAL if not cfg.NAS.TRAIN_SINGLE_MODEL else cfg.DATASETS.TEST
    output_folders = [None] * len(dataset_names)

    if cfg.OUTPUT_DIR:
        for idx, dataset_name in enumerate(dataset_names):
            output_folder = os.path.join(cfg.OUTPUT_DIR, "inference",
                                         dataset_name)
            mkdir(output_folder)
            output_folders[idx] = output_folder
    data_loaders_val = make_data_loader(cfg,
                                        is_train=False,
                                        is_distributed=distributed)

    if cfg.NAS.TRAIN_SINGLE_MODEL:
        if get_rank() == 0:
            print('==' * 20, 'Evaluating single model...', '==' * 20)
        for output_folder, dataset_name, data_loader_val in zip(
                output_folders, dataset_names, data_loaders_val):
            inference(
                model,
                data_loader_val,
                dataset_name=dataset_name,
                iou_types=iou_types,
                box_only=False
                if cfg.MODEL.RETINANET_ON else cfg.MODEL.RPN_ONLY,
                bbox_aug=cfg.TEST.BBOX_AUG.ENABLED,
                device=cfg.MODEL.DEVICE,
                expected_results=cfg.TEST.EXPECTED_RESULTS,
                expected_results_sigma_tol=cfg.TEST.EXPECTED_RESULTS_SIGMA_TOL,
                output_folder=output_folder,
                c2d_json_path=cfg.MODEL.SEG_BRANCH.JSON_PATH,
                cfg=cfg,
            )
            synchronize()
        if get_rank() == 0:
            if 'coco' in cfg.DATASETS.NAME.lower():
                print('Evaluating panoptic results on COCO...')
                os.system(
                    'sh panoptic_scripts/bash_coco_val_evaluate.sh {} | tee pq_results'
                    .format(cfg.OUTPUT_DIR))
    elif not cfg.NAS.SKIP_NAS_TEST:
        if get_rank() == 0:
            print('==' * 10, 'Start NAS testing', '==' * 10)
        timer = Timer()
        timer.tic()
        searcher = PathPrioritySearch(cfg, base_dir='./nas_test')
        searcher.generate_fair_test(
        )  # load cache results and generate new model for test
        searcher.search(model, output_folders, dataset_names, distributed)
        searcher.save_topk()
        total_time = timer.toc()
        total_time_str = get_time_str(total_time)
        if get_rank() == 0:
            print('Finish NAS testing, total time:{}'.format(total_time_str))
        os._exit(0)
    else:
        print('Skipping NAS testing...')
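
One subtlety in the inference call above: the box_only argument spans two lines and is easy to misread. Spelled out, the conditional expression is:

if cfg.MODEL.RETINANET_ON:
    box_only = False               # RetinaNet heads already emit final boxes
else:
    box_only = cfg.MODEL.RPN_ONLY  # otherwise fall back to RPN-only mode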
Code Example #9
def main():
    parser = argparse.ArgumentParser(
        description="PyTorch Object Detection Inference")
    parser.add_argument(
        "--config-file",
        default=
        "/private/home/fmassa/github/detectron.pytorch_v2/configs/e2e_faster_rcnn_R_50_C4_1x_caffe2.yaml",
        metavar="FILE",
        help="path to config file",
    )
    parser.add_argument("--local_rank", type=int, default=0)
    parser.add_argument(
        "--ckpt",
        help=
        "The path to the checkpoint for test, default is the latest checkpoint.",
        default=None,
    )
    parser.add_argument(
        "opts",
        help="Modify config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER,
    )

    parser.add_argument(
        "--build-model",
        default="",
        metavar="FILE",
        help="path to NAS model build file",
        type=str,
    )

    args = parser.parse_args()

    num_gpus = int(
        os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
    distributed = num_gpus > 1

    if distributed:
        torch.cuda.set_device(args.local_rank)
        torch.distributed.init_process_group(backend="nccl",
                                             init_method="env://")
        synchronize()

    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    save_dir = ""
    logger = setup_logger("maskrcnn_benchmark", save_dir, get_rank())
    logger.info("Using {} GPUs".format(num_gpus))
    logger.info(cfg)

    logger.info("Collecting env info (might take some time)")
    logger.info("\n" + collect_env_info())

    assert len(args.build_model) != 0, 'args.build_model should be provided'
    model_config = json.load(open(args.build_model, 'r'))
    if isinstance(model_config, list):
        assert len(model_config) == 1
        model_config = model_config[0]
    print('Testing single model:', model_config)

    model = build_detection_model(cfg, model_config)
    model.to(cfg.MODEL.DEVICE)

    # Initialize mixed-precision if necessary
    use_mixed_precision = cfg.DTYPE == 'float16'
    amp_handle = amp.init(enabled=use_mixed_precision, verbose=cfg.AMP_VERBOSE)

    output_dir = cfg.OUTPUT_DIR
    checkpointer = DetectronCheckpointer(cfg, model, save_dir=output_dir)
    ckpt = cfg.MODEL.WEIGHT if args.ckpt is None else args.ckpt
    _ = checkpointer.load(ckpt, use_latest=args.ckpt is None)

    iou_types = ("bbox", )
    if cfg.MODEL.MASK_ON:
        iou_types = iou_types + ("segm", )
    if cfg.MODEL.KEYPOINT_ON:
        iou_types = iou_types + ("keypoints", )
    # output_folders = [None] * len(cfg.DATASETS.TEST)
    # dataset_names = cfg.DATASETS.TEST
    dataset_names = cfg.DATASETS.NAS_VAL if not cfg.NAS.TRAIN_SINGLE_MODEL else cfg.DATASETS.TEST
    output_folders = [None] * len(dataset_names)

    if cfg.OUTPUT_DIR:
        for idx, dataset_name in enumerate(dataset_names):
            output_folder = os.path.join(cfg.OUTPUT_DIR, "inference",
                                         dataset_name)
            mkdir(output_folder)
            output_folders[idx] = output_folder
    data_loaders_val = make_data_loader(cfg,
                                        is_train=False,
                                        is_distributed=distributed,
                                        test_only=cfg.TEST_ONLY)

    if cfg.NAS.TRAIN_SINGLE_MODEL:
        if get_rank() == 0:
            print('==' * 20, 'Evaluating single model...', '==' * 20)
        for output_folder, dataset_name, data_loader_val in zip(
                output_folders, dataset_names, data_loaders_val):
            inference(
                model,
                data_loader_val,
                dataset_name=dataset_name,
                iou_types=iou_types,
                box_only=False
                if cfg.MODEL.RETINANET_ON else cfg.MODEL.RPN_ONLY,
                bbox_aug=cfg.TEST.BBOX_AUG.ENABLED,
                device=cfg.MODEL.DEVICE,
                expected_results=cfg.TEST.EXPECTED_RESULTS,
                expected_results_sigma_tol=cfg.TEST.EXPECTED_RESULTS_SIGMA_TOL,
                output_folder=output_folder,
                c2d_json_path=cfg.MODEL.SEG_BRANCH.JSON_PATH,
                cfg=cfg,
                test_only=cfg.TEST_ONLY)
            synchronize()
    elif not cfg.NAS.SKIP_NAS_TEST:
        if get_rank() == 0:
            print('==' * 10, 'Start NAS testing', '==' * 10)
        timer = Timer()
        timer.tic()
        searcher = PathPrioritySearch(cfg, './nas_test')
        searcher.generate_fair_test(
        )  # load cache results and generate new model for test
        searcher.search(model, output_folders, dataset_names, distributed)
        searcher.save_topk()
        total_time = timer.toc()
        total_time_str = get_time_str(total_time)
        if get_rank() == 0:
            print('Finish NAS testing, total time:{}'.format(total_time_str))
        return
    else:
        print('Skipping NAS testing...')
Code Example #10
def run_test(model,
             dataset_name=None,
             im_dir=None,
             test_detection=None,
             word_embeddings=None,
             test_image_id_list=None,
             prior_mask=None,
             action_dic_inv=None,
             output_file=None,
             output_dict_file=None,
             object_thres=0.4,
             human_thres=0.6,
             prior_flag=1,
             device=torch.device("cuda"),
             cfg=None):

    logger = logging.getLogger("DRG.inference")
    logger.info("Start evaluation on {} dataset({} images).".format(
        dataset_name, len(test_image_id_list)))
    total_timer = Timer()
    inference_timer = Timer()
    total_timer.tic()

    np.random.seed(cfg.TEST.RNG_SEED)
    detection = []
    detect_object_centric_dict = {}

    for count, image_id in enumerate(tqdm(test_image_id_list)):
        detect_object_centric_dict[image_id] = []
        im_detect(model, im_dir, image_id, test_detection, word_embeddings,
                  prior_mask, action_dic_inv, object_thres, human_thres,
                  prior_flag, detection, detect_object_centric_dict, device,
                  cfg)

    pickle.dump(detect_object_centric_dict, open(output_dict_file, "wb"))

    for image_id, detected_lists in detect_object_centric_dict.items():
        visited_human_list = []
        for detected_human in detected_lists:
            exist_human = 0
            for visited_human in visited_human_list:
                if bbox_iou(visited_human,
                            detected_human['person_box']) > 0.98:
                    exist_human = 1
                    break
            if exist_human == 1:
                continue
            visited_human_list.append(detected_human['person_box'])
            dic = {}
            dic['image_id'] = image_id
            dic['person_box'] = detected_human['person_box']
            prediction_H = detected_human['prediction_H']
            person_score = detected_human['person_score']
            Score_obj_list = []
            for detected_object in detected_lists:
                if bbox_iou(detected_object['person_box'],
                            detected_human['person_box']) > 0.98:
                    Score_obj_list.append(detected_object['Score_obj'])

            Score_obj = np.asarray(Score_obj_list)
            max_idx = np.argmax(Score_obj, 0)[4:]

            # agent mAP
            for i in range(29):
                # walk, smile, run, stand
                if (i == 3) or (i == 17) or (i == 22) or (i == 27):
                    agent_name = action_dic_inv[i] + '_agent'
                    dic[agent_name] = person_score * prediction_H[i]
                    continue

                # cut
                if i == 2:
                    agent_name = 'cut_agent'
                    dic[agent_name] = person_score * max(
                        Score_obj[max_idx[2]][4 + 2],
                        Score_obj[max_idx[4]][4 + 4])
                    continue
                if i == 4:
                    continue

                # eat
                if i == 9:
                    agent_name = 'eat_agent'
                    dic[agent_name] = person_score * max(
                        Score_obj[max_idx[9]][4 + 9],
                        Score_obj[max_idx[16]][4 + 16])
                    continue
                if i == 16:
                    continue

                # hit
                if i == 19:
                    agent_name = 'hit_agent'
                    dic[agent_name] = person_score * max(
                        Score_obj[max_idx[19]][4 + 19],
                        Score_obj[max_idx[20]][4 + 20])
                    continue
                if i == 20:
                    continue

                # talk_on_phone and work_on_computer are saved explicitly because their action names contain '_'
                if i == 6:
                    agent_name = 'talk_on_phone_agent'
                    dic[agent_name] = person_score * Score_obj[max_idx[i]][4 + i]
                    continue

                if i == 8:
                    agent_name = 'work_on_computer_agent'
                    dic[agent_name] = person_score * Score_obj[max_idx[i]][4 + i]
                    continue

                # all the rest
                agent_name = action_dic_inv[i].split("_")[0] + '_agent'
                dic[agent_name] = person_score * Score_obj[max_idx[i]][4 + i]

            # role mAP
            for i in range(29):
                # walk, smile, run, stand. Won't contribute to role mAP
                if (i == 3) or (i == 17) or (i == 22) or (i == 27):
                    dic[action_dic_inv[i]] = np.append(
                        np.full(4, np.nan).reshape(1, 4),
                        person_score * prediction_H[i])
                    continue

                # Impossible to perform this action
                if person_score * Score_obj[max_idx[i]][4 + i] == 0:
                    dic[action_dic_inv[i]] = np.append(
                        np.full(4, np.nan).reshape(1, 4),
                        person_score * Score_obj[max_idx[i]][4 + i])

                # Action with >0 score
                else:
                    dic[action_dic_inv[i]] = np.append(
                        Score_obj[max_idx[i]][:4],
                        person_score * Score_obj[max_idx[i]][4 + i])

            detection.append(dic)

    # wait for all processes to complete before measuring the time
    synchronize()
    total_time = total_timer.toc()
    total_time_str = get_time_str(total_time)

    num_devices = 1
    logger.info(
        "Total run time: {} ({} s / img per device, on {} devices)".format(
            total_time_str, total_time * num_devices / len(test_image_id_list),
            num_devices))
    total_infer_time = get_time_str(inference_timer.total_time)
    logger.info(
        "Model inference time: {} ({} s / img per device, on {} devices)".
        format(
            total_infer_time,
            inference_timer.total_time * num_devices / len(test_image_id_list),
            num_devices,
        ))

    pickle.dump(detection, open(output_file, "wb"))
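
bbox_iou is called above but not shown in the excerpt; a minimal sketch assuming [x1, y1, x2, y2] boxes (the DRG repository's own helper may differ in edge handling):

def bbox_iou(box_a, box_b):
    # Intersection over union of two [x1, y1, x2, y2] boxes.
    ix1, iy1 = max(box_a[0], box_b[0]), max(box_a[1], box_b[1])
    ix2, iy2 = min(box_a[2], box_b[2]), min(box_a[3], box_b[3])
    inter = max(0.0, ix2 - ix1) * max(0.0, iy2 - iy1)
    area_a = (box_a[2] - box_a[0]) * (box_a[3] - box_a[1])
    area_b = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1])
    union = area_a + area_b - inter
    return inter / union if union > 0 else 0.0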
Code Example #11
def inference(
    model,
    data_loader,
    dataset_name,
    iou_types=("bbox", ),
    box_only=False,
    device=torch.device("cuda"),
    expected_results=0,
    expected_results_sigma_tol=0,
    output_folder=None,
    cfg=None,
    bbox_aug=False,
    visualize_results=False,
    visualization_label="coco",
    only_visualization=False,
):
    num_devices = get_world_size()
    logger = logging.getLogger("maskrcnn_benchmark.inference")
    dataset = data_loader.dataset
    logger.info("Start evaluation on {} dataset({} images).".format(
        dataset_name, len(dataset)))

    total_timer = Timer()
    inference_timer = Timer()
    total_timer.tic()
    roi_predictions, img_predictions, attention_maps = compute_on_dataset(
        model, data_loader, device, bbox_aug=bbox_aug, timer=inference_timer)

    # wait for all processes to complete before measuring the time
    synchronize()

    total_time = total_timer.toc()
    total_time_str = get_time_str(total_time)
    logger.info(
        "Total run time: {} ({} s / img per device, on {} devices)".format(
            total_time_str, total_time * num_devices / len(dataset),
            num_devices))
    total_infer_time = get_time_str(inference_timer.total_time)
    logger.info(
        "Model inference time: {} ({} s / img per device, on {} devices)".
        format(
            total_infer_time,
            inference_timer.total_time * num_devices / len(dataset),
            num_devices,
        ))

    if roi_predictions:
        roi_predictions = _accumulate_predictions_from_multiple_gpus(
            roi_predictions)
    if img_predictions:
        img_predictions = _accumulate_predictions_from_multiple_gpus(
            img_predictions)
    if attention_maps:
        attention_maps = _accumulate_predictions_from_multiple_gpus(
            attention_maps)

    if not is_main_process():
        return

    if roi_predictions and len(roi_predictions) > 0:
        for prediction in roi_predictions:
            if prediction.has_field("pred_scores"):
                prediction.add_field('second_scores',
                                     prediction.get_field('pred_scores'))
                del prediction.extra_fields["pred_scores"]
            if prediction.has_field("pred_labels"):
                prediction.add_field('second_labels',
                                     prediction.get_field('pred_labels'))
                del prediction.extra_fields["pred_labels"]

        if output_folder:
            torch.save(roi_predictions,
                       os.path.join(output_folder, "roi_predictions.pth"))

        print('Visualize results')
        if output_folder and visualize_results:
            categories = import_file(
                "maskrcnn_benchmark.data.datasets.categories.{}_categories".
                format(visualization_label),
                os.path.join(
                    os.path.dirname(os.path.dirname(cfg.PATHS_CATALOG)),
                    'data', 'categories',
                    '{}_categories.py'.format(visualization_label)), True)
            visualizer = Visualizer(categories=categories.CATEGORIES, cfg=cfg)
            visualizer.visualize_attentions(
                attention_maps, dataset,
                os.path.join(output_folder, 'attention_map'))
            visualizer.visualize_predictions(
                roi_predictions, dataset,
                os.path.join(output_folder, 'visualization'))
            if only_visualization:
                return

        extra_args = dict(
            box_only=box_only,
            iou_types=iou_types,
            expected_results=expected_results,
            expected_results_sigma_tol=expected_results_sigma_tol,
        )

        print('ROI: Evaluate')
        evaluate_roi(dataset=dataset,
                     predictions=roi_predictions,
                     output_folder=output_folder,
                     **extra_args)

    if img_predictions and len(img_predictions) > 0:
        if output_folder:
            torch.save(img_predictions,
                       os.path.join(output_folder, "img_predictions.pth"))
        print('IMAGE: Evaluate')
        evaluate_img(dataset=dataset,
                     predictions=img_predictions,
                     output_folder=output_folder)
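
A hedged invocation sketch for this visualization-aware variant; every value below is a placeholder, not something taken from the source project:

inference(
    model,
    data_loader_val,
    dataset_name="coco_2017_val",
    iou_types=("bbox", "segm"),
    output_folder="inference/coco_2017_val",
    cfg=cfg,
    visualize_results=True,
    visualization_label="coco",
    only_visualization=False,
)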