Example #1
0
def inference(
    model,
    data_loader,
    dataset_name,
    iou_types=("bbox", ),
    box_only=False,
    device="cuda",
    expected_results=(),
    expected_results_sigma_tol=4,
    output_folder=None,
    skip_eval=False,
):
    """Run ``model`` over ``data_loader``, gather predictions from all ranks,
    and (optionally) evaluate them.

    Args:
        model: detector passed to ``compute_on_dataset``.
        data_loader: iterable over the evaluation dataset.
        dataset_name (str): name used only for logging.
        iou_types (tuple): IoU types forwarded to ``evaluate``.
        box_only (bool): forwarded to ``evaluate``.
        device (str or torch.device): device the model runs on.
        expected_results: regression-check values forwarded to ``evaluate``.
        expected_results_sigma_tol (int): tolerance forwarded to ``evaluate``.
        output_folder (str or None): if set, predictions are saved there.
        skip_eval (bool): if True, stop after (optionally) saving predictions.

    Returns:
        Result of ``evaluate`` on the main process; None on other ranks or
        when ``skip_eval`` is set.
    """
    # convert to a torch.device for efficiency
    device = torch.device(device)
    num_devices = (torch.distributed.get_world_size()
                   if torch.distributed.is_initialized() else 1)
    logger = logging.getLogger("maskrcnn_benchmark.inference")
    dataset = data_loader.dataset
    logger.info("Start evaluation on {} dataset({} images).".format(
        dataset_name, len(dataset)))
    start_time = time.time()
    predictions = compute_on_dataset(model, data_loader, device)
    # wait for all processes to complete before measuring the time
    synchronize()
    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=total_time))
    logger.info(
        "Total inference time: {} ({} s / img per device, on {} devices)".
        format(total_time_str, total_time * num_devices / len(dataset),
               num_devices))

    predictions = _accumulate_predictions_from_multiple_gpus(predictions)
    if not is_main_process():
        return

    if output_folder:
        torch.save(predictions, os.path.join(output_folder, "predictions.pth"))

    if skip_eval:
        # BUGFIX: the original unconditionally formatted
        # os.path.join(output_folder, ...) here, which raises TypeError when
        # output_folder is None and wrongly claimed predictions were stored.
        if output_folder:
            logger.info("Skipping evaluation. Stored predictions to {}".format(
                os.path.join(output_folder, "predictions.pth")))
        else:
            logger.info("Skipping evaluation. No output_folder given, "
                        "predictions were not stored.")
        return

    extra_args = dict(
        box_only=box_only,
        iou_types=iou_types,
        expected_results=expected_results,
        expected_results_sigma_tol=expected_results_sigma_tol,
    )

    return evaluate(dataset=dataset,
                    predictions=predictions,
                    output_folder=output_folder,
                    **extra_args)
Example #2
0
def main():
    """Load saved predictions for a slice of the dataset, keep the top-12
    scoring boxes per image, resize them, and run COCO-style evaluation.
    """
    config_file = './semi_test/retinanet_R-50-FPN_1x_semi.yaml'
    output_folder = './output_folder'
    str_pth = "./model_path"

    cfg.merge_from_file(config_file)

    data_loaders_val = make_data_loader(cfg, is_train=False, is_distributed=False)
    dataset = data_loaders_val[0].dataset

    # evaluate only images [range_start, range_end) of the dataset
    range_start = 2
    range_end = 3

    predictions = torch.load(str_pth)[range_start:range_end]
    dataset_imgs = {}
    id_to_img_map = {}

    for idx, _prediction in enumerate(predictions):
        # keep the 12 highest-scoring detections, best first
        ind_sort = _prediction.get_field('scores').argsort(descending=True)[:12]
        _prediction.bbox = _prediction.bbox[ind_sort]
        _prediction.extra_fields['scores'] = _prediction.extra_fields['scores'][ind_sort]
        _prediction.extra_fields['labels'] = _prediction.extra_fields['labels'][ind_sort]
        # BUGFIX: resize() returns a new BoxList; the original rebound the
        # loop variable and discarded the result, so evaluation ran on the
        # un-resized boxes. Store the resized prediction back into the list.
        predictions[idx] = _prediction.resize([640, 478])

    # restrict the dataset's COCO image index to the evaluated slice and
    # renumber the ids so they line up with the sliced predictions
    iCount = 0
    for i in range(range_start, range_end):
        _id = dataset.id_to_img_map[i]
        id_to_img_map[iCount] = _id
        dataset_imgs[_id] = dataset.coco.imgs[_id]
        iCount += 1

    dataset.coco.imgs = dataset_imgs
    dataset.id_to_img_map = id_to_img_map

    extra_args = dict(
        box_only=False,
        iou_types=('bbox',),
        expected_results=[],
        expected_results_sigma_tol=4
        )

    if not os.path.exists(output_folder):
        os.mkdir(output_folder)
    r = evaluate(dataset=dataset,
                 predictions=predictions,
                 output_folder=output_folder,
                 **extra_args)
    check_expected_results(r, [], 4)

    print(r[0])
Example #3
0
def inference(
        model,
        data_loader,
        dataset_name,
        iou_types=("bbox", ),
        box_only=False,
        device="cuda",
        expected_results=(),
        expected_results_sigma_tol=4,
        output_folder=None,
):
    """Run ``model`` over ``data_loader`` and evaluate the predictions.

    For COCO datasets an optimised path is used: predictions are NOT
    gathered onto the main process before calling ``evaluate``; every rank
    proceeds with its own shard. For all other datasets, predictions are
    accumulated on the main process and the other ranks return None.

    Returns the result of ``evaluate`` (on ranks that reach it).
    """
    # convert to a torch.device for efficiency
    device = torch.device(device)
    # world size when torch.distributed is initialised, otherwise single device
    num_devices = (torch.distributed.get_world_size()
                   if torch.distributed.is_initialized() else 1)
    logger = logging.getLogger("maskrcnn_benchmark.inference")
    dataset = data_loader.dataset
    logger.info("Start evaluation on {} dataset({} images).".format(
        dataset_name, len(dataset)))
    start_time = time.time()
    predictions = compute_on_dataset(model, data_loader, device)
    # wait for all processes to complete before measuring the time
    synchronize()
    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=total_time))
    logger.info(
        "Total inference time: {} ({} s / img per device, on {} devices)".
        format(total_time_str, total_time * num_devices / len(dataset),
               num_devices))

    # We have an optimised path for COCO which takes advantage of more parallelism.
    # If not using COCO, fall back to regular path: gather predictions from all ranks
    # and call evaluate on those results.
    if not isinstance(dataset, datasets.COCODataset):
        predictions = _accumulate_predictions_from_multiple_gpus(predictions)
        if not is_main_process():
            return

    # only the main process writes the predictions file
    if output_folder and is_main_process():
        torch.save(predictions, os.path.join(output_folder, "predictions.pth"))

    extra_args = dict(
        box_only=box_only,
        iou_types=iou_types,
        expected_results=expected_results,
        expected_results_sigma_tol=expected_results_sigma_tol,
    )

    return evaluate(dataset=dataset,
                    predictions=predictions,
                    output_folder=output_folder,
                    **extra_args)
def inference(
        model,
        data_loader,
        dataset_name,
        iou_types=("bbox",),
        box_only=False,
        device="cuda",
        expected_results=(),
        expected_results_sigma_tol=4,
        output_folder=None,
):
    """Run ``model`` over ``data_loader``, gather predictions on the main
    process, and evaluate them.

    Returns the result of ``evaluate`` on the main process, None elsewhere.
    """
    # convert to a torch.device for efficiency
    device = torch.device(device)
    # FIX: torch.distributed.deprecated was removed from PyTorch; use the
    # current torch.distributed API (same semantics: world size when a
    # process group is initialised, else a single device).
    num_devices = (
        torch.distributed.get_world_size()
        if torch.distributed.is_initialized()
        else 1
    )
    logger = logging.getLogger("maskrcnn_benchmark.inference")
    dataset = data_loader.dataset
    logger.info("Start evaluation on {} dataset({} images).".format(dataset_name, len(dataset)))
    start_time = time.time()
    predictions = compute_on_dataset(model, data_loader, device)
    # wait for all processes to complete before measuring the time
    synchronize()
    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=total_time))
    logger.info(
        "Total inference time: {} ({} s / img per device, on {} devices)".format(
            total_time_str, total_time * num_devices / len(dataset), num_devices
        )
    )

    predictions = _accumulate_predictions_from_multiple_gpus(predictions)
    if not is_main_process():
        return

    if output_folder:
        torch.save(predictions, os.path.join(output_folder, "predictions.pth"))

    extra_args = dict(
        box_only=box_only,
        iou_types=iou_types,
        expected_results=expected_results,
        expected_results_sigma_tol=expected_results_sigma_tol,
    )

    return evaluate(dataset=dataset,
                    predictions=predictions,
                    output_folder=output_folder,
                    **extra_args)
def eval(
        predictions,
        data_loader,
        iou_types=("bbox", ),
        box_only=False,
        expected_results=(),
        expected_results_sigma_tol=4,
        output_folder=None,
):
    """Score ``predictions`` against the dataset behind ``data_loader``.

    Thin convenience wrapper around ``evaluate``.
    NOTE: the name shadows the ``eval`` builtin; kept for compatibility.
    """
    return evaluate(
        dataset=data_loader.dataset,
        predictions=predictions,
        output_folder=output_folder,
        box_only=box_only,
        iou_types=iou_types,
        expected_results=expected_results,
        expected_results_sigma_tol=expected_results_sigma_tol,
    )
Example #6
0
def test_while_train(cfg, model, distributed, logger, curr_iter, val_tags,
                     data_loader, output_folder):
    """Run validation during training for a visual-grounding model.

    Puts ``model`` in eval mode, runs it over ``data_loader`` collecting
    per-(image, sentence) results on CPU, gathers them across ranks, saves
    them to ``output_folder`` (pickle + torch formats), then evaluates and
    logs accuracies to the logger / TensorBoard handler.

    Returns None; all ranks except the main process return early after
    the gather.
    """
    torch.cuda.empty_cache()
    logger.info("start testing while training...")

    # only the first one for test

    model.eval()
    results_dict = {}
    device = torch.device('cuda')
    cpu_device = torch.device("cpu")
    meters = MetricLogger(delimiter="  ", )

    for bid, (images, targets, image_ids, phrase_ids, sent_ids, sentence,
              precompute_bbox, precompute_score, feature_map, vocab_label_elmo,
              sent_sg, topN_box) in enumerate(tqdm(data_loader)):

        # if bid>3:
        #     break
        vocab_label_elmo = [vocab.to(device) for vocab in vocab_label_elmo]
        features_list = [feat.to(device) for feat in feature_map]

        with torch.no_grad():

            loss_dict, results = model(images, features_list, targets,
                                       phrase_ids, sentence, precompute_bbox,
                                       precompute_score, image_ids,
                                       vocab_label_elmo, sent_sg, topN_box)

            # average losses across ranks for logging
            loss_dict_reduced = reduce_loss_dict(loss_dict)
            losses_reduced = sum(loss for loss in loss_dict_reduced.values())

            meters.update(loss=losses_reduced, **loss_dict_reduced)
            # collect and move result to cpu memory
            moved_res = []

            # the tuple layout of `results` depends on the model config;
            # each branch below unpacks the matching layout
            if cfg.MODEL.VG.TWO_STAGE:

                if cfg.MODEL.RELATION_ON and cfg.MODEL.RELATION.USE_RELATION_CONST:

                    batch_gt_boxes, batch_pred_box, batch_pred_box_topN, batch_pred_box_det,\
                    batch_pred_similarity, batch_pred_similarity_topN, batch_rel_pred_similarity, batch_rel_gt_label, batch_topN_boxes, batch_reg_offset_topN, batch_rel_score_mat=results

                    for idx, each_gt_boxes in enumerate(batch_gt_boxes):
                        moved_res.append(
                            (each_gt_boxes.to(cpu_device),
                             batch_pred_box[idx].to(cpu_device),
                             batch_pred_box_topN[idx].to(cpu_device),
                             batch_pred_box_det[idx].to(cpu_device),
                             batch_pred_similarity[idx].to(cpu_device),
                             batch_pred_similarity_topN[idx].to(cpu_device),
                             batch_rel_pred_similarity[idx].to(cpu_device),
                             batch_rel_gt_label[idx].to(cpu_device),
                             batch_topN_boxes[idx].to(cpu_device),
                             batch_reg_offset_topN[idx].to(cpu_device),
                             batch_rel_score_mat[idx]))

                else:
                    batch_gt_boxes, batch_pred_box, batch_pred_box_topN, batch_pred_box_det, batch_pred_similarity = results
                    for idx, each_gt_boxes in enumerate(batch_gt_boxes):
                        moved_res.append(
                            (each_gt_boxes.to(cpu_device),
                             batch_pred_box[idx].to(cpu_device),
                             batch_pred_box_topN[idx].to(cpu_device),
                             batch_pred_box_det[idx].to(cpu_device),
                             batch_pred_similarity[idx].to(cpu_device)))

            else:
                batch_gt_boxes, batch_pred_box, batch_pred_box_det, batch_pred_similarity = results
                for idx, each_gt_boxes in enumerate(batch_gt_boxes):
                    moved_res.append(
                        (each_gt_boxes.to(cpu_device),
                         batch_pred_box[idx].to(cpu_device),
                         batch_pred_box_det[idx].to(cpu_device),
                         batch_pred_similarity[idx].to(cpu_device)))

            # key results by "<image>_<sentence>" so they stay unique per pair
            results_dict.update({
                img_id + '_' + sent_id: result
                for img_id, sent_id, result in zip(image_ids, sent_ids,
                                                   moved_res)
            })

    synchronize()

    (predictions,
     image_ids) = _accumulate_predictions_from_multiple_gpus(results_dict)

    # NOTE(review): saving happens on every rank before the main-process
    # check below — confirm the gather leaves non-main ranks with data
    # that is safe/intended to write.
    if output_folder:
        with open(
                os.path.join(output_folder,
                             "predictions_{}.pkl".format(curr_iter)),
                'wb') as f:
            pickle.dump(predictions, f)
        torch.save(
            predictions,
            os.path.join(output_folder,
                         "predictions_{}.pth".format(curr_iter)))

    torch.cuda.empty_cache()
    if not is_main_process():
        return

    logger.info('Total items num is {}'.format(len(predictions)))

    # with open(os.path.join(cfg.OUTPUT_DIR, 'prediction.pkl'), 'wb') as handle:
    #     pickle.dump(predictions, handle, protocol=pickle.HIGHEST_PROTOCOL)

    iou_types = ("bbox", )
    if cfg.MODEL.MASK_ON:
        iou_types = iou_types + ("segm", )
    if cfg.MODEL.KEYPOINT_ON:
        iou_types = iou_types + ("keypoints", )
    # NOTE(review): box_only is computed here but extra_args below hard-codes
    # box_only=False, so this value is never used — confirm which is intended.
    box_only = False if cfg.MODEL.RETINANET_ON else cfg.MODEL.RPN_ONLY
    expected_results = cfg.TEST.EXPECTED_RESULTS
    expected_results_sigma_tol = cfg.TEST.EXPECTED_RESULTS_SIGMA_TOL

    extra_args = dict(
        box_only=False,
        iou_types=iou_types,
        expected_results=expected_results,
        expected_results_sigma_tol=expected_results_sigma_tol,
    )

    acc, acc_topN, acc_det, acc_rel_softmax = evaluate(
        dataset=data_loader.dataset,
        predictions=predictions,
        image_ids=image_ids,
        curr_iter=curr_iter,
        output_folder=None,
        **extra_args)

    # push the averaged validation losses to the TensorBoard handler
    record = {val_tags[k]: v for (k, v) in meters.meters.items()}
    logger.log(TFBoardHandler_LEVEL, (record, curr_iter))
    logger.info("current accuracy is: {}".format(acc))
    logger.info("current topN accuracy is: {}".format(acc_topN))
    logger.info("current accuracy with detection score is: {}".format(acc_det))
    logger.info(
        "current rel constrain accuracy is: {}".format(acc_rel_softmax))
    logger.log(TFBoardHandler_LEVEL,
               ({
                   val_tags['acc']: acc,
                   val_tags['acc_topN']: acc_topN,
                   val_tags['acc_det']: acc_det,
                   val_tags['acc_rel_softmax']: acc_rel_softmax
               }, curr_iter))
    logger.info("test done !")
Example #7
0
# Script: re-evaluate saved BBAM Mask R-CNN predictions with CRF
# post-processing, writing results under <output_folder>/after_crf.
output_name = 'BBAM_Mask_RCNN_logs_mcg85'
output_folder = '%s/inference/voc_2012_val_cocostyle/' % output_name

extra_args = dict(
    box_only=False,
    iou_types=["bbox", "segm"],
    expected_results=[],
    expected_results_sigma_tol=4,
)
# NOTE(review): dataset_names is assigned but never used below — confirm
# whether it was meant to drive a loop over test datasets.
dataset_names = cfg.DATASETS.TEST
data_loaders = make_data_loader(cfg, is_train=False, is_distributed=False)[0]
dataset = data_loaders.dataset
print(len(dataset))
# load previously computed predictions rather than rerunning the model
predictions = torch.load(os.path.join(output_folder, "predictions.pth"))
upscale = 0.6

output_folder = output_folder + 'after_crf'
if not os.path.exists(output_folder):
    os.makedirs(output_folder)
evaluate(
    dataset=dataset,
    predictions=predictions,
    output_folder=output_folder,
    do_CRF=True,
    upscale=upscale,
    **extra_args,
)
# search "# job lib multiprocess here" to implement multi-processing

print(output_folder)
# print("17500")
Example #8
0
def inference(
        model,
        data_loader,
        dataset_name,
        iou_types=("bbox", ),
        box_only=False,
        device="cuda",
        expected_results=(),
        expected_results_sigma_tol=4,
        output_folder=None,
):
    """Run ``model`` over ``data_loader`` and evaluate visual-grounding
    accuracies.

    Gathers (predictions, image_ids) across ranks; only the main process
    evaluates and logs the accuracies. Returns None on all ranks — results
    are only logged here, not returned.
    """
    # convert to a torch.device for efficiency
    device = torch.device(device)
    num_devices = get_world_size()
    logger = logging.getLogger("maskrcnn_benchmark.inference")
    dataset = data_loader.dataset
    logger.info("Start evaluation on {} dataset({} images).".format(
        dataset_name, len(dataset)))
    total_timer = Timer()
    inference_timer = Timer()
    total_timer.tic()
    # this variant of compute_on_dataset also returns per-batch losses,
    # which are not used further here
    predictions, losses = compute_on_dataset(model, data_loader, device,
                                             inference_timer)
    # wait for all processes to complete before measuring the time
    synchronize()
    total_time = total_timer.toc()
    total_time_str = get_time_str(total_time)

    logger.info(
        "Total run time: {} ({} s / img per device, on {} devices)".format(
            total_time_str, total_time * num_devices / len(dataset),
            num_devices))
    total_infer_time = get_time_str(inference_timer.total_time)
    logger.info(
        "Model inference time: {} ({} s / img per device, on {} devices)".
        format(
            total_infer_time,
            inference_timer.total_time * num_devices / len(dataset),
            num_devices,
        ))

    # the gather returns both the merged predictions and their image ids
    (predictions,
     image_ids) = _accumulate_predictions_from_multiple_gpus(predictions)
    # torch.cuda.empty_cache()
    if not is_main_process():
        return
    logger.info('Total items num is {}'.format(len(predictions)))

    if output_folder:
        torch.save(predictions, os.path.join(output_folder, "predictions.pth"))

    extra_args = dict(
        box_only=box_only,
        iou_types=iou_types,
        expected_results=expected_results,
        expected_results_sigma_tol=expected_results_sigma_tol,
    )

    acc, acc_topN, acc_det, acc_rel_softmax = evaluate(
        dataset=data_loader.dataset,
        predictions=predictions,
        image_ids=image_ids,
        curr_iter='final',
        output_folder=None,
        **extra_args)
    logger.info("current accuracy is: {}".format(acc))
    logger.info("current topN accuracy is: {}".format(acc_topN))
    logger.info("current accuracy with detection score is: {}".format(acc_det))
    logger.info("test done !")
Example #9
0
def inference(
        model,
        cfg,
        data_loader,
        dataset_name,
        iou_types=("bbox",),
        box_only=False,
        bbox_aug=False,
        device="cuda",
        expected_results=(),
        expected_results_sigma_tol=4,
        output_folder=None,
        eval_attributes=False,
        save_predictions=False,
        skip_performance_eval=False,
        labelmap_file='',
):
    """Run ``model`` over ``data_loader`` and evaluate the predictions.

    Supports optional bbox augmentation, saving predictions as .pth and/or
    TSV (controlled by cfg.TEST.SAVE_RESULTS_TO_TSV), and skipping the
    performance evaluation entirely.

    Returns the result of ``evaluate`` on the main process, or None on
    other ranks / when ``skip_performance_eval`` is set.
    """
    # convert to a torch.device for efficiency
    device = torch.device(device)
    num_devices = get_world_size()
    logger = logging.getLogger("maskrcnn_benchmark.inference")
    dataset = data_loader.dataset
    logger.info("Start evaluation on {} dataset({} images).".format(dataset_name, len(dataset)))
    total_timer = Timer()
    inference_timer = Timer()
    total_timer.tic()
    predictions = compute_on_dataset(model, data_loader, device, bbox_aug, inference_timer)
    # wait for all processes to complete before measuring the time
    synchronize()
    total_time = total_timer.toc()
    total_time_str = get_time_str(total_time)
    logger.info(
        "Total run time: {} ({} s / img per device, on {} devices)".format(
            total_time_str, total_time * num_devices / len(dataset), num_devices
        )
    )
    total_infer_time = get_time_str(inference_timer.total_time)
    logger.info(
        "Model inference time: {} ({} s / img per device, on {} devices)".format(
            total_infer_time,
            inference_timer.total_time * num_devices / len(dataset),
            num_devices,
        )
    )

    # gather on CPU when configured, to avoid GPU memory pressure
    predictions = _accumulate_predictions_from_multiple_gpus(predictions, cfg.TEST.GATHER_ON_CPU)

    if not is_main_process():
        return

    if output_folder and save_predictions:
        torch.save(predictions, os.path.join(output_folder, "predictions.pth"))
    
    if output_folder and cfg.TEST.SAVE_RESULTS_TO_TSV:
        logger.info("Convert prediction results to tsv format and save.")
        # attribute evaluation uses forced boxes, hence the distinct filename
        output_tsv_name = 'predictions_forcebox.tsv' if eval_attributes else 'predictions.tsv'
        convert_predictions_to_tsv(
            predictions, dataset, output_folder,
            data_subset=cfg.TEST.TSV_SAVE_SUBSET,
            labelmap_file=labelmap_file,
            output_tsv_name=output_tsv_name,
            relation_on=cfg.MODEL.RELATION_ON,
        )
    
    if skip_performance_eval:
        logger.info("Skip performance evaluation and return.")
        return

    extra_args = dict(
        box_only=box_only,
        iou_types=iou_types,
        expected_results=expected_results,
        expected_results_sigma_tol=expected_results_sigma_tol,
        save_predictions=save_predictions
    )

    return evaluate(dataset=dataset,
                    predictions=predictions,
                    output_folder=output_folder,
                    **extra_args)
Example #10
0
def main():
    """Command-line entry point for (optionally multi-scale) inference.

    Parses args, sets up distributed training if WORLD_SIZE > 1, builds the
    model from the config, loads weights, runs inference on each test
    dataset, and evaluates — with multi-scale bbox voting when
    cfg.TEST.MULTI_SCALE is set. Returns 0.
    """
    parser = argparse.ArgumentParser(
        description="PyTorch Object Detection Inference")
    parser.add_argument(
        "--config-file",
        default=
        "/private/home/fmassa/github/detectron.pytorch_v2/configs/e2e_faster_rcnn_R_50_C4_1x_caffe2.yaml",
        metavar="FILE",
        help="path to config file",
    )
    parser.add_argument("--local_rank", type=int, default=0)
    parser.add_argument(
        "opts",
        help="Modify config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER,
    )

    args = parser.parse_args()

    # WORLD_SIZE is set by the torch distributed launcher
    num_gpus = int(
        os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
    distributed = num_gpus > 1

    if distributed:
        torch.cuda.set_device(args.local_rank)
        torch.distributed.init_process_group(backend="nccl",
                                             init_method="env://")

    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    save_dir = ""
    logger = setup_logger("maskrcnn_benchmark", save_dir, get_rank())
    logger.info("Using {} GPUs".format(num_gpus))
    logger.info(cfg)

    logger.info("Collecting env info (might take some time)")
    logger.info("\n" + collect_env_info())

    model = build_detection_model(cfg)
    model.to(cfg.MODEL.DEVICE)

    output_dir = cfg.OUTPUT_DIR
    checkpointer = DetectronCheckpointer(cfg, model, save_dir=output_dir)
    _ = checkpointer.load(cfg.MODEL.WEIGHT)

    iou_types = ("bbox", )
    if cfg.MODEL.MASK_ON:
        iou_types = iou_types + ("segm", )
    # one output folder per test dataset (None when OUTPUT_DIR is unset)
    output_folders = [None] * len(cfg.DATASETS.TEST)
    dataset_names = cfg.DATASETS.TEST
    if cfg.OUTPUT_DIR:
        for idx, dataset_name in enumerate(dataset_names):
            output_folder = os.path.join(cfg.OUTPUT_DIR, "inference",
                                         dataset_name)
            mkdir(output_folder)
            output_folders[idx] = output_folder

    if cfg.TEST.MULTI_SCALE:
        # build one data loader per (min, max) test size; the config is
        # briefly defrosted to set the sizes, then re-frozen
        data_loaders_val = []
        for min_size_test, max_size_test in cfg.TEST.MULTI_SIZES:
            cfg.defrost()
            cfg.INPUT.MIN_SIZE_TEST = min_size_test
            cfg.INPUT.MAX_SIZE_TEST = max_size_test
            cfg.freeze()
            data_loaders_val.extend(
                make_data_loader(cfg,
                                 is_train=False,
                                 is_distributed=distributed))
        # replicate folders/names so they zip with the extra loaders
        output_folders = output_folders * len(cfg.TEST.MULTI_SIZES)
        dataset_names = dataset_names * len(cfg.TEST.MULTI_SIZES)
    else:
        data_loaders_val = make_data_loader(cfg,
                                            is_train=False,
                                            is_distributed=distributed)

    predictions = []

    for output_folder, dataset_name, data_loader_val in zip(
            output_folders, dataset_names, data_loaders_val):
        # NOTE(review): inference() returns None on non-main ranks — the
        # list may contain Nones in distributed mode; confirm downstream
        # voting/evaluation only runs on the main process.
        prediction = inference(
            model,
            data_loader_val,
            dataset_name=dataset_name,
            iou_types=iou_types,
            box_only=cfg.MODEL.RPN_ONLY,
            device=cfg.MODEL.DEVICE,
            expected_results=cfg.TEST.EXPECTED_RESULTS,
            expected_results_sigma_tol=cfg.TEST.EXPECTED_RESULTS_SIGMA_TOL,
            output_folder=output_folder,
        )
        synchronize()
        predictions.append(prediction)

    if cfg.TEST.MULTI_SCALE:

        logger.info("Processing multi-scale bbox voting....")
        voted_predictions = voting(
            predictions,
            args.local_rank)  # box_voting(predictions, args.local_rank)
        torch.save(voted_predictions,
                   os.path.join(output_folders[0], 'predictions.pth'))

        extra_args = dict(
            box_only=cfg.MODEL.RPN_ONLY,
            iou_types=iou_types,
            expected_results=cfg.TEST.EXPECTED_RESULTS,
            expected_results_sigma_tol=cfg.TEST.EXPECTED_RESULTS_SIGMA_TOL,
        )

        evaluate(dataset=data_loaders_val[0].dataset,
                 predictions=voted_predictions,
                 output_folder=output_folders[0],
                 **extra_args)

    else:
        # single-scale: evaluate each dataset's predictions separately
        for prediction, output_folder, dataset_name, data_loader_val in zip(
                predictions, output_folders, dataset_names, data_loaders_val):
            extra_args = dict(
                box_only=cfg.MODEL.RPN_ONLY,
                iou_types=iou_types,
                expected_results=cfg.TEST.EXPECTED_RESULTS,
                expected_results_sigma_tol=cfg.TEST.EXPECTED_RESULTS_SIGMA_TOL,
            )

            evaluate(dataset=data_loader_val.dataset,
                     predictions=prediction,
                     output_folder=output_folder,
                     **extra_args)
    return 0
Example #11
0
def inference(
        model,
        iteration,
        data_loader,
        dataset_name,
        iou_types=("bbox", ),
        box_only=False,
        device="cuda",
        expected_results=(),
        expected_results_sigma_tol=4,
        output_folder=None,
):
    """Run ``model`` over ``data_loader`` and evaluate the predictions.

    Predictions are gathered on the main process (without synchronized
    gather); non-main ranks return None. On the main process, predictions
    are optionally saved to ``output_folder`` and ``evaluate`` is called
    with ``iteration`` forwarded through.
    """
    logger = logging.getLogger("maskrcnn_benchmark.inference")
    # convert to a torch.device for efficiency
    torch_device = torch.device(device)
    world_size = get_world_size()
    eval_dataset = data_loader.dataset
    logger.info("Start evaluation on {} dataset({} images).".format(
        dataset_name, len(eval_dataset)))

    overall_timer, model_timer = Timer(), Timer()
    overall_timer.tic()
    predictions = compute_on_dataset(model,
                                     data_loader,
                                     torch_device,
                                     synchronize_gather=False,
                                     timer=model_timer)
    # wait for all processes to complete before measuring the time
    synchronize()
    elapsed = overall_timer.toc()
    logger.info(
        "Total run time: {} ({} s / img per device, on {} devices)".format(
            get_time_str(elapsed), elapsed * world_size / len(eval_dataset),
            world_size))
    logger.info(
        "Model inference time: {} ({} s / img per device, on {} devices)".
        format(
            get_time_str(model_timer.total_time),
            model_timer.total_time * world_size / len(eval_dataset),
            world_size,
        ))

    predictions = _accumulate_predictions_from_multiple_gpus(
        predictions, synchronize_gather=False)
    if not is_main_process():
        return

    if output_folder:
        torch.save(predictions, os.path.join(output_folder, "predictions.pth"))

    return evaluate(dataset=eval_dataset,
                    predictions=predictions,
                    output_folder=output_folder,
                    box_only=box_only,
                    iou_types=iou_types,
                    iteration=iteration,
                    expected_results=expected_results,
                    expected_results_sigma_tol=expected_results_sigma_tol)
Example #12
0
def inference(
    model,
    data_loader,
    dataset_name,
    iou_types=("bbox", ),
    box_only=False,
    bbox_aug=False,
    device="cuda",
    expected_results=(),
    expected_results_sigma_tol=4,
    output_folder=None,
    c2d_json_path=None,
    rngs=None,
    cfg=None,
    test_only=None,
):
    """Run ``model`` (optionally a searched backbone needing BN
    recalibration) over ``data_loader`` and evaluate the predictions.

    Returns the result of ``evaluate`` on the main process, None elsewhere.
    """
    # import resource
    # rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
    # resource.setrlimit(resource.RLIMIT_NOFILE, (2048, rlimit[1]))
    # convert to a torch.device for efficiency
    device = torch.device(device)
    num_devices = get_world_size()
    logger = logging.getLogger("maskrcnn_benchmark.inference")
    dataset = data_loader.dataset
    logger.info("Start evaluation on {} dataset({} images).".format(
        dataset_name, len(dataset)))
    total_timer = Timer()
    inference_timer = Timer()
    total_timer.tic()
    # calibrate bn statistics
    if 'search' in cfg.MODEL.BACKBONE.CONV_BODY:
        print('recalibrate bn')
        model = bn_statistic(model, rngs, cfg)
        print('recalibrate finished!')
        model.eval()
    # NOTE(review): inference_timer is never passed to compute_on_dataset
    # in this variant, so the "Model inference time" logged below is
    # presumably zero — confirm against compute_on_dataset's signature.
    predictions, seg_result = compute_on_dataset(model,
                                                 data_loader,
                                                 device,
                                                 bbox_aug=bbox_aug,
                                                 c2d_json_path=c2d_json_path,
                                                 rngs=rngs,
                                                 test_only=test_only)
    # wait for all processes to complete before measuring the time
    synchronize()
    total_time = total_timer.toc()
    total_time_str = get_time_str(total_time)
    logger.info(
        "Total run time: {} ({} s / img per device, on {} devices)".format(
            total_time_str, total_time * num_devices / len(dataset),
            num_devices))
    total_infer_time = get_time_str(inference_timer.total_time)
    logger.info(
        "Model inference time: {} ({} s / img per device, on {} devices)".
        format(
            total_infer_time,
            inference_timer.total_time * num_devices / len(dataset),
            num_devices,
        ))

    predictions = _accumulate_predictions_from_multiple_gpus(
        predictions, seg_result)
    if not is_main_process():
        return

    if output_folder:
        torch.save(predictions, os.path.join(output_folder, "predictions.pth"))

    extra_args = dict(
        box_only=box_only,
        iou_types=iou_types,
        expected_results=expected_results,
        expected_results_sigma_tol=expected_results_sigma_tol,
        # test_only=test_only
    )

    return evaluate(dataset=dataset,
                    predictions=predictions,
                    output_folder=output_folder,
                    **extra_args)
def inference(
        model,
        data_loader,
        dataset_name,
        iou_types=("bbox", ),
        box_only=False,
        device="cuda",
        expected_results=(),
        expected_results_sigma_tol=4,
        output_folder=None,
):
    """Run ``model`` over ``data_loader`` and evaluate the predictions.

    For YOLACT on COCO (when masks are kept as raw masks rather than
    converted to polygons) a combined compute-and-evaluate fast path is
    used instead. Reads the module-level ``cfg``.

    Returns the result of ``evaluate`` (or of the fast path) on the main
    process, None on other ranks.
    """
    # convert to a torch.device for efficiency
    device = torch.device(device)
    num_devices = get_world_size()
    logger = logging.getLogger("maskrcnn_benchmark.inference")
    dataset = data_loader.dataset
    logger.info("Start evaluation on {} dataset({} images).".format(
        dataset_name, len(dataset)))
    # compute predictions and do evaluation in the same time to save memory
    # TODO support multiple gpus evaluation
    if cfg.MODEL.YOLACT_ON and not cfg.MODEL.YOLACT.CONVERT_MASK_TO_POLY and \
        isinstance(dataset, datasets.COCODataset):
        return do_coco_compute_and_evalute(model, data_loader, device,
                                           output_folder)
    total_timer = Timer()
    inference_timer = Timer()
    total_timer.tic()
    predictions = compute_on_dataset(model, data_loader, device,
                                     inference_timer)
    # wait for all processes to complete before measuring the time
    synchronize()
    total_time = total_timer.toc()
    total_time_str = get_time_str(total_time)
    logger.info(
        "Total run time: {} ({} s / img per device, on {} devices)".format(
            total_time_str, total_time * num_devices / len(dataset),
            num_devices))
    total_infer_time = get_time_str(inference_timer.total_time)
    logger.info(
        "Model inference time: {} ({} s / img per device, on {} devices)".
        format(
            total_infer_time,
            inference_timer.total_time * num_devices / len(dataset),
            num_devices,
        ))

    predictions = _accumulate_predictions_from_multiple_gpus(predictions)
    if not is_main_process():
        return

    if output_folder:
        torch.save(predictions, os.path.join(output_folder, "predictions.pth"))

    extra_args = dict(
        box_only=box_only,
        iou_types=iou_types,
        expected_results=expected_results,
        expected_results_sigma_tol=expected_results_sigma_tol,
    )

    return evaluate(dataset=dataset,
                    predictions=predictions,
                    output_folder=output_folder,
                    **extra_args)
from maskrcnn_benchmark.data.datasets.evaluation import evaluate
from maskrcnn_benchmark.data.datasets.cityscapes import CityscapesDataset
import torch
from maskrcnn_benchmark.config import cfg
from maskrcnn_benchmark.data import make_data_loader

# Paths for a Cityscapes (COCO-style) instance-segmentation evaluation run.
output_folder = '/home/brianyao/Documents/maskrcnn-benchmark/inference/cityscapes_fine_instanceonly_seg_val_cocostyle'
anno_file = '/media/DATA/Cityscapes/annotations/instancesonly_filtered_gtFine_val.json'
root = '/media/DATA/Cityscapes/leftImg8bit/val'
# NOTE(review): `dataset` is never used below (evaluate() reads
# data_loader.dataset); the third positional arg is presumably a
# remove-empty/transforms flag -- confirm against CityscapesDataset.
dataset = CityscapesDataset(anno_file, root, True)

# Apply the Cityscapes evaluation settings, then freeze the config.
cfg.merge_from_file('../configs/cityscapes/mask_rcnn_coco_eval.yaml')
cfg.merge_from_list([])
cfg.freeze()

data_loaders_val = make_data_loader(cfg, is_train=False, is_distributed=False)
data_loader = data_loaders_val[0]

# Standard switches forwarded to the dataset evaluator.
extra_args = dict(
    box_only=False,
    iou_types=("bbox", "segm"),
    expected_results=[],
    expected_results_sigma_tol=4,
)

# Re-use predictions stored by a previous inference run; the model itself
# is not executed by this script.
predictions = torch.load(
    '../inference/cityscapes_fine_instanceonly_seg_val_cocostyle/predictions.pth'
)

evaluate(data_loader.dataset, predictions, output_folder, **extra_args)
Example #15
0
def inference(
        model,
        data_loader,
        dataset_name,
        iou_types=("bbox", ),
        box_only=False,
        device="cuda",
        expected_results=(),
        expected_results_sigma_tol=4,
        output_folder=None,
        ignore_uncertain=False,  # add by hui
        use_iod_for_ignore=False,  # add by hui
        eval_standard='coco',  # add by hui
        use_last_prediction=False,  # add by hui for debug
        evaluate_method='',  # add by hui
        voc_iou_ths=(0.5, ),  # add by hui
        gt_file=None,  # add by hui
        use_ignore_attr=True):
    """Run the model over the loader (or reload cached predictions) and
    dispatch to the dataset-specific evaluator.

    Returns the evaluator's result on the main process, None elsewhere.
    """
    # Normalise the device argument once instead of per-batch.
    device = torch.device(device)
    num_devices = get_world_size()
    logger = logging.getLogger("maskrcnn_benchmark.inference")
    dataset = data_loader.dataset
    logger.info("Start evaluation on {} dataset({} images).".format(
        dataset_name, len(dataset)))
    start_time = time.time()
    if use_last_prediction:
        # Debug shortcut: reuse the predictions stored by a previous run.
        predictions = torch.load(
            os.path.join(output_folder, 'predictions.pth'))
    else:
        predictions = compute_on_dataset(model, data_loader, device)
        # Every rank must finish before the wall-clock reading is meaningful.
        synchronize()
        total_time = time.time() - start_time
        total_time_str = str(datetime.timedelta(seconds=total_time))
        logger.info(
            "Total inference time: {} ({} s / img per device, on {} devices)".
            format(total_time_str, total_time * num_devices / len(dataset),
                   num_devices))
        predictions = _accumulate_predictions_from_multiple_gpus(predictions)
    if not is_main_process():
        return

    if output_folder:
        torch.save(predictions, os.path.join(output_folder, "predictions.pth"))

    extra_args = {
        'box_only': box_only,
        'iou_types': iou_types,
        'expected_results': expected_results,
        'expected_results_sigma_tol': expected_results_sigma_tol,
    }

    # COCO-style datasets understand a few extra evaluation switches.
    from maskrcnn_benchmark.data import datasets
    if isinstance(dataset, datasets.COCODataset):
        extra_args.update(ignore_uncertain=ignore_uncertain,
                          use_iod_for_ignore=use_iod_for_ignore,
                          eval_standard=eval_standard,
                          gt_file=gt_file,
                          use_ignore_attr=use_ignore_attr)
    extra_args.update(evaluate_method=evaluate_method,
                      voc_iou_ths=voc_iou_ths)

    return evaluate(dataset=dataset,
                    predictions=predictions,
                    output_folder=output_folder,
                    **extra_args)
Example #16
0
def inference(
    cfg,
    model,
    data_loader,
    dataset_name,
    iou_types=("bbox", ),
    box_only=False,
    device="cuda",
    expected_results=(),
    expected_results_sigma_tol=4,
    output_folder=None,
    logger=None,
):
    """Evaluate the model, optionally reloading cached evaluation results.

    Returns -1.0 on non-main processes and in custom-eval mode, otherwise
    the evaluator's result.
    """
    cache_file = (os.path.join(output_folder, "eval_results.pytorch")
                  if output_folder is not None else None)
    load_prediction_from_cache = bool(cfg.TEST.ALLOW_LOAD_FROM_CACHE
                                      and cache_file is not None
                                      and os.path.exists(cache_file))
    # Normalise the device argument once instead of per-batch.
    device = torch.device(device)
    num_devices = get_world_size()
    if logger is None:
        logger = logging.getLogger("maskrcnn_benchmark.inference")
    dataset = data_loader.dataset
    logger.info("Start evaluation on {} dataset({} images).".format(
        dataset_name, len(dataset)))
    total_timer, inference_timer = Timer(), Timer()
    total_timer.tic()
    if load_prediction_from_cache:
        # Skip the forward passes entirely and reuse the stored results.
        cached = torch.load(cache_file, map_location=torch.device("cpu"))
        predictions = cached['predictions']
    else:
        predictions = compute_on_dataset(
            model,
            data_loader,
            device,
            synchronize_gather=cfg.TEST.RELATION.SYNC_GATHER,
            timer=inference_timer)
    # Every rank must be done before the timings are read.
    synchronize()
    total_time = total_timer.toc()
    logger.info(
        "Total run time: {} ({} s / img per device, on {} devices)".format(
            get_time_str(total_time),
            total_time * num_devices / len(dataset), num_devices))
    logger.info(
        "Model inference time: {} ({} s / img per device, on {} devices)".
        format(
            get_time_str(inference_timer.total_time),
            inference_timer.total_time * num_devices / len(dataset),
            num_devices,
        ))

    if not load_prediction_from_cache:
        predictions = _accumulate_predictions_from_multiple_gpus(
            predictions, synchronize_gather=cfg.TEST.RELATION.SYNC_GATHER)

    if not is_main_process():
        return -1.0

    extra_args = dict(
        box_only=box_only,
        iou_types=iou_types,
        expected_results=expected_results,
        expected_results_sigma_tol=expected_results_sigma_tol,
    )

    if cfg.TEST.CUSTUM_EVAL:
        # Custom evaluation just dumps the raw predictions and stops here.
        save_path = os.path.join(cfg.OUTPUT_DIR, 'custom_prediction.pytorch')
        torch.save(predictions, save_path)
        print(str(save_path) + ' SAVED !')
        return -1.0

    return evaluate(cfg=cfg,
                    dataset=dataset,
                    predictions=predictions,
                    output_folder=output_folder,
                    logger=logger,
                    **extra_args)
Example #17
0
def inference(
        model,
        data_loader,
        dataset_name,
        iou_types=("bbox", ),
        box_only=False,
        device="cuda",
        expected_results=(),
        expected_results_sigma_tol=4,
        output_folder=None,
):
    """Run inference (over a single supernet path when a search backbone is
    configured) and evaluate the resulting predictions."""
    device = torch.device(device)
    num_devices = get_world_size()
    logger = logging.getLogger("maskrcnn_benchmark.inference")
    dataset = data_loader.dataset
    logger.info("Start evaluation on {} dataset({} images).".format(
        dataset_name, len(dataset)))
    total_timer = Timer()
    inference_timer = Timer()
    total_timer.tic()

    rngs = None
    if 'search' in cfg.MODEL.BACKBONE.CONV_BODY:
        # Unwrap DDP to reach the supernet attributes.
        bare_model = (model.module if isinstance(
            model, torch.nn.parallel.DistributedDataParallel) else model)
        rngs = [3] * bare_model.num_states
        print('Evaluation on the single path of supernet: {}'.format(rngs))
        # Recompute BN statistics for the chosen path before evaluating.
        model = bn_statistic(model, rngs)
    predictions = compute_on_dataset(model, data_loader, device, rngs,
                                     inference_timer)
    # Every rank must finish before the timings are read.
    synchronize()
    total_time = total_timer.toc()
    logger.info(
        "Total run time: {} ({} s / img per device, on {} devices)".format(
            get_time_str(total_time),
            total_time * num_devices / len(dataset), num_devices))
    logger.info(
        "Model inference time: {} ({} s / img per device, on {} devices)".
        format(
            get_time_str(inference_timer.total_time),
            inference_timer.total_time * num_devices / len(dataset),
            num_devices,
        ))

    predictions = _accumulate_predictions_from_multiple_gpus(predictions)
    if not is_main_process():
        return

    if output_folder:
        torch.save(predictions, os.path.join(output_folder, "predictions.pth"))

    return evaluate(dataset=dataset,
                    predictions=predictions,
                    output_folder=output_folder,
                    box_only=box_only,
                    iou_types=iou_types,
                    expected_results=expected_results,
                    expected_results_sigma_tol=expected_results_sigma_tol)
import torch
from maskrcnn_benchmark.config import cfg
from maskrcnn_benchmark.data import make_data_loader
from maskrcnn_benchmark.data.datasets.evaluation import evaluate
import logging

# Build the Visual Genome attribute validation config and data loader.
cfg.merge_from_file('configs/vg_attribute.yaml')
data_loaders_val = make_data_loader(cfg, is_train=False, is_distributed=False)

# Where the evaluator writes its result files.
output_folder = "/var/maskrcnn-benchmark/models/detection_with_attribute_bs8/inference/vg_val/"
# NOTE(review): iou_types is a bare string here, not a tuple as in the
# inference() helpers -- verify evaluate() accepts a plain string.
iou_types = "bbox"

dataset = data_loaders_val[0].dataset
eval_attributes = False
box_only = True
# Re-use predictions stored by a previous inference run instead of
# executing the model again.
predictions = torch.load(
    "/var/maskrcnn-benchmark/models/detection_with_attribute_bs8/inference/vg_val/predictions.pth"
)

evaluate(dataset=dataset,
         predictions=predictions,
         output_folder=output_folder,
         box_only=box_only,
         eval_attributes=eval_attributes,
         iou_types=iou_types)
Example #19
0
def inference(
        model,
        data_loader,
        dataset_name,
        iou_types=("bbox", ),
        box_only=False,
        bbox_aug=False,
        device="cuda",
        expected_results=(),
        expected_results_sigma_tol=4,
        output_folder=None,
):
    """Evaluate the model; embedding-based (zero-shot) box predictors are
    first given the dataset's class-embedding matrix so that predicted
    embeddings can be mapped back to classes."""
    device = torch.device(device)
    num_devices = get_world_size()
    logger = logging.getLogger("maskrcnn_benchmark.inference")
    dataset = data_loader.dataset

    # zero-shot models should have class embeddings for inference to map predicted embeddings to classes
    net = model.module if isinstance(
        model, torch.nn.parallel.DistributedDataParallel) else model
    if hasattr(net, 'roi_heads') and 'box' in net.roi_heads:
        predictor = net.roi_heads['box'].predictor
        if predictor.embedding_based:
            predictor.set_class_embeddings(data_loader.dataset.class_emb_mtx)

    logger.info("Start evaluation on {} dataset({} images).".format(
        dataset_name, len(dataset)))
    total_timer = Timer()
    inference_timer = Timer()
    total_timer.tic()
    predictions = compute_on_dataset(model, data_loader, device, bbox_aug,
                                     inference_timer)
    # Every rank must finish before the timings are read.
    synchronize()
    total_time = total_timer.toc()
    logger.info(
        "Total run time: {} ({} s / img per device, on {} devices)".format(
            get_time_str(total_time),
            total_time * num_devices / len(dataset), num_devices))
    logger.info(
        "Model inference time: {} ({} s / img per device, on {} devices)".
        format(
            get_time_str(inference_timer.total_time),
            inference_timer.total_time * num_devices / len(dataset),
            num_devices,
        ))

    predictions = _accumulate_predictions_from_multiple_gpus(predictions)
    if not is_main_process():
        return

    if output_folder:
        torch.save(predictions, os.path.join(output_folder, "predictions.pth"))

    return evaluate(dataset=dataset,
                    predictions=predictions,
                    output_folder=output_folder,
                    box_only=box_only,
                    iou_types=iou_types,
                    expected_results=expected_results,
                    expected_results_sigma_tol=expected_results_sigma_tol)
Example #20
0
def inference(model,
              data_loader,
              dataset_name,
              iou_types=("bbox", ),
              box_only=False,
              device="cuda",
              expected_results=(),
              expected_results_sigma_tol=4,
              output_folder=None,
              kitti_output=False):
    """Run inference over `data_loader` and evaluate the results.

    When `kitti_output` is True, additionally writes one KITTI-format
    detection file per image under `<output_folder>/det_results/`.

    Returns the evaluator's result on the main process, None on other ranks.
    """
    # convert to a torch.device for efficiency
    device = torch.device(device)
    num_devices = get_world_size()
    logger = logging.getLogger("maskrcnn_benchmark.inference")
    dataset = data_loader.dataset
    logger.info("Start evaluation on {} dataset({} images).".format(
        dataset_name, len(dataset)))
    total_timer = Timer()
    inference_timer = Timer()
    total_timer.tic()
    predictions = compute_on_dataset(model, data_loader, device,
                                     inference_timer)

    # wait for all processes to complete before measuring the time
    synchronize()
    total_time = total_timer.toc()
    total_time_str = get_time_str(total_time)
    logger.info(
        "Total run time: {} ({} s / img per device, on {} devices)".format(
            total_time_str, total_time * num_devices / len(dataset),
            num_devices))
    total_infer_time = get_time_str(inference_timer.total_time)
    logger.info(
        "Model inference time: {} ({} s / img per device, on {} devices)".
        format(
            total_infer_time,
            inference_timer.total_time * num_devices / len(dataset),
            num_devices,
        ))

    predictions_multi = _accumulate_predictions_from_multiple_gpus(predictions)
    if not is_main_process():
        return

    if output_folder:
        torch.save(predictions_multi,
                   os.path.join(output_folder, "predictions.pth"))

    if kitti_output:
        # BUGFIX: iterate the gathered predictions (predictions_multi), not
        # this rank's shard, so multi-GPU runs dump every image.
        _write_kitti_results(dataset, predictions_multi, output_folder)

    extra_args = dict(
        box_only=box_only,
        iou_types=iou_types,
        expected_results=expected_results,
        expected_results_sigma_tol=expected_results_sigma_tol,
    )

    return evaluate(dataset=dataset,
                    predictions=predictions_multi,
                    output_folder=output_folder,
                    **extra_args)


def _write_kitti_results(dataset, predictions, output_folder):
    """Write one KITTI-format .txt detection file per image.

    Unknown KITTI fields (truncation, occlusion, 3D dimensions/location,
    rotation) are filled with the conventional placeholders (-1 / -10).
    """
    CATEGORIES = [
        "__background",
        "Car",
        "Pedestrian",
        "Cyclist",
    ]
    det_dir = os.path.join(output_folder, 'det_results')
    # BUGFIX: the target directory was never created, so the first
    # open(..., 'w') below raised FileNotFoundError.
    os.makedirs(det_dir, exist_ok=True)

    for image_id in range(len(predictions)):
        original_id = dataset.id_to_img_map[image_id]
        img_info = dataset.get_img_info(image_id)
        raw_prediction = predictions[image_id]
        # Map boxes back to the original image resolution.
        prediction = raw_prediction.resize(
            (img_info["width"], img_info["height"]))
        file_name = os.path.join(det_dir, '{0:06d}.txt'.format(original_id))
        with open(file_name, 'w') as output_file:
            scores = raw_prediction.get_field("scores").tolist()
            labels = raw_prediction.get_field("labels").tolist()
            labels = [CATEGORIES[i] for i in labels]
            for box, score, label in zip(prediction.bbox, scores, labels):
                left, top, right, bottom = box
                if score > 0.0001:
                    output_file.write(
                        '{:s} {:.2f} {:d} {:.2f} {:.2f} {:.2f} {:.2f} {:.2f} {:.2f} {:.2f} {:.2f} {:.2f} {:.2f} {:.2f} {:.2f} {:.2f}\n'
                        .format(
                            label,
                            -1,
                            -1,
                            -10,  # type, truncated, occluded, alpha
                            left,
                            top,
                            right,
                            bottom,
                            # bbox: left, top, right, bottom
                            -1,
                            -1,
                            -1,  # dimensions: height, width, length
                            -1,
                            -1,
                            -1,  # location: x,y,z
                            -10,
                            np.log(score)  # rotation_y, score
                        ))
        print('Saved results to ', file_name)
Example #21
0
def inference(
        model,
        data_loader,
        dataset_name,
        iou_types=("bbox", ),
        box_only=False,
        device="cuda",
        expected_results=(),
        expected_results_sigma_tol=4,
        output_folder=None,
):
    """Plain single-model evaluation: compute predictions, gather them onto
    the main process, optionally persist them, then run the evaluator."""
    device = torch.device(device)
    num_devices = get_world_size()
    logger = logging.getLogger("maskrcnn_benchmark.inference")
    dataset = data_loader.dataset
    logger.info("Start evaluation on {} dataset({} images).".format(
        dataset_name, len(dataset)))
    total_timer = Timer()
    inference_timer = Timer()
    total_timer.tic()

    predictions = compute_on_dataset(model, data_loader, device,
                                     inference_timer)

    # Every rank must finish before the timings are read.
    synchronize()
    total_time = total_timer.toc()
    logger.info(
        "Total run time: {} ({} s / img per device, on {} devices)".format(
            get_time_str(total_time),
            total_time * num_devices / len(dataset), num_devices))
    logger.info(
        "Model inference time: {} ({} s / img per device, on {} devices)".
        format(
            get_time_str(inference_timer.total_time),
            inference_timer.total_time * num_devices / len(dataset),
            num_devices,
        ))

    # Gathering returns a per-image list of BoxList results on the main rank.
    predictions = _accumulate_predictions_from_multiple_gpus(predictions)

    if not is_main_process():
        return

    if output_folder:
        torch.save(predictions, os.path.join(output_folder, "predictions.pth"))

    return evaluate(dataset=dataset,
                    predictions=predictions,
                    output_folder=output_folder,
                    box_only=box_only,
                    iou_types=iou_types,
                    expected_results=expected_results,
                    expected_results_sigma_tol=expected_results_sigma_tol)
def inference(model,
              data_loader,
              dataset_name,
              iou_types=("bbox", ),
              box_only=False,
              device="cuda",
              expected_results=(),
              expected_results_sigma_tol=4,
              output_folder=None,
              skip_eval=False,
              dllogger=None):
    """Run inference and (unless `skip_eval`) evaluate the predictions.

    `dllogger` is optional: metrics are reported through it only when one is
    provided. Returns the evaluator's result on the main process; None on
    other ranks or when `skip_eval` is set.
    """
    # convert to a torch.device for efficiency
    device = torch.device(device)
    num_devices = (torch.distributed.get_world_size()
                   if torch.distributed.is_initialized() else 1)
    dataset = data_loader.dataset
    # BUGFIX: dllogger defaults to None but was dereferenced unconditionally,
    # so calling inference() without it raised AttributeError. Every dllogger
    # call is now guarded.
    if dllogger is not None:
        dllogger.log(step="PARAMETER",
                     data={
                         "eval_dataset_name": dataset_name,
                         "eval_num_samples": len(dataset)
                     })
    start_time = time.time()
    predictions = compute_on_dataset(model, data_loader, device)
    # wait for all processes to complete before measuring the time
    synchronize()
    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=total_time))
    if dllogger is not None:
        dllogger.log(step=tuple(),
                     data={
                         "e2e_infer_time": total_time,
                         "inference_perf_fps": len(dataset) / total_time
                     })
    logger = logging.getLogger("maskrcnn_benchmark.inference")
    logger.info(
        "Total inference time: {} ({} s / img per device, on {} devices)".
        format(total_time_str, total_time * num_devices / len(dataset),
               num_devices))

    predictions = _accumulate_predictions_from_multiple_gpus(predictions)
    if not is_main_process():
        return

    if output_folder:
        torch.save(predictions, os.path.join(output_folder, "predictions.pth"))

    if skip_eval:
        if dllogger is not None:
            dllogger.log(step="PARAMETER",
                         data={
                             "skip_eval":
                             True,
                             "predictions_saved_path":
                             os.path.join(output_folder, "predictions.pth")
                         })
        return

    extra_args = dict(
        box_only=box_only,
        iou_types=iou_types,
        expected_results=expected_results,
        expected_results_sigma_tol=expected_results_sigma_tol,
    )

    return evaluate(dataset=dataset,
                    predictions=predictions,
                    output_folder=output_folder,
                    **extra_args)
Example #23
0
def inference(
        reid_model,
        model,
        data_loaders,
        dataset_name,
        iou_types=("bbox", ),
        box_only=False,
        device="cuda",
        expected_results=(),
        expected_results_sigma_tol=4,
        output_folder=None,
):
    """Person-search inference: run the gallery ('test') and probe ('query')
    loaders in turn, then evaluate both sets of predictions together."""
    modes = ['test', 'query']
    dataset_all = []
    predictions_all = []
    for mode, data_loader in zip(modes, data_loaders):
        print(mode, len(data_loader.dataset))
        device = torch.device(device)
        num_devices = get_world_size()
        logger = logging.getLogger("maskrcnn_benchmark.inference")
        logger.info("Start evaluation on {} dataset({} images).".format(
            dataset_name, len(data_loader.dataset)))
        total_timer = Timer()
        inference_timer = Timer()
        total_timer.tic()
        dataset = data_loader.dataset
        predictions = compute_on_dataset(reid_model, model, data_loader,
                                         device, mode)
        # Every rank must finish before the timings are read.
        synchronize()
        total_time = total_timer.toc()
        logger.info(
            "Total run time: {} ({} s / img per device, on {} devices)".format(
                get_time_str(total_time),
                total_time * num_devices / len(data_loader.dataset),
                num_devices))
        logger.info(
            "Model inference time: {} ({} s / img per device, on {} devices)".
            format(
                get_time_str(inference_timer.total_time),
                inference_timer.total_time * num_devices /
                len(data_loader.dataset),
                num_devices,
            ))

        # NOTE: multi-GPU gathering is intentionally skipped here
        # (single-GPU assumption in the original code).

        if not is_main_process():
            return

        if output_folder:
            # Gallery and query results go to different files; an unknown
            # mode raises KeyError(mode), exactly as before.
            torch.save(
                predictions,
                os.path.join(output_folder, {
                    'test': "predictions.pth",
                    'query': "evaluations.pth",
                }[mode]))
        dataset_all.append(dataset)
        predictions_all.append(predictions)

    extra_args = dict(
        box_only=box_only,
        iou_types=iou_types,
        expected_results=expected_results,
        expected_results_sigma_tol=expected_results_sigma_tol,
    )

    return evaluate(dataset=dataset_all,
                    predictions=predictions_all,
                    output_folder=output_folder,
                    **extra_args)
Example #24
0
def inference(
    model,
    data_loader,
    dataset_name,
    iou_types=("bbox", ),
    box_only=False,
    device="cuda",
    expected_results=(),
    expected_results_sigma_tol=4,
    output_folder=None,
    phase=1,
    shot=10,
    split=1,
):
    """Few-shot evaluation: load the precomputed mean class attentions for
    (phase, shot, split), run inference with them, then evaluate.

    `phase`/`shot`/`split` select which saved attention pickle to load from
    ``saved_attentions/``.
    """
    # convert to a torch.device for efficiency
    device = torch.device(device)
    num_devices = get_world_size()
    logger = logging.getLogger("maskrcnn_benchmark.inference")
    dataset = data_loader.dataset
    logger.info("Start evaluation on {} dataset({} images).".format(
        dataset_name, len(dataset)))
    total_timer = Timer()
    inference_timer = Timer()
    total_timer.tic()
    print('loading mean class attentions')
    input_dir = "saved_attentions/"

    attention_file = os.path.join(
        input_dir, 'meta_type_{}'.format(split),
        str(phase) + '_shots_' + str(shot) + '_mean_class_attentions.pkl')
    # BUGFIX: pickle.load(open(...)) leaked the file handle; a context
    # manager closes it deterministically.
    with open(attention_file, 'rb') as f:
        meta_attentions = pickle.load(f)

    predictions = compute_on_dataset(model, data_loader, device,
                                     inference_timer, meta_attentions)
    # wait for all processes to complete before measuring the time
    synchronize()

    total_time = total_timer.toc()
    total_time_str = get_time_str(total_time)

    logger.info(
        "Total run time: {} ({} s / img per device, on {} devices)".format(
            total_time_str, total_time * num_devices / len(dataset),
            num_devices))
    total_infer_time = get_time_str(inference_timer.total_time)
    logger.info(
        "Model inference time: {} ({} s / img per device, on {} devices)".
        format(
            total_infer_time,
            inference_timer.total_time * num_devices / len(dataset),
            num_devices,
        ))

    predictions = _accumulate_predictions_from_multiple_gpus(predictions)
    if not is_main_process():
        return

    if output_folder:
        torch.save(predictions, os.path.join(output_folder, "predictions.pth"))

    extra_args = dict(
        box_only=box_only,
        iou_types=iou_types,
        expected_results=expected_results,
        expected_results_sigma_tol=expected_results_sigma_tol,
    )

    return evaluate(dataset=dataset,
                    predictions=predictions,
                    output_folder=output_folder,
                    **extra_args)
Example #25
0
def inference(
    cfg,
    model,
    data_loader,
    dataset_name,
    iou_types=("bbox", ),
    box_only=False,
    device="cuda",
    expected_results=(),
    expected_results_sigma_tol=4,
    output_folder=None,
    logger=None,
):
    """Scene-graph evaluation entry point.

    Reuses cached results from ``eval_results.pytorch`` when the config
    allows it; in custom-eval mode the predictions are post-processed into a
    JSON scene graph instead of being scored. Returns -1.0 on non-main ranks
    and in custom-eval mode, otherwise the evaluator's result.
    """
    cache_file = (os.path.join(output_folder, "eval_results.pytorch")
                  if output_folder is not None else None)
    load_prediction_from_cache = bool(cfg.TEST.ALLOW_LOAD_FROM_CACHE
                                      and cache_file is not None
                                      and os.path.exists(cache_file))
    # Normalise the device argument once instead of per-batch.
    device = torch.device(device)
    num_devices = get_world_size()
    if logger is None:
        logger = logging.getLogger("maskrcnn_benchmark.inference")
    dataset = data_loader.dataset
    logger.info("Start evaluation on {} dataset({} images).".format(
        dataset_name, len(dataset)))
    total_timer, inference_timer = Timer(), Timer()
    total_timer.tic()
    if load_prediction_from_cache:
        # Skip the forward passes entirely and reuse the stored results.
        cached = torch.load(cache_file, map_location=torch.device("cpu"))
        predictions = cached['predictions']
    else:
        predictions = compute_on_dataset(
            model,
            data_loader,
            device,
            synchronize_gather=cfg.TEST.RELATION.SYNC_GATHER,
            timer=inference_timer)
    # Every rank must be done before the timings are read.
    synchronize()
    total_time = total_timer.toc()
    logger.info(
        "Total run time: {} ({} s / img per device, on {} devices)".format(
            get_time_str(total_time),
            total_time * num_devices / len(dataset), num_devices))
    logger.info(
        "Model inference time: {} ({} s / img per device, on {} devices)".
        format(
            get_time_str(inference_timer.total_time),
            inference_timer.total_time * num_devices / len(dataset),
            num_devices,
        ))

    if not load_prediction_from_cache:
        predictions = _accumulate_predictions_from_multiple_gpus(
            predictions, synchronize_gather=cfg.TEST.RELATION.SYNC_GATHER)

    if not is_main_process():
        return -1.0

    extra_args = dict(
        box_only=box_only,
        iou_types=iou_types,
        expected_results=expected_results,
        expected_results_sigma_tol=expected_results_sigma_tol,
    )

    if cfg.TEST.CUSTUM_EVAL:
        # Custom-image mode: dump a JSON scene graph and skip the scorer
        # (which would need ground-truth annotations).
        detected_sgg = custom_sgg_post_precessing(predictions)
        json_path = os.path.join(cfg.DETECTED_SGG_DIR,
                                 'custom_prediction.json')
        with open(json_path, 'w') as outfile:
            json.dump(detected_sgg, outfile)
        print('=====> ' + str(json_path) + ' SAVED !')
        return -1.0

    return evaluate(cfg=cfg,
                    dataset=dataset,
                    predictions=predictions,
                    output_folder=output_folder,
                    logger=logger,
                    **extra_args)
Example #26
0
def ms_inference(
        model,
        data_loader_val_mstest,
        dataset_name,
        iou_types=("bbox", ),
        box_only=False,
        device="cuda",
        expected_results=(),
        expected_results_sigma_tol=4,
        output_folder=None,
):
    """Run multi-scale test-time inference and evaluate the combined results.

    ``data_loader_val_mstest`` is expected to be an indexable of three
    entries (small / medium / large test scales); element ``[i][0]`` is the
    actual data loader for scale ``i`` — TODO confirm against the caller.

    Args:
        model: the model to evaluate (run as-is on each loader).
        data_loader_val_mstest: three (loader, ...) entries, one per scale.
        dataset_name: name used only for log messages.
        iou_types / box_only / expected_results / expected_results_sigma_tol:
            forwarded unchanged to ``evaluate``.
        device: device string or ``torch.device`` to run inference on.
        output_folder: if set, the gathered predictions are saved there as
            ``predictions.pth`` before evaluation.

    Returns:
        The result of ``evaluate`` on the main process; ``None`` on all
        other ranks.
    """
    # convert to a torch.device for efficiency
    device = torch.device(device)
    num_devices = get_world_size()
    logger = logging.getLogger("maskrcnn_benchmark.inference")

    def _run_split(data_loader, split_label):
        # Run timed inference on one scale's loader and gather predictions.
        # NOTE: every rank must execute this (including the gather below),
        # since synchronize()/accumulation involve all processes.
        dataset = data_loader.dataset
        logger.info("Start evaluation on {} {}({} images).".format(
            dataset_name, split_label, len(dataset)))
        total_timer = Timer()
        inference_timer = Timer()
        total_timer.tic()

        predictions = compute_on_dataset(model, data_loader, device,
                                         inference_timer)

        # wait for all processes to complete before measuring the time
        synchronize()
        total_time = total_timer.toc()
        total_time_str = get_time_str(total_time)
        logger.info(
            "Total run time: {} ({} s / img per device, on {} devices)".format(
                total_time_str, total_time * num_devices / len(dataset),
                num_devices))
        total_infer_time = get_time_str(inference_timer.total_time)
        logger.info(
            "Model inference time: {} ({} s / img per device, on {} devices)".
            format(
                total_infer_time,
                inference_timer.total_time * num_devices / len(dataset),
                num_devices,
            ))

        return dataset, _accumulate_predictions_from_multiple_gpus(predictions)

    # The split labels reproduce the original per-scale log wording exactly.
    dataset_small, predictions_small = _run_split(data_loader_val_mstest[0][0],
                                                  "dataset_small")
    dataset_medium, predictions_medium = _run_split(
        data_loader_val_mstest[1][0], "dataset_medium")
    dataset_large, predictions_large = _run_split(data_loader_val_mstest[2][0],
                                                  "dataset_large")

    if not is_main_process():
        return

    dataset = [dataset_small, dataset_medium, dataset_large]
    predictions = [predictions_small, predictions_medium, predictions_large]

    if output_folder:
        torch.save(predictions, os.path.join(output_folder, "predictions.pth"))

    extra_args = dict(
        box_only=box_only,
        iou_types=iou_types,
        expected_results=expected_results,
        expected_results_sigma_tol=expected_results_sigma_tol,
    )

    return evaluate(dataset=dataset,
                    predictions=predictions,
                    output_folder=output_folder,
                    **extra_args)
def inference(model,
              data_loader,
              dataset_name,
              iou_types=("bbox", ),
              box_only=False,
              device="cuda",
              expected_results=(),
              expected_results_sigma_tol=4,
              output_folder=None,
              predictions=None,
              working_directory="/tmp",
              chunk_predictions=False,
              compute_pre_results=True,
              panoptic_confidence_thresh=0.6,
              panoptic_overlap_thresh=0.5,
              panoptic_stuff_min_area=(64 * 64)):
    """Run (or reuse) inference on ``data_loader`` and evaluate the results.

    Args:
        model: the model to run; unused if ``predictions`` is supplied.
        data_loader: loader whose ``.dataset`` is evaluated against.
        dataset_name: name used only for log messages.
        iou_types: evaluation types; the special ``"order"`` type short
            circuits — predictions are computed and nothing is evaluated.
        predictions: precomputed predictions; if given, inference and the
            save-to-disk step are both skipped.
        working_directory: scratch directory for the multi-GPU gather.
        chunk_predictions: if True, gathering is skipped and per-chunk
            prediction files are moved into ``output_folder`` instead of a
            single ``predictions.pth``.
        compute_pre_results / panoptic_*: forwarded to ``evaluate``.

    Returns:
        The result of ``evaluate`` on the main process; ``None`` on other
        ranks and for the ``"order"`` short-circuit.
    """
    # convert to a torch.device for efficiency
    device = torch.device(device)
    num_devices = (torch.distributed.get_world_size()
                   if torch.distributed.is_initialized() else 1)
    logger = logging.getLogger("maskrcnn_benchmark.inference")
    dataset = data_loader.dataset

    # for now, short circuit for "order": compute only, no evaluation.
    if "order" in iou_types:
        compute_order_on_dataset(model, data_loader, device)
        return

    given_predictions = predictions is not None
    if not given_predictions:
        # make sure these are divisible.
        if chunk_predictions:
            logger.info("chunking predictions")

        logger.info("Start evaluation on {} dataset({} images).".format(
            dataset_name, len(dataset)))
        start_time = time.time()
        predictions = compute_on_dataset(model, data_loader, device)
        # wait for all processes to complete before measuring the time
        synchronize()
        total_time = time.time() - start_time
        total_time_str = str(datetime.timedelta(seconds=total_time))
        logger.info(
            "Total inference time: {} ({} s / img per device, on {} devices)".
            format(total_time_str, total_time * num_devices / len(dataset),
                   num_devices))

        predictions = _accumulate_predictions_from_multiple_gpus(
            predictions,
            working_directory=working_directory,
            skip_gather=chunk_predictions)
        # if we decided to keep the chunks, we only get filenames back.

    if not is_main_process():
        return

    # OK, should all be done within a single process now.
    if output_folder and not given_predictions:
        if chunk_predictions:
            parent_path = os.path.dirname(predictions[0])

            # relocate each chunk file into the output folder, updating the
            # recorded paths so evaluation reads from the new location.
            for i, chunk_path in enumerate(predictions):
                chunk_save_path = os.path.join(output_folder,
                                               os.path.basename(chunk_path))
                shutil.move(chunk_path, chunk_save_path)

                predictions[i] = chunk_save_path

            # remove the (now empty) scratch parent directory.
            os.rmdir(parent_path)

            logger.info("chunks: %s", predictions)
        else:
            torch.save(predictions,
                       os.path.join(output_folder, "predictions.pth"))

    extra_args = dict(box_only=box_only,
                      iou_types=iou_types,
                      expected_results=expected_results,
                      expected_results_sigma_tol=expected_results_sigma_tol,
                      working_directory=working_directory,
                      save_panoptic_results=True,
                      save_pre_results=compute_pre_results,
                      panoptic_confidence_thresh=panoptic_confidence_thresh,
                      panoptic_overlap_thresh=panoptic_overlap_thresh,
                      panoptic_stuff_min_area=panoptic_stuff_min_area)

    return evaluate(dataset=dataset,
                    predictions=predictions,
                    output_folder=output_folder,
                    **extra_args)