Example #1
def do_evaluation(cfg, model, output_dir, distributed):
    if isinstance(model, torch.nn.parallel.DistributedDataParallel):
        model = model.module
    assert isinstance(model, SSD), 'Wrong module.'
    test_datasets = build_dataset(dataset_list=cfg.DATASETS.TEST, is_test=True)
    device = torch.device(cfg.MODEL.DEVICE)
    model.eval()
    if not model.is_test:
        model.is_test = True
    predictor = Predictor(cfg=cfg,
                          model=model,
                          iou_threshold=cfg.TEST.NMS_THRESHOLD,
                          score_threshold=cfg.TEST.CONFIDENCE_THRESHOLD,
                          device=device)

    cpu_device = torch.device("cpu")
    logger = logging.getLogger("SSD.inference")
    for dataset_name, test_dataset in zip(cfg.DATASETS.TEST, test_datasets):
        logger.info("Test dataset {} size: {}".format(dataset_name,
                                                      len(test_dataset)))
        indices = list(range(len(test_dataset)))
        if distributed:
            indices = indices[distributed_util.get_rank()::distributed_util.get_world_size()]

        # show progress bar only on main process.
        progress_bar = tqdm if distributed_util.is_main_process() else iter
        logger.info('Progress on {} 0:'.format(cfg.MODEL.DEVICE.upper()))
        predictions = {}
        for i in progress_bar(indices):
            image = test_dataset.get_image(i)
            output = predictor.predict(image)
            boxes, labels, scores = [o.to(cpu_device).numpy() for o in output]
            predictions[i] = (boxes, labels, scores)
        distributed_util.synchronize()
        predictions = _accumulate_predictions_from_multiple_gpus(predictions)
        if not distributed_util.is_main_process():
            # Skip saving/evaluation on worker ranks, but keep looping so the
            # next dataset's synchronize() call is still reached.
            continue

        final_output_dir = os.path.join(output_dir, dataset_name)
        if not os.path.exists(final_output_dir):
            os.makedirs(final_output_dir)
        torch.save(predictions,
                   os.path.join(final_output_dir, 'predictions.pth'))
        evaluate(dataset=test_dataset,
                 predictions=predictions,
                 output_dir=final_output_dir)
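
The indices[rank::world_size] slice above shards the test images round-robin across processes, so each GPU only predicts its own subset. A tiny standalone illustration in plain Python (no project code assumed):

# Round-robin index sharding as used in do_evaluation: each rank takes every
# world_size-th image, starting at its own rank.
indices = list(range(10))
world_size = 4
for rank in range(world_size):
    print(rank, indices[rank::world_size])
# 0 [0, 4, 8]
# 1 [1, 5, 9]
# 2 [2, 6]
# 3 [3, 7]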
Example #2
def do_evaluation(cfg, model, output_dir, distributed, datasets_dict=None):
    if isinstance(model, torch.nn.parallel.DistributedDataParallel):
        model = model.module
    assert isinstance(model, SSD), 'Wrong module.'
    if datasets_dict is not None:
        if cfg.TEST.MODE == "joint":
            test_datasets = DetectionConcatDataset(datasets_dict.values())
        else:
            test_datasets = datasets_dict.values()
        joint_dataset_name = "Concatenation of validation splits"
        datasets_names = datasets_dict.keys()
    else:
        test_datasets = build_dataset(dataset_list=cfg.DATASETS.TEST,
                                      is_test=True)
        datasets_names = cfg.DATASETS.TEST
        joint_dataset_name = "Concatenation of test sets"

    device = torch.device(cfg.MODEL.DEVICE)
    model.eval()
    predictor = Predictor(cfg=cfg,
                          iou_threshold=cfg.TEST.NMS_THRESHOLD,
                          score_threshold=cfg.TEST.CONFIDENCE_THRESHOLD,
                          device=device,
                          model=model)
    # evaluate all test datasets.
    logger = logging.getLogger("SSD.inference")

    metrics = {}

    if cfg.TEST.MODE == "split":
        logger.info('Will evaluate {} dataset(s):'.format(len(test_datasets)))
        for dataset_name, test_dataset in zip(datasets_names, test_datasets):
            metric = _evaluation(cfg, dataset_name, test_dataset, predictor,
                                 distributed, output_dir)
            metrics[dataset_name] = metric
            distributed_util.synchronize()
    else:
        logger.info('Will evaluate {} image(s):'.format(len(test_datasets)))
        metric = _evaluation(cfg, joint_dataset_name, test_datasets, predictor,
                             distributed, output_dir)
        metrics[joint_dataset_name] = metric
        distributed_util.synchronize()

    return metrics
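
A hedged usage sketch for the datasets_dict variant above: the mapping pairs a display name with an already-built Dataset, and cfg.TEST.MODE chooses between per-dataset ("split") and concatenated ("joint") evaluation. The dataset variables, cfg, model and output_dir below are placeholders, not names from the source.

# Hedged usage sketch; val_dataset_a / val_dataset_b, cfg, model and
# output_dir are placeholders assumed to exist in the calling script.
from collections import OrderedDict

datasets_dict = OrderedDict([
    ("voc_2007_val", val_dataset_a),
    ("voc_2012_val", val_dataset_b),
])
metrics = do_evaluation(cfg, model, output_dir,
                        distributed=False, datasets_dict=datasets_dict)
for name, result in metrics.items():
    print(name, result)  # each value is whatever evaluate() returned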
Example #3
def _evaluation(cfg, dataset_name, test_dataset, predictor, distributed, output_dir):
    """ Perform evaluating on one dataset
    Args:
        cfg:
        dataset_name: dataset's name
        test_dataset: Dataset object
        predictor: Predictor object, used to to prediction.
        distributed: whether distributed evaluating or not
        output_dir: path to save prediction results
    Returns:
        evaluate result
    """
    cpu_device = torch.device("cpu")
    logger = logging.getLogger("SSD.inference")
    logger.info("Evaluating {} dataset({} images):".format(dataset_name, len(test_dataset)))
    indices = list(range(len(test_dataset)))
    if distributed:
        indices = indices[distributed_util.get_rank()::distributed_util.get_world_size()]

    # show progress bar only on main process.
    progress_bar = tqdm if distributed_util.is_main_process() else iter
    logger.info('Progress on {} 0:'.format(cfg.MODEL.DEVICE.upper()))
    predictions = {}
    for i in progress_bar(indices):
        image = test_dataset.get_image(i)
        output = predictor.predict(image)
        boxes, labels, scores = [o.to(cpu_device).numpy() for o in output]
        predictions[i] = (boxes, labels, scores)
    distributed_util.synchronize()
    predictions = _accumulate_predictions_from_multiple_gpus(predictions)
    if not distributed_util.is_main_process():
        return

    final_output_dir = os.path.join(output_dir, dataset_name)
    if not os.path.exists(final_output_dir):
        os.makedirs(final_output_dir)
    torch.save(predictions, os.path.join(final_output_dir, 'predictions.pth'))
    return evaluate(dataset=test_dataset, predictions=predictions, output_dir=final_output_dir)
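
_accumulate_predictions_from_multiple_gpus is called but not defined in these excerpts. Below is a minimal sketch of what such a helper typically does, assuming torch.distributed is initialized in the distributed case; this is an assumption for illustration, not the repository's actual implementation.

# Hypothetical sketch only; the real helper in the SSD codebase may differ.
import torch.distributed as dist

def _accumulate_predictions_from_multiple_gpus(predictions_per_rank):
    # Single-process evaluation: nothing to merge.
    if not (dist.is_available() and dist.is_initialized()):
        return predictions_per_rank
    # Gather every rank's {image_index: (boxes, labels, scores)} dict.
    gathered = [None] * dist.get_world_size()
    dist.all_gather_object(gathered, predictions_per_rank)
    if dist.get_rank() != 0:
        return None
    # Merge and re-order by image index so the main process sees all images.
    merged = {}
    for part in gathered:
        merged.update(part)
    return {i: merged[i] for i in sorted(merged)}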
Example #4
def do_evaluation(cfg, model, output_dir, distributed):
    if isinstance(model, torch.nn.parallel.DistributedDataParallel):
        model = model.module
    assert isinstance(model, SSD), 'Wrong module.'
    test_datasets = build_dataset(dataset_list=cfg.DATASETS.TEST, is_test=True)
    device = torch.device(cfg.MODEL.DEVICE)
    model.eval()
    predictor = Predictor(cfg=cfg,
                          model=model,
                          iou_threshold=cfg.TEST.NMS_THRESHOLD,
                          score_threshold=cfg.TEST.CONFIDENCE_THRESHOLD,
                          device=device)
    # evaluate all test datasets.
    logger = logging.getLogger("SSD.inference")
    logger.info('Will evaluate {} dataset(s):'.format(len(test_datasets)))
    for dataset_name, test_dataset in zip(cfg.DATASETS.TEST, test_datasets):
        _evaluation(cfg, dataset_name, test_dataset, predictor, distributed,
                    output_dir)
        distributed_util.synchronize()