Example #1
def inference(model,
              data_loader,
              dataset_name,
              device,
              output_folder=None,
              use_cached=False,
              **kwargs):
    dataset = data_loader.dataset
    logger = logging.getLogger("SSD.inference")
    logger.info("Evaluating {} dataset({} images):".format(
        dataset_name, len(dataset)))
    predictions_path = os.path.join(output_folder, 'predictions.pth') if output_folder else None
    if use_cached and predictions_path and os.path.exists(predictions_path):
        predictions = torch.load(predictions_path, map_location='cpu')
    else:
        predictions = compute_on_dataset(model, data_loader, device)
        # wait for all ranks, then gather their prediction shards.
        synchronize()
        predictions = _accumulate_predictions_from_multiple_gpus(predictions)
    # from here on only the main process holds the merged predictions.
    if not is_main_process():
        return
    if output_folder:
        torch.save(predictions, predictions_path)
    return evaluate(dataset=dataset,
                    predictions=predictions,
                    output_dir=output_folder,
                    **kwargs)
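
The compute_on_dataset helper used above is not shown on this page. A minimal sketch of what it is assumed to do, under the assumptions that the loader yields (images, targets, image_ids) batches and the model returns one detection object per image with a .to() method:

# Sketch only: the batch layout and the .to() on the per-image output are
# assumptions inferred from how the surrounding examples use the results.
import torch

def compute_on_dataset(model, data_loader, device):
    model.eval()
    results = {}
    with torch.no_grad():
        for images, targets, image_ids in data_loader:
            outputs = model(images.to(device))
            for image_id, output in zip(image_ids, outputs):
                # keep results on the CPU so they can be gathered/saved later.
                results[image_id] = output.to(torch.device("cpu"))
    return results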
Example #2
def do_evaluation(cfg, model, output_dir, distributed):
    if isinstance(model, torch.nn.parallel.DistributedDataParallel):
        model = model.module
    assert isinstance(model, SSD), 'Wrong module.'
    test_datasets = build_dataset(dataset_list=cfg.DATASETS.TEST, is_test=True)
    device = torch.device(cfg.MODEL.DEVICE)
    model.eval()
    if not model.is_test:
        model.is_test = True
    predictor = Predictor(cfg=cfg,
                          model=model,
                          iou_threshold=cfg.TEST.NMS_THRESHOLD,
                          score_threshold=cfg.TEST.CONFIDENCE_THRESHOLD,
                          device=device)

    cpu_device = torch.device("cpu")
    logger = logging.getLogger("SSD.inference")
    for dataset_name, test_dataset in zip(cfg.DATASETS.TEST, test_datasets):
        logger.info("Test dataset {} size: {}".format(dataset_name,
                                                      len(test_dataset)))
        indices = list(range(len(test_dataset)))
        if distributed:
            # each rank evaluates every world_size-th image.
            rank = distributed_util.get_rank()
            world_size = distributed_util.get_world_size()
            indices = indices[rank::world_size]

        # show progress bar only on main process.
        progress_bar = tqdm if distributed_util.is_main_process() else iter
        logger.info('Progress on {} 0:'.format(cfg.MODEL.DEVICE.upper()))
        predictions = {}
        for i in progress_bar(indices):
            image = test_dataset.get_image(i)
            output = predictor.predict(image)
            boxes, labels, scores = [o.to(cpu_device).numpy() for o in output]
            predictions[i] = (boxes, labels, scores)
        distributed_util.synchronize()
        predictions = _accumulate_predictions_from_multiple_gpus(predictions)
        if not distributed_util.is_main_process():
            # non-main ranks skip saving/evaluating but must keep looping so
            # the collectives above stay in sync for the next dataset.
            continue

        final_output_dir = os.path.join(output_dir, dataset_name)
        if not os.path.exists(final_output_dir):
            os.makedirs(final_output_dir)
        torch.save(predictions,
                   os.path.join(final_output_dir, 'predictions.pth'))
        evaluate(dataset=test_dataset,
                 predictions=predictions,
                 output_dir=final_output_dir)
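
A hedged driver sketch for do_evaluation: the SSD(cfg) constructor call and the checkpoint path are assumptions made up for illustration; only the cfg keys visible above are relied on.

# Hypothetical usage; adjust the checkpoint path to your own training output.
model = SSD(cfg)
model.load_state_dict(torch.load('outputs/ssd_final.pth', map_location='cpu'))
model.to(torch.device(cfg.MODEL.DEVICE))
do_evaluation(cfg, model, output_dir='eval_results', distributed=False)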
Example #3
def inference(model, data_loader, dataset_name, output_folder: pathlib.Path,
              **kwargs):
    dataset = data_loader.dataset
    logger = logging.getLogger("SSD.inference")
    logger.info("Evaluating {} dataset({} images):".format(
        dataset_name, len(dataset)))
    predictions = compute_on_dataset(model, data_loader)
    predictions = convert_predictions(predictions)
    return evaluate(dataset=dataset,
                    predictions=predictions,
                    output_dir=output_folder,
                    **kwargs)
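
convert_predictions is not defined on this page. A plausible sketch, assuming it reorders the {image_index: output} dict into the index-sorted list that evaluate() expects, mirroring the compute_map method at the end of this page:

# Assumed helper: dict keyed by image index -> list sorted by index.
def convert_predictions(predictions):
    image_ids = list(sorted(predictions.keys()))
    return [predictions[i] for i in image_ids]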
Example #4
def _evaluation(cfg, dataset_name, test_dataset, predictor, distributed, output_dir):
    """ Perform evaluating on one dataset
    Args:
        cfg:
        dataset_name: dataset's name
        test_dataset: Dataset object
        predictor: Predictor object, used to to prediction.
        distributed: whether distributed evaluating or not
        output_dir: path to save prediction results
    Returns:
        evaluate result
    """
    cpu_device = torch.device("cpu")
    logger = logging.getLogger("SSD.inference")
    logger.info("Evaluating {} dataset({} images):".format(dataset_name, len(test_dataset)))
    indices = list(range(len(test_dataset)))
    if distributed:
        indices = indices[distributed_util.get_rank()::distributed_util.get_world_size()]

    # show progress bar only on main process.
    progress_bar = tqdm if distributed_util.is_main_process() else iter
    logger.info('Progress on {} 0:'.format(cfg.MODEL.DEVICE.upper()))
    predictions = {}
    for i in progress_bar(indices):
        image = test_dataset.get_image(i)
        output = predictor.predict(image)
        boxes, labels, scores = [o.to(cpu_device).numpy() for o in output]
        predictions[i] = (boxes, labels, scores)
    distributed_util.synchronize()
    predictions = _accumulate_predictions_from_multiple_gpus(predictions)
    if not distributed_util.is_main_process():
        return

    final_output_dir = os.path.join(output_dir, dataset_name)
    if not os.path.exists(final_output_dir):
        os.makedirs(final_output_dir)
    torch.save(predictions, os.path.join(final_output_dir, 'predictions.pth'))
    return evaluate(dataset=test_dataset, predictions=predictions, output_dir=final_output_dir)
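
_accumulate_predictions_from_multiple_gpus is called in several of these examples but never shown. A sketch of the usual pattern, assuming distributed_util exposes an all_gather that collects arbitrary picklable objects from every rank (that name is an assumption):

def _accumulate_predictions_from_multiple_gpus(predictions_per_rank):
    # gather every rank's {image_index: prediction} shard...
    all_predictions = distributed_util.all_gather(predictions_per_rank)
    if not distributed_util.is_main_process():
        return
    # ...and merge them into one dict on the main process.
    merged = {}
    for shard in all_predictions:
        merged.update(shard)
    return merged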
Example #5
def inference(cfg,
              model,
              data_loader,
              dataset_name,
              device='cuda',
              output_dir=None):
    device = torch.device(device)
    num_devices = get_world_size()
    logger = logging.getLogger('ssd.inference')
    dataset = data_loader.dataset
    logger.info(
        f'Start evaluation on {dataset_name} dataset ({len(dataset)} images)')

    total_timer = Timer()
    inference_timer = Timer()

    total_timer.tic()

    predictions = compute_on_dataset(model, data_loader, device,
                                     inference_timer)
    synchronize()
    total_time = total_timer.toc()
    total_time_str = get_time_str(total_time)
    logger.info(
        f"Total run time: {total_time_str} "
        f"({total_time * num_devices / len(dataset):.4f} s / img per device, "
        f"on {num_devices} devices)")
    total_infer_time = get_time_str(inference_timer.total_time)
    logger.info(
        f"Model inference time: {total_infer_time} "
        f"({inference_timer.total_time * num_devices / len(dataset):.4f} s / img per device, "
        f"on {num_devices} devices)")

    predictions = _accumulate_predictions_from_multiple_gpus(predictions)
    if not is_main_process():
        return

    if output_dir:
        torch.save(predictions, os.path.join(output_dir, 'predictions.pth'))

    size = (cfg.INPUT.SIZE, cfg.INPUT.SIZE)
    return evaluate(dataset, predictions, output_dir, size=size)
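
The Timer and get_time_str helpers are assumed to follow the common tic/toc pattern; a minimal self-contained sketch:

import datetime
import time

class Timer:
    def __init__(self):
        self.total_time = 0.0
        self._start = None

    def tic(self):
        # begin a measurement.
        self._start = time.time()

    def toc(self):
        # end the measurement, accumulate and return the elapsed seconds.
        elapsed = time.time() - self._start
        self.total_time += elapsed
        return elapsed

def get_time_str(seconds):
    # e.g. 3661.0 -> '1:01:01'
    return str(datetime.timedelta(seconds=int(seconds)))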
    # method of an evaluation helper class: self.predictions maps image
    # index -> prediction, built the same way as the loops above.
    def compute_map(self):
        image_ids = list(sorted(self.predictions.keys()))
        predictions = [self.predictions[i] for i in image_ids]
        return evaluate(dataset=self.test_dataset,
                        predictions=predictions,
                        output_dir=self.output_dir)