def benchmark(config):
    """Computes and stores predictions and then
    evaluates them on COCO metrics and supplementary benchmarking script."""

    print("Benchmarking Baseline method {}.".format(
        config["detector"]["type"]))

    # Create a new directory for run outputs
    output_dir = config["output_dir"]
    mkdir_if_missing(output_dir)

    # Save config in run directory
    config.save(os.path.join(output_dir, config["save_conf_name"]))

    # Select the detection routine for the configured detector type
    detector_type = config["detector"]["type"]
    if detector_type == "euclidean" or detector_type == "region_growing":
        from sd_maskrcnn.pcl.pydetect import detect
    elif detector_type == "gop" or detector_type == "mcg":
        from sd_maskrcnn.gop.detect import detect
    else:
        print("Detector type {} not supported".format(detector_type))
        exit()

    # Create predictions and record where everything gets stored.
    pred_mask_dir, pred_info_dir, gt_mask_dir = detect(
        detector_type,
        config["detector"][detector_type],
        output_dir,
        config["dataset"],
    )

    ap, ar = coco_benchmark(pred_mask_dir, pred_info_dir, gt_mask_dir)
    if config["vis"]["predictions"]:
        visualize_predictions(
            output_dir,
            config["dataset"],
            pred_mask_dir,
            pred_info_dir,
            show_bbox=config["vis"]["show_bbox_pred"],
            show_class=config["vis"]["show_class_pred"],
        )
    if config["vis"]["s_bench"]:
        s_benchmark(
            output_dir,
            config["dataset"],
            pred_mask_dir,
            pred_info_dir,
            gt_mask_dir,
        )

    print("Saved benchmarking output to {}.\n".format(output_dir))
    return ap, ar
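
A minimal driver sketch, not part of the original source: it assumes the config object is an autolab_core YamlConfig (dict-style access plus a save() method), and the YAML path below is a placeholder.

# Hypothetical driver for the benchmark() example above.
from autolab_core import YamlConfig  # assumed config class with dict-style access and save()

if __name__ == "__main__":
    # Placeholder path; point this at your own benchmark config.
    config = YamlConfig("cfg/benchmark_baseline.yaml")
    ap, ar = benchmark(config)
    print("AP: {:.3f}, AR: {:.3f}".format(ap, ar))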
Example #2
def benchmark(config):
    """Benchmarks a model, computes and stores model predictions and then
    evaluates them on COCO metrics and supplementary benchmarking script."""

    print("Benchmarking model.")

    # Create new directory for outputs
    output_dir = config['output_dir']
    utils.mkdir_if_missing(output_dir)

    # Save config in output directory
    config.save(os.path.join(output_dir, config['save_conf_name']))
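
    # Derive inference settings from the configured image shape and run
    # single-image batches on a single GPU.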
    image_shape = config['model']['settings']['image_shape']
    config['model']['settings']['image_min_dim'] = min(image_shape)
    config['model']['settings']['image_max_dim'] = max(image_shape)
    config['model']['settings']['gpu_count'] = 1
    config['model']['settings']['images_per_gpu'] = 1
    inference_config = MaskConfig(config['model']['settings'])
    
    model_dir, _ = os.path.split(config['model']['path'])
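    # Build the Mask R-CNN model in the configured mode, using the directory
    # of the weights file as the model (log) directory.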
    model = modellib.MaskRCNN(mode=config['model']['mode'], config=inference_config,
                              model_dir=model_dir)

    # Load trained weights
    print("Loading weights from ", config['model']['path'])
    model.load_weights(config['model']['path'], by_name=True)

    # Create dataset
    test_dataset = ImageDataset(config)
    test_dataset.load(config['dataset']['indices'])
    test_dataset.prepare()

    # Build a visualization dataset over depth images and modal segmasks
    # (note: copy() is shallow, so nested settings may still be shared with config).
    vis_config = copy(config)
    vis_config['dataset']['images'] = 'depth_ims'
    vis_config['dataset']['masks'] = 'modal_segmasks'
    vis_dataset = ImageDataset(vis_config)
    vis_dataset.load(vis_config['dataset']['indices'])
    vis_dataset.prepare()

    # The benchmark itself only creates the run directory; the code that
    # actually produces outputs should be plug-and-play depending on which
    # benchmark function is run.

    # If we want to remove bin pixels, pass in the directory with
    # those masks.
    if config['mask']['remove_bin_pixels']:
        bin_mask_dir = os.path.join(config['dataset']['path'], config['mask']['bin_masks'])
        overlap_thresh = config['mask']['overlap_thresh']
    else:
        bin_mask_dir = False
        overlap_thresh = 0

    # Create predictions and record where everything gets stored.
    pred_mask_dir, pred_info_dir, gt_mask_dir = detect(
        config['output_dir'], inference_config, model, test_dataset,
        bin_mask_dir, overlap_thresh)

    ap, ar = coco_benchmark(pred_mask_dir, pred_info_dir, gt_mask_dir)
    if config['vis']['predictions']:
        visualize_predictions(
            config['output_dir'], vis_dataset, inference_config,
            pred_mask_dir, pred_info_dir,
            show_bbox=config['vis']['show_bbox_pred'],
            show_scores=config['vis']['show_scores_pred'],
            show_class=config['vis']['show_class_pred'])
    if config['vis']['ground_truth']:
        visualize_gts(
            config['output_dir'], vis_dataset, inference_config,
            show_scores=False,
            show_bbox=config['vis']['show_bbox_gt'],
            show_class=config['vis']['show_class_gt'])
    if config['vis']['s_bench']:
        s_benchmark(
            config['output_dir'], vis_dataset, inference_config,
            pred_mask_dir, pred_info_dir)

    print("Saved benchmarking output to {}.\n".format(config['output_dir']))
    return ap, ar
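
Both examples rely on a small mkdir_if_missing helper (called as utils.mkdir_if_missing in the second example). The repository's own implementation is not shown here; a minimal sketch of what such a utility typically looks like:

import os

def mkdir_if_missing(output_dir):
    """Create output_dir (and any missing parents) if it does not already exist."""
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)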