Example #1
    def set_mode(self, data):
        """
        Sets the mode of the algorithm and loads the corresponding weights.
        'both' means that the algorithm is considering grayscale and depth data.
        'depth' means that the algorithm only considers depth data.
        """
        mode = data.decode().strip().lower()
        if mode not in MODES:
            return Response(f"Invalid mode {mode}")
        self.mode = mode
        config = YamlConfig(self.config_path)
        inference_config = MaskConfig(config['model']['settings'])
        inference_config.GPU_COUNT = 1
        inference_config.IMAGES_PER_GPU = 1

        model_path = MODEL_PATHS[self.mode]
        model_dir, _ = os.path.split(model_path)
        self.model = modellib.MaskRCNN(mode=config['model']['mode'],
                                       config=inference_config,
                                       model_dir=model_dir)
        self.model.load_weights(model_path, by_name=True)
        self.element.log(LogLevel.INFO, f"Loaded weights from {model_path}")
        return Response(f"Mode switched to {self.mode}")
Example #2
def init_sd_mask_rcnn(yaml_file_path):
    '''
    input: path to the YAML configuration file
    returns: the loaded SD Mask R-CNN model and the TensorFlow graph it was built in
    '''

    # Initialization for SD Mask R-CNN: build an argument parser whose
    # --config default is the provided yaml_file_path, then load the model.
    conf_parser = argparse.ArgumentParser(
        description="Benchmark SD Mask RCNN model")
    conf_parser.add_argument("--config",
                             action="store",
                             default=yaml_file_path,
                             dest="conf_file",
                             type=str,
                             help="path to the configuration file")

    # Parse with an empty argument list so the default (yaml_file_path) is used
    conf_args = conf_parser.parse_args(args=[])

    # read in config file information from proper section
    config = YamlConfig(conf_args.conf_file)

    inference_config = MaskConfig(config['model']['settings'])
    inference_config.GPU_COUNT = 1
    inference_config.IMAGES_PER_GPU = 1

    model_dir, _ = os.path.split(config['model']['path'])
    model = modellib.MaskRCNN(mode=config['model']['mode'],
                              config=inference_config,
                              model_dir=model_dir)

    # Load trained weights
    print("Loading weights from ", config['model']['path'])
    model.load_weights(config['model']['path'], by_name=True)
    graph = tf.get_default_graph()
    return model, graph
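A hedged usage sketch for init_sd_mask_rcnn, assuming the Matterport-style MaskRCNN.detect() API and a TF1-style setup to match the tf.get_default_graph() call above; the config path is a placeholder and the zero image is a stand-in for real depth data:

import numpy as np

# Placeholder path; point this at the project's benchmark YAML config.
model, graph = init_sd_mask_rcnn("cfg/benchmark.yaml")

# Stand-in for a real input image; its size and channel count should match
# what the inference config expects.
depth_image = np.zeros((512, 512, 3), dtype=np.uint8)

# Run detection inside the captured graph so Keras/TF1 resolves the session
# correctly even when called from another thread (e.g. a request handler).
with graph.as_default():
    results = model.detect([depth_image], verbose=0)

print(results[0]['masks'].shape)  # (H, W, num_detections)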
Example #3
def benchmark(config):
    """Benchmarks a model, computes and stores model predictions and then
    evaluates them on COCO metrics and supplementary benchmarking script."""

    print("Benchmarking model.")

    # Create new directory for outputs
    output_dir = config['output_dir']
    utils.mkdir_if_missing(output_dir)

    # Save config in output directory
    config.save(os.path.join(output_dir, config['save_conf_name']))
    inference_config = MaskConfig(config['model']['settings'])
    inference_config.GPU_COUNT = 1
    inference_config.IMAGES_PER_GPU = 1
    
    model_dir, _ = os.path.split(config['model']['path'])
    model = modellib.MaskRCNN(mode=config['model']['mode'], config=inference_config,
                              model_dir=model_dir)

    # Load trained weights
    print("Loading weights from ", config['model']['path'])
    model.load_weights(config['model']['path'], by_name=True)

    # Create dataset
    test_dataset = ImageDataset(config['test']['path'], config['test']['images'], config['test']['masks'])
    test_dataset.load(config['test']['indices'])
    test_dataset.prepare()

    vis_dataset = ImageDataset(config['test']['path'], 'depth_ims', 'modal_segmasks')
    vis_dataset.load(config['test']['indices'])
    vis_dataset.prepare()

    ######## BENCHMARK JUST CREATES THE RUN DIRECTORY ########
    # code that actually produces outputs should be plug-and-play
    # depending on what kind of benchmark function we run.

    # If we want to remove bin pixels, pass in the directory with
    # those masks.
    if config['mask']['remove_bin_pixels']:
        bin_mask_dir = os.path.join(config['test']['path'], config['mask']['bin_masks'])
        overlap_thresh = config['mask']['overlap_thresh']
    else:
        bin_mask_dir = False
        overlap_thresh = 0

    # Create predictions and record where everything gets stored.
    pred_mask_dir, pred_info_dir, gt_mask_dir = \
        detect(config['output_dir'], inference_config, model, test_dataset, bin_mask_dir, overlap_thresh)

    ap, ar = coco_benchmark(pred_mask_dir, pred_info_dir, gt_mask_dir)
    if config['vis']['predictions']:
        visualize_predictions(config['output_dir'], vis_dataset, inference_config,
                              pred_mask_dir, pred_info_dir,
                              show_bbox=config['vis']['show_bbox_pred'],
                              show_scores=config['vis']['show_scores_pred'],
                              show_class=config['vis']['show_class_pred'])
    if config['vis']['ground_truth']:
        visualize_gts(config['output_dir'], vis_dataset, inference_config,
                      show_scores=False,
                      show_bbox=config['vis']['show_bbox_gt'],
                      show_class=config['vis']['show_class_gt'])
    if config['vis']['s_bench']:
        s_benchmark(config['output_dir'], vis_dataset, inference_config,
                    pred_mask_dir, pred_info_dir)

    print("Saved benchmarking output to {}.\n".format(config['output_dir']))
    return ap, ar
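A short usage sketch for benchmark(); the YAML path is a placeholder, and it assumes YamlConfig is the autolab_core loader used elsewhere in these examples:

from autolab_core import YamlConfig

# Placeholder path to a benchmark configuration containing the keys read above
# (model, test, mask, vis, output_dir, save_conf_name).
config = YamlConfig("cfg/benchmark.yaml")
ap, ar = benchmark(config)
print("mAP:", ap, "mAR:", ar)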