Example #1
def benchmark(config):
    """Benchmarks a model, computes and stores model predictions and then
    evaluates them on COCO metrics and supplementary benchmarking script."""

    print("Benchmarking model.")

    # Create new directory for outputs
    output_dir = config['output_dir']
    utils.mkdir_if_missing(output_dir)

    # Save config in output directory
    config.save(os.path.join(output_dir, config['save_conf_name']))
    image_shape = config['model']['settings']['image_shape']
    config['model']['settings']['image_min_dim'] = min(image_shape)
    config['model']['settings']['image_max_dim'] = max(image_shape)
    config['model']['settings']['gpu_count'] = 1
    config['model']['settings']['images_per_gpu'] = 1
    inference_config = MaskConfig(config['model']['settings'])
    
    model_dir, _ = os.path.split(config['model']['path'])
    model = modellib.MaskRCNN(mode=config['model']['mode'], config=inference_config,
                              model_dir=model_dir)

    # Load trained weights
    print("Loading weights from ", config['model']['path'])
    model.load_weights(config['model']['path'], by_name=True)

    # Create dataset
    test_dataset = ImageDataset(config)
    test_dataset.load(config['dataset']['indices'])
    test_dataset.prepare()

    # Separate config for visualization, pointing at the depth images and
    # modal segmasks. Note that copy() is shallow, so nested sections are
    # still shared with config.
    vis_config = copy(config)
    vis_config['dataset']['images'] = 'depth_ims'
    vis_config['dataset']['masks'] = 'modal_segmasks'
    vis_dataset = ImageDataset(vis_config)
    vis_dataset.load(config['dataset']['indices'])
    vis_dataset.prepare()

    ######## BENCHMARK JUST CREATES THE RUN DIRECTORY ########
    # code that actually produces outputs should be plug-and-play
    # depending on what kind of benchmark function we run.

    # If we want to remove bin pixels, pass in the directory with
    # those masks.
    if config['mask']['remove_bin_pixels']:
        bin_mask_dir = os.path.join(config['dataset']['path'], config['mask']['bin_masks'])
        overlap_thresh = config['mask']['overlap_thresh']
    else:
        bin_mask_dir = False
        overlap_thresh = 0

    # Create predictions and record where everything gets stored.
    pred_mask_dir, pred_info_dir, gt_mask_dir = \
        detect(config['output_dir'], inference_config, model, test_dataset, bin_mask_dir, overlap_thresh)

    ap, ar = coco_benchmark(pred_mask_dir, pred_info_dir, gt_mask_dir)
    if config['vis']['predictions']:
        visualize_predictions(config['output_dir'], vis_dataset, inference_config,
                              pred_mask_dir, pred_info_dir,
                              show_bbox=config['vis']['show_bbox_pred'],
                              show_scores=config['vis']['show_scores_pred'],
                              show_class=config['vis']['show_class_pred'])
    if config['vis']['ground_truth']:
        visualize_gts(config['output_dir'], vis_dataset, inference_config,
                      show_scores=False,
                      show_bbox=config['vis']['show_bbox_gt'],
                      show_class=config['vis']['show_class_gt'])
    if config['vis']['s_bench']:
        s_benchmark(config['output_dir'], vis_dataset, inference_config,
                    pred_mask_dir, pred_info_dir)

    print("Saved benchmarking output to {}.\n".format(config['output_dir']))
    return ap, ar
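
A minimal driver sketch for the example above. It assumes the config argument is an autolab_core YamlConfig (suggested by the dict-style indexing plus the save() call); the YAML path is hypothetical:

# Driver sketch: assumes an autolab_core.YamlConfig config object; the file
# path is hypothetical and the returned AP/AR values are printed as-is.
from autolab_core import YamlConfig

if __name__ == "__main__":
    cfg = YamlConfig("cfg/benchmark.yaml")  # hypothetical config file
    ap, ar = benchmark(cfg)
    print("AP: {}, AR: {}".format(ap, ar))
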
Example #2
def train(config):
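    """Trains a Mask R-CNN model; dataset indices, initial weights, and
    training settings are all read from config."""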

    # Training dataset
    dataset_train = ImageDataset(config)
    dataset_train.load(config['dataset']['train_indices'], augment=True)
    dataset_train.prepare()

    # Validation dataset
    dataset_val = ImageDataset(config)
    dataset_val.load(config['dataset']['val_indices'])
    dataset_val.prepare()

    # Load config
    image_shape = config['model']['settings']['image_shape']
    config['model']['settings']['image_min_dim'] = min(image_shape)
    config['model']['settings']['image_max_dim'] = max(image_shape)
    train_config = MaskConfig(config['model']['settings'])
    # Use integer division so STEPS_PER_EPOCH is a whole number of batches.
    train_config.STEPS_PER_EPOCH = dataset_train.indices.size // (
        train_config.IMAGES_PER_GPU * train_config.GPU_COUNT)
    train_config.display()

    # Create directory if it doesn't currently exist
    utils.mkdir_if_missing(config['model']['path'])

    # Create the model.
    model = modellib.MaskRCNN(mode='training', config=train_config,
                              model_dir=config['model']['path'])

    # Select weights file to load
    if config['model']['weights'].lower() == "coco":
        weights_path = os.path.join(config['model']['path'], 'mask_rcnn_coco.h5')
        # Download weights file
        if not os.path.exists(weights_path):
            utilslib.download_trained_weights(weights_path)
    elif config['model']['weights'].lower() == "last":
        # Find last trained weights
        weights_path = model.find_last()
    elif config['model']['weights'].lower() == "imagenet":
        # Start from ImageNet trained weights
        weights_path = model.get_imagenet_weights()
    else:
        weights_path = config['model']['weights']

    # Load weights
    exclude_layers = []
    print("Loading weights ", weights_path)
    if config['model']['weights'].lower() == "coco":
        # Exclude the last layers because they require a matching
        # number of classes
        if config['model']['settings']['image_channel_count'] == 1:
            exclude_layers = ['conv1']
        exclude_layers += ["mrcnn_class_logits", "mrcnn_bbox_fc", "mrcnn_bbox", "mrcnn_mask"]
        model.load_weights(weights_path, by_name=True, exclude=exclude_layers)
    elif config['model']['weights'].lower() == "imagenet":
        if config['model']['settings']['image_channel_count'] == 1:
            exclude_layers = ['conv1']
        model.load_weights(weights_path, by_name=True, exclude=exclude_layers)
    elif config['model']['weights'].lower() != "new":
        model.load_weights(weights_path, by_name=True)

    # save config in run folder
    config.save(os.path.join(config['model']['path'], config['save_conf_name']))

    # train and save weights to model_path
    model.train(dataset_train, dataset_val, learning_rate=train_config.LEARNING_RATE,
                epochs=config['model']['epochs'], layers='all')

    # save in the models folder
    current_datetime = time.strftime("%Y%m%d-%H%M%S")
    model_path = os.path.join(config['model']['path'], "mask_rcnn_{}_{}.h5".format(train_config.NAME, current_datetime))
    model.keras_model.save_weights(model_path)
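
A matching driver sketch for this train() variant. The config keys listed in the comments are those read in the function body; the YamlConfig class and YAML path are assumptions:

# Driver sketch: assumes a YamlConfig-backed config; the key layout is
# inferred from the lookups inside train() above, the file path is hypothetical.
from autolab_core import YamlConfig

if __name__ == "__main__":
    cfg = YamlConfig("cfg/train.yaml")  # hypothetical config file
    # train() reads at least:
    #   dataset: train_indices, val_indices
    #   model: path, weights ("coco" | "last" | "imagenet" | "new" | a file path),
    #          epochs, settings (image_shape, image_channel_count, ...)
    #   save_conf_name
    train(cfg)
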
Example #3
def train(config):
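    """Trains an SDMaskRCNNModel; dataset indices and model settings are
    read from config."""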

    # Training dataset
    dataset_train = ImageDataset(config)
    dataset_train.load(config["dataset"]["train_indices"], augment=True)
    dataset_train.prepare()

    # Validation dataset
    dataset_val = ImageDataset(config)
    dataset_val.load(config["dataset"]["val_indices"])
    dataset_val.prepare()

    # Derive steps per epoch from the dataset size and batch configuration.
    config["model"]["settings"]["steps_per_epoch"] = (
        dataset_train.num_images //
        (config["model"]["settings"]["images_per_gpu"] *
         config["model"]["settings"]["gpu_count"]))

    # Create the model.
    model = SDMaskRCNNModel("training", config["model"])

    # save config in run folder
    config.save(os.path.join(config["model"]["path"],
                             config["save_conf_name"]))

    # train and save weights to model_path
    model.train(dataset_train, dataset_val)
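
This variant differs from Example #2 mainly in that model construction, weight selection, and the training schedule are delegated to the SDMaskRCNNModel wrapper, so the script body only wires the datasets and config together; it is presumably invoked the same way as the previous example, with a YamlConfig-like object exposing dataset and model sections.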