Пример #1
0
def main(DeepForest_config, model=None, args=None):
    """Evaluate precision/recall of *model* on the NEON evaluation plots.

    Args:
        DeepForest_config: project configuration dict; must provide
            "save_image_path" and "evaluation_site".
        model: a loaded keras-retinanet prediction model.
        args: optional CLI-style argument list; defaults to sys.argv[1:].

    Returns:
        [recall, precision] as computed by evaluate_pr.
    """
    # parse arguments
    if args is None:
        args = sys.argv[1:]
    args = parse_args(args)

    # Separate, timestamped output dir so repeated runs don't overwrite images.
    dirname = datetime.now().strftime("%Y%m%d_%H%M%S")
    save_path = DeepForest_config["save_image_path"] + dirname
    # exist_ok guards against a same-second collision between runs
    # (the original os.makedirs(save_path) raised FileExistsError).
    os.makedirs(save_path, exist_ok=True)
    print("save path is {}".format(save_path))

    # Evaluation metrics
    site = DeepForest_config["evaluation_site"]  # kept for parity; unused below

    # create the NEON mAP generator
    NEON_generator = create_NEON_generator(args.batch_size, DeepForest_config)

    # NEON plot precision/recall
    recall, precision = evaluate_pr(NEON_generator,
                                    model,
                                    iou_threshold=args.iou_threshold,
                                    score_threshold=args.score_threshold,
                                    max_detections=args.max_detections,
                                    save_path=save_path,
                                    experiment=None)

    return [recall, precision]
Пример #2
0
def main(DeepForest_config, model=None, args=None):
    """Run precision/recall evaluation of *model* on the NEON plots.

    Args:
        DeepForest_config: project configuration dict ("evaluation_site", ...).
        model: a loaded keras-retinanet prediction model.
        args: optional CLI-style argument list; defaults to sys.argv[1:].

    Returns:
        [recall, precision] as computed by evaluate_pr.
    """
    # parse arguments
    if args is None:
        args = sys.argv[1:]
    args = parse_args(args)

    # Timestamped output dir for logged images; None disables saving.
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    save_path = None
    if args.save_path:
        save_path = args.save_path + timestamp
        if not os.path.exists(save_path):
            os.makedirs(save_path)

    # Evaluation metrics
    site = DeepForest_config["evaluation_site"]  # kept for parity; unused below

    # Generator over the NEON hand-annotated evaluation plots.
    NEON_generator = create_NEON_generator(args.batch_size, DeepForest_config)

    recall, precision = evaluate_pr(NEON_generator,
                                    model,
                                    iou_threshold=args.iou_threshold,
                                    score_threshold=args.score_threshold,
                                    max_detections=args.max_detections,
                                    save_path=save_path,
                                    experiment=None)

    return [recall, precision]
Пример #3
0
def test_callback(model, experiment):
    """Run a single NEON mAP evaluation pass with fixed thresholds.

    Args:
        model: a loaded keras-retinanet prediction model.
        experiment: comet experiment used for metric logging.
    """
    # Generator over the NEON hand-annotated evaluation plots.
    generator = create_NEON_generator(DeepForest_config["batch_size"],
                                      DeepForest_config)

    eval_kwargs = {
        "iou_threshold": 0.5,
        "score_threshold": 0.15,
        "max_detections": 300,
        "save_path": "../snapshots/",
        "experiment": experiment,
    }
    # Result is logged via the experiment; the return value is not used here.
    evalmAP.evaluate(generator, model, **eval_kwargs)
Пример #4
0
def main(DeepForest_config, args=None, experiment=None):
    """Load a saved retinanet model and compute NEON mAP.

    Args:
        DeepForest_config: project configuration dict ("evaluation_site",
            "nms_threshold", ...).
        args: optional CLI-style argument list; defaults to sys.argv[1:].
        experiment: comet experiment passed through to evaluate().

    Returns:
        Per-class average precisions from evaluate().
    """
    # parse arguments
    if args is None:
        args = sys.argv[1:]
    args = parse_args(args)

    # make sure keras is the minimum required version
    check_keras_version()

    # optionally choose specific GPU
    if args.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    keras.backend.tensorflow_backend.set_session(get_session())

    # Separate, timestamped output dir so repeated runs don't overwrite images.
    dirname = datetime.now().strftime("%Y%m%d_%H%M%S")

    # BUG FIX: the original guarded makedirs for a missing --save_path but then
    # unconditionally passed args.save_path + dirname to evaluate(), raising
    # TypeError when --save_path was omitted. Compute the path once, allow None.
    if args.save_path is not None:
        save_path = args.save_path + dirname
        if not os.path.exists(save_path):
            os.makedirs(save_path)
    else:
        save_path = None

    # Evaluation metrics
    site = DeepForest_config["evaluation_site"]  # kept for parity; unused below

    # create the NEON mAP generator
    NEON_generator = create_NEON_generator(args.batch_size, DeepForest_config)

    # load the model
    print('Loading model, this may take a second...')
    model = models.load_model(args.model,
                              backbone_name=args.backbone,
                              convert=args.convert_model,
                              nms_threshold=DeepForest_config["nms_threshold"])

    # NEON plot mAP
    average_precisions = evaluate(NEON_generator,
                                  model,
                                  iou_threshold=args.iou_threshold,
                                  score_threshold=args.score_threshold,
                                  max_detections=args.max_detections,
                                  save_path=save_path,
                                  experiment=experiment)

    return average_precisions
Пример #5
0
    "/orange/ewhite/b.weinstein/retinanet/20190713_230957/resnet50_40.h5",
    "NIWO":
    "/orange/ewhite/b.weinstein/retinanet/20190712_055958/resnet50_40.h5",
    "MLBS":
    "/orange/ewhite/b.weinstein/retinanet/20190712_035528/resnet50_40.h5",
    "All":
    "/orange/ewhite/b.weinstein/retinanet/20190715_123358/resnet50_40.h5"
}

# For each trained site model, collect detections over the NEON evaluation
# plots and save annotated images to a timestamped snapshot directory.
for site_name, model_path in trained_models.items():
    # load retinanet model
    model = models.load_model(model_path,
                              backbone_name='resnet50',
                              convert=True,
                              nms_threshold=DeepForest_config["nms_threshold"])

    # Make a new dir to hold images.
    # BUG FIX: os.mkdir failed when the directory already existed or the
    # "../snapshots" parent was missing; makedirs(exist_ok=True) handles both.
    dirname = datetime.now().strftime("%Y%m%d_%H%M%S") + site_name
    save_image_path = os.path.join("..", "snapshots", dirname)
    os.makedirs(save_image_path, exist_ok=True)

    NEON_generator = generators.create_NEON_generator(
        DeepForest_config["batch_size"], DeepForest_config, name="evaluation")
    all_detections = _get_detections(
        NEON_generator,
        model,
        score_threshold=DeepForest_config["score_threshold"],
        max_detections=300,
        save_path=save_image_path,
        experiment=None)
Пример #6
0
def main(data, DeepForest_config, experiment, args=None):
    """Load a saved retinanet model, compute NEON mAP, and log it to comet.

    Args:
        data: training/evaluation data handle (currently unused; the
            generator-based evaluation path is disabled).
        DeepForest_config: project configuration dict ("input_channels",
            "nms_threshold", ...).
        experiment: comet experiment used for parameter/metric logging.
        args: optional CLI-style argument list; defaults to sys.argv[1:].

    Returns:
        [recall, NEON_map] where recall is None (site-recall computation is
        currently disabled) and NEON_map is the rounded NEON-plot mAP.
    """
    # parse arguments
    if args is None:
        args = sys.argv[1:]
    args = parse_args(args)

    # make sure keras is the minimum required version
    check_keras_version()

    # optionally choose specific GPU
    if args.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    keras.backend.tensorflow_backend.set_session(get_session())

    # Separate, timestamped output dir; also logged as the run start time.
    dirname = datetime.now().strftime("%Y%m%d_%H%M%S")
    experiment.log_parameter("Start Time", dirname)

    # BUG FIX: the original passed args.save_path + dirname straight to
    # evaluate(), raising TypeError when --save_path was omitted.
    if args.save_path is not None:
        save_path = args.save_path + dirname
        if not os.path.exists(save_path):
            os.makedirs(save_path)
    else:
        save_path = None

    # load the model
    print('Loading model, this may take a second...')
    model = models.load_model(args.model,
                              backbone_name=args.backbone,
                              convert=args.convert_model,
                              input_channels=DeepForest_config["input_channels"],
                              nms_threshold=DeepForest_config["nms_threshold"])

    # NOTE(review): the generator-based evaluation and the neonRecall
    # computation that used to live here were commented out upstream.
    # BUG FIX: the original still returned the undefined name `recall`
    # (NameError); keep the [recall, mAP] return shape with recall=None.
    recall = None

    # create the NEON mAP generator
    NEON_generator = create_NEON_generator(args.batch_size, DeepForest_config)

    # NEON plot mAP
    average_precisions = evaluate(
        NEON_generator,
        model,
        iou_threshold=args.iou_threshold,
        score_threshold=args.score_threshold,
        max_detections=args.max_detections,
        save_path=save_path,
        experiment=experiment
    )

    # print evaluation
    present_classes = 0
    precision = 0
    for label, (average_precision, num_annotations) in average_precisions.items():
        # BUG FIX: the original referenced the undefined name `generator`
        # here; the generator actually evaluated is NEON_generator.
        print('{:.0f} instances of class'.format(num_annotations),
              NEON_generator.label_to_name(label),
              'with average precision: {:.3f}'.format(average_precision))
        if num_annotations > 0:
            present_classes += 1
            precision += average_precision

    # Guard against ZeroDivisionError when no class had annotations.
    NEON_map = round(precision / present_classes, 3) if present_classes else 0.0
    print('Neon mAP: {:.3f}'.format(NEON_map))
    experiment.log_metric("Neon mAP", NEON_map)

    return [recall, NEON_map]
Пример #7
0
def create_callbacks(model, training_model, prediction_model, train_generator, validation_generator, args, experiment, DeepForest_config):
    """ Creates the callbacks to use during training.

    Args
        model: The base model (weights are snapshotted from this one).
        training_model: The model that is used for training.
        prediction_model: The model that should be used for validation.
        train_generator: The generator for training data (not used here).
        validation_generator: The generator for creating validation data.
        args: parseargs args object.
        experiment: comet experiment handed to the logging callbacks.
        DeepForest_config: project configuration dict.

    Returns:
        A list of callbacks used for training.
    """
    callback_list = []

    # Optional mAP evaluation on the validation generator.
    if args.evaluation and validation_generator:
        map_eval = Evaluate(validation_generator,
                            experiment=experiment,
                            save_path=args.save_path,
                            score_threshold=args.score_threshold,
                            DeepForest_config=DeepForest_config,
                            )
        callback_list.append(RedirectModel(map_eval, prediction_model))

    # Per-epoch snapshots of the base model.
    if args.snapshots:
        # ensure directory created first; otherwise h5py will error after epoch.
        makedirs(args.snapshot_path)
        snapshot_cb = keras.callbacks.ModelCheckpoint(
            os.path.join(
                args.snapshot_path,
                '{backbone}_{{epoch:02d}}.h5'.format(backbone=args.backbone)
            ),
            verbose=1
        )
        callback_list.append(RedirectModel(snapshot_cb, model))

    # Shrink the learning rate when training loss plateaus.
    callback_list.append(keras.callbacks.ReduceLROnPlateau(
        monitor='loss',
        factor=0.1,
        patience=2,
        verbose=1,
        mode='auto',
        epsilon=0.0001,  # older-keras spelling of min_delta
        cooldown=0,
        min_lr=0
    ))

    # NEON site-level recall callback.
    recall_cb = recallCallback(
        generator=create_NEON_generator(args.batch_size, DeepForest_config),
        save_path=args.save_path,
        score_threshold=args.score_threshold,
        experiment=experiment,
        sites=DeepForest_config["evaluation_site"])
    callback_list.append(RedirectModel(recall_cb, prediction_model))

    # NEON plot mAP callback.
    neon_map_cb = NEONmAP(create_NEON_generator(args.batch_size, DeepForest_config),
                          experiment=experiment,
                          save_path=args.save_path,
                          score_threshold=args.score_threshold,
                          DeepForest_config=DeepForest_config)
    callback_list.append(RedirectModel(neon_map_cb, prediction_model))

    return callback_list
Пример #8
0
from DeepForest import evalmAP
from DeepForest.utils.generators import create_NEON_generator

# CLI: run detection over the NEON evaluation plots with a trained model.
parser = argparse.ArgumentParser(description='Prediction of a new image')
parser.add_argument('--model', help='path to training model')
# BUG FIX: without type=int a CLI-supplied --batch_size arrived as str
# (only the default was an int) and was passed on to the generator.
parser.add_argument('--batch_size',
                    help='batch size for prediction',
                    type=int,
                    default=1)

args = parser.parse_args()

# Config files
DeepForest_config = load_config(dir="..")

# Load hand annotations
neon_generator = create_NEON_generator(args.batch_size, DeepForest_config)

# Get detections and annotations
model = models.load_model(args.model,
                          backbone_name='resnet50',
                          convert=True,
                          nms_threshold=DeepForest_config["nms_threshold"])
labels_to_names = {0: 'Tree'}
all_detections = evalmAP._get_detections(
    neon_generator,
    model,
    score_threshold=DeepForest_config["score_threshold"],
    save_path="/Users/Ben/Downloads/")
all_annotations = evalmAP._get_annotations(neon_generator)

#Loop through images and match boxes.