Example #1
def test_callback(model, experiment):
    # create the NEON generator
    NEON_generator = create_NEON_generator(DeepForest_config["batch_size"], DeepForest_config)
    
    average_precisions = evalmAP.evaluate(
        NEON_generator,
        model,
        iou_threshold=0.5,
        score_threshold=0.15,
        max_detections=300,
        save_path="../snapshots/",
        experiment=experiment
    )

    return average_precisions
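A minimal sketch of how a callback like this might be wired into training, assuming a Keras model and a Comet-style `experiment` object are in scope; the `LambdaCallback` hook, `train_generator`, and epoch count below are illustrative and not part of the original example:

from keras.callbacks import LambdaCallback

# Run the NEON evaluation at the end of every epoch (illustrative wiring;
# the training generator and epoch count are placeholders).
neon_eval = LambdaCallback(
    on_epoch_end=lambda epoch, logs: test_callback(model, experiment))

model.fit_generator(train_generator, epochs=10, callbacks=[neon_eval])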
Example #2
def main(DeepForest_config, args=None, experiment=None):
    # parse arguments
    if args is None:
        args = sys.argv[1:]
    args = parse_args(args)

    # make sure keras is the minimum required version
    check_keras_version()

    # optionally choose specific GPU
    if args.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    keras.backend.tensorflow_backend.set_session(get_session())

    # Add separate dir
    # save time for logging
    dirname = datetime.now().strftime("%Y%m%d_%H%M%S")

    # make save path if it doesn't exist
    if args.save_path is not None and not os.path.exists(args.save_path +
                                                         dirname):
        os.makedirs(args.save_path + dirname)

    # Evaluation metrics
    site = DeepForest_config["evaluation_site"]

    # create the NEON mAP generator
    NEON_generator = create_NEON_generator(args.batch_size, DeepForest_config)

    # load the model
    print('Loading model, this may take a second...')
    model = models.load_model(args.model,
                              backbone_name=args.backbone,
                              convert=args.convert_model,
                              nms_threshold=DeepForest_config["nms_threshold"])

    #print(model.summary())

    # NEON plot mAP
    average_precisions = evaluate(NEON_generator,
                                  model,
                                  iou_threshold=args.iou_threshold,
                                  score_threshold=args.score_threshold,
                                  max_detections=args.max_detections,
                                  save_path=args.save_path + dirname,
                                  experiment=experiment)

    return average_precisions
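A short driver sketch for this entry point, assuming the DeepForest configuration is stored in a YAML file (the file name is an assumption) and the remaining evaluation arguments come from the command line:

import yaml

# Load the DeepForest configuration (file name assumed) and run the evaluation;
# parse_args() inside main() picks up the model path and thresholds from sys.argv.
with open("_config.yml") as f:
    DeepForest_config = yaml.safe_load(f)

average_precisions = main(DeepForest_config)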
Example #3
def main(data, DeepForest_config, experiment, args=None):
    # parse arguments
    if args is None:
        args = sys.argv[1:]
    args = parse_args(args)

    # make sure keras is the minimum required version
    check_keras_version()

    # optionally choose specific GPU
    if args.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    keras.backend.tensorflow_backend.set_session(get_session())

    # Add separate dir
    # save time for logging
    dirname = datetime.now().strftime("%Y%m%d_%H%M%S")
    experiment.log_parameter("Start Time", dirname)
    
    # make save path if it doesn't exist
    if args.save_path is not None and not os.path.exists(args.save_path + dirname):
        os.makedirs(args.save_path + dirname)

    # load the model
    print('Loading model, this may take a second...')
    model = models.load_model(args.model,
                              backbone_name=args.backbone,
                              convert=args.convert_model,
                              input_channels=DeepForest_config["input_channels"],
                              nms_threshold=DeepForest_config["nms_threshold"])

    #print(model.summary())

    ## create the testing generators
    #if DeepForest_config["evaluation_images"] > 0:
        #generator = create_generator(args, data, DeepForest_config)
        
        #average_precisions = evaluate(
            #generator,
            #model,
            #iou_threshold=args.iou_threshold,
            #score_threshold=args.score_threshold,
            #max_detections=args.max_detections,
            #save_path=args.save_path + dirname
        #)
    
        ### print evaluation
        #present_classes = 0
        #precision = 0
        #for label, (average_precision, num_annotations) in average_precisions.items():
            #print('{:.0f} instances of class'.format(num_annotations),
                  #generator.label_to_name(label), 'with average precision: {:.3f}'.format(average_precision))
            #if num_annotations > 0:
                #present_classes += 1
                #precision       += average_precision
        #print('mAP: {:.3f}'.format(precision / present_classes))
        #experiment.log_metric("mAP", precision / present_classes)                 

    ##Evaluation metrics
    #sites = DeepForest_config["evaluation_site"]
    
    # create the NEON mAP generator
    NEON_generator = create_NEON_generator(args.batch_size, DeepForest_config)
    #NEON_recall_generator = create_NEON_generator(args.batch_size, DeepForest_config)

    #recall = neonRecall(
        #sites,
        #NEON_recall_generator,
        #model,            
        #score_threshold=args.score_threshold,
        #save_path=args.save_path + dirname,
        #max_detections=args.max_detections
    #)
    
    #print("Recall is {:0.3f}".format(recall))
    
    #experiment.log_metric("Recall", recall)               
        
    # NEON plot mAP
    average_precisions = evaluate(
        NEON_generator,
        model,
        iou_threshold=args.iou_threshold,
        score_threshold=args.score_threshold,
        max_detections=args.max_detections,
        save_path=args.save_path + dirname,
        experiment=experiment
    )

    # print evaluation
    present_classes = 0
    precision = 0
    for label, (average_precision, num_annotations) in average_precisions.items():
        print('{:.0f} instances of class'.format(num_annotations),
              NEON_generator.label_to_name(label), 'with average precision: {:.3f}'.format(average_precision))
        if num_annotations > 0:
            present_classes += 1
            precision       += average_precision
    NEON_map = round(precision / present_classes, 3)
    print('Neon mAP: {:.3f}'.format(NEON_map))
    experiment.log_metric("Neon mAP", NEON_map)       
    
    # the recall computation above is commented out, so only the NEON mAP is returned
    return NEON_map
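The aggregation loop above averages AP only over classes that have at least one ground-truth annotation. The same logic, factored into a small helper for reuse (a sketch, not part of the original script):

def summarize_map(average_precisions):
    # average_precisions maps label -> (average_precision, num_annotations),
    # matching what evaluate() returns in these examples
    present_classes = 0
    total_precision = 0.0
    for label, (average_precision, num_annotations) in average_precisions.items():
        if num_annotations > 0:
            present_classes += 1
            total_precision += average_precision
    # average only over classes that actually appear in the ground truth
    return round(total_precision / present_classes, 3) if present_classes else 0.0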