Example #1
def main(args=None):

    # parse arguments
    if args is None:
        args = sys.argv[1:]
    args = parse_args(args)

    # create object that stores backbone information
    backbone = models.backbone(args.backbone)

    # optionally load config parameters
    if args.config:
        args.config = read_config_file(args.config)

    # create the generators
    train_generator = create_generators(args, backbone.preprocess_image)

    # create the model
    if args.snapshot is not None:
        print('Loading model')
        model = models.load_model(args.snapshot, backbone_name=args.backbone)
        training_model = model
        anchor_params = None
        if args.config and 'anchor_parameters' in args.config:
            anchor_params = parse_anchor_parameters(args.config)
        prediction_model = retinanet_bbox(model=model,
                                          anchor_params=anchor_params)
    else:
        weights = args.weights
        if weights is None and args.imagenet_weights:
            weights = backbone.download_imagenet()
        print('Creating model')
        model, training_model, prediction_model = create_models(
            backbone_retinanet=backbone.retinanet,
            num_classes=train_generator.num_classes(),
            weights=weights,
            multi_gpu=args.multi_gpu,
            freeze_backbone=args.freeze_backbone,
            lr=args.lr,
            config=args.config)

    # this lets the generator compute backbone layer shapes using the actual backbone model
    if 'densenet' in args.backbone:
        train_generator.compute_shapes = make_shapes_callback(model)

    # create the callbacks
    callbacks = create_callbacks(model, training_model, prediction_model, args)

    # start training
    return training_model.fit_generator(
        generator=train_generator,
        steps_per_epoch=train_generator.size() // args.batch_size,
        epochs=args.epochs,
        verbose=1,
        callbacks=callbacks)
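# A hypothetical way to invoke the entry point above, assuming the snippet lives in a
# standalone train.py that uses keras-retinanet's standard CSV dataset arguments; the
# backbone name and file names below are placeholders.
if __name__ == '__main__':
    # equivalent to: python train.py --backbone resnet50 --epochs 50 csv annotations.csv classes.csv
    main(['--backbone', 'resnet50', '--epochs', '50',
          'csv', 'annotations.csv', 'classes.csv'])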
Example #2
    def train(self,
              annotation_path,
              class_path,
              snapshot_src=None,
              tensorboard_dir=None,
              snapshot_dir=None,
              snapshot_tag=None,
              snapshot_freq=10):
        """ Train routine.
            Params:
                annotation_path: file path for target annotations
                class_path: file path for class-id mapping
                snapshot_src: file path of snapshot to continue training
                tensorboard_dir: directory for saving tensorboard
                snapshot_dir: directory for saving snapshots
                snapshot_tag: tag used as filename identifier for snapshots
                snapshot_freq: epoch interval for saving snapshots
        """
        if self._epoch % snapshot_freq != 0:
            print(
                "Error: Snapshot saving interval should be set to factor of total epochns ({})."
                .format(self._epoch))
            return

        # Create the data generator
        generator = self.__create_generator(annotation_path, class_path)

        if snapshot_src:
            model = models.load_model(snapshot_src,
                                      backbone_name=self._backbone_name)
            training_model = model
            prediction_model = retinanet_bbox(
                model=model, anchor_params=self._anchor_params)
        else:
            model, training_model, prediction_model = self.__create_models(
                self._backbone.retinanet, generator.num_classes())

        print(model.summary())

        if 'vgg' in self._backbone_name or 'densenet' in self._backbone_name:
            generator.compute_shapes = make_shapes_callback(model)

        callbacks = self.__create_callbacks(model, training_model,
                                            prediction_model, tensorboard_dir,
                                            snapshot_dir, snapshot_tag,
                                            snapshot_freq)
        training_model.fit_generator(generator=generator,
                                     steps_per_epoch=self._step,
                                     epochs=self._epoch,
                                     verbose=1,
                                     callbacks=callbacks)
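    # Hypothetical usage of the train routine above; `detector` stands in for an instance
    # of the (unshown) class that owns this method, and the paths are placeholders:
    #
    #   detector.train(annotation_path='annotations.csv',
    #                  class_path='classes.csv',
    #                  tensorboard_dir='logs/',
    #                  snapshot_dir='snapshots/',
    #                  snapshot_tag='resnet50',
    #                  snapshot_freq=10)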
Example #3
def main(args=None):
    # parse arguments
    if args is None:
        args = sys.argv[1:]
    args = parse_args(args)

    # make sure keras is the minimum required version
    check_keras_version()

    # optionally choose specific GPU
    if args.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    keras.backend.tensorflow_backend.set_session(get_session())

    # optionally load config parameters
    if args.config:
        args.config = read_config_file(args.config)

    # create the generators
    generator = create_generators(args)

    # create the model
    print('Loading model, this may take a second...')
    model = models.load_model(args.snapshot, backbone_name=args.backbone)
    anchor_params = None
    if args.config and 'anchor_parameters' in args.config:
        anchor_params = parse_anchor_parameters(args.config)
    prediction_model = retinanet_bbox(model=model, anchor_params=anchor_params)

    # print model summary
    prediction_model.summary()

    # this lets the generator compute backbone layer shapes using the actual backbone model
    if 'vgg' in args.backbone or 'densenet' in args.backbone:
        generator.compute_shapes = make_shapes_callback(model)

    # inference
    boxes, scores, labels = prediction_model.predict_generator(
        generator=generator,
        verbose=1,
    )

    # generate COCO annotations
    create_coco_annotations(boxes, scores, labels, generator.image_ids)
Example #4
    prediction_model = retinanet_bbox(model=model)

    # compile model
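    # the loss dict keys map onto the model's output layers (the 'regression' and 'classification' heads)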
    training_model.compile(loss={
        'regression': losses.smooth_l1(),
        'classification': losses.focal()
    },
                           optimizer=keras.optimizers.adam(lr=1e-5,
                                                           clipnorm=0.001))

# print model summary
print(model.summary())

# this lets the generator compute backbone layer shapes using the actual backbone model
if 'vgg' in backbone or 'densenet' in backbone:
    train_generator.compute_shapes = make_shapes_callback(model)
    if validation_generator:
        validation_generator.compute_shapes = train_generator.compute_shapes

callbacks = []

tensorboard_callback = None

if tensorboard_dir:
    tensorboard_callback = keras.callbacks.TensorBoard(
        log_dir=tensorboard_dir,
        histogram_freq=0,
        batch_size=batch_size,
        write_graph=True,
        write_grads=False,
        write_images=False,
    )
Example #5
def main(args=None):
    global config
    from keras import backend as K

    # parse arguments
    if args is None:
        args = sys.argv[1:]
    args = parse_args(args)
    print('Arguments: {}'.format(args))

    # create object that stores backbone information
    backbone = models.backbone(args.backbone)

    # make sure keras is the minimum required version
    check_keras_version()

    # optionally choose specific GPU
    if args.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    keras.backend.tensorflow_backend.set_session(get_session())

    # create the generators
    train_generator, validation_generator = create_generators(
        args, backbone.preprocess_image)

    # create the model
    if args.snapshot is not None:
        print('Loading model, this may take a second...')
        model = models.load_model(args.snapshot, backbone_name=args.backbone)
        training_model = model
        anchor_params = None
        if 'anchor_parameters' in config:
            anchor_params = parse_anchor_parameters(config)
        prediction_model = retinanet_bbox(model=model,
                                          anchor_params=anchor_params)
    else:
        weights = args.weights
        # default to imagenet if nothing else is specified
        if weights is None and args.imagenet_weights:
            weights = backbone.download_imagenet()

        print('Creating model, this may take a second...')
        model, training_model, prediction_model = create_models(
            backbone_retinanet=backbone.retinanet,
            num_classes=train_generator.num_classes(),
            weights=weights,
            args=args,
            multi_gpu=args.multi_gpu,
            freeze_backbone=args.freeze_backbone,
            config=config)

    # print model summary
    print(model.summary())

    print('Learning rate: {}'.format(K.get_value(model.optimizer.lr)))
    if args.lr > 0.0:
        K.set_value(model.optimizer.lr, args.lr)
        print('Updated learning rate: {}'.format(
            K.get_value(model.optimizer.lr)))

    # this lets the generator compute backbone layer shapes using the actual backbone model
    if 'vgg' in args.backbone or 'densenet' in args.backbone:
        train_generator.compute_shapes = make_shapes_callback(model)
        if validation_generator:
            validation_generator.compute_shapes = train_generator.compute_shapes

    # create the callbacks
    callbacks = create_callbacks(
        model,
        training_model,
        prediction_model,
        validation_generator,
        args,
    )

    init_epoch = 0
    try:
        if args.snapshot:
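            # recover the starting epoch from the snapshot filename, assuming the epoch
            # number is the second-to-last underscore-separated token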
            init_epoch = int(args.snapshot.split("_")[-2])
    except (IndexError, ValueError):
        pass
    # init_epoch = 6
    print('Init epoch: {}'.format(init_epoch))

    # start training
    training_model.fit_generator(
        generator=train_generator,
        steps_per_epoch=args.steps,
        epochs=args.epochs,
        verbose=1,
        callbacks=callbacks,
        initial_epoch=init_epoch,
    )
Example #6
def main(args=None, data=None, DeepForest_config=None, experiment=None):
    # parse arguments
    if args is None:
        args = sys.argv[1:]
    args = parse_args(args)

    # create object that stores backbone information
    backbone = models.backbone(args.backbone)

    # make sure keras is the minimum required version
    check_keras_version()

    # optionally choose specific GPU
    if args.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    keras.backend.tensorflow_backend.set_session(get_session())

    # create the generators
    train_generator, validation_generator = create_generators(
        args, data, DeepForest_config=DeepForest_config)

    # create the model
    if args.snapshot is not None:
        print('Loading model, this may take a second...\n')
        model = models.load_model(args.snapshot, backbone_name=args.backbone)
        training_model = model
        prediction_model = retinanet_bbox(
            model=model, nms_threshold=DeepForest_config["nms_threshold"])
    else:
        weights = args.weights
        # default to imagenet if nothing else is specified
        if weights is None and args.imagenet_weights:
            weights = backbone.download_imagenet()

        print('Creating model, this may take a second...')
        model, training_model, prediction_model = create_models(
            backbone_retinanet=backbone.retinanet,
            num_classes=train_generator.num_classes(),
            weights=weights,
            multi_gpu=args.multi_gpu,
            freeze_backbone=args.freeze_backbone,
            nms_threshold=DeepForest_config["nms_threshold"])

    # print model summary
    #print(model.summary())

    # this lets the generator compute backbone layer shapes using the actual backbone model
    if 'vgg' in args.backbone or 'densenet' in args.backbone:
        compute_anchor_targets = functools.partial(
            anchor_targets_bbox, shapes_callback=make_shapes_callback(model))
        train_generator.compute_anchor_targets = compute_anchor_targets
        if validation_generator is not None:
            validation_generator.compute_anchor_targets = compute_anchor_targets

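    # sanity check: ensure no validation window also appears in the training data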
    matched = []
    for entry in validation_generator.image_data.values():
        test = entry in train_generator.image_data.values()
        matched.append(test)
    if sum(matched) > 0:
        raise Exception(
            "%.2f percent of validation windows are in training data" %
            (100 * sum(matched) / train_generator.size()))
    else:
        print("Test passed: No overlapping data in training and validation")

    #start training
    cp.enable()
    training_model.fit_generator(generator=train_generator,
                                 steps_per_epoch=train_generator.size() //
                                 DeepForest_config["batch_size"],
                                 epochs=args.epochs,
                                 verbose=1,
                                 shuffle=False,
                                 callbacks=None,
                                 workers=DeepForest_config["workers"],
                                 use_multiprocessing=True)

    cp.disable()
Example #7
def main(args=None):
    # parse arguments
    if args is None:
        args = sys.argv[1:]
    args = parse_args(args)

    # create object that stores backbone information
    backbone = models.backbone(args.backbone)

    # make sure keras is the minimum required version
    check_keras_version()

    # optionally choose specific GPU
    if args.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    keras.backend.tensorflow_backend.set_session(get_session())

    # create the generators
    train_generator, validation_generator = create_generators(args, backbone.preprocess_image)

    # create the model
    if args.snapshot is not None:
        print('Loading model, this may take a second...')
        model            = models.load_model(args.snapshot, backbone_name=args.backbone)
        training_model   = model
        prediction_model = retinanet_bbox(model=model)
    else:
        weights = args.weights
        # default to imagenet if nothing else is specified
        if weights is None and args.imagenet_weights:
            weights = backbone.download_imagenet()

        print('Creating model, this may take a second...')
        model, training_model, prediction_model = create_models(
            backbone_retinanet=backbone.retinanet,
            num_classes=train_generator.num_classes(),
            weights=weights,
            multi_gpu=args.multi_gpu,
            freeze_backbone=args.freeze_backbone
        )

    # print model summary
    print(model.summary())

    # this lets the generator compute backbone layer shapes using the actual backbone model
    if 'vgg' in args.backbone or 'densenet' in args.backbone:
        train_generator.compute_shapes = make_shapes_callback(model)
        if validation_generator:
            validation_generator.compute_shapes = train_generator.compute_shapes

    # create the callbacks
    callbacks = create_callbacks(
        model,
        training_model,
        prediction_model,
        validation_generator,
        args,
    )

    # start training
    history = training_model.fit_generator(
        generator=train_generator,
        steps_per_epoch=args.steps,
        epochs=args.epochs,
        verbose=1,
        callbacks=callbacks,
    )
    timestr = time.strftime("%Y-%m-%d-%H%M")

    history_path = os.path.join(
        args.snapshot_path,
        '{timestr}_{backbone}.csv'.format(timestr=timestr, backbone=args.backbone))
    pd.DataFrame(history.history).to_csv(history_path)
Example #8
def main(forest_object,
         args=None,
         input_type="fit_generator",
         list_of_tfrecords=None,
         comet_experiment=None):
    """
    Main Training Loop
    Args:
        forest_object: a deepforest class object
        args: Keras retinanet argparse
        list_of_tfrecords: list of tfrecords to parse
        input_type: "fit_generator" or "tfrecord" input type
        comet_experiment: optional comet experiment object for logging
    """
    # parse arguments
    if args is None:
        args = sys.argv[1:]
    args = parse_args(args)

    # create object that stores backbone information
    backbone = models.backbone(args.backbone)

    # make sure keras is the minimum required version
    check_keras_version()

    # optionally choose specific GPU
    if args.gpu:
        setup_gpu(args.gpu)

    # optionally load config parameters
    if args.config:
        args.config = read_config_file(args.config)

    # data input
    if input_type == "fit_generator":
        # create the generators
        train_generator, validation_generator = create_generators(
            args, backbone.preprocess_image)

        # placeholder target tensor for creating models
        targets = None

    elif input_type == "tfrecord":
        # Create tensorflow iterators
        iterator = tfrecords.create_dataset(list_of_tfrecords, args.batch_size)
        next_element = iterator.get_next()

        # Split into inputs and targets
        inputs = next_element[0]
        targets = [next_element[1], next_element[2]]

        validation_generator = None

    else:
        raise ValueError(
            "{} input type is invalid. Only 'tfrecord' or 'for_generator' "
            "input types are accepted for model training".format(input_type))

    # create the model
    if args.snapshot is not None:
        print('Loading model, this may take a second...')
        model = models.load_model(args.snapshot, backbone_name=args.backbone)
        training_model = model
        anchor_params = None
        if args.config and 'anchor_parameters' in args.config:
            anchor_params = parse_anchor_parameters(args.config)
        prediction_model = retinanet_bbox(model=model,
                                          anchor_params=anchor_params)
    else:
        weights = args.weights
        # default to imagenet if nothing else is specified
        if weights is None and args.imagenet_weights:
            weights = backbone.download_imagenet()

        print('Creating model, this may take a second...')
        if input_type == "fit_generator":
            num_of_classes = train_generator.num_classes()
        else:
            # Add background class
            num_of_classes = len(forest_object.labels.keys())

        model, training_model, prediction_model = create_models(
            backbone_retinanet=backbone.retinanet,
            num_classes=num_of_classes,
            weights=weights,
            multi_gpu=args.multi_gpu,
            freeze_backbone=args.freeze_backbone,
            lr=args.lr,
            config=args.config,
            targets=targets,
            freeze_layers=args.freeze_layers)

    # print model summary
    print(model.summary())

    # this lets the generator compute backbone layer shapes using the actual backbone model
    if 'vgg' in args.backbone or 'densenet' in args.backbone:
        train_generator.compute_shapes = make_shapes_callback(model)
        if validation_generator:
            validation_generator.compute_shapes = train_generator.compute_shapes

    # create the callbacks
    callbacks = create_callbacks(model, training_model, prediction_model,
                                 validation_generator, args, comet_experiment)

    if not args.compute_val_loss:
        validation_generator = None

    # start training
    if input_type == "fit_generator":
        history = training_model.fit_generator(
            generator=train_generator,
            steps_per_epoch=args.steps,
            epochs=args.epochs,
            verbose=1,
            callbacks=callbacks,
            workers=args.workers,
            use_multiprocessing=args.multiprocessing,
            max_queue_size=args.max_queue_size,
            validation_data=validation_generator)
    elif input_type == "tfrecord":

        # Fit model
        history = training_model.fit(x=inputs,
                                     steps_per_epoch=args.steps,
                                     epochs=args.epochs,
                                     callbacks=callbacks)
    else:
        raise ValueError(
            "{} input type is invalid. Only 'tfrecord' or 'for_generator' "
            "input types are accepted for model training".format(input_type))

    # Assign history to deepforest model class
    forest_object.history = history

    # return trained model
    return model, prediction_model, training_model
Example #9
def main(args=None):
    # parse arguments
    if args is None:
        args = sys.argv[1:]
    args = parse_args(args)

    # Export configs
    configs = {
        'backbone': args.backbone,
        'min_size': args.image_min_side,
        'max_size': args.image_max_side
    }

    conf_path = os.path.join(args.export_path, 'configs.json')
    with open(conf_path, 'wt') as f:
        json.dump(configs, f)

    # create object that stores backbone information
    backbone = models.backbone(args.backbone)

    # make sure keras is the minimum required version
    check_keras_version()

    # optionally choose specific GPU
    if args.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    keras.backend.tensorflow_backend.set_session(get_session())

    # optionally load config parameters
    if args.config:
        args.config = read_config_file(args.config)

    # create the generators
    train_generator, validation_generator = create_generators(
        args, backbone.preprocess_image)

    # create the model
    if args.snapshot is not None:
        print('Loading model, this may take a second...')
        model = models.load_model(args.snapshot, backbone_name=args.backbone)
        training_model = model
        anchor_params = None
        if args.config and 'anchor_parameters' in args.config:
            anchor_params = parse_anchor_parameters(args.config)
        prediction_model = retinanet_bbox(model=model,
                                          anchor_params=anchor_params)
    else:
        weights = args.weights
        # default to imagenet if nothing else is specified
        if weights is None and args.imagenet_weights:
            weights = backbone.download_imagenet()

        print('Creating model, this may take a second...')
        model, training_model, prediction_model = create_models(
            backbone_retinanet=backbone.retinanet,
            num_classes=train_generator.num_classes(),
            weights=weights,
            multi_gpu=args.multi_gpu,
            freeze_backbone=args.freeze_backbone,
            lr=args.lr,
            config=args.config)

    # print model summary
    print(model.summary())

    # this lets the generator compute backbone layer shapes using the actual backbone model
    if 'vgg' in args.backbone or 'densenet' in args.backbone:
        train_generator.compute_shapes = make_shapes_callback(model)
        if validation_generator:
            validation_generator.compute_shapes = train_generator.compute_shapes

    # create the callbacks
    callbacks = create_callbacks(
        model,
        training_model,
        prediction_model,
        validation_generator,
        args,
    )

    # Use multiprocessing if workers > 0
    if args.workers > 0:
        use_multiprocessing = True
    else:
        use_multiprocessing = False

    # start training
    training_model.fit_generator(generator=train_generator,
                                 steps_per_epoch=args.steps,
                                 epochs=args.epochs,
                                 verbose=1,
                                 callbacks=callbacks,
                                 workers=args.workers,
                                 use_multiprocessing=use_multiprocessing,
                                 max_queue_size=args.max_queue_size)
Example #10
def main(args=None):
    # parse arguments
    if args is None:
        args = sys.argv[1:]
    args = parse_args(args)

    # create object that stores backbone information
    backbone = models.backbone(args.backbone)

    # make sure keras and tensorflow are the minimum required version
    check_keras_version()
    check_tf_version()

    # optionally choose specific GPU
    if args.gpu is not None:
        setup_gpu(args.gpu)

    # optionally load config parameters
    if args.config:
        args.config = read_config_file(args.config)

    # create the generators
    train_generator, validation_generator = create_generators(args, backbone.preprocess_image)

    # create the model
    if args.snapshot is not None:
        print('Loading model, this may take a second...')
        model            = models.load_model(args.snapshot, backbone_name=args.backbone)
        training_model   = model
        anchor_params    = None
        if args.config and 'anchor_parameters' in args.config:
            anchor_params = parse_anchor_parameters(args.config)
        prediction_model = retinanet_bbox(model=model, anchor_params=anchor_params)
    else:
        weights = args.weights
        # default to imagenet if nothing else is specified
        if weights is None and args.imagenet_weights:
            weights = backbone.download_imagenet()

        print('Creating model, this may take a second...')
        model, training_model, prediction_model = create_models(
            backbone_retinanet=backbone.retinanet,
            num_classes=train_generator.num_classes(),
            weights=weights,
            multi_gpu=args.multi_gpu,
            freeze_backbone=args.freeze_backbone,
            lr=args.lr,
            config=args.config,
            regularisation=args.regularisation
        )

    # print model summary
    #print(model.summary())

    # this lets the generator compute backbone layer shapes using the actual backbone model
    if 'vgg' in args.backbone or 'densenet' in args.backbone:
        train_generator.compute_shapes = make_shapes_callback(model)
        if validation_generator:
            validation_generator.compute_shapes = train_generator.compute_shapes

    # create the callbacks
    callbacks = create_callbacks(
        model,
        training_model,
        prediction_model,
        validation_generator,
        args,
    )

    if not args.compute_val_loss:
        validation_generator = None

    # start training
    return training_model.fit_generator(
        generator=train_generator,
        steps_per_epoch=args.steps,
        epochs=args.epochs,
        verbose=1,
        callbacks=callbacks,
        workers=args.workers,
        use_multiprocessing=args.multiprocessing,
        max_queue_size=args.max_queue_size,
        validation_data=validation_generator,
        initial_epoch=args.initial_epoch
    )
Example #11
def main(args=None):
    log_util.config(__file__)
    logger = logging.getLogger(__name__)
    # parse arguments
    if args is None:
        args = sys.argv[1:]
    args = parse_args(args)

    # make sure keras is the minimum required version
    check_keras_version()

    # optionally choose specific GPU
    if args.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    keras.backend.tensorflow_backend.set_session(get_session())

    # create the generators
    train_generator, validation_generator = create_generators(args)

    if 'resnet' in args.backbone:
        from keras_retinanet.models.resnet import resnet_retinanet as retinanet, custom_objects, download_imagenet
    elif 'mobilenet' in args.backbone:
        from keras_retinanet.models.mobilenet import mobilenet_retinanet as retinanet, custom_objects, download_imagenet
    elif 'vgg' in args.backbone:
        from keras_retinanet.models.vgg import vgg_retinanet as retinanet, custom_objects, download_imagenet
    elif 'densenet' in args.backbone:
        from keras_retinanet.models.densenet import densenet_retinanet as retinanet, custom_objects, download_imagenet
    else:
        raise NotImplementedError('Backbone \'{}\' not implemented.'.format(
            args.backbone))

    # create the model
    if args.snapshot is not None:
        logger.info('Loading model, this may take a second...')
        model = keras.models.load_model(args.snapshot,
                                        custom_objects=custom_objects)
        training_model = model
        prediction_model = model
    else:
        weights = args.weights
        # default to imagenet if nothing else is specified
        if weights is None and args.imagenet_weights:
            weights = download_imagenet(args.backbone)

        logger.info('Creating model, this may take a second...')
        model, training_model, prediction_model = create_models(
            backbone_retinanet=retinanet,
            backbone=args.backbone,
            num_classes=train_generator.num_classes(),
            weights=weights,
            multi_gpu=args.multi_gpu,
            freeze_backbone=args.freeze_backbone)

    logger.info(model.summary())

    # this lets the generator compute backbone layer shapes using the actual backbone model
    if 'vgg' in args.backbone or 'densenet' in args.backbone:
        compute_anchor_targets = functools.partial(
            anchor_targets_bbox, shapes_callback=make_shapes_callback(model))
        train_generator.compute_anchor_targets = compute_anchor_targets
        if validation_generator is not None:
            validation_generator.compute_anchor_targets = compute_anchor_targets

    # create the callbacks
    callbacks = create_callbacks(
        model,
        training_model,
        prediction_model,
        validation_generator,
        args,
    )

    # start training
    training_model.fit_generator(generator=train_generator,
                                 steps_per_epoch=args.steps,
                                 epochs=args.epochs,
                                 verbose=1,
                                 callbacks=callbacks,
                                 workers=4)
Example #12
def main(args=None):
    # parse arguments
    if args is None:
        args = sys.argv[1:]
    args = parse_args(args)

    # create object that stores backbone information
    backbone = models.backbone(args.backbone)

    # make sure keras is the minimum required version
    check_keras_version()

    # optionally choose specific GPU
    if args.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    keras.backend.tensorflow_backend.set_session(get_session())

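    # anchor parameters: an explicitly supplied anchors file (args.anchors) takes precedence,
    # then an anchors.yaml saved beside the snapshot, otherwise the defaults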
    if args.anchors:
        anchors_dict = get_anchors_params(args.anchors)
    elif args.snapshot:
        # look for an anchor parameters config stored beside the model snapshot
        anchors_path = os.path.join(args.snapshot,"anchors.yaml")
        anchors_path = anchors_path if os.path.exists(anchors_path) else None
        anchors_dict = get_anchors_params(anchors_path)
    else:
        # default anchor parameters
        anchors_dict = get_anchors_params(None)
    anchors_params = AnchorParameters(**anchors_dict)

    if args.snapshots and args.anchors:
        # save the anchors config beside the model snapshots
        makedirs(args.snapshot_path)
        anchors_out = os.path.join(
            args.snapshot_path,
            "anchors.yaml"
        )
        save_anchors_params(anchors_dict, anchors_out)

    # create the generators
    train_generator, validation_generator = create_generators(args, backbone.preprocess_image)

    # create the model
    if args.snapshot is not None:
        print('Loading model, this may take a second...')
        model            = models.load_model(args.snapshot, backbone_name=args.backbone)
        training_model   = model
        prediction_model = retinanet_bbox(model=model, anchor_parameters=anchors_params)
    else:
        weights = args.weights
        # default to imagenet if nothing else is specified
        if weights is None and args.imagenet_weights:
            weights = backbone.download_imagenet()
        print('Creating model, this may take a second...')
        model, training_model, prediction_model = create_models(
            backbone_retinanet=backbone.retinanet,
            num_classes=train_generator.num_classes(),
            weights=weights,
            multi_gpu=args.multi_gpu,
            freeze_backbone=args.freeze_backbone,
            anchors_path=args.anchors
        )

    # print model summary
    print(model.summary())

    # this lets the generator compute backbone layer shapes using the actual backbone model
    if 'vgg' in args.backbone or 'densenet' in args.backbone:
        train_generator.compute_shapes = make_shapes_callback(model)
        if validation_generator:
            validation_generator.compute_shapes = train_generator.compute_shapes

    # create the callbacks
    callbacks = create_callbacks(
        model,
        training_model,
        prediction_model,
        validation_generator,
        args,
    )

    # start training
    training_model.fit_generator(
        generator=train_generator,
        steps_per_epoch=args.steps,
        epochs=args.epochs,
        verbose=1,
        callbacks=callbacks,
    )
Example #13
def main(args=None, data=None, DeepForest_config=None, experiment=None):
    # parse arguments
    print("parsing arguments")
    if args is None:
        args = sys.argv[1:]
    args = parse_args(args)
         
    # create object that stores backbone information
    backbone = models.backbone(args.backbone)

    # make sure keras is the minimum required version
    print("Get keras version")
    check_keras_version()

    # optionally choose specific GPU
    if args.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
        
    print("Get session")
    keras.backend.tensorflow_backend.set_session(get_session())

    # create the generators
    print("Creating generators")
    train_generator, validation_generator = create_generators(args, data, DeepForest_config=DeepForest_config)
    
    #Log number of trees trained on
    if experiment:
        experiment.log_parameter("Number of Training Trees", train_generator.total_trees)    
       
    # create the model
    if args.snapshot is not None:
        print('Loading model, this may take a second...\n')
        model            = models.load_model(args.snapshot, backbone_name=args.backbone)
        training_model   = model
        prediction_model = retinanet_bbox(model=model, nms_threshold=DeepForest_config["nms_threshold"])
    else:
        weights = args.weights
        # default to imagenet if nothing else is specified
        if weights is None and args.imagenet_weights:
            
            print("Loading imagenet weights")
            weights = backbone.download_imagenet()

        print('Creating model, this may take a second...')
        model, training_model, prediction_model = create_models(
            backbone_retinanet=backbone.retinanet,
            num_classes=train_generator.num_classes(),
            weights=weights,
            multi_gpu=args.multi_gpu,
            freeze_backbone=args.freeze_backbone,
            nms_threshold=DeepForest_config["nms_threshold"],
            input_channels=DeepForest_config["input_channels"]
        )

    # print model summary
    #print(model.summary())

    # this lets the generator compute backbone layer shapes using the actual backbone model
    if 'vgg' in args.backbone or 'densenet' in args.backbone:
        compute_anchor_targets = functools.partial(anchor_targets_bbox, shapes_callback=make_shapes_callback(model))
        train_generator.compute_anchor_targets = compute_anchor_targets
        if validation_generator is not None:
            validation_generator.compute_anchor_targets = compute_anchor_targets

    # create the callbacks
    callbacks = create_callbacks(
        model,
        training_model,
        prediction_model,
        train_generator,        
        validation_generator,
        args,
        experiment,
        DeepForest_config
    )
    
    #Make sure no overlapping data
    if validation_generator:
        matched = []
        for entry in validation_generator.image_data.values():
            test = entry in train_generator.image_data.values()
            matched.append(test)
        if sum(matched) > 0:
            raise Exception("%.2f percent of validation windows are in training data" % (100 * sum(matched) / train_generator.size()))
        else:
            print("Test passed: No overlapping data in training and validation")
    
    #start training
    history = training_model.fit_generator(
        generator=train_generator,
        steps_per_epoch=train_generator.size() // DeepForest_config["batch_size"],
        epochs=args.epochs,
        verbose=2,
        shuffle=False,
        callbacks=callbacks,
        workers=DeepForest_config["workers"],
        use_multiprocessing=DeepForest_config["use_multiprocessing"],
        max_queue_size=DeepForest_config["max_queue_size"]
    )
    
    # return the path to the snapshot from the final epoch
    saved_models = glob.glob(os.path.join(args.snapshot_path,"*.h5"))
    saved_models.sort()
    
    #Return model if found
    if len(saved_models) > 0:
        return saved_models[-1]
Example #14
def main(args=None, config=None):
    # parse arguments
    if args is None:
        args = sys.argv[1:]
    args = parse_args(args)

    # create object that stores backbone information
    backbone = models.backbone(args.backbone)

    # make sure keras is the minimum required version
    check_keras_version()

    # optionally choose specific GPU
    if args.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    keras.backend.tensorflow_backend.set_session(get_session())

    # create the generators
    train_generator, validation_generator = create_generators(args, config)

    # create the model
    if args.snapshot is not None:
        print('Loading model, this may take a second...')
        model = models.load_model(args.snapshot, backbone_name=args.backbone)
        training_model = model
        prediction_model = retinanet_bbox(model=model)
    else:
        weights = args.weights
        # default to imagenet if nothing else is specified
        if weights is None and args.imagenet_weights:
            weights = backbone.download_imagenet()

        print('Creating model, this may take a second...')
        model, training_model, prediction_model = create_models(
            backbone_retinanet=backbone.retinanet,
            num_classes=train_generator.num_classes(),
            weights=weights,
            multi_gpu=args.multi_gpu,
            freeze_backbone=args.freeze_backbone)

    # print model summary
    print(model.summary())

    # this lets the generator compute backbone layer shapes using the actual backbone model
    if 'vgg' in args.backbone or 'densenet' in args.backbone:
        compute_anchor_targets = functools.partial(
            anchor_targets_bbox, shapes_callback=make_shapes_callback(model))
        train_generator.compute_anchor_targets = compute_anchor_targets
        if validation_generator is not None:
            validation_generator.compute_anchor_targets = compute_anchor_targets

    # create the callbacks
    callbacks = create_callbacks(
        model,
        training_model,
        prediction_model,
        validation_generator,
        args,
    )

    # start training
    training_model.fit_generator(
        generator=train_generator,
        steps_per_epoch=args.steps,
        epochs=args.epochs,
        verbose=1,
        callbacks=callbacks,
    )
Example #15
def main(args=None, model_filename=None):
    # parse arguments
    if args is None:
        args = sys.argv[1:]
    args = parse_args(args)

    # make sure keras and tensorflow are the minimum required version
    check_keras_version()
    check_tf_version()

    # optionally choose specific GPU
    if args.gpu:
        setup_gpu(args.gpu)

    # make save path if it doesn't exist
    if args.save_path is not None and not os.path.exists(args.save_path):
        os.makedirs(args.save_path)

    # optionally load config parameters
    if args.config:
        args.config = read_config_file(args.config)

    # create the generator
    backbone = models.backbone(args.backbone)
    generator = create_generator(args, backbone.preprocess_image)

    # optionally load anchor parameters
    anchor_params = None
    if args.config and 'anchor_parameters' in args.config:
        anchor_params = parse_anchor_parameters(args.config)

    # load the model
    print('Loading model, this may take a second...')
    if args.continual_learning_model == 'dual_memory':  # Continual learning dual-memory modelling treatment
        base_models = LoadModels(args.historical_snapshots_folder,
                                 args.backbone, args.day_number)
        all_models = []
        for model in base_models:
            generator.compute_shapes = make_shapes_callback(model)
            if args.convert_model:
                model = models.convert_model(model,
                                             anchor_params=anchor_params)
            all_models.append(model)

        (average_precisions, inference_time, detections_per_model,
         final_detections) = evaluate_dual_memory_model(
             generator,
             all_models,
             iou_threshold=args.iou_threshold,
             score_threshold=args.score_threshold,
             max_detections=args.max_detections,
             save_path=args.save_path)

        # if bbox_savepath is given, save the bounding box coordinates from the dual-memory model predictions:

        if args.bbox_savepath:
            detections_per_model = [[
                [class_predictions.tolist() for class_predictions in image]
                for image in model_predictions
            ] for model_predictions in detections_per_model]
            detections_with_filenames = {
                'final_detections': final_detections,
                'annotations': args.annotations,
                'detections_per_model': detections_per_model
            }
            with open(args.bbox_savepath, 'wt') as outf:
                json.dump(detections_with_filenames, outf)

            print("Finished dual memory model")
            print(average_precisions, inference_time)

    else:
        if model_filename is None:
            model_filename = args.model
        model = models.load_model(model_filename, backbone_name=args.backbone)

        generator.compute_shapes = make_shapes_callback(model)

        # optionally convert the model
        if args.convert_model:
            model = models.convert_model(model, anchor_params=anchor_params)

        # print model summary
        # print(model.summary())

        # start evaluation
        if args.dataset_type == 'coco':
            from ..utils.coco_eval import evaluate_coco
            evaluate_coco(generator, model, args.score_threshold)
        else:
            average_precisions, inference_time = evaluate(
                generator,
                model,
                iou_threshold=args.iou_threshold,
                score_threshold=args.score_threshold,
                max_detections=args.max_detections,
                save_path=args.save_path)

    # print evaluation
    total_instances = []
    precisions = []
    #labels = []
    for label, (average_precision,
                num_annotations) in average_precisions.items():
        print('{:.0f} instances of class'.format(num_annotations),
              generator.label_to_name(label),
              'with average precision: {:.4f}'.format(average_precision))
        #labels.append(label)
        total_instances.append(num_annotations)
        precisions.append(average_precision)

    if sum(total_instances) == 0:
        print('No test instances found.')
        return

    print('Inference time for {:.0f} images: {:.4f}'.format(
        generator.size(), inference_time))

    print('mAP using the weighted average of precisions among classes: {:.4f}'.
          format(
              sum([a * b for a, b in zip(total_instances, precisions)]) /
              sum(total_instances)))
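    # unweighted mAP: mean AP over the classes that have at least one test instance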
    print('mAP: {:.4f}'.format(
        sum(precisions) / sum(x > 0 for x in total_instances)))

    #print(labels)
    print(precisions)
    print(total_instances)

    # Save mAP and other accuracy statistics to mAP_savepath:

    mAP = sum(precisions) / sum(x > 0 for x in total_instances)
    date = datetime.now().strftime("%Y%m%d%H%M")
    with open(args.mAP_savepath, 'a') as outf:
        outf.write(
            f"{date}, {mAP}, {precisions}, {total_instances}, {model_filename}, {args.continual_learning_model}"
            + "\n")
    return mAP
Example #16
def main(args=None):
    # parse arguments
    if args is None:
        args = sys.argv[1:]
    args = parse_args(args)

    # create object that stores backbone information
    backbone = get_backbone(args.backbone)

    # make sure keras is the minimum required version
    check_keras_version()

    if args.dataset_type == 'run':
        model = load_model(args.model_path,
                           backbone_name=args.backbone,
                           convert=True)
        labels_to_names = {0: 'cell'}
        makedirs(args.save_path)
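        # next(os.walk(...))[2] lists the file names directly inside args.run_path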
        test_imlist = next(os.walk(args.run_path))[2]
        for testimgcnt, img_path in enumerate(test_imlist):
            image = get_image(img_path)
            draw2 = get_image(img_path)
            draw2 = draw2 / np.max(draw2)
            # copy to draw on

            # preprocess image for network
            image = preprocess_image(image)
            image, scale = resize_image(image)
            print(scale)
            # process image
            start = time.time()
            boxes, scores, labels = model.predict_on_batch(
                np.expand_dims(image, axis=0))
            print('processing time: ', time.time() - start)

            # correct for image scale
            boxes /= scale

            # visualize detections
            for box, score, label in zip(boxes[0], scores[0], labels[0]):
                # scores are sorted so we can break
                if score < 0.5:
                    break

                color = label_color(label)
                color = [255, 0, 255]
                b = box.astype(int)
                draw_box(draw2, b, color=color)

                caption = '{} {:.3f}'.format(labels_to_names[label], score)
                draw_caption(draw2, b, caption)
            plt.imsave(
                os.path.join(args.save_path,
                             'retinanet_output_' + str(testimgcnt)), draw2)

    # optionally choose specific GPU
    if args.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    keras.backend.tensorflow_backend.set_session(get_session())

    # create the generators
    train_generator, validation_generator = create_generators(
        args, backbone.preprocess_image)

    # create the model
    if args.snapshot is not None:
        print('Loading model, this may take a second...')
        model = load_model(args.snapshot, backbone_name=args.backbone)
        training_model = model
        prediction_model = retinanet_bbox(model=model)
    else:
        weights = args.weights
        # default to imagenet if nothing else is specified
        if weights is None and args.imagenet_weights:
            weights = backbone.download_imagenet()

        print('Creating model, this may take a second...')
        model, training_model, prediction_model = create_models(
            backbone_retinanet=backbone.retinanet,
            num_classes=train_generator.num_classes(),
            weights=weights,
            multi_gpu=args.multi_gpu,
            freeze_backbone=args.freeze_backbone)

    print(model.summary())

    # this lets the generator compute backbone layer shapes using the actual backbone model
    if any(x in args.backbone for x in ['vgg', 'densenet', 'deepcell']):
        train_generator.compute_shapes = make_shapes_callback(model)
        if validation_generator:
            validation_generator.compute_shapes = train_generator.compute_shapes

    # create the callbacks
    callbacks = create_callbacks(
        model,
        training_model,
        prediction_model,
        validation_generator,
        args,
    )

    # start training
    training_model.fit_generator(
        generator=train_generator,
        steps_per_epoch=args.steps,
        epochs=args.epochs,
        verbose=1,
        callbacks=callbacks,
    )