Example #1
def get_RetinaNet_model():
    from keras.utils import custom_object_scope
    from keras_resnet.layers import BatchNormalization
    from keras_retinanet.layers import UpsampleLike, Anchors, RegressBoxes, ClipBoxes, FilterDetections
    from keras_retinanet.initializers import PriorProbability
    from keras_retinanet import models
    from keras_retinanet.models.retinanet import retinanet_bbox

    custom_objects = {
        'BatchNormalization': BatchNormalization,
        'UpsampleLike': UpsampleLike,
        'Anchors': Anchors,
        'RegressBoxes': RegressBoxes,
        'PriorProbability': PriorProbability,
        'ClipBoxes': ClipBoxes,
        'FilterDetections': FilterDetections,
    }

    with custom_object_scope(custom_objects):
        backbone = models.backbone('resnet50')
        model = backbone.retinanet(500)
        prediction_model = retinanet_bbox(model=model)
        # prediction_model.load_weights("...your weights here...")

    return prediction_model, custom_objects
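
A minimal usage sketch for the model returned above (not part of the original example: it assumes weights have already been loaded into prediction_model, and the image path is hypothetical):

import numpy as np
from keras_retinanet.utils.image import read_image_bgr, preprocess_image, resize_image

prediction_model, custom_objects = get_RetinaNet_model()

image = read_image_bgr("example.jpg")    # hypothetical input image
image = preprocess_image(image)          # backbone-specific preprocessing
image, scale = resize_image(image)       # resizes while keeping aspect ratio, returns the scale used

# the bbox model outputs boxes, scores and labels; boxes are in resized-image coordinates
boxes, scores, labels = prediction_model.predict_on_batch(np.expand_dims(image, axis=0))
boxes /= scale                           # map boxes back to the original image
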
Example #2
def test_lengths(config):
    """Assert that a csv generator and tfrecords create
    the same number of images in a epoch"""

    created_records = tfrecords.create_tfrecords(
        annotations_file="tests/output/testfile_tfrecords.csv",
        class_file="tests/output/classes.csv",
        image_min_side=config["image-min-side"],
        backbone_model=config["backbone"],
        size=100,
        savedir="tests/output/")

    # tfdata
    tf_filenames = find_tf_filenames(path="tests/output/*.tfrecord")

    # keras generator
    backbone = models.backbone(config["backbone"])
    generator = csv_generator.CSVGenerator(
        csv_data_file="tests/output/testfile_tfrecords.csv",
        csv_class_file="tests/output/classes.csv",
        image_min_side=config["image-min-side"],
        preprocess_image=backbone.preprocess_image,
    )

    fit_generator_length = generator.size()
    assert len(tf_filenames) == fit_generator_length
Example #3
def main(args=None):
    from keras import backend as K

    # parse arguments
    if args is None:
        args = sys.argv[1:]
    args = parse_args(args)
    print('Arguments: {}'.format(args))

    # create object that stores backbone information
    backbone = models.backbone(args.backbone)

    # make sure keras is the minimum required version
    check_keras_version()

    # optionally choose specific GPU
    if args.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    keras.backend.tensorflow_backend.set_session(get_session())

    # create the generators
    validation_generator = create_generators(args, backbone.preprocess_image)

    # create the model
    weights = args.weights
    # default to imagenet if nothing else is specified
    if weights is None and args.imagenet_weights:
        weights = backbone.download_imagenet()

    print('Creating model, this may take a second...')
    model, prediction_model = create_models(
        backbone_retinanet=backbone.retinanet,
        num_classes=validation_generator.num_classes(),
        weights=weights,
        multi_gpu=args.multi_gpu,
        freeze_backbone=args.freeze_backbone)

    # evaluate model
    print('Evaluating model from {}, this may take a while...'.format(
        args.weights))
    average_precisions = evaluate(
        validation_generator,
        prediction_model,
        iou_threshold=0.5,
        score_threshold=0.05,
        max_detections=300,
    )

    # print evaluation
    present_classes = 0
    precision = 0
    for label, (average_precision,
                num_annotations) in average_precisions.items():
        print('{:.0f} instances of class'.format(num_annotations),
              validation_generator.label_to_name(label),
              'with average precision: {:.4f}'.format(average_precision))
        if num_annotations > 0:
            present_classes += 1
            precision += average_precision
    print('mAP: {:.4f}'.format(precision / present_classes))
Example #4
    def use_release(self, gpus=1):
        '''Use the latest DeepForest model release from github and load the model. Optionally download the release if it doesn't exist locally.

        Args:
            gpus: number of gpus to parallelize over, defaults to 1

        Returns:
            model (object): A trained keras model
        '''
        #Download latest model from github release
        release_tag, self.weights = utilities.use_release()

        #load saved model and tag release
        self.__release_version__ = release_tag
        print("Loading pre-built model: {}".format(release_tag))

        if gpus == 1:
            with warnings.catch_warnings():
                #Suppress compile warning, not relevant here
                warnings.filterwarnings("ignore", category=UserWarning)
                self.model = utilities.read_model(self.weights, self.config)

            #Convert model
            self.prediction_model = convert_model(self.model)
        elif gpus > 1:
            backbone = models.backbone(self.config["backbone"])
            n_classes = len(self.labels.keys())
            self.model, self.training_model, self.prediction_model = create_models(
                backbone.retinanet,
                num_classes=n_classes,
                weights=self.weights,
                multi_gpu=gpus)

        #add to config
        self.config["weights"] = self.weights
Example #5
    def __init__(self,
                 backbone="resnet50",
                 random_transform=False,
                 batch_size=2,
                 step=100,
                 epoch=40,
                 anchor_settings=None):
        """ Initializer
            Args:
                backbone
                    string indicate which backbone network to use (only for ones defined in
                    keras-retinanet)
                random_transform
                    boolean indicate whether image is randomly transformed when training
                batch_size
                    number of images sent for each step during training
                step
                    number of steps(batches) to run for each epoch during training
                epoch
                    number of epochs for this training
                anchor_settings
                    a list of lists indicate anchor box parameters. please reference
                    keras-retinanet's document for how to set up anchor box parameters
        """
        check_keras_version()
        self._backbone_name = backbone
        self._backbone = models.backbone(backbone)
        self._img_preprocessor = self._backbone.preprocess_image
        self._batch_size = batch_size
        self._step = step
        self._epoch = epoch

        if random_transform:
            self._transform_generator = random_transform_generator(
                min_rotation=-0.1,
                max_rotation=0.1,
                min_translation=(-0.1, -0.1),
                max_translation=(0.1, 0.1),
                min_shear=-0.1,
                max_shear=0.1,
                min_scaling=(0.9, 0.9),
                max_scaling=(1.1, 1.1),
                flip_x_chance=0.5,
                flip_y_chance=0.5,
            )
        else:
            self._transform_generator = random_transform_generator(
                flip_x_chance=0.5)

        self._common_args = {
            'batch_size': batch_size,
            'preprocess_image': self._img_preprocessor
        }

        if anchor_settings:
            self._anchor_params = AnchorParameters(*anchor_settings)
        else:
            self._anchor_params = AnchorParameters.default
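
For reference, a sketch of what anchor_settings could look like: AnchorParameters takes (sizes, strides, ratios, scales), so a list of four sequences in that order is expected. The values below mirror keras-retinanet's defaults and are purely illustrative, and the surrounding class name Trainer is hypothetical:

import numpy as np

anchor_settings = [
    [32, 64, 128, 256, 512],      # sizes, one per pyramid level (P3-P7)
    [8, 16, 32, 64, 128],         # strides, one per pyramid level
    np.array([0.5, 1.0, 2.0]),    # aspect ratios evaluated at each anchor position
    np.array([1.0, 1.26, 1.59]),  # scales evaluated at each anchor position
]
trainer = Trainer(backbone="resnet50", anchor_settings=anchor_settings)
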
Example #6
    def _load_model_with_nms(self, test_args):
        """ This is mostly copied fomr retinanet.py """

        backbone_name = test_args.get('DETECTOR', 'backbone')
        print(backbone_name)
        print(test_args.get('DETECTOR', 'detector_model_path'))
        model = keras.models.load_model(
            str(test_args.get('DETECTOR', 'detector_model_path')),
            custom_objects=backbone(backbone_name).custom_objects)

        # compute the anchors
        features = [
            model.get_layer(name).output
            for name in ['P3', 'P4', 'P5', 'P6', 'P7']
        ]
        anchors = build_anchors(AnchorParameters.default, features)

        # we expect the anchors, regression and classification values as first
        # output
        print(len(model.outputs))
        regression = model.outputs[0]
        classification = model.outputs[1]
        print(classification.shape[1])
        print(regression.shape)

        # "other" can be any additional output from custom submodels,
        # by default this will be []
        other = model.outputs[2:]

        # apply predicted regression to anchors
        boxes = layers.RegressBoxes(name='boxes')([anchors, regression])
        boxes = layers.ClipBoxes(name='clipped_boxes')(
            [model.inputs[0], boxes])

        # filter detections (apply NMS / score threshold / select top-k)
        #detections = layers.FilterDetections(
        #        nms=True,
        #        name='filtered_detections',
        #        nms_threshold = test_args.getfloat('DETECTOR','nms_threshold'),
        #        score_threshold = test_args.getfloat('DETECTOR','det_threshold'),
        #        max_detections = test_args.getint('DETECTOR', 'max_detections')
        #        )([boxes, classification] + other)
        detections = layers.filter_detections.filter_detections(
            boxes=boxes,
            classification=classification,
            other=other,
            nms=True,
            nms_threshold=test_args.getfloat('DETECTOR', 'nms_threshold'),
            score_threshold=test_args.getfloat('DETECTOR', 'det_threshold'),
            max_detections=test_args.getint('DETECTOR', 'max_detections'))

        outputs = detections

        # construct the model
        return keras.models.Model(inputs=model.inputs,
                                  outputs=outputs,
                                  name='retinanet-bbox')
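
Since the method above rebuilds the inference graph by hand, note that keras-retinanet ships a helper that performs roughly the same steps (anchors, box regression, clipping and NMS on top of a training model); a sketch:

from keras_retinanet.models import convert_model

prediction_model = convert_model(model, nms=True, class_specific_filter=True)
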
Example #7
def main(args=None):
    args = parse_args(args)
    action = args.action
    backbone = models.backbone("resnet50")
    modelpath = args.model_path
    cocopath = args.coco_path
    generator = create_generator(args, backbone.preprocess_image)
    quantize_retinanet(modelpath, cocopath, action)
    evaluate(generator, action, args.score_threshold)
Example #8
def main(args=None):
    # parse arguments
    if args is None:
        args = sys.argv[1:]
    args = parse_args(args)

    # optionally choose specific GPU
    # if args.gpu:
    #     os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu

    with tf.device('/device:GPU:3'):
        config = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False, device_count={'CPU': 1, 'GPU': 1})
        config.gpu_options.allow_growth = True
        session = tf.Session(config=config)
        keras.backend.set_session(session)

    # create the generators
    val_generator = create_val_generator(args)


    weights = args.weights
    # default to imagenet if nothing else is specified

    backbone = models.backbone(args.backbone)

    print('Creating model, this may take a second...')
    # model = model_with_weights(backbone(val_generator.num_classes(), num_anchors=None, modifier=freeze_model), weights=weights, skip_mismatch=True)
    model, training_model, prediction_model = create_models(
        backbone_retinanet=backbone.retinanet,
        num_classes=val_generator.num_classes(),
        weights=weights,
        multi_gpu=args.multi_gpu,
        freeze_backbone=args.freeze_backbone,
        lr=args.lr,
        config=args.config
    )

    # print model summary
    print(model.summary())

    evaluation = Evaluate(val_generator, weighted_average=args.weighted_average, eval_batch_size=args.val_batch_size)
    evaluation = RedirectModel(evaluation, prediction_model)

    generator = val_generator
    iou_threshold = 0.5
    score_threshold = 0.05
    max_detections = 100
    save_path = None
    weighted_average = False
    verbose = True
    eval_batch_size = 16

    from keras_retinanet.utils.sail_eval_data_generator import SAIL_EvalDataGenerator
    eval_batch_datagen = SAIL_EvalDataGenerator(generator, batch_size=eval_batch_size)
    boxes, scores, labels = model.predict_generator(eval_batch_datagen, steps=len(eval_batch_datagen), verbose=True,
                                                    workers=0, use_multiprocessing=False)[:3]
Example #9
def main(args=None):

    # parse arguments
    if args is None:
        args = sys.argv[1:]
    args = parse_args(args)

    # create object that stores backbone information
    backbone = models.backbone(args.backbone)

    # optionally load config parameters
    if args.config:
        args.config = read_config_file(args.config)

    # create the generators
    train_generator = create_generators(args, backbone.preprocess_image)

    # create the model
    if args.snapshot is not None:
        print('Loading model')
        model = models.load_model(args.snapshot, backbone_name=args.backbone)
        training_model = model
        anchor_params = None
        if args.config and 'anchor_parameters' in args.config:
            anchor_params = parse_anchor_parameters(args.config)
        prediction_model = retinanet_bbox(model=model,
                                          anchor_params=anchor_params)
    else:
        weights = args.weights
        if weights is None and args.imagenet_weights:
            weights = backbone.download_imagenet()
        print('Creating model')
        model, training_model, prediction_model = create_models(
            backbone_retinanet=backbone.retinanet,
            num_classes=train_generator.num_classes(),
            weights=weights,
            multi_gpu=args.multi_gpu,
            freeze_backbone=args.freeze_backbone,
            lr=args.lr,
            config=args.config)

    # this lets the generator compute backbone layer shapes using the actual backbone model
    if 'densenet' in args.backbone:
        train_generator.compute_shapes = make_shapes_callback(model)

    # create the callbacks
    callbacks = create_callbacks(model, training_model, prediction_model, args)

    # start training
    return training_model.fit_generator(
        generator=train_generator,
        steps_per_epoch=train_generator.size() // args.batch_size,
        epochs=args.epochs,
        verbose=1,
        callbacks=callbacks)
Example #10
def load_retina(weights: str, num_classes: int,
                anchors_wrap: AnchorParametersWrap,
                backbone_name: str) -> Model:
    """ Loads retinanet with weights. """
    ret = models.backbone(backbone_name=backbone_name).retinanet(
        num_classes=num_classes, num_anchors=anchors_wrap.num_anchors)
    ret.load_weights(weights)
    ret = models.convert_model(model=ret,
                               nms=True,
                               class_specific_filter=True,
                               anchor_params=anchors_wrap.anchors)
    return ret
Example #11
def get_joint_detection_model(model_path, model_type):
    """
    Input -> Model path for the object detection model
            Model type-> Foot or Hand
    Output -> Inference model for getting the predictions on test images
    
    """
    # config_file_path = '/usr/local/bin/config'
    if model_type == 'Foot_detection':
        # with open('/usr/local/bin/src/config.ini','w') as f:
        #     f.write('[anchor_parameters]\nsizes   = 32 64 128 256 512 1024\nstrides = 8 16 32 64 128 256\nratios  = 1.2 1.5 2 2.5 3\nscales  =1 1.5 2\n')

        model, training_model, prediction_model = create_models(
        backbone_retinanet=backbone('resnet50').retinanet,
        num_classes=5,
        weights=None,
        multi_gpu=False,
        freeze_backbone=True,
        lr=1e-3,
        config=read_config_file('/usr/local/bin/Config files/config_foot.ini'))

        training_model.load_weights(model_path)
        infer_model = convert_model(training_model, anchor_params = parse_anchor_parameters(read_config_file('/usr/local/bin/Config files/config_foot.ini')))

    elif model_type == 'Hand_detection':
        # with open('/usr/local/bin/src/config.ini','w') as f:
        #     f.write('[anchor_parameters]\nsizes   = 32 64 128 256 512 1024\nstrides = 8 16 32 64 128 256\nratios  = 1 1.5 2 2.5 3\nscales  = 1 1.2 1.6\n')

        model, training_model, prediction_model = create_models(
            backbone_retinanet=backbone('resnet50').retinanet,
            num_classes=6,
            weights=None,
            multi_gpu=False,
            freeze_backbone=True,
            lr=1e-3,
            config=read_config_file('/usr/local/bin/Config files/config_hand.ini'))
        training_model.load_weights(model_path)
        infer_model = convert_model(training_model, anchor_params = parse_anchor_parameters(read_config_file('/usr/local/bin/Config files/config_hand.ini')))
    
    return infer_model
Example #12
def retinanet_train(tiny=False):
    labels_path = bdd100k_labels_path
    val_json = labels_path + 'bdd100k_labels_images_val.json'
    train_json = labels_path + 'bdd100k_labels_images_train.json'
    val_annot = labels_path + 'val_annotations.csv'
    train_annot = labels_path + 'train_annotations.csv'

    num_data = 70000

    classes = bu.annotate(val_json, val_annot, labels_path, bdd100k_val_path)
    cl_map_path = bu.class_mapping(classes,
                                   output_csv=labels_path +
                                   'class_mapping.csv')
    bu.annotate(train_json, train_annot, bdd100k_labels_path,
                bdd100k_train_path)

    # Hyper-parameters
    batch_size = 1
    steps_per_epoch = np.ceil(num_data / batch_size)

    for m in models:
        print('Generating %s backbone...' % m)
        backbone = kr_models.backbone(m)
        weights = backbone.download_imagenet()
        print('Creating generators...')
        tr_gen, val_gen = mt.create_generators(
            train_annotations=train_annot,
            val_annotations=val_annot,
            class_mapping=cl_map_path,
            base_dir='',
            preprocess_image=backbone.preprocess_image,
            batch_size=batch_size)
        print('Creating models...')
        model, training_model, prediction_model = kr_train.create_models(
            backbone.retinanet, tr_gen.num_classes(), weights)
        print('Creating callbacks...')
        callbacks = mt.create_callbacks(model,
                                        batch_size,
                                        'test',
                                        tensorboard_dir=log_path)

        print('Training...')
        training_model.fit_generator(
            generator=tr_gen,
            steps_per_epoch=steps_per_epoch,  # 10000,
            epochs=2,
            verbose=1,
            callbacks=callbacks,
            workers=4,  # 1
            use_multiprocessing=True,  # False,
            max_queue_size=10,
            validation_data=val_gen)
Example #13
def create_model():
    model_path = Path(
        '/data/students_home/fschipani/thesis/keras-retinanet/snapshots/resnet50_coco_best_v2.1.0.h5'
    )
    model = models.backbone('resnet50').retinanet(
        num_classes=train_generator.num_classes())
    model.load_weights(model_path, by_name=True, skip_mismatch=True)
    model.compile(loss={
        'regression': smooth_l1(),
        'classification': focal()
    },
                  optimizer=keras.optimizers.adam(lr=1e-5, clipnorm=0.001))
    return model
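
A sketch of how the compiled model above might be trained; train_generator is assumed to already exist (as in the original snippet), and batch_size and the epoch count are placeholders:

model = create_model()
model.fit_generator(
    generator=train_generator,
    steps_per_epoch=train_generator.size() // batch_size,
    epochs=10,
    verbose=1,
)
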
Example #14
def load_training_model(num_classes, args):
    backbone = models.backbone(args.backbone)
    weights = backbone.download_imagenet()

    model, training_model, prediction_model = create_models(
        backbone_retinanet=backbone.retinanet,
        num_classes=num_classes,
        weights=weights,
        multi_gpu=args.multi_gpu,
        freeze_backbone=args.freeze_backbone,
        lr=args.lr,
        config=args.config)
    return training_model
Example #15
def retinanet_tiny_train():
    labels_path = bdd100k_labels_path
    val_json = labels_path + 'bdd100k_labels_images_val.json'

    num_data = 7000
    batch_size = 1
    steps_per_epoch = np.ceil(num_data / batch_size)

    train_annot, val_annot = bu.annotate_tiny(val_json,
                                              labels_path,
                                              bdd100k_val_path,
                                              overwrite=True)
    cl_map_path = bu.class_mapping(input_json=val_json,
                                   output_csv=labels_path +
                                   'class_mapping.csv')

    for m in models:
        print('Generating %s backbone...' % m)
        backbone = kr_models.backbone(m)
        weights = backbone.download_imagenet()
        print('Creating generators...')
        tr_gen, val_gen = bu.create_generators(
            train_annotations=train_annot,
            val_annotations=val_annot,
            class_mapping=cl_map_path,
            base_dir='',
            preprocess_image=backbone.preprocess_image,
            batch_size=batch_size)
        print('Creating models...')
        model, training_model, prediction_model = kr_train.create_models(
            backbone.retinanet, tr_gen.num_classes(), weights)
        print('Creating callbacks...')
        callbacks = bu.create_callbacks(model,
                                        batch_size,
                                        snapshots_path=retinanet_h5_path,
                                        tensorboard_dir=log_path,
                                        backbone=m,
                                        dataset_type='bdd10k')

        print('Training...')
        training_model.fit_generator(
            generator=tr_gen,
            steps_per_epoch=steps_per_epoch,  # 10000,
            epochs=50,
            verbose=1,
            callbacks=callbacks,
            workers=1,  # 1
            use_multiprocessing=False,  # False,
            max_queue_size=10,
            validation_data=val_gen)
Example #16
def _initialize_model(num_classes):
    """Initialize our model to train with.
    """

    # Suggested to initialize model on cpu before turning into a
    # multi_gpu model to save gpu memory
    with tf.device('/cpu:0'):
        model = models.backbone('resnet50').retinanet(num_classes=num_classes)
        model.load_weights(config.WEIGHTS_PATH,
                           by_name=True, skip_mismatch=True)

    gpus = len([i for i in device_lib.list_local_devices()
                if i.device_type == 'GPU'])

    if gpus > 1:
        return model, multi_gpu_model(model, gpus=gpus)
    return model, model
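
A sketch of how the returned pair is typically used: compile and fit the (possibly multi-GPU) parallel model, but save weights from the single-GPU template so the checkpoint can be reloaded anywhere. The loss setup mirrors the other examples in this listing; the output path is hypothetical:

import keras
from keras_retinanet import losses

template_model, parallel_model = _initialize_model(num_classes=1)
parallel_model.compile(
    loss={'regression': losses.smooth_l1(), 'classification': losses.focal()},
    optimizer=keras.optimizers.adam(lr=1e-5, clipnorm=0.001),
)
# ...fit parallel_model on a generator here...
template_model.save_weights('retinanet_resnet50.h5')
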
Example #17
    def __init__(self, weights=None, saved_model=None):
        self.weights = weights
        self.saved_model = saved_model

        # Read config file - if a config file exists in local dir use it,
        # if not use installed.
        if os.path.exists("deepforest_config.yml"):
            config_path = "deepforest_config.yml"
        else:
            try:
                config_path = get_data("deepforest_config.yml")
            except Exception as e:
                raise ValueError(
                    "No deepforest_config.yml found either in local "
                    "directory or in installed package location. {}".format(e))

        print("Reading config file: {}".format(config_path))
        self.config = utilities.read_config(config_path)

        # Create a label dict, defaults to "Tree"
        self.read_classes()

        # release version id to flag if release is being used
        self.__release_version__ = None

        # Load saved model if needed
        if self.saved_model:
            print("Loading saved model")
            # Capture user warning, not relevant here
            with warnings.catch_warnings():
                warnings.filterwarnings("ignore", category=UserWarning)
                self.model = models.load_model(saved_model)
                self.prediction_model = convert_model(self.model)

        elif self.weights:
            print("Creating model from weights")
            backbone = models.backbone(self.config["backbone"])
            self.model, self.training_model, self.prediction_model = create_models(
                backbone.retinanet, num_classes=1, weights=self.weights)
        else:
            print(
                "A blank deepforest object created. "
                "To perform prediction, either train or load an existing model."
            )
            self.model = None
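
A minimal usage sketch, assuming this constructor belongs to the deepforest.deepforest wrapper class from the keras-based DeepForest package (the image path and the predict_image call are illustrative):

from deepforest import deepforest

model = deepforest.deepforest()
model.use_release()    # fetch and load the prebuilt release, as in Example #4
# boxes = model.predict_image(image_path="tree_image.png", return_plot=False)
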
Example #18
def run(args=None):
    args = parse_args(args)
    backbone = models.backbone(args.backbone)
    if args.config:
        args.config = read_config_file(args.config)
    _, generator = create_generators(args, backbone.preprocess_image)

    weights = os.path.join(args.snapshot_path, 'checkpoint.h5')
    _, _, model = create_models(
        backbone_retinanet=backbone.retinanet,
        num_classes=10,
        weights=weights,
        multi_gpu=args.multi_gpu,
        freeze_backbone=args.freeze_backbone,
        lr=args.lr,
        config=args.config
    )

    infer(generator, model)
Example #19
def test_equivalence(config, setup_create_tensors):
    # unpack created tensors
    tf_inputs, tf_targets = setup_create_tensors

    # the image going into tensorflow should be equivalent
    # to the image from the fit_generator
    backbone = models.backbone(config["backbone"])

    # CSV generator
    generator = csv_generator.CSVGenerator(
        csv_data_file="tests/output/testfile_tfrecords.csv",
        csv_class_file="tests/data/classes.csv",
        image_min_side=config["image-min-side"],
        preprocess_image=backbone.preprocess_image,
    )

    # find file in randomize generator group
    first_file = generator.groups[0][0]
    gen_filename = os.path.join(generator.base_dir, generator.image_names[first_file])
    original_image = generator.load_image(first_file)
    inputs, targets = generator.__getitem__(0)

    image = inputs[0, ...]
    targets = targets[0][0, ...]

    with tf.Session() as sess:
        # seek the randomized image to match
        tf_inputs, tf_targets = sess.run([tf_inputs, tf_targets])

    # assert filename is the same as generator
    # assert gen_filename == filename
    # tf_image = tf_image[0,...]
    tf_inputs = tf_inputs[0, ...]
    tf_targets = tf_targets[0][0, ...]

    # Same shape
    # assert tf_image.shape == image.shape
    assert tf_inputs.shape == image.shape
    assert tf_targets.shape == targets.shape
Example #20
def test_multigpu_training():

    experiment = Experiment(api_key="ypQZhYfs3nSyKzOfz13iuJpj2",
                            project_name='deeplidar',
                            log_code=True)

    DeepForest_config = config.load_config(dir="..")
    DeepForest_config["save_image_path"] = "../snapshots/"

    data = generators.load_retraining_data(DeepForest_config)
    train_generator, validation_generator = generators.create_h5_generators(
        data, DeepForest_config=DeepForest_config)

    #imagenet pretraining weights
    backbone = models.backbone(DeepForest_config["backbone"])
    weights = backbone.download_imagenet()

    model, training_model, prediction_model = create_models(
        backbone_retinanet=backbone.retinanet,
        num_classes=train_generator.num_classes(),
        weights=weights,
        multi_gpu=DeepForest_config["num_GPUs"],
        freeze_backbone=False,
        nms_threshold=DeepForest_config["nms_threshold"],
        input_channels=DeepForest_config["input_channels"])

    #start training
    history = training_model.fit_generator(
        generator=train_generator,
        steps_per_epoch=train_generator.size() /
        DeepForest_config["batch_size"],
        epochs=DeepForest_config["epochs"],
        verbose=2,
        shuffle=False,
        workers=DeepForest_config["workers"],
        use_multiprocessing=DeepForest_config["use_multiprocessing"],
        max_queue_size=DeepForest_config["max_queue_size"],
        experiment=experiment)
Example #21
evaluation = True
# To resume training, point snapshot at the file saved by the last training run
snapshot = './snapshots/resnet50_csv_03.h5'
# Whether to save snapshots, and the folder to save them in
snapshots = True
snapshot_path = './snapshots'

tensorboard_dir = './logs'

weights = None
imagenet_weights = True
batch_size = 1
image_min_side = 225
image_max_side = 300

backbone_model = models.backbone('resnet50')
freeze_backbone = False
common_args = {
    'batch_size': batch_size,
    'image_min_side': image_min_side,
    'image_max_side': image_max_side,
    'preprocess_image': backbone_model.preprocess_image,
}

train_generator = CSVGenerator(annotations_path, classes_path, **common_args)
if validations_path != '':
    validation_generator = CSVGenerator(validations_path, classes_path,
                                        **common_args)
else:
    validation_generator = None
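
A sketch of how this configuration block might continue into model creation and compilation, mirroring the other examples in this listing (imports shown for completeness; backbone_model, weights, imagenet_weights and train_generator come from the snippet above):

import keras
from keras_retinanet import losses

model = backbone_model.retinanet(train_generator.num_classes())
if weights is None and imagenet_weights:
    weights = backbone_model.download_imagenet()
model.load_weights(weights, by_name=True, skip_mismatch=True)
model.compile(
    loss={'regression': losses.smooth_l1(), 'classification': losses.focal()},
    optimizer=keras.optimizers.adam(lr=1e-5, clipnorm=0.001),
)
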
Example #22
def main(args=None):
    # parse arguments
    if args is None:
        args = sys.argv[1:]
    args = parse_args(args)

    # create object that stores backbone information
    backbone = models.backbone(args.backbone)

    # make sure keras is the minimum required version
    check_keras_version()

    # optionally choose specific GPU
    if args.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    keras.backend.set_session(get_session())

    # optionally load config parameters
    if args.config:
        args.config = read_config_file(args.config)

    # create the generators
    train_generator, validation_generator = create_generators(
        args, backbone.preprocess_image)

    # create the model
    if args.snapshot is not None:
        print('Loading model, this may take a second...')
        model = models.load_model(args.snapshot, backbone_name=args.backbone)
        training_model = model
        anchor_params = None
        if args.config and 'anchor_parameters' in args.config:
            anchor_params = parse_anchor_parameters(args.config)
        prediction_model = retinanet_bbox(model=model,
                                          anchor_params=anchor_params)
    else:
        weights = args.weights
        # default to imagenet if nothing else is specified
        if weights is None and args.imagenet_weights:
            weights = backbone.download_imagenet()

        print('Creating model, this may take a second...')
        model, training_model, prediction_model = create_models(
            backbone_retinanet=backbone.retinanet,
            num_classes=train_generator.num_classes(),
            weights=weights,
            multi_gpu=args.multi_gpu,
            freeze_backbone=args.freeze_backbone,
            lr=args.lr,
            config=args.config)

    # print model summary
    print(model.summary())

    # this lets the generator compute backbone layer shapes using the actual backbone model
    # if 'vgg' in args.backbone or 'densenet' in args.backbone:
    #     train_generator.compute_shapes = make_shapes_callback(model)
    #     if validation_generator:
    #         validation_generator.compute_shapes = train_generator.compute_shapes

    # create the callbacks

    callbacks = create_callbacks(model,
                                 training_model,
                                 prediction_model,
                                 validation_generator,
                                 args,
                                 eval_batch_size=args.val_batch_size)

    # Use multiprocessing if workers > 0
    if args.workers > 0:
        use_multiprocessing = True
    else:
        use_multiprocessing = False

    # start training
    training_model.fit_generator(generator=train_generator,
                                 steps_per_epoch=args.steps,
                                 epochs=args.epochs,
                                 verbose=1,
                                 callbacks=callbacks,
                                 workers=args.workers,
                                 use_multiprocessing=use_multiprocessing,
                                 max_queue_size=args.max_queue_size)
Example #23
def test_equivalence(config, setup_create_tensors):
    
    #unpack created tensors
    tf_inputs, tf_targets = setup_create_tensors
    
    #the image going into tensorflow should be equivalent to the image from the fit_generator
    backbone = models.backbone(config["backbone"])

    #CSV generator
    generator = csv_generator.CSVGenerator(
        csv_data_file="tests/data/testfile_tfrecords.csv",
        csv_class_file="tests/data/classes.csv",
        image_min_side=config["image-min-side"],
        preprocess_image=backbone.preprocess_image,
    )

    #find file in randomize generator group
    first_file = generator.groups[0][0]
    gen_filename = os.path.join(generator.base_dir, generator.image_names[first_file])
    original_image = generator.load_image(first_file)
    inputs, targets = generator.__getitem__(0)

    image = inputs[0,...]
    targets = targets[0][0,...]

    with tf.Session() as sess:
        #seek the randomized image to match
        tf_inputs, tf_targets = sess.run([tf_inputs,tf_targets])

    #assert filename is the same as generator
    #assert gen_filename == filename
    #tf_image = tf_image[0,...]
    tf_inputs = tf_inputs[0,...]
    tf_targets = tf_targets[0][0,...]

    #Same shape
    #assert tf_image.shape == image.shape
    assert tf_inputs.shape == image.shape
    assert tf_targets.shape == targets.shape

    #Same values, slightly duplicative of the above, but useful for debugging.
    #Saved array is the same as generator image
    #np.testing.assert_array_equal(image, tf_image)

    #Loaded array should be the same as the generator's; this is not currently true because the opencv and tensorflow interpolation methods differ slightly; waiting for tf 2.0
    #np.testing.assert_array_equal(tf_loaded, tf_image)

    ##Useful for debug to plot
    #fig = plt.figure()
    #ax1 = fig.add_subplot(1,4,1)
    #ax1.title.set_text('Fit Gen Original')    
    #plt.imshow(original_image[...,::-1])
    #ax1 = fig.add_subplot(1,4,2)
    #ax1.title.set_text('Fit Generator')    
    #plt.imshow(image)
    #ax2 = fig.add_subplot(1,4,3)
    #ax2 = fig.add_subplot(1,4,4)
    #ax2.title.set_text('Loaded Image')        
    #plt.imshow(tf_inputs)
    #plt.show()        

#Check for bad file types
#@pytest.fixture()
#def bad_annotations():
    #annotations = utilities.xml_to_annotations(get_data("OSBS_029.xml"))
    #f = "tests/data/testfile_error_deepforest.csv"
    #annotations.to_csv(f,index=False,header=False)
    #return f   

#def test_tfdataset_error(bad_annotations):    
    #with pytest.raises(ValueError):    
        #records_created = tfrecords.create_tfrecords(annotations_file=bad_annotations, class_file=get_data("classes.csv"), image_min_side=800, backbone_model="resnet50", size=100, savedir="tests/data/")
Example #24
        'regression': losses.smooth_l1(),
        'classification': losses.focal()
    },
                           optimizer=keras.optimizers.adam(lr=lr,
                                                           clipnorm=0.001))

    return model, training_model, prediction_model


if __name__ == '__main__':
    smin, smax = 618, 800

    keras.backend.tensorflow_backend.set_session(get_session())

    model_path = os.path.join('snapshots', '7', 'resnet18_csv_19.h5')
    backbone = models.backbone('resnet18')

    labels_to_names = {
        0: 'goodhelmet',
        1: 'LP',
        2: 'goodshoes',
        3: 'badshoes',
        4: 'badhelmet',
        5: 'person'
    }
    # labels_to_names = {0: 'person', 1: 'helmet', 2: 'LP', 3: 'goodshoes', 4: 'badshoes'}
    # labels_to_names = {0: 'person', 1: 'LP', 2: 'badshoes', 3: 'unsafe_hat', 4: 'Boot', 5: 'helmet',
    #                    6: 'goodshoes'}
    main_model, training_model, prediction_model = create_models(
        backbone.retinanet, len(labels_to_names))
    main_model.load_weights(model_path)
Example #25
def main(args=None):
    global config
    from keras import backend as K

    # parse arguments
    if args is None:
        args = sys.argv[1:]
    args = parse_args(args)
    print('Arguments: {}'.format(args))

    # create object that stores backbone information
    backbone = models.backbone(args.backbone)

    # make sure keras is the minimum required version
    check_keras_version()

    # optionally choose specific GPU
    if args.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    keras.backend.tensorflow_backend.set_session(get_session())

    # create the generators
    train_generator, validation_generator = create_generators(
        args, backbone.preprocess_image)

    # create the model
    if args.snapshot is not None:
        print('Loading model, this may take a second...')
        model = models.load_model(args.snapshot, backbone_name=args.backbone)
        training_model = model
        anchor_params = None
        if 'anchor_parameters' in config:
            anchor_params = parse_anchor_parameters(config)
        prediction_model = retinanet_bbox(model=model,
                                          anchor_params=anchor_params)
    else:
        weights = args.weights
        # default to imagenet if nothing else is specified
        if weights is None and args.imagenet_weights:
            weights = backbone.download_imagenet()

        print('Creating model, this may take a second...')
        model, training_model, prediction_model = create_models(
            backbone_retinanet=backbone.retinanet,
            num_classes=train_generator.num_classes(),
            weights=weights,
            args=args,
            multi_gpu=args.multi_gpu,
            freeze_backbone=args.freeze_backbone,
            config=config)

    # print model summary
    print(model.summary())

    print('Learning rate: {}'.format(K.get_value(model.optimizer.lr)))
    if args.lr > 0.0:
        K.set_value(model.optimizer.lr, args.lr)
        print('Updated learning rate: {}'.format(
            K.get_value(model.optimizer.lr)))

    # this lets the generator compute backbone layer shapes using the actual backbone model
    if 'vgg' in args.backbone or 'densenet' in args.backbone:
        train_generator.compute_shapes = make_shapes_callback(model)
        if validation_generator:
            validation_generator.compute_shapes = train_generator.compute_shapes

    # create the callbacks
    callbacks = create_callbacks(
        model,
        training_model,
        prediction_model,
        validation_generator,
        args,
    )

    init_epoch = 0
    try:
        if args.snapshot:
            init_epoch = int(args.snapshot.split("_")[-2])
    except:
        pass
    # init_epoch = 6
    print('Init epoch: {}'.format(init_epoch))

    # start training
    training_model.fit_generator(
        generator=train_generator,
        steps_per_epoch=args.steps,
        epochs=args.epochs,
        verbose=1,
        callbacks=callbacks,
        initial_epoch=init_epoch,
    )
Example #26
def main(args=None):
    # parse arguments
    if args is None:
        args = sys.argv[1:]
    args = parse_args(args)

    # create object that stores backbone information
    backbone = models.backbone(args.backbone)

    # make sure keras is the minimum required version
    check_keras_version()

    # optionally choose specific GPU
    if args.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    keras.backend.tensorflow_backend.set_session(get_session())

    # create the generators
    train_generator, validation_generator = create_generators(
        args, backbone.preprocess_image)

    # create the model
    if args.snapshot is not None:
        print('Loading model, this may take a second...')
        model = models.load_model(args.snapshot, backbone_name=args.backbone)
        training_model = model
        prediction_model = retinanet_bbox(model=model)
    else:
        weights = args.weights
        # default to imagenet if nothing else is specified
        if weights is None and args.imagenet_weights:
            weights = backbone.download_imagenet()
        else:
            print("Skipping imagenet weights download.")

        print('Creating model, this may take a second...')
        model, training_model, prediction_model = create_models(
            backbone_retinanet=backbone.retinanet,
            num_classes=train_generator.num_classes(),
            weights=weights,
            multi_gpu=args.multi_gpu,
            freeze_backbone=args.freeze_backbone)

    # print model summary
    print(model.summary())

    # create the callbacks
    callbacks = create_callbacks(
        model,
        training_model,
        prediction_model,
        validation_generator,
        args,
    )

    # start training
    training_model.fit_generator(generator=train_generator,
                                 steps_per_epoch=args.steps,
                                 epochs=args.epochs,
                                 verbose=1,
                                 callbacks=callbacks,
                                 validation_data=validation_generator,
                                 validation_steps=20)
Example #27
def main(args=None, data=None, DeepForest_config=None, experiment=None):
    # parse arguments
    if args is None:
        args = sys.argv[1:]
    args = parse_args(args)

    # create object that stores backbone information
    backbone = models.backbone(args.backbone)

    # make sure keras is the minimum required version
    check_keras_version()

    # optionally choose specific GPU
    if args.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    keras.backend.tensorflow_backend.set_session(get_session())

    # create the generators
    train_generator, validation_generator = create_generators(
        args, data, DeepForest_config=DeepForest_config)

    # create the model
    if args.snapshot is not None:
        print('Loading model, this may take a second...')
        model = models.load_model(args.snapshot, backbone_name=args.backbone)
        training_model = model
        prediction_model = retinanet_bbox(
            model=model, nms_threshold=DeepForest_config["nms_threshold"])
    else:
        weights = args.weights
        # default to imagenet if nothing else is specified
        if weights is None and args.imagenet_weights:
            weights = backbone.download_imagenet()

        print('Creating model, this may take a second...')
        model, training_model, prediction_model = create_models(
            backbone_retinanet=backbone.retinanet,
            num_classes=train_generator.num_classes(),
            weights=weights,
            multi_gpu=args.multi_gpu,
            freeze_backbone=args.freeze_backbone,
            nms_threshold=DeepForest_config["nms_threshold"])

    # print model summary
    #print(model.summary())

    # this lets the generator compute backbone layer shapes using the actual backbone model
    if 'vgg' in args.backbone or 'densenet' in args.backbone:
        compute_anchor_targets = functools.partial(
            anchor_targets_bbox, shapes_callback=make_shapes_callback(model))
        train_generator.compute_anchor_targets = compute_anchor_targets
        if validation_generator is not None:
            validation_generator.compute_anchor_targets = compute_anchor_targets

    matched = []
    for entry in validation_generator.image_data.values():
        test = entry in train_generator.image_data.values()
        matched.append(test)
    if sum(matched) > 0:
        raise Exception(
            "%.2f percent of validation windows are in training data" %
            (100 * sum(matched) / train_generator.size()))
    else:
        print("Test passed: No overlapping data in training and validation")

    #start training
    cp.enable()
    training_model.fit_generator(generator=train_generator,
                                 steps_per_epoch=train_generator.size() /
                                 DeepForest_config["batch_size"],
                                 epochs=args.epochs,
                                 verbose=1,
                                 shuffle=False,
                                 callbacks=None,
                                 workers=DeepForest_config["workers"],
                                 use_multiprocessing=True)

    cp.disable()
Example #28
        'classification': losses.focal()
    },
                           optimizer=keras.optimizers.adam(lr=lr,
                                                           clipnorm=0.001))

    return model, training_model, prediction_model


if __name__ == '__main__':
    smin, smax = 1200, 1600

    keras.backend.tensorflow_backend.set_session(get_session())

    model_path = os.path.join('snapshots', '5', 'resnet50_csv_05.h5')

    backbone = models.backbone('resnet50')

    labels_to_names = {1: 'mif', 0: 'ov'}
    main_model, training_model, prediction_model = create_models(
        backbone.retinanet, len(labels_to_names))
    main_model.load_weights(model_path)
    model = prediction_model
    path = '/media/palm/data/MicroAlgae/images'
    pad = 0
    ls = [
        s.split(',')[0]
        for s in open('datastuff/algea/val_annotations').read().split('\n')
    ]
    found = []
    # while True:
    # p = os.path.join(path, np.random.choice(os.listdir(path)))
Example #29
def load_pb_model(pb_model_path):
    start = time.time()
    pb_model = keras.models.load_model(pb_model_path, custom_objects=models.backbone('resnet50').custom_objects)
    print("Load .pb model - processing time: ", time.time() - start)
    return pb_model
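
For comparison, keras-retinanet also provides a loader that injects the backbone's custom objects itself, so the call above can be written as (a sketch, same behaviour assumed):

from keras_retinanet import models

pb_model = models.load_model(pb_model_path, backbone_name='resnet50')
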
Example #30
def main(args=None):
    # parse arguments
    if args is None:
        args = sys.argv[1:]
    args = parse_args(args)

    # create object that stores backbone information
    backbone = models.backbone(args.backbone)

    # make sure keras is the minimum required version
    check_keras_version()

    # optionally choose specific GPU
    if args.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    keras.backend.tensorflow_backend.set_session(get_session())

    # create the generators
    train_generator, validation_generator = create_generators(args, backbone.preprocess_image)

    # create the model
    if args.snapshot is not None:
        print('Loading model, this may take a second...')
        model            = models.load_model(args.snapshot, backbone_name=args.backbone)
        training_model   = model
        prediction_model = retinanet_bbox(model=model)
    else:
        weights = args.weights
        # default to imagenet if nothing else is specified
        if weights is None and args.imagenet_weights:
            weights = backbone.download_imagenet()

        print('Creating model, this may take a second...')
        model, training_model, prediction_model = create_models(
            backbone_retinanet=backbone.retinanet,
            num_classes=train_generator.num_classes(),
            weights=weights,
            multi_gpu=args.multi_gpu,
            freeze_backbone=args.freeze_backbone
        )

    # print model summary
    print(model.summary())

    # this lets the generator compute backbone layer shapes using the actual backbone model
    if 'vgg' in args.backbone or 'densenet' in args.backbone:
        train_generator.compute_shapes = make_shapes_callback(model)
        if validation_generator:
            validation_generator.compute_shapes = train_generator.compute_shapes

    # create the callbacks
    callbacks = create_callbacks(
        model,
        training_model,
        prediction_model,
        validation_generator,
        args,
    )

    # start training
    history=training_model.fit_generator(
        generator=train_generator,
        steps_per_epoch=args.steps,
        epochs=args.epochs,
        verbose=1,
        callbacks=callbacks,
    )
    timestr=time.strftime("%Y-%m-%d-%H%M")
    
    history_path=os.path.join(
                args.snapshot_path,
                '{timestr}_{backbone}.csv'.format(timestr=timestr,backbone=args.backbone, dataset_type=args.dataset_type)
            )
    pd.DataFrame(history.history).to_csv(history_path)