Example #1
    def use_release(self, gpus=1):
        '''Use the latest DeepForest model release from GitHub and load the model.
        Optionally download the release if it does not already exist locally.

        Args:
            gpus: number of GPUs to parallelize over, defaults to 1

        Returns:
            None. The loaded keras model is stored on self.model and self.prediction_model.
        '''
        #Download latest model from github release
        release_tag, self.weights = utilities.use_release()

        #load saved model and tag release
        self.__release_version__ = release_tag
        print("Loading pre-built model: {}".format(release_tag))

        if gpus == 1:
            with warnings.catch_warnings():
                # Suppress compile warning, not relevant here
                warnings.filterwarnings("ignore", category=UserWarning)
                self.model = utilities.read_model(self.weights, self.config)

            #Convert model
            self.prediction_model = convert_model(self.model)
        elif gpus > 1:
            backbone = models.backbone(self.config["backbone"])
            n_classes = len(self.labels.keys())
            self.model, self.training_model, self.prediction_model = create_models(
                backbone.retinanet,
                num_classes=n_classes,
                weights=self.weights,
                multi_gpu=gpus)

        #add to config
        self.config["weights"] = self.weights
Example #2
def get_joint_detection_model(model_path, model_type):
    """
    Input -> Model path for the object detection model
            Model type-> Foot or Hand
    Output -> Inference model for getting the predictions on test images
    
    """
    # config_file_path = '/usr/local/bin/config'
    if model_type == 'Foot_detection':
        # with open('/usr/local/bin/src/config.ini','w') as f:
        #     f.write('[anchor_parameters]\nsizes   = 32 64 128 256 512 1024\nstrides = 8 16 32 64 128 256\nratios  = 1.2 1.5 2 2.5 3\nscales  =1 1.5 2\n')

        model, training_model, prediction_model = create_models(
            backbone_retinanet=backbone('resnet50').retinanet,
            num_classes=5,
            weights=None,
            multi_gpu=False,
            freeze_backbone=True,
            lr=1e-3,
            config=read_config_file('/usr/local/bin/Config files/config_foot.ini'))

        training_model.load_weights(model_path)
        infer_model = convert_model(
            training_model,
            anchor_params=parse_anchor_parameters(
                read_config_file('/usr/local/bin/Config files/config_foot.ini')))

    elif model_type == 'Hand_detection':
        # with open('/usr/local/bin/src/config.ini','w') as f:
        #     f.write('[anchor_parameters]\nsizes   = 32 64 128 256 512 1024\nstrides = 8 16 32 64 128 256\nratios  = 1 1.5 2 2.5 3\nscales  = 1 1.2 1.6\n')

        model, training_model, prediction_model = create_models(
            backbone_retinanet=backbone('resnet50').retinanet,
            num_classes=6,
            weights=None,
            multi_gpu=False,
            freeze_backbone=True,
            lr=1e-3,
            config=read_config_file('/usr/local/bin/Config files/config_hand.ini'))
        training_model.load_weights(model_path)
        infer_model = convert_model(
            training_model,
            anchor_params=parse_anchor_parameters(
                read_config_file('/usr/local/bin/Config files/config_hand.ini')))
    
    return infer_model
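A hedged call sketch for the function above; the snapshot paths are hypothetical, and only the two model_type strings handled in the if/elif are valid.

# Hypothetical snapshot paths, for illustration only.
foot_model = get_joint_detection_model(
    model_path='/usr/local/bin/snapshots/foot_resnet50.h5',
    model_type='Foot_detection')
hand_model = get_joint_detection_model(
    model_path='/usr/local/bin/snapshots/hand_resnet50.h5',
    model_type='Hand_detection')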
Example #3
def retinanet_train(tiny=False):
    labels_path = bdd100k_labels_path
    val_json = labels_path + 'bdd100k_labels_images_val.json'
    train_json = labels_path + 'bdd100k_labels_images_train.json'
    val_annot = labels_path + 'val_annotations.csv'
    train_annot = labels_path + 'train_annotations.csv'

    num_data = 70000

    classes = bu.annotate(val_json, val_annot, labels_path, bdd100k_val_path)
    cl_map_path = bu.class_mapping(classes,
                                   output_csv=labels_path +
                                   'class_mapping.csv')
    bu.annotate(train_json, train_annot, bdd100k_labels_path,
                bdd100k_train_path)

    # Hyper-parameters
    batch_size = 1
    steps_per_epoch = np.ceil(num_data / batch_size)

    for m in models:
        print('Generating %s backbone...' % m)
        backbone = kr_models.backbone(m)
        weights = backbone.download_imagenet()
        print('Creating generators...')
        tr_gen, val_gen = mt.create_generators(
            train_annotations=train_annot,
            val_annotations=val_annot,
            class_mapping=cl_map_path,
            base_dir='',
            preprocess_image=backbone.preprocess_image,
            batch_size=batch_size)
        print('Creating models...')
        model, training_model, prediction_model = kr_train.create_models(
            backbone.retinanet, tr_gen.num_classes(), weights)
        print('Creating callbacks...')
        callbacks = mt.create_callbacks(model,
                                        batch_size,
                                        'test',
                                        tensorboard_dir=log_path)

        print('Training...')
        training_model.fit_generator(
            generator=tr_gen,
            steps_per_epoch=steps_per_epoch,  # 10000,
            epochs=2,
            verbose=1,
            callbacks=callbacks,
            workers=4,  # 1
            use_multiprocessing=True,  # False,
            max_queue_size=10,
            validation_data=val_gen)
Example #4
def retinanet_tiny_train():
    labels_path = bdd100k_labels_path
    val_json = labels_path + 'bdd100k_labels_images_val.json'

    num_data = 7000
    batch_size = 1
    steps_per_epoch = np.ceil(num_data / batch_size)

    train_annot, val_annot = bu.annotate_tiny(val_json,
                                              labels_path,
                                              bdd100k_val_path,
                                              overwrite=True)
    cl_map_path = bu.class_mapping(input_json=val_json,
                                   output_csv=labels_path +
                                   'class_mapping.csv')

    for m in models:
        print('Generating %s backbone...' % m)
        backbone = kr_models.backbone(m)
        weights = backbone.download_imagenet()
        print('Creating generators...')
        tr_gen, val_gen = bu.create_generators(
            train_annotations=train_annot,
            val_annotations=val_annot,
            class_mapping=cl_map_path,
            base_dir='',
            preprocess_image=backbone.preprocess_image,
            batch_size=batch_size)
        print('Creating models...')
        model, training_model, prediction_model = kr_train.create_models(
            backbone.retinanet, tr_gen.num_classes(), weights)
        print('Creating callbacks...')
        callbacks = bu.create_callbacks(model,
                                        batch_size,
                                        snapshots_path=retinanet_h5_path,
                                        tensorboard_dir=log_path,
                                        backbone=m,
                                        dataset_type='bdd10k')

        print('Training...')
        training_model.fit_generator(
            generator=tr_gen,
            steps_per_epoch=steps_per_epoch,  # 10000,
            epochs=50,
            verbose=1,
            callbacks=callbacks,
            workers=1,  # 1
            use_multiprocessing=False,  # False,
            max_queue_size=10,
            validation_data=val_gen)
Example #5
    def __init__(self, weights=None, saved_model=None):
        self.weights = weights
        self.saved_model = saved_model

        # Read config file - if a config file exists in local dir use it,
        # if not use installed.
        if os.path.exists("deepforest_config.yml"):
            config_path = "deepforest_config.yml"
        else:
            try:
                config_path = get_data("deepforest_config.yml")
            except Exception as e:
                raise ValueError(
                    "No deepforest_config.yml found either in local "
                    "directory or in installed package location. {}".format(e))

        print("Reading config file: {}".format(config_path))
        self.config = utilities.read_config(config_path)

        # Create a label dict, defaults to "Tree"
        self.read_classes()

        # release version id to flag if release is being used
        self.__release_version__ = None

        # Load saved model if needed
        if self.saved_model:
            print("Loading saved model")
            # Capture user warning, not relevant here
            with warnings.catch_warnings():
                warnings.filterwarnings("ignore", category=UserWarning)
                self.model = models.load_model(saved_model)
                self.prediction_model = convert_model(self.model)

        elif self.weights:
            print("Creating model from weights")
            backbone = models.backbone(self.config["backbone"])
            self.model, self.training_model, self.prediction_model = create_models(
                backbone.retinanet, num_classes=1, weights=self.weights)
        else:
            print(
                "A blank deepforest object created. "
                "To perform prediction, either train or load an existing model."
            )
            self.model = None
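Sketches of the three construction paths handled by this __init__, assuming it belongs to the same deepforest class as Example #1; the file names are hypothetical.

# Hypothetical file names for illustration.
blank_model = deepforest.deepforest()                                  # no weights, no saved model
from_weights = deepforest.deepforest(weights="retinanet_weights.h5")   # rebuild retinanet from weights
from_saved = deepforest.deepforest(saved_model="saved_model.h5")       # load a full saved keras model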
Example #6
def return_objects(model_path, class_path, image_path, iou_threshold=0.25, model='resnet152'):

    with open(class_path, 'r') as f:
        classes = [x.strip() for x in f.readlines()]
    
    labels_to_names = {}
    for i, c in enumerate(classes):
        labels_to_names[i] = c
        
    # ## Load RetinaNet model
    if 'resnet' in model_path:
        from keras_retinanet.models.resnet import resnet_retinanet as retinanet, custom_objects, download_imagenet
    elif 'mobilenet' in model_path:
        from keras_retinanet.models.mobilenet import mobilenet_retinanet as retinanet, custom_objects, download_imagenet
    elif 'vgg' in model_path:
        from keras_retinanet.models.vgg import vgg_retinanet as retinanet, custom_objects, download_imagenet
    elif 'densenet' in model_path:
        from keras_retinanet.models.densenet import densenet_retinanet as retinanet, custom_objects, download_imagenet
    else:
        raise NotImplementedError('Backbone \'{}\' not implemented.'.format(model_path))

    _, _, model = create_models(
        backbone_retinanet=retinanet,
        backbone=model,
        num_classes=len(classes),
        weights=model_path,
        multi_gpu=False,
        freeze_backbone=False
    )

    image = read_image_bgr(image_path)
    # preprocess image for network
    image = preprocess_image(image)
    image, scale = resize_image(image)

    # process image
    start = time.time()
    _, _, boxes, nms_classification = model.predict_on_batch(np.expand_dims(image, axis=0))
    #print("processing time: ", time.time() - start)

    # compute predicted labels and scores
    predicted_labels = np.argmax(nms_classification[0, :, :], axis=1)
    scores = nms_classification[0, np.arange(nms_classification.shape[1]), predicted_labels]

    # correct for image scale
    boxes /= scale
    
    plabels, pscores, pboxes = [], [], []
    for idx, (label, score) in enumerate(zip(predicted_labels, scores)):
        if score<0.5:
            continue
        pboxes.append(boxes[0, idx, :].astype(int))
        plabels.append([label])
        pscores.append(score)
    
    rboxes, rscores, rlabels, ignore, inter = [], [], [], [], False
    for i in range(len(pboxes)):
        if i in ignore:
            continue
        for j in range(i+1, len(pboxes)):
            if intersects(pboxes[i], pboxes[j]):
                iou = area(pboxes[i], pboxes[j])/float(union(pboxes[i], pboxes[j])-area(pboxes[i], pboxes[j]))
                if pscores[i]<pscores[j] and iou>iou_threshold:
                    inter = True
                elif pscores[i]>pscores[j] and iou>iou_threshold:
                    ignore.append(j)
                    plabels[i].append(plabels[j][0])
        
        if not inter:                
            rboxes.append(pboxes[i])
            rscores.append(pscores[i])
            rlabels.append(plabels[i])
        else:
            inter = False
        
    return rlabels, rscores, rboxes, labels_to_names
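A hedged example of calling return_objects and reading out its merged detections; the paths are hypothetical.

# Hypothetical paths for illustration.
rlabels, rscores, rboxes, labels_to_names = return_objects(
    model_path='snapshots/resnet152_csv_50.h5',
    class_path='classes.txt',
    image_path='test.jpg',
    iou_threshold=0.25)

for labels, score, box in zip(rlabels, rscores, rboxes):
    names = [labels_to_names[l] for l in labels]  # one or more merged class names per box
    print(names, score, box)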
Example #7
# The opening of this call is truncated in the snippet; given the argument list,
# it is most likely a ReduceLROnPlateau learning-rate callback:
callbacks.append(keras.callbacks.ReduceLROnPlateau(
    monitor='loss',  # assumed; the monitored metric is not shown in the snippet
    factor=0.1,
    patience=2,
    verbose=1,
    mode='auto',
    epsilon=0.0001,
    cooldown=0,
    min_lr=0
))

callbacks.append(LossHistory())
callbacks.append(model_checkpoint)
callbacks.append(evaluation)

model, training_model, prediction_model = create_models(
    backbone_retinanet=retinanet,
    backbone='resnet50',
    num_classes=train_generator.num_classes(),
    weights=weights,
    multi_gpu=0,
    freeze_backbone=True
)

training_model.fit_generator(
    generator=train_generator,
    steps_per_epoch=5000,
    epochs=100,
    verbose=1,
    callbacks=callbacks,
    validation_data=validation_generator,
    validation_steps=100
)
Example #8
    checkpoint = RedirectModel(checkpoint, model)
    return [
        lr_reduce,
        early_stopping,
        evaluation,
        checkpoint,
    ]


BACKBONE = 'resnet50'
from pathlib import Path
for shape in SHAPES:
    log_dir = Path('/home/paperspace/retinanet_shape_{}_{}/'.format(*shape))
    log_dir.mkdir(exist_ok=True)
    kw = dict(image_min_side=shape[0], image_max_side=shape[1], batch_size=4)
    train_gen = CSVGenerator('train_path2.csv', class_map_path, **kw)
    val_gen = CSVGenerator('val_path_small_2.csv', class_map_path, **kw)
    model, training_model, prediction_model = create_models(
        backbone_retinanet=models.backbone(BACKBONE).retinanet,
        num_classes=train_gen.num_classes(),
        weights=wt_50_path,
        multi_gpu=0,
        freeze_backbone=True,
        config=None)
    callbacks = make_callbacks(log_dir, model, prediction_model)
    history = model.fit_generator(train_gen,
                                  steps_per_epoch=10000 / 4,
                                  epochs=40,
                                  verbose=1,
                                  callbacks=callbacks)
Example #9
    evaluation = True
    snapshots = True
    snapshot_path = "C:\\Users\\Pawan\\Documents\\ML\\snapshots12"
    backbone = 'resnet50'
    epochs = 100
    steps = 10755//(batch_size)
    gpu=0  
    resize=True
    
train_gen, valid_gen = create_generators(args, b.preprocess_image)

model, training_model, prediction_model = create_models(
            backbone_retinanet=b.retinanet,
            num_classes=train_gen.num_classes(),
            weights=None,
            multi_gpu=True,
            freeze_backbone=True,
            lr=1e-9,
            config=args.config
        )
 
training_model.load_weights("C:\\Users\\Pawan\\Documents\\ML\\snapshots12\\resnet50_csv_07.h5")

infer_model = convert_model(
    training_model,
    anchor_params=parse_anchor_parameters(
        read_config_file('C:\\Users\\Pawan\\Documents\\config.ini')))


def test_gen(image_ids, bs=2, size=672, test=True):
    imgs = []
    scale = None
    idx = 0
    if test:
Example #10
# The first branch of this if/elif chain is truncated in the snippet; by analogy
# with the branches below it is presumably:
if 'resnet' in sys.argv[1]:
    from keras_retinanet.models.resnet import resnet_retinanet as retinanet, custom_objects, download_imagenet
elif 'mobilenet' in sys.argv[1]:
    from keras_retinanet.models.mobilenet import mobilenet_retinanet as retinanet, custom_objects, download_imagenet
elif 'vgg' in sys.argv[1]:
    from keras_retinanet.models.vgg import vgg_retinanet as retinanet, custom_objects, download_imagenet
elif 'densenet' in sys.argv[1]:
    from keras_retinanet.models.densenet import densenet_retinanet as retinanet, custom_objects, download_imagenet
else:
    raise NotImplementedError('Backbone \'{}\' not implemented.'.format(sys.argv[1]))

weights = sys.argv[2]

_, _, model = create_models(
    backbone_retinanet=retinanet,
    backbone=sys.argv[1],
    num_classes=12,
    weights=weights,
    multi_gpu=False,
    freeze_backbone=False
)

# print model summary
print(model.summary())

# ## Run detection on example

# In[5]:


# load image

image = read_image_bgr('./examples/test.jpg')
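The example is cut off right after the image is loaded; a hedged continuation in the style of Example #6's inference code (same old keras-retinanet API) might look like this.

# Continuation sketch, assuming preprocess_image and resize_image from
# keras_retinanet.utils.image are imported as in Example #6.
image = preprocess_image(image)
image, scale = resize_image(image)
_, _, boxes, nms_classification = model.predict_on_batch(np.expand_dims(image, axis=0))
predicted_labels = np.argmax(nms_classification[0, :, :], axis=1)
scores = nms_classification[0, np.arange(nms_classification.shape[1]), predicted_labels]
boxes /= scale  # map boxes back to the original image coordinates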
Example #11
def train_main(args=None):
    # parse arguments
    if args is None:
        args = sys.argv[1:]
        args = train.parse_args(args)

    # create object that stores backbone information
    backbone = models.backbone(args.backbone)

    # make sure keras is the minimum required version
    train.check_keras_version()

    # optionally choose specific GPU
    if args.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    keras.backend.tensorflow_backend.set_session(train.get_session())

    # create the generators
    train_generator, validation_generator = train.create_generators(args)

    # create the model
    if args.snapshot is not None:
        print('Loading model, this may take a second...')
        model = models.load_model(args.snapshot, backbone_name=args.backbone)
        training_model = model
        prediction_model = retinanet_bbox(model=model)
    else:
        weights = args.weights
        # default to imagenet if nothing else is specified
        if weights is None and args.imagenet_weights:
            weights = backbone.download_imagenet()

        print('Creating model, this may take a second...')
        model, training_model, prediction_model = train.create_models(
            backbone_retinanet=backbone.retinanet,
            num_classes=train_generator.num_classes(),
            weights=weights,
            multi_gpu=args.multi_gpu,
            freeze_backbone=args.freeze_backbone)

    # print model summary
    # print(model.summary())

    # this lets the generator compute backbone layer shapes using the actual backbone model
    if 'vgg' in args.backbone or 'densenet' in args.backbone:
        compute_anchor_targets = functools.partial(
            anchor_targets_bbox, shapes_callback=make_shapes_callback(model))
        train_generator.compute_anchor_targets = compute_anchor_targets
        if validation_generator is not None:
            validation_generator.compute_anchor_targets = compute_anchor_targets

    # create the callbacks
    callbacks = train.create_callbacks(
        model,
        training_model,
        prediction_model,
        validation_generator,
        args,
    )

    callbacks = add_classification_callbacks(args, callbacks, None)

    # start training
    training_model.fit_generator(generator=train_generator,
                                 steps_per_epoch=args.steps,
                                 epochs=args.epochs,
                                 verbose=1,
                                 callbacks=callbacks,
                                 initial_epoch=args.initial_epoch)

    # cleanup after training, since prediction models keep getting loaded across runs
    del training_model
    del model
    del prediction_model
    K.clear_session()
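A hedged invocation sketch; the flags and the csv sub-command follow keras-retinanet's train script CLI, but the exact arguments accepted here depend on the surrounding train module.

# Hypothetical CSV-dataset invocation; adjust flags to whatever train.parse_args expects.
train_main([
    '--backbone', 'resnet50',
    '--epochs', '10',
    '--steps', '1000',
    'csv', 'train_annotations.csv', 'class_mapping.csv',
])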