Example 1
def create_models(num_classes, p):
    # create "base" model (no NMS)
    image = keras.layers.Input((None, None, 3))

    if p['resnet'] == 101:
        model = ResNet101RetinaNet(image, num_classes=num_classes, weights=p['weights'], nms=False)
    elif p['resnet'] == 152:
        model = ResNet152RetinaNet(image, num_classes=num_classes, weights=p['weights'], nms=False)
    else:  # ResNet-50
        model = ResNet50RetinaNet(image, num_classes=num_classes, weights=p['weights'], nms=False)

    training_model = model

    # append NMS for prediction only
    classification = model.outputs[1]
    detections = model.outputs[2]
    boxes = keras.layers.Lambda(lambda x: x[:, :, :4])(detections)
    detections = layers.NonMaximumSuppression(name='nms')([boxes, classification, detections])

    prediction_model = keras.models.Model(inputs=model.inputs, outputs=model.outputs[:2] + [detections])

    # compile model
    training_model.compile(
        loss={
            'regression'    : losses.smooth_l1(),
            'classification': losses.focal()
        },
        optimizer=keras.optimizers.adam(lr=p['learning-rate'], clipnorm=0.001),
        metrics=[]
    )

    return model, training_model, prediction_model
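A minimal usage sketch for the function above, assuming a parameter dictionary p with the keys the function actually reads (resnet, weights, learning-rate); num_classes and the concrete values are placeholders.

# Hypothetical parameters; the keys mirror what create_models() reads above.
p = {
    'resnet': 50,               # selects the ResNet50RetinaNet branch
    'weights': 'imagenet',      # passed straight to the RetinaNet constructor
    'learning-rate': 1e-5,      # Adam learning rate used at compile time
}

model, training_model, prediction_model = create_models(num_classes=80, p=p)
# training_model is compiled for fitting; prediction_model appends NMS for inference.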
Example 2
def create_models(backbone_retinanet,
                  backbone,
                  num_classes,
                  weights,
                  multi_gpu=0,
                  freeze_backbone=False):
    modifier = freeze_model if freeze_backbone else None

    # Keras recommends initialising a multi-gpu model on the CPU to ease weight sharing, and to prevent OOM errors.
    # optionally wrap in a parallel model
    if multi_gpu > 1:
        with tf.device('/cpu:0'):
            model = model_with_weights(backbone_retinanet(num_classes,
                                                          backbone=backbone,
                                                          nms=False,
                                                          modifier=modifier),
                                       weights=weights,
                                       skip_mismatch=True)
        training_model = multi_gpu_model(model, gpus=multi_gpu)

        # append NMS for prediction only
        classification = model.outputs[1]
        detections = model.outputs[2]
        boxes = keras.layers.Lambda(lambda x: x[:, :, :4])(detections)
        detections = layers.NonMaximumSuppression(name='nms')([boxes, classification, detections])
        prediction_model = keras.models.Model(
            inputs=model.inputs,
            outputs=model.outputs[:2] + [detections]
        )
    else:
        model = model_with_weights(backbone_retinanet(num_classes,
                                                      backbone=backbone,
                                                      nms=True,
                                                      modifier=modifier),
                                   weights=weights,
                                   skip_mismatch=True)
        training_model = model
        prediction_model = model

    # compile model
    training_model.compile(
        loss={
            'regression': losses.smooth_l1(),
            'classification': losses.focal()
        },
        optimizer=keras.optimizers.adam(lr=1e-5, clipnorm=0.001)
    )

    return model, training_model, prediction_model
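A hedged usage sketch for this multi-GPU variant: the resnet_retinanet import reflects where older keras-retinanet releases expose the backbone factory and may differ in other versions, and the argument values are placeholders.

# Assumed location of the backbone factory in older keras-retinanet releases.
from keras_retinanet.models.resnet import resnet_retinanet

model, training_model, prediction_model = create_models(
    backbone_retinanet=resnet_retinanet,  # callable that builds the RetinaNet graph
    backbone='resnet50',
    num_classes=20,
    weights=None,                         # or a path to a pretrained .h5 snapshot
    multi_gpu=2,                          # >1 wraps training_model with multi_gpu_model
    freeze_backbone=True                  # freezes backbone layers via the modifier
)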
Example 3
def create_models(num_classes, weights='imagenet', multi_gpu=0):
    # create "base" model (no NMS)
    image = keras.layers.Input((None, None, 3))

    # Keras recommends initialising a multi-gpu model on the CPU to ease weight sharing, and to prevent OOM errors.
    # optionally wrap in a parallel model
    if multi_gpu > 1:
        with tf.device('/cpu:0'):
            model = ResNet50RetinaNet(image,
                                      num_classes=num_classes,
                                      weights=weights,
                                      nms=False)
        training_model = multi_gpu_model(model, gpus=multi_gpu)
    else:
        model = ResNet50RetinaNet(image,
                                  num_classes=num_classes,
                                  weights=weights,
                                  nms=False)
        training_model = model

    # append NMS for prediction only
    classification = model.outputs[1]
    detections = model.outputs[2]
    boxes = keras.layers.Lambda(lambda x: x[:, :, :4])(detections)
    detections = layers.NonMaximumSuppression(name='nms')([boxes, classification, detections])
    prediction_model = keras.models.Model(
        inputs=model.inputs,
        outputs=model.outputs[:2] + [detections]
    )

    # compile model
    training_model.compile(
        loss={
            'regression': losses.smooth_l1(),
            'classification': losses.focal()
        },
        optimizer=keras.optimizers.adam(lr=1e-5, clipnorm=0.001)
    )

    return model, training_model, prediction_model
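A short sketch of how the two returned models are typically used; the dummy batch is a placeholder, and the output order (presumably regression, classification, NMS detections) follows the indexing used in the code above.

import numpy as np

model, training_model, prediction_model = create_models(num_classes=20, weights='imagenet')

# The prediction model returns the first two base outputs plus the NMS detections.
dummy_batch = np.random.rand(1, 448, 448, 3).astype(np.float32)  # placeholder image batch
regression, classification, detections = prediction_model.predict_on_batch(dummy_batch)

# training_model (compiled inside create_models) would then be fit on batches from
# a data generator; that part is not shown here.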
Example 4
from keras_retinanet.utils.image import read_image_bgr, preprocess_image, resize_image
import numpy as np
from keras_retinanet import layers

import json
import sys
from glob import glob

import keras

# custom_objects maps the custom RetinaNet layers (e.g. the NMS layer) so that load_model
# can rebuild them; this import path is what older keras-retinanet versions expose.
from keras_retinanet.models.resnet import custom_objects

path_to_model = sys.argv[1]

model = keras.models.load_model(path_to_model, custom_objects=custom_objects)

# convert to the prediction model (append NMS for inference)
classification = model.outputs[1]
detections = model.outputs[2]
boxes = keras.layers.Lambda(lambda x: x[:, :, :4],
                            name='lambda_before_output')(detections)
detections = layers.NonMaximumSuppression(name='nms')([boxes, classification, detections])
prediction_model = keras.models.Model(
    inputs=model.inputs,
    outputs=model.outputs[:2] + [detections]
)

model = prediction_model

test_files = glob('deploy/test/*/*_image.jpg')

result_dict = {}

#01bb984a-d3f1-4ba5-b0c0-5242533efa4d/0000

count = 0
all_num = len(test_files)

for test_file in test_files: