    def build(self):
        super(ImageClassifier, self).build()

        classifier, self.preprocess_input = Classifiers.get(
            self.config.BACKBONE)
        base_model = classifier(input_shape=self.config.IMAGE_SHAPE,
                                include_top=True,
                                classes=self.config.NUM_CLASSES)

        for layer in base_model.layers:
            self.backbone_layer_names.append(layer.name)

        if self.config.MODE == 'training':
            # one-hot ground-truth labels, only fed to the model in training mode
            input_label = Input(shape=[self.config.NUM_CLASSES],
                                name='input_label',
                                dtype='float32')

            wfocal_loss_graph = weighted_focal_loss(
                self.config.CCE_WEIGHTS, self.config.FOCAL_LOSS_GAMMA)
            wfocal_loss = Lambda(lambda x: wfocal_loss_graph(*x), name='wfocal_loss') \
                ([input_label, base_model.output])

            inputs = base_model.inputs
            inputs += [input_label]
            outputs = base_model.outputs
            outputs += [wfocal_loss]
            model = Model(inputs, outputs, name=self.config.BACKBONE)

            return model
        else:
            return base_model
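A hedged sketch (not from the original repository) of how the training-mode model returned by build() might be compiled: the 'wfocal_loss' Lambda output already carries the objective, so it can be registered with add_loss while the raw logits output gets no per-output loss, following the usual Keras loss-layer pattern. Here image_classifier stands in for a hypothetical ImageClassifier instance.

import keras
import tensorflow as tf

model = image_classifier.build()                      # hypothetical instance
loss_tensor = model.get_layer('wfocal_loss').output   # loss emitted by the Lambda layer
model.add_loss(tf.reduce_mean(loss_tensor))           # register it as the training objective
model.compile(optimizer=keras.optimizers.Adam(lr=1e-4),
              loss=[None] * len(model.outputs))       # no per-output losses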
Example #2
    def __init__(self, nClass,
                 featureNames=["conv2_block5_0_relu",
                               "conv3_block9_0_relu",
                               "conv4_block26_0_relu"],
                 dropoutRate=0.0, weights="imagenet",
                 useScoreMapConnect=False, height=224, width=224):
        self.dropoutRate = dropoutRate
        self.nUpConvo = 2
        self.nDownConvo = 2
        self.activation = "relu"
        self.nClass = nClass
        self.softmaxLayers = []
        self.E = [None] * 4 * 3
        self.width = width
        self.height = height
        self.useScoreMapConnect = useScoreMapConnect

        # DenseNet-169 encoder exposing three intermediate feature maps
        classifier, preprocess_input = Classifiers.get('densenet169')
        self.preprocess = lambda x: preprocess_input(x * 255)
        basemodel = classifier((height, width, 3), weights=weights)
        outputs = [basemodel.get_layer(name).output for name in featureNames]
        encoderModel = Model(inputs=[basemodel.input], outputs=outputs)

        # decoder: alternating up/down blocks followed by a softmax segmentation head
        inputs = Input(shape=(height, width, 3))
        x = inputs
        self.bigFeature, self.smallFeature, x = encoderModel(x)
        x = self.upBlock(x, 0, 0)
        x = self.upBlock(x, 0, 1)
        x = self.downBlock(x, 1, 1)
        x = self.downBlock(x, 1, 0)
        x = self.upBlock(x, 2, 0)
        x = self.upBlock(x, 2, 1)
        x = self.upBlock(x, 3, 2, compress=False)
        x = self.upBlock(x, 3, 2, compress=False)
        x = Conv2D(nClass, (3, 3), kernel_initializer='he_normal', padding='same')(x)
        x = Activation('softmax', name="output")(x)
        self.softmaxLayers.append(x)
        self.trainModel = Model(inputs, self.softmaxLayers)
        self.trainModel.summary()
        self.model = Model(inputs, x)
        self.model.summary()
Example #3
    def __init__(self, name: str, size: int, preload_image: np.ndarray = None):
        classifier, preprocess_input = Classifiers.get(name)
        self.model = classifier((size, size, 3), weights='imagenet')
        self.preprocess_input = preprocess_input
        self.size = size

        if isinstance(preload_image, np.ndarray):
            self.classify(preload_image)
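The constructor warms the graph up by calling self.classify() on an optional preload image, but that method is not shown in this snippet. A minimal sketch of what it could look like, assuming the usual resize → preprocess → predict flow and that cv2 and numpy are imported (the name, signature, and behavior are assumptions, not the original code):

    def classify(self, image: np.ndarray) -> np.ndarray:
        # hypothetical helper: resize to the network input size, apply the
        # backbone's preprocessing, add a batch axis and run a forward pass
        x = cv2.resize(image, (self.size, self.size))
        x = self.preprocess_input(x)
        x = np.expand_dims(x, axis=0)
        return self.model.predict(x)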
def _test_application(name,
                      input_shape=(224, 224, 3),
                      last_dim=1000,
                      label='bull_mastiff'):
    classifier, preprocess_input = Classifiers.get(name)
    model = classifier(input_shape=input_shape, weights='imagenet')

    output_shape, preds = _get_output_shape(model, preprocess_input)
    assert output_shape == (None, last_dim)

    names = [p[1] for p in decode_predictions(preds)[0]]
    assert label in names[:3]
def _test_save_load(name, input_shape=(224, 224, 3)):
    # create first model
    classifier, preprocess_input = Classifiers.get(name)
    model1 = classifier(input_shape=input_shape, weights=None)
    model1.save('model.h5')

    # load same model from file
    model2 = load_model('model.h5', compile=False)
    os.remove('model.h5')

    x = _get_img()
    y1 = model1.predict(x)
    y2 = model2.predict(x)

    assert np.allclose(y1, y2)
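These helpers read like pytest building blocks; one plausible way to wire them up (assumed, not taken from the original test file) is to parametrize them over a few backbone names shipped with classification_models:

import pytest

# assumed test wiring, not from the original file
@pytest.mark.parametrize('name', ['resnet18', 'resnet34', 'seresnet18'])
def test_save_load(name):
    _test_save_load(name)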
    def __build_convnet__(self):
        resnet, _ = Classifiers.get('resnet18')
        base_model = resnet(input_shape=(224, 224, 3),
                            include_top=False,
                            weights='imagenet')
        x = base_model.output
        x = GlobalAveragePooling2D()(x)
        x = Dense(4096, activation='relu')(x)
        x = Dropout(0.6)(x)
        x = Dense(4096, activation='relu')(x)
        x = Dropout(0.6)(x)
        x = Lambda(lambda x_: K.l2_normalize(x_, axis=1))(x)
        convnet_model = Model(inputs=base_model.input, outputs=x)

        self.convnet = convnet_model
def make_model(network, input_shape, classes=6, predict_flag=0, drop_rate=0.3):
    classifier, _ = Classifiers.get(network)
    if predict_flag == 0:
        weights = 'imagenet'
    else:
        weights = None
    base_model = classifier(input_shape=input_shape,
                            weights=weights,
                            include_top=False)
    x = keras.layers.GlobalAveragePooling2D()(base_model.output)

    x = keras.layers.Dropout(drop_rate)(x)

    output = keras.layers.Dense(classes, activation='softmax')(x)
    model = keras.models.Model(inputs=[base_model.input], outputs=[output])

    return model
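An illustrative call to make_model (not from the original source): build a 6-class ResNet-18 head with ImageNet weights and compile it. This assumes keras is imported as in the function body; 'resnet18' is one of the backbone names provided by classification_models.

model = make_model('resnet18', input_shape=(224, 224, 3), classes=6, predict_flag=0)
model.compile(optimizer=keras.optimizers.Adam(lr=1e-4),
              loss='categorical_crossentropy',
              metrics=['accuracy'])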
Example #8
    def get_backbone_and_feature_layers(self, num_feature_layers):
        super(SemanticModelWrapper, self).build()
        image_shape = self.config.IMAGE_SHAPE
        classifier, self.preprocess_input = Classifiers.get(
            self.config.BACKBONE)
        backbone = classifier(input_shape=image_shape,
                              input_tensor=None,
                              weights=self.config.BACKBONE_WEIGHTS,
                              include_top=False)

        for layer in backbone.layers:
            self.backbone_layer_names.append(layer.name)

        if self.config.FEATURE_LAYERS == 'default':
            feature_layers = get_feature_layers(self.config.BACKBONE,
                                                n=num_feature_layers)
        else:
            feature_layers = self.config.FEATURE_LAYERS

        return backbone, feature_layers
Example #9
def get_gpu_model(input_size=None,
                  activation=None,
                  initial_weights=None,
                  is_corruption=False):
    ResNet50v2, preprocess_input = Classifiers.get('resnet50v2')
    model = ResNet50v2(input_shape=input_size,
                       weights='imagenet',
                       classes=1,
                       include_top=False,
                       pooling='avg')
    model_inputs = model.inputs
    model_outputs = model.output
    model_outputs = Dense(128, activation='relu')(model_outputs)
    model_outputs = Dense(32, activation='relu')(model_outputs)
    model_outputs = Dense(1, activation=activation)(model_outputs)
    model = Model(model_inputs, model_outputs)
    model = multi_gpu_model(model, gpus=2)
    model.compile(loss=keras.losses.mean_squared_error,
                  optimizer=keras.optimizers.Adam())
    return model
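A possible call site for get_gpu_model (illustrative only; multi_gpu_model needs two visible GPUs to build the replicated model):

# illustrative usage, not from the original source
model = get_gpu_model(input_size=(224, 224, 3), activation='linear')
model.summary()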
Example #10
def network(data, labels_one_hot, mode):
    model_name = S("model.classification_models.model")
    dataset = S("model.classification_models.dataset")

    # keras.backend.set_learning_phase(1 if mode==tf.estimator.ModeKeys.TRAIN else 0) # 0: Test(default), 1: Train
    keras.backend.set_learning_phase(0)  # 0: Test(default), 1: Train
    classifier, preprocess_input = Classifiers.get(model_name)

    # overwrite preprocess_input for mobilenet (workaround for a bug in keras_applications)
    if "mobilenet" in model_name:
        from keras.applications import imagenet_utils
        preprocess_input = lambda data: imagenet_utils.preprocess_input(
            data, mode='tf')

    # apply model
    data = preprocess_input(data)
    GLOBAL["keras_model_preprocess"] = preprocess_input
    model = classifier((224, 224, 3), input_tensor=data, weights=dataset)
    GLOBAL["keras_model"] = model
    logits = model.output

    # keras-models do not use empty-class
    logits = tf.concat([tf.expand_dims(logits[:, 0] * 0, 1), logits], axis=-1)
    return logits
def test_imports(name):
    data = Classifiers.get(name)
    assert data is not None
    elif modelname == "mobilenetv2":
        modelfile = "~/.keras/models/mobilenet_v2_weights_tf_dim_ordering_tf_kernels_1.0_224.h5"
    elif modelname == "nasnetmobile":
        modelfile = "~/.keras/models/nasnet_mobile.h5"
    else:
        print("guessing modelfile from modelname")
if modelfile:
    modelfile = os.path.expanduser(modelfile)

# download graph
print("downloading model ", modelname)
import tensorflow as tf
from tensorflow import keras
import re
from classification_models import Classifiers
classifier, preprocess_input = Classifiers.get(modelname)
model = classifier((224, 224, 3), weights=dataset)

# actually builds the graph
data = tf.placeholder(shape=(None, 224, 224, 3), dtype=tf.float32)
logits = model(data)

# guess the modelfile
modeldir = os.path.expanduser("~/.keras/models")
if modelfile is None:
    modelfile = [f for f in os.listdir(modeldir)
                 if modelname + "_" in f]  #ignores the version-suffixes
    if len(modelfile) == 0:
        modelfile = [
            f for f in os.listdir(modeldir)
            if re.sub(r"v\d+$", "", modelname) in f
Example #13
def main():
    tf.keras.backend.set_image_data_format('channels_last')
    train_input_dataset = input_fn(
      mode='train',
      is_training=True,
      batch_size=args.batch_size,
      num_epochs=args.n_epochs,
      parse_record_fn=parse_record)
    
    val_input_dataset = input_fn(
      mode='val',
      is_training=False,
      batch_size=args.batch_size,
      num_epochs=args.n_epochs,
      parse_record_fn=parse_record)
    
    test_input_dataset = input_fn(
      mode='test',
      is_training=False,
      batch_size=args.batch_size,
      num_epochs=args.n_epochs,
      parse_record_fn=parse_record)

    optimizer = tf.keras.optimizers.SGD(lr=args.lr, momentum=0.9)
    if args.resnet_size == 18 and args.pretrained:
        classifier, _ = Classifiers.get('resnet18')
        base_model = classifier(input_shape=(224,224,3), weights='imagenet', include_top=True)
        new_layer = tf.keras.layers.Dense(NUM_CLASSES, activation='softmax')
        model = tf.keras.models.Model(base_model.input, new_layer(base_model.layers[-1].output))
    elif args.resnet_size == 18:
        print('USING RESNET18')
        model = ResNet18(input_shape=(args.image_size, args.image_size, 3), classes=NUM_CLASSES)
        model_to_save = ResNet18(input_shape=(jr_main.HEIGHT, jr_main.WIDTH, 3), classes=NUM_CLASSES)
    elif args.resnet_size == 50 and not args.pretrained:
        print('USING RESNET50')
        model = ResNet50(input_shape=(args.image_size, args.image_size, 3), classes=NUM_CLASSES)
        model_to_save = ResNet50(input_shape=(args.image_size,args.image_size, 3), classes=NUM_CLASSES)
    elif args.resnet_size == 50 and args.pretrained:
        print('using pretrained resnet50')
        temp_model = tf.keras.applications.ResNet50(include_top=True, weights='imagenet', input_tensor=None, input_shape=(224,224, 3))
        temp_model.layers.pop()
        new_layer = tf.keras.layers.Dense(NUM_CLASSES, activation='softmax')
        model = tf.keras.models.Model(temp_model.input, new_layer(temp_model.layers[-1].output))

    else:
        print('Need to specify resnet18 or 50!')
        sys.exit(0)


    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizer,
                  metrics=['categorical_accuracy', single_class_accuracy(0), 
                    single_class_accuracy(1), single_class_accuracy(2), mean_per_class_accuracy])
    #time_callback, tensorboard_callback, lr_callback = keras_common.get_callbacks(
     # learning_rate_schedule, NUM_IMAGES['train'])
    tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=args.checkpoint_dir)
    time_callback = TimeHistory(args.batch_size, log_steps=100)
    lr_callback = tf.keras.callbacks.LearningRateScheduler(step_decay)
    #lr_callback = LearningRateBatchScheduler(learning_rate_schedule, batch_size=args.batch_size, num_images=NUM_IMAGES['train'])
    
    checkpoint_path = os.path.join(args.checkpoint_dir, 'checkpoint.h5')
    cp_callback = tf.keras.callbacks.ModelCheckpoint(
            checkpoint_path, monitor='val_mean_per_class_accuracy', verbose=1, save_best_only=True, save_weights_only=True, period=1)

    num_train_steps = NUM_IMAGES['train'] // args.batch_size
    num_val_steps = NUM_IMAGES['validation'] // args.batch_size
    num_test_steps = NUM_IMAGES['test'] // args.batch_size

    history = model.fit(train_input_dataset,
                      epochs=args.n_epochs,
                      steps_per_epoch=num_train_steps,
                      callbacks=[
                          time_callback,
                          lr_callback,
                          tensorboard_callback,
                          cp_callback,
                      ],
                      validation_steps=num_val_steps,
                      validation_data=val_input_dataset,
                      verbose=2,
                      workers=4)
    
    print('TESTING')
    test_output = model.evaluate(test_input_dataset,
                                 steps=num_test_steps,
                                 verbose=1)
    stats = build_stats(history, test_output, time_callback)
    print('loading weights from best checkpoint')
    model_to_save.load_weights(checkpoint_path)
    print('saving final model')
    model_to_save.save('{}_final.h5'.format(checkpoint_path.split('/')[-2]))

    print('\nstats: ', stats)
import argparse

import numpy as np

from classification_models import Classifiers

from data_generators.utils import resize, load_image_rgb

# Parse command line arguments
parser = argparse.ArgumentParser(
    description='Imagenet classification example.')
parser.add_argument('-b',
                    '--backbone',
                    required=True,
                    metavar='backbone model name',
                    help='The name of the backbone architecture')
args = parser.parse_args()

backbone = args.backbone
classifier, preprocess_input = Classifiers.get(backbone)

# load model
model = classifier(input_shape=(224, 224, 3),
                   include_top=True,
                   weights='imagenet')

image_files = ['cat1.jpg', 'cat2.jpg', 'dog1.jpg', 'dog2.jpg']
print('=============results=============')
for image_file in image_files:
    # read and prepare image
    img = load_image_rgb(image_file)
    x = resize(img, (224, 224), preserve_range=True)
    x = preprocess_input(x)
    x = np.expand_dims(x, 0)
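    # illustrative continuation (the original snippet is cut off here); assumes
    # decode_predictions from keras.applications.imagenet_utils is available
    from keras.applications.imagenet_utils import decode_predictions
    preds = model.predict(x)
    top_class = decode_predictions(preds, top=1)[0][0]
    print('{}: {} ({:.3f})'.format(image_file, top_class[1], top_class[2]))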
Example #15
        mask_bg = np.zeros(shape=(math.ceil((w - h) / 2), w), dtype=np.uint8)
        image = cv2.vconcat([image_bg, image, image_bg])
        mask = cv2.vconcat([mask_bg, mask, mask_bg])

    image = cv2.resize(image, (320, 320))
    mask = cv2.resize(mask, (320, 320))

    return image, mask


if MODEL_TYPE == 'classification':
    from classification_models import Classifiers
    from keras.layers import GlobalAveragePooling2D, Dense
    from keras.models import Model

    Vgg16, preprocess_input = Classifiers.get('vgg16')

    n_classes = 4

    # build model
    base_model = Vgg16(input_shape=(320, 320, 3),
                       weights='imagenet',
                       include_top=False)

    for layer in base_model.layers:
        layer.trainable = True

    global_avrg_pool = GlobalAveragePooling2D()(base_model.output)
    fc_1 = Dense(1024, activation='relu', name='fc_1')(global_avrg_pool)
    predictions = Dense(n_classes, activation='softmax',
                        name='predictions')(fc_1)