Example no. 1
0
def CreateXceptionModel():
    """Build a frozen Xception feature extractor with a single-logit
    (binary classification) head on top.

    Returns:
        (model, base_model): the full Keras model, plus the frozen
        Xception base so callers can later unfreeze it for fine-tuning.
    """
    # NOTE(review): input_shape uses module-level img_height/img_width,
    # while the Input below is hard-coded to 150x150 — these must agree
    # or base_model(x) will fail at build time. Verify the globals.
    base_model = Xception(weights='imagenet',
                          include_top=False,
                          input_shape=(img_height, img_width, 3))
    base_model.trainable = False  # freeze the ImageNet weights

    inputs = keras.Input(shape=(150, 150, 3))

    # Pre-trained Xception weights require that input be normalized
    # from [0, 255] to [-1, +1]; the Normalization layer computes
    # outputs = (inputs - mean) / sqrt(var).
    norm_layer = keras.layers.experimental.preprocessing.Normalization()
    mean = np.array([127.5] * 3)
    var = mean**2
    # Scale inputs to [-1, +1]
    x = norm_layer(inputs)
    norm_layer.set_weights([mean, var])

    # Run the base in inference mode (training=False) so that BatchNorm
    # statistics stay frozen — important for later fine-tuning.
    # BUG FIX: feed the normalized tensor `x`, not the raw `inputs`;
    # the original call skipped the normalization layer entirely.
    x = base_model(x, training=False)
    # Convert features of shape `base_model.output_shape[1:]` to vectors
    x = keras.layers.GlobalAveragePooling2D()(x)
    x = keras.layers.Dropout(0.2)(x)  # Regularize with dropout
    # A Dense classifier with a single unit (binary classification)
    outputs = keras.layers.Dense(1)(x)
    model = keras.Model(inputs, outputs)
    return model, base_model
def create_model():
    """Build and compile a 133-class classifier on top of a frozen
    ImageNet-pretrained Xception base.

    Returns:
        The compiled Keras Sequential model.
    """
    # Small truncated-normal initialization for the dense head.
    init = TruncatedNormal(mean=0.0, stddev=0.05, seed=None)

    conv_base = Xception(include_top=False, weights='imagenet')
    conv_base.trainable = False  # train only the classifier head

    model = Sequential()
    model.add(conv_base)
    # FIX: dropped input_shape=training_features.shape[1:] here — Keras
    # ignores input_shape on any layer that is not the first in a
    # Sequential model, and the argument referenced a module-level
    # `training_features` global that need not exist.
    model.add(GlobalAveragePooling2D())

    # Two regularized fully-connected layers, then the softmax output.
    model.add(Dropout(0.5))
    model.add(Dense(512, activation='relu',
                    kernel_initializer=init,
                    bias_initializer='zeros'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))

    model.add(Dense(512, activation='relu',
                    kernel_initializer=init,
                    bias_initializer='zeros'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))

    # 133 output classes (one-hot targets → categorical cross-entropy).
    model.add(Dense(133, activation='softmax',
                    kernel_initializer=init,
                    bias_initializer='zeros'))

    model.summary()

    # Compile the Model
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])

    return model
Example no. 3
0
def createCnn(name):
    """Return a frozen, ImageNet-pretrained CNN backbone selected by name.

    Args:
        name: one of "VGG16", "VGG19", "ResNet50", "InceptionV3",
            "Xception".

    Returns:
        The backbone model (include_top=False, global average pooling),
        with its weights frozen.

    Raises:
        ValueError: if `name` is not a supported backbone.
    """
    builders = {
        "VGG16": VGG16,
        "VGG19": VGG19,
        "ResNet50": ResNet50,
        "InceptionV3": InceptionV3,
        "Xception": Xception,
    }
    # FIX: the original left `cnn` unbound for an unknown name, raising
    # a confusing UnboundLocalError; fail fast with a clear ValueError.
    if name not in builders:
        raise ValueError("unknown CNN name: {!r}".format(name))
    cnn = builders[name](weights="imagenet", include_top=False, pooling="avg")
    cnn.trainable = False  # parameters are frozen (not trainable)
    return cnn
Example no. 4
0
def make_models():
    """Build a frozen Xception `validator` and a sequence `predictor`
    that runs Xception per-frame over 30-frame clips, feeds the frame
    features through two LSTMs, and reuses the validator's final layer.

    Returns:
        (predictor, validator): the sequence model and the frozen
        single-image Xception classifier.
    """
    # Full Xception classifier, completely frozen.
    validator = Xception(weights="imagenet", include_top=True)
    validator.trainable = False
    for layer in validator.layers:
        layer.trainable = False

    # Second Xception copy intended as a per-frame feature extractor.
    predictor_input = Xception(weights="imagenet", include_top=True)
    # NOTE(review): `layers.pop()` on a functional model is a known
    # pitfall — in modern Keras it does not rewire the graph, so the
    # model's output may still include the popped layer. Confirm against
    # the Keras version in use (a Model(inputs, layers[-2].output)
    # rebuild is the usual safe alternative).
    predictor_input.layers.pop()
    predictor_input.trainable = False
    for layer in predictor_input.layers:
        layer.trainable = False

    predictor = Sequential()
    # Input shape of a single frame, taken from the validator's input
    # layer; the TimeDistributed wrapper adds the 30-frame time axis.
    inp_shape = validator.layers[0].input_shape
    predictor.add(
        TimeDistributed(predictor_input,
                        input_shape=(30, inp_shape[1], inp_shape[2],
                                     inp_shape[3])))
    predictor.add(LSTM(2048, return_sequences=True))
    predictor.add(LSTM(2048))
    # Reuse the validator's (frozen) final classification layer.
    predictor.add(validator.layers[-1])
    # Keep the per-frame CNN frozen inside the sequence model.
    predictor.layers[0].trainable = False

    return predictor, validator
def runCNNTransferModel( xtr, xte, ytr, yte, epochs=5, batchsize=100, dropout=0.4 ):
    """Train a 12-class transfer-learning classifier on greyscale
    128x128 images using a frozen Xception backbone.

    Args:
        xtr, xte: training / test images (greyscale, colour dim added
            via addColourDimension).
        ytr, yte: integer class labels (12 classes).
        epochs, batchsize, dropout: training hyperparameters.

    Returns:
        (history, model): the Keras training history and trained model.
    """
    # Frozen ImageNet Xception backbone for 128x128 RGB inputs.
    backbone = Xception(weights='imagenet',
                        include_top=False,
                        input_shape=(128, 128, 3))
    backbone.trainable = False

    # Add the colour dimension and one-hot encode the labels.
    x_train = addColourDimension(xtr)
    x_test = addColourDimension(xte)
    y_train = to_categorical(ytr, num_classes=12)
    y_test = to_categorical(yte, num_classes=12)

    model = Sequential()
    # 1x1 convolutions "learn" a mapping from the single grey channel
    # into the three colour channels the pretrained Xception expects.
    model.add(Conv2D(10, kernel_size = (1,1), input_shape=(128, 128, 1), padding = 'same', activation = 'relu'))
    model.add(Conv2D(3, kernel_size = (1,1), padding = 'same', activation = 'relu'))
    model.add(backbone)
    model.add(Flatten())
    model.add(Dropout(dropout))
    model.add(Dense(256, activation='relu'))
    model.add(Dense(12, activation='softmax'))

    model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])

    # Stop when validation accuracy plateaus and roll back to the best
    # weights seen during training.
    stopper = EarlyStopping(
        monitor='val_acc',
        min_delta=0,
        patience=10,
        verbose=0,
        mode='auto',
        baseline=None,
        restore_best_weights=True)

    # Train the model.
    history = model.fit(x_train, y_train,
                        epochs=epochs, batch_size=batchsize,
                        validation_data=(x_test, y_test),
                        callbacks=[stopper], verbose=1)

    plotHistory(history)

    return history, model
Example no. 6
0
def build_finetune_model():
    """Recreate the 120-class Xception classifier and restore its
    trained weights from 'weights.h5'.

    Returns:
        The compiled Keras model with weights loaded.
    """
    image_input = Input((300, 300, 3))
    # Frozen ImageNet Xception backbone, driven by our input tensor.
    base = Xception(input_tensor=image_input,
                    include_top=False,
                    weights='imagenet')
    base.trainable = False

    # Classification head: pooled features -> dropout -> dense -> softmax.
    head = GlobalAveragePooling2D()(base.output)
    head = Dropout(0.2)(head)
    head = Dense(1024, activation="relu")(head)
    head = Dense(120, activation="softmax")(head)

    model = Model(image_input, head)
    model.load_weights('weights.h5')
    model.compile(loss='categorical_crossentropy',
                  optimizer=Adam(1e-4),
                  metrics=['accuracy'])
    print("Model is loaded")
    return model
                    img = transformations(img, np.random.randint(6))
                    targets = np.zeros(17)
                    for t in tags.split(' '):
                        targets[label_map[t]] = 1
                    x_batch.append(img)
                    y_batch.append(targets)
                x_batch = np.array(x_batch, np.float32)
                y_batch = np.array(y_batch, np.uint8)
                yield x_batch, y_batch

    base_model = Xception(include_top=False,
                          weights='imagenet',
                          input_tensor=None,
                          input_shape=(image_size, image_size, 3),
                          pooling='avg')
    base_model.trainable = False
    model = Sequential()
    # Batchnorm input
    model.add(
        BatchNormalization(input_shape=(input_size, input_size,
                                        input_channels)))
    # Base model
    model.add(base_model)
    # Classifier
    model.add(Dense(2048, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(17, activation='sigmoid'))

    opt = Adam(lr=1e-4)

    model.compile(
Example no. 8
0
# Evaluation script: rebuilds the binary (normal vs. glaucoma) Xception
# classifier, restores saved weights, and evaluates on the test split.
from preprocess import train, val, train_steps, val_steps, test, test_steps
from keras.applications import Xception
from keras import layers, models
from keras import callbacks, optimizers
# Uneven class distribution — note: class_weight is only relevant for
# training; this script only evaluates, so it goes unused here.
class_weight = {
    train.class_indices['normal']: 1,
    train.class_indices['glaucoma']: 1.86
}

# Using pretrained Xception Net as Convolutional feature extractor
conv_base = Xception(include_top=False,
                     weights='imagenet',
                     input_shape=(299, 299, 3))
conv_base.trainable = False  # keep the pretrained weights fixed

# Frozen backbone + small dense head with a single sigmoid output.
model = models.Sequential()
model.add(conv_base)
model.add(layers.Flatten())
model.add(layers.Dense(256, activation='relu'))
model.add(layers.Dropout(0.5))
model.add(layers.Dense(1, activation='sigmoid'))
model.compile(optimizers.RMSprop(1e-5),
              'binary_crossentropy',
              metrics=['accuracy'])

print(model.summary())
print("Loading weights...")
# Restore previously trained weights before evaluating.
model.load_weights('/model/best_weights.h5')
print("Evaluating on test data")
# NOTE(review): evaluate_generator is deprecated in newer Keras/TF —
# confirm the pinned Keras version still provides it.
print(model.evaluate_generator(test, test_steps))
Example no. 9
0
# Builds a 2-class (fire / no-fire) video classifier: per-frame Xception
# features -> ConvLSTM2D over time -> global pooling -> dense head.
# pattern to get videos and classes
video_path = '/home/vislab/Downloads/Videos/Videos/fire_videos/{classname}/*.avi'

img_height = 299
img_width = 299
no_of_frames = 15  #21  # frames sampled per clip
channels = 3
no_of_epochs = 50
batch_size_value = 5

# Input: a clip of `no_of_frames` RGB frames.
input_video = Input(shape=(no_of_frames, img_width, img_height, channels))
cnn_base = Xception(input_shape=(img_width, img_height, channels),
                    weights="imagenet",
                    include_top=False)
cnn_base.trainable = False  # frozen ImageNet feature extractor

# Apply the frozen CNN to every frame independently.
encoded_frames = TimeDistributed(cnn_base)(input_video)
# Aggregate the frame feature maps over time with a convolutional LSTM;
# return_sequences=False keeps only the final state.
encoded_sequence = ConvLSTM2D(64,
                              kernel_size=(7, 7),
                              strides=(2, 2),
                              padding='same',
                              return_sequences=False)(encoded_frames)

# Collapse the remaining spatial dimensions to a feature vector.
GAP_layer = GlobalAveragePooling2D()(encoded_sequence)

# Dense classification head ending in a 2-way softmax.
hidden_layer_1 = Dense(activation="relu", units=1024)(GAP_layer)
drop_layer = Dropout(0.2)(hidden_layer_1)
hidden_layer_2 = Dense(activation="relu", units=512)(drop_layer)
outputs = Dense(2, activation="softmax")(hidden_layer_2)
model = Model([input_video], outputs)
Example no. 10
0
# Pre-fine-tune test script: rebuilds the binary (normal vs. glaucoma)
# classifier architecture, loads saved weights, and reports test accuracy.
from preprocess import train, val, train_steps, val_steps, test, test_steps
from keras.applications import Xception
from keras import layers, models
from keras import callbacks, optimizers
# Uneven class distribution — unused in this evaluation-only script.
class_weight = {
    train.class_indices['normal']: 1,
    train.class_indices['glaucoma']: 1.86
}

# Xception backbone built with weights=None (randomly initialized, NOT
# pretrained) — all real weights come from the load_weights() call below.
conv_base = Xception(include_top=False,
                     weights=None,
                     input_shape=(299, 299, 3))
conv_base.trainable = False

# Backbone + small dense head with a single sigmoid output.
model = models.Sequential()
model.add(conv_base)
model.add(layers.Flatten())
model.add(layers.Dense(256, activation='relu'))
model.add(layers.Dropout(0.5))
model.add(layers.Dense(1, activation='sigmoid'))
model.compile(optimizers.RMSprop(1e-5),
              'binary_crossentropy',
              metrics=['accuracy'])
print(model.summary())

print("Loading pre-finetune weights...")
# Restores the full model's trained weights (backbone and head).
model.load_weights('/model0/trained_dense.h5')
# evaluate_generator returns [loss, accuracy]; index 1 is accuracy.
print("Test Accuracy:{}".format(model.evaluate_generator(test, test_steps)[1]))
        class_mode="categorical",
        target_size=(299, 299))
    test_generator = test_datagen.flow_from_dataframe(
        dataframe=df_test,
        directory=img_path,
        x_col="Überschrift der Spalte mit den Dateinamen",
        y_col="Überschrift der Spalte mit den Schadensklassen",
        classes=['Klasse 1', 'Klasse 2', 'Klasse 3', 'Klasse 4', 'Klasse 5'],
        batch_size=9,
        shuffle=False,
        class_mode='categorical',
        target_size=(299, 299))
    inp = layers.Input([299, 299, 3])
    model_1 = Xception(weights='imagenet', include_top=False, input_tensor=inp)

    model_1.trainable = True

    set_trainable = False
    for layer in model_1.layers:
        if layer.name == 'block14_sepconv2':
            set_trainable = True
        if set_trainable:
            layer.trainable = True
        else:
            layer.trainable = False

    x = layers.Flatten()(model_1.output)
    x = layers.Dropout(0.3)(x)
    x = layers.Dense(256, activation='relu')(x)
    x = layers.Dropout(0.5)(x)
    x = layers.Dense(32, activation='relu')(x)
Example no. 12
0
# Cats-vs-dogs transfer learning: directory layout setup plus a model
# built on a frozen Xception backbone. (base_dir / train_dir are
# presumably defined earlier in the file — not visible here.)
validation_dir = os.path.join(base_dir, 'validation')
test_dir = os.path.join(base_dir, 'test')

train_cats_dir = os.path.join(train_dir, 'cats')
train_dogs_dir = os.path.join(train_dir, 'dogs')

validation_cats_dir = os.path.join(validation_dir, 'cats')
validation_dogs_dir = os.path.join(validation_dir, 'dogs')

test_cats_dir = os.path.join(test_dir, 'cats')
test_dogs_dir = os.path.join(test_dir, 'dogs')

# Frozen ImageNet Xception backbone for 150x150 RGB inputs.
xc_base = Xception(input_shape=(150, 150, 3),
                   weights='imagenet',
                   include_top=False)
xc_base.trainable = False

model = models.Sequential()
# Earlier hand-rolled CNN, kept for reference (superseded by xc_base):
# model.add(layers.Conv2D(32, (3, 3), activation='relu',input_shape=(150, 150, 3)))
# model.add(layers.MaxPooling2D((2, 2)))
# model.add(layers.Conv2D(64, (3, 3), activation='relu'))
# model.add(layers.MaxPooling2D((2, 2)))
# model.add(layers.Conv2D(128, (3, 3), activation='relu'))
# model.add(layers.MaxPooling2D((2, 2)))
# model.add(layers.Conv2D(128, (3, 3), activation='relu'))
# model.add(layers.MaxPooling2D((2, 2)))

model.add(xc_base)
model.add(layers.Flatten())
# model.add(layers.Dropout(0.5))
model.add(layers.Dense(256, activation='relu'))
Example no. 13
0
                     weights='imagenet',
                     input_shape=(299, 299, 3))

model = models.Sequential()
model.add(conv_base)
model.add(layers.Flatten())
model.add(layers.Dense(256, activation='relu'))
model.add(layers.Dropout(0.5))
model.add(layers.Dense(1, activation='sigmoid'))

model.compile(optimizers.RMSprop(1e-5),
              'binary_crossentropy',
              metrics=['accuracy'])
model.load_weights('/model/best_weights.h5')

conv_base.trainable = True
for layer in conv_base.layers:
    if layer.name != 'block14_sepconv2':
        layer.trainable = False

model.compile(optimizers.RMSprop(1e-5),
              'binary_crossentropy',
              metrics=['accuracy'])

print("Starting fine tuning")

model.fit_generator(train,
                    train_steps,
                    30,
                    callbacks=[
                        callbacks.TensorBoard('/output', 1),