Example #1
File: models.py Project: azeus404/thesis
from keras.applications.xception import Xception
from keras.layers import Input, GlobalAveragePooling2D, Dense
from keras.models import Model


def Xceptionmodel(no_classes, shape):
    # Load Xception pretrained on ImageNet, without its classification head.
    base_model = Xception(include_top=False,
                          weights='imagenet',
                          input_shape=shape)
    # Freeze the convolutional base so only the new head is trained.
    base_model.trainable = False
    inputs = Input(shape=shape)
    # Run the base in inference mode so BatchNorm statistics stay frozen.
    x = base_model(inputs, training=False)
    x = GlobalAveragePooling2D()(x)
    x = Dense(1024, activation='relu')(x)
    #x = Dense(2048, activation='relu')(x)
    #x = Dropout(0.2)(x)
    predictions = Dense(no_classes, activation='softmax',
                        name='predictions')(x)
    model = Model(inputs=inputs, outputs=predictions)
    return model
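
A quick usage sketch for the factory above; the class count, input shape, and compile settings are illustrative assumptions, not part of the original project:

# Hypothetical usage (class count, shape, and optimizer are assumed values).
model = Xceptionmodel(no_classes=10, shape=(299, 299, 3))
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.summary()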
Example #2
from keras.applications.xception import Xception
from keras.callbacks import ModelCheckpoint
from keras.layers import Input, Flatten, Dense, Dropout
from keras.models import Model
from keras.preprocessing.image import ImageDataGenerator

basemodel = Xception(include_top=False, weights='imagenet')
inputs = Input(shape=(100, 100, 3))

x = basemodel(inputs)
x = Flatten()(x)
x = Dense(512, activation='relu')(x)
x = Dropout(0.5)(x)
x = Dense(512, activation='relu')(x)
x = Dropout(0.5)(x)
y = Dense(1, activation='sigmoid')(x)

# Freeze the pretrained base; only the new dense head will be trained.
basemodel.trainable = False

model = Model(inputs=inputs, outputs=y)
#basemodel.summary()
model.summary()

# Aggressive augmentation: flips plus rotations of up to 359 degrees.
trainGenerator = ImageDataGenerator(rescale=1. / 255,
                                    horizontal_flip=True,
                                    vertical_flip=True,
                                    rotation_range=359)
testGenerator = ImageDataGenerator(rescale=1. / 255,
                                   horizontal_flip=True,
                                   vertical_flip=True,
                                   rotation_range=359)

# The original snippet is truncated here; target_size, batch_size, and
# class_mode below are assumed values consistent with the model above.
traindata = trainGenerator.flow_from_directory("../data/train",
                                               target_size=(100, 100),
                                               batch_size=32,
                                               class_mode='binary')
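
ModelCheckpoint is imported above but never used in the visible portion of the snippet; a minimal sketch of how it might be wired into training, with the filepath, optimizer, and epoch count as assumptions:

# Hypothetical continuation (the original snippet is cut off above).
model.compile(loss='binary_crossentropy',
              optimizer='rmsprop',
              metrics=['acc'])
checkpoint = ModelCheckpoint("best.hdf5", monitor='loss', save_best_only=True)
model.fit_generator(traindata, epochs=10, callbacks=[checkpoint])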
Example #3
import keras
from keras.applications.xception import Xception
from keras.layers import Input

image_height = 224
image_width = 224

# Define the input placeholder of the neural network; this is what we will pass our images into.
input_tensor = Input(shape=(image_height, image_width, 3))

# Add the base neural network that we will be fine-tuning.
# input_tensor: This is the input placeholder that we defined above
# weights: 'imagenet' loads parameters pretrained on ImageNet instead of a random initialization.
# include_top: Whether to keep the network's final layers that actually do the classifying.
#              We exclude them because we will add our own layers that classify based on our training.
base_model = Xception(input_tensor=input_tensor,
                      weights='imagenet',
                      include_top=False)

# Freeze all the layers
base_model.trainable = False
"""Now, we will add our fine-tuning layers to place at the end of the network. We start you off with a fairly standard series of final layers, but there are a vast number of architectures that can be used. Once you run through the notebook the first time, try out different architectures for the end of the network using the layers listed at this link: https://keras.io/api/layers/. Feel free to try out different numbers of layers, different types of layers, and different orderings.

Here are some of the layers you might use:


*   Dense: A standard fully-connected layer.
*   Dropout: Randomly sets a group of input units to zero. Can improve classification accuracy.
*   Flatten: "Flattens" a multi-dimensional input (aka an output from a convolutional layer) so it works well with later layers
"""

# https://www.learnopencv.com/keras-tutorial-fine-tuning-using-pre-trained-models/
hidden_layer_size = 1024
dropout_param = 0.5
num_categories = 2
inputs = keras.Input(shape=(224, 224, 3))
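
The snippet stops right after defining the input; here is a sketch of how the head described in the docstring might be attached, using the constants above. The specific layer stack is an assumption, since the original code is truncated:

# Hypothetical continuation: attach a standard fine-tuning head using the
# constants defined earlier (layer choice assumed, not from the original).
x = base_model(inputs, training=False)
x = keras.layers.Flatten()(x)
x = keras.layers.Dense(hidden_layer_size, activation='relu')(x)
x = keras.layers.Dropout(dropout_param)(x)
outputs = keras.layers.Dense(num_categories, activation='softmax')(x)
model = keras.Model(inputs=inputs, outputs=outputs)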
Example #4
    else:
        # Reuse the Xception sub-model from a previously saved model.
        base = next(
            lr for lr in keras.models.load_model(f"{base_model}.h5").layers
            if 'xception' in lr.name.lower())

    inputs = [Input(shape=shape), Input(shape=shape)]

    # Siamese setup: run both inputs through the shared base and compare
    # their embeddings by elementwise subtraction.
    compare = keras.layers.subtract([base(i) for i in inputs])
    compare = Dense(256, activation='relu')(Dropout(0.25)(compare))
    compare = Dense(64, activation='relu')(Dropout(0.5)(compare))
    compare = Dense(1, activation='sigmoid',
                    name='compare')(Dropout(0.5)(compare))

    model = Model(inputs, compare)

    base.trainable = False

else:
    # Resume from a saved model and partially unfreeze the Xception base.
    model = keras.models.load_model(model_path)

    base = next(lr for lr in model.layers if 'xception' in lr.name.lower())

    base.trainable = True
    # Keep everything below block13 frozen; unfreeze block13 and above.
    trainable = False
    for lr in base.layers:
        if not trainable and 'block13' in lr.name.lower():
            trainable = True

        lr.trainable = trainable

model.summary()
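
The fragment never shows a compile step; a minimal one for the pair-comparison model above, where the optimizer and learning rate are assumptions (the single sigmoid output suggests a binary same/different target per pair):

# Hypothetical compile step, not part of the original snippet.
model.compile(optimizer=keras.optimizers.Adam(1e-4),
              loss='binary_crossentropy',
              metrics=['accuracy'])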
Example #5
import os

import cv2
import keras
from keras.applications.xception import Xception
from keras.layers import Input, Flatten, Dense, Dropout
from keras.models import Model
from keras.preprocessing.image import ImageDataGenerator

basemodel = Xception(include_top=False, weights='imagenet')
inputs = Input(shape=(100, 100, 3))

x = basemodel(inputs)
x = Flatten()(x)
x = Dense(512, activation='relu')(x)
x = Dropout(0.1)(x)
x = Dense(512, activation='relu')(x)
x = Dropout(0.1)(x)
y = Dense(1, activation='sigmoid')(x)

basemodel.trainable = True

# Unfreeze only the final block14 separable convolutions (and their
# BatchNorm layers); keep every other layer frozen.
block14_layers = {'block14_sepconv1', 'block14_sepconv1_bn',
                  'block14_sepconv2', 'block14_sepconv2_bn'}
for i in basemodel.layers:
    if i.name in block14_layers:
        i.trainable = True
        print(i, "==============================================")
    else:
        i.trainable = False

model = Model(inputs=inputs, outputs=y)
model.compile(
    loss='binary_crossentropy',
    optimizer=keras.optimizers.RMSprop(),
    metrics=['acc'])

model.load_weights("weight.hdf5")
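
ImageDataGenerator is imported but unused in the visible portion, and the snippet ends after loading weights; a sketch of how training might continue, with the directory path, target size, batch size, and epoch count all assumed values:

# Hypothetical continuation after load_weights (all values assumed).
train_gen = ImageDataGenerator(rescale=1. / 255).flow_from_directory(
    "../data/train",
    target_size=(100, 100),
    batch_size=32,
    class_mode='binary')
model.fit_generator(train_gen, epochs=5)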