Beispiel #1
0
def xception_model(input_shape=(197, 197, 3),
                   num_classes=1,
                   learning_rate=0.0001,
                   weight_path=None):
    """Build and compile an Xception-based classifier (trained from scratch).

    Args:
        input_shape: Input image shape; must satisfy Xception's minimum size.
        num_classes: Number of output units of the final classifier.
            NOTE(review): with the default of 1, softmax over a single unit
            always outputs 1.0 — a sigmoid/binary setup was presumably
            intended for the single-class case; confirm with callers.
        learning_rate: Learning rate passed to RMSprop.
        weight_path: Optional path to a weights file; loaded only if the
            file exists (silently skipped otherwise).

    Returns:
        A compiled Keras `Sequential` model.
    """
    # weights=None: no pretrained ImageNet weights; the backbone is
    # randomly initialized.
    base_model = Xception(include_top=False,
                          weights=None,
                          input_shape=input_shape)
    # HACK: strip the last 10 layers by mutating Keras-internal state
    # (layers list, .outputs, .outbound_nodes). This only takes effect on
    # older Keras versions where these attributes are plain mutable
    # lists — TODO confirm against the pinned Keras version.
    for i in range(10):
        base_model.layers.pop()
    base_model.outputs = [base_model.layers[-1].output]
    base_model.layers[-1].outbound_nodes = []

    model = Sequential()
    # BatchNormalization as the first layer doubles as input normalization.
    model.add(BatchNormalization(input_shape=input_shape))
    model.add(base_model)
    model.add(AveragePooling2D(pool_size=6, strides=2))
    model.add(Flatten())
    # HadamardClassifier is a project-local layer (defined elsewhere);
    # output_dim is its number of output units.
    model.add(HadamardClassifier(output_dim=num_classes))
    model.add(Activation('softmax'))

    # Best-effort weight restore: missing files are ignored, not an error.
    if weight_path:
        if os.path.isfile(weight_path):
            model.load_weights(weight_path)

    model.compile(loss='categorical_crossentropy',
                  optimizer=RMSprop(lr=learning_rate),
                  metrics=['accuracy'])

    return model
def Xception_model():
    """Build an ImageNet-pretrained Xception with a replaced classifier head.

    Relies on module-level globals `dimension`, `number_of_channels`, and
    `number_of_classes` (defined elsewhere in the file). Returns an
    uncompiled Keras `Model`; the caller is expected to compile it.
    """
    input_tensor = Input(shape=(dimension, dimension, number_of_channels))
    # include_top=True loads the original 1000-way ImageNet classifier,
    # which is then stripped below.
    model = Xception(input_tensor=input_tensor,
                     weights='imagenet',
                     include_top=True)
    # HACK: remove the original softmax layer by mutating Keras-internal
    # state (layers list, .outputs, .outbound_nodes). This only works on
    # older Keras versions where these are plain mutable attributes —
    # TODO confirm against the pinned Keras version.
    model.layers.pop()
    model.outputs = [model.layers[-1].output]
    model.layers[-1].outbound_nodes = []
    # Attach a fresh softmax head sized for this task, then rewrap as a
    # new functional Model from the original input.
    x = Dense(number_of_classes, activation='softmax')(model.output)
    model = Model(model.input, x)

    # the first 24 layers are not trained (frozen to keep their
    # ImageNet-pretrained weights; only later layers are fine-tuned)
    for layer in model.layers[:24]:
        layer.trainable = False

    return model
Beispiel #3
0
def Xception_model():
    """Build and compile a fine-tunable, ImageNet-pretrained Xception.

    Same construction as the uncompiled variant of this function seen
    elsewhere in this file, but additionally compiles with Adam and prints
    a model summary. Relies on module-level globals `dimension`,
    `number_of_channels`, and `number_of_classes` (defined elsewhere).
    """
    input_tensor = Input(shape=(dimension, dimension, number_of_channels))
    # include_top=True loads the original 1000-way ImageNet classifier,
    # which is then stripped below.
    model = Xception(input_tensor=input_tensor,
                     weights='imagenet',
                     include_top=True)
    # HACK: remove the original softmax layer by mutating Keras-internal
    # state (layers list, .outputs, .outbound_nodes). This only works on
    # older Keras versions where these are plain mutable attributes —
    # TODO confirm against the pinned Keras version.
    model.layers.pop()
    model.outputs = [model.layers[-1].output]
    model.layers[-1].outbound_nodes = []
    # Attach a fresh softmax head sized for this task, then rewrap as a
    # new functional Model from the original input.
    x = Dense(number_of_classes, activation='softmax')(model.output)
    model = Model(model.input, x)

    # the first 24 layers are not trained (frozen to keep their
    # ImageNet-pretrained weights; only later layers are fine-tuned)
    for layer in model.layers[:24]:
        layer.trainable = False

    # NOTE(review): `lr` and `decay` are legacy Keras optimizer argument
    # names (newer versions use `learning_rate` and a schedule) — confirm
    # against the pinned Keras version.
    lrate = 0.001
    decay = 0.000001
    adam = Adam(lr=lrate, decay=decay)
    model.compile(loss='categorical_crossentropy',
                  optimizer=adam,
                  metrics=['accuracy'])

    print(model.summary())
    return model