Example #1
def AlexNet(input_shape=(224, 224, 3), num_classes=10, l2_reg=0.0, weights=None):
    """
    AlexNet model
    :param input_shape: input shape
    :param num_classes: the number of classes
    :param l2_reg:
    :param weights:
    :return: model
    """
    input_layer = Input(shape=input_shape)

    # Layer 1
    # In order to get the same size of the paper mentioned, add padding layer first
    x = ZeroPadding2D(padding=(2, 2))(input_layer)
    x = conv_block(x, filters=96, kernel_size=(11, 11),
                   strides=(4, 4), padding="valid", l2_reg=l2_reg, name='Conv_1_96_11x11_4')
    x = MaxPool2D(pool_size=(3, 3), strides=(2, 2), padding="valid", name="maxpool_1_3x3_2")(x)

    # Layer 2
    x = conv_block(x, filters=256, kernel_size=(5, 5),
                   strides=(1, 1), padding="same", l2_reg=l2_reg, name="Conv_2_256_5x5_1")
    x = MaxPool2D(pool_size=(3, 3), strides=(2, 2), padding="valid", name="maxpool_2_3x3_2")(x)

    # Layer 3
    x = conv_block(x, filters=384, kernel_size=(3, 3),
                   strides=(1, 1), padding="same", l2_reg=l2_reg, name="Conv_3_384_3x3_1")

    # Layer 4
    x = conv_block(x, filters=384, kernel_size=(3, 3),
                   strides=(1, 1), padding="same", l2_reg=l2_reg, name="Conv_4_384_3x3_1")

    # Layer 5
    x = conv_block(x, filters=256, kernel_size=(3, 3),
                   strides=(1, 1), padding="same", l2_reg=l2_reg, name="Conv_5_256_3x3_1")
    x = MaxPool2D(pool_size=(3, 3), strides=(2, 2), padding="valid", name="maxpool_3_3x3_2")(x)

    # Layer 6
    x = Flatten()(x)
    x = Dense(units=4096)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    # Layer 7
    x = Dense(units=4096)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    # Layer 8
    x = Dense(units=num_classes)(x)
    x = BatchNormalization()(x)
    x = Activation("softmax")(x)

    model = Model(input_layer, x, name="AlexNet")
    if weights is not None:
        model.load_weights(weights)
    return model
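This example (and Example #4 below) calls a `conv_block` helper that is not included in the listing. A minimal sketch of such a helper, assuming a Conv2D + BatchNormalization + ReLU stack with an optional L2 kernel penalty (the exact signature is an assumption, not the original author's code):

from keras.layers import Conv2D, BatchNormalization, Activation
from keras.regularizers import l2

def conv_block(x, filters, kernel_size, strides=(1, 1), padding="same",
               l2_reg=0.0, name=None):
    # Hypothetical helper: convolution, then batch norm, then ReLU,
    # with an optional L2 penalty on the convolution kernel.
    x = Conv2D(filters, kernel_size, strides=strides, padding=padding,
               kernel_regularizer=l2(l2_reg) if l2_reg else None,
               name=name)(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    return x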
Example #2
def VGG_16(input_shape=(224, 224, 3), num_classes=10, weight_path=None):
    # instantiate a Keras tensor
    input_layer = Input(shape=input_shape)
    # stage 1
    x = conv_block(input_layer, filters=64, kernel_size=(3, 3), name="conv1_1_64_3x3_1")
    x = conv_block(x, filters=64, kernel_size=(3, 3), name="conv1_2_64_3x3_1")
    x = MaxPool2D(pool_size=(2, 2), strides=(2, 2), name="max_pool_1_2x2_2")(x)
    # stage 2
    x = conv_block(x, filters=128, kernel_size=(3, 3), name="conv2_1_128_3x3_1")
    x = conv_block(x, filters=128, kernel_size=(3, 3), name="conv2_2_128_3x3_1")
    x = MaxPool2D(pool_size=(2, 2), strides=(2, 2), name="max_pool_2_2x2_2")(x)
    # stage 3
    x = conv_block(x, filters=256, kernel_size=(3, 3), name="conv3_1_256_3x3_1")
    x = conv_block(x, filters=256, kernel_size=(3, 3), name="conv3_2_256_3x3_1")
    x = conv_block(x, filters=256, kernel_size=(1, 1), name="conv3_3_256_3x3_1")
    x = MaxPool2D(pool_size=(2, 2), strides=(2, 2), name="max_pool_3_2x2_2")(x)
    # stage 4
    x = conv_block(x, filters=512, kernel_size=(3, 3), name="conv4_1_512_3x3_1")
    x = conv_block(x, filters=512, kernel_size=(3, 3), name="conv4_2_512_3x3_1")
    x = conv_block(x, filters=512, kernel_size=(1, 1), name="conv4_3_512_3x3_1")
    x = MaxPool2D(pool_size=(2, 2), strides=(2, 2), name="max_pool_4_2x2_2")(x)
    # stage 5
    x = conv_block(x, filters=512, kernel_size=(3, 3), name="conv5_1_512_3x3_1")
    x = conv_block(x, filters=512, kernel_size=(3, 3), name="conv5_2_512_3x3_1")
    x = conv_block(x, filters=512, kernel_size=(1, 1), name="conv5_3_512_3x3_1")
    x = MaxPool2D(pool_size=(2, 2), strides=(2, 2), name="max_pool_5_2x2_2")(x)

    # FC layers
    # FC layer 1
    x = Flatten()(x)
    x = Dense(2048)(x)
    x = BatchNormalization()(x)
    x = Dropout(0.5)(x)
    x = Activation("relu")(x)
    # FC layer 2
    x = Dense(1024)(x)
    x = BatchNormalization()(x)
    x = Dropout(0.5)(x)
    x = Activation("relu")(x)
    # FC layer 3
    """我以为最后一层不需要dropout层"""
    x = Dense(num_classes)(x)
    x = BatchNormalization()(x)
    x = Activation("softmax")(x)

    model = Model(input_layer, x, name="VGG16_Net")
    if weight_path:
        model.load_weights(weight_path)
    model.summary()
    return model
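A usage sketch for the builder above, with hypothetical training data names (x_train and y_train are assumed to be preprocessed 224x224 RGB images and one-hot labels):

model = VGG_16(input_shape=(224, 224, 3), num_classes=10)
model.compile(loss="categorical_crossentropy",
              optimizer="adam",
              metrics=["accuracy"])
model.fit(x_train, y_train, batch_size=32, epochs=10, validation_split=0.1)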
Example #3
    def _get_encoder(self):
        if self._encoder is not None:
            return self._encoder
        input_shape = self._input_shape
        target_dims = self._target_dims
        input_ = Input(input_shape)
        model = self._conv(64)(input_)
        model = self._conv(128)(model)
        model = self._conv(256)(model)
        model = self._conv(512)(model)

        model = Flatten()(model)
        model = Dense(target_dims, activation='tanh')(model)

        model = Model(input_, model)
        if os.path.exists('encoder.h5'):
            model.load_weights('encoder.h5')
        return model
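The encoder above depends on a `self._conv` factory that is not shown, so its real definition is unknown. One plausible sketch, assuming each call returns a strided Conv2D + LeakyReLU block that halves the spatial resolution (a method on the same class):

from keras.layers import Conv2D, LeakyReLU

    def _conv(self, filters):
        # Hypothetical factory: a 5x5 convolution that downsamples by 2, then LeakyReLU.
        def block(x):
            x = Conv2D(filters, (5, 5), strides=(2, 2), padding="same")(x)
            x = LeakyReLU(0.2)(x)
            return x
        return block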
Example #4
def AlexNet(input_shape, num_classes, l2_reg=0.0, weights=None):
    ipt = Input(shape=input_shape, name="input")
    layer = ZeroPadding2D(padding=(2, 2))(ipt)
    layer = conv_block(layer,
                       filters=96,
                       kernel_size=(11, 11),
                       strides=(4, 4),
                       padding="valid",
                       l2_reg=l2_reg,
                       name='Conv_1_96_11x11_4')
    layer = MaxPool2D(pool_size=(3, 3),
                      strides=(2, 2),
                      padding="valid",
                      name="maxpool_1_3x3_2")(layer)

    # Layer 2
    layer = conv_block(layer,
                       filters=256,
                       kernel_size=(5, 5),
                       strides=(1, 1),
                       padding="same",
                       l2_reg=l2_reg,
                       name="Conv_2_256_5layer5_1")
    layer = MaxPool2D(pool_size=(3, 3),
                      strides=(2, 2),
                      padding="valid",
                      name="maxpool_2_3x3_2")(layer)

    # Layer 3
    layer = conv_block(layer,
                       filters=384,
                       kernel_size=(3, 3),
                       strides=(1, 1),
                       padding="same",
                       l2_reg=l2_reg,
                       name="Conv_3_384_3x3_1")

    # Layer 4
    layer = conv_block(layer,
                       filters=384,
                       kernel_size=(3, 3),
                       strides=(1, 1),
                       padding="same",
                       l2_reg=l2_reg,
                       name="Conv_4_384_3x3_1")

    # Layer 5
    layer = conv_block(layer,
                       filters=256,
                       kernel_size=(3, 3),
                       strides=(1, 1),
                       padding="same",
                       l2_reg=l2_reg,
                       name="Conv_5_256_3x3_1")
    layer = MaxPool2D(pool_size=(3, 3),
                      strides=(2, 2),
                      padding="valid",
                      name="maxpool_3_3x3_2")(layer)

    # Layer 6
    layer = Flatten()(layer)
    layer = Dense(units=4096)(layer)
    layer = BatchNormalization()(layer)
    layer = Activation('relu')(layer)

    # Layer 7
    layer = Dense(units=4096)(layer)
    layer = BatchNormalization()(layer)
    layer = Activation('relu')(layer)

    # Layer 8
    layer = Dense(units=num_classes)(layer)
    layer = BatchNormalization()(layer)
    layer = Activation("softmax")(layer)

    model = Model(ipt, layer, name="AlexNet")
    if weights is not None:
        model.load_weights(weights)
    model.compile(loss='mse', optimizer='adam', metrics=['accuracy'])
    model.summary()
    return model
Example #5
def build_model(weights_path=None): 
    # Define image input layer
    if DIM_ORDERING == 'th':
        INP_SHAPE = (3, 224, 224)  # 3 - Number of RGB Colours
        img_input = Input(shape=INP_SHAPE)
        CONCAT_AXIS = 1
    elif DIM_ORDERING == 'tf':
        INP_SHAPE = (224, 224, 3)  # 3 - Number of RGB Colours
        img_input = Input(shape=INP_SHAPE)
        CONCAT_AXIS = 3
    else:
        raise Exception('Invalid dim ordering: ' + str(DIM_ORDERING))

    # Channel 1 - Convolution Net Layer 1
    model = conv2D_lrn2d(
        img_input, 3, 11, 11, subsample=(1, 1), border_mode='same')
    model = MaxPooling2D(
        strides=(4, 4), pool_size=(4, 4), dim_ordering=DIM_ORDERING)(model)
    model = ZeroPadding2D(padding=(1, 1), dim_ordering=DIM_ORDERING)(model)

    # Channel 1 - Convolution Net Layer 2
    model = conv2D_lrn2d(model, 48, 55, 55, subsample=(1, 1), border_mode='same')
    model = MaxPooling2D(
        strides=(2, 2), pool_size=(2, 2), dim_ordering=DIM_ORDERING)(model)
    model = ZeroPadding2D(padding=(1, 1), dim_ordering=DIM_ORDERING)(model)

    # Channel 1 - Convolution Net Layer 3
    model = conv2D_lrn2d(model, 128, 27, 27, subsample=(1, 1), border_mode='same')
    model = MaxPooling2D(
        strides=(2, 2), pool_size=(2, 2), dim_ordering=DIM_ORDERING)(model)
    model = ZeroPadding2D(padding=(1, 1), dim_ordering=DIM_ORDERING)(model)

    # Channel 1 - Convolution Net Layer 4
    model = conv2D_lrn2d(model, 192, 13, 13, subsample=(1, 1), border_mode='same')
    model = ZeroPadding2D(padding=(1, 1), dim_ordering=DIM_ORDERING)(model)

    # Channel 1 - Convolution Net Layer 5
    model = conv2D_lrn2d(model, 192, 13, 13, subsample=(1, 1), border_mode='same')
    model = ZeroPadding2D(padding=(1, 1), dim_ordering=DIM_ORDERING)(model)

    # Channel 1 - Cov Net Layer 6
    model = conv2D_lrn2d(model, 128, 27, 27, subsample=(1, 1), border_mode='same')
    model = MaxPooling2D(
        strides=(2, 2), pool_size=(2, 2), dim_ordering=DIM_ORDERING)(model)
    model = ZeroPadding2D(padding=(1, 1), dim_ordering=DIM_ORDERING)(model)

    # Channel 1 - Cov Net Layer 7
    model = Flatten()(model)
    model = Dense(2048, activation='relu')(model)
    model = Dropout(DROPOUT)(model)

    # Channel 1 - Cov Net Layer 8
    model = Dense(2048, activation='relu')(model)
    model = Dropout(DROPOUT)(model)

    # Final Channel - Cov Net 9
    model = Dense(output_dim=NB_CLASS, activation='softmax')(model)
    
    if weights_path:
        model.load_weights(weights_path)
    return model, img_input, CONCAT_AXIS, INP_SHAPE, DIM_ORDERING
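`build_model` returns the output tensor together with the input tensor rather than a compiled `Model`, so a caller is expected to wrap the two. A minimal usage sketch, kept in the same Keras 1 style API as the example:

# Wrap the returned tensors into a trainable Keras model.
out, img_input, CONCAT_AXIS, INP_SHAPE, DIM_ORDERING = build_model()
alexnet = Model(input=img_input, output=out)
alexnet.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])
alexnet.summary()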
Example #6
base_model = ResNet50(weights="imagenet",
                      include_top=False,
                      input_shape=(img_size1, img_size2, 3))
print("base_model = resnet50")

# Add a new top layer classifier
x = base_model.output
x = Flatten()(x)
x = Dropout(0.2)(x)
x = Dense(32, activation='relu')(x)
x = Dense(16, activation='relu')(x)
predictions = Dense(classes, activation='sigmoid')(x)  #softmax

# Training Model
model = Model(inputs=base_model.input, outputs=predictions)
model.load_weights(top_model_weights_path)
model.summary()
print(
    "This is the number of trainable weights before freezing the resnet50 weights:",
    len(model.trainable_weights))
# Freezing the first 45 resnet50 layers so the pretrained weights do not get updated,
# effectively rendering the model useless

# for layer in base_model.layers:  # turn this on
#     layer.trainable = True
print(
    "This is the number of trainable weights after freezing the resnet50 weights:",
    len(model.trainable_weights))

model.compile(loss='categorical_crossentropy',
              optimizer=SGD(lr=1e-4),
              metrics=['accuracy'])  #optimizer = sgd # rmsprop
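Note that the freezing described in the comment above is never actually applied, so every layer remains trainable. A sketch of freezing the first 45 base-model layers (the count comes from the comment; adjust as needed) and re-compiling so the change takes effect:

# Freeze the first 45 ResNet50 layers; the rest stay trainable.
for layer in base_model.layers[:45]:
    layer.trainable = False

# Trainability changes only take effect after compiling again.
model.compile(loss='categorical_crossentropy',
              optimizer=SGD(lr=1e-4),
              metrics=['accuracy'])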
Example #7
output_label = Dense(1, activation='sigmoid')(model)
model = Model(inputs=[question1, question2], outputs=output_label)
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
model.summary()

callbacks = [
    ModelCheckpoint(MODEL_WEIGHTS_FILE, monitor='val_acc', save_best_only=True)
]
history = model.fit([q1_trainset, q2_trainset],
                    Y_train,
                    epochs=NB_EPOCHS,
                    validation_split=VALIDATION_SPLIT,
                    verbose=2,
                    batch_size=BATCH_SIZE,
                    callbacks=callbacks)

import matplotlib.pyplot as plt

plt.figure()
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])

model.save(MODEL_WEIGHTS_FILE)
model.load_weights(MODEL_WEIGHTS_FILE)
loss, accuracy = model.evaluate([q1_testset, q2_testset], Y_test, verbose=0)

print('loss = {0:.4f}, accuracy = {1:.4f}'.format(loss, accuracy))
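The accuracy figure above is plotted but never labeled or displayed; a small follow-up sketch using the same matplotlib figure:

plt.title('Model accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend(['train', 'validation'], loc='lower right')
plt.show()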