Example #1
from keras import models
from keras.layers.convolutional import Conv2D, ZeroPadding2D
from keras.layers import Input, Reshape

from dataset_test import DataSet

path_dataset = './datasets/RML2016.10a_dict.pkl'
dataset = DataSet(path_dataset)
X, lbl, snrs, mods = dataset.getX()
X_train, Y_train, X_test, Y_test, classes = dataset.getTrainAndTest()

in_shp = list(X_train.shape[1:])  # (2, 128)
# print(in_shp)

d_model1_3_1 = models.Sequential()
d_model1_3_1.add(Reshape([2, 128, 1], input_shape=[2, 128], name='d_reshape1'))
d_model1_3_1.add(ZeroPadding2D((0, 2), name='padding1_1'))
d_model1_3_1.add(
    Conv2D(256, (1, 3),
           strides=1,
           input_shape=[2, 132, 1],
           padding='valid',
           activation='relu',
           name='d_conv1_1',
           kernel_initializer='glorot_uniform'))
# d_model1_3_1.summary()
d_model1_3_1.load_weights('./model/d_model1_3_1.h5')

d_model1_3_2 = models.Sequential()
d_model1_3_2.add(
    Reshape([2, 130, 64], input_shape=[2, 130, 64], name='d_reshape1'))
d_model1_3_2.add(ZeroPadding2D((0, 2), name='padding1_2'))
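
A quick way to check the shapes produced by the Reshape -> ZeroPadding2D -> Conv2D stack above is to rebuild just those three layers and print the summary. This is a minimal sketch under Keras 2.x, without loading the saved weights:

# Shape check only: Reshape -> ZeroPadding2D -> Conv2D, no pretrained weights.
from keras import models
from keras.layers import Reshape
from keras.layers.convolutional import Conv2D, ZeroPadding2D

m = models.Sequential()
m.add(Reshape([2, 128, 1], input_shape=[2, 128]))    # (2, 128) -> (2, 128, 1)
m.add(ZeroPadding2D((0, 2)))                         # pad the 128-axis by 2 per side -> (2, 132, 1)
m.add(Conv2D(256, (1, 3), padding='valid', activation='relu'))   # -> (2, 130, 256)
m.summary()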
Example #2
def to_onehot(yy):
    # one-hot encode a list of integer class indices (assumes numpy imported as np earlier in the file)
    yy1 = np.zeros([len(yy), max(yy) + 1])
    yy1[np.arange(len(yy)), yy] = 1
    return yy1


trainy = list(map(lambda x: mods.index(lbl[x][0]), train_idx))
Y_train = to_onehot(trainy)
Y_test = to_onehot(list(map(lambda x: mods.index(lbl[x][0]), test_idx)))
#%%
in_shp = list(X_train.shape[1:])
print(X_train.shape, in_shp)
classes = mods
#%%
dr = 0.5  # dropout rate (%); convolutional layers follow  https://keras-cn.readthedocs.io/en/latest/layers/convolutional_layer/#conv2d
model = models.Sequential()  # Keras Sequential model  https://keras-cn.readthedocs.io/en/latest/models/sequential/
model.add(Reshape(([1] + in_shp), input_shape=in_shp))
model.add(ZeroPadding2D((0, 2)))
model.add(
    Conv2D(256, (1, 3),
           padding='valid',
           activation="relu",
           name="conv1",
           kernel_initializer='glorot_uniform',
           data_format="channels_first"))
model.add(Dropout(dr))
model.add(ZeroPadding2D((0, 2)))
model.add(
    Conv2D(80, (2, 3),
           padding="valid",
           activation="relu",
           name="conv2",
           kernel_initializer='glorot_uniform',
Example #3
def VGG_16(weights_path=None):
    model_name = "VGG_16"
    model = Sequential()
    model.add(ZeroPadding2D((1, 1), input_shape=(224, 224, 3)))
    model.add(Convolution2D(64, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(64, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(128, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(128, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(Flatten())
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(20, activation='softmax'))
    sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(loss='categorical_crossentropy',
                  optimizer=sgd,
                  metrics=['accuracy'])

    # if weights_path:
    #     model.load_weights(weights_path)

    return model, model_name


# if __name__ == "__main__":
#     im = cv2.resize(cv2.imread('cat.jpg'), (224, 224)).astype(np.float32)
#     im[:,:,0] -= 103.939
#     im[:,:,1] -= 116.779
#     im[:,:,2] -= 123.68
#     im = im.transpose((2,0,1))
#     im = np.expand_dims(im, axis=0)

#     # Test pretrained model
#     model = VGG_16('vgg16_weights.h5')
#     sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
#     model.compile(optimizer=sgd, loss='categorical_crossentropy')
#     out = model.predict(im)
#     print np.argmax(out)
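
The commented-out driver above targets the old channels-first weight layout; as a minimal sketch, the untrained network can also be exercised on a random channels-last batch just to confirm the output shape (assumes the Keras imports the function itself relies on):

# Sketch: forward a random 224x224x3 batch through the randomly initialized model.
import numpy as np

vgg, name = VGG_16()                                     # weights_path=None -> random init
dummy = np.random.rand(1, 224, 224, 3).astype('float32')
print(name, vgg.predict(dummy).shape)                    # expected: VGG_16 (1, 20)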
Example #4
def VGG_19(weights_path=VGG19_ILSVRC_WEIGHT_PATH):
    model = Sequential()
    model.add(ZeroPadding2D((1, 1), input_shape=(224, 224, 3)))

    model.add(
        keras.layers.Conv2D(64, (3, 3),
                            activation='relu',
                            padding='same',
                            name='block1_conv1'))
    model.add(
        keras.layers.Conv2D(64, (3, 3),
                            activation='relu',
                            padding='same',
                            name='block1_conv2'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool'))

    model.add(
        keras.layers.Conv2D(128, (3, 3),
                            activation='relu',
                            padding='same',
                            name='block2_conv1'))
    model.add(
        keras.layers.Conv2D(128, (3, 3),
                            activation='relu',
                            padding='same',
                            name='block2_conv2'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool'))

    model.add(
        keras.layers.Conv2D(256, (3, 3),
                            activation='relu',
                            padding='same',
                            name='block3_conv1'))
    model.add(
        keras.layers.Conv2D(256, (3, 3),
                            activation='relu',
                            padding='same',
                            name='block3_conv2'))
    model.add(
        keras.layers.Conv2D(256, (3, 3),
                            activation='relu',
                            padding='same',
                            name='block3_conv3'))
    model.add(
        keras.layers.Conv2D(256, (3, 3),
                            activation='relu',
                            padding='same',
                            name='block3_conv4'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool'))

    model.add(
        keras.layers.Conv2D(512, (3, 3),
                            activation='relu',
                            padding='same',
                            name='block4_conv1'))
    model.add(
        keras.layers.Conv2D(512, (3, 3),
                            activation='relu',
                            padding='same',
                            name='block4_conv2'))
    model.add(
        keras.layers.Conv2D(512, (3, 3),
                            activation='relu',
                            padding='same',
                            name='block4_conv3'))
    model.add(
        keras.layers.Conv2D(512, (3, 3),
                            activation='relu',
                            padding='same',
                            name='block4_conv4'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool'))

    model.add(
        keras.layers.Conv2D(512, (3, 3),
                            activation='relu',
                            padding='same',
                            name='block5_conv1'))
    model.add(
        keras.layers.Conv2D(512, (3, 3),
                            activation='relu',
                            padding='same',
                            name='block5_conv2'))
    model.add(
        keras.layers.Conv2D(512, (3, 3),
                            activation='relu',
                            padding='same',
                            name='block5_conv3'))
    model.add(
        keras.layers.Conv2D(512, (3, 3),
                            activation='relu',
                            padding='same',
                            name='block5_conv4'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool'))

    model.add(keras.layers.Flatten(name='flatten'))
    model.add(Dense(4096, activation='relu', name='fc1'))
    model.add(Dense(4096, activation='relu', name='fc2'))
    model.add(Dense(1000, activation='softmax', name='prediction'))

    if weights_path:
        model.load_weights(weights_path)

    return keras.models.Model(inputs=model.input,
                              outputs=model.get_layer('fc2').output)
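
Because the function above returns a Model cut off at fc2, it behaves as a 4096-dimensional feature extractor. A minimal usage sketch, assuming VGG19_ILSVRC_WEIGHT_PATH points at a compatible weight file:

# Sketch: extract fc2 features for one random image-sized batch.
import numpy as np

extractor = VGG_19()                                     # loads weights from VGG19_ILSVRC_WEIGHT_PATH
batch = np.random.rand(1, 224, 224, 3).astype('float32')
print(extractor.predict(batch).shape)                    # (1, 4096)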

Example #5
def model():
    # create model
    model = Sequential()
    model.add(ZeroPadding2D((1, 1), input_shape=(3, 224, 224)))
    model.add(Convolution2D(64, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(64, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(128, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(128, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(Flatten())
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(num_classes))
    model.compile(loss='mean_squared_error',
                  optimizer='adam',
                  metrics=['accuracy'])
    return model
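
Note that model() above reads a module-level num_classes and ends in a linear head compiled with mean squared error. A minimal sketch of calling it under Keras 2.x; num_classes is an assumed value and the data format is forced to channels-first to match the (3, 224, 224) input shape:

# Sketch: instantiate the builder above (illustrative num_classes).
from keras import backend as K
K.set_image_data_format('channels_first')   # the (3, 224, 224) input shape is channels-first

num_classes = 10                            # assumed value; model() expects this global
regressor = model()
regressor.summary()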
Example #6
aspect_ratio = 0

# get tensor representations of our images
base_image = K.variable(preprocess_image(base_image_path, True))
style_reference_image = K.variable(
    preprocess_image(style_reference_image_path))

# this will contain our generated image
combination_image = K.placeholder((1, 3, img_width, img_height))

# combine the 3 images into a single Keras tensor
input_tensor = K.concatenate(
    [base_image, style_reference_image, combination_image], axis=0)

# build the VGG16 network with our 3 images as input
first_layer = ZeroPadding2D((1, 1))
first_layer.set_input(input_tensor, shape=(3, 3, img_width, img_height))

model = Sequential()
model.add(first_layer)
model.add(Convolution2D(64, 3, 3, activation='relu', name='conv1_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(64, 3, 3, activation='relu'))
model.add(AveragePooling2D((2, 2), strides=(2, 2)))

model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(128, 3, 3, activation='relu', name='conv2_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(128, 3, 3, activation='relu'))
model.add(AveragePooling2D((2, 2), strides=(2, 2)))
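
first_layer.set_input() only exists in very old Keras releases. On Keras 1.x/2.x the same graph is usually built with the functional API by applying the layers to the concatenated tensor; a rough sketch of the first block, keeping the channels-first layout used above:

# Sketch: functional-API equivalent of feeding input_tensor into the first layers.
from keras.layers import Input

img = Input(tensor=input_tensor, shape=(3, img_width, img_height))
x = ZeroPadding2D((1, 1))(img)
x = Convolution2D(64, 3, 3, activation='relu', name='conv1_1')(x)
# ... remaining blocks chained the same way ...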
Example #7
def VGG_16(weights_path=None):
    model = Sequential()
    model.add(ZeroPadding2D((1, 1), input_shape=(3, 224, 224)))
    model.add(Convolution2D(64, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(64, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(128, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(128, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(Flatten())
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(1000, activation='softmax'))

    if weights_path:
        model.load_weights(weights_path)

    return model

Example #8
def AlexNet(img_shape=(80, 80, 3), n_classes=2, l2_reg=0., weights=None):
    # Initialize model
    alexnet = Sequential()

    # Layer 1
    alexnet.add(
        Conv2D(96, (11, 11),
               input_shape=img_shape,
               padding='same',
               kernel_regularizer=l2(l2_reg)))
    alexnet.add(BatchNormalization())
    alexnet.add(Activation('relu'))
    alexnet.add(MaxPooling2D(pool_size=(2, 2)))

    # Layer 2
    alexnet.add(Conv2D(256, (5, 5), padding='same'))
    alexnet.add(BatchNormalization())
    alexnet.add(Activation('relu'))
    alexnet.add(MaxPooling2D(pool_size=(2, 2)))

    # Layer 3
    alexnet.add(ZeroPadding2D((1, 1)))
    alexnet.add(Conv2D(512, (3, 3), padding='same'))
    alexnet.add(BatchNormalization())
    alexnet.add(Activation('relu'))
    alexnet.add(MaxPooling2D(pool_size=(2, 2)))

    # Layer 4
    alexnet.add(ZeroPadding2D((1, 1)))
    alexnet.add(Conv2D(1024, (3, 3), padding='same'))
    alexnet.add(BatchNormalization())
    alexnet.add(Activation('relu'))

    # Layer 5
    alexnet.add(ZeroPadding2D((1, 1)))
    alexnet.add(Conv2D(1024, (3, 3), padding='same'))
    alexnet.add(BatchNormalization())
    alexnet.add(Activation('relu'))
    alexnet.add(MaxPooling2D(pool_size=(2, 2)))

    # Layer 6
    alexnet.add(Flatten())
    alexnet.add(Dense(3072))
    alexnet.add(BatchNormalization())
    alexnet.add(Activation('relu'))
    alexnet.add(Dropout(0.5))

    # Layer 7
    alexnet.add(Dense(4096))
    alexnet.add(BatchNormalization())
    alexnet.add(Activation('relu'))
    alexnet.add(Dropout(0.5))

    # Layer 8
    alexnet.add(Dense(n_classes))
    alexnet.add(BatchNormalization())
    alexnet.add(Activation('softmax'))

    if weights is not None:
        alexnet.load_weights(weights)

    return alexnet
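
A minimal sketch of compiling and inspecting the AlexNet-style model above; the optimizer and loss are illustrative choices, not taken from the original:

net = AlexNet(img_shape=(80, 80, 3), n_classes=2)
net.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
net.summary()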
Example #9
    def ConvBlock(self, layers, filters):
        model = self.model
        for i in range(layers):
            model.add(ZeroPadding2D((1, 1)))
            model.add(Convolution2D(filters, 3, 3, activation='relu'))
        model.add(MaxPooling2D((2, 2), strides=(2, 2)))
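
ConvBlock above mutates self.model, so it is meant to live inside a model-builder class. A self-contained sketch of that context with a hypothetical VggLike class, using the same Keras 1.x-style API as the snippet:

from keras.models import Sequential
from keras.layers import ZeroPadding2D, Convolution2D, MaxPooling2D

class VggLike(object):
    def __init__(self):
        self.model = Sequential()
        self.model.add(ZeroPadding2D((1, 1), input_shape=(3, 224, 224)))
        self.model.add(Convolution2D(64, 3, 3, activation='relu'))

    def ConvBlock(self, layers, filters):
        # same pattern as above: `layers` padded 3x3 convs followed by a 2x2 max-pool
        model = self.model
        for i in range(layers):
            model.add(ZeroPadding2D((1, 1)))
            model.add(Convolution2D(filters, 3, 3, activation='relu'))
        model.add(MaxPooling2D((2, 2), strides=(2, 2)))

net = VggLike()
net.ConvBlock(2, 128)
net.model.summary()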
Example #10
def VGGCAM(nb_classes, num_input_channels=1024):
    """
    Build Convolution Neural Network

    args : nb_classes (int) number of classes

    returns : model (keras NN) the Neural Net model
    """

    model = Sequential()
    model.add(ZeroPadding2D((1, 1), input_shape=(3, 224, 224)))
    model.add(Convolution2D(64, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(64, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(128, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(128, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))

    # Add another conv layer with ReLU + GAP
    model.add(
        Convolution2D(num_input_channels,
                      3,
                      3,
                      activation='relu',
                      border_mode="same"))
    model.add(AveragePooling2D((14, 14)))
    model.add(Flatten())
    # Add the W layer
    model.add(Dense(nb_classes, activation='softmax'))

    model.name = "VGGCAM"

    return model
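
Once VGGCAM is trained, a class activation map is obtained by weighting the last convolutional feature maps with the weights of the final Dense layer. A hypothetical helper sketch, assuming the channels-first layout used above:

import numpy as np
from keras import backend as K

def get_cam(model, batch, class_idx):
    # feature maps produced just before the global average pooling
    conv_maps = K.function([model.input], [model.layers[-4].output])([batch])[0]
    # final Dense weights: shape (num_input_channels, nb_classes)
    W = model.layers[-1].get_weights()[0]
    # weighted sum over the channel axis (axis 1 with channels-first maps)
    return np.tensordot(W[:, class_idx], conv_maps, axes=([0], [1]))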
Example #11
def bottleneck(inp,
               output,
               internal_scale=4,
               asymmetric=0,
               dilated=0,
               downsample=False,
               dropout_rate=0.1):
    # main branch
    internal = output // internal_scale
    encoder = inp

    # 1x1
    input_stride = 2 if downsample else 1  # the 1st 1x1 projection is replaced with a 2x2 convolution when downsampling
    encoder = Conv2D(
        internal,
        (input_stride, input_stride),
        # padding='same',
        strides=(input_stride, input_stride),
        use_bias=False)(encoder)
    # Batch normalization + PReLU
    encoder = BatchNormalization(momentum=0.1)(
        encoder)  # enet uses momentum of 0.1, keras default is 0.99
    encoder = PReLU(shared_axes=[1, 2])(encoder)

    # conv
    if not asymmetric and not dilated:
        encoder = Conv2D(internal, (3, 3), padding='same')(encoder)
    elif asymmetric:
        encoder = Conv2D(internal, (1, asymmetric),
                         padding='same',
                         use_bias=False)(encoder)
        encoder = Conv2D(internal, (asymmetric, 1), padding='same')(encoder)
    elif dilated:
        encoder = Conv2D(internal, (3, 3),
                         dilation_rate=(dilated, dilated),
                         padding='same')(encoder)
    else:
        raise (Exception('You shouldn\'t be here'))

    encoder = BatchNormalization(momentum=0.1)(
        encoder)  # enet uses momentum of 0.1, keras default is 0.99
    encoder = PReLU(shared_axes=[1, 2])(encoder)

    # 1x1
    encoder = Conv2D(output, (1, 1), use_bias=False)(encoder)

    encoder = BatchNormalization(momentum=0.1)(
        encoder)  # enet uses momentum of 0.1, keras default is 0.99
    encoder = SpatialDropout2D(dropout_rate)(encoder)

    other = inp
    # other branch
    if downsample:
        other = MaxPooling2D()(other)

        other = Permute((1, 3, 2))(other)
        pad_feature_maps = output - inp.get_shape().as_list()[3]
        tb_pad = (0, 0)
        lr_pad = (0, pad_feature_maps)
        other = ZeroPadding2D(padding=(tb_pad, lr_pad))(other)
        other = Permute((1, 3, 2))(other)

    encoder = add([encoder, other])
    encoder = PReLU(shared_axes=[1, 2])(encoder)
    return encoder
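
A minimal sketch of wiring the ENet-style bottleneck above into a model, with channels-last tensors and the same Keras imports the function itself relies on (TensorFlow backend assumed for get_shape()):

from keras.layers import Input
from keras.models import Model

feat = Input(shape=(64, 64, 16))
out = bottleneck(feat, 64, downsample=True)   # 64x64x16 -> 32x32x64; the skip branch is zero-padded to 64 channels
Model(feat, out).summary()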

Example #12
def hidden_layers(m, k):
    m.add(ZeroPadding2D(padding=(1, 1)))
    m.add(Convolution2D(k, 3, 3))
    m.add(Activation('relu'))

    return m

Example #13
            open(SAVE_MODEL_FOLDER + "/lr.txt").read())


if __name__ == "__main__":

    if not os.path.exists(SAVE_MODEL_FOLDER):
        os.mkdir(SAVE_MODEL_FOLDER)

    print "load dataset.."
    train_x, train_y = load_dataset()
    print "..finish"

    print "make model.."
    model = Sequential()

    model.add(ZeroPadding2D(padding=(1, 1), input_shape=(38, 9, 9)))
    model.add(Convolution2D(k, 5, 5))
    model.add(Activation('relu'))

    for i in range(0, 13):
        model = hidden_layers(model, k)

    model.add(ZeroPadding2D(padding=(1, 1)))
    model.add(Convolution2D(1, 1, 1))
    model.add(Activation('relu'))

    model.add(Flatten())
    model.add(Activation('softmax'))

    print "..finish"
Example #14
def VGG_19(weights_path=None, heatmap=False):
    model = Sequential()

    if heatmap:
        model.add(ZeroPadding2D((1, 1), input_shape=(3, None, None)))
    else:
        model.add(ZeroPadding2D((1, 1), input_shape=(3, 224, 224)))
    model.add(Convolution2D(64, 3, 3, activation='relu', name='conv1_1'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(64, 3, 3, activation='relu', name='conv1_2'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(128, 3, 3, activation='relu', name='conv2_1'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(128, 3, 3, activation='relu', name='conv2_2'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_1'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_2'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_3'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_4'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_1'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_2'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_3'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_4'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_1'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_2'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_3'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_4'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    if heatmap:
        model.add(Convolution2D(4096, 7, 7, activation='relu', name='dense_1'))
        model.add(Convolution2D(4096, 1, 1, activation='relu', name='dense_2'))
        model.add(Convolution2D(1000, 1, 1, name='dense_3'))
        model.add(Softmax4D(axis=1, name='softmax'))
    else:
        model.add(Flatten())
        model.add(Dense(4096, activation='relu', name='dense_1'))
        model.add(Dropout(0.5))
        model.add(Dense(4096, activation='relu', name='dense_2'))
        model.add(Dropout(0.5))
        model.add(Dense(1000, name='dense_3'))
        model.add(Activation('softmax'))

    if weights_path:
        model.load_weights(weights_path)

    return model
Example #15
def VGG_16_test():
    model = Sequential()
    model.add(ZeroPadding2D((1, 1), input_shape=(224, 224, 3)))
    model.add(Conv2D(64, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(64, (3, 3), activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(128, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(128, (3, 3), activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(256, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(256, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(256, (3, 3), activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(512, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(512, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(512, (3, 3), activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(512, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(512, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(512, (3, 3), activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(Flatten())
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(2622, activation='softmax'))

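    # note: this 5-way head is stacked directly on top of the 2622-way softmax output above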
    model.add(Dense(5, activation='softmax'))
    return model
Example #16
def inceptionv2(input,
                dropout_keep_prob=0.8,
                num_classes=1000,
                is_training=True,
                scope='InceptionV2'):
    with tf.name_scope(scope, "InceptionV2", [input]):

        conv1_7x7_s2 = Conv2D(64, (7, 7),
                              strides=(2, 2),
                              padding='same',
                              activation='relu',
                              name='conv1/7x7_s2',
                              kernel_regularizer=l2(0.0002))(input)

        conv1_zero_pad = ZeroPadding2D(padding=(1, 1))(conv1_7x7_s2)

        pool1_3x3_s2 = MaxPooling2D(pool_size=(3, 3),
                                    strides=(2, 2),
                                    padding='valid',
                                    name='pool1/3x3_s2')(conv1_zero_pad)

        pool1_norm1 = BatchNormalization(axis=3,
                                         scale=False,
                                         name='pool1/norm1')(pool1_3x3_s2)

        conv2_3x3_reduce = Conv2D(64, (1, 1),
                                  padding='same',
                                  activation='relu',
                                  name='conv2/3x3_reduce',
                                  kernel_regularizer=l2(0.0002))(pool1_norm1)

        conv2_3x3 = Conv2D(192, (3, 3),
                           padding='same',
                           activation='relu',
                           name='conv2/3x3',
                           kernel_regularizer=l2(0.0002))(conv2_3x3_reduce)

        conv2_norm2 = BatchNormalization(axis=3,
                                         scale=False,
                                         name='conv2/norm2')(conv2_3x3)

        conv2_zero_pad = ZeroPadding2D(padding=(1, 1))(conv2_norm2)

        pool2_3x3_s2 = MaxPooling2D(pool_size=(3, 3),
                                    strides=(2, 2),
                                    padding='valid',
                                    name='pool2/3x3_s2')(conv2_zero_pad)

        inception_3a_1x1 = Conv2D(64, (1, 1),
                                  padding='same',
                                  activation='relu',
                                  name='inception_3a/1x1',
                                  kernel_regularizer=l2(0.0002))(pool2_3x3_s2)

        inception_3a_3x3_reduce = Conv2D(
            96, (1, 1),
            padding='same',
            activation='relu',
            name='inception_3a/3x3_reduce',
            kernel_regularizer=l2(0.0002))(pool2_3x3_s2)

        inception_3a_3x3 = Conv2D(
            128, (3, 3),
            padding='same',
            activation='relu',
            name='inception_3a/3x3',
            kernel_regularizer=l2(0.0002))(inception_3a_3x3_reduce)

        inception_3a_5x5_reduce = Conv2D(
            16, (1, 1),
            padding='same',
            activation='relu',
            name='inception_3a/5x5_reduce',
            kernel_regularizer=l2(0.0002))(pool2_3x3_s2)

        inception_3a_5x5_a = Conv2D(
            96, (3, 3),
            padding='same',
            activation='relu',
            name='inception_3a/5x5_a',
            kernel_regularizer=l2(0.0002))(inception_3a_5x5_reduce)

        inception_3a_5x5_b = Conv2D(
            96, (3, 3),
            padding='same',
            activation='relu',
            name='inception_3a/5x5_b',
            kernel_regularizer=l2(0.0002))(inception_3a_5x5_a)

        inception_3a_pool = MaxPooling2D(
            pool_size=(3, 3),
            strides=(1, 1),
            padding='same',
            name='inception_3a/pool')(pool2_3x3_s2)

        inception_3a_pool_proj = Conv2D(
            32, (1, 1),
            padding='same',
            activation='relu',
            name='inception_3a/pool_proj',
            kernel_regularizer=l2(0.0002))(inception_3a_pool)

        inception_3a_output = concatenate([
            inception_3a_1x1, inception_3a_3x3, inception_3a_5x5_b,
            inception_3a_pool_proj
        ],
                                          axis=3,
                                          name='inception_3a/output')

        inception_3b_1x1 = Conv2D(
            128, (1, 1),
            padding='same',
            activation='relu',
            name='inception_3b/1x1',
            kernel_regularizer=l2(0.0002))(inception_3a_output)

        inception_3b_3x3_reduce = Conv2D(
            128, (1, 1),
            padding='same',
            activation='relu',
            name='inception_3b/3x3_reduce',
            kernel_regularizer=l2(0.0002))(inception_3a_output)

        inception_3b_3x3 = Conv2D(
            192, (3, 3),
            padding='same',
            activation='relu',
            name='inception_3b/3x3',
            kernel_regularizer=l2(0.0002))(inception_3b_3x3_reduce)

        inception_3b_5x5_reduce = Conv2D(
            32, (1, 1),
            padding='same',
            activation='relu',
            name='inception_3b/5x5_reduce',
            kernel_regularizer=l2(0.0002))(inception_3a_output)

        inception_3b_5x5_a = Conv2D(
            96, (3, 3),
            padding='same',
            activation='relu',
            name='inception_3b/5x5_a',
            kernel_regularizer=l2(0.0002))(inception_3b_5x5_reduce)

        inception_3b_5x5_b = Conv2D(
            96, (3, 3),
            padding='same',
            activation='relu',
            name='inception_3b/5x5_b',
            kernel_regularizer=l2(0.0002))(inception_3b_5x5_a)

        inception_3b_pool = MaxPooling2D(
            pool_size=(3, 3),
            strides=(1, 1),
            padding='same',
            name='inception_3b/pool')(inception_3a_output)

        inception_3b_pool_proj = Conv2D(
            64, (1, 1),
            padding='same',
            activation='relu',
            name='inception_3b/pool_proj',
            kernel_regularizer=l2(0.0002))(inception_3b_pool)

        inception_3b_output = concatenate([
            inception_3b_1x1, inception_3b_3x3, inception_3b_5x5_b,
            inception_3b_pool_proj
        ],
                                          axis=3,
                                          name='inception_3b/output')

        inception_3b_output_zero_pad = ZeroPadding2D(
            padding=(1, 1))(inception_3b_output)

        pool3_3x3_s2 = MaxPooling2D(
            pool_size=(3, 3),
            strides=(2, 2),
            padding='valid',
            name='pool3/3x3_s2')(inception_3b_output_zero_pad)

        inception_4a_1x1 = Conv2D(192, (1, 1),
                                  padding='same',
                                  activation='relu',
                                  name='inception_4a/1x1',
                                  kernel_regularizer=l2(0.0002))(pool3_3x3_s2)

        inception_4a_3x3_reduce = Conv2D(
            96, (1, 1),
            padding='same',
            activation='relu',
            name='inception_4a/3x3_reduce',
            kernel_regularizer=l2(0.0002))(pool3_3x3_s2)

        inception_4a_3x3 = Conv2D(
            208, (3, 3),
            padding='same',
            activation='relu',
            name='inception_4a/3x3',
            kernel_regularizer=l2(0.0002))(inception_4a_3x3_reduce)

        inception_4a_5x5_reduce = Conv2D(
            16, (1, 1),
            padding='same',
            activation='relu',
            name='inception_4a/5x5_reduce',
            kernel_regularizer=l2(0.0002))(pool3_3x3_s2)

        inception_4a_5x5_a = Conv2D(
            48, (3, 3),
            padding='same',
            activation='relu',
            name='inception_4a/5x5_a',
            kernel_regularizer=l2(0.0002))(inception_4a_5x5_reduce)

        inception_4a_5x5_b = Conv2D(
            48, (3, 3),
            padding='same',
            activation='relu',
            name='inception_4a/5x5_b',
            kernel_regularizer=l2(0.0002))(inception_4a_5x5_a)

        inception_4a_pool = MaxPooling2D(
            pool_size=(3, 3),
            strides=(1, 1),
            padding='same',
            name='inception_4a/pool')(pool3_3x3_s2)

        inception_4a_pool_proj = Conv2D(
            64, (1, 1),
            padding='same',
            activation='relu',
            name='inception_4a/pool_proj',
            kernel_regularizer=l2(0.0002))(inception_4a_pool)

        inception_4a_output = concatenate([
            inception_4a_1x1, inception_4a_3x3, inception_4a_5x5_b,
            inception_4a_pool_proj
        ],
                                          axis=3,
                                          name='inception_4a/output')

        loss1_ave_pool = AveragePooling2D(
            pool_size=(5, 5), strides=(3, 3),
            name='loss1/ave_pool')(inception_4a_output)

        loss1_conv = Conv2D(128, (1, 1),
                            padding='same',
                            activation='relu',
                            name='loss1/conv',
                            kernel_regularizer=l2(0.0002))(loss1_ave_pool)

        loss1_flat = Flatten()(loss1_conv)

        loss1_fc = Dense(1024,
                         activation='relu',
                         name='loss1/fc',
                         kernel_regularizer=l2(0.0002))(loss1_flat)

        loss1_drop_fc = Dropout(dropout_keep_prob)(loss1_fc,
                                                   training=is_training)
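        # caution: Keras Dropout takes the fraction of units to DROP, so passing
        # dropout_keep_prob (0.8, named like a keep probability) drops 80% of the units here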

        loss1_classifier = Dense(num_classes,
                                 name='loss1/classifier',
                                 kernel_regularizer=l2(0.0002))(loss1_drop_fc)

        inception_4b_1x1 = Conv2D(
            160, (1, 1),
            padding='same',
            activation='relu',
            name='inception_4b/1x1',
            kernel_regularizer=l2(0.0002))(inception_4a_output)

        inception_4b_3x3_reduce = Conv2D(
            112, (1, 1),
            padding='same',
            activation='relu',
            name='inception_4b/3x3_reduce',
            kernel_regularizer=l2(0.0002))(inception_4a_output)

        inception_4b_3x3 = Conv2D(
            224, (3, 3),
            padding='same',
            activation='relu',
            name='inception_4b/3x3',
            kernel_regularizer=l2(0.0002))(inception_4b_3x3_reduce)

        inception_4b_5x5_reduce = Conv2D(
            24, (1, 1),
            padding='same',
            activation='relu',
            name='inception_4b/5x5_reduce',
            kernel_regularizer=l2(0.0002))(inception_4a_output)

        inception_4b_5x5_a = Conv2D(
            64, (3, 3),
            padding='same',
            activation='relu',
            name='inception_4b/5x5_a',
            kernel_regularizer=l2(0.0002))(inception_4b_5x5_reduce)

        inception_4b_5x5_b = Conv2D(
            64, (3, 3),
            padding='same',
            activation='relu',
            name='inception_4b/5x5_b',
            kernel_regularizer=l2(0.0002))(inception_4b_5x5_a)

        inception_4b_pool = MaxPooling2D(
            pool_size=(3, 3),
            strides=(1, 1),
            padding='same',
            name='inception_4b/pool')(inception_4a_output)

        inception_4b_pool_proj = Conv2D(
            64, (1, 1),
            padding='same',
            activation='relu',
            name='inception_4b/pool_proj',
            kernel_regularizer=l2(0.0002))(inception_4b_pool)

        inception_4b_output = concatenate([
            inception_4b_1x1, inception_4b_3x3, inception_4b_5x5_b,
            inception_4b_pool_proj
        ],
                                          axis=3,
                                          name='inception_4b_output')

        inception_4c_1x1 = Conv2D(
            128, (1, 1),
            padding='same',
            activation='relu',
            name='inception_4c/1x1',
            kernel_regularizer=l2(0.0002))(inception_4b_output)

        inception_4c_3x3_reduce = Conv2D(
            128, (1, 1),
            padding='same',
            activation='relu',
            name='inception_4c/3x3_reduce',
            kernel_regularizer=l2(0.0002))(inception_4b_output)

        inception_4c_3x3 = Conv2D(
            256, (3, 3),
            padding='same',
            activation='relu',
            name='inception_4c/3x3',
            kernel_regularizer=l2(0.0002))(inception_4c_3x3_reduce)

        inception_4c_5x5_reduce = Conv2D(
            24, (1, 1),
            padding='same',
            activation='relu',
            name='inception_4c/5x5_reduce',
            kernel_regularizer=l2(0.0002))(inception_4b_output)

        inception_4c_5x5_a = Conv2D(
            64, (3, 3),
            padding='same',
            activation='relu',
            name='inception_4c/5x5_a',
            kernel_regularizer=l2(0.0002))(inception_4c_5x5_reduce)

        inception_4c_5x5_b = Conv2D(
            64, (3, 3),
            padding='same',
            activation='relu',
            name='inception_4c/5x5_b',
            kernel_regularizer=l2(0.0002))(inception_4c_5x5_a)

        inception_4c_pool = MaxPooling2D(
            pool_size=(3, 3),
            strides=(1, 1),
            padding='same',
            name='inception_4c/pool')(inception_4b_output)

        inception_4c_pool_proj = Conv2D(
            64, (1, 1),
            padding='same',
            activation='relu',
            name='inception_4c/pool_proj',
            kernel_regularizer=l2(0.0002))(inception_4c_pool)

        inception_4c_output = concatenate([
            inception_4c_1x1, inception_4c_3x3, inception_4c_5x5_b,
            inception_4c_pool_proj
        ],
                                          axis=3,
                                          name='inception_4c/output')

        inception_4d_1x1 = Conv2D(
            112, (1, 1),
            padding='same',
            activation='relu',
            name='inception_4d/1x1',
            kernel_regularizer=l2(0.0002))(inception_4c_output)

        inception_4d_3x3_reduce = Conv2D(
            144, (1, 1),
            padding='same',
            activation='relu',
            name='inception_4d/3x3_reduce',
            kernel_regularizer=l2(0.0002))(inception_4c_output)

        inception_4d_3x3 = Conv2D(
            288, (3, 3),
            padding='same',
            activation='relu',
            name='inception_4d/3x3',
            kernel_regularizer=l2(0.0002))(inception_4d_3x3_reduce)

        inception_4d_5x5_reduce = Conv2D(
            32, (1, 1),
            padding='same',
            activation='relu',
            name='inception_4d/5x5_reduce',
            kernel_regularizer=l2(0.0002))(inception_4c_output)

        inception_4d_5x5_a = Conv2D(
            64, (3, 3),
            padding='same',
            activation='relu',
            name='inception_4d/5x5_a',
            kernel_regularizer=l2(0.0002))(inception_4d_5x5_reduce)

        inception_4d_5x5_b = Conv2D(
            64, (3, 3),
            padding='same',
            activation='relu',
            name='inception_4d/5x5_b',
            kernel_regularizer=l2(0.0002))(inception_4d_5x5_a)

        inception_4d_pool = MaxPooling2D(
            pool_size=(3, 3),
            strides=(1, 1),
            padding='same',
            name='inception_4d/pool')(inception_4c_output)

        inception_4d_pool_proj = Conv2D(
            64, (1, 1),
            padding='same',
            activation='relu',
            name='inception_4d/pool_proj',
            kernel_regularizer=l2(0.0002))(inception_4d_pool)

        inception_4d_output = concatenate([
            inception_4d_1x1, inception_4d_3x3, inception_4d_5x5_b,
            inception_4d_pool_proj
        ],
                                          axis=3,
                                          name='inception_4d/output')

        loss2_ave_pool = AveragePooling2D(
            pool_size=(5, 5), strides=(3, 3),
            name='loss2/ave_pool')(inception_4d_output)

        loss2_conv = Conv2D(128, (1, 1),
                            padding='same',
                            activation='relu',
                            name='loss2/conv',
                            kernel_regularizer=l2(0.0002))(loss2_ave_pool)

        loss2_flat = Flatten()(loss2_conv)

        loss2_fc = Dense(1024,
                         activation='relu',
                         name='loss2/fc',
                         kernel_regularizer=l2(0.0002))(loss2_flat)

        loss2_drop_fc = Dropout(dropout_keep_prob)(loss2_fc,
                                                   training=is_training)

        loss2_classifier = Dense(num_classes,
                                 name='loss2/classifier',
                                 kernel_regularizer=l2(0.0002))(loss2_drop_fc)

        inception_4e_1x1 = Conv2D(
            256, (1, 1),
            padding='same',
            activation='relu',
            name='inception_4e/1x1',
            kernel_regularizer=l2(0.0002))(inception_4d_output)

        inception_4e_3x3_reduce = Conv2D(
            160, (1, 1),
            padding='same',
            activation='relu',
            name='inception_4e/3x3_reduce',
            kernel_regularizer=l2(0.0002))(inception_4d_output)

        inception_4e_3x3 = Conv2D(
            320, (3, 3),
            padding='same',
            activation='relu',
            name='inception_4e/3x3',
            kernel_regularizer=l2(0.0002))(inception_4e_3x3_reduce)

        inception_4e_5x5_reduce = Conv2D(
            32, (1, 1),
            padding='same',
            activation='relu',
            name='inception_4e/5x5_reduce',
            kernel_regularizer=l2(0.0002))(inception_4d_output)

        inception_4e_5x5_a = Conv2D(
            128, (3, 3),
            padding='same',
            activation='relu',
            name='inception_4e/5x5_a',
            kernel_regularizer=l2(0.0002))(inception_4e_5x5_reduce)

        inception_4e_5x5_b = Conv2D(
            128, (3, 3),
            padding='same',
            activation='relu',
            name='inception_4e/5x5_b',
            kernel_regularizer=l2(0.0002))(inception_4e_5x5_a)

        inception_4e_pool = MaxPooling2D(
            pool_size=(3, 3),
            strides=(1, 1),
            padding='same',
            name='inception_4e/pool')(inception_4d_output)

        inception_4e_pool_proj = Conv2D(
            128, (1, 1),
            padding='same',
            activation='relu',
            name='inception_4e/pool_proj',
            kernel_regularizer=l2(0.0002))(inception_4e_pool)

        inception_4e_output = concatenate([
            inception_4e_1x1, inception_4e_3x3, inception_4e_5x5_b,
            inception_4e_pool_proj
        ],
                                          axis=3,
                                          name='inception_4e/output')

        inception_4e_output_zero_pad = ZeroPadding2D(
            padding=(1, 1))(inception_4e_output)

        pool4_3x3_s2 = MaxPooling2D(
            pool_size=(3, 3),
            strides=(2, 2),
            padding='valid',
            name='pool4/3x3_s2')(inception_4e_output_zero_pad)

        inception_5a_1x1 = Conv2D(256, (1, 1),
                                  padding='same',
                                  activation='relu',
                                  name='inception_5a/1x1',
                                  kernel_regularizer=l2(0.0002))(pool4_3x3_s2)

        inception_5a_3x3_reduce = Conv2D(
            160, (1, 1),
            padding='same',
            activation='relu',
            name='inception_5a/3x3_reduce',
            kernel_regularizer=l2(0.0002))(pool4_3x3_s2)

        inception_5a_3x3 = Conv2D(
            320, (3, 3),
            padding='same',
            activation='relu',
            name='inception_5a/3x3',
            kernel_regularizer=l2(0.0002))(inception_5a_3x3_reduce)

        inception_5a_5x5_reduce = Conv2D(
            32, (1, 1),
            padding='same',
            activation='relu',
            name='inception_5a/5x5_reduce',
            kernel_regularizer=l2(0.0002))(pool4_3x3_s2)

        inception_5a_5x5_a = Conv2D(
            128, (3, 3),
            padding='same',
            activation='relu',
            name='inception_5a/5x5_a',
            kernel_regularizer=l2(0.0002))(inception_5a_5x5_reduce)

        inception_5a_5x5_b = Conv2D(
            128, (3, 3),
            padding='same',
            activation='relu',
            name='inception_5a/5x5_b',
            kernel_regularizer=l2(0.0002))(inception_5a_5x5_a)

        inception_5a_pool = MaxPooling2D(
            pool_size=(3, 3),
            strides=(1, 1),
            padding='same',
            name='inception_5a/pool')(pool4_3x3_s2)

        inception_5a_pool_proj = Conv2D(
            128, (1, 1),
            padding='same',
            activation='relu',
            name='inception_5a/pool_proj',
            kernel_regularizer=l2(0.0002))(inception_5a_pool)

        inception_5a_output = concatenate([
            inception_5a_1x1, inception_5a_3x3, inception_5a_5x5_b,
            inception_5a_pool_proj
        ],
                                          axis=3,
                                          name='inception_5a/output')

        inception_5b_1x1 = Conv2D(
            384, (1, 1),
            padding='same',
            activation='relu',
            name='inception_5b/1x1',
            kernel_regularizer=l2(0.0002))(inception_5a_output)

        inception_5b_3x3_reduce = Conv2D(
            192, (1, 1),
            padding='same',
            activation='relu',
            name='inception_5b/3x3_reduce',
            kernel_regularizer=l2(0.0002))(inception_5a_output)

        inception_5b_3x3 = Conv2D(
            384, (3, 3),
            padding='same',
            activation='relu',
            name='inception_5b/3x3',
            kernel_regularizer=l2(0.0002))(inception_5b_3x3_reduce)

        inception_5b_5x5_reduce = Conv2D(
            48, (1, 1),
            padding='same',
            activation='relu',
            name='inception_5b/5x5_reduce',
            kernel_regularizer=l2(0.0002))(inception_5a_output)

        inception_5b_5x5_a = Conv2D(
            128, (3, 3),
            padding='same',
            activation='relu',
            name='inception_5b/5x5_a',
            kernel_regularizer=l2(0.0002))(inception_5b_5x5_reduce)

        inception_5b_5x5_b = Conv2D(
            128, (3, 3),
            padding='same',
            activation='relu',
            name='inception_5b/5x5_b',
            kernel_regularizer=l2(0.0002))(inception_5b_5x5_a)

        inception_5b_pool = MaxPooling2D(
            pool_size=(3, 3),
            strides=(1, 1),
            padding='same',
            name='inception_5b/pool')(inception_5a_output)

        inception_5b_pool_proj = Conv2D(
            128, (1, 1),
            padding='same',
            activation='relu',
            name='inception_5b/pool_proj',
            kernel_regularizer=l2(0.0002))(inception_5b_pool)

        inception_5b_output = concatenate([
            inception_5b_1x1, inception_5b_3x3, inception_5b_5x5_b,
            inception_5b_pool_proj
        ],
                                          axis=3,
                                          name='inception_5b/output')

        net = inception_5b_output

        # Modified for 299x299
        pool5_10x10_s1 = AveragePooling2D(
            pool_size=(10, 10), strides=(1, 1),
            name='pool5/10x10_s2')(inception_5b_output)

        loss3_flat = Flatten()(pool5_10x10_s1)

        pool5_drop_10x10_s1 = Dropout(dropout_keep_prob)(loss3_flat,
                                                         training=is_training)

        loss3_classifier_W = Dense(num_classes,
                                   name='loss3/classifier',
                                   kernel_regularizer=l2(0.0002))

        loss3_classifier = loss3_classifier_W(pool5_drop_10x10_s1)

        w_variables = loss3_classifier_W.get_weights()

        logits = tf.cond(
            tf.equal(is_training, tf.constant(True)), lambda: tf.add(
                loss3_classifier,
                tf.scalar_mul(tf.constant(0.3),
                              tf.add(loss1_classifier, loss2_classifier))),
            lambda: loss3_classifier)
    return logits, net, tf.convert_to_tensor(w_variables[0])
Example #17
def rtn_1(classes, in_shp=[2, 128], weights=None, **kwargs):

    if weights is not None and not (os.path.exists(weights)):
        raise ValueError('The `weights` argument should be either '
                         '`None` (random initialization), '
                         'or the path to the weights file to be loaded.')
    dr = 0.5

    _in_ = Input(shape=in_shp)
    input_x = Reshape(in_shp + [1])(_in_)
    # input_x_padding = ZeroPadding2D((0, 2))(input_x)
    # feature enhancement
    input_stn = Convolution2D(20, (2, 8),
                              padding='same',
                              activation='relu',
                              name="conv11",
                              kernel_initializer='glorot_uniform')(input_x)
    input_stn = BatchNormalization()(input_stn)
    # locnet = Permute((3,2,1))(locnet)
    # locnet = Lambda(LC)(locnet)
    # locnet = Permute((3,2,1))(locnet)
    # locnet = Activation('relu')(locnet)
    input_stn = Dropout(dr)(input_stn)
    ##############STN start:##################
    #######channel attention module#######################
    #cam = CAM()(input_stn)
    #cam = Convolution2D(20, (1, 1), padding='same', use_bias=False, kernel_initializer='he_normal')(cam)
    #cam = BatchNormalization()(cam)
    #cam = Activation('relu')(cam)
    #cam = Dropout(dr)(cam)
    ########attention fusion########################
    #attention_sum = add([cam, input_stn])
    locnet_size = list(np.shape(input_stn))
    input_dim = int(locnet_size[-1] * locnet_size[-2])
    timesteps = int(locnet_size[-3])
    locnet = Flatten()(input_stn)
    locnet = Reshape((1, input_dim * timesteps))(locnet)
    lstm_out = LSTM(32, return_sequences=True)(locnet)
    lstm_out = LSTM(64, return_sequences=True)(lstm_out)
    locnet = Dense(64, activation='relu')(lstm_out)
    locnet = Dropout(dr)(locnet)
    weights = get_initial_weights(64)
    locnet = Dense(6, weights=weights)(locnet)
    rtn_out = BilinearInterpolation((2, 128))([input_stn, locnet])
    #######channel attention module#######################
    cam = CAM()(rtn_out)
    #cam = Dense(20, activation='relu', init='he_normal')(cam)
    cam = Convolution2D(20, (1, 1),
                        padding='same',
                        use_bias=False,
                        kernel_initializer='he_normal')(cam)
    cam = Dense(10, activation='relu', kernel_initializer='he_normal')(cam)
    cam = Dense(20, activation='relu', kernel_initializer='he_normal')(cam)
    cam = BatchNormalization()(cam)
    cam = Activation('relu')(cam)
    cam = Dropout(dr)(cam)
    ########attention fusion########################
    #attention_sum = add([cam, rtn_out])
    #######baseline classifier#######
    x = ZeroPadding2D((1, 2))(cam)
    x = Conv2D(kernel_initializer="glorot_uniform",
               activation="linear",
               padding="valid",
               name="conv21",
               filters=128,
               kernel_size=(2, 16))(x)
    x = BatchNormalization()(x)
    x = Permute((3, 2, 1))(x)
    x = Lambda(LC)(x)
    x = Permute((3, 2, 1))(x)
    x = Activation('relu')(x)
    x = Dropout(dr)(x)

    x = ZeroPadding2D((0, 2))(x)
    x = Conv2D(kernel_initializer="glorot_uniform",
               activation="relu",
               padding="valid",
               name="conv22",
               filters=64,
               kernel_size=(2, 8))(x)
    x = BatchNormalization()(x)
    x = Dropout(dr)(x)
    #x = MaxPooling2D(pool_size= (1,2))(x)
    x = Flatten()(x)
    x = Dense(256, activation='relu', kernel_initializer='he_normal', name="dense1")(x)
    x = BatchNormalization()(x)
    x = Dropout(dr)(x)
    x = Dense(128, activation='relu', kernel_initializer='he_normal', name="dense2")(x)
    x = BatchNormalization()(x)
    x = Dropout(dr)(x)
    x = Dense(len(classes), kernel_initializer='he_normal', name="dense3")(x)
    x = Activation('softmax')(x)
    _out_ = Reshape([len(classes)])(x)

    model = Model(inputs=_in_, outputs=_out_)

    return model
Example #18
X_test, y_test = load_data(train=False)

print "augment data"
X_train, y_train = augment_data(X_train, y_train)

print "build model"
model = Sequential()

model.add(Convolution2D(64, 5, 5,
                        border_mode='valid',
                        input_shape=(1, nb_dim, nb_dim)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))

model.add(ZeroPadding2D(padding=(2, 2)))
model.add(Convolution2D(64, 5, 5))
model.add(Activation('relu'))
model.add(Dropout(0.3))

model.add(Convolution2D(256, 3, 3))
model.add(Activation('relu'))
model.add(Dropout(0.4))

model.add(Flatten())
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
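
The snippet above stops after defining the network. A sketch of the compile/fit calls that would typically follow, using Keras 1.x-style arguments to match the code above; the hyperparameters are illustrative and one-hot labels are assumed:

from keras.optimizers import SGD

model.compile(loss='categorical_crossentropy',
              optimizer=SGD(lr=0.01, momentum=0.9, nesterov=True),
              metrics=['accuracy'])
model.fit(X_train, y_train, batch_size=128, nb_epoch=20,
          validation_data=(X_test, y_test))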
Example #19
    model.add(Dense(1000, activation='softmax'))

    if weights_path:
        model.load_weights(weights_path)

    return model


if __name__ == "__main__":

    # Test pretrained model
    model = VGG_16('.git/vgg16_weights.h5')
    sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(optimizer=sgd, loss='categorical_crossentropy')
    model2 = Sequential()
    model2.add(ZeroPadding2D((1, 1), input_shape=(3, 224, 224)))
    model2.add(
        Convolution2D(64,
                      3,
                      3,
                      activation='relu',
                      weights=model.layers[1].get_weights()))
    model2.add(ZeroPadding2D((1, 1)))
    model2.add(
        Convolution2D(64,
                      3,
                      3,
                      activation='relu',
                      weights=model.layers[3].get_weights()))
    model2.add(MaxPooling2D((2, 2), strides=(2, 2)))
Example #20
    def build(width,
              height,
              depth,
              classes,
              stages,
              filters,
              reg=0.0001,
              epsilon=2e-5,
              mom=0.9):

        inputShape = (height, width, depth)
        inputs = Input(shape=inputShape)
        chanDim = -1

        # 3 x (3x3) => 64x64x3 -> 58x58x64
        x = BatchNormalization(axis=chanDim, epsilon=epsilon,
                               momentum=mom)(inputs)
        x = Activation("relu")(x)
        x = SeparableConv2D(filters[0], (3, 3),
                            use_bias=False,
                            depthwise_regularizer=l2(reg),
                            input_shape=inputShape)(x)
        x = BatchNormalization(axis=chanDim, epsilon=epsilon,
                               momentum=mom)(x)
        x = Activation("relu")(x)
        x = SeparableConv2D(filters[0], (3, 3),
                            use_bias=False,
                            depthwise_regularizer=l2(reg),
                            input_shape=inputShape)(x)
        x = BatchNormalization(axis=chanDim, epsilon=epsilon,
                               momentum=mom)(x)
        x = Activation("relu")(x)
        x = SeparableConv2D(filters[0], (3, 3),
                            use_bias=False,
                            depthwise_regularizer=l2(reg),
                            input_shape=inputShape)(x)

        # MaxPool Layer + ZeroPadding => 58x58x64 -> 31x31x64
        x = BatchNormalization(axis=chanDim, epsilon=epsilon, momentum=mom)(x)
        x = Activation("relu")(x)
        x = ZeroPadding2D((2, 2))(x)
        x = MaxPooling2D((2, 2))(x)

        for i in range(0, len(stages)):

            if i == 0:
                strides = (1, 1)
            else:
                strides = (2, 2)

            x = ResNet.residual_model(x,
                                      filters[i + 1],
                                      strides,
                                      chanDim,
                                      reduced=True)

            for j in range(0, stages[i] - 1):
                x = ResNet.residual_model(x,
                                          filters[i + 1], (1, 1),
                                          chanDim,
                                          epsilon=epsilon,
                                          mom=mom)

        x = BatchNormalization(axis=chanDim, epsilon=epsilon, momentum=mom)(x)
        x = Activation("relu")(x)
        x = Conv2D(200, (1, 1), use_bias=False, kernel_regularizer=l2(reg))(x)
        x = AveragePooling2D((3, 3))(x)

        x = Flatten()(x)
        x = Activation("softmax")(x)

        model = Model(inputs, x, name="resnet")
        return model
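A hedged call sketch; the sizes below are illustrative assumptions (the only constraint visible above is len(filters) == len(stages) + 1, and build appears to be a static method alongside ResNet.residual_model):

# Hypothetical usage -- 64x64 RGB inputs, three stages of residual blocks.
model = ResNet.build(width=64, height=64, depth=3, classes=200,
                     stages=(3, 4, 6), filters=(64, 128, 256, 512))
model.compile(optimizer='sgd', loss='categorical_crossentropy', metrics=['accuracy'])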
Exemple #21
0
def cnn_model(shape,
              nb_neurons=8,
              lr_rate=1e-2,
              momentum=0.9,
              init_weights='uniform',
              activation='relu',
              weight_constraint=0,
              dropout_rate=0):

    model = Sequential()
    model.add(ZeroPadding2D((1, 1), input_shape=shape + (3, )))

    model.add(
        Convolution2D(4,
                      3,
                      3,
                      init=init_weights,
                      activation=activation,
                      W_constraint=maxnorm(weight_constraint)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(
        Convolution2D(4,
                      3,
                      3,
                      init=init_weights,
                      activation=activation,
                      W_constraint=maxnorm(weight_constraint)))

    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))

    model.add(
        Convolution2D(8,
                      3,
                      3,
                      init=init_weights,
                      activation=activation,
                      W_constraint=maxnorm(weight_constraint)))

    model.add(ZeroPadding2D((1, 1)))

    model.add(
        Convolution2D(8,
                      3,
                      3,
                      init=init_weights,
                      activation=activation,
                      W_constraint=maxnorm(weight_constraint)))

    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

    model.add(Flatten())
    model.add(
        Dense(nb_neurons,
              init=init_weights,
              activation=activation,
              W_constraint=maxnorm(weight_constraint)))

    model.add(Dropout(dropout_rate))

    model.add(
        Dense(nb_neurons,
              init=init_weights,
              activation=activation,
              W_constraint=maxnorm(weight_constraint)))

    model.add(Dropout(dropout_rate))
    model.add(Dense(nb_classes, init=init_weights, activation='softmax'))

    sgd = SGD(lr=lr_rate, decay=1e-6, momentum=momentum, nesterov=True)

    model.compile(optimizer=sgd,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    return model
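A hedged usage sketch; the spatial size, hyper-parameters, and data arrays below are placeholders (note that shape is (height, width) only, since the function appends the channel axis itself, and nb_classes must already be defined in the enclosing scope):

# Hypothetical call using the legacy Keras 1 fit signature matching this snippet.
model = cnn_model(shape=(64, 64),
                  nb_neurons=16,
                  lr_rate=1e-2,
                  dropout_rate=0.5,
                  weight_constraint=3)
model.fit(trainX, trainY, batch_size=32, nb_epoch=20, validation_split=0.1)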
Exemple #22
0
es = EarlyStopping(monitor='val_loss',
                   min_delta=0,
                   patience=20,
                   verbose=0,
                   mode='auto',
                   baseline=None,
                   restore_best_weights=True)

from keras.models import Sequential
from keras.layers.core import Flatten, Dense, Dropout
from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D
from keras.optimizers import SGD
import numpy as np

model = Sequential()
model.add(ZeroPadding2D((1, 1), input_shape=(32, 32, 3)))
model.add(Convolution2D(64, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(64, 3, 3, activation='relu'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))

model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(128, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(128, 3, 3, activation='relu'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))

model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(256, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(256, 3, 3, activation='relu'))
Exemple #23
0
 def ConvBlock(self, layers, filters):
     model = self.model
     for i in range(layers):
         model.add(ZeroPadding2D((1, 1)))
         model.add(Conv2D(filters, kernel_size=(3, 3), activation='relu'))  # Keras2
     model.add(MaxPooling2D((2, 2), strides=(2, 2)))
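ConvBlock assumes the enclosing class keeps a Sequential model in self.model; a hedged sketch of a wrapper that drives it (the class name, input size, and block sizes are assumptions, not from the source):

# Hypothetical wrapper illustrating how ConvBlock stacks VGG-style blocks.
from keras.models import Sequential
from keras.layers import InputLayer, Conv2D, MaxPooling2D, ZeroPadding2D

class VGGLike:
    def __init__(self):
        self.model = Sequential()
        self.model.add(InputLayer(input_shape=(224, 224, 3)))
        self.ConvBlock(2, 64)     # 2 x Conv2D(64, 3x3) + max-pool
        self.ConvBlock(2, 128)    # 2 x Conv2D(128, 3x3) + max-pool
        self.ConvBlock(3, 256)    # 3 x Conv2D(256, 3x3) + max-pool

    def ConvBlock(self, layers, filters):
        model = self.model
        for i in range(layers):
            model.add(ZeroPadding2D((1, 1)))
            model.add(Conv2D(filters, kernel_size=(3, 3), activation='relu'))
        model.add(MaxPooling2D((2, 2), strides=(2, 2)))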
Exemple #24
0
def make_unet(image_dim, nlabels, activation_hidden, activation_output):
    img_rows = image_dim[1]
    img_cols = image_dim[2]
    nMLP = 16
    nRshp = int(sqrt(nMLP))
    nUpSm = int(image_dim[0] / nRshp)
    image = Input(shape=(image_dim[1], image_dim[2], 1))

    BN1 = BatchNormalization()(image)

    conv1 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(BN1)
    conv1 = Convolution2D(32, 3, 3, activation='relu',
                          border_mode='same')(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

    conv2 = Convolution2D(64, 3, 3, activation='relu',
                          border_mode='same')(pool1)
    conv2 = Convolution2D(64, 3, 3, activation='relu',
                          border_mode='same')(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

    conv3 = Convolution2D(128, 3, 3, activation='relu',
                          border_mode='same')(pool2)
    conv3 = Convolution2D(128, 3, 3, activation='relu',
                          border_mode='same')(conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)

    conv4 = Convolution2D(256, 3, 3, activation='relu',
                          border_mode='same')(pool3)
    conv4 = Convolution2D(256, 3, 3, activation='relu',
                          border_mode='same')(conv4)
    pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)

    conv5 = Convolution2D(512, 3, 3, activation='relu',
                          border_mode='same')(pool4)
    conv5 = Convolution2D(512, 3, 3, activation='relu',
                          border_mode='same')(conv5)

    up6 = merge([UpSampling2D(size=(2, 2))(conv5), conv4],
                mode='concat',
                concat_axis=3)
    conv6 = Convolution2D(256, 3, 3, activation='relu',
                          border_mode='same')(up6)
    conv6 = Convolution2D(256, 3, 3, activation='relu',
                          border_mode='same')(conv6)

    conv6_up = UpSampling2D(size=(2, 2))(conv6)
    conv6_pad = ZeroPadding2D(((1, 0), (1, 0)))(conv6_up)
    up7 = merge([conv6_pad, conv3], mode='concat', concat_axis=3)
    conv7 = Convolution2D(128, 3, 3, activation='relu',
                          border_mode='same')(up7)
    conv7 = Convolution2D(128, 3, 3, activation='relu',
                          border_mode='same')(conv7)

    up8 = merge([UpSampling2D(size=(2, 2))(conv7), conv2],
                mode='concat',
                concat_axis=3)
    conv8 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(up8)
    conv8 = Convolution2D(64, 3, 3, activation='relu',
                          border_mode='same')(conv8)

    up9 = merge([UpSampling2D(size=(2, 2))(conv8), conv1],
                mode='concat',
                concat_axis=3)
    conv9 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(up9)
    conv9 = Convolution2D(32, 3, 3, activation='relu',
                          border_mode='same')(conv9)

    conv10 = Convolution2D(nlabels, 1, 1, activation=activation)(conv9)

    model = keras.models.Model(input=[image], output=conv10)

    print(model.summary())
    return model
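The asymmetric ZeroPadding2D(((1, 0), (1, 0))) on the conv6 branch only lines up when the feature map is odd-sized before the third pooling; for example, 68x68 inputs satisfy every skip-connection merge (68 -> 34 -> 17 -> 8 -> 4, with the padding restoring 16 -> 17). A hedged usage sketch under that assumption (this snippet uses the legacy Keras 1 merge API, so it only runs on Keras 1):

# Hypothetical call -- image_dim follows the function above as (depth_like, rows, cols);
# only rows/cols actually shape the network in this excerpt.
model = make_unet(image_dim=(1, 68, 68), nlabels=2,
                  activation_hidden='relu', activation_output='softmax')
model.compile(optimizer='adam', loss='categorical_crossentropy')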
Exemple #25
0
def VGG_16(weights_path=None):
    #base_model = VGG16(include_top=False, weights=None, input_tensor=None, input_shape=img_shape)

    model = Sequential()
    model.add(ZeroPadding2D((1, 1), input_shape=img_shape))  #(3,224,224)))
    model.add(Convolution2D(64, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(64, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(128, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(128, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(Flatten())
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(n_classes, activation='softmax'))

    # Add final layers
    #x = base_model.output
    #x = Flatten(name="flatten")(x)
    #x = Dense(4096, activation='relu', name='dense_1')(x)
    #x = Dropout(0.5)(x)
    #x = Dense(4096, activation='relu', name='dense_2')(x)
    #x = Dropout(0.5)(x)
    #x = Dense(n_classes, name='dense_3_{}'.format(n_classes))(x)
    #predictions = Activation("softmax", name="softmax")(x)

    #model = Model(input=base_model.input, output=predictions)

    if weights_path:
        model.load_weights(weights_path)

    return model
Exemple #26
0
def k_vgg_mlp(yao_indices_dim, face_image_shape, with_compile=True):
    '''
    The 'k_' prefix means the model is built from plain Keras layers;
    the layer hyper-parameters used throughout are grouped below.
    '''

    # vgg parameters
    zero_padding = (1, 1)
    _kernel_size = (3, 3)
    _vgg_activation = 'relu'
    _pool_size = (2, 2)
    _pool_strides = (2, 2)

    _nb_filters_1 = 64
    _nb_filters_2 = 128
    _nb_filters_3 = 256
    _nb_filters_4 = 512
    _nb_filters_5 = 512

    # mlp layer parameters
    _mlp_units = 200
    _mlp_activation = 'tanh'
    _mlp_dropout = 0.0
    _output_units = yao_indices_dim
    _output_activation = 'softmax'

    print('Build VGG + MLP model...')
    vgg_mlp_model = Sequential()

    vgg_mlp_model.add(ZeroPadding2D(zero_padding,
                                    input_shape=face_image_shape))
    vgg_mlp_model.add(
        Convolution2D(filters=_nb_filters_1,
                      kernel_size=_kernel_size,
                      activation=_vgg_activation,
                      name='conv1_1'))
    vgg_mlp_model.add(ZeroPadding2D(zero_padding))
    vgg_mlp_model.add(
        Convolution2D(filters=_nb_filters_1,
                      kernel_size=_kernel_size,
                      activation=_vgg_activation,
                      name='conv1_2'))
    vgg_mlp_model.add(MaxPooling2D(pool_size=_pool_size,
                                   strides=_pool_strides))

    vgg_mlp_model.add(ZeroPadding2D(zero_padding))
    vgg_mlp_model.add(
        Convolution2D(filters=_nb_filters_2,
                      kernel_size=_kernel_size,
                      activation=_vgg_activation,
                      name='conv2_1'))
    vgg_mlp_model.add(ZeroPadding2D(zero_padding))
    vgg_mlp_model.add(
        Convolution2D(filters=_nb_filters_2,
                      kernel_size=_kernel_size,
                      activation=_vgg_activation,
                      name='conv2_2'))
    vgg_mlp_model.add(MaxPooling2D(pool_size=_pool_size,
                                   strides=_pool_strides))

    vgg_mlp_model.add(ZeroPadding2D(zero_padding))
    vgg_mlp_model.add(
        Convolution2D(filters=_nb_filters_3,
                      kernel_size=_kernel_size,
                      activation=_vgg_activation,
                      name='conv3_1'))
    vgg_mlp_model.add(ZeroPadding2D(zero_padding))
    vgg_mlp_model.add(
        Convolution2D(filters=_nb_filters_3,
                      kernel_size=_kernel_size,
                      activation=_vgg_activation,
                      name='conv3_2'))
    vgg_mlp_model.add(ZeroPadding2D(zero_padding))
    vgg_mlp_model.add(
        Convolution2D(filters=_nb_filters_3,
                      kernel_size=_kernel_size,
                      activation=_vgg_activation,
                      name='conv3_3'))
    vgg_mlp_model.add(MaxPooling2D(pool_size=_pool_size,
                                   strides=_pool_strides))

    vgg_mlp_model.add(ZeroPadding2D(zero_padding))
    vgg_mlp_model.add(
        Convolution2D(filters=_nb_filters_4,
                      kernel_size=_kernel_size,
                      activation=_vgg_activation,
                      name='conv4_1'))
    vgg_mlp_model.add(ZeroPadding2D(zero_padding))
    vgg_mlp_model.add(
        Convolution2D(filters=_nb_filters_4,
                      kernel_size=_kernel_size,
                      activation=_vgg_activation,
                      name='conv4_2'))
    vgg_mlp_model.add(ZeroPadding2D(zero_padding))
    vgg_mlp_model.add(
        Convolution2D(filters=_nb_filters_4,
                      kernel_size=_kernel_size,
                      activation=_vgg_activation,
                      name='conv4_3'))
    vgg_mlp_model.add(MaxPooling2D(pool_size=_pool_size,
                                   strides=_pool_strides))

    vgg_mlp_model.add(ZeroPadding2D(zero_padding))
    vgg_mlp_model.add(
        Convolution2D(filters=_nb_filters_5,
                      kernel_size=_kernel_size,
                      activation=_vgg_activation,
                      name='conv5_1'))
    vgg_mlp_model.add(ZeroPadding2D(zero_padding))
    vgg_mlp_model.add(
        Convolution2D(filters=_nb_filters_5,
                      kernel_size=_kernel_size,
                      activation=_vgg_activation,
                      name='conv5_2'))
    vgg_mlp_model.add(ZeroPadding2D(zero_padding))
    vgg_mlp_model.add(
        Convolution2D(filters=_nb_filters_5,
                      kernel_size=_kernel_size,
                      activation=_vgg_activation,
                      name='conv5_3'))
    vgg_mlp_model.add(MaxPooling2D(pool_size=_pool_size,
                                   strides=_pool_strides))

    vgg_mlp_model.add(Flatten())
    vgg_mlp_model.add(Dense(units=_mlp_units, activation=_mlp_activation))
    vgg_mlp_model.add(Dropout(rate=_mlp_dropout))
    vgg_mlp_model.add(Dense(units=_output_units))
    vgg_mlp_model.add(Activation(activation=_output_activation))

    # print layers framework
    vgg_mlp_model.summary()
Exemple #27
0
trainX = np.array(
    [preprocess_image(x[0]).reshape(1, img_height, img_width) for x in trainX])

print(trainX.shape)

trainY = kutils.to_categorical(train[:, 0])
nb_classes = trainY.shape[1]

# Split the training data into training and validation data
trainX, valX, trainY, valY = train_test_split(trainX, trainY, test_size=0.2)

print(trainX.shape, valX.shape)

inputs = Input(shape=(28, 28, 1))

x = ZeroPadding2D((2, 2))(inputs)
x = Convolution2D(64, 5, 5, init='he_normal')(x)
x = Activation('relu')(x)
x = ZeroPadding2D((2, 2))(x)
x = Convolution2D(128, 5, 5, init='he_normal')(x)
x = Activation('relu')(x)
x = MaxPooling2D(strides=(2, 2))(x)

x = ZeroPadding2D((2, 2))(x)
x = Convolution2D(256, 5, 5, init='he_normal')(x)
x = Activation('relu')(x)
x = ZeroPadding2D((1, 1))(x)
x = Convolution2D(256, 3, 3, init='he_normal')(x)
x = Activation('relu')(x)
x = MaxPooling2D(strides=(2, 2))(x)
x = Dropout(0.2)(x)
Exemple #28
0
def AlexNet(weights_path=None, heatmap=False):
    if heatmap:
        inputs = Input(shape=(3, None, None))
    else:
        inputs = Input(shape=(3, 227, 227))

    conv_1 = Convolution2D(96,
                           11,
                           11,
                           subsample=(4, 4),
                           activation='relu',
                           name='conv_1')(inputs)

    conv_2 = MaxPooling2D((3, 3), strides=(2, 2))(conv_1)
    conv_2 = crosschannelnormalization(name='convpool_1')(conv_2)
    conv_2 = ZeroPadding2D((2, 2))(conv_2)
    conv_2 = merge([
        Convolution2D(
            128, 5, 5, activation='relu', name='conv_2_' + str(i + 1))(
                splittensor(ratio_split=2, id_split=i)(conv_2))
        for i in range(2)
    ],
                   mode='concat',
                   concat_axis=1,
                   name='conv_2')

    conv_3 = MaxPooling2D((3, 3), strides=(2, 2))(conv_2)
    conv_3 = crosschannelnormalization()(conv_3)
    conv_3 = ZeroPadding2D((1, 1))(conv_3)
    conv_3 = Convolution2D(384, 3, 3, activation='relu', name='conv_3')(conv_3)

    conv_4 = ZeroPadding2D((1, 1))(conv_3)
    conv_4 = merge([
        Convolution2D(
            192, 3, 3, activation='relu', name='conv_4_' + str(i + 1))(
                splittensor(ratio_split=2, id_split=i)(conv_4))
        for i in range(2)
    ],
                   mode='concat',
                   concat_axis=1,
                   name='conv_4')

    conv_5 = ZeroPadding2D((1, 1))(conv_4)
    conv_5 = merge([
        Convolution2D(
            128, 3, 3, activation='relu', name='conv_5_' + str(i + 1))(
                splittensor(ratio_split=2, id_split=i)(conv_5))
        for i in range(2)
    ],
                   mode='concat',
                   concat_axis=1,
                   name='conv_5')

    dense_1 = MaxPooling2D((3, 3), strides=(2, 2), name='convpool_5')(conv_5)

    dense_1 = Flatten(name='flatten')(dense_1)
    dense_1 = Dense(4096, activation='relu', name='dense_1')(dense_1)
    dense_2 = Dropout(0.5)(dense_1)
    dense_2 = Dense(4096, activation='relu', name='dense_2')(dense_2)
    dense_3 = Dropout(0.5)(dense_2)
    dense_3 = Dense(1000, name='dense_3')(dense_3)
    prediction = Activation('softmax', name='softmax')(dense_3)

    model = Model(input=inputs, output=prediction)

    if weights_path:
        model.load_weights(weights_path)

    return model
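A hedged usage sketch; it assumes channels-first images of shape (N, 3, 227, 227) and that the custom crosschannelnormalization and splittensor layers used above are importable (the snippet relies on the legacy Keras 1 merge/subsample API):

# Hypothetical usage -- the compile settings are assumptions, not from the source.
model = AlexNet()
model.compile(optimizer='sgd', loss='categorical_crossentropy', metrics=['accuracy'])
# preds = model.predict(image_batch)   # image_batch: float32, (N, 3, 227, 227)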
Exemple #29
0
def VGG_16(nbOutput=1000):
    model = Sequential()
    model.add(ZeroPadding2D((1, 1), input_shape=(3, 224, 224)))
    model.add(Convolution2D(64, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(64, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(128, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(128, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(Flatten())
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(nbOutput, activation='softmax'))
    return model
Exemple #30
0
def VGG_16(weights_path=None):
    model = Sequential()
    model.add(ZeroPadding2D((1, 1), input_shape=(3, 224, 224)))
    model.add(Convolution2D(64, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(64, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(128, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(128, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    # drop the final FC layers

    model.add(Flatten())

    if weights_path:
        load_weights(weights_path, model)

    return model
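Because this variant stops at Flatten (the final FC layers are dropped, per the comment above), it is naturally used as a fixed feature extractor; a hedged sketch, with a placeholder weight-file name:

# Hypothetical usage -- for 3x224x224 inputs the flattened output has 512*7*7 = 25088
# features per image.
model = VGG_16('vgg16_weights.h5')
features = model.predict(images)   # images: float32 array of shape (N, 3, 224, 224)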