def SegNet(classes=1, dropout=0.4):
    input_img = Input((croppedImageSize, croppedImageSize, 1), name='img')
    # Encoder
    x = conv2d_block(input_img, 64, kernel_size=3)
    x = MaxPooling2D(pool_size=(2, 2))(x)

    x = conv2d_block(x, 128, kernel_size=3)
    x = MaxPooling2D(pool_size=(2, 2))(x)

    x = conv2d_block(x, 256, kernel_size=3)
    x = MaxPooling2D(pool_size=(2, 2))(x)

    x = conv2d_block(x, 512, kernel_size=3)
    x = Dropout(dropout)(x)
    # Decoder
    x = conv2d_block(x, 512, kernel_size=3)

    x = UpSampling2D(size=(2, 2))(x)
    x = conv2d_block(x, 256, kernel_size=3)

    x = UpSampling2D(size=(2, 2))(x)
    x = conv2d_block(x, 128, kernel_size=3)

    x = UpSampling2D(size=(2, 2))(x)
    x = conv2d_block(x, 64, kernel_size=3)
    x = Dropout(dropout)(x)
    x = Conv2D(classes, (1, 1), activation='sigmoid')(x)
    model = Model(inputs=[input_img], outputs=[x])
    return model
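Both SegNet above and get_unet further down call a conv2d_block helper that is not included in the snippets. A minimal sketch of what such a helper typically looks like (two 3x3 convolutions, each optionally followed by batch normalization and a ReLU) is given below; the exact implementation in the original code may differ.

from tensorflow.keras.layers import Activation, BatchNormalization, Conv2D

def conv2d_block(input_tensor, n_filters, kernel_size=3, batchnorm=True):
    # First 3x3 convolution
    x = Conv2D(n_filters, (kernel_size, kernel_size),
               kernel_initializer='he_normal', padding='same')(input_tensor)
    if batchnorm:
        x = BatchNormalization()(x)
    x = Activation('relu')(x)
    # Second 3x3 convolution
    x = Conv2D(n_filters, (kernel_size, kernel_size),
               kernel_initializer='he_normal', padding='same')(x)
    if batchnorm:
        x = BatchNormalization()(x)
    x = Activation('relu')(x)
    return x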
Example #2
def extract_layer(input_tensor=None):

    x = TimeDistributed(
        Conv2D(64, (3, 3),
               activation='relu',
               padding='same',
               name='block1_conv1',
               trainable=True))(input_tensor)
    x = TimeDistributed(
        Conv2D(64, (3, 3),
               activation='relu',
               padding='same',
               name='block1_conv2',
               trainable=True))(x)
    x = TimeDistributed(
        MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool'))(x)

    # Block 2
    x = TimeDistributed(
        Conv2D(128, (3, 3),
               activation='relu',
               padding='same',
               name='block2_conv1',
               trainable=True))(x)
    x = TimeDistributed(
        Conv2D(128, (3, 3),
               activation='relu',
               padding='same',
               name='block2_conv2',
               trainable=True))(x)
    x = TimeDistributed(
        MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool'))(x)

    return x
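A brief usage sketch (assumed, not part of the original snippet): the TimeDistributed blocks above expect a 5D input of shape (batch, time, height, width, channels), so they are typically driven from an Input like the one below.

from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model

frames = Input(shape=(None, 224, 224, 3))  # frame size chosen for illustration
features = extract_layer(frames)
frame_encoder = Model(frames, features)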
Example #3
def get_unet(input_img, n_filters=16, dropout=0.1, batchnorm=True):
    """Function to define the UNET Model"""
    # Contracting Path
    c1 = conv2d_block(input_img,
                      n_filters * 1,
                      kernel_size=3,
                      batchnorm=batchnorm)
    p1 = MaxPooling2D((2, 2))(c1)
    p1 = Dropout(dropout)(p1)

    c2 = conv2d_block(p1, n_filters * 2, kernel_size=3, batchnorm=batchnorm)
    p2 = MaxPooling2D((2, 2))(c2)
    p2 = Dropout(dropout)(p2)

    c3 = conv2d_block(p2, n_filters * 4, kernel_size=3, batchnorm=batchnorm)
    p3 = MaxPooling2D((2, 2))(c3)
    p3 = Dropout(dropout)(p3)

    c4 = conv2d_block(p3, n_filters * 8, kernel_size=3, batchnorm=batchnorm)
    p4 = MaxPooling2D((2, 2))(c4)
    p4 = Dropout(dropout)(p4)

    c5 = conv2d_block(p4,
                      n_filters=n_filters * 16,
                      kernel_size=3,
                      batchnorm=batchnorm)

    # Expansive Path
    u6 = Conv2DTranspose(n_filters * 8, (3, 3), strides=(2, 2),
                         padding='same')(c5)
    u6 = concatenate([u6, c4])
    u6 = Dropout(dropout)(u6)
    c6 = conv2d_block(u6, n_filters * 8, kernel_size=3, batchnorm=batchnorm)

    u7 = Conv2DTranspose(n_filters * 4, (3, 3), strides=(2, 2),
                         padding='same')(c6)
    u7 = concatenate([u7, c3])
    u7 = Dropout(dropout)(u7)
    c7 = conv2d_block(u7, n_filters * 4, kernel_size=3, batchnorm=batchnorm)

    u8 = Conv2DTranspose(n_filters * 2, (3, 3), strides=(2, 2),
                         padding='same')(c7)
    u8 = concatenate([u8, c2])
    u8 = Dropout(dropout)(u8)
    c8 = conv2d_block(u8, n_filters * 2, kernel_size=3, batchnorm=batchnorm)

    u9 = Conv2DTranspose(n_filters * 1, (3, 3), strides=(2, 2),
                         padding='same')(c8)
    u9 = concatenate([u9, c1])
    u9 = Dropout(dropout)(u9)
    c9 = conv2d_block(u9, n_filters * 1, kernel_size=3, batchnorm=batchnorm)

    outputs = Conv2D(1, (1, 1), activation='sigmoid')(c9)
    model = Model(inputs=[input_img], outputs=[outputs])
    return model
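A short usage sketch, assuming the conv2d_block helper sketched earlier (the image size below is arbitrary):

from tensorflow.keras.layers import Input

input_img = Input((128, 128, 1), name='img')  # image size is an assumption
model = get_unet(input_img, n_filters=16, dropout=0.1, batchnorm=True)
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])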
def create_pooling_layers(types_of_poolings: List[str],
                          ksizes: List[Tuple]) -> List:
    pools = []
    for p, k in zip(types_of_poolings, ksizes):
        if p == PoolingLayerFactory.MAX:
            pool = MaxPooling2D(pool_size=k)
        elif p == PoolingLayerFactory.AVG:
            pool = AveragePooling2D(pool_size=k)
        elif p == PoolingLayerFactory.ENTR:
            pool = EntropyPooling2D(pool_size=k)
            # pool = EntropyPoolLayer()
        else:
            pool = MaxPooling2D(pool_size=k)
        pools.append(pool)
    return pools
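A hypothetical call, assuming PoolingLayerFactory.MAX / AVG / ENTR are string constants such as 'max', 'avg' and 'entropy':

pools = create_pooling_layers(
    [PoolingLayerFactory.MAX, PoolingLayerFactory.AVG],
    [(2, 2), (3, 3)])
# pools[0] is a MaxPooling2D(pool_size=(2, 2)), pools[1] an AveragePooling2D(pool_size=(3, 3))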
Example #5
def inception_block_2b(X):
    # inception4e
    X_3x3 = fr_utils.conv2d_bn(X,
                               layer='inception_4e_3x3',
                               cv1_out=160,
                               cv1_filter=(1, 1),
                               cv2_out=256,
                               cv2_filter=(3, 3),
                               cv2_strides=(2, 2),
                               padding=(1, 1))
    X_5x5 = fr_utils.conv2d_bn(X,
                               layer='inception_4e_5x5',
                               cv1_out=64,
                               cv1_filter=(1, 1),
                               cv2_out=128,
                               cv2_filter=(5, 5),
                               cv2_strides=(2, 2),
                               padding=(2, 2))

    X_pool = MaxPooling2D(pool_size=3, strides=2,
                          data_format='channels_first')(X)
    X_pool = ZeroPadding2D(padding=((0, 1), (0, 1)),
                           data_format='channels_first')(X_pool)

    inception = concatenate([X_3x3, X_5x5, X_pool], axis=1)

    return inception
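The inception blocks in this and the following example call fr_utils.conv2d_bn, which is not shown. A plausible sketch of such a helper (a 1x1 convolution plus batch norm and ReLU, optionally followed by zero padding and a second convolution, all in channels_first format) is below; the real fr_utils may differ in details such as layer naming.

from tensorflow.keras.layers import Activation, BatchNormalization, Conv2D, ZeroPadding2D

def conv2d_bn(x, layer=None, cv1_out=None, cv1_filter=(1, 1), cv1_strides=(1, 1),
              cv2_out=None, cv2_filter=(3, 3), cv2_strides=(1, 1), padding=None):
    # 1x1 (or cv1_filter) convolution, batch norm over the channel axis, ReLU
    tensor = Conv2D(cv1_out, cv1_filter, strides=cv1_strides,
                    data_format='channels_first', name=layer + '_conv1')(x)
    tensor = BatchNormalization(axis=1, epsilon=0.00001, name=layer + '_bn1')(tensor)
    tensor = Activation('relu')(tensor)
    if padding is not None:
        tensor = ZeroPadding2D(padding=padding, data_format='channels_first')(tensor)
    if cv2_out is None:
        return tensor
    # Optional second convolution (the 3x3 or 5x5 branch of an inception block)
    tensor = Conv2D(cv2_out, cv2_filter, strides=cv2_strides,
                    data_format='channels_first', name=layer + '_conv2')(tensor)
    tensor = BatchNormalization(axis=1, epsilon=0.00001, name=layer + '_bn2')(tensor)
    tensor = Activation('relu')(tensor)
    return tensor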
Example #6
def inception_block_3b(X):
    X_3x3 = fr_utils.conv2d_bn(X,
                               layer='inception_5b_3x3',
                               cv1_out=96,
                               cv1_filter=(1, 1),
                               cv2_out=384,
                               cv2_filter=(3, 3),
                               cv2_strides=(1, 1),
                               padding=(1, 1))
    X_pool = MaxPooling2D(pool_size=3, strides=2,
                          data_format='channels_first')(X)
    X_pool = fr_utils.conv2d_bn(X_pool,
                                layer='inception_5b_pool',
                                cv1_out=96,
                                cv1_filter=(1, 1))
    X_pool = ZeroPadding2D(padding=(1, 1),
                           data_format='channels_first')(X_pool)

    X_1x1 = fr_utils.conv2d_bn(X,
                               layer='inception_5b_1x1',
                               cv1_out=256,
                               cv1_filter=(1, 1))
    inception = concatenate([X_3x3, X_pool, X_1x1], axis=1)

    return inception
Example #7
def ApplyCnn(X32_train, X32_test, Y32_train):

    model_CNN = Sequential()
    model_CNN.add(
        Conv2D(64,
               kernel_size=3,
               activation='relu',
               input_shape=(X32_train.shape[1], X32_train.shape[2], 1)))
    model_CNN.add(MaxPooling2D(pool_size=(2, 2)))
    model_CNN.add(Conv2D(32, kernel_size=3, activation='relu'))
    model_CNN.add(MaxPooling2D(pool_size=(2, 2)))
    model_CNN.add(Conv2D(16, kernel_size=3, activation='relu'))
    model_CNN.add(MaxPooling2D(pool_size=(2, 2)))
    model_CNN.add(Flatten())
    model_CNN.add(Dense(10, activation='softmax'))
    model_CNN.compile(optimizer='adam',
                      loss='categorical_crossentropy',
                      metrics=['accuracy'])
    model_CNN.fit(X32_train, Y32_train, epochs=10, verbose=0)
    return predict(X32_test, model_CNN)
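ApplyCnn returns through a predict helper that is not shown; a minimal sketch of what it presumably does (pick the arg-max class for each test sample) follows.

import numpy as np

def predict(X_test, model):
    # Return the index of the highest-probability class for each sample.
    probs = model.predict(X_test)
    return np.argmax(probs, axis=1)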
Example #8

# Build and train our neural network
inputs = Input((IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS))
s = Lambda(lambda x: x / 255)(inputs)

c1 = Conv2D(32, (3, 3),
            activation='elu',
            kernel_initializer='he_normal',
            padding='same')(s)
c1 = Dropout(0.1)(c1)
c1 = Conv2D(32, (3, 3),
            activation='elu',
            kernel_initializer='he_normal',
            padding='same')(c1)
p1 = MaxPooling2D((2, 2))(c1)

c2 = Conv2D(64, (3, 3),
            activation='elu',
            kernel_initializer='he_normal',
            padding='same')(p1)
c2 = Dropout(0.1)(c2)
c2 = Conv2D(64, (3, 3),
            activation='elu',
            kernel_initializer='he_normal',
            padding='same')(c2)
p2 = MaxPooling2D((2, 2))(c2)

c3 = Conv2D(128, (3, 3),
            activation='elu',
            kernel_initializer='he_normal',
            padding='same')(p2)

def __create_wide_residual_network(nb_classes,
                                   img_input,
                                   include_top=True,
                                   depth=28,
                                   width=8,
                                   dropout=0.0,
                                   activation='softmax',
                                   aux_depth=None,
                                   num_coarse_classes=None,
                                   aux_layouts=None,
                                   aux_weight_decay=3e-4,
                                   se_net=False):
    ''' Creates a Wide Residual Network with specified parameters
    Args:
        nb_classes: Number of output classes
        img_input: Input tensor or layer
        include_top: Flag to include the last dense layer
        depth: Depth of the network. Compute N = (n - 4) / 6.
               For a depth of 16, n = 16, N = (16 - 4) / 6 = 2
               For a depth of 28, n = 28, N = (28 - 4) / 6 = 4
               For a depth of 40, n = 40, N = (40 - 4) / 6 = 6
        width: Width of the network.
        dropout: Adds dropout if value is greater than 0.0
    Returns: a Keras Model
    '''
    curr_id = 0
    aux_list = []

    N = (depth - 4) // 6

    # AUX1.0
    aux_list_temp, curr_id = apply_aux(img_input,
                                       major_id=1,
                                       minor_id=0,
                                       curr_id=curr_id,
                                       aux_depth=aux_depth,
                                       num_coarse_classes=num_coarse_classes,
                                       aux_layouts=aux_layouts,
                                       aux_weight_decay=aux_weight_decay,
                                       initialization=aux_initia)
    aux_list = aux_list + aux_list_temp

    x = __conv1_block(img_input)
    nb_conv = 4

    for i in range(N):
        # AUX2.i
        aux_list_temp, curr_id = apply_aux(
            x,
            major_id=2,
            minor_id=i,
            curr_id=curr_id,
            aux_depth=aux_depth,
            num_coarse_classes=num_coarse_classes,
            aux_layouts=aux_layouts,
            aux_weight_decay=aux_weight_decay,
            initialization=aux_initia)
        aux_list = aux_list + aux_list_temp

        x = __conv2_block(x, width, dropout, se_net=se_net)
        nb_conv += 2

    # AUX2.-1
    aux_list_temp, curr_id = apply_aux(x,
                                       major_id=2,
                                       minor_id=-1,
                                       curr_id=curr_id,
                                       aux_depth=aux_depth,
                                       num_coarse_classes=num_coarse_classes,
                                       aux_layouts=aux_layouts,
                                       aux_weight_decay=aux_weight_decay,
                                       initialization=aux_initia)
    aux_list = aux_list + aux_list_temp

    x = MaxPooling2D((2, 2))(x)

    for i in range(N):
        # AUX3.i
        aux_list_temp, curr_id = apply_aux(
            x,
            major_id=3,
            minor_id=i,
            curr_id=curr_id,
            aux_depth=aux_depth,
            num_coarse_classes=num_coarse_classes,
            aux_layouts=aux_layouts,
            aux_weight_decay=aux_weight_decay,
            initialization=aux_initia)
        aux_list = aux_list + aux_list_temp

        x = __conv3_block(x, width, dropout, se_net=se_net)
        nb_conv += 2

    # AUX3.-1
    aux_list_temp, curr_id = apply_aux(x,
                                       major_id=3,
                                       minor_id=-1,
                                       curr_id=curr_id,
                                       aux_depth=aux_depth,
                                       num_coarse_classes=num_coarse_classes,
                                       aux_layouts=aux_layouts,
                                       aux_weight_decay=aux_weight_decay,
                                       initialization=aux_initia)
    aux_list = aux_list + aux_list_temp

    x = MaxPooling2D((2, 2))(x)

    for i in range(N):
        # AUX4.i
        aux_list_temp, curr_id = apply_aux(
            x,
            major_id=4,
            minor_id=i,
            curr_id=curr_id,
            aux_depth=aux_depth,
            num_coarse_classes=num_coarse_classes,
            aux_layouts=aux_layouts,
            aux_weight_decay=aux_weight_decay,
            initialization=aux_initia)
        aux_list = aux_list + aux_list_temp

        x = ___conv4_block(x, width, dropout, se_net=se_net)
        nb_conv += 2

    if include_top:
        x = GlobalAveragePooling2D()(x)

        x = Dense(nb_classes,
                  activation=activation,
                  name='output',
                  kernel_initializer=initia)(x)

    # Build the model outside the include_top branch so it is always defined.
    if aux_depth:
        model = Model(inputs=img_input, outputs=[x] + aux_list)
    else:
        model = Model(inputs=img_input, outputs=x)

    return model
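The __conv2_block, __conv3_block and ___conv4_block helpers are not included in the snippet. A rough, self-contained sketch of one such wide-residual block (two 3x3 convolutions with a widening factor and a shortcut connection) is shown below; the original may instead use pre-activation ordering, weight decay and squeeze-and-excitation when se_net=True.

from tensorflow.keras.layers import Activation, Add, BatchNormalization, Conv2D, Dropout

def wide_residual_block(x, base_filters=32, width=8, dropout=0.0):
    # Widened residual block: base_filters * width output channels.
    filters = base_filters * width
    shortcut = x
    out = Conv2D(filters, (3, 3), padding='same')(x)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    if dropout > 0.0:
        out = Dropout(dropout)(out)
    out = Conv2D(filters, (3, 3), padding='same')(out)
    out = BatchNormalization()(out)
    # Project the shortcut with a 1x1 convolution if the channel count changes.
    if shortcut.shape[-1] != filters:
        shortcut = Conv2D(filters, (1, 1), padding='same')(shortcut)
    out = Add()([out, shortcut])
    return Activation('relu')(out)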
Example #10
def create_model(noise=True,
                 first_kernel_size=(7, 7),
                 n_filters=64,
                 n_covul_layers=5,
                 activation='swish',
                 dense_neurons=1024,
                 dropout=0.5,
                 lr=0.0001):
    kernel = (3, 3)
    n_classes = len(classes)

    input_layer = Input(shape=(300, 300, 3))
    x = input_layer
    if noise:
        x = GaussianNoise(0.1)(x)

    model = BatchNormalization(axis=[1, 2])(x)

    model = Conv2D(filters=n_filters,
                   kernel_size=first_kernel_size,
                   activation=activation)(model)
    model = BatchNormalization(axis=[1, 2])(model)
    model = MaxPooling2D((2, 2))(model)

    for i in range(2, n_covul_layers):
        model = Conv2D(filters=n_filters * i,
                       kernel_size=kernel,
                       activation=activation)(model)
        model = Conv2D(filters=n_filters * i,
                       kernel_size=kernel,
                       activation=activation,
                       padding='same')(model)
        model = BatchNormalization(axis=[1, 2])(model)
        model = MaxPooling2D((2, 2))(model)

    model = Conv2D(filters=n_filters * (n_covul_layers + 1),
                   kernel_size=kernel,
                   activation=activation,
                   padding='same')(model)
    model = Conv2D(filters=n_filters * (n_covul_layers + 1),
                   kernel_size=kernel,
                   activation=activation,
                   padding='same')(model)
    model = BatchNormalization(axis=[1, 2])(model)
    model = MaxPooling2D((2, 2))(model)

    model = Flatten()(model)
    model = Dense(dense_neurons, activation=activation)(model)
    model = BatchNormalization()(model)
    model = Dropout(dropout)(model)

    model = Dense(dense_neurons // 2, activation=activation)(model)
    model = BatchNormalization()(model)
    model = Dropout(dropout)(model)

    output = Dense(n_classes, activation="softmax")(model)

    model = Model(input_layer, output)
    model.compile(loss="sparse_categorical_crossentropy",
                  optimizer=keras.optimizers.Adam(lr=lr),
                  metrics=["accuracy"])

    return model
def create_model(Input):
    x = ZeroPadding2D(padding=(3, 3), input_shape=(96, 96, 3))(Input)
    x = Conv2D(64, (7, 7), strides=(2, 2), name='conv1')(x)
    x = BatchNormalization(axis=3, epsilon=0.00001, name='bn1')(x)
    x = Activation('relu')(x)
    x = ZeroPadding2D(padding=(1, 1))(x)
    x = MaxPooling2D(pool_size=3, strides=2)(x)
    x = Lambda(LRN2D, name='lrn_1')(x)
    x = Conv2D(64, (1, 1), name='conv2')(x)
    x = BatchNormalization(axis=3, epsilon=0.00001, name='bn2')(x)
    x = Activation('relu')(x)
    x = ZeroPadding2D(padding=(1, 1))(x)
    x = Conv2D(192, (3, 3), name='conv3')(x)
    x = BatchNormalization(axis=3, epsilon=0.00001, name='bn3')(x)
    x = Activation('relu')(x)
    x = Lambda(LRN2D, name='lrn_2')(x)
    x = ZeroPadding2D(padding=(1, 1))(x)
    x = MaxPooling2D(pool_size=3, strides=2)(x)
    # Inception3a
    inception_3a_3x3 = Conv2D(96, (1, 1), name='inception_3a_3x3_conv1')(x)
    inception_3a_3x3 = BatchNormalization(
        axis=3, epsilon=0.00001, name='inception_3a_3x3_bn1')(inception_3a_3x3)
    inception_3a_3x3 = Activation('relu')(inception_3a_3x3)
    inception_3a_3x3 = ZeroPadding2D(padding=(1, 1))(inception_3a_3x3)
    inception_3a_3x3 = Conv2D(128, (3, 3),
                              name='inception_3a_3x3_conv2')(inception_3a_3x3)
    inception_3a_3x3 = BatchNormalization(
        axis=3, epsilon=0.00001, name='inception_3a_3x3_bn2')(inception_3a_3x3)
    inception_3a_3x3 = Activation('relu')(inception_3a_3x3)

    inception_3a_5x5 = Conv2D(16, (1, 1), name='inception_3a_5x5_conv1')(x)
    inception_3a_5x5 = BatchNormalization(
        axis=3, epsilon=0.00001, name='inception_3a_5x5_bn1')(inception_3a_5x5)
    inception_3a_5x5 = Activation('relu')(inception_3a_5x5)
    inception_3a_5x5 = ZeroPadding2D(padding=(2, 2))(inception_3a_5x5)
    inception_3a_5x5 = Conv2D(32, (5, 5),
                              name='inception_3a_5x5_conv2')(inception_3a_5x5)
    inception_3a_5x5 = BatchNormalization(
        axis=3, epsilon=0.00001, name='inception_3a_5x5_bn2')(inception_3a_5x5)
    inception_3a_5x5 = Activation('relu')(inception_3a_5x5)

    inception_3a_pool = MaxPooling2D(pool_size=3, strides=2)(x)
    inception_3a_pool = Conv2D(
        32, (1, 1), name='inception_3a_pool_conv')(inception_3a_pool)
    inception_3a_pool = BatchNormalization(
        axis=3, epsilon=0.00001,
        name='inception_3a_pool_bn')(inception_3a_pool)
    inception_3a_pool = Activation('relu')(inception_3a_pool)
    inception_3a_pool = ZeroPadding2D(padding=((3, 4), (3,
                                                        4)))(inception_3a_pool)

    inception_3a_1x1 = Conv2D(64, (1, 1), name='inception_3a_1x1_conv')(x)
    inception_3a_1x1 = BatchNormalization(
        axis=3, epsilon=0.00001, name='inception_3a_1x1_bn')(inception_3a_1x1)
    inception_3a_1x1 = Activation('relu')(inception_3a_1x1)

    inception_3a = concatenate([
        inception_3a_3x3, inception_3a_5x5, inception_3a_pool, inception_3a_1x1
    ],
                               axis=3)

    # Inception3b
    inception_3b_3x3 = Conv2D(96, (1, 1),
                              name='inception_3b_3x3_conv1')(inception_3a)
    inception_3b_3x3 = BatchNormalization(
        axis=3, epsilon=0.00001, name='inception_3b_3x3_bn1')(inception_3b_3x3)
    inception_3b_3x3 = Activation('relu')(inception_3b_3x3)
    inception_3b_3x3 = ZeroPadding2D(padding=(1, 1))(inception_3b_3x3)
    inception_3b_3x3 = Conv2D(128, (3, 3),
                              name='inception_3b_3x3_conv2')(inception_3b_3x3)
    inception_3b_3x3 = BatchNormalization(
        axis=3, epsilon=0.00001, name='inception_3b_3x3_bn2')(inception_3b_3x3)
    inception_3b_3x3 = Activation('relu')(inception_3b_3x3)

    inception_3b_5x5 = Conv2D(32, (1, 1),
                              name='inception_3b_5x5_conv1')(inception_3a)
    inception_3b_5x5 = BatchNormalization(
        axis=3, epsilon=0.00001, name='inception_3b_5x5_bn1')(inception_3b_5x5)
    inception_3b_5x5 = Activation('relu')(inception_3b_5x5)
    inception_3b_5x5 = ZeroPadding2D(padding=(2, 2))(inception_3b_5x5)
    inception_3b_5x5 = Conv2D(64, (5, 5),
                              name='inception_3b_5x5_conv2')(inception_3b_5x5)
    inception_3b_5x5 = BatchNormalization(
        axis=3, epsilon=0.00001, name='inception_3b_5x5_bn2')(inception_3b_5x5)
    inception_3b_5x5 = Activation('relu')(inception_3b_5x5)

    inception_3b_pool = Lambda(lambda x: x**2, name='power2_3b')(inception_3a)
    inception_3b_pool = AveragePooling2D(pool_size=(3, 3),
                                         strides=(3, 3))(inception_3b_pool)
    inception_3b_pool = Lambda(lambda x: x * 9,
                               name='mult9_3b')(inception_3b_pool)
    inception_3b_pool = Lambda(lambda x: K.sqrt(x),
                               name='sqrt_3b')(inception_3b_pool)
    inception_3b_pool = Conv2D(
        64, (1, 1), name='inception_3b_pool_conv')(inception_3b_pool)
    inception_3b_pool = BatchNormalization(
        axis=3, epsilon=0.00001,
        name='inception_3b_pool_bn')(inception_3b_pool)
    inception_3b_pool = Activation('relu')(inception_3b_pool)
    inception_3b_pool = ZeroPadding2D(padding=(4, 4))(inception_3b_pool)

    inception_3b_1x1 = Conv2D(64, (1, 1),
                              name='inception_3b_1x1_conv')(inception_3a)
    inception_3b_1x1 = BatchNormalization(
        axis=3, epsilon=0.00001, name='inception_3b_1x1_bn')(inception_3b_1x1)
    inception_3b_1x1 = Activation('relu')(inception_3b_1x1)

    inception_3b = concatenate([
        inception_3b_3x3, inception_3b_5x5, inception_3b_pool, inception_3b_1x1
    ],
                               axis=3)

    # Inception3c
    inception_3c_3x3 = utils.conv2d_bn(inception_3b,
                                       layer='inception_3c_3x3',
                                       cv1_out=128,
                                       cv1_filter=(1, 1),
                                       cv2_out=256,
                                       cv2_filter=(3, 3),
                                       cv2_strides=(2, 2),
                                       padding=(1, 1))

    inception_3c_5x5 = utils.conv2d_bn(inception_3b,
                                       layer='inception_3c_5x5',
                                       cv1_out=32,
                                       cv1_filter=(1, 1),
                                       cv2_out=64,
                                       cv2_filter=(5, 5),
                                       cv2_strides=(2, 2),
                                       padding=(2, 2))

    inception_3c_pool = MaxPooling2D(pool_size=3, strides=2)(inception_3b)
    inception_3c_pool = ZeroPadding2D(padding=((0, 1), (0,
                                                        1)))(inception_3c_pool)

    inception_3c = concatenate(
        [inception_3c_3x3, inception_3c_5x5, inception_3c_pool], axis=3)

    # inception 4a
    inception_4a_3x3 = utils.conv2d_bn(inception_3c,
                                       layer='inception_4a_3x3',
                                       cv1_out=96,
                                       cv1_filter=(1, 1),
                                       cv2_out=192,
                                       cv2_filter=(3, 3),
                                       cv2_strides=(1, 1),
                                       padding=(1, 1))
    inception_4a_5x5 = utils.conv2d_bn(inception_3c,
                                       layer='inception_4a_5x5',
                                       cv1_out=32,
                                       cv1_filter=(1, 1),
                                       cv2_out=64,
                                       cv2_filter=(5, 5),
                                       cv2_strides=(1, 1),
                                       padding=(2, 2))

    inception_4a_pool = Lambda(lambda x: x**2, name='power2_4a')(inception_3c)
    inception_4a_pool = AveragePooling2D(pool_size=(3, 3),
                                         strides=(3, 3))(inception_4a_pool)
    inception_4a_pool = Lambda(lambda x: x * 9,
                               name='mult9_4a')(inception_4a_pool)
    inception_4a_pool = Lambda(lambda x: K.sqrt(x),
                               name='sqrt_4a')(inception_4a_pool)
    inception_4a_pool = utils.conv2d_bn(inception_4a_pool,
                                        layer='inception_4a_pool',
                                        cv1_out=128,
                                        cv1_filter=(1, 1),
                                        padding=(2, 2))
    inception_4a_1x1 = utils.conv2d_bn(inception_3c,
                                       layer='inception_4a_1x1',
                                       cv1_out=256,
                                       cv1_filter=(1, 1))
    inception_4a = concatenate([
        inception_4a_3x3, inception_4a_5x5, inception_4a_pool, inception_4a_1x1
    ],
                               axis=3)

    # inception4e
    inception_4e_3x3 = utils.conv2d_bn(inception_4a,
                                       layer='inception_4e_3x3',
                                       cv1_out=160,
                                       cv1_filter=(1, 1),
                                       cv2_out=256,
                                       cv2_filter=(3, 3),
                                       cv2_strides=(2, 2),
                                       padding=(1, 1))
    inception_4e_5x5 = utils.conv2d_bn(inception_4a,
                                       layer='inception_4e_5x5',
                                       cv1_out=64,
                                       cv1_filter=(1, 1),
                                       cv2_out=128,
                                       cv2_filter=(5, 5),
                                       cv2_strides=(2, 2),
                                       padding=(2, 2))
    inception_4e_pool = MaxPooling2D(pool_size=3, strides=2)(inception_4a)
    inception_4e_pool = ZeroPadding2D(padding=((0, 1), (0,
                                                        1)))(inception_4e_pool)

    inception_4e = concatenate(
        [inception_4e_3x3, inception_4e_5x5, inception_4e_pool], axis=3)

    # inception5a
    inception_5a_3x3 = utils.conv2d_bn(inception_4e,
                                       layer='inception_5a_3x3',
                                       cv1_out=96,
                                       cv1_filter=(1, 1),
                                       cv2_out=384,
                                       cv2_filter=(3, 3),
                                       cv2_strides=(1, 1),
                                       padding=(1, 1))

    inception_5a_pool = Lambda(lambda x: x**2, name='power2_5a')(inception_4e)
    inception_5a_pool = AveragePooling2D(pool_size=(3, 3),
                                         strides=(3, 3))(inception_5a_pool)
    inception_5a_pool = Lambda(lambda x: x * 9,
                               name='mult9_5a')(inception_5a_pool)
    inception_5a_pool = Lambda(lambda x: K.sqrt(x),
                               name='sqrt_5a')(inception_5a_pool)
    inception_5a_pool = utils.conv2d_bn(inception_5a_pool,
                                        layer='inception_5a_pool',
                                        cv1_out=96,
                                        cv1_filter=(1, 1),
                                        padding=(1, 1))
    inception_5a_1x1 = utils.conv2d_bn(inception_4e,
                                       layer='inception_5a_1x1',
                                       cv1_out=256,
                                       cv1_filter=(1, 1))

    inception_5a = concatenate(
        [inception_5a_3x3, inception_5a_pool, inception_5a_1x1], axis=3)

    # inception_5b
    inception_5b_3x3 = utils.conv2d_bn(inception_5a,
                                       layer='inception_5b_3x3',
                                       cv1_out=96,
                                       cv1_filter=(1, 1),
                                       cv2_out=384,
                                       cv2_filter=(3, 3),
                                       cv2_strides=(1, 1),
                                       padding=(1, 1))
    inception_5b_pool = MaxPooling2D(pool_size=3, strides=2)(inception_5a)
    inception_5b_pool = utils.conv2d_bn(inception_5b_pool,
                                        layer='inception_5b_pool',
                                        cv1_out=96,
                                        cv1_filter=(1, 1))
    inception_5b_pool = ZeroPadding2D(padding=(1, 1))(inception_5b_pool)

    inception_5b_1x1 = utils.conv2d_bn(inception_5a,
                                       layer='inception_5b_1x1',
                                       cv1_out=256,
                                       cv1_filter=(1, 1))
    inception_5b = concatenate(
        [inception_5b_3x3, inception_5b_pool, inception_5b_1x1], axis=3)

    av_pool = AveragePooling2D(pool_size=(3, 3), strides=(1, 1))(inception_5b)
    reshape_layer = Flatten()(av_pool)
    dense_layer = Dense(128, name='dense_layer')(reshape_layer)
    norm_layer = Lambda(lambda x: K.l2_normalize(x, axis=1),
                        name='norm_layer')(dense_layer)

    # Final Model
    model = Model(inputs=[Input], outputs=norm_layer)

    return model
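The stem above wraps an LRN2D function in a Lambda layer, but LRN2D itself is not shown. A minimal stand-in based on TensorFlow's local response normalization op (parameter values assumed) would look like this:

import tensorflow as tf

def LRN2D(x):
    # Local response normalization, as in the original GoogLeNet/OpenFace stem.
    return tf.nn.local_response_normalization(x, depth_radius=5, bias=1.0,
                                              alpha=1e-4, beta=0.75)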
Example #12
def run(model):
    # Download kitti dataset
    build_data.maybe_download_training_img(DATA_DIRECTORY)

    x, y = build_data.get_data(TRAINING_DATA_DIRECTORY, IMAGE_SHAPE)

    if model is None:
        inputs = Input(shape=(IMAGE_SHAPE[0], IMAGE_SHAPE[1], 3))

        # Block 1
        block1_conv1 = Conv2D(64, (3, 3),
                              activation='relu',
                              padding='same',
                              name='block1_conv1')(inputs)
        block1_conv2 = Conv2D(64, (3, 3),
                              activation='relu',
                              padding='same',
                              name='block1_conv2')(block1_conv1)
        block1_pool = MaxPooling2D((2, 2), strides=(2, 2),
                                   name='block1_pool')(block1_conv2)

        # Block 2
        block2_conv1 = Conv2D(128, (3, 3),
                              activation='relu',
                              padding='same',
                              name='block2_conv1')(block1_pool)
        block2_conv2 = Conv2D(128, (3, 3),
                              activation='relu',
                              padding='same',
                              name='block2_conv2')(block2_conv1)
        block2_pool = MaxPooling2D((2, 2), strides=(2, 2),
                                   name='block2_pool')(block2_conv2)

        # Block 3
        block3_conv1 = Conv2D(256, (3, 3),
                              activation='relu',
                              padding='same',
                              name='block3_conv1')(block2_pool)
        block3_conv2 = Conv2D(256, (3, 3),
                              activation='relu',
                              padding='same',
                              name='block3_conv2')(block3_conv1)
        block3_conv3 = Conv2D(256, (3, 3),
                              activation='relu',
                              padding='same',
                              name='block3_conv3')(block3_conv2)
        block3_pool = MaxPooling2D((2, 2), strides=(2, 2),
                                   name='block3_pool')(block3_conv3)

        # Block 4
        block4_conv1 = Conv2D(512, (3, 3),
                              activation='relu',
                              padding='same',
                              name='block4_conv1')(block3_pool)
        block4_conv2 = Conv2D(512, (3, 3),
                              activation='relu',
                              padding='same',
                              name='block4_conv2')(block4_conv1)
        block4_conv3 = Conv2D(512, (3, 3),
                              activation='relu',
                              padding='same',
                              name='block4_conv3')(block4_conv2)
        block4_pool = MaxPooling2D((2, 2), strides=(2, 2),
                                   name='block4_pool')(block4_conv3)

        # Block 5
        block5_conv1 = Conv2D(512, (3, 3),
                              activation='relu',
                              padding='same',
                              name='block5_conv1')(block4_pool)
        block5_conv2 = Conv2D(512, (3, 3),
                              activation='relu',
                              padding='same',
                              name='block5_conv2')(block5_conv1)
        block5_conv3 = Conv2D(512, (3, 3),
                              activation='relu',
                              padding='same',
                              name='block5_conv3')(block5_conv2)
        block5_pool = MaxPooling2D((2, 2), strides=(2, 2),
                                   name='block5_pool')(block5_conv3)

        pool5_conv1x1 = Conv2D(2, (1, 1), activation='relu',
                               padding='same')(block5_pool)
        upsample_1 = Conv2DTranspose(2,
                                     kernel_size=(4, 4),
                                     strides=(2, 2),
                                     padding="same")(pool5_conv1x1)

        pool4_conv1x1 = Conv2D(2, (1, 1), activation='relu',
                               padding='same')(block4_pool)
        add_1 = Add()([upsample_1, pool4_conv1x1])

        upsample_2 = Conv2DTranspose(2,
                                     kernel_size=(4, 4),
                                     strides=(2, 2),
                                     padding="same")(add_1)
        pool3_conv1x1 = Conv2D(2, (1, 1), activation='relu',
                               padding='same')(block3_pool)
        add_2 = Add()([upsample_2, pool3_conv1x1])

        upsample_3 = Conv2DTranspose(2,
                                     kernel_size=(16, 16),
                                     strides=(8, 8),
                                     padding="same")(add_2)
        output = Dense(2, activation='softmax')(upsample_3)

        model = Model(inputs, output, name='multinet_seg')

        adam = Adam(lr=LEARNING_RATE)
        model.compile(loss='categorical_crossentropy',
                      optimizer=adam,
                      metrics=['accuracy'])

    model.fit(x, y, batch_size=BATCH_SIZE, epochs=EPOCHS)
    model.save('trained_model/trained_model' + str(time.time()) + '.h5')
import sys

import numpy as np
from mnist import MNIST  # loader from the python-mnist package (assumed)
from tensorflow.keras.layers import Conv2D, Dense, Flatten, MaxPooling2D
from tensorflow.keras.models import Sequential
from tensorflow.keras.utils import to_categorical
if __name__ == "__main__":
    file_path = sys.argv[1]
    mndata = MNIST(file_path)
    X_train, Y_train = mndata.load_training()
    X_test, Y_test = mndata.load_testing()
    X_train = np.array(X_train)
    Y_train = np.array(Y_train)
    X_test = np.array(X_test)
    Y_train_en = to_categorical(Y_train, 10)
    X_train_ = X_train.reshape(60000, 28, 28, 1)
    X_test_ = X_test.reshape(10000, 28, 28, 1)
    model = Sequential()
    model.add(Conv2D(64, kernel_size=3, activation='relu', input_shape=(28,28,1)))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(32, kernel_size=3, activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dense(10, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    model.fit(X_train_, Y_train_en, epochs=100, batch_size=100, verbose=0)
    pre = model.predict(X_test_)
    for i in pre:
        print(np.argmax(i))
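A small follow-up sketch (not in the original): compare the predictions against the held-out labels already loaded above to get a test accuracy.

    Y_test = np.array(Y_test)
    pred_labels = np.argmax(pre, axis=1)
    print('test accuracy:', np.mean(pred_labels == Y_test))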
Example #14
def faceRecoModel(input_shape):
    """
    Implementation of the Inception model used for FaceNet

    Arguments:
    input_shape -- shape of the images of the dataset

    Returns:
    model -- a Model() instance in Keras
    """

    # Define the input as a tensor with shape input_shape
    X_input = Input(input_shape)

    # Zero-Padding
    X = ZeroPadding2D((3, 3))(X_input)

    # First Block
    X = Conv2D(64, (7, 7), strides=(2, 2), name='conv1')(X)
    X = BatchNormalization(axis=1, name='bn1')(X)
    X = Activation('relu')(X)

    # Zero-Padding + MAXPOOL
    X = ZeroPadding2D((1, 1))(X)
    X = MaxPooling2D((3, 3), strides=2)(X)

    # Second Block
    X = Conv2D(64, (1, 1), strides=(1, 1), name='conv2')(X)
    X = BatchNormalization(axis=1, epsilon=0.00001, name='bn2')(X)
    X = Activation('relu')(X)

    # Zero-Padding + MAXPOOL
    X = ZeroPadding2D((1, 1))(X)

    # Second Block
    X = Conv2D(192, (3, 3), strides=(1, 1), name='conv3')(X)
    X = BatchNormalization(axis=1, epsilon=0.00001, name='bn3')(X)
    X = Activation('relu')(X)

    # Zero-Padding + MAXPOOL
    X = ZeroPadding2D((1, 1))(X)
    X = MaxPooling2D(pool_size=3, strides=2)(X)

    # Inception 1: a/b/c
    X = inception_block_1a(X)
    X = inception_block_1b(X)
    X = inception_block_1c(X)

    # Inception 2: a/b
    X = inception_block_2a(X)
    X = inception_block_2b(X)

    # Inception 3: a/b
    X = inception_block_3a(X)
    X = inception_block_3b(X)

    # Top layer
    X = AveragePooling2D(pool_size=(3, 3),
                         strides=(1, 1),
                         data_format='channels_first')(X)
    X = Flatten()(X)
    X = Dense(128, name='dense_layer')(X)

    # L2 normalization
    X = Lambda(lambda x: K.l2_normalize(x, axis=1))(X)

    # Create model instance
    model = Model(inputs=X_input, outputs=X, name='FaceRecoModel')

    return model
Example #15
def vgg_16_base(input_tensor=None):
    # Block 1
    x = Conv2D(64, (3, 3),
               activation='relu',
               padding='same',
               name='block1_conv1',
               trainable=True)(input_tensor)
    x = Conv2D(64, (3, 3),
               activation='relu',
               padding='same',
               name='block1_conv2',
               trainable=True)(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)

    # Block 2
    x = Conv2D(128, (3, 3),
               activation='relu',
               padding='same',
               name='block2_conv1',
               trainable=True)(x)
    x = Conv2D(128, (3, 3),
               activation='relu',
               padding='same',
               name='block2_conv2',
               trainable=True)(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)

    # Block 3
    x = Conv2D(256, (3, 3),
               activation='relu',
               padding='same',
               name='block3_conv1',
               trainable=True)(x)
    x = Conv2D(256, (3, 3),
               activation='relu',
               padding='same',
               name='block3_conv2',
               trainable=True)(x)
    # x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv3', trainable=True)(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)

    # Block 4
    x = Conv2D(512, (3, 3),
               activation='relu',
               padding='same',
               name='block4_conv1',
               trainable=True)(x)
    x = Conv2D(512, (3, 3),
               activation='relu',
               padding='same',
               name='block4_conv2',
               trainable=True)(x)
    x = Conv2D(512, (3, 3),
               activation='relu',
               padding='same',
               name='block4_conv3',
               trainable=True)(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)

    # Block 5
    x = Conv2D(512, (3, 3),
               activation='relu',
               padding='same',
               name='block5_conv1',
               trainable=True)(x)
    x = Conv2D(512, (3, 3),
               activation='relu',
               padding='same',
               name='block5_conv2',
               trainable=True)(x)
    x = Conv2D(512, (3, 3),
               activation='relu',
               padding='same',
               name='block5_conv3',
               trainable=True)(x)
    # x = MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(x)

    return x
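A brief usage sketch (assumed): wrap the base as a standalone feature extractor; variable-size inputs work because the stack is fully convolutional.

from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model

img_input = Input(shape=(None, None, 3))
shared_features = vgg_16_base(img_input)
feature_extractor = Model(img_input, shared_features)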
Example #16
def inception_block_1a(X):
    """
    Implementation of an inception block
    """

    X_3x3 = Conv2D(96, (1, 1),
                   data_format='channels_first',
                   name='inception_3a_3x3_conv1')(X)
    X_3x3 = BatchNormalization(axis=1,
                               epsilon=0.00001,
                               name='inception_3a_3x3_bn1')(X_3x3)
    X_3x3 = Activation('relu')(X_3x3)
    X_3x3 = ZeroPadding2D(padding=(1, 1), data_format='channels_first')(X_3x3)
    X_3x3 = Conv2D(128, (3, 3),
                   data_format='channels_first',
                   name='inception_3a_3x3_conv2')(X_3x3)
    X_3x3 = BatchNormalization(axis=1,
                               epsilon=0.00001,
                               name='inception_3a_3x3_bn2')(X_3x3)
    X_3x3 = Activation('relu')(X_3x3)

    X_5x5 = Conv2D(16, (1, 1),
                   data_format='channels_first',
                   name='inception_3a_5x5_conv1')(X)
    X_5x5 = BatchNormalization(axis=1,
                               epsilon=0.00001,
                               name='inception_3a_5x5_bn1')(X_5x5)
    X_5x5 = Activation('relu')(X_5x5)
    X_5x5 = ZeroPadding2D(padding=(2, 2), data_format='channels_first')(X_5x5)
    X_5x5 = Conv2D(32, (5, 5),
                   data_format='channels_first',
                   name='inception_3a_5x5_conv2')(X_5x5)
    X_5x5 = BatchNormalization(axis=1,
                               epsilon=0.00001,
                               name='inception_3a_5x5_bn2')(X_5x5)
    X_5x5 = Activation('relu')(X_5x5)

    X_pool = MaxPooling2D(pool_size=3, strides=2,
                          data_format='channels_first')(X)
    X_pool = Conv2D(32, (1, 1),
                    data_format='channels_first',
                    name='inception_3a_pool_conv')(X_pool)
    X_pool = BatchNormalization(axis=1,
                                epsilon=0.00001,
                                name='inception_3a_pool_bn')(X_pool)
    X_pool = Activation('relu')(X_pool)
    X_pool = ZeroPadding2D(padding=((3, 4), (3, 4)),
                           data_format='channels_first')(X_pool)

    X_1x1 = Conv2D(64, (1, 1),
                   data_format='channels_first',
                   name='inception_3a_1x1_conv')(X)
    X_1x1 = BatchNormalization(axis=1,
                               epsilon=0.00001,
                               name='inception_3a_1x1_bn')(X_1x1)
    X_1x1 = Activation('relu')(X_1x1)

    # CONCAT
    inception = concatenate([X_3x3, X_5x5, X_pool, X_1x1], axis=1)

    return inception
Example #17
def create_model(input_dim):
    model = tf.keras.models.Sequential([
        Conv2D(16, (3, 3),
               activation="relu",
               padding="same",
               input_shape=input_dim),
        Dropout(0.2),
        BatchNormalization(),
        Conv2D(32, (3, 3), activation="relu", padding="same"),
        MaxPooling2D((2, 2)),
        Activation('relu'),
        Dropout(0.2),
        BatchNormalization(),
        Conv2D(64, (3, 3), activation="relu", padding="same"),
        Dropout(0.2),
        BatchNormalization(),
        Conv2D(128, (3, 3), activation="relu", padding="same"),
        MaxPooling2D((2, 2)),
        Activation('relu'),
        Dropout(0.2),
        BatchNormalization(),
        Conv2D(256, (3, 3), activation="relu", padding="same"),
        Dropout(0.2),
        BatchNormalization(),
        Conv2D(512, (3, 3), activation="relu", padding="same"),
        Dropout(0.2),
        BatchNormalization(),
        Conv2D(1024, (3, 3), activation="relu", padding="same"),
        Dropout(0.2),
        BatchNormalization(),
        Flatten(),
        Dropout(0.2),
        Dense(2048,
              activation="relu",
              bias_regularizer=tf.keras.regularizers.L1L2(l1=0.01, l2=0.001),
              kernel_constraint=maxnorm(3)),
        Dropout(0.2),
        BatchNormalization(),
        Dense(1024,
              activation="relu",
              bias_regularizer=tf.keras.regularizers.L1L2(l1=0.01, l2=0.001),
              kernel_constraint=maxnorm(3)),
        Dropout(0.2),
        BatchNormalization(),
        Dense(512,
              activation="relu",
              bias_regularizer=tf.keras.regularizers.L1L2(l1=0.01, l2=0.001),
              kernel_constraint=maxnorm(3)),
        Dropout(0.2),
        BatchNormalization(),
        Dense(256,
              activation="relu",
              bias_regularizer=tf.keras.regularizers.L1L2(l1=0.01, l2=0.001),
              kernel_constraint=maxnorm(3)),
        Dropout(0.2),
        BatchNormalization(),
        Dense(128,
              activation="relu",
              bias_regularizer=tf.keras.regularizers.L1L2(l1=0.01, l2=0.001),
              kernel_constraint=maxnorm(3)),
        Dropout(0.2),
        BatchNormalization(),
        Dense(10, activation="softmax")
    ])
    model.compile(
        optimizer="adam",
        loss="categorical_crossentropy",
        metrics=["accuracy"],
    )
    return model
Example #18
train_data = train_data.map(preprocess).batch(batch_size).prefetch(1)
test_data = test_data.map(preprocess).batch(batch_size).prefetch(1)

learning_rate = 0.001
dense_neurons = 256
n_filters = 256
kernel = (3, 3)
n_classes = info.features['label'].num_classes
activation = 'elu'

input_layer = Input(shape=(300, 300, 3))

model = Conv2D(filters=n_filters, kernel_size=(3, 3),
               activation=activation)(input_layer)
model = MaxPooling2D((2, 2))(model)

model = Conv2D(filters=n_filters, kernel_size=kernel,
               activation=activation)(model)
model = MaxPooling2D((2, 2))(model)

model = Conv2D(filters=n_filters * 2,
               kernel_size=kernel,
               activation=activation)(model)
model = MaxPooling2D((2, 2))(model)

model = Conv2D(filters=n_filters * 2,
               kernel_size=kernel,
               activation=activation)(model)
model = MaxPooling2D((2, 2))(model)
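The snippet stops after the convolutional stack; a plausible continuation, assumed rather than taken from the source, flattens the features and attaches a softmax classifier head:

from tensorflow.keras.layers import Dense, Flatten
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam

model = Flatten()(model)
model = Dense(dense_neurons, activation=activation)(model)
output = Dense(n_classes, activation='softmax')(model)

model = Model(input_layer, output)
model.compile(optimizer=Adam(learning_rate),
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])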
Example #19
def VGG16_Places365(include_top=True,
                    weights='places',
                    input_tensor=None,
                    input_shape=None,
                    pooling=None,
                    classes=365):
    """Instantiates the VGG16-places365 architecture.

    Optionally loads weights pre-trained
    on Places. Note that when using TensorFlow,
    for best performance you should set
    `image_data_format="channels_last"` in your Keras config
    at ~/.keras/keras.json.

    The model and the weights are compatible with both
    TensorFlow and Theano. The data format
    convention used by the model is the one
    specified in your Keras config file.

    # Arguments
        include_top: whether to include the 3 fully-connected
            layers at the top of the network.
        weights: one of `None` (random initialization),
                 'places' (pre-training on Places),
                 or the path to the weights file to be loaded.
        input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
            to use as image input for the model.
        input_shape: optional shape tuple, only to be specified
            if `include_top` is False (otherwise the input shape
            has to be `(224, 224, 3)` (with `channels_last` data format)
            or `(3, 224, 224)` (with `channels_first` data format).
            It should have exactly 3 input channels,
            and width and height should be no smaller than 48.
            E.g. `(200, 200, 3)` would be one valid value.
        pooling: Optional pooling mode for feature extraction
            when `include_top` is `False`.
            - `None` means that the output of the model will be
                the 4D tensor output of the
                last convolutional layer.
            - `avg` means that global average pooling
                will be applied to the output of the
                last convolutional layer, and thus
                the output of the model will be a 2D tensor.
            - `max` means that global max pooling will
                be applied.
        classes: optional number of classes to classify images
            into, only to be specified if `include_top` is True, and
            if no `weights` argument is specified.
    # Returns
        A Keras model instance.
    # Raises
        ValueError: in case of invalid argument for `weights`, or invalid input shape
        """
    if not (weights in {'places', None} or os.path.exists(weights)):
        raise ValueError('The `weights` argument should be either '
                         '`None` (random initialization), `places` '
                         '(pre-training on Places), '
                         'or the path to the weights file to be loaded.')

    if weights == 'places' and include_top and classes != 365:
        raise ValueError('If using `weights` as places with `include_top`'
                         ' as true, `classes` should be 365')

    # Determine proper input shape
    input_shape = _obtain_input_shape(input_shape,
                                      default_size=224,
                                      min_size=48,
                                      data_format=K.image_data_format(),
                                      require_flatten=include_top)

    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor

    # Block 1
    x = Conv2D(filters=64,
               kernel_size=3,
               strides=(1, 1),
               padding='same',
               kernel_regularizer=l2(0.0002),
               activation='relu',
               name='block1_conv1')(img_input)

    x = Conv2D(filters=64,
               kernel_size=3,
               strides=(1, 1),
               padding='same',
               kernel_regularizer=l2(0.0002),
               activation='relu',
               name='block1_conv2')(x)

    x = MaxPooling2D(pool_size=(2, 2),
                     strides=(2, 2),
                     name="block1_pool",
                     padding='valid')(x)

    # Block 2
    x = Conv2D(filters=128,
               kernel_size=3,
               strides=(1, 1),
               padding='same',
               kernel_regularizer=l2(0.0002),
               activation='relu',
               name='block2_conv1')(x)

    x = Conv2D(filters=128,
               kernel_size=3,
               strides=(1, 1),
               padding='same',
               kernel_regularizer=l2(0.0002),
               activation='relu',
               name='block2_conv2')(x)

    x = MaxPooling2D(pool_size=(2, 2),
                     strides=(2, 2),
                     name="block2_pool",
                     padding='valid')(x)

    # Block 3
    x = Conv2D(filters=256,
               kernel_size=3,
               strides=(1, 1),
               padding='same',
               kernel_regularizer=l2(0.0002),
               activation='relu',
               name='block3_conv1')(x)

    x = Conv2D(filters=256,
               kernel_size=3,
               strides=(1, 1),
               padding='same',
               kernel_regularizer=l2(0.0002),
               activation='relu',
               name='block3_conv2')(x)

    x = Conv2D(filters=256,
               kernel_size=3,
               strides=(1, 1),
               padding='same',
               kernel_regularizer=l2(0.0002),
               activation='relu',
               name='block3_conv3')(x)

    x = MaxPooling2D(pool_size=(2, 2),
                     strides=(2, 2),
                     name="block3_pool",
                     padding='valid')(x)

    # Block 4
    x = Conv2D(filters=512,
               kernel_size=3,
               strides=(1, 1),
               padding='same',
               kernel_regularizer=l2(0.0002),
               activation='relu',
               name='block4_conv1')(x)

    x = Conv2D(filters=512,
               kernel_size=3,
               strides=(1, 1),
               padding='same',
               kernel_regularizer=l2(0.0002),
               activation='relu',
               name='block4_conv2')(x)

    x = Conv2D(filters=512,
               kernel_size=3,
               strides=(1, 1),
               padding='same',
               kernel_regularizer=l2(0.0002),
               activation='relu',
               name='block4_conv3')(x)

    x = MaxPooling2D(pool_size=(2, 2),
                     strides=(2, 2),
                     name="block4_pool",
                     padding='valid')(x)

    # Block 5
    x = Conv2D(filters=512,
               kernel_size=3,
               strides=(1, 1),
               padding='same',
               kernel_regularizer=l2(0.0002),
               activation='relu',
               name='block5_conv1')(x)

    x = Conv2D(filters=512,
               kernel_size=3,
               strides=(1, 1),
               padding='same',
               kernel_regularizer=l2(0.0002),
               activation='relu',
               name='block5_conv2')(x)

    x = Conv2D(filters=512,
               kernel_size=3,
               strides=(1, 1),
               padding='same',
               kernel_regularizer=l2(0.0002),
               activation='relu',
               name='block5_conv3')(x)

    x = MaxPooling2D(pool_size=(2, 2),
                     strides=(2, 2),
                     name="block5_pool",
                     padding='valid')(x)

    if include_top:
        # Classification block
        x = Flatten(name='flatten')(x)
        x = Dense(4096, activation='relu', name='fc1')(x)
        x = Dropout(0.5, name='drop_fc1')(x)

        x = Dense(4096, activation='relu', name='fc2')(x)
        x = Dropout(0.5, name='drop_fc2')(x)

        x = Dense(365, activation='softmax', name="predictions")(x)

    else:
        if pooling == 'avg':
            x = GlobalAveragePooling2D()(x)
        elif pooling == 'max':
            x = GlobalMaxPooling2D()(x)

    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = get_source_inputs(input_tensor)
    else:
        inputs = img_input

    # Create model.
    model = Model(inputs, x, name='vgg16-places365')

    # load weights
    if weights == 'places':
        if include_top:
            weights_path = get_file(
                'vgg16-places365_weights_tf_dim_ordering_tf_kernels.h5',
                WEIGHTS_PATH,
                cache_subdir='models')
        else:
            weights_path = get_file(
                'vgg16-places365_weights_tf_dim_ordering_tf_kernels_notop.h5',
                WEIGHTS_PATH_NO_TOP,
                cache_subdir='models')

        model.load_weights(weights_path)

        if K.backend() == 'theano':
            layer_utils.convert_all_kernels_in_model(model)

        if K.image_data_format() == 'channels_first':
            if include_top:
                maxpool = model.get_layer(name='block5_pool')
                shape = maxpool.output_shape[1:]
                dense = model.get_layer(name='fc1')
                layer_utils.convert_dense_weights_data_format(
                    dense, shape, 'channels_first')

            if K.backend() == 'tensorflow':
                warnings.warn('You are using the TensorFlow backend, yet you '
                              'are using the Theano '
                              'image data format convention '
                              '(`image_data_format="channels_first"`). '
                              'For best performance, set '
                              '`image_data_format="channels_last"` in '
                              'your Keras config '
                              'at ~/.keras/keras.json.')

    elif weights is not None:
        model.load_weights(weights)

    return model
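A short usage sketch (assumed): with include_top=False and pooling='avg', the function yields a 512-dimensional scene descriptor per image.

import numpy as np

base = VGG16_Places365(include_top=False, weights='places', pooling='avg')
images = np.random.rand(4, 224, 224, 3).astype('float32')  # placeholder batch
descriptors = base.predict(images)                          # shape: (4, 512)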