def VGG_celeba(nb_classes, img_dim, pretr_weights_file=None, model_name=None):
    """
    Build Convolution Neural Network

    args : nb_classes (int) number of classes for the replacement output
               layer that is added when fine-tuning from pre-trained weights
           img_dim (tuple of int) num_chan, height, width
               NOTE(review): currently unused -- the input shape is
               hard-coded to (3, 224, 224); confirm callers pass that size
           pretr_weights_file (str) file holding pre trained weights
           model_name (str) optional model name (defaults to "VGG_celeba")

    returns : model (keras NN) the Neural Net model
    """

    # VGG-style trunk: stacks of same-padded 3x3 convolutions with 2x2
    # max-pooling between stages (channels-first 224x224 RGB input).
    model = Sequential()
    model.add(Convolution2D(32, 3, 3, name="convolution2d_1", input_shape=(3, 224, 224), border_mode="same", activation='relu'))
    model.add(Convolution2D(32, 3, 3, name="convolution2d_2", border_mode="same", activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2), name="maxpooling2d_1"))

    model.add(Convolution2D(64, 3, 3, name="convolution2d_3", border_mode="same", activation='relu'))
    model.add(Convolution2D(64, 3, 3, name="convolution2d_4", border_mode="same", activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2), name="maxpooling2d_2"))

    model.add(Convolution2D(128, 3, 3, name="convolution2d_5", border_mode="same", activation='relu'))
    model.add(Convolution2D(128, 3, 3, name="convolution2d_6", border_mode="same", activation='relu'))
    model.add(Convolution2D(128, 3, 3, name="convolution2d_7", border_mode="same", activation='relu'))
    model.add(MaxPooling2D((2,2), strides=(2, 2), name="maxpooling2d_3"))

    model.add(Convolution2D(256, 3, 3, name="convolution2d_8", border_mode="same", activation='relu'))
    model.add(Convolution2D(256, 3, 3, name="convolution2d_9", border_mode="same", activation='relu'))
    model.add(Convolution2D(256, 3, 3, name="convolution2d_10", border_mode="same", activation='relu'))
    model.add(MaxPooling2D((2,2), strides=(2, 2), name="maxpooling2d_4"))

    model.add(Convolution2D(512, 3, 3, name="convolution2d_11", border_mode="same", activation='relu'))
    model.add(Convolution2D(512, 3, 3, name="convolution2d_12", border_mode="same", activation='relu'))
    model.add(Convolution2D(512, 3, 3, name="convolution2d_13", border_mode="same", activation='relu'))
    model.add(MaxPooling2D((2,2), strides=(2, 2), name="maxpooling2d_5"))

    # Classifier head: two 4096-unit FC layers with dropout, as in VGG.
    model.add(Flatten(name="flatten_1"))
    model.add(Dense(4096, activation='relu', name="dense_1"))
    model.add(Dropout(0.5, name="dropout_1"))
    model.add(Dense(4096, activation='relu', name="dense_2"))
    model.add(Dropout(0.5, name="dropout_2"))
    # The pre-trained head is a fixed 2-way softmax; it is replaced below
    # with an nb_classes head when pretr_weights_file is given.
    model.add(Dense(2, activation='softmax', name="dense_3"))

    if model_name:
        model.name = model_name
    else:
        model.name = "VGG_celeba"

    if pretr_weights_file:
        model.load_weights(pretr_weights_file)
        # Swap the pre-trained 2-way head for a fresh nb_classes softmax.
        # NOTE(review): popping model.layers directly is the old-Keras
        # idiom; verify it detaches the layer cleanly on the Keras version
        # in use (Sequential.pop() is the supported API in later versions).
        model.layers.pop()
        model.outputs = [model.layers[-1].output]
        model.layers[-1].outbound_nodes = []
        model.add(Dense(nb_classes, activation='softmax', name="dense_4"))

    # Freeze layers until specified number
    # for k in range(freeze_until):
    #     model.layers[k].trainable = True

    return model
def CNN(nb_classes, img_dim, pretr_weights_file=None, model_name=None):
    """
    Build Convolution Neural Network

    args : nb_classes (int) number of classes
           img_dim (tuple of int) num_chan, height, width
               NOTE(review): currently unused -- the input shape is
               hard-coded to (3, 224, 224); confirm callers pass that size
           pretr_weights_file (str) file holding pre trained weights
           model_name (str) optional model name (defaults to "CNN")

    returns : model (keras NN) the Neural Net model
    """

    # Truncated VGG-style trunk: the deeper 256/512-filter stages of the
    # sibling VGG_celeba model are intentionally commented out below.
    model = Sequential()
    model.add(Convolution2D(32, 3, 3, name="convolution2d_1", input_shape=(3, 224, 224), border_mode="same", activation='relu'))
    model.add(Convolution2D(32, 3, 3, name="convolution2d_2", border_mode="same", activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2), name="maxpooling2d_1"))

    model.add(Convolution2D(64, 3, 3, name="convolution2d_3", border_mode="same", activation='relu'))
    model.add(Convolution2D(64, 3, 3, name="convolution2d_4", border_mode="same", activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2), name="maxpooling2d_2"))

    model.add(Convolution2D(128, 3, 3, name="convolution2d_5", border_mode="same", activation='relu'))
    model.add(Convolution2D(128, 3, 3, name="convolution2d_6", border_mode="same", activation='relu'))
    model.add(Convolution2D(128, 3, 3, name="convolution2d_7", border_mode="same", activation='relu'))
    model.add(MaxPooling2D((2,2), strides=(2, 2), name="maxpooling2d_3"))

    # model.add(Convolution2D(256, 3, 3, name="convolution2d_8", border_mode="same", activation='relu'))
    # model.add(Convolution2D(256, 3, 3, name="convolution2d_9", border_mode="same", activation='relu'))
    # model.add(Convolution2D(256, 3, 3, name="convolution2d_10", border_mode="same", activation='relu'))
    # model.add(MaxPooling2D((2,2), strides=(2, 2), name="maxpooling2d_4"))

    # model.add(Convolution2D(512, 3, 3, name="convolution2d_11", border_mode="same", activation='relu'))
    # model.add(Convolution2D(512, 3, 3, name="convolution2d_12", border_mode="same", activation='relu'))
    # model.add(Convolution2D(512, 3, 3, name="convolution2d_13", border_mode="same", activation='relu'))
    # model.add(MaxPooling2D((2,2), strides=(2, 2), name="maxpooling2d_5"))

    # Classifier head: two 1024-unit FC layers with dropout.
    model.add(Flatten(name="flatten_1"))
    model.add(Dense(1024, activation='relu', name="dense_1"))
    model.add(Dropout(0.5, name="dropout_1"))
    model.add(Dense(1024, activation='relu', name="dense_2"))
    model.add(Dropout(0.5, name="dropout_2"))
    model.add(Dense(nb_classes, activation='softmax', name="dense_3"))

    if model_name:
        model.name = model_name
    else:
        model.name = "CNN"

    if pretr_weights_file:
        model.load_weights(pretr_weights_file)
        # Replace the loaded head with a fresh nb_classes softmax.
        # NOTE(review): popping model.layers directly is the old-Keras
        # idiom; verify it detaches the layer cleanly on the Keras version
        # in use (Sequential.pop() is the supported API in later versions).
        model.layers.pop()
        model.outputs = [model.layers[-1].output]
        model.layers[-1].outbound_nodes = []
        model.add(Dense(nb_classes, activation='softmax', name="dense_4"))

    return model
Esempio n. 3
0
def DenseNorm(units, dropout, name=None):
    """Return a Sequential Dense -> BatchNorm -> ReLU block.

    A Dropout layer is appended only when ``dropout`` is positive.  The
    block is renamed to ``name`` when one is given; otherwise the
    auto-generated 'sequential_*' name is rebranded to 'dense_norm_*'.
    """
    block = Sequential()
    for layer in (Dense(units), BatchNormalization(), ReLU()):
        block.add(layer)
    if dropout > 0:
        block.add(Dropout(dropout))
    block.name = (name if name is not None
                  else block.name.replace('sequential', 'dense_norm'))
    return block
Esempio n. 4
0
def two_layers_classifier_model():
    """Return the two-layer classifier head ("Clasificador Complejo").

    Expects 2048-dimensional feature vectors and emits a 200-way softmax.
    """
    clf = Sequential()
    clf.name = "Clasificador Complejo"
    for layer in (Dense(1024, activation='relu', input_shape=(2048, )),
                  Dense(200, activation='softmax')):
        clf.add(layer)
    return clf
 def create_model(self):
     """Build the Sequential CNN: two he_normal-initialised conv layers,
     a flatten, two 256-unit ReLU dense layers, and an output layer of
     ``self.output_nodes`` units.

     NOTE(review): the output layer uses a 'relu' activation, which is
     unusual for a classification head -- confirm it is intentional.
     """
     model = Sequential()
     # 16-filter conv preserves spatial size ('same' padding); input shape
     # comes from the owning object.
     model.add(
         Conv2D(16,
                kernel_size=(3, 3),
                activation='relu',
                input_shape=self.input_shape,
                padding='same',
                kernel_initializer="he_normal"))
     # Second conv uses the default ('valid') padding.
     model.add(
         Conv2D(32,
                kernel_size=(3, 3),
                activation='relu',
                kernel_initializer="he_normal"))
     model.add(Flatten())
     model.add(Dense(256, activation='relu',
                     kernel_initializer="he_normal"))
     model.add(Dense(256, activation='relu',
                     kernel_initializer="he_normal"))
     model.add(
         Dense(self.output_nodes,
               activation='relu',
               kernel_initializer="he_normal"))
     model.name = self.model_name
     return model
Esempio n. 6
0
def MNIST_carlini(use_softmax=True, rel_path='./'):
    """Build the Carlini-Wagner MNIST CNN and load its pre-trained weights.

    The network centres pixel values around zero, stacks two conv blocks
    (32 then 64 filters, each: conv-relu-conv-relu-pool), then three dense
    layers ending in 10 logits.  A softmax activation is appended when
    ``use_softmax`` is true.  Weights are read from
    <rel_path>models/weights/MNIST_carlini.keras_weights.h5.
    """
    model = Sequential()
    # Shift inputs from [0, 1] to [-0.5, 0.5].
    model.add(Lambda(lambda x: x - 0.5, input_shape=(28, 28, 1)))
    for n_filters in (32, 32 * 2):
        model.add(Conv2D(n_filters, (3, 3)))
        model.add(Activation('relu'))
        model.add(Conv2D(n_filters, (3, 3)))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    for _ in range(2):
        model.add(Dense(200))
        model.add(Activation('relu'))
    model.add(Dense(10))
    if use_softmax:
        model.add(Activation('softmax'))

    # Load pre-trained weights.
    weights_path = os.path.join('%smodels/weights' % rel_path,
                                "MNIST_carlini.keras_weights.h5")
    model.load_weights(weights_path)
    model.compile(loss='categorical_crossentropy',
                  optimizer='sgd',
                  metrics=['acc'])
    model.name = 'MNIST_carlini'
    return model
Esempio n. 7
0
    def create_model(name):
        """
        Creates a CNN model for NCPA calibration
        :param name: model name
        :return: compiled keras Sequential model
        """
        # Two channels per image pair (captured from the enclosing scope
        # via ``pix``): [nominal, defocus].
        input_shape = (pix, pix, 2)
        model = Sequential()
        model.name = name
        model.add(Conv2D(256,
                         kernel_size=(3, 3),
                         strides=(1, 1),
                         activation='relu',
                         input_shape=input_shape))
        # Funnel feature maps down before the dense read-out.
        for n_filters in (128, 32, 8):
            model.add(Conv2D(n_filters, (3, 3), activation='relu'))
        model.add(Flatten())
        model.add(Dense(N_act))
        model.summary()
        model.compile(optimizer='adam', loss='mean_squared_error')

        return model
Esempio n. 8
0
def build_lstm_model(compile=True):
    """Build the binary LSTM classifier over encoded, zero-padded sequences.

    Args:
        compile: when true, compile with Adam (lr 0.0005) + binary
            cross-entropy and print the model summary.
    """
    from data import MAX_SEQUENCE_LEN, ENCODER

    n_tokens = len(ENCODER) + 1  # +1 for the masked padding index
    seq_len = MAX_SEQUENCE_LEN
    emb_dim = 128

    model = Sequential()
    model.add(Embedding(n_tokens, emb_dim, input_length=seq_len,
                        mask_zero=True))
    model.add(LSTM(64, activation='sigmoid',
                   recurrent_activation='hard_sigmoid',
                   kernel_initializer=initializers.lecun_uniform(seed=None)))
    model.add(Dropout(0.5))
    model.add(Dense(32, activation='sigmoid'))
    model.add(Dropout(0.2))
    model.add(Dense(1, activation='sigmoid'))

    if compile:
        model.compile(loss='binary_crossentropy',
                      optimizer=Adam(lr=0.0005),
                      metrics=['accuracy'])
        model.summary()

    model.name = "lstm"
    return model
Esempio n. 9
0
def mlp_1(x_train, y_train, x_val, y_val, x_test, testid):
    """Train a one-hidden-layer MLP and predict on validation/test splits.

    Prints the balanced accuracy (BMAC) on the validation split and
    returns the pair (test predictions, testid).
    """
    # One-hot encode the 3-class targets.
    y_onehot = keras.utils.to_categorical(y_train, 3)

    model = Sequential()
    model.name = 'model'
    model.add(Dense(200,
                    activation='relu',
                    kernel_initializer='random_uniform',
                    input_shape=(x_train.shape[1], )))
    model.add(Dense(3, activation='softmax',
                    kernel_initializer='random_uniform'))

    model.compile(optimizer=keras.optimizers.Adadelta(),
                  loss='categorical_crossentropy',
                  metrics=['categorical_accuracy'])
    model.fit(x_train,
              y_onehot,
              batch_size=100,
              epochs=45,
              verbose=1)

    # Evaluate on the validation split via balanced accuracy.
    y_pred_val = sp.argmax(model.predict(x_val), axis=1)
    BMAC = balanced_accuracy_score(y_val, y_pred_val)
    print("BMAC Score is ", BMAC)

    return sp.argmax(model.predict(x_test), axis=1), testid
Esempio n. 10
0
def nvidia_driving_team(input_shape, name="nvidia_v1", load_weight=None):
    """NVIDIA-style end-to-end driving network.

    Crops sky/hood, normalises pixels, applies five conv layers and a
    dense funnel down to a single steering output.  Weights are loaded
    from ``load_weight`` when that file exists.
    """
    model = Sequential()
    # Crop 70 px of sky and 25 px of hood, then scale to [-0.5, 0.5].
    model.add(Cropping2D(cropping=((70, 25), (0, 0)), input_shape=input_shape))
    model.add(Lambda(lambda x: (x / 255) - 0.5))  # normalization layer
    for n_filters in (24, 48, 72):
        model.add(Conv2D(n_filters, kernel_size=(5, 5), activation="relu",
                         strides=(2, 2)))
    for n_filters in (96, 120):
        model.add(Conv2D(n_filters, kernel_size=(3, 3), activation="relu"))
    model.add(Flatten())
    for units in (200, 100):
        model.add(Dense(units))
        model.add(Dropout(0.5))
    model.add(Dense(10))
    model.add(Dense(1))
    model.name = name

    if load_weight is not None and os.path.isfile(load_weight):
        print('Loading weights', load_weight)
        model.load_weights(load_weight)
    else:
        print('Loading weights failed', load_weight)

    return model
Esempio n. 11
0
    def create_model_monochrom(name):
        """
        Creates a Monochromatic calibration model
        :param name: model name
        :return: compiled keras Sequential model
        """
        # Two channels per sample (``pix`` from enclosing scope):
        # [nominal, defocus].
        input_shape = (pix, pix, 2)
        model = Sequential()
        model.name = name
        model.add(Conv2D(256,
                         kernel_size=(3, 3),
                         strides=(1, 1),
                         activation='relu',
                         input_shape=input_shape))
        # Progressively narrower conv layers before the dense read-out.
        for n_filters in (128, 32, 8):
            model.add(Conv2D(n_filters, (3, 3), activation='relu'))
        model.add(Flatten())
        model.add(Dense(N_act))
        model.summary()
        model.compile(optimizer='adam', loss='mean_squared_error')

        return model
Esempio n. 12
0
def basic_denseTemp2(input_shape=1849,
                     output_shape=1,
                     init="glorot_uniform",
                     nb_filter=1024,
                     dropout_r=.2,
                     rrd=.6,
                     mname=None):  #shape and rate reduction
    '''Basic dense (MLP) regression model.

    args:
        input_shape (int): number of input features
        output_shape (int): number of output units (linear activation)
        init (str): kernel initializer used for every Dense layer
        nb_filter (int): width of the first hidden layer
        dropout_r (float): base dropout rate
        rrd (float): rate/width reduction factor -- hidden widths are
            scaled by rrd**2 and the second dropout rate by rrd**3
        mname (str): optional model name

    returns:
        model (keras Sequential): the network (uncompiled)
    '''
    model = Sequential()
    model.add(
        Dense(nb_filter,
              input_dim=input_shape,
              kernel_initializer=init,
              activation="relu"))
    model.add(BatchNormalization())
    model.add(Dropout(rate=dropout_r))
    # Two reduced-width hidden layers: one linear, one ReLU.
    model.add(
        Dense(np.int64(nb_filter * rrd * rrd),
              kernel_initializer=init,
              activation="linear"))
    model.add(
        Dense(np.int64(nb_filter * rrd * rrd),
              kernel_initializer=init,
              activation="relu"))
    model.add(BatchNormalization())
    model.add(Dropout(rate=dropout_r * rrd * rrd * rrd))
    model.add(Dense(output_shape, kernel_initializer=init,
                    activation="linear"))
    # Idiom fix: compare against None with `is not`, not `!=`.
    if mname is not None:
        model.name = mname
    return model
Esempio n. 13
0
def blstm(hidden_units):
    """Bidirectional LSTM sequence tagger over scalar time steps.

    Emits a NUM_CLASSES-way softmax per time step; prints the summary.
    """
    net = Sequential()
    net.add(Bidirectional(LSTM(hidden_units, return_sequences=True),
                          input_shape=(None, 1)))
    net.add(Dense(NUM_CLASSES, activation='softmax'))
    net.summary()
    net.name = 'blstm_{}'.format(hidden_units)
    return net
Esempio n. 14
0
    def create_model_multiwave(waves, name):
        """
        Creates a CNN model for NCPA calibration
        :param waves: Number of wavelengths in the training set (to adjust the number of channels)
        :param name: model name
        :return: compiled keras Sequential model
        """
        # Two channels per wavelength (``pix`` from enclosing scope).
        input_shape = (pix, pix, 2 * waves)
        model = Sequential()
        model.name = name
        model.add(Conv2D(256,
                         kernel_size=(3, 3),
                         strides=(1, 1),
                         activation='relu',
                         input_shape=input_shape))
        # Progressively narrower conv layers before the dense read-out.
        for n_filters in (128, 32, 8):
            model.add(Conv2D(n_filters, (3, 3), activation='relu'))
        model.add(Flatten())
        model.add(Dense(N_act))
        model.summary()
        model.compile(optimizer='adam', loss='mean_squared_error')

        return model
Esempio n. 15
0
    def conv_model_1(self):
        """Three conv/BN/ReLU/pool stages followed by a small dense head.

        Compiled with Adam + categorical cross-entropy; input size and
        class count come from the owning object.
        """
        model = Sequential()

        # First stage carries the input shape.
        model.add(Conv2D(16, 3,
                         input_shape=(self.image_size, self.image_size, 3)))
        model.add(BatchNormalization())
        model.add(Activation('relu'))
        model.add(MaxPooling2D())

        # Remaining stages double the filter count each time.
        for n_filters in (32, 64):
            model.add(Conv2D(n_filters, 3))
            model.add(BatchNormalization())
            model.add(Activation('relu'))
            model.add(MaxPooling2D())

        model.add(Flatten())
        model.add(Dense(32))
        model.add(Dropout(0.5))
        model.add(Activation('relu'))

        model.add(Dense(self.num_classes))
        model.add(Activation('softmax'))
        model.compile(optimizer='adam', loss='categorical_crossentropy',
                      metrics=['accuracy'])

        model.name = 'Convolution_model_1'
        return model
Esempio n. 16
0
def build_model_eff():
    """Two conv/BN blocks with pooling and dropout, then an L2-regularised
    dense funnel (128 -> 64 -> 32 -> 1) with ReLU activations throughout.
    """
    model = Sequential()
    model.name = "sequential_eff"

    # Block 1: 5x5 convolutions on the 64x64x3 input.
    model.add(Convolution2D(32, kernel_size=(5,5), padding='valid',
                            input_shape=(64,64,3), activation='relu'))
    model.add(BatchNormalization())
    model.add(Convolution2D(32, kernel_size=(5,5), padding='same',
                            activation='relu'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2,2)))
    model.add(Dropout(rate=0.2))

    # Block 2: two 3x3 conv+BN pairs.
    for _ in range(2):
        model.add(Convolution2D(64, kernel_size=(3,3), padding='same',
                                activation='relu'))
        model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2,2)))
    model.add(Dropout(rate=0.2))

    model.add(Flatten())
    for units in (128, 64, 32, 1):
        model.add(Dense(units, activation='relu',
                        kernel_regularizer=l2(0.01)))

    return model
Esempio n. 17
0
def model_P2(input_shape, name="old_friend_v1", load_weight=None):
    """Crop + normalise front end, five conv layers, then a heavily
    dropout-regularised dense stack ending in a single output.

    NOTE: ``load_weight`` is accepted for interface parity but not used.
    """
    model = Sequential()
    model.add(Cropping2D(cropping=((70, 25), (0, 0)), input_shape=input_shape))
    model.add(Lambda(lambda x: (x / 255) - 0.5))  # normalization layer
    model.add(Conv2D(10, kernel_size=(3, 3), activation="relu"))
    model.add(MaxPool2D())
    model.add(Conv2D(20, kernel_size=(3, 3), activation="relu"))
    model.add(MaxPool2D())
    model.add(Conv2D(40, kernel_size=(3, 3), activation="relu",
                     padding="VALID"))
    model.add(Conv2D(60, kernel_size=(3, 3), activation="relu",
                     padding="VALID"))
    model.add(MaxPool2D())
    model.add(Conv2D(80, kernel_size=(3, 3), activation="relu",
                     padding="VALID"))
    model.add(Flatten())
    # Dense funnel with 0.6 dropout after every hidden layer.
    for units in (600, 400, 200, 100):
        model.add(Dense(units))
        model.add(Dropout(0.6))
    model.add(Dense(1))
    model.name = name

    return model
Esempio n. 18
0
def model_cnn(input_shape=(1800,3,1), output_dim=5, 
              c_size=[32, 16, 8], k_size=[3, 3, 3], h_dim=[256, 32], d_p=[0.25, 0.25]):
    """Define CNN model
    returns model_cnn : CNN Keras model
    """
    net = Sequential()
    net.add(Layer(input_shape=input_shape, name='input'))

    # Convolutional feature extractor: one conv + max-pool per stage.
    for stage, n_filters in enumerate(c_size):
        net.add(Conv2D(n_filters, (k_size[stage], 1), padding='same',
                       activation='relu', name='c_'+str(stage+1)))
        net.add(MaxPooling2D((2, 1), padding='same', name='p_'+str(stage+1)))

    net.add(Flatten(name='flatten'))

    # Fully-connected head with per-layer dropout; names continue the
    # numbering after the conv stages.
    for stage, units in enumerate(h_dim):
        net.add(Dense(units, activation='relu',
                      name='h_'+str(stage+len(c_size)+1)))
        net.add(Dropout(d_p[stage], name='d_'+str(stage+len(c_size)+1)))

    net.add(Dense(output_dim, activation='relu', name='output'))

    sgd = keras.optimizers.SGD(lr=0.1,
                               momentum=0.8,
                               decay=0.003,
                               nesterov=False)
    net.compile(loss='binary_crossentropy',
                metrics=['categorical_accuracy'],
                optimizer=sgd)

    net.name = 'cnn'

    return net
Esempio n. 19
0
def cnn_cifar_batchnormalisation(image_shape):
    """Four conv+BN stages, each followed by 2x2 pooling, then a
    batch-normalised 64-unit dense layer and two linear outputs.
    """
    model = Sequential()
    model.add(Conv2D(16, (3, 3), padding='same', activation='relu',
                     input_shape=image_shape))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2)))

    # Two identical same-padded 32-filter stages.
    for _ in range(2):
        model.add(Conv2D(32, (3, 3), padding='same', activation='relu'))
        model.add(BatchNormalization())
        model.add(MaxPooling2D(pool_size=(2, 2)))

    # Final conv stage uses default ('valid') padding.
    model.add(Conv2D(64, (3, 3), activation='relu'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Flatten())
    model.add(Dense(64, activation='relu'))
    model.add(BatchNormalization())
    model.add(Dense(2))
    model.name = 'cnn_cifar_batchnormalisation'

    return model
Esempio n. 20
0
def cnn_cifar_small(image_shape=(5, 240, 160)):
    '''
    because of the overfitting problem, we reduce the number of filters to half
    This is the final used version for CVPRW
    :param image_shape:
    :return:
    '''
    model = Sequential()
    # Two stacks of paired same-padded convs (16 then 32 filters), each
    # closed with 2x2 pooling and dropout.
    for stage, n_filters in enumerate((16, 32)):
        if stage == 0:
            model.add(Conv2D(n_filters, (3, 3), padding='same',
                             activation='relu', input_shape=image_shape))
        else:
            model.add(Conv2D(n_filters, (3, 3), padding='same',
                             activation='relu'))
        model.add(Conv2D(n_filters, (3, 3), padding='same',
                         activation='relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))

    model.add(Flatten())
    model.add(Dense(256, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(2))

    model.name = 'cifar_small'
    return model
Esempio n. 21
0
        def train_discriminator(y_pred_in, y_pred_out, validation_data):
            """Train (or continue training) the feature-map membership
            inference attacker.

            Builds a small dense discriminator on the first call and
            caches it on ``self.featuremap_attacker``; later calls
            fine-tune the cached model.  Label convention used for fit:
            0 = member (y_pred_in), 1 = non-member (y_pred_out).
            """
            if self.featuremap_attacker is None:
                model = Sequential()
                model.name = "featuremap_mia"

                # Dense funnel 500 -> 250 -> 10 -> 1 (sigmoid membership score).
                model.add(Dense(input_shape=(y_pred_in.shape[1:]), units=500))
                model.add(Dropout(0.2))
                model.add(Dense(units=250))
                model.add(Dropout(0.2))
                model.add(Dense(units=10))
                model.add(Dense(units=1, activation="sigmoid"))

                model.compile(optimizer="Adam",
                              metrics=["accuracy"],
                              loss="binary_crossentropy")
                self.featuremap_attacker = model
            # Concatenate member/non-member predictions and fit with
            # 0/1 labels in the same order.
            self.featuremap_attacker.fit(np.concatenate(
                (y_pred_in, y_pred_out), axis=0),
                                         np.concatenate(
                                             (np.zeros(len(y_pred_in)),
                                              np.ones(len(y_pred_out)))),
                                         validation_data=validation_data,
                                         epochs=self.featuremap_mia_epochs,
                                         verbose=1)
            return self.featuremap_attacker
Esempio n. 22
0
    def build_generator(self):
        """Build the conditional GAN generator.

        A Sequential core maps a latent vector to a 28x28-style image via
        a dense projection, reshape, and two upsampling conv blocks.  The
        returned functional Model takes [noise, class label] and feeds the
        core with noise multiplied element-wise by a label embedding.
        """
        model = Sequential()
        model.name = "generator"

        # Project latent vector to a 7x7x128 tensor.
        model.add(
            Dense(128 * 7 * 7, activation="relu", input_dim=self.latent_dim))
        model.add(Reshape((7, 7, 128)))
        model.add(BatchNormalization(momentum=0.8))
        # Upsample 7x7 -> 14x14 -> 28x28 with conv + BN between steps.
        model.add(UpSampling2D())
        model.add(Conv2D(128, kernel_size=3, padding="same"))
        model.add(Activation("relu"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(UpSampling2D())
        model.add(Conv2D(64, kernel_size=3, padding="same"))
        model.add(Activation("relu"))
        model.add(BatchNormalization(momentum=0.8))
        # Final conv produces self.channels output maps in [-1, 1] (tanh).
        model.add(Conv2D(self.channels, kernel_size=3, padding='same'))
        model.add(Activation("tanh"))

        model.summary()

        # Condition on the class label: embed it into latent space and
        # multiply element-wise with the noise vector.
        noise = Input(shape=(self.latent_dim, ))
        label = Input(shape=(1, ), dtype='int32')
        label_embedding = Flatten()(Embedding(self.num_classes,
                                              self.latent_dim)(label))

        model_input = multiply([noise, label_embedding])
        img = model(model_input)

        return Model([noise, label], img)
Esempio n. 23
0
def cnn_cifar_small_batchnormalisation(image_shape=(5, 240, 160)):
    '''
    because of the overfitting problem, we reduce the number of filters to half
    :param image_shape:
    :return:
    '''
    model = Sequential()

    def conv(n_filters, batch_norm, **kwargs):
        # 3x3 same-padded conv + ReLU, optionally followed by
        # channel-axis batch normalisation.
        model.add(Conv2D(n_filters, (3, 3), padding='same',
                         activation='relu', **kwargs))
        if batch_norm:
            model.add(BatchNormalization(axis=1))

    conv(16, True, input_shape=image_shape)
    conv(16, False)
    model.add(MaxPooling2D(pool_size=(2, 2)))

    conv(32, True)
    conv(32, True)
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Flatten())
    model.add(Dense(256, activation='relu'))
    model.add(BatchNormalization(axis=1))
    model.add(Dense(2))

    model.name = 'cnn_cifar_small_batchnormalisation'
    return model
Esempio n. 24
0
def build_model(num_conv_layers=1, name=None):
    """Build a small conv net with ``num_conv_layers`` conv/BN/pool stages
    followed by an L2-regularised dense head with a sigmoid output.

    args:
        num_conv_layers (int): number of Conv2D + BatchNorm + MaxPool
            stages.  Each stage halves the spatial size of the fixed
            64x64 input, so this must stay small enough that the feature
            map remains non-empty.
        name (str): optional model name; defaults to
            "sequential_<num_conv_layers>".

    returns:
        model (keras Sequential): the network (uncompiled)
    """
    model = Sequential()
    model.name = name if name is not None else "sequential_" + str(num_conv_layers)

    for i in range(num_conv_layers):
        # Fix: only the first layer should declare input_shape.  The
        # original passed input_shape=(64,64,3) to every conv in the
        # loop, where Keras silently ignores it after the first layer.
        if i == 0:
            model.add(Convolution2D(32, kernel_size=(3,3), padding='same',
                                    input_shape=(64,64,3), activation='relu'))
        else:
            model.add(Convolution2D(32, kernel_size=(3,3), padding='same',
                                    activation='relu'))
        model.add(BatchNormalization())
        model.add(MaxPooling2D(pool_size=(2,2)))
    model.add(Flatten())
    model.add(Dense(128, activation='relu', kernel_regularizer=l2(0.01)))
    model.add(Dropout(rate=0.2))
    model.add(Dense(1, activation='sigmoid', kernel_regularizer=l2(0.01)))
    return model
def create_conv_bidirect_lstm_model(vocab_size, embedding_dimension,
                                    num_timesteps, num_conv_filters,
                                    num_classes):
    """Embedding -> 1-wide Conv1D -> max-pool -> BiLSTM -> per-timestep
    softmax tagger.

    Compiled with Adam (lr 0.01, decay 1e-4), categorical cross-entropy,
    and temporal sample weighting; prints the summary before returning.
    """
    model = Sequential()
    model.add(Embedding(vocab_size, embedding_dimension,
                        input_length=num_timesteps))
    model.add(Conv1D(num_conv_filters, 1, strides=1, activation='relu',
                     padding='same'))
    model.add(MaxPooling1D(pool_size=2))
    model.add(Bidirectional(LSTM(50, return_sequences=True,
                                 kernel_regularizer=l2(0.01))))
    model.add(Dropout(0.2))
    model.add(TimeDistributed(Dense(num_classes, activation='softmax')))

    model.compile(loss='categorical_crossentropy',
                  optimizer=Adam(lr=0.01,
                                 beta_1=0.9,
                                 beta_2=0.999,
                                 epsilon=None,
                                 decay=0.0001,
                                 amsgrad=False),
                  metrics=['categorical_accuracy'],
                  sample_weight_mode="temporal")

    model.name = 'embed_conv_mp_bilstm_dense_adam'

    model.summary()

    return model
Esempio n. 26
0
def VGGCAM(nb_classes, num_input_channels=1024):
    """
    Build the VGG16-CAM network: the VGG16 convolutional trunk, an extra
    same-padded conv layer, global average pooling, and a softmax layer.

    args : nb_classes (int) number of classes
           num_input_channels (int) filters in the extra CAM conv layer

    returns : model (keras NN) the Neural Net model
    """

    model = Sequential()
    # First VGG stage carries the input shape.
    model.add(ZeroPadding2D((1, 1), input_shape=(224, 224, 3)))
    model.add(Convolution2D(64, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(64, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    # Remaining VGG16 stages: (filters, convs per stage, pool at end?).
    # The last 512 stage has no pooling so the CAM layer sees 14x14 maps.
    for n_filters, n_convs, pool in ((128, 2, True), (256, 3, True),
                                     (512, 3, True), (512, 3, False)):
        for _ in range(n_convs):
            model.add(ZeroPadding2D((1, 1)))
            model.add(Convolution2D(n_filters, 3, 3, activation='relu'))
        if pool:
            model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    # Extra conv layer with ReLU, then global average pooling (GAP).
    model.add(
        Convolution2D(num_input_channels,
                      3,
                      3,
                      activation='relu',
                      border_mode="same"))
    model.add(AveragePooling2D((14, 14)))
    model.add(Flatten())
    # The class-weight ("W") layer.
    model.add(Dense(nb_classes, activation='softmax'))

    model.name = "VGGCAM"

    return model
Esempio n. 27
0
File: Cifar.py Progetto: Ludvins/VC
def complex_base_net_model_norm_before_relu():
    """BaseNet variant that applies BatchNormalization before each ReLU.

    Two conv blocks (32 and 64 filters) with pooling and dropout, then a
    512-unit dense layer and a 25-way softmax head.
    """
    model = Sequential()
    model.name = "Modelo BaseNet con BatchNormalization antes de Activacion"

    def conv_bn_relu(n_filters, **kwargs):
        # Conv -> BN -> ReLU (normalisation before the activation).
        model.add(Conv2D(n_filters, (3, 3), **kwargs))
        model.add(BatchNormalization())
        model.add(Activation('relu'))

    conv_bn_relu(32, padding='same', input_shape=(32, 32, 3))
    conv_bn_relu(32)
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    conv_bn_relu(64, padding='same')
    conv_bn_relu(64)
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    model.add(Flatten())
    model.add(Dense(512))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(25, activation='softmax'))

    return model
Esempio n. 28
0
def create_alex_net(input_shape=(227,227,3), num_classes=10):
    """Build an AlexNet-style CNN (five conv layers + three FC layers).

    args : input_shape (tuple) height, width, channels of the input
           num_classes (int) units in the final softmax layer

    returns : model (keras Sequential) the network, named 'alex-net'

    NOTE(review): BatchNormalization stands in for the original local
    response normalisation, full_1 is implemented as a 6x6 Conv2D rather
    than a Dense layer, and full_3 is a 1000-unit ReLU layer followed by
    an extra num_classes softmax (adapted for MNIST) -- confirm these
    deviations from the original AlexNet are intentional.
    """
    model = Sequential()
    # 1
    # conv1
    # output dim = (227-11)/4 + 1 = 55 --> 55*55*96
    model.add(Conv2D(96, (11,11), strides=4, activation='relu', input_shape=input_shape))
    print("conv_1(input): ", model.output_shape)
    # normalize layers
    model.add(BatchNormalization())
    # max_pool_1 27*27*96
    model.add(MaxPool2D(pool_size=(3,3), strides=2))
    print("max_pool_1: ", model.output_shape)
    # 2
    # conv2 27*27*256
    model.add(Conv2D(256, (5,5), padding='same', activation='relu'))
    print("conv_2: ", model.output_shape)
    # normalize layers
    model.add(BatchNormalization())
    # max_pool_2 13*13*256
    model.add(MaxPool2D(pool_size=(3,3), strides=2))
    print("max_pool_2: ", model.output_shape)
    # 3
    # conv3 13*13*384
    model.add(Conv2D(384, (3,3), padding='same', activation='relu'))
    print("conv_3: ", model.output_shape)
    # 4
    # conv4 13*13*384
    model.add(Conv2D(384, (3,3), padding='same', activation='relu'))
    print("conv_4: ", model.output_shape)
    # 5
    # conv5 13*13*256
    model.add(Conv2D(256, (3,3), padding='same', activation='relu'))
    print("conv_5: ", model.output_shape)
    # max_pool_3 6*6*256
    model.add(MaxPool2D(pool_size=(3,3), strides=2))
    print("max_pool_3", model.output_shape)

    # 6
    # full_1 (6x6 conv over the 6x6 feature map acts as a dense layer)
    model.add(Conv2D(4096, (6,6), activation='relu'))
    print("full_1: ", model.output_shape)
    model.add(Dropout(0.5))
    model.add(Flatten())
    print("flatten: ", model.output_shape)
    # 7
    # full_2
    model.add(Dense(4096, activation='relu'))
    print("full_2: ", model.output_shape)
    model.add(Dropout(0.5))
    # 8
    # full_3
    #model.add(Dense(1000, activation='softmax'))
    # change for apply mnist
    model.add(Dense(1000, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(num_classes, activation='softmax'))
    print("full_3(output): ", model.output_shape)
    model.name = 'alex-net'
    return model
Esempio n. 29
0
def emb_bgru(hidden_units, words_vocabulary_size):
    """Embedding -> bidirectional GRU -> per-step NUM_CLASSES softmax."""
    model = Sequential()
    model.add(Embedding(words_vocabulary_size, hidden_units))
    model.add(Bidirectional(GRU(hidden_units, return_sequences=True)))
    model.add(Dense(NUM_CLASSES, activation='softmax'))
    # Print the layer summary, as the original did, before naming.
    model.summary()
    model.name = 'emb_bgru_{}'.format(hidden_units)
    return model
# Example 30 (0 votes)
def make_model(model_name, input_shape, batch_size, num_epochs, x_train,
               y_train):
    """
    Train the data with a DNN model.

    Parameters
    ----------
    model_name : str
        name of the model (also used as the saved file name)
    input_shape : shape
        shape of the input data
    batch_size : int
        batch size
    num_epochs : int
        number of epochs
    x_train : np.array
        training inputs
    y_train : np.array
        training targets

    Returns
    -------
    model, history
        the built model and the history recording the training run
    """

    model = Sequential()
    model.name = model_name
    # Simple 100-unit hidden layer feeding a single sigmoid output
    # (binary classification, hence binary_crossentropy below).
    model.add(Dense(100, input_shape=input_shape))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))
    if Const.DEBUG_MODE == 1:
        model.summary()
    model.compile(loss='binary_crossentropy',
                  optimizer="adam",
                  metrics=["accuracy"])

    file_path = os.path.join(Const.MODEL_DIR, model_name + '.hdf5')
    # Checkpoint keeps only the best weights (by training loss) seen so far.
    checkpoint = ModelCheckpoint(file_path,
                                 monitor='loss',
                                 verbose=0,
                                 save_weights_only=True,
                                 save_best_only=True,
                                 mode='auto',
                                 period=1)
    tensor_board = TensorBoard(log_dir='logs/',
                               histogram_freq=0,
                               batch_size=batch_size)
    history = model.fit(x=x_train,
                        y=y_train,
                        batch_size=batch_size,
                        epochs=num_epochs,
                        verbose=Const.DEBUG_MODE,
                        callbacks=[checkpoint, tensor_board],
                        validation_split=0.2)
    # NOTE(review): this final save writes the full last-epoch model to the
    # SAME path as the best-only weights checkpoint above, overwriting it —
    # confirm that discarding the best-loss weights is intended.
    model.save(file_path)

    return model, history
# Example 31 (0 votes)
def CNN(nb_classes, img_dim, pretr_weights_file=None, model_name=None):
    """
    Build Convolution Neural Network

    args : nb_classes (int) number of classes
           img_dim (tuple of int) num_chan, height, width
           pretr_weights_file (str) file holding pre trained weights
           model_name (str) optional name given to the model

    returns : model (keras NN) the Neural Net model
    """

    model = Sequential()
    # Fix: honour the img_dim argument instead of a hard-coded
    # (3, 224, 224) input shape, matching the documented contract and the
    # sibling VGG builder (callers passing (3, 224, 224) are unaffected).
    model.add(Convolution2D(32, 3, 3, name="convolution2d_1", input_shape=img_dim, border_mode="same", activation='relu'))
    model.add(Convolution2D(32, 3, 3, name="convolution2d_2", border_mode="same", activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2), name="maxpooling2d_1"))

    model.add(Convolution2D(64, 3, 3, name="convolution2d_3", border_mode="same", activation='relu'))
    model.add(Convolution2D(64, 3, 3, name="convolution2d_4", border_mode="same", activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2), name="maxpooling2d_2"))

    model.add(Convolution2D(128, 3, 3, name="convolution2d_5", border_mode="same", activation='relu'))
    model.add(Convolution2D(128, 3, 3, name="convolution2d_6", border_mode="same", activation='relu'))
    model.add(Convolution2D(128, 3, 3, name="convolution2d_7", border_mode="same", activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2), name="maxpooling2d_3"))

    # Fully connected classifier head
    model.add(Flatten(name="flatten_1"))
    model.add(Dense(1024, activation='relu', name="dense_1"))
    model.add(Dropout(0.5, name="dropout_1"))
    model.add(Dense(1024, activation='relu', name="dense_2"))
    model.add(Dropout(0.5, name="dropout_2"))
    model.add(Dense(nb_classes, activation='softmax', name="dense_3"))

    if model_name:
        model.name = model_name
    else:
        model.name = "CNN"

    if pretr_weights_file:
        # Transfer learning: load pre-trained weights, drop the old softmax
        # head, and append a fresh nb_classes-way classifier.
        model.load_weights(pretr_weights_file)
        model.layers.pop()
        model.outputs = [model.layers[-1].output]
        model.layers[-1].outbound_nodes = []
        model.add(Dense(nb_classes, activation='softmax', name="dense_4"))

    return model
def VGGCAM(nb_classes, num_input_channels=1024):
    """
    Build Convolution Neural Network

    args : nb_classes (int) number of classes
           num_input_channels (int) filters of the extra conv layer feeding
                    the global average pooling

    returns : model (keras NN) the Neural Net model
    """

    model = Sequential()

    # VGG16-style stack described as (nb_filter, nb_conv, pool after block?);
    # the final block is NOT pooled so 14x14 maps reach the CAM head.
    vgg_blocks = [(64, 2, True),
                  (128, 2, True),
                  (256, 3, True),
                  (512, 3, True),
                  (512, 3, False)]
    first_layer = True
    for nb_filter, nb_conv, add_pool in vgg_blocks:
        for _ in range(nb_conv):
            if first_layer:
                # Only the very first layer carries the input shape.
                model.add(ZeroPadding2D((1, 1), input_shape=(3, 224, 224)))
                first_layer = False
            else:
                model.add(ZeroPadding2D((1, 1)))
            model.add(Convolution2D(nb_filter, 3, 3, activation='relu'))
        if add_pool:
            model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    # Add another conv layer with ReLU + GAP
    model.add(Convolution2D(num_input_channels, 3, 3, activation='relu', border_mode="same"))
    model.add(AveragePooling2D((14, 14)))
    model.add(Flatten())
    # Add the W layer
    model.add(Dense(nb_classes, activation='softmax'))

    model.name = "VGGCAM"

    return model
# Example 33 (0 votes)
def vgg_std16_model():
    """Build a VGG16-style conv stack topped by a GAP (CAM) head and a
    10-way softmax classifier."""
    model = Sequential()

    # (filters, conv repeats, pool after block?) — last block unpooled.
    layout = [(64, 2, True), (128, 2, True), (256, 3, True),
              (512, 3, True), (512, 3, False)]
    is_first = True
    for filters, repeats, pooled in layout:
        for _ in range(repeats):
            if is_first:
                # First layer declares the input shape.
                model.add(ZeroPadding2D((1, 1), input_shape=(3, 224, 224)))
                is_first = False
            else:
                model.add(ZeroPadding2D((1, 1)))
            model.add(Convolution2D(filters, 3, 3, activation='relu'))
        if pooled:
            model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    # Add another conv layer with ReLU + GAP
    model.add(Convolution2D(1024, 3, 3, activation='relu', border_mode="same"))
    model.add(AveragePooling2D((14, 14)))
    model.add(Flatten())

    model.add(Dense(10, activation='softmax'))
    model.name = "VGGCAM"
    return model
# Example 34 (0 votes)
def VGG(nb_classes, img_dim, pretr_weights_file=None, model_name=None):
    """
    Build Convolution Neural Network

    args : nb_classes (int) number of classes
           img_dim (tuple of int) num_chan, height, width
           pretr_weights_file (str) file holding pre trained weights

    returns : model (keras NN) the Neural Net model
    """

    model = Sequential()

    # VGG16 convolutional stack: every conv is a zero-padded 3x3 + ReLU,
    # every block ends with a 2x2 stride-2 max pooling. Counters reproduce
    # the explicit layer names zeropadding2d_1..13, convolution2d_1..13
    # and maxpooling2d_1..5.
    n_pad = n_conv = n_pool = 0
    for nb_filter, block_size in [(64, 2), (128, 2), (256, 3), (512, 3), (512, 3)]:
        for _ in range(block_size):
            n_pad += 1
            n_conv += 1
            if n_pad == 1:
                # First layer carries the input shape.
                model.add(ZeroPadding2D((1, 1), input_shape=img_dim, name="zeropadding2d_1"))
            else:
                model.add(ZeroPadding2D((1, 1), name="zeropadding2d_{}".format(n_pad)))
            model.add(Convolution2D(nb_filter, 3, 3, activation='relu', name="convolution2d_{}".format(n_conv)))
        n_pool += 1
        model.add(MaxPooling2D((2, 2), strides=(2, 2), name="maxpooling2d_{}".format(n_pool)))

    # Fully connected classifier head (ImageNet-style 1000-way softmax).
    model.add(Flatten(name="flatten_1"))
    model.add(Dense(4096, activation='relu', name="dense_1"))
    model.add(Dropout(0.5, name="dropout_1"))
    model.add(Dense(4096, activation='relu', name="dense_2"))
    model.add(Dropout(0.5, name="dropout_2"))
    model.add(Dense(1000, activation='softmax', name="dense_3"))

    if model_name:
        model.name = model_name
    else:
        model.name = "VGG"

    if pretr_weights_file:
        # Transfer learning: load weights, drop the 1000-way head and
        # append a fresh nb_classes-way softmax.
        model.load_weights(pretr_weights_file)
        model.layers.pop()
        model.outputs = [model.layers[-1].output]
        model.layers[-1].outbound_nodes = []
        model.add(Dense(nb_classes, activation='softmax', name="dense_4"))

    # Freeze layers until specified number
    # for k in range(freeze_until):
    #     model.layers[k].trainable = True

    return model
# Example 35 (0 votes)
def VGGCAM(nb_classes, img_dim, pretr_weights_file=None, model_name=None):
    """
    Build VGGCAM network

    args : nb_classes (int) number of classes
           img_dim (tuple of int) num_chan, height, width
           pretr_weights_file (str) file holding pre trained weights

    returns : model (keras NN) the Neural Net model

    NOTE(review): this redefines the VGGCAM declared earlier in the file
    with a different signature; the later definition wins at import time.
    """

    model = Sequential()

    # VGG16 conv stack with explicit layer names; the last block is NOT
    # pooled so the 14x14 feature maps survive for the CAM average pooling.
    # Counters reproduce zeropadding2d_1..13, convolution2d_1..13 and
    # maxpooling2d_1..4.
    pad_no = conv_no = pool_no = 0
    for nb_filter, depth, pooled in [(64, 2, True), (128, 2, True),
                                     (256, 3, True), (512, 3, True),
                                     (512, 3, False)]:
        for _ in range(depth):
            pad_no += 1
            conv_no += 1
            if pad_no == 1:
                # First layer carries the input shape.
                model.add(ZeroPadding2D((1, 1), input_shape=img_dim, name="zeropadding2d_1"))
            else:
                model.add(ZeroPadding2D((1, 1), name="zeropadding2d_{}".format(pad_no)))
            model.add(Convolution2D(nb_filter, 3, 3, activation='relu', name="convolution2d_{}".format(conv_no)))
        if pooled:
            pool_no += 1
            model.add(MaxPooling2D((2, 2), strides=(2, 2), name="maxpooling2d_{}".format(pool_no)))

    # Add another conv layer with ReLU + GAP
    model.add(Convolution2D(1024, 3, 3, activation='relu', border_mode="same", name="convolution2d_14"))
    model.add(AveragePooling2D((14, 14), name="average_pooling2d_1"))
    model.add(Flatten(name="flatten_1"))
    # Add the W layer
    model.add(Dense(10, activation='softmax', name="dense_1"))

    if model_name:
        model.name = model_name
    else:
        model.name = "VGGCAM"

    if pretr_weights_file:
        # Copy pre-trained weights layer by layer, stopping after
        # convolution2d_13 so the CAM head keeps its fresh weights.
        with h5py.File(pretr_weights_file) as hw:
            for k in range(hw.attrs['nb_layers']):
                g = hw['layer_{}'.format(k)]
                weights = [g['param_{}'.format(p)] for p in range(g.attrs['nb_params'])]
                model.layers[k].set_weights(weights)
                if model.layers[k].name == "convolution2d_13":
                    break
    return model