Example 1
# Assumed imports for this snippet (Keras 2-style API):
from keras.layers import Conv2D, Activation, BatchNormalization
from keras.regularizers import l2


def dn_layer(inputs,
             name,
             num_filters=16,
             kernel_size=3,
             strides=1,
             activation='relu',
             conv_first=True):
    """2D Convolution-Batch Normalization-Activation stack builder
    # Arguments
        inputs (tensor): input tensor from input image or previous layer
        num_filters (int): Conv2D number of filters
        kernel_size (int): Conv2D square kernel dimensions
        strides (int): Conv2D square stride dimensions
        activation (string): activation name
    # Returns
        x (tensor): tensor as input to the next layer
    """
    x = Conv2D(num_filters,
               kernel_size=kernel_size,
               strides=strides,
               padding='same',
               kernel_initializer='he_normal',
               kernel_regularizer=l2(1e-4),
               name=name + '_con')(inputs)
    #x = BatchNormalization(name = name + '_bn')(x)
    x = Activation(activation, name=name + '_act')(x)
    return x
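
A minimal usage sketch (not from the original source): chaining dn_layer with the Keras functional API. The input shape is an arbitrary placeholder.

from keras.layers import Input
from keras.models import Model

inputs = Input(shape=(32, 32, 3))                         # placeholder shape
x = dn_layer(inputs, name='block1')
x = dn_layer(x, name='block2', num_filters=32, strides=2)
model = Model(inputs=inputs, outputs=x)
model.summary()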
Example 2
# Assumed imports (Keras 2-style API); NB_WORDS and MAXLEN are constants
# defined elsewhere in the original source.
from keras.models import Sequential
from keras.layers import Embedding, LSTM, Dense, Dropout
from keras.regularizers import l2
from keras.optimizers import Adam


def get_model(idrop=0.2,
              edrop=0.1,
              odrop=0.25,
              rdrop=0.2,
              weight_decay=1e-4,
              lr=1e-3):
    model = Sequential()
    model.add(
        Embedding(NB_WORDS,
                  128,
                  embeddings_regularizer=l2(weight_decay),
                  input_length=MAXLEN))
    if edrop:
        model.add(Dropout(edrop))
    model.add(
        LSTM(128,
             kernel_regularizer=l2(weight_decay),
             recurrent_regularizer=l2(weight_decay),
             bias_regularizer=l2(weight_decay),
             dropout=idrop,
             recurrent_dropout=rdrop))
    if odrop:
        model.add(Dropout(odrop))
    model.add(
        Dense(1,
              kernel_regularizer=l2(weight_decay),
              bias_regularizer=l2(weight_decay)))
    optimizer = Adam(lr)
    model.compile(loss='mse', metrics=["mse"], optimizer=optimizer)
    return model
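
A minimal usage sketch for this regression variant; NB_WORDS and MAXLEN come from the surrounding source, so the values below are placeholder assumptions.

NB_WORDS, MAXLEN = 10000, 80   # assumed vocabulary size and sequence length
model = get_model(lr=5e-4)
model.summary()
# model.fit(x_train, y_train, batch_size=128, epochs=10, validation_split=0.1)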
Example 3
# Assumed imports as in Example 2; WEIGHT_DECAY, NB_WORDS and MAXLEN are
# defined elsewhere in the original source (WEIGHT_DECAY must exist before
# this def runs, since it is read as a default argument value).
def get_model(idrop=0.2,
              edrop=0.1,
              odrop=0.25,
              rdrop=0.2,
              weight_decay=WEIGHT_DECAY):
    model = Sequential()
    model.add(
        Embedding(
            NB_WORDS,
            128,
            embeddings_regularizer=l2(weight_decay),
            input_length=MAXLEN))  # , batch_input_shape=(batch_size, maxlen)))
    if edrop:
        model.add(Dropout(edrop))
    model.add(
        LSTM(128,
             kernel_regularizer=l2(weight_decay),
             recurrent_regularizer=l2(weight_decay),
             bias_regularizer=l2(weight_decay),
             dropout=idrop,
             recurrent_dropout=rdrop))
    if odrop:
        model.add(Dropout(odrop))
    model.add(
        Dense(1,
              kernel_regularizer=l2(weight_decay),
              bias_regularizer=l2(weight_decay),
              activation='sigmoid'))
    optimizer = Adam(1e-3)
    model.compile(loss='binary_crossentropy',
                  metrics=["binary_accuracy"],
                  optimizer=optimizer)
    return model
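
A matching usage sketch for this binary-classification variant; all constants below are placeholders, and the commented lines only illustrate how the sigmoid output is typically consumed.

NB_WORDS, MAXLEN = 10000, 80   # assumed vocabulary size and sequence length
model = get_model()
model.summary()
# y_prob = model.predict(x_test)            # sigmoid probabilities in [0, 1]
# y_pred = (y_prob > 0.5).astype('int32')   # hard 0/1 labels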
Example 4
# Assumed imports (Keras 2-style API); `train` is the training array loaded
# elsewhere in the original source.
from keras.models import Sequential
from keras.layers import Convolution2D, BatchNormalization, Dropout, Activation
from keras.regularizers import l2

N_train = len(train)
vector_dim = train.shape[2]

epochs = 100
batch_size = 256
f_dim = train.shape[2]

model = Sequential()

model.add(
    Convolution2D(32, (2, 2),
                  2,
                  padding='same',
                  input_shape=train.shape[1:],
                  kernel_regularizer=l2(0.005),
                  use_bias=False))
model.add(BatchNormalization(axis=3))
model.add(Dropout(0.3))
model.add(
    Convolution2D(64, (2, 2),
                  2,
                  padding='same',
                  kernel_regularizer=l2(0.005),
                  use_bias=False))
model.add(BatchNormalization(axis=3))
model.add(Dropout(0.3))
model.add(Activation("relu"))

model.add(
    Convolution2D(128, (2, 2),
                  2,
                  padding='same',
                  kernel_regularizer=l2(0.005),
                  use_bias=False))  # assumed completion; the original snippet is truncated here
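
A side note on the use_bias=False pattern above: a convolution feeding straight into BatchNormalization can drop its own bias, because BN's learned offset (beta) absorbs it. A self-contained sketch:

from keras.models import Sequential
from keras.layers import Convolution2D, BatchNormalization

demo = Sequential()
demo.add(Convolution2D(16, (2, 2), padding='same',
                       use_bias=False, input_shape=(8, 8, 1)))
demo.add(BatchNormalization(axis=3))
demo.summary()   # the conv layer carries no bias parameters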
Example 5
# Assumed imports (Keras 2-style API); `train`, `train_label` and `seq_len`
# are defined elsewhere in the original source.
from keras.models import Sequential
from keras.layers import (LSTM, Bidirectional, BatchNormalization, Dense,
                          Activation, Dropout)
from keras.regularizers import l2
from keras.callbacks import CSVLogger

N_train = len(train)
vector_dim = train.shape[2]

epochs = 100
batch_size = 1024
f_dim = train.shape[2]
lstm1_dim = 20
lstm2_dim = 20

model = Sequential()
model.add(
    Bidirectional(LSTM(lstm1_dim, dropout=0.2, return_sequences=True),
                  input_shape=(seq_len, f_dim)))
model.add(Bidirectional(LSTM(lstm2_dim, dropout=0.2)))
model.add(BatchNormalization(axis=1))
model.add(Dense(30, kernel_regularizer=l2(0.02)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.1))
model.add(Dense(2, kernel_regularizer=l2(0.01)))
model.add(BatchNormalization())
model.add(Activation('softmax'))
model.compile(optimizer='Nadam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.summary()

csv_logger = CSVLogger('training.log')
hist = model.fit(train,
                 train_label,
                 batch_size=batch_size,
                 epochs=epochs,
                 callbacks=[csv_logger])  # assumed completion; the original snippet is truncated here
Example 6

# Assumed imports (Keras 2-style API); LRN2D and get_input_shape are custom
# helpers defined elsewhere in the original source.
from keras.models import Sequential, Model
from keras.layers import (Conv2D, MaxPooling2D, Flatten, Dense, Activation,
                          Dropout)
from keras import regularizers


def create_AlexNet(num_fc_neurons,
                   dropout_rate,
                   num_classes=24,
                   img_height=224,
                   img_width=224,
                   include_loc='all',
                   activation='softmax'):
    weight_decay = 0.0005
    kernel_regularizer = regularizers.l2(weight_decay)
    bias_regularizer = regularizers.l2(weight_decay)

    # build a convolutional model
    base_model = Sequential()
    base_model.add(
        Conv2D(96, (11, 11),
               strides=(4, 4),
               padding='valid',
               activation='relu',
               kernel_regularizer=kernel_regularizer,
               bias_regularizer=bias_regularizer,
               name='conv1',
               input_shape=get_input_shape(img_height, img_width)))
    base_model.add(LRN2D(name='lrn1'))
    base_model.add(MaxPooling2D((3, 3), strides=(2, 2), name='pool1'))

    base_model.add(
        Conv2D(256, (5, 5),
               strides=(1, 1),
               padding='same',
               activation='relu',
               kernel_regularizer=kernel_regularizer,
               bias_regularizer=bias_regularizer,
               name='conv2'))
    base_model.add(LRN2D(name='lrn2'))
    base_model.add(MaxPooling2D((3, 3), strides=(2, 2), name='pool2'))

    base_model.add(
        Conv2D(384, (3, 3),
               strides=(1, 1),
               padding='same',
               activation='relu',
               kernel_regularizer=kernel_regularizer,
               bias_regularizer=bias_regularizer,
               name='conv3'))
    base_model.add(
        Conv2D(384, (3, 3),
               strides=(1, 1),
               padding='same',
               activation='relu',
               kernel_regularizer=kernel_regularizer,
               bias_regularizer=bias_regularizer,
               name='conv4'))
    base_model.add(
        Conv2D(256, (3, 3),
               strides=(1, 1),
               padding='same',
               activation='relu',
               kernel_regularizer=kernel_regularizer,
               bias_regularizer=bias_regularizer,
               name='conv5'))
    base_model.add(MaxPooling2D((3, 3), strides=(2, 2), name='pool3'))

    # build a classifier model to put on top of the convolutional model
    top_model = Sequential()
    top_model.add(Flatten(input_shape=base_model.output_shape[1:]))
    for i in range(6, 8):
        top_model.add(
            Dense(num_fc_neurons,
                  kernel_regularizer=kernel_regularizer,
                  bias_regularizer=bias_regularizer,
                  name='fc' + str(i)))
        #top_model.add(BatchNormalization(axis=1, name='fc'+str(i)+'_bn'))
        top_model.add(Activation('relu', name='fc' + str(i) + '_ac'))
        top_model.add(Dropout(dropout_rate))
    top_model.add(
        Dense(num_classes,
              activation=activation,
              kernel_regularizer=kernel_regularizer,
              bias_regularizer=bias_regularizer,
              name='predictions'))

    if include_loc == 'base':
        model = base_model
    elif include_loc == 'top':
        model = top_model
    elif include_loc == 'all':  # add the model on top of the convolutional base
        model = Model(inputs=base_model.input,
                      outputs=top_model(base_model.output))
    else:
        raise ValueError('Only "base", "top" and "all" can be included.')
    return model
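
create_AlexNet depends on two helpers not shown here: LRN2D (a local-response-normalization layer) and get_input_shape. A plausible get_input_shape, offered purely as an assumption about the missing code:

from keras import backend as K

def get_input_shape(img_height, img_width, channels=3):
    # Channel ordering depends on the configured image data format.
    if K.image_data_format() == 'channels_first':
        return (channels, img_height, img_width)
    return (img_height, img_width, channels)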
Example 7
# Assumed imports (Keras 2-style API); `train`, `train_label` and `seq_len`
# come from the surrounding source.
from keras.models import Sequential
from keras.layers import (Convolution1D, MaxPooling1D, LSTM, Bidirectional,
                          BatchNormalization, Dense, Activation, Dropout)
from keras.regularizers import l2

#train = np.r_[train, test]
#train_label = np.r_[train_label, test_label]

epochs = 100
batch_size = 256
f_dim = train.shape[2]
lstm1_dim = 20
lstm2_dim = 20

model = Sequential()
model.add(Convolution1D(8, 4, input_shape=(seq_len, f_dim)))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(MaxPooling1D(3))
model.add(Convolution1D(12, 4, kernel_regularizer=l2(0.05)))
model.add(Activation('relu'))
model.add(Dropout(0.4))
model.add(MaxPooling1D(3))
model.add(Convolution1D(18, 4, kernel_regularizer=l2(0.04)))
model.add(Activation('relu'))
model.add(Dropout(0.3))
model.add(MaxPooling1D(3))
model.add(Bidirectional(LSTM(lstm1_dim, dropout=0.2, return_sequences=True)))
model.add(Bidirectional(LSTM(lstm2_dim, dropout=0.2)))
model.add(BatchNormalization(axis=1))
model.add(Dense(30, kernel_regularizer=l2(0.02)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.1))
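
The snippet is truncated here, before the output layer and compilation. By analogy with Example 5, which shares the same tail, a plausible continuation (an assumption, not the original code) is:

model.add(Dense(2, kernel_regularizer=l2(0.01)))   # assumed, mirroring Example 5
model.add(BatchNormalization())
model.add(Activation('softmax'))
model.compile(optimizer='Nadam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])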
Example 8
# Assumed imports (Keras 2-style API); img_rows, img_cols, channels,
# num_classes, num_filters, use_max_pool and num_sub_blocks are set earlier
# in the original source.
import keras
from keras.layers import (Input, Conv2D, BatchNormalization, Activation,
                          MaxPooling2D)
from keras.regularizers import l2

# The start of this snippet is truncated; these lines are the channels-last
# branch of the usual image-format reshape.
x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, channels)
input_shape = (img_rows, img_cols, channels)

x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255

y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)

inputs = Input(shape=input_shape)
x = Conv2D(num_filters,
           kernel_size=7,
           padding='same',
           strides=2,
           kernel_initializer='he_normal',
           kernel_regularizer=l2(1e-4))(inputs)
x = BatchNormalization()(x)
x = Activation('relu')(x)

if use_max_pool:
    x = MaxPooling2D(pool_size=3, strides=2, padding='same')(x)
    num_blocks = 3  # note: when use_max_pool is False, num_blocks must be set elsewhere

# convolutional base (stack of blocks).
for i in range(num_blocks):
    for j in range(num_sub_blocks):
        strides = 1
        is_first_layer_but_not_first_block = j == 0 and i > 0

        if is_first_layer_but_not_first_block:
            strides = 2
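
        # The original snippet ends here. In the stock Keras ResNet v1
        # example this loop usually continues along these lines (a sketch
        # offered as an assumption, not the author's code):
        y = Conv2D(num_filters, kernel_size=3, strides=strides,
                   padding='same', kernel_initializer='he_normal',
                   kernel_regularizer=l2(1e-4))(x)
        y = BatchNormalization()(y)
        y = Activation('relu')(y)
        y = Conv2D(num_filters, kernel_size=3, padding='same',
                   kernel_initializer='he_normal',
                   kernel_regularizer=l2(1e-4))(y)
        y = BatchNormalization()(y)
        if is_first_layer_but_not_first_block:
            # match dimensions on the shortcut path with a 1x1 convolution
            x = Conv2D(num_filters, kernel_size=1, strides=strides,
                       padding='same', kernel_initializer='he_normal',
                       kernel_regularizer=l2(1e-4))(x)
        x = keras.layers.add([x, y])
        x = Activation('relu')(x)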