Example #1
def make_discriminator_model():
    model = tf.keras.Sequential()

    model.add(layers.Input(shape=(1, 188)))
    # model.add(layers.Input(shape=(1, 187)))
    model.add(layers.Permute((2, 1)))

    model.add(layers.Conv1D(filters=32, kernel_size=16, strides=1, padding='same'))
    model.add(layers.LeakyReLU())

    model.add(layers.Dropout(0.1))  # COMMENT OUT MAYBE

    model.add(layers.Conv1D(filters=64, kernel_size=16, strides=1, padding='same'))
    model.add(layers.LeakyReLU())

    model.add(layers.MaxPool1D(pool_size=2))

    model.add(layers.Conv1D(filters=128, kernel_size=16, strides=1, padding='same'))
    model.add(layers.LeakyReLU())

    model.add(layers.Dropout(0.1))  # COMMENT OUT MAYBE

    model.add(layers.Conv1D(filters=256, kernel_size=16, strides=1, padding='same'))
    model.add(layers.LeakyReLU())

    model.add(layers.MaxPool1D(pool_size=2))

    model.add(layers.Flatten())
    model.add(layers.Dense(1))

    return model
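A minimal usage sketch (not part of the original example): it assumes the imports the code above relies on, namely `tensorflow` as `tf` and `layers` from `tf.keras`.

import tensorflow as tf
from tensorflow.keras import layers

discriminator = make_discriminator_model()
fake_batch = tf.random.normal((8, 1, 188))   # batch of 8 sequences, 188 samples each
print(discriminator(fake_batch).shape)       # (8, 1): one real/fake logit per sequence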
Example #2
def text_cnn(num_classes=10,
             max_features=20000,
             maxlen=50,
             embed_size=128,
             hid_dim_conv=64,
             pool_size=32,
             hid_dim_dense=50,
             dropout_rate=0.1,
             act='softmax'):
    input_tensor = L.Input(shape=(maxlen, ), dtype='int32')
    emb1 = L.Embedding(max_features, embed_size)(input_tensor)
    conv1 = L.Conv1D(filters=hid_dim_conv, kernel_size=1, padding='same')(emb1)
    pool1 = L.MaxPool1D(pool_size=pool_size)(conv1)
    conv2 = L.Conv1D(filters=hid_dim_conv, kernel_size=2, padding='same')(emb1)
    pool2 = L.MaxPool1D(pool_size=pool_size)(conv2)
    conv3 = L.Conv1D(filters=hid_dim_conv, kernel_size=3, padding='same')(emb1)
    pool3 = L.MaxPool1D(pool_size=pool_size)(conv3)
    conv4 = L.Conv1D(filters=hid_dim_conv, kernel_size=4, padding='same')(emb1)
    pool4 = L.MaxPool1D(pool_size=pool_size)(conv4)
    concat = L.concatenate([pool1, pool2, pool3, pool4], axis=-1)  # merge the pooled feature maps from each kernel size
    flat = L.Flatten()(concat)
    dense1 = L.Dense(hid_dim_dense, activation="relu")(flat)
    drop1 = L.Dropout(dropout_rate)(dense1)
    output = L.Dense(num_classes, activation=act)(drop1)
    return Model(inputs=input_tensor, outputs=output)
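A hedged usage sketch, assuming the `L`/`Model` aliases above come from `tensorflow.keras` and that inputs are integer token ids padded to `maxlen`:

import numpy as np

model = text_cnn(num_classes=10, maxlen=50)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
dummy_tokens = np.random.randint(0, 20000, size=(4, 50))  # 4 padded sequences of token ids
print(model.predict(dummy_tokens).shape)                  # (4, 10): class probabilities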
Example #3
    def cnn1d(shape, seed):
        np.random.seed(seed)
        if tf.__version__.startswith('1.'):
            tf.set_random_seed(seed)
        else:
            tf.random.set_seed(seed)
        inputs = layers.Input(shape)
        x = layers.Conv1D(64, 3, activation='relu')(inputs)
        x = layers.Conv1D(64, 3, activation='relu')(x)
        x = layers.MaxPool1D()(x)

        x = layers.Conv1D(64, 3, activation='relu')(x)
        x = layers.Conv1D(64, 3, activation='relu')(x)
        x = layers.MaxPool1D()(x)

        x = layers.Flatten()(x)
        x = layers.Dense(64, activation='relu')(x)
        x = layers.Dropout(0.2)(x)
        x = layers.Dense(64, activation='relu')(x)
        x = layers.Dropout(0.2)(x)
        x = layers.Dense(64, activation='relu')(x)
        x = layers.Dropout(0.2)(x)

        outputs = layers.Dense(10, activation='softmax')(x)
        model = keras.Model(inputs, outputs, name="fcnn")
        return model
Example #4
def build_model(input_layer_shape, l1, l2, lr, embed_in_dim, embed_out_dim):
    """
    We need three separate inputs, one for categorical data that gets an
    embedding, one for time since data start, and one for the numerical data.
    """
    # Weekly timeseries input
    input_num = layers.Input(shape=(input_layer_shape, 1))
    conv1 = layers.Conv1D(filters=5, kernel_size=7, padding='same', activation='relu')(input_num)
    pool1 = layers.MaxPool1D()(conv1)
    conv2 = layers.Conv1D(filters=1, kernel_size=4, padding='same', activation='relu')(pool1)
    pool2 = layers.MaxPool1D(12)(conv2)
    dropo = layers.Dropout(0.5)(pool2)
    feature_layer = layers.Flatten()(dropo)

    # Time since start input (for dealing with energy efficiency changes)
    input_time = layers.Input(shape=(1,))

    # Region embbeding input
    input_cat = layers.Input(shape=(1,))
    embed_layer = layers.Embedding(embed_in_dim, embed_out_dim)(input_cat)
    embed_layer = layers.Flatten()(embed_layer)

    merged_layer = layers.Concatenate()([feature_layer, input_time, embed_layer])
    output = layers.Dense(l1, activation='relu')(merged_layer)
    output = layers.Dense(l2, activation='relu')(output)
    output = layers.Dense(1, bias_initializer=tf.keras.initializers.constant(4.0))(output)

    model = keras.models.Model(inputs=[input_num, input_time, input_cat], outputs=[output])

    optimizer = tf.keras.optimizers.RMSprop(learning_rate=lr)

    model.compile(loss='mean_squared_error',
                  optimizer=optimizer,
                  metrics=['mean_absolute_error', 'mean_squared_error'])
    return model
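A usage sketch with hypothetical hyperparameter values (none of these numbers come from the original source); the model expects its three inputs in the order [numerical series, time since start, region id]:

import numpy as np

model = build_model(input_layer_shape=104, l1=32, l2=16, lr=1e-3,
                    embed_in_dim=10, embed_out_dim=3)
x_num = np.random.rand(5, 104, 1)                # weekly time series
x_time = np.random.rand(5, 1)                    # time since data start
x_cat = np.random.randint(0, 10, size=(5, 1))    # region ids for the embedding
print(model.predict([x_num, x_time, x_cat]).shape)  # (5, 1)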
Example #5
def baseline_model():
    num_out = 6
    model = keras.Sequential()

    model.add(layers.Conv1D(128, 1, input_shape=(26, 1), activation='relu'))
    model.add(layers.Conv1D(128, 1, activation='relu'))
    model.add(layers.Conv1D(128, 1, activation='relu'))
    model.add(layers.Conv1D(128, 1, activation='relu'))
    model.add(layers.MaxPool1D(2))
    model.add(layers.Conv1D(96, 1, activation='relu'))
    model.add(layers.Conv1D(96, 1, activation='relu'))
    model.add(layers.Conv1D(96, 1, activation='relu'))
    model.add(layers.MaxPool1D(2))
    model.add(layers.Conv1D(64, 1, activation='relu'))
    model.add(layers.Conv1D(64, 1, activation='relu'))
    model.add(layers.MaxPool1D(2))
    model.add(layers.Conv1D(32, 1, activation='relu'))
    model.add(layers.Conv1D(32, 1, activation='relu'))
    model.add(layers.Flatten())
    model.add(layers.Dropout(0.2))
    model.add(layers.Dense(60, activation='relu'))
    model.add(layers.Dense(30, activation='relu'))
    model.add(layers.Dense(16, activation='relu'))
    model.add(layers.Dense(num_out, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    return model
Example #6
def cnn_base():
    model = Sequential(layers=[
        layers.Convolution1D(16,
                             kernel_size=5,
                             activation='relu',
                             padding='valid',
                             input_shape=(3000, 1)),
        layers.Convolution1D(
            16, kernel_size=5, activation='relu', padding='valid'),
        layers.MaxPool1D(pool_size=2),
        layers.SpatialDropout1D(rate=0.01),
        layers.Convolution1D(
            32, kernel_size=3, activation='relu', padding='valid'),
        layers.Convolution1D(
            32, kernel_size=3, activation='relu', padding='valid'),
        layers.MaxPool1D(pool_size=2),
        layers.SpatialDropout1D(rate=0.01),
        layers.Convolution1D(
            32, kernel_size=3, activation='relu', padding='valid'),
        layers.Convolution1D(
            32, kernel_size=3, activation='relu', padding='valid'),
        layers.MaxPool1D(pool_size=2),
        layers.Convolution1D(
            256, kernel_size=3, activation='relu', padding='valid'),
        layers.Convolution1D(
            256, kernel_size=3, activation='relu', padding='valid'),
        layers.GlobalMaxPool1D(),
        layers.Dropout(rate=0.01),
        layers.Dense(64, activation='relu'),
    ])
    model.compile(optimizer=optimizers.Adam(0.001),
                  loss=losses.sparse_categorical_crossentropy,
                  metrics=['acc'])  #,class_model='categorical'
    return model
Example #7
def create_cnn_model1():
    model = Sequential()
    # model.add(layers.Flatten(input_shape=(3000, 1)))
    model.add(layers.Input(shape=(3000, 1)))
    model.add(
        layers.Convolution1D(16,
                             kernel_size=5,
                             activation=activations.relu,
                             padding="valid"))
    model.add(
        layers.Convolution1D(16,
                             kernel_size=5,
                             activation=activations.relu,
                             padding="valid"))
    model.add(layers.MaxPool1D(pool_size=2))
    model.add(
        layers.Convolution1D(32,
                             kernel_size=3,
                             activation=activations.relu,
                             padding="valid"))
    model.add(
        layers.Convolution1D(32,
                             kernel_size=3,
                             activation=activations.relu,
                             padding="valid"))
    # model.add(layers.Convolution1D(32, kernel_size=3, activation=activations.relu, padding="valid"))
    model.add(layers.MaxPool1D(pool_size=2))
    model.add(layers.SpatialDropout1D(rate=0.01))
    model.add(
        layers.Convolution1D(32,
                             kernel_size=3,
                             activation=activations.relu,
                             padding="valid"))
    model.add(
        layers.Convolution1D(32,
                             kernel_size=3,
                             activation=activations.relu,
                             padding="valid"))
    model.add(layers.MaxPool1D(pool_size=2))
    model.add(layers.SpatialDropout1D(rate=0.01))
    model.add(
        layers.Convolution1D(256,
                             kernel_size=3,
                             activation=activations.relu,
                             padding="valid"))
    model.add(
        layers.Convolution1D(256,
                             kernel_size=3,
                             activation=activations.relu,
                             padding="valid"))
    model.add(layers.GlobalMaxPool1D())
    # model.add(layers.Flatten())
    model.add(Dense(64, activation='relu'))
    model.add(Dense(32, activation='relu'))
    model.add(Dense(5, activation=activations.softmax))
    model.compile(optimizer=optimizers.Adam(0.001),
                  loss=losses.sparse_categorical_crossentropy,
                  metrics=['acc'])
    # model.summary()
    return model
Example #8
def create_cnn_model(num_features,
                     num_classes,
                     loss_func,
                     optimizer="adam",
                     conv_layers=[64, 128],
                     kernel_sizes=[7, 5],
                     dense_layers=[64, 32],
                     dropout=0.2):

    inp = Input(shape=(num_features, 1), name='input')
    c = layers.Conv1D(conv_layers[0],
                      kernel_sizes[0],
                      padding='same',
                      activation='relu')(inp)
    c = layers.MaxPool1D(pool_size=2)(c)
    for conv_channels, kernel_size in zip(conv_layers[1:], kernel_sizes[1:]):
        c = layers.Conv1D(conv_channels,
                          kernel_size,
                          padding='same',
                          activation='relu')(c)
        c = layers.MaxPool1D(pool_size=2)(c)
    c = layers.Flatten()(c)
    for hidden_units in dense_layers:
        c = layers.Dense(hidden_units)(c)
        if dropout:
            c = layers.Dropout(dropout)(c)
    c = layers.Dense(num_classes, activation="softmax", name="prediction")(c)
    model = Model(inp, c)

    model.compile(loss=loss_func, optimizer=optimizer, metrics=['accuracy'])
    return model
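A short usage sketch with illustrative arguments (the feature count and class count are assumptions, not values from the source):

model = create_cnn_model(num_features=128,
                         num_classes=4,
                         loss_func='sparse_categorical_crossentropy')
model.summary()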
Example #9
def CNN2_LSTM_ATT(in_shape=(200, 4), num_filters=32, batch_norm=True, activation='relu', lstm_units=128, heads=8, key_size=64, dense_units=512, num_out=12):

    inputs = Input(shape=in_shape)
    nn = layers.Conv1D(filters=num_filters, kernel_size=19, use_bias=False, padding='same')(inputs)
    if batch_norm:
        nn = layers.BatchNormalization()(nn)
    nn = layers.Activation(activation, name='conv_activation')(nn)
    nn = layers.MaxPool1D(pool_size=4)(nn)
    nn = layers.Dropout(0.1)(nn)
    nn = layers.Conv1D(filters=num_filters, kernel_size=7, use_bias=False, padding='same')(nn)
    nn = layers.BatchNormalization()(nn)
    nn = layers.Activation('relu')(nn)
    nn = layers.MaxPool1D(pool_size=6)(nn)
    nn = layers.Dropout(0.1)(nn)

    forward = layers.LSTM(lstm_units//2, return_sequences=True)
    backward = layers.LSTM(lstm_units//2, activation='relu', return_sequences=True, go_backwards=True)
    nn = layers.Bidirectional(forward, backward_layer=backward)(nn)
    nn = layers.Dropout(0.1)(nn)
    
    nn, w = MultiHeadAttention(num_heads=heads, d_model=key_size)(nn, nn, nn)
    nn = layers.Dropout(0.1)(nn)

    nn = layers.Flatten()(nn)

    nn = layers.Dense(dense_units, use_bias=False)(nn)
    nn = layers.BatchNormalization()(nn)
    nn = layers.Activation('relu')(nn)
    nn = layers.Dropout(0.5)(nn)

    outputs = layers.Dense(num_out, activation='sigmoid')(nn)

    return Model(inputs=inputs, outputs=outputs)
Example #10
    def __init__(self, input_shape, **kwargs):
        super(MLP, self).__init__(**kwargs)
        # Add input layer
        self.input_layer = klayers.Input(input_shape)

        self.embedding = klayers.Embedding(10000, 7, input_length=200)
        self.conv_1 = klayers.Conv1D(16,
                                     kernel_size=5,
                                     name="conv_1",
                                     activation="relu")
        self.pool_1 = klayers.MaxPool1D()
        self.conv_2 = klayers.Conv1D(128,
                                     kernel_size=2,
                                     name="conv_2",
                                     activation="relu")
        self.pool_2 = klayers.MaxPool1D(
        )  # Defined under its own name so the second pooling output shape appears in the model summary.
        self.flatten = klayers.Flatten()
        self.dense = klayers.Dense(1, activation="sigmoid")

        # Get output layer with `call` method
        self.out = self.call(self.input_layer)

        # Re-initialize the parent class as a functional Model with explicit inputs and outputs
        super(MLP, self).__init__(inputs=self.input_layer,
                                  outputs=self.out,
                                  **kwargs)
Example #11
def CNN2(in_shape=(200, 4), num_filters=32, batch_norm=True, activation='relu', heads=8, key_size=64, dense_units=512, num_out=12):

    inputs = Input(shape=in_shape)
    nn = layers.Conv1D(filters=num_filters, kernel_size=19, use_bias=False, padding='same')(inputs)
    if batch_norm:
        nn = layers.BatchNormalization()(nn)
    nn = layers.Activation(activation, name='conv_activation')(nn)
    nn = layers.MaxPool1D(pool_size=4)(nn)
    nn = layers.Dropout(0.1)(nn)
    nn = layers.Conv1D(filters=num_filters, kernel_size=7, use_bias=False, padding='same')(nn)
    nn = layers.BatchNormalization()(nn)
    nn = layers.Activation('relu')(nn)
    nn = layers.MaxPool1D(pool_size=6)(nn)
    nn = layers.Dropout(0.1)(nn)

    nn = layers.Flatten()(nn)

    nn = layers.Dense(dense_units, use_bias=False)(nn)
    nn = layers.BatchNormalization()(nn)
    nn = layers.Activation('relu')(nn)
    nn = layers.Dropout(0.5)(nn)

    outputs = layers.Dense(num_out, activation='sigmoid')(nn)

    return Model(inputs=inputs, outputs=outputs)
Example #12
def build_model(hparams):
    """Convolutional neural network architecture."""

    l2_reg = tf.keras.regularizers.l2

    return tf.keras.models.Sequential([

        # Two convolution + maxpooling blocks
        layers.Conv1D(filters=16,
                      kernel_size=5,
                      activation=tf.nn.relu,
                      kernel_regularizer=l2_reg(hparams.l2)),
        layers.MaxPool1D(pool_size=3, strides=1),
        layers.Conv1D(filters=16,
                      kernel_size=3,
                      activation=tf.nn.relu,
                      kernel_regularizer=l2_reg(hparams.l2)),
        layers.MaxPool1D(pool_size=3, strides=1),

        # Flatten the input volume
        layers.Flatten(),

        # Two fully connected layers, each followed by a dropout layer
        layers.Dense(units=16,
                     activation=tf.nn.relu,
                     kernel_regularizer=l2_reg(hparams.l2)),
        layers.Dropout(rate=0.3),
        layers.Dense(units=16,
                     activation=tf.nn.relu,
                     kernel_regularizer=l2_reg(hparams.l2)),
        layers.Dropout(rate=0.3),

        # Output layer with softmax activation
        layers.Dense(units=len(_ALLOWED_BASES), activation='softmax')
    ])
Example #13
    def call(self, inputs, pos_inputs, training):
        word_embed = tf.nn.embedding_lookup(self.embeddings, inputs)
        pos_embed = tf.nn.embedding_lookup(self.embeddings, pos_inputs)

        tokens_mask = tf.cast(inputs != 0, tf.float32)

        x = tf.concat([word_embed, pos_embed], axis=2)

        x = self.drop(x)
        # print(inputs.shape)
        out1 = tf.nn.tanh(
            self.conv1(x))  # bsz x (seq_len - filter_size + 1) x n_filters
        out2 = tf.nn.tanh(self.conv2(x))
        out3 = tf.nn.tanh(self.conv3(x))

        # bsz x 1 x n_filters - before squeeze
        # bsz x n_filters - after squeeze
        pool1 = tf.squeeze(layers.MaxPool1D(out1.shape[1])(out1))
        pool2 = tf.squeeze(layers.MaxPool1D(out2.shape[1])(out2))
        pool3 = tf.squeeze(layers.MaxPool1D(out3.shape[1])(out3))

        # bsz x n_filters * (number of different Conv1D layers)
        x = tf.concat([pool1, pool2, pool3], axis=1)

        x = self.dropout(x)

        x = self.linear(x)

        return {'logits': x}
Example #14
    def __init__(self):
        super(EventDetector, self).__init__()
        self.conv1 = layers.Conv1D(
            64,
            3,
            activation='relu',
            kernel_regularizer=keras.regularizers.L2(0.1))
        self.bn1 = layers.BatchNormalization()
        self.mp1 = layers.MaxPool1D(3)

        self.conv2 = layers.Conv1D(
            128,
            3,
            activation='relu',
            kernel_regularizer=keras.regularizers.L2(0.1))
        self.bn2 = layers.BatchNormalization()
        self.mp2 = layers.MaxPool1D(3)

        self.flatten = layers.Flatten()
        self.dropout = layers.Dropout(0.15)

        self.d1 = layers.Dense(64,
                               activation='relu',
                               kernel_regularizer=keras.regularizers.L2(0.1))
        self.d2 = layers.Dense(32,
                               activation='relu',
                               kernel_regularizer=keras.regularizers.L2(0.1))
        self.d3 = layers.Dense(3,
                               activation='relu',
                               kernel_regularizer=keras.regularizers.L2(0.1))

        self.softmax = layers.Softmax()
Example #15
def DeepAnt_Model():
    model_m = keras.Sequential([
        layers.Input(shape=(X.shape[1], X.shape[2])),
        #layers.BatchNormalization(),
        layers.Conv1D(filters=32,
                      kernel_size=7,
                      padding="same",
                      strides=1,
                      activation="relu"),
        #layers.Dropout(rate=0.2),
        layers.MaxPool1D(pool_size=2),
        layers.Conv1D(filters=32,
                      kernel_size=7,
                      padding="same",
                      strides=1,
                      activation="relu"),
        layers.MaxPool1D(pool_size=2),
        layers.Flatten(),
        layers.Dropout(rate=0.25),
        layers.Dense(128, activation='relu'),
        layers.Dropout(rate=0.45),
        layers.Dense(window_size - 2 * margin, activation='relu'),
    ])
    model_m.compile(optimizer=keras.optimizers.Adam(learning_rate=0.0005),
                    loss="mse")
    model_m.summary()
    return model_m
Example #16
    def __init__(self, hparams):
        super(AttentionNET, self).__init__()

        self.downres0 = DownResLayer(
            channels_out=6,
            dropout_rate=0.2,
            kernel_size=6,
            first_layer=True,
            regularization=tf.keras.regularizers.l2(0.0005))

        self.max_pool0 = layers.MaxPool1D(pool_size=2, strides=2)
        self.attention_layer = AttentionLayer(
            channels_out=6,
            filters_per_head=4,
            num_attention_heads=1,
            use_positional_encoding=False,
            regularization=tf.keras.regularizers.l2(0.0005),
            kernel_size=6,
        )

        self.max_pool1 = layers.MaxPool1D(pool_size=2, strides=2)

        self.downres1 = DownResLayer(
            channels_out=8,
            dropout_rate=0.2,
            kernel_size=6,
            regularization=tf.keras.regularizers.l2(0.0005))

        self.final_pool = layers.MaxPool1D(pool_size=2, strides=2)
        self.dense_dropout = layers.Dropout(0.5)

        self.dense_output = layers.Dense(2, activation="softmax")
Example #17
    def build(self, input_shape):
        self.conv1 = layers.Conv1D(filters=8,
                                   kernel_size=3,
                                   padding='same',
                                   activation='relu')
        self.conv2 = layers.Conv1D(filters=16,
                                   kernel_size=3,
                                   padding='same',
                                   activation='relu')
        self.conv3 = layers.Conv1D(filters=32,
                                   kernel_size=3,
                                   padding='same',
                                   activation='relu')
        self.conv4 = layers.Conv1D(filters=64,
                                   kernel_size=3,
                                   padding='same',
                                   activation='relu')

        self.pool1 = layers.MaxPool1D(pool_size=2, strides=2)
        self.pool2 = layers.MaxPool1D(pool_size=2, strides=2)
        self.pool3 = layers.MaxPool1D(pool_size=2, strides=2)
        self.pool4 = layers.MaxPool1D(pool_size=2, strides=2)

        self.global_avg = layers.GlobalAveragePooling1D()
        self.dense1 = layers.Dense(32, activation='relu')
        self.dropout = layers.Dropout(self.rate)
        self.dense2 = layers.Dense(1, activation='linear')
Example #18
 def conv_net(self, word_pos):
     sent_len = word_pos.shape[1]
     conv_output = self.conv1layer(word_pos)
     conv_output2 = self.conv2layer(word_pos)
     pool_output = layers.MaxPool1D(pool_size=sent_len - 1)(conv_output)
     pool_output2 = layers.MaxPool1D(pool_size=sent_len - 2)(conv_output2)
     pool_concat = tf.concat([pool_output, pool_output2], axis=2)
     flatten_output = self.flatten(pool_concat)
     return flatten_output
Example #19
 def build(self, input_shape):
     self.embedding = layers.Embedding(MAX_WORDS, 7, input_length=MAX_LEN)
     self.conv_1 = layers.Conv1D(16, kernel_size=5, name="conv_1", activation="relu")
     self.pool_1 = layers.MaxPool1D(name="pool_1")
     self.conv_2 = layers.Conv1D(128, kernel_size=2, name="conv_2", activation="relu")
     self.pool_2 = layers.MaxPool1D(name="pool_2")
     self.flatten = layers.Flatten()
     self.dense = layers.Dense(1, activation="sigmoid")
     super(CnnModel, self).build(input_shape)
Example #20
def create_model():
    model = models.Sequential()
    model.add(layers.Embedding(MAX_WORDS, 7, input_length=MAX_LEN))
    model.add(layers.Conv1D(filters=64, kernel_size=5, activation="relu"))
    model.add(layers.MaxPool1D(2))
    model.add(layers.Conv1D(filters=32, kernel_size=3, activation="relu"))
    model.add(layers.MaxPool1D(2))
    model.add(layers.Flatten())
    model.add(layers.Dense(CAT_NUM, activation="softmax"))
    return model
Example #21
def CNN1D(classes):
    input = layers.Input(shape=(250, 1))
    x = layers.Conv1D(128, 13, padding='same', activation='relu')(input)
    x = layers.BatchNormalization()(x)
    x = layers.Conv1D(128, 13, padding='same', activation='relu')(x)
    x = layers.BatchNormalization()(x)
    x = layers.MaxPool1D(pool_size=2)(x)

    x = layers.Conv1D(256, 7, padding='same', activation='relu')(x)
    x = layers.BatchNormalization()(x)
    x = layers.Conv1D(256, 7, padding='same', activation='relu')(x)
    x = layers.BatchNormalization()(x)
    x = layers.MaxPool1D(pool_size=2)(x)

    x = layers.Conv1D(512, 5, padding='same', activation='relu')(x)
    x = layers.BatchNormalization()(x)
    x = layers.Conv1D(512, 5, padding='same', activation='relu')(x)
    x = layers.BatchNormalization()(x)
    x = layers.Conv1D(512, 5, padding='same', activation='relu')(x)
    x = layers.BatchNormalization()(x)
    x = layers.Conv1D(512, 5, padding='same', activation='relu')(x)
    x = layers.BatchNormalization()(x)

    x = layers.MaxPool1D(pool_size=2)(x)

    # x = layers.Conv1D(256,1,padding='same',activation='relu')(x)

    # x = layers.Conv1D(512,3,padding='same',activation='relu')(x)
    # x = layers.BatchNormalization()(x)
    # x = layers.Conv1D(512,3,padding='same',activation='relu')(x)
    # x = layers.BatchNormalization()(x)
    # x = layers.MaxPool1D(pool_size=2)(x)

    # x = layers.Conv1D(512,3,padding='same',activation='relu')(x)
    # x = layers.BatchNormalization()(x)
    # x = layers.Conv1D(512,3,padding='same',activation='relu')(x)
    # x = layers.BatchNormalization()(x)
    # x = layers.MaxPool1D(pool_size=2)(x)

    # x = layers.Conv1D(512,3,padding='same',activation='relu')(x)
    # x = layers.BatchNormalization()(x)
    # x = layers.Conv1D(512,3,padding='same',activation='relu')(x)
    # x = layers.BatchNormalization()(x)
    # x = layers.MaxPool1D(pool_size=2)(x)

    x = layers.Flatten()(x)
    x = layers.Dense(units=128)(x)
    x = layers.Dropout(0.5)(x)
    x = layers.Dense(units=64)(x)
    x = layers.Dropout(0.5)(x)

    output = layers.Dense(units=classes, activation='softmax')(x)

    CNN = tf.keras.Model(input, output, name='CNN1D')
    return CNN
Example #22
    def hist_encoder(conv_input):
        conv = layers.Reshape((n_steps_in, 1))(conv_input)
        conv = layers.Conv1D(4, [3], activation=activation,
                             padding='same')(conv)
        conv = layers.MaxPool1D(pool_size=2)(conv)

        conv = layers.Conv1D(1, [7], activation=activation,
                             padding='same')(conv)
        conv = layers.MaxPool1D(pool_size=2)(conv)
        conv = layers.Flatten()(conv)
        conv = layers.Dense(n_steps_out)(conv)
        conv = layers.Reshape((n_steps_out, 1))(conv)
        return conv
Example #23
 def make_conv_layers(name: str):
     """Input -> Conv -> MaxPool -> Conv -> MaxPool -> Flatten"""
     model = models.Sequential(name=name)
     model.add(layers.Embedding(voc_size, dim, input_length=maxlen))
     model.add(layers.Conv1D(dim * 2, kernel_size, activation='relu'))
     model.add(layers.MaxPool1D(2))
     model.add(layers.Conv1D(dim * 2, 2, activation='relu'))
     model.add(layers.MaxPool1D(feature_size))
     model.add(layers.Flatten())
     model.add(layers.GaussianNoise(0.1 / dim))
     model.add(layers.Dropout(0.5))
     model.add(layers.Dense(dim))
     return model
Example #24
def create_convolution_model(
    input_size: Tuple[int, ...]
) -> Tuple[Union[tf.Tensor, List[tf.Tensor]], tf.Tensor]:
    """
    Creates a convolutional neural network with kernel size 3 and max pooling after every convolution.

    The output layer and activation are omitted, as they are added by the wrapper function.
    """
    input_cont = keras.Input((2, ))
    input_conv = keras.Input(input_size)

    conv1 = layers.Conv1D(128, kernel_size=10, strides=10,
                          activation='relu')(input_conv)
    pool1 = layers.MaxPool1D(2)(conv1)
    bn1 = layers.BatchNormalization()(pool1)

    conv2 = layers.Conv1D(256,
                          kernel_size=3,
                          activation='relu',
                          padding='same')(bn1)
    pool2 = layers.MaxPool1D(2)(conv2)
    bn2 = layers.BatchNormalization()(pool2)

    conv3 = layers.Conv1D(512,
                          kernel_size=3,
                          activation='relu',
                          padding='same')(bn2)
    pool3 = layers.MaxPool1D(2)(conv3)
    bn3 = layers.BatchNormalization()(pool3)

    conv4 = layers.Conv1D(1024,
                          kernel_size=3,
                          activation='relu',
                          padding='same')(bn3)
    pool4 = layers.MaxPool1D(2)(conv4)
    bn4 = layers.BatchNormalization()(pool4)

    flatten = layers.Flatten()(bn4)

    conc = layers.Concatenate()([input_cont, flatten])

    dense1 = layers.Dense(1024, activation='relu')(conc)
    drop1 = layers.Dropout(0.2)(dense1)
    dense2 = layers.Dense(1024, activation='relu')(drop1)
    drop2 = layers.Dropout(0.2)(dense2)
    out = layers.Dense(512, activation='relu')(drop2)

    # Omit final layer as it is added by the wrapper function
    # out = layers.Dense(1, activation='sigmoid')(dense4)

    return [input_cont, input_conv], out
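The wrapper mentioned in the docstring is not shown here; the sketch below is one plausible way to finish the model, assuming the `keras`/`layers` aliases used above, a hypothetical input size of (1000, 1), and a binary sigmoid head:

inputs, features = create_convolution_model(input_size=(1000, 1))
out = layers.Dense(1, activation='sigmoid')(features)   # output layer the wrapper would add
model = keras.Model(inputs=inputs, outputs=out)
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])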
Example #25
def build_model(hp):
    model = keras.Sequential()
    model.add(layers.Conv1D(filters=hp.Int('units_1',
                                           min_value=32,
                                           max_value=512,
                                           step=32),
                            kernel_size=hp.Int('kernel_1', min_value=2, max_value=3),
                            input_shape=input_shape,
                            padding='same'))
    model.add(layers.Conv1D(filters=hp.Int('units_2',
                                           min_value=32,
                                           max_value=512,
                                           step=32),
                            kernel_size=hp.Int('kernel_2', min_value=2, max_value=3),
                            activation='relu',
                            padding='same'))
    model.add(layers.MaxPool1D(hp.Int('pool_1', min_value=2, max_value=3)))
    model.add(layers.Dropout(0.3))
    model.add(layers.Conv1D(filters=hp.Int('units_3',
                                           min_value=32,
                                           max_value=512,
                                           step=32),
                            kernel_size=hp.Int('kernel_3', min_value=2, max_value=3),
                            activation='relu',
                            padding='same'))
    model.add(layers.MaxPool1D(hp.Int('pool_2', min_value=2, max_value=3)))
    model.add(layers.Dropout(0.5))
    model.add(layers.Flatten())
    model.add(layers.Dense(units=hp.Int('units_4',
                                        min_value=32,
                                        max_value=512,
                                        step=32),
                           activation='relu'))
    model.add(layers.Dense(units=hp.Int('units_5',
                                        min_value=32,
                                        max_value=512,
                                        step=32),
                           activation='relu'))
    model.add(layers.Dense(units=hp.Int('units_6',
                                        min_value=32,
                                        max_value=512,
                                        step=32),
                           activation='relu'))
    model.add(layers.Dense(1, activation='sigmoid'))
    model.compile(
        optimizer=keras.optimizers.Adam(
            hp.Choice('learning_rate',
                      values=[1e-2, 1e-3, 1e-4])),
        loss='binary_crossentropy',
        metrics=['acc'])
    return model
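This `build_model(hp)` is written for Keras Tuner; a minimal search sketch follows (assumptions: the `keras_tuner` package, an `input_shape` global matching the training data, and `x_train`/`y_train` arrays that are not shown here):

import keras_tuner as kt

tuner = kt.RandomSearch(build_model,
                        objective='val_acc',
                        max_trials=10,
                        overwrite=True)
tuner.search(x_train, y_train, epochs=5, validation_split=0.2)
best_model = tuner.get_best_models(num_models=1)[0]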
Example #26
 def build(self, input_shape):
     self.embedding = layers.Embedding(MAX_WORDS, 7, input_length=MAX_LEN)
     self.conv1 = layers.Conv1D(16,
                                kernel_size=5,
                                name='conv_1',
                                activation='relu')
     self.pool1 = layers.MaxPool1D(name='maxpool_1')
     self.conv2 = layers.Conv1D(128,
                                kernel_size=2,
                                name='conv_2',
                                activation='relu')
     self.pool2 = layers.MaxPool1D(name='maxpool_2')
     self.flatten = layers.Flatten()
     self.dense = layers.Dense(1, activation='sigmoid')
     super(CnnModel, self).build(input_shape)
Example #27
def get_baseline_convolutional_encoder(filters,
                                       embedding_dimension,
                                       input_shape=None,
                                       dropout=0.05):
    encoder = Sequential()

    # Initial conv
    if input_shape is None:
        # In this case we are using the encoder as part of a siamese network and the input shape will be determined
        # automatically based on the input shape of the siamese network
        encoder.add(
            layers.Conv1D(filters, 32, padding='same', activation='relu'))
    else:
        # In this case we are using the encoder to build a classifier network and the input shape must be defined
        encoder.add(
            layers.Conv1D(filters,
                          32,
                          padding='same',
                          activation='relu',
                          input_shape=input_shape))
    encoder.add(layers.BatchNormalization())
    encoder.add(layers.SpatialDropout1D(dropout))
    encoder.add(layers.MaxPool1D(4, 4))

    # Further convs
    encoder.add(
        layers.Conv1D(2 * filters, 3, padding='same', activation='relu'))
    encoder.add(layers.BatchNormalization())
    encoder.add(layers.SpatialDropout1D(dropout))
    encoder.add(layers.MaxPool1D())

    encoder.add(
        layers.Conv1D(3 * filters, 3, padding='same', activation='relu'))
    encoder.add(layers.BatchNormalization())
    encoder.add(layers.SpatialDropout1D(dropout))
    encoder.add(layers.MaxPool1D())

    encoder.add(
        layers.Conv1D(4 * filters, 3, padding='same', activation='relu'))
    encoder.add(layers.BatchNormalization())
    encoder.add(layers.SpatialDropout1D(dropout))
    encoder.add(layers.MaxPool1D())

    encoder.add(layers.GlobalMaxPool1D())

    encoder.add(layers.Dense(embedding_dimension))

    return encoder
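A hedged instantiation sketch; the input shape below is a hypothetical 1-second window of 16 kHz audio, not a value from the source:

encoder = get_baseline_convolutional_encoder(filters=32,
                                             embedding_dimension=64,
                                             input_shape=(16000, 1))
encoder.summary()   # ends in GlobalMaxPool1D followed by the embedding Dense layer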
Example #28
    def __init__(self, hparams):
        super(BaseNET2, self).__init__()
        self.expand = layers.Conv1D(
            filters=4,
            kernel_size=5,
            padding="same"
        )

        self.downres0 = DownResLayer(
            channels_out=6,
            dropout_rate=0.0,
            kernel_size=6,
            first_layer=False,
            regularization=tf.keras.regularizers.l2(0.0005)
        )

        self.conv3 = layers.Conv1D(
            filters=8,
            kernel_size=6,
            strides=1,
            padding="same",
            activation="relu",
            kernel_regularizer=tf.keras.regularizers.l2(0.0005)
        )
        self.maxp = layers.MaxPool1D(2,2)

        self.flat = layers.Flatten()
        self.drop1 = layers.Dropout(0.5)
        self.dense_out = layers.Dense(2, activation="softmax")
Example #29
def Conv1D():
    channels = None
    input_shape = spectrogram_dim

    model = keras.models.Sequential([
        layers.Conv1D(256, 3, activation="relu"),
        layers.MaxPool1D(2),
        layers.Conv1D(256, 3, activation="relu"),
        layers.MaxPool1D(2),
        layers.Conv1D(256, 3, activation="relu"),
        layers.MaxPool1D(2),
        layers.Conv1D(256, 3, activation="relu"),
        layers.Flatten(),
        layers.Dense(len(birdcodes.bird_code), activation="sigmoid")
    ])
    return model, input_shape, channels
Example #30
def get_hotel_model():
    embedding_dim = 16
    units = 76
    vocab_size = 49536
    input_length = 350

    model = tf.keras.Sequential([
        layers.Embedding(vocab_size, embedding_dim, input_length=input_length),
        layers.Bidirectional(layers.LSTM(units, return_sequences=True)),
        # L.LSTM(units,return_sequences=True),
        layers.Conv1D(64, 3),
        layers.MaxPool1D(),
        layers.Flatten(),
        layers.Dropout(0.5),
        layers.Dense(128, activation="relu"),
        layers.Dropout(0.5),
        layers.Dense(64, activation="relu"),
        layers.Dropout(0.5),
        layers.Dense(5, activation="softmax"),
    ])

    model.compile(loss="categorical_crossentropy",
                  optimizer="adam",
                  metrics=["accuracy"])

    # model.summary()

    return model
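A quick smoke-test sketch (assumes `tensorflow` imported as `tf`, as the code above implies; the token ids and ratings are random placeholders):

import numpy as np

model = get_hotel_model()
x = np.random.randint(0, 49536, size=(2, 350))             # two padded review sequences
y = tf.keras.utils.to_categorical([3, 4], num_classes=5)   # one-hot ratings
print(model.evaluate(x, y, verbose=0))                     # [loss, accuracy]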