Example #1
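All of the snippets below assume standard TensorFlow/Keras imports along the following lines. MultiHeadAttention (and RevCompConv1D in Example #5) are custom project layers rather than built-ins: they are constructed with a d_model argument and return the attention weights alongside the output (a compatible stand-in is sketched after Example #5).

# Assumed imports for the examples below; the module that provides the custom
# MultiHeadAttention and RevCompConv1D layers is not shown in these snippets.
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model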
def CNN2_LSTM_ATT(in_shape=(200, 4), num_filters=32, batch_norm=True, activation='relu', lstm_units=128, heads=8, key_size=64, dense_units=512, num_out=12):

    inputs = Input(shape=in_shape)
    # convolutional block
    nn = layers.Conv1D(filters=num_filters, kernel_size=19, use_bias=False, padding='same')(inputs)
    if batch_norm:
        nn = layers.BatchNormalization()(nn)
    nn = layers.Activation(activation, name='conv_activation')(nn)
    nn = layers.MaxPool1D(pool_size=4)(nn)
    nn = layers.Dropout(0.1)(nn)
    nn = layers.Conv1D(filters=num_filters, kernel_size=7, use_bias=False, padding='same')(nn)
    nn = layers.BatchNormalization()(nn)
    nn = layers.Activation('relu')(nn)
    nn = layers.MaxPool1D(pool_size=6)(nn)
    nn = layers.Dropout(0.1)(nn)

    # bidirectional LSTM
    forward = layers.LSTM(lstm_units//2, return_sequences=True)
    backward = layers.LSTM(lstm_units//2, activation='relu', return_sequences=True, go_backwards=True)
    nn = layers.Bidirectional(forward, backward_layer=backward)(nn)
    nn = layers.Dropout(0.1)(nn)
    
    # multi-head self-attention (custom layer returns the output and the attention weights)
    nn, w = MultiHeadAttention(num_heads=heads, d_model=key_size)(nn, nn, nn)
    nn = layers.Dropout(0.1)(nn)

    nn = layers.Flatten()(nn)

    # fully connected block
    nn = layers.Dense(dense_units, use_bias=False)(nn)
    nn = layers.BatchNormalization()(nn)
    nn = layers.Activation('relu')(nn)
    nn = layers.Dropout(0.5)(nn)

    # output layer (sigmoid for multi-label targets)
    outputs = layers.Dense(num_out, activation='sigmoid')(nn)

    return Model(inputs=inputs, outputs=outputs)
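A minimal usage sketch for CNN2_LSTM_ATT, assuming a multi-label setup: the sigmoid output layer over num_out classes suggests binary cross-entropy, and the data below are random placeholders with the expected shapes (N, 200, 4) for one-hot sequences and (N, 12) for binary labels.

import numpy as np

model = CNN2_LSTM_ATT(in_shape=(200, 4), num_out=12)
model.compile(optimizer=keras.optimizers.Adam(learning_rate=1e-3),
              loss='binary_crossentropy',
              metrics=[keras.metrics.AUC(curve='ROC', name='auroc')])

# random placeholder data, only to exercise the pipeline
x_train = np.random.rand(128, 200, 4).astype('float32')
y_train = np.random.randint(0, 2, size=(128, 12)).astype('float32')
model.fit(x_train, y_train, batch_size=32, epochs=2, validation_split=0.1)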
Example #2
def CNN_LSTM2_TRANS(in_shape=(200, 4), num_filters=32, batch_norm=True, activation='relu', num_layers=1, heads=8, key_size=64, dense_units=512, num_out=12):
    
    inputs = Input(shape=in_shape)
    nn = layers.Conv1D(filters=num_filters, kernel_size=19, use_bias=False, padding='same')(inputs)
    if batch_norm:
        nn = layers.BatchNormalization()(nn)
    nn = layers.Activation(activation, name='conv_activation')(nn)
    nn = layers.MaxPool1D(pool_size=4)(nn)
    nn = layers.Dropout(0.1)(nn)
    
    forward = layers.LSTM(key_size // 2, return_sequences=True)
    backward = layers.LSTM(key_size // 2, activation='relu', return_sequences=True, go_backwards=True)
    nn = layers.Bidirectional(forward, backward_layer=backward)(nn)
    nn = layers.MaxPool1D(pool_size=6)(nn)
    nn = layers.Dropout(0.1)(nn)
    
    # transformer encoder blocks: pre-norm, then self-attention and feed-forward sub-layers with residual connections
    nn = layers.LayerNormalization(epsilon=1e-6)(nn)
    for i in range(num_layers):
        nn2,_ = MultiHeadAttention(d_model=key_size, num_heads=heads)(nn, nn, nn)
        nn2 = layers.Dropout(0.1)(nn2)
        nn = layers.Add()([nn, nn2])
        nn = layers.LayerNormalization(epsilon=1e-6)(nn)
        nn2 = layers.Dense(32, activation='relu')(nn)
        nn2 = layers.Dropout(0.1)(nn2)
        nn2 = layers.Dense(key_size)(nn2)
        nn2 = layers.Dropout(0.1)(nn2)
        nn = layers.Add()([nn, nn2])
        nn = layers.LayerNormalization(epsilon=1e-6)(nn)
    
    nn = layers.Flatten()(nn)

    nn = layers.Dense(dense_units, use_bias=False)(nn)
    nn = layers.BatchNormalization()(nn)
    nn = layers.Activation('relu')(nn)
    nn = layers.Dropout(0.5)(nn)

    outputs = layers.Dense(num_out, activation='sigmoid')(nn)

    return Model(inputs=inputs, outputs=outputs)
Example #3
def CNN_ATT(in_shape=(200, 4), num_filters=32, batch_norm=True, activation='relu', heads=8, key_size=64, dense_units=512, num_out=12):

    inputs = Input(shape=in_shape)
    nn = layers.Conv1D(filters=num_filters, kernel_size=19, use_bias=False, padding='same')(inputs)
    if batch_norm:
        nn = layers.BatchNormalization()(nn)
    nn = layers.Activation(activation, name='conv_activation')(nn)
    nn = layers.MaxPool1D(pool_size=24)(nn)
    nn = layers.Dropout(0.1)(nn)

    nn, w = MultiHeadAttention(num_heads=heads, d_model=key_size)(nn, nn, nn)
    nn = layers.Dropout(0.1)(nn)

    nn = layers.Flatten()(nn)

    nn = layers.Dense(dense_units, use_bias=False)(nn)
    nn = layers.BatchNormalization()(nn)
    nn = layers.Activation('relu')(nn)
    nn = layers.Dropout(0.5)(nn)

    outputs = layers.Dense(num_out, activation='sigmoid')(nn)

    return Model(inputs=inputs, outputs=outputs)
Example #4
        # Fragment from a loop over model variants: `inputs`, `variants`, and `names`
        # are assumed to be defined in the enclosing scope.
        # Convolutional Block
        nn = layers.Conv1D(filters=32,
                           kernel_size=19,
                           use_bias=False,
                           padding='same')(inputs)
        nn = layers.BatchNormalization()(nn)
        if variants[i]:
            nn = layers.Activation('relu', name='conv_activation')(nn)
        else:
            nn = layers.Activation('exponential', name='conv_activation')(nn)
        nn = layers.MaxPool1D(pool_size=25)(nn)
        nn = layers.Dropout(0.1)(nn)

        # Multi-Head Attention
        nn, weights = MultiHeadAttention(num_heads=8, d_model=32)(nn, nn, nn)
        nn = layers.Dropout(0.1)(nn)

        nn = layers.Flatten()(nn)

        # Feed Forward
        nn = layers.Dense(512, use_bias=False)(nn)
        nn = layers.BatchNormalization()(nn)
        nn = layers.Activation('relu')(nn)
        nn = layers.Dropout(0.5)(nn)

        # Output
        outputs = layers.Dense(12, activation='sigmoid')(nn)

        # Compile model
        model = Model(inputs=inputs, outputs=outputs, name=names[i])
Example #5
def model(input_shape, num_labels, filters=32, dims=128, num_heads=12, num_layers=4, pool_size=20, 
          num_units=[1024], activation='relu', bn=False, l2=None, rc=True):

  # l2 regularization
  if l2 is not None:
    l2 = keras.regularizers.l2(l2)

  # bias is redundant in the conv layer when it is followed by batch norm
  use_bias = not bn

  # input layer
  inputs = keras.layers.Input(shape=input_shape)

  # layer 1 - convolution
  if rc:  
    nn = RevCompConv1D(filters=filters, kernel_size=19, use_bias=use_bias, padding='same',
                                    kernel_regularizer=l2, concat=True)(inputs)      
  else:
    nn = keras.layers.Conv1D(filters=filters, kernel_size=19, use_bias=use_bias, padding='same',
                             kernel_regularizer=l2)(inputs)        
  if bn:
    nn = keras.layers.BatchNormalization()(nn)
  nn = keras.layers.Activation(activation)(nn)
  nn = keras.layers.MaxPool1D(pool_size=pool_size)(nn)
  nn = keras.layers.Dropout(0.1)(nn)

  # layer 2 - bidirectional LSTM followed by transformer encoder blocks
  forward_layer = keras.layers.LSTM(dims//2, return_sequences=True)
  backward_layer = keras.layers.LSTM(dims//2, activation='relu', return_sequences=True, go_backwards=True)
  nn2 = keras.layers.Bidirectional(forward_layer, backward_layer=backward_layer)(nn)
  nn = keras.layers.Dropout(0.1)(nn2)
  #nn = keras.layers.Add()([nn, nn2])
  nn = keras.layers.LayerNormalization(epsilon=1e-6)(nn)

  for i in range(num_layers):
    nn2,_ = MultiHeadAttention(d_model=dims, num_heads=num_heads)(nn, nn, nn)
    nn2 = keras.layers.Dropout(0.1)(nn2)
    nn = keras.layers.Add()([nn, nn2])
    nn = keras.layers.LayerNormalization(epsilon=1e-6)(nn)

    nn2 = keras.layers.Dense(16, activation='relu')(nn)
    nn2 = keras.layers.Dense(dims)(nn2)
    nn2 = keras.layers.Dropout(0.1)(nn2)
    nn = keras.layers.Add()([nn, nn2])
    nn = keras.layers.LayerNormalization(epsilon=1e-6)(nn)

  # layer 3 - Fully-connected 
  nn = keras.layers.Flatten()(nn)
  for num in num_units:
    nn = keras.layers.Dense(num, activation=None, use_bias=False)(nn)      
    nn = keras.layers.BatchNormalization()(nn)
    nn = keras.layers.Activation('relu')(nn)
    nn = keras.layers.Dropout(0.5)(nn)
  
  # Output layer
  logits = keras.layers.Dense(num_labels, activation='linear', use_bias=True)(nn)
  outputs = keras.layers.Activation('sigmoid')(logits)

  # create keras model
  return keras.Model(inputs=inputs, outputs=outputs)
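None of the examples define the MultiHeadAttention layer they call. Its interface differs from the stock keras.layers.MultiHeadAttention: it is built with d_model and num_heads, called as layer(q, k, v), and returns the attention output together with the attention weights. The wrapper below is a minimal compatible stand-in (a sketch, not the original implementation); RevCompConv1D in Example #5 is presumably a reverse-complement-aware convolution and would likewise need to come from the project's own layer library.

from tensorflow import keras

class MultiHeadAttention(keras.layers.Layer):
    # Drop-in sketch matching the call pattern used above:
    # layer = MultiHeadAttention(d_model=..., num_heads=...); out, weights = layer(q, k, v)
    def __init__(self, d_model, num_heads, **kwargs):
        super().__init__(**kwargs)
        # one reasonable choice: give each head d_model // num_heads dimensions (at least 1)
        key_dim = max(1, d_model // num_heads)
        self.mha = keras.layers.MultiHeadAttention(num_heads=num_heads,
                                                   key_dim=key_dim,
                                                   output_shape=d_model)

    def call(self, q, k, v):
        # return_attention_scores=True makes the built-in layer return (output, weights)
        output, weights = self.mha(query=q, key=k, value=v,
                                   return_attention_scores=True)
        return output, weights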