Example #1
def alstm_fcn_model(proto_num, max_seq_length, nb_class):
    ip1 = Input(shape=(1, max_seq_length))
    ip2 = Input(shape=(proto_num, max_seq_length))

    x1 = AttentionLSTM(128)(ip1)
    x1 = Dropout(0.8)(x1)

    y1 = Permute((2, 1))(ip1)
    y1 = Conv1D(128, 8, padding='same', kernel_initializer='he_uniform')(y1)
    y1 = BatchNormalization()(y1)
    y1 = Activation('relu')(y1)

    y1 = Conv1D(256, 5, padding='same', kernel_initializer='he_uniform')(y1)
    y1 = BatchNormalization()(y1)
    y1 = Activation('relu')(y1)

    y1 = Conv1D(128, 3, padding='same', kernel_initializer='he_uniform')(y1)
    y1 = BatchNormalization()(y1)
    y1 = Activation('relu')(y1)

    y1 = GlobalAveragePooling1D()(y1)

    x1 = concatenate([x1, y1])

    x2 = AttentionLSTM(128)(ip2)
    x2 = Dropout(0.8)(x2)

    y2 = Permute((2, 1))(ip2)
    y2 = Conv1D(128, 8, padding='same', kernel_initializer='he_uniform')(y2)
    y2 = BatchNormalization()(y2)
    y2 = Activation('relu')(y2)

    y2 = Conv1D(256, 5, padding='same', kernel_initializer='he_uniform')(y2)
    y2 = BatchNormalization()(y2)
    y2 = Activation('relu')(y2)

    y2 = Conv1D(128, 3, padding='same', kernel_initializer='he_uniform')(y2)
    y2 = BatchNormalization()(y2)
    y2 = Activation('relu')(y2)

    y2 = GlobalAveragePooling1D()(y2)

    x2 = concatenate([x2, y2])

    x = concatenate([x1, x2])

    x = Dense(1024, activation='relu')(x)
    x = Dropout(0.5)(x)

    x = Dense(1024, activation='relu')(x)
    x = Dropout(0.5)(x)

    out = Dense(nb_class, activation='softmax')(x)

    model = Model([ip1, ip2], out)

    model.summary()

    return model
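A minimal training sketch for this two-input model (the sample count, shapes, optimizer, and epoch count below are illustrative assumptions, not part of the original example):

import numpy as np
from keras.utils import to_categorical

proto_num, max_seq_length, nb_class = 10, 64, 3            # assumed toy sizes
X_series = np.random.rand(100, 1, max_seq_length)          # raw series input (ip1)
X_protos = np.random.rand(100, proto_num, max_seq_length)  # prototype input (ip2)
y = to_categorical(np.random.randint(nb_class, size=100), nb_class)

model = alstm_fcn_model(proto_num, max_seq_length, nb_class)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.fit([X_series, X_protos], y, batch_size=16, epochs=5)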
Example #2
def generate_alstmfcn(MAX_SEQUENCE_LENGTH, NB_CLASS, NUM_CELLS=8):

    ip = Input(shape=(1, MAX_SEQUENCE_LENGTH))

    x = AttentionLSTM(NUM_CELLS)(ip)
    x = Dropout(0.8)(x)

    y = Permute((2, 1))(ip)
    y = Conv1D(128, 8, padding='same', kernel_initializer='he_uniform')(y)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)

    y = Conv1D(256, 5, padding='same', kernel_initializer='he_uniform')(y)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)

    y = Conv1D(128, 3, padding='same', kernel_initializer='he_uniform')(y)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)

    y = GlobalAveragePooling1D()(y)

    x = concatenate([x, y])

    out = Dense(NB_CLASS, activation='softmax')(x)

    model = Model(ip, out)

    model.summary()

    # add load model code here to fine-tune

    return model
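The "# add load model code here to fine-tune" placeholder above marks where pre-trained weights could be restored; a minimal sketch (the sequence length, class count, and checkpoint path are assumptions):

import os

model = generate_alstmfcn(MAX_SEQUENCE_LENGTH=128, NB_CLASS=4, NUM_CELLS=8)

# Hypothetical checkpoint path; only load it if an earlier run actually saved these weights.
weights_path = './weights/alstmfcn_weights.h5'
if os.path.exists(weights_path):
    model.load_weights(weights_path)

model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])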
Example #3
def generate_model_2():
    ip = Input(shape=(MAX_NB_VARIABLES, MAX_TIMESTEPS))

    x = Masking()(ip)
    x = AttentionLSTM(8)(x)
    x = Dropout(0.8)(x)

    y = Permute((2, 1))(ip)
    y = Conv1D(128, 8, padding='same', kernel_initializer='he_uniform')(y)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)
    y = squeeze_excite_block(y)

    y = Conv1D(256, 5, padding='same', kernel_initializer='he_uniform')(y)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)
    y = squeeze_excite_block(y)

    y = Conv1D(128, 3, padding='same', kernel_initializer='he_uniform')(y)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)

    y = GlobalAveragePooling1D()(y)

    x = concatenate([x, y])

    out = Dense(NB_CLASS, activation='softmax')(x)

    model = Model(ip, out)
    model.summary()

    # add load model code here to fine-tune

    return model
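Example #3 and several later examples call squeeze_excite_block(), which is not included in these snippets; a minimal sketch of the usual 1-D squeeze-and-excite helper (the reduction ratio of 16 is an assumption) is:

from keras import backend as K
from keras.layers import Dense, GlobalAveragePooling1D, Reshape, multiply

def squeeze_excite_block(input_tensor):
    # Channel-wise recalibration of a (batch, timesteps, filters) tensor.
    filters = K.int_shape(input_tensor)[-1]
    se = GlobalAveragePooling1D()(input_tensor)
    se = Reshape((1, filters))(se)
    se = Dense(filters // 16, activation='relu',
               kernel_initializer='he_normal', use_bias=False)(se)
    se = Dense(filters, activation='sigmoid',
               kernel_initializer='he_normal', use_bias=False)(se)
    return multiply([input_tensor, se])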
Example #4
def se_alstm_fcn_block():
    ip = Input(shape = (1, MAX_SEQUENCE_LENGTH))

    x = AttentionLSTM(8)(ip)
    x = Dropout(0.8)(x)

    y = Permute((2, 1))(ip)
    y = Conv1D(128, 8, padding = "same", kernel_initializer = "he_uniform")(y)
    y = BatchNormalization()(y)
    y = Activation("relu")(y)
    y = squeeze_excite_block(y)

    y = Conv1D(256, 5, padding = "same", kernel_initializer = "he_uniform")(y)
    y = BatchNormalization()(y)
    y = Activation("relu")(y)
    y = squeeze_excite_block(y)

    y = Conv1D(128, 3, padding = "same", kernel_initializer = "he_uniform")(y)
    y = BatchNormalization()(y)
    y = Activation("relu")(y)

    y = GlobalAveragePooling1D()(y)

    x = concatenate([x, y])

    out = Dense(NB_CLASSES, activation = "softmax")(x)

    model = Model(ip, out)

    model.summary()

    return model    
Example #5
def generate_alstmfcn(MAX_SEQUENCE_LENGTH,
                      NB_CLASS,
                      NUM_CELLS=8,
                      EMBEDDINGS=False):

    ip_rates = Input(shape=(1, MAX_SEQUENCE_LENGTH))

    if EMBEDDINGS:
        input_dims = [13, 32, 8, 25, 61, 61]
        output_dims = [6, 16, 4, 12, 30, 30]
        input_layers = [Input(shape=(60, )) for i in range(len(input_dims))]
        embedding_layers = [
            Embedding(input_dims[i], output_dims[i])(input_layers[i])
            for i in range(len(input_dims))
        ]
        embedding_layers = [
            Reshape((-1, ))(embedding_layers[i])
            for i in range(len(input_dims))
        ]
        embeddings = concatenate(embedding_layers)
    else:
        ip_sin_features = Input(shape=(60, 6))
        embeddings = Reshape((-1, ))(ip_sin_features)

    z = Dense(128, activation='relu')(embeddings)

    x = AttentionLSTM(NUM_CELLS)(ip_rates)
    x = Dropout(0.8)(x)

    y = Permute((2, 1))(ip_rates)
    y = Conv1D(128, 8, padding='same', kernel_initializer='he_uniform')(y)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)

    y = Conv1D(256, 5, padding='same', kernel_initializer='he_uniform')(y)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)

    y = Conv1D(128, 3, padding='same', kernel_initializer='he_uniform')(y)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)

    y = GlobalAveragePooling1D()(y)

    x = concatenate([x, y, z])

    out = Dense(NB_CLASS, activation='softmax')(x)

    if EMBEDDINGS:
        model = Model([ip_rates] + input_layers, out)
    else:
        model = Model([ip_rates, ip_sin_features], out)
    model.summary()

    # add load model code here to fine-tune

    return model
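With EMBEDDINGS=True this variant expects the rate series plus one integer-coded array of length 60 per categorical feature; a sketch of how its inputs might be assembled (all sample counts and shapes below are assumptions):

import numpy as np

n_samples, seq_len, n_class = 32, 100, 4
input_dims = [13, 32, 8, 25, 61, 61]   # must match the dims hard-coded in the model

X_rates = np.random.rand(n_samples, 1, seq_len)
X_cat = [np.random.randint(d, size=(n_samples, 60)) for d in input_dims]
y_onehot = np.eye(n_class)[np.random.randint(n_class, size=n_samples)]

model = generate_alstmfcn(seq_len, n_class, NUM_CELLS=8, EMBEDDINGS=True)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.fit([X_rates] + X_cat, y_onehot, batch_size=8, epochs=2)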
Example #6
def malstm_fcn_block():
    
    ip = Input(shape=(MAX_NB_VARIABLES, MAX_TIMESTEPS))

    x = Masking()(ip)
    x = AttentionLSTM(8)(x)
    x = Dropout(0.8)(x)

    y = Permute((2, 1))(ip)
    y = Conv1D(128, 8, padding = "same", kernel_initializer = "he_uniform")(y)
    y = BatchNormalization()(y)
    y = Activation("relu")(y)

    y = Conv1D(256, 5, padding = "same", kernel_initializer = "he_uniform")(y)
    y = BatchNormalization()(y)
    y = Activation("relu")(y)

    y = Conv1D(128, 3, padding = "same", kernel_initializer = "he_uniform")(y)
    y = BatchNormalization()(y)
    y = Activation("relu")(y)

    y = GlobalAveragePooling1D()(y)

    x = concatenate([x, y])

    out = Dense(NB_CLASSES, activation = "softmax")(x)

    model = Model(ip, out)
    model.summary()

    return model
Example #7
    def __init__(self, n_channels, n_timesteps, n_classes):
        super(ALSTM_FCN, self).__init__()

        self.n_channels = n_channels
        self.n_timesteps = n_timesteps
        self.n_classes = n_classes

        # attention
        self.alstm = AttentionLSTM(8)

        self.dropout = Dropout(0.8)
        self.conv1d_1 = Conv1D(128,
                               8,
                               padding='same',
                               kernel_initializer='he_uniform')
        self.conv1d_2 = Conv1D(256,
                               5,
                               padding='same',
                               kernel_initializer='he_uniform')
        self.conv1d_3 = Conv1D(128,
                               3,
                               padding='same',
                               kernel_initializer='he_uniform')
        self.bn = BatchNormalization()
        self.relu = Activation('relu')

        self.dense = Dense(self.n_classes)
Example #8
def generate_model_2():
    ip = Input(shape=(MAX_NB_VARIABLES, MAX_TIMESTEPS))
    # stride = 10

    # x = Permute((2, 1))(ip)
    # x = Conv1D(MAX_NB_VARIABLES // stride, 8, strides=stride, padding='same', activation='relu', use_bias=False,
    #            kernel_initializer='he_uniform')(x)  # (None, variables / stride, timesteps)
    # x = Permute((2, 1))(x)

    #ip1 = K.reshape(ip,shape=(MAX_TIMESTEPS,MAX_NB_VARIABLES))
    #x = Permute((2, 1))(ip)
    x = Masking()(ip)
    x = AttentionLSTM(8)(x)
    x = Dropout(0.8)(x)

    y = Permute((2, 1))(ip)
    y = Conv1D(128, 8, padding='same', kernel_initializer='he_uniform')(y)
    y = BatchNormalization()(y)
    # y = Activation('relu')(y)
    # Nov.24/2019/CSCE636/activation = pReLU
    # y = advanced_activations.PReLU()(y)
    # Nov.24/2019/CSCE636/activation = LeakyReLU
    y = advanced_activations.LeakyReLU()(y)
    y = squeeze_excite_block(y)

    y = Conv1D(256, 5, padding='same', kernel_initializer='he_uniform')(y)
    y = BatchNormalization()(y)
    # y = Activation('relu')(y)
    # Nov.24/2019/CSCE636/activation = pReLU
    # y = advanced_activations.PReLU()(y)
    # Nov.24/2019/CSCE636/activation = LeakyReLU
    y = advanced_activations.LeakyReLU()(y)
    y = squeeze_excite_block(y)

    y = Conv1D(128, 3, padding='same', kernel_initializer='he_uniform')(y)
    y = BatchNormalization()(y)
    # y = Activation('relu')(y)
    # Nov.24/2019/CSCE636/activation = pReLU
    # y = advanced_activations.PReLU()(y)
    # Nov.24/2019/CSCE636/activation = LeakyReLU
    y = advanced_activations.LeakyReLU()(y)

    y = GlobalAveragePooling1D()(y)

    x = concatenate([x, y])

    out = Dense(NB_CLASS, activation='softmax')(x)

    model = Model(ip, out)
    model.summary()

    # add load model code here to fine-tune

    return model
Example #9
    def train_lstm_attention(self,
                             x_train,
                             y_train,
                             x_valid,
                             y_valid,
                             figplot=False):
        n_channel = self.conf.n_channels
        ip = Input(shape=(n_channel, self.conf.n_steps))
        x = AttentionLSTM(32)(ip)
        x = Dropout(0.8)(x)
        out = Dense(self.conf.n_class, activation='softmax')(x)

        model = Model(ip, out)
        self.run(model, x_train, y_train, x_valid, y_valid, None,
                 "lstm_attention", figplot)
Example #10
def generate_model():
    ip = Input(shape=(MAX_NB_VARIABLES, MAX_TIMESTEPS))
    # stride = 10

    # x = Permute((2, 1))(ip)
    # x = Conv1D(MAX_NB_VARIABLES // stride, 8, strides=stride, padding='same', activation='relu', use_bias=False,
    #            kernel_initializer='he_uniform')(x)  # (None, variables / stride, timesteps)
    # x = Permute((2, 1))(x)

    #ip1 = K.reshape(ip,shape=(MAX_TIMESTEPS,MAX_NB_VARIABLES))
    #x = Permute((2, 1))(ip)
    x = Masking()(ip)
    x = AttentionLSTM(8)(x)
    x = Dropout(0.8)(x)

    y = Permute((2, 1))(ip)
    y = Conv1D(128, 8, padding='same', kernel_initializer='he_uniform')(y)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)
    y = squeeze_excite_block(y)

    y = Conv1D(256, 5, padding='same', kernel_initializer='he_uniform')(y)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)
    y = squeeze_excite_block(y)

    y = Conv1D(256, 5, padding='same', kernel_initializer='he_uniform')(y)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)

    y = Conv1D(128, 3, padding='same', kernel_initializer='he_uniform')(y)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)

    y = GlobalAveragePooling1D()(y)

    x = concatenate([x, y])

    #out1 = Dense(11,input_dim=11, kernel_initializer='he_uniform', activation='relu')(x)
    out = Dense(1, kernel_initializer='he_uniform')(x)
    model = Model(ip, out)
    model.compile(loss='mean_squared_error', optimizer='adam')
    model.summary()

    # add load model code here to fine-tune

    return model
Example #11
def generate_model_2():
    ip = Input(shape=(MAX_NB_VARIABLES, MAX_TIMESTEPS))
    ''' subsample timesteps to prevent OOM due to Attention LSTM '''
    stride = 2

    x = Permute((2, 1))(ip)
    x = Conv1D(MAX_NB_VARIABLES // stride,
               8,
               strides=stride,
               padding='same',
               activation='relu',
               use_bias=False,
               kernel_initializer='he_uniform')(
                   x)  # (None, variables / stride, timesteps)
    x = Permute((2, 1))(x)

    x = Masking()(x)
    x = AttentionLSTM(128)(x)
    x = Dropout(0.8)(x)

    y = Permute((2, 1))(ip)
    y = Conv1D(128, 8, padding='same', kernel_initializer='he_uniform')(y)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)
    y = squeeze_excite_block(y)

    y = Conv1D(256, 5, padding='same', kernel_initializer='he_uniform')(y)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)
    y = squeeze_excite_block(y)

    y = Conv1D(128, 3, padding='same', kernel_initializer='he_uniform')(y)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)

    y = GlobalAveragePooling1D()(y)

    x = concatenate([x, y])

    out = Dense(NB_CLASS, activation='softmax')(x)

    model = Model(ip, out)
    model.summary()

    # add load model code here to fine-tune

    return model
Example #12
    def __init__(self, n_channels, n_timesteps, n_classes):
        super(MALSTM_FCN, self).__init__()

        self.n_channels = n_channels
        self.n_timesteps = n_timesteps
        self.n_classes = n_classes
        ''' subsample timesteps to prevent OOM due to Attention LSTM '''
        self.stride = 3

        self.conv1d_0 = Conv1D(self.n_channels // self.stride,
                               8,
                               strides=self.stride,
                               padding='same',
                               activation='relu',
                               use_bias=False,
                               kernel_initializer='he_uniform'
                               )  # (None, n_channels / stride, n_timesteps)

        self.conv1d_1 = Conv1D(128,
                               8,
                               padding='same',
                               kernel_initializer='he_uniform')
        self.conv1d_2 = Conv1D(256,
                               5,
                               padding='same',
                               kernel_initializer='he_uniform')
        self.conv1d_3 = Conv1D(128,
                               3,
                               padding='same',
                               kernel_initializer='he_uniform')

        # attention
        self.alstm = AttentionLSTM(384, unroll=True)
        self.dropout = Dropout(0.8)

        self.bn = BatchNormalization()
        self.relu = Activation('relu')
        self.seb1 = SQUEEZE_EXCITE_BLOCK(n_channels=self.n_channels)
        self.seb2 = SQUEEZE_EXCITE_BLOCK(n_channels=self.n_channels)
        self.dense = Dense(self.n_classes)
Example #13
def DenseNet(input_shape=None, dense_blocks=3, dense_layers=-1, growth_rate=12, nb_classes=None, dropout_rate=None,
             compression=1.0, weight_decay=1e-4, depth=40, avg_pooling=True):
    """
    Creating a DenseNet
    
    Arguments:
        input_shape  : shape of the input images. E.g. (28,28,1) for MNIST    
        dense_blocks : amount of dense blocks that will be created (default: 3)    
        dense_layers : number of layers in each dense block. You can also use a list for numbers of layers [2,4,3]
                       or define only 2 to add 2 layers at all dense blocks. -1 means that dense_layers will be calculated
                       by the given depth (default: -1)
        growth_rate  : number of filters to add per dense block (default: 12)
        nb_classes   : number of classes
        dropout_rate : defines the dropout rate that is accomplished after each conv layer (except the first one).
                       In the paper the authors recommend a dropout of 0.2 (default: None)
        bottleneck   : (True / False) if true it will be added in convolution block (default: False)
        compression  : reduce the number of feature-maps at transition layer. In the paper the authors recomment a compression
                       of 0.5 (default: 1.0 - will have no compression effect)
        weight_decay : weight decay of L2 regularization on weights (default: 1e-4)
        depth        : number or layers (default: 40)
        
    Returns:
        Model        : A Keras model instance
    """
    
    if nb_classes is None:
        raise Exception('Please define the number of classes (e.g. nb_classes=10). This is required for the final softmax.')

    if compression <= 0.0 or compression > 1.0:
        raise Exception('Compression has to be a value between 0.0 and 1.0. Setting compression to 1.0 turns it off.')

    if type(dense_layers) is list:
        if len(dense_layers) != dense_blocks:
            raise AssertionError('The dense_layers list has to contain one entry per dense block.')
    elif dense_layers == -1:
        dense_layers = (depth - (dense_blocks + 1)) // dense_blocks
        dense_layers = [int(dense_layers) for _ in range(dense_blocks)]
    else:
        dense_layers = [int(dense_layers) for _ in range(dense_blocks)]
        
        
    
    data_input = Input(shape=input_shape)
    nb_channels = growth_rate * 2

    
    print('Creating DenseNet')
    print('#############################################')
    print('Dense blocks: %s' % dense_blocks)
    print('Layers per dense block: %s' % dense_layers)
    print('#############################################')
          
    y = Masking()(data_input)
    y = AttentionLSTM(8)(y)
    y = Dropout(0.8)(y)
    
    x = Permute((2, 1))(data_input)
    # Initial convolution layer
    x = Conv1D(nb_channels, (5,), padding='same', strides=(1,), use_bias=False, kernel_regularizer=l2(weight_decay), kernel_initializer='he_uniform')(x)
    #x = Conv1D(nb_channels, (8,), padding='same',strides=(1,), kernel_initializer='he_uniform')(x)

    

    # Building dense blocks
    for block in range(dense_blocks):
        
        # Add dense block
        x, nb_channels = dense_block(x, dense_layers[block], nb_channels, growth_rate, dropout_rate, weight_decay)
        
        if block < dense_blocks - 1:  # if it's not the last dense block
            # Add transition_block
            x = transition_layer(x, nb_channels, dropout_rate, compression, weight_decay,avg_pooling)
            nb_channels = int(nb_channels * compression)
    
    x = BatchNormalization(gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay))(x)
    #x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = GlobalAveragePooling1D()(x)
    
    
    
    y = concatenate([y, x])
    y = Dense(nb_classes, activation='softmax', kernel_regularizer=l2(weight_decay), bias_regularizer=l2(weight_decay))(y)
    
    if growth_rate >= 36:
        model_name = 'widedense'
    else:
        model_name = 'dense'

    if compression < 1.0:
        model_name = model_name + 'c'

    return Model(data_input, y, name=model_name), model_name
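The dense_block() and transition_layer() helpers used above are not included in the snippet; a minimal 1-D sketch of what they could look like (the 3-wide kernels, bottleneck-free layout, and pooling choice are assumptions; the call signatures match the usage above):

from keras.layers import (Activation, AveragePooling1D, BatchNormalization,
                          Conv1D, Dropout, MaxPooling1D, concatenate)
from keras.regularizers import l2

def dense_block(x, nb_layers, nb_channels, growth_rate,
                dropout_rate=None, weight_decay=1e-4):
    # Each layer adds growth_rate feature maps and concatenates them onto its input.
    for _ in range(nb_layers):
        cb = BatchNormalization(gamma_regularizer=l2(weight_decay),
                                beta_regularizer=l2(weight_decay))(x)
        cb = Activation('relu')(cb)
        cb = Conv1D(growth_rate, 3, padding='same', use_bias=False,
                    kernel_regularizer=l2(weight_decay),
                    kernel_initializer='he_uniform')(cb)
        if dropout_rate:
            cb = Dropout(dropout_rate)(cb)
        x = concatenate([x, cb])
        nb_channels += growth_rate
    return x, nb_channels

def transition_layer(x, nb_channels, dropout_rate=None, compression=1.0,
                     weight_decay=1e-4, avg_pooling=True):
    # 1x1 convolution to compress the channels, then halve the temporal resolution.
    x = BatchNormalization(gamma_regularizer=l2(weight_decay),
                           beta_regularizer=l2(weight_decay))(x)
    x = Activation('relu')(x)
    x = Conv1D(int(nb_channels * compression), 1, padding='same', use_bias=False,
               kernel_regularizer=l2(weight_decay),
               kernel_initializer='he_uniform')(x)
    if dropout_rate:
        x = Dropout(dropout_rate)(x)
    if avg_pooling:
        x = AveragePooling1D(2, strides=2)(x)
    else:
        x = MaxPooling1D(2, strides=2)(x)
    return x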
Example #14
#Part 2 - Building the RNN

#Importing the keras libraries and packages
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout

from utils.layer_utils import AttentionLSTM

# Initializing the RNN
regressor = Sequential()

#Adding the first LSTM layer and some Dropout regularisation
regressor.add(
    AttentionLSTM(units=100,
                  return_sequences=True,
                  input_shape=(X_train.shape[1], 1)))
regressor.add(Dropout(0.2))

#Adding a second LSTM layer and some Dropout regularisation
regressor.add(AttentionLSTM(units=50))
regressor.add(Dropout(0.2))

#Adding the output layer
regressor.add(Dense(units=1))

#Compiling the RNN
regressor.compile(optimizer='adam', loss='mean_squared_error')

#Fitting the RNN to the training set
regressor.fit(X_train, y_train, epochs=20, batch_size=32)
Example #15
#Part 2 - Building the RNN

#Importing the keras libraries and packages
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM, Masking
from keras.layers import Dropout

from utils.layer_utils import AttentionLSTM

# Initializing the RNN
regressor = Sequential()

#Adding the first LSTM layer and some Dropout regularisation
regressor.add(Masking(input_shape=(X_train.shape[1], 1)))
regressor.add(AttentionLSTM(units=50, return_sequences=True))
regressor.add(Dropout(0.2))

#Adding a second LSTM layer and some Dropout regularisation
regressor.add(AttentionLSTM(units=50))
regressor.add(Dropout(0.2))

#Adding the output layer
regressor.add(Dense(units=1))

#Compiling the RNN
regressor.compile(optimizer='adam', loss='mean_squared_error')

#Fitting the RNN to the training set
regressor.fit(X_train, y_train, epochs=25, batch_size=32)
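After fitting, predictions come straight from the trained regressor; a one-line sketch assuming X_test was windowed and scaled the same way as X_train:

# X_test is assumed to have shape (n_samples, timesteps, 1), like X_train.
predicted_values = regressor.predict(X_test)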