from keras.models import Model
from keras.layers import (Input, Dense, Dropout, Masking, Permute, Conv1D,
                          BatchNormalization, Activation, GlobalAveragePooling1D,
                          concatenate)

# AttentionLSTM (the repo's attention-augmented LSTM layer), squeeze_excite_block,
# and the dataset constants MAX_NB_VARIABLES, MAX_TIMESTEPS and NB_CLASS are
# defined elsewhere in this repository.


def generate_model_2():
    ip = Input(shape=(MAX_NB_VARIABLES, MAX_TIMESTEPS))

    # LSTM branch: Masking skips all-zero padded steps; the attention LSTM
    # steps over the variable axis of the (variables, timesteps) input.
    x = Masking()(ip)
    x = AttentionLSTM(8)(x)
    x = Dropout(0.8)(x)

    # FCN branch: swap to (timesteps, variables) so Conv1D convolves over time.
    y = Permute((2, 1))(ip)

    y = Conv1D(128, 8, padding='same', kernel_initializer='he_uniform')(y)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)
    y = squeeze_excite_block(y)

    y = Conv1D(256, 5, padding='same', kernel_initializer='he_uniform')(y)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)
    y = squeeze_excite_block(y)

    y = Conv1D(128, 3, padding='same', kernel_initializer='he_uniform')(y)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)

    y = GlobalAveragePooling1D()(y)

    # Fuse both branches and classify.
    x = concatenate([x, y])
    out = Dense(NB_CLASS, activation='softmax')(x)

    model = Model(ip, out)
    model.summary()

    # add load model code here to fine-tune
    return model
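# ``squeeze_excite_block`` is used above but not defined in this section. Below
# is a minimal sketch of a 1D squeeze-and-excitation block in the same Keras
# style; the reduction ratio of 16 is an assumption, not taken from this section.

from keras import backend as K
from keras.layers import Reshape, multiply


def squeeze_excite_block(input):
    ''' Sketch of a squeeze-excite block: pool each channel to a single
    descriptor, learn a per-channel sigmoid gate, and rescale the input. '''
    filters = K.int_shape(input)[-1]  # channel (filter) dimension

    se = GlobalAveragePooling1D()(input)  # (None, filters)
    se = Reshape((1, filters))(se)        # broadcastable against the input
    se = Dense(filters // 16, activation='relu',
               kernel_initializer='he_normal', use_bias=False)(se)
    se = Dense(filters, activation='sigmoid',
               kernel_initializer='he_normal', use_bias=False)(se)
    return multiply([input, se])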
def generate_model_2():
    ip = Input(shape=(MAX_NB_VARIABLES, MAX_TIMESTEPS))

    # Subsample timesteps to prevent OOM due to the attention LSTM.
    stride = 2

    x = Permute((2, 1))(ip)
    x = Conv1D(MAX_NB_VARIABLES // stride, 8, strides=stride, padding='same',
               activation='relu', use_bias=False,
               kernel_initializer='he_uniform')(x)  # (None, timesteps // stride, variables // stride)
    x = Permute((2, 1))(x)  # (None, variables // stride, timesteps // stride)

    x = Masking()(x)
    x = AttentionLSTM(128)(x)
    x = Dropout(0.8)(x)

    # FCN branch, identical to the variant above.
    y = Permute((2, 1))(ip)

    y = Conv1D(128, 8, padding='same', kernel_initializer='he_uniform')(y)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)
    y = squeeze_excite_block(y)

    y = Conv1D(256, 5, padding='same', kernel_initializer='he_uniform')(y)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)
    y = squeeze_excite_block(y)

    y = Conv1D(128, 3, padding='same', kernel_initializer='he_uniform')(y)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)

    y = GlobalAveragePooling1D()(y)

    x = concatenate([x, y])
    out = Dense(NB_CLASS, activation='softmax')(x)

    model = Model(ip, out)
    model.summary()

    # add load model code here to fine-tune
    return model
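# A sketch of how the "add load model code here to fine-tune" placeholder above
# could be filled in. The checkpoint path and learning rate are hypothetical
# values, not taken from this repository.
if __name__ == '__main__':
    from keras.optimizers import Adam

    model = generate_model_2()
    # model.load_weights('./weights/model_weights.h5')  # hypothetical checkpoint path
    model.compile(optimizer=Adam(lr=1e-4),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])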