# Consolidated imports for the snippets below (assumes tf.keras 2.x; the
# snippets originally lived in separate files).
import time

from tensorflow import keras
from tensorflow.keras import activations, losses, models, optimizers, regularizers
from tensorflow.keras import backend as K
from tensorflow.keras.activations import relu, softmax
from tensorflow.keras.backend import expand_dims
from tensorflow.keras.layers import (
    Activation, Attention, BatchNormalization, Bidirectional, Concatenate,
    Conv1D, Conv2D, Conv2DTranspose, Convolution1D, Dense, Dropout, Embedding,
    Flatten, GlobalAveragePooling1D, GlobalAvgPool1D, GlobalMaxPool1D,
    GlobalMaxPooling1D, GRU, Input, Lambda, LeakyReLU, LSTM, MaxPool1D, ReLU,
    RepeatVector, Reshape, Softmax, UpSampling1D, concatenate, dot)
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.regularizers import l2
# CuDNNGRU (used in CRNN1_1D) exists only in standalone Keras 2.x / TF 1.x:
# from keras.layers import CuDNNGRU


def model_build(xLen):
    model = Sequential()
    model.add(Conv1D(filters=50, kernel_size=5, input_shape=(xLen * 2, 1),
                     activation="linear", padding='same'))
    model.add(MaxPool1D(pool_size=2))
    model.add(Conv1D(filters=100, kernel_size=5, activation="linear",
                     padding='same'))
    model.add(MaxPool1D(pool_size=2))
    model.add(Conv1D(filters=200, kernel_size=5, activation="linear",
                     padding='same'))
    model.add(GlobalMaxPooling1D())
    model.add(Dropout(0.1))
    model.add(Dense(100, activation="linear"))
    model.add(Dense(1, activation="linear"))
    return model
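# --- Usage sketch (illustrative, not from the source) ---
# The single linear output unit reads as a regression head, so the MSE loss
# and dummy shapes below are assumptions.
def _demo_model_build(xLen=64):
    import numpy as np
    model = model_build(xLen)
    model.compile(optimizer="adam", loss="mse")
    X = np.random.randn(16, xLen * 2, 1).astype("float32")
    y = np.random.randn(16, 1).astype("float32")
    model.fit(X, y, epochs=1, verbose=0)
    return model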
def CRNN1_1D(input_shape, n_classes):
    X_input = Input(input_shape)
    X = Lambda(lambda q: expand_dims(q, -1), name='expand_dims')(X_input)
    X = Conv1D(16, 9, activation=relu, padding='valid')(X)
    X = MaxPool1D(8)(X)
    X = Conv1D(32, 9, activation=relu, padding='valid')(X)
    X = MaxPool1D(8)(X)
    X = Conv1D(32, 9, activation=relu, padding='valid')(X)
    X = MaxPool1D(6)(X)
    X = CuDNNGRU(32, return_sequences=True)(X)
    X = Dropout(0.1)(X)
    X = CuDNNGRU(32, return_sequences=True)(X)
    X = Dropout(0.1)(X)
    X = Flatten()(X)
    X = Dense(64)(X)
    X = Dropout(0.5)(X)
    X = Activation(relu)(X)
    X = Dense(n_classes, activation=softmax)(X)
    model = Model(inputs=X_input, outputs=X)
    return model
def _build_encoder(self):
    x = Input(shape=(self.step, self.feature_sz + 1))
    # Encoder
    h = Conv1D(256, 16, padding='same', activation='relu')(x)
    h = MaxPool1D(5, padding='same')(h)
    h = Conv1D(128, 8, padding='same', activation='relu')(h)
    h = MaxPool1D(2, padding='same')(h)
    h = Flatten()(h)  # (None, (step // 10) * 128)
    _sz = h.shape[1]
    h = Dense(self.z_sz)(h)
    # Decoder
    out = h
    out = Dense(_sz, activation='relu')(out)
    out = Reshape((-1, 128))(out)  # Be careful with the shape
    out = Conv1D(128, 4, padding='same', activation='relu')(out)
    out = UpSampling1D(2)(out)
    out = Conv1D(self.feature_sz + 1, 16, padding='same')(out)
    out = UpSampling1D(5)(out)
    ae = Model(x, out)
    encoder = Model(x, h)
    return ae, encoder
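# --- Usage sketch (illustrative, not from the source) ---
# _build_encoder only reads step, feature_sz and z_sz from self, so a tiny
# stand-in config suffices. The round trip is only exact when step is
# divisible by the pooling factors (5 * 2 = 10); step=100 is an assumption.
def _demo_autoencoder():
    class Config:
        step, feature_sz, z_sz = 100, 7, 16

    ae, encoder = _build_encoder(Config())
    ae.compile(optimizer="adam", loss="mse")  # trained to reconstruct the input
    print(ae.output_shape)       # (None, 100, 8), matching the input
    print(encoder.output_shape)  # (None, 16), the latent code
    return ae, encoder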
def CNN(dim1, dim2):
    model = Sequential()
    model.add(Conv1D(filters=10, kernel_size=5, strides=1, padding="same",
                     activation='relu', input_shape=(dim1, dim2)))
    model.add(MaxPool1D(pool_size=2))
    model.add(BatchNormalization(momentum=0.9))
    model.add(Conv1D(filters=20, kernel_size=5, strides=1, padding="same",
                     activation='relu'))
    model.add(MaxPool1D(pool_size=2))
    model.add(BatchNormalization(momentum=0.9))
    model.add(Conv1D(filters=30, kernel_size=5))
    model.add(MaxPool1D(pool_size=2))
    model.add(BatchNormalization(momentum=0.9))
    model.add(Flatten())
    model.add(Dense(500))
    model.add(Dropout(0.5))
    model.add(Dense(1, activation='sigmoid'))
    model.compile(loss=LOSS_FN,
                  optimizer=Adam(lr=LEARNING_RATE, amsgrad=True),
                  metrics=['accuracy'])
    model.summary()
    return model
def create_model(self):
    # Built with the functional (Model) API
    main_input = Input(shape=(maxlen,), dtype='float64')
    embedder = Embedding(max_words + 1, 300, input_length=maxlen)
    embed = embedder(main_input)
    # Parallel convolutions with window sizes 3, 4 and 5
    cnn1 = Convolution1D(256, 3, padding='same', strides=1,
                         activation='relu')(embed)
    cnn1 = MaxPool1D(pool_size=4)(cnn1)
    cnn2 = Convolution1D(256, 4, padding='same', strides=1,
                         activation='relu')(embed)
    cnn2 = MaxPool1D(pool_size=4)(cnn2)
    cnn3 = Convolution1D(256, 5, padding='same', strides=1,
                         activation='relu')(embed)
    cnn3 = MaxPool1D(pool_size=4)(cnn3)
    # Concatenate the three branches
    cnn = concatenate([cnn1, cnn2, cnn3], axis=-1)
    flat = Flatten()(cnn)
    drop = Dropout(0.5)(flat)
    main_output = Dense(1, activation='sigmoid')(drop)
    model = Model(inputs=main_input, outputs=main_output)
    model.compile(loss='binary_crossentropy', optimizer=Adam(lr=1e-3),
                  metrics=['accuracy'])
    return model
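# --- Usage sketch (illustrative, not from the source) ---
# maxlen and max_words are module-level globals in the source; the values and
# dummy token-id sequences below are assumptions. The method never touches
# self, so it can be called as a plain function; bind it now because a later
# snippet reuses the name create_model.
maxlen, max_words = 100, 5000
_textcnn_create_model = create_model


def _demo_textcnn():
    import numpy as np
    model = _textcnn_create_model(None)  # self is unused in the body
    X = np.random.randint(0, max_words + 1, size=(32, maxlen))
    y = np.random.randint(0, 2, size=(32, 1))
    model.fit(X, y, epochs=1, verbose=0)
    return model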
def Attention_CNN_Bi_LSTM_AE(n_steps, n_features, activation):
    en_input = Input(shape=[n_steps, n_features])
    e = Conv1D(32, kernel_size=1, padding="SAME", activation=activation)(en_input)
    e = MaxPool1D(pool_size=2)(e)
    e = Conv1D(64, kernel_size=3, padding="SAME", activation=activation)(e)
    e = MaxPool1D(pool_size=2)(e)
    e = Conv1D(128, kernel_size=5, padding="SAME", activation=activation)(e)
    e = MaxPool1D(pool_size=2)(e)
    e = Bidirectional(LSTM(64, recurrent_dropout=0.1, dropout=0.1))(e)
    e = Attention(use_scale=True)([e, e])
    en_output = Dense(get_output_dim(n_steps * n_features),
                      kernel_initializer='lecun_normal', activation='selu')(e)
    encoder = keras.models.Model(inputs=[en_input], outputs=[en_output])
    decoder = keras.models.Sequential([
        RepeatVector(n_steps, input_shape=[get_output_dim(n_steps * n_features)]),
        LSTM(256, return_sequences=True),
        keras.layers.Reshape([n_steps, 256, 1]),
        Conv2DTranspose(filters=16, kernel_size=3, activation=activation),
        Conv2DTranspose(filters=1, kernel_size=3, activation=activation),
        keras.layers.Flatten(),
        Dense(n_steps * n_features),
        keras.layers.Reshape([n_steps, n_features]),
    ])
    return encoder, decoder
def CNN_AE(n_steps, n_features, activation):
    encoder = keras.models.Sequential([
        Conv1D(32, kernel_size=1, padding="SAME", activation=activation,
               input_shape=[n_steps, n_features]),
        MaxPool1D(pool_size=2),
        Conv1D(64, kernel_size=3, padding="SAME", activation=activation),
        MaxPool1D(pool_size=2),
        Conv1D(128, kernel_size=5, padding="SAME", activation=activation),
        MaxPool1D(pool_size=2),
        keras.layers.Flatten(),
        Dense(get_output_dim(n_steps * n_features),
              kernel_initializer='lecun_normal', activation='selu'),
    ])
    decoder = keras.models.Sequential([
        keras.layers.Reshape(
            [get_output_dim(n_steps * n_features), 1, 1],
            input_shape=[get_output_dim(n_steps * n_features)]),
        Conv2DTranspose(filters=64, kernel_size=5, activation=activation),
        Conv2DTranspose(filters=32, kernel_size=3, activation=activation),
        Conv2DTranspose(filters=1, kernel_size=1, activation=activation),
        keras.layers.Flatten(),
        Dense(n_steps * n_features),
        keras.layers.Reshape([n_steps, n_features]),
    ])
    return encoder, decoder
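# --- Usage sketch (illustrative, not from the source) ---
# get_output_dim is referenced by the AE builders but defined elsewhere; the
# bottleneck rule below is a hypothetical stand-in, as are the shapes and the
# way the two halves are chained into one trainable model.
def get_output_dim(flat_size):  # hypothetical stand-in
    return max(2, flat_size // 10)


def _demo_cnn_ae(n_steps=32, n_features=4):
    import numpy as np
    encoder, decoder = CNN_AE(n_steps, n_features, activation="relu")
    inp = Input(shape=[n_steps, n_features])
    autoencoder = Model(inp, decoder(encoder(inp)))
    autoencoder.compile(optimizer="adam", loss="mse")
    X = np.random.randn(8, n_steps, n_features).astype("float32")
    autoencoder.fit(X, X, epochs=1, verbose=0)
    return autoencoder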
def create_cnn(self, params, index):
    input = keras.Input(batch_shape=(params[index]["batch_size"],
                                     params[index]["max_code_length"] + 2,
                                     self.input_embedding_size),
                        name='place_holder_input')
    conv = Conv1D(1024, 7, strides=1, padding="same", activation="relu",
                  name='conv_1')(input)
    if params[index]['BN']:
        conv = BatchNormalization()(conv)
    conv = MaxPool1D(3, padding="valid", name="max_pool_1")(conv)
    conv = Conv1D(1024, 7, strides=1, padding="same", activation="relu",
                  name='conv_2')(conv)
    if params[index]['BN']:
        conv = BatchNormalization()(conv)
    conv = MaxPool1D(3, padding="valid", name="max_pool_2")(conv)
    conv = Conv1D(1024, 3, strides=1, padding="same", activation="relu",
                  name='conv_3')(conv)
    if params[index]['BN']:
        conv = BatchNormalization()(conv)
    conv = Conv1D(1024, 3, strides=1, padding="same", activation="relu",
                  name='conv_4')(conv)
    if params[index]['BN']:
        conv = BatchNormalization()(conv)
    conv = Conv1D(1024, 3, strides=1, padding="same", activation="relu",
                  name='conv_5')(conv)
    if params[index]['BN']:
        conv = BatchNormalization()(conv)
    conv = Conv1D(1024, 3, strides=1, padding="same", activation="relu",
                  name='conv_6')(conv)
    if params[index]['BN']:
        conv = BatchNormalization()(conv)
    conv = MaxPool1D(3, padding="valid", name="max_pool_3")(conv)
    conv = Flatten()(conv)
    return keras.Model(input, conv, name="siamese_cnn")
def CNN_Bi_LSTM_AE(n_steps, n_features, activation):
    encoder = keras.models.Sequential([
        Conv1D(32, kernel_size=1, padding="SAME", activation=activation,
               input_shape=[n_steps, n_features]),
        MaxPool1D(pool_size=2),
        Conv1D(64, kernel_size=3, padding="SAME", activation=activation),
        MaxPool1D(pool_size=2),
        Conv1D(128, kernel_size=5, padding="SAME", activation=activation),
        MaxPool1D(pool_size=2),
        Bidirectional(LSTM(128)),
        Dense(get_output_dim(n_steps * n_features),
              kernel_initializer='lecun_normal', activation='selu'),
    ])
    decoder = keras.models.Sequential([
        # RepeatVector(n_steps, input_shape=[get_output_dim(n_steps * n_features)]),
        # LSTM(16, return_sequences=True),  # LSTM is significant to accuracy!!!
        keras.layers.Reshape(
            [get_output_dim(n_steps * n_features), 1, 1],
            input_shape=[get_output_dim(n_steps * n_features)]),
        Conv2DTranspose(filters=32, kernel_size=3, activation=activation),
        Conv2DTranspose(filters=16, kernel_size=1, activation=activation),
        keras.layers.Flatten(),
        # Dense((n_steps * n_features) / 1.2, activation=activation),
        Dense(n_steps * n_features),
        keras.layers.Reshape([n_steps, n_features]),
    ])
    return encoder, decoder
def Build():
    main_input = Input(shape=(maxlen,), dtype='float64')
    embedder = Embedding(304, 256, input_length=maxlen)
    embed = embedder(main_input)
    # avg = GlobalAveragePooling1D()(embed)
    # cnn1 block, kernel_size = 3
    conv1_1 = Conv1D(64, 3, padding='same', activation='relu')(embed)
    conv1_2 = Conv1D(64, 3, padding='same', activation='relu')(conv1_1)
    cnn1 = MaxPool1D(pool_size=2)(conv1_2)
    conv1_1 = Conv1D(64, 3, padding='same', activation='relu')(cnn1)
    conv1_2 = Conv1D(64, 3, padding='same', activation='relu')(conv1_1)
    cnn1 = MaxPool1D(pool_size=2)(conv1_2)
    conv1_1 = Conv1D(64, 3, padding='same', activation='relu')(cnn1)
    conv1_2 = Conv1D(64, 3, padding='same', activation='relu')(conv1_1)
    cnn1 = MaxPool1D(pool_size=2)(conv1_2)
    rl = LSTM(64)(cnn1)
    # flat = Flatten()(cnn3)
    # drop = Dropout(0.5)(flat)
    fc = Dense(64)(rl)
    main_output = Dense(8, activation='softmax')(fc)
    model = Model(inputs=main_input, outputs=main_output)
    return model
def create_model(sample_rate):
    inputs = Input((sample_rate, 1))
    # 1D convolutional layers; the first two blocks include max pooling
    X = Conv1D(16, kernel_size=64, strides=2, activation="relu")(inputs)
    X = BatchNormalization()(X)
    X = MaxPool1D(pool_size=8, strides=8)(X)
    X = Conv1D(32, kernel_size=32, strides=2, activation="relu")(X)
    X = BatchNormalization()(X)
    X = MaxPool1D(pool_size=8, strides=8)(X)
    X = Conv1D(64, kernel_size=16, strides=2, activation="relu")(X)
    X = BatchNormalization()(X)
    X = Conv1D(128, kernel_size=8, strides=2, activation="relu")(X)
    X = BatchNormalization()(X)
    # Fully connected layers
    X = Flatten()(X)
    X = Dense(128, activation="relu")(X)
    X = Dropout(rate=0.25)(X)
    X = Dense(64, activation="relu")(X)
    X = Dropout(rate=0.25)(X)
    outputs = Dense(1, activation="sigmoid")(X)
    model = Model(inputs=inputs, outputs=outputs)
    return model
def get_model():
    nclass = 5
    inp = Input(shape=(187, 1))
    img_1 = Convolution1D(16, kernel_size=5, activation=activations.relu,
                          padding="valid")(inp)
    img_1 = Convolution1D(16, kernel_size=5, activation=activations.relu,
                          padding="valid")(img_1)
    img_1 = MaxPool1D(pool_size=2)(img_1)
    img_1 = Dropout(rate=0.1)(img_1)
    img_1 = Convolution1D(32, kernel_size=3, activation=activations.relu,
                          padding="valid")(img_1)
    img_1 = Convolution1D(32, kernel_size=3, activation=activations.relu,
                          padding="valid")(img_1)
    img_1 = MaxPool1D(pool_size=2)(img_1)
    img_1 = Dropout(rate=0.1)(img_1)
    img_1 = Convolution1D(32, kernel_size=3, activation=activations.relu,
                          padding="valid")(img_1)
    img_1 = Convolution1D(32, kernel_size=3, activation=activations.relu,
                          padding="valid")(img_1)
    img_1 = MaxPool1D(pool_size=2)(img_1)
    img_1 = Dropout(rate=0.1)(img_1)
    img_1 = Convolution1D(256, kernel_size=3, activation=activations.relu,
                          padding="valid")(img_1)
    img_1 = Convolution1D(256, kernel_size=3, activation=activations.relu,
                          padding="valid")(img_1)
    img_1 = GlobalMaxPool1D()(img_1)
    img_1 = Dropout(rate=0.2)(img_1)
    dense_1 = Dense(64, activation=activations.relu, name="dense_1")(img_1)
    dense_1 = Dense(64, activation=activations.relu, name="dense_2")(dense_1)
    dense_1 = Dense(nclass, activation=activations.softmax,
                    name="dense_3_mitbih")(dense_1)
    model = models.Model(inputs=inp, outputs=dense_1)
    opt = optimizers.Adam(0.001)
    model.compile(optimizer=opt, loss=losses.sparse_categorical_crossentropy,
                  metrics=['acc'])
    # model.summary()
    return model
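# --- Usage sketch (illustrative, not from the source) ---
# The (187, 1) input and 5 classes match the MIT-BIH heartbeat task this
# snippet appears to target; the dummy data is an assumption. Bind the builder
# now because later snippets reuse the name get_model.
_mitbih_get_model = get_model


def _demo_mitbih():
    import numpy as np
    model = _mitbih_get_model()
    X = np.random.randn(32, 187, 1).astype("float32")
    y = np.random.randint(0, 5, size=(32,))  # sparse integer class labels
    model.fit(X, y, epochs=1, verbose=0)
    return model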
def basic_cnn(num_frame, num_artist):
    x_input = Input(shape=(num_frame, 128))
    out = Conv1D(128, kernel_size=3, padding='same', use_bias=True,
                 kernel_regularizer=l2(1e-5),
                 kernel_initializer='he_normal')(x_input)
    out = BatchNormalization(axis=2)(out)
    out = LeakyReLU(0.2)(out)
    out = MaxPool1D(pool_size=3)(out)
    out = Conv1D(128, kernel_size=3, padding='same', use_bias=True,
                 kernel_regularizer=l2(1e-5),
                 kernel_initializer='he_normal')(out)
    out = BatchNormalization(axis=2)(out)
    out = LeakyReLU(0.2)(out)
    out = MaxPool1D(pool_size=3)(out)
    out = Conv1D(128, kernel_size=3, padding='same', use_bias=True,
                 kernel_regularizer=l2(1e-5),
                 kernel_initializer='he_normal')(out)
    out = BatchNormalization(axis=2)(out)
    out = LeakyReLU(0.2)(out)
    out = MaxPool1D(pool_size=3)(out)
    out = Dropout(0.5)(out)
    out = Conv1D(128, kernel_size=3, padding='same', use_bias=True,
                 kernel_regularizer=l2(1e-5),
                 kernel_initializer='he_normal')(out)
    out = BatchNormalization(axis=2)(out)
    out = LeakyReLU(0.2)(out)
    out = MaxPool1D(pool_size=3)(out)
    out = Dropout(0.5)(out)
    out = Conv1D(256, kernel_size=1, padding='same', use_bias=True,
                 kernel_regularizer=l2(1e-5),
                 kernel_initializer='he_normal')(out)
    out = BatchNormalization(axis=2)(out)
    out = LeakyReLU(0.2)(out)
    out = Dropout(0.5)(out)
    out = GlobalAvgPool1D()(out)
    out = Dense(num_artist, activation='softmax')(out)
    model = Model(inputs=x_input, outputs=out)
    return model
def new_model():
    input_shape = (1164, 1)
    input = Input(shape=input_shape)
    # Conv block 1
    x = Conv1D(128, 3, padding='same', name='conv1')(input)
    x = BatchNormalization(name='bn1')(x)
    x = ReLU()(x)
    x = MaxPool1D(pool_size=2, name='pool1')(x)
    # Conv block 2
    x = Conv1D(128, 3, padding='same', name='conv2')(x)
    x = BatchNormalization(name='bn2')(x)
    x = ReLU()(x)
    x = MaxPool1D(pool_size=2, name='pool2')(x)
    # Conv block 3
    x = Conv1D(128, 3, padding='same', name='conv3')(x)
    x = BatchNormalization(name='bn3')(x)
    x = ReLU()(x)
    x = MaxPool1D(pool_size=2, name='pool3')(x)
    x = Flatten()(x)
    output = Dense(1, activation='linear', name='output')(x)
    model = Model(inputs=input, outputs=output)
    return model
def D_v3(self):
    '''
    url: https://github.com/MikhailMurashov/ecgGAN
    '''
    model = Sequential(name='Discriminator_v3')
    model.add(Conv1D(filters=32, kernel_size=16, strides=1, padding='same'))
    model.add(LeakyReLU())
    # model.add(Dropout(0.4))
    model.add(Conv1D(filters=64, kernel_size=16, strides=1, padding='same'))
    model.add(LeakyReLU())
    model.add(MaxPool1D(pool_size=2))
    model.add(Conv1D(filters=128, kernel_size=16, strides=1, padding='same'))
    model.add(LeakyReLU())
    # model.add(Dropout(0.4))
    model.add(Conv1D(filters=256, kernel_size=16, strides=1, padding='same'))
    model.add(LeakyReLU())
    model.add(MaxPool1D(pool_size=2))
    model.add(Flatten())
    model.add(Dense(1))

    signal = Input(shape=self.input_shape)
    validity = model(signal)
    model.summary()  # the Sequential is only built once it is called above
    return Model(inputs=signal, outputs=validity)
def conv1d_v1(input_shape, n_classes):
    X_input = Input(shape=input_shape)
    X = Lambda(lambda q: expand_dims(q, -1), name='expand_dims')(X_input)
    X = Conv1D(16, 9, activation=relu, padding='valid')(X)
    X = Conv1D(16, 9, activation=relu, padding='valid')(X)
    X = MaxPool1D(16)(X)
    X = Dropout(0.1)(X)
    X = Conv1D(32, 3, activation=relu, padding='valid')(X)
    X = Conv1D(32, 3, activation=relu, padding='valid')(X)
    X = MaxPool1D(4)(X)
    X = Dropout(0.1)(X)
    X = Conv1D(32, 3, activation=relu, padding='valid')(X)
    X = Conv1D(32, 3, activation=relu, padding='valid')(X)
    X = MaxPool1D(4)(X)
    X = Dropout(0.1)(X)
    X = Conv1D(256, 3, activation=relu, padding='valid')(X)
    X = Conv1D(256, 3, activation=relu, padding='valid')(X)
    X = GlobalMaxPool1D()(X)
    X = Dense(64, activation=relu)(X)
    X = Dense(128, activation=relu)(X)
    X = Dense(n_classes, activation=softmax)(X)
    model = Model(inputs=X_input, outputs=X)
    return model
def skeleton_cnn(num_frame, weights):
    x_input = Input(shape=(num_frame, 128))

    # audio model
    conv1 = Conv1D(128, kernel_size=3, padding='same', use_bias=True,
                   kernel_regularizer=l2(1e-5), kernel_initializer='he_normal')
    bn1 = BatchNormalization()
    activ1 = LeakyReLU(0.2)
    # activ1 = Activation('relu')
    mp1 = MaxPool1D(pool_size=3)
    conv2 = Conv1D(128, kernel_size=3, padding='same', use_bias=True,
                   kernel_regularizer=l2(1e-5), kernel_initializer='he_normal')
    bn2 = BatchNormalization()
    activ2 = LeakyReLU(0.2)
    # activ2 = Activation('relu')
    mp2 = MaxPool1D(pool_size=3)
    conv3 = Conv1D(128, kernel_size=3, padding='same', use_bias=True,
                   kernel_regularizer=l2(1e-5), kernel_initializer='he_normal')
    bn3 = BatchNormalization()
    activ3 = LeakyReLU(0.2)
    # activ3 = Activation('relu')
    mp3 = MaxPool1D(pool_size=3)
    do3 = Dropout(0.5)
    conv4 = Conv1D(128, kernel_size=3, padding='same', use_bias=True,
                   kernel_regularizer=l2(1e-5), kernel_initializer='he_normal')
    bn4 = BatchNormalization()
    activ4 = LeakyReLU(0.2)
    # activ4 = Activation('relu')
    mp4 = MaxPool1D(pool_size=3)
    conv5 = Conv1D(256, kernel_size=1, padding='same', use_bias=True,
                   kernel_regularizer=l2(1e-5), kernel_initializer='he_normal')
    bn5 = BatchNormalization()
    activ5 = LeakyReLU(0.2)
    # activ5 = Activation('relu')
    do5 = Dropout(0.5)
    ap = GlobalAvgPool1D()

    # Anchor
    out = mp1(activ1(bn1(conv1(x_input))))
    out = mp2(activ2(bn2(conv2(out))))
    out = mp3(activ3(bn3(conv3(out))))
    out = do3(out)
    out = mp4(activ4(bn4(conv4(out))))
    out = activ5(bn5(conv5(out)))
    out = do5(out)
    out = ap(out)
    # out = Dense(num_artist, activation='softmax')(out)
    out = dot([out, out], axes=1, normalize=True)
    out = Activation('linear')(out)
    model = Model(inputs=x_input, outputs=out)
    model.load_weights(weights)
    return model
def make_CNN_model(self, input_shape, num_authors):
    """
    Implements the CNN model from "Code authorship identification using
    convolutional neural networks".
    """
    param_mapping.map_params(self.params["model_params"])
    input = keras.Input(batch_shape=(None, input_shape),
                        name='secondary_model_input')
    embedding = Embedding(self.encoding_len, 256,
                          input_length=input_shape)(input)
    conv = Conv1D(128, 3, strides=1, padding="same", activation="relu",
                  name='conv_1', kernel_regularizer='l2')(embedding)
    conv = Dropout(0.6)(conv)
    conv = MaxPool1D(4, padding="valid", name="max_pool_1")(conv)
    conv = Conv1D(128, 5, strides=1, padding="same", activation="relu",
                  name='conv_2', kernel_regularizer='l2')(conv)
    conv = Dropout(0.6)(conv)
    conv = MaxPool1D(4, padding="valid", name="max_pool_2")(conv)
    conv = Conv1D(128, 7, strides=1, padding="same", activation="relu",
                  name='conv_3', kernel_regularizer='l2')(conv)
    conv = Dropout(0.6)(conv)
    conv = MaxPool1D(4, padding="valid", name="max_pool_3")(conv)
    conv = Flatten()(conv)
    prediction_layer = Dense(num_authors, name="prediction",
                             kernel_regularizer='l2')
    softmax = Softmax(name="prediction_probs")
    prediction = prediction_layer(conv)
    prediction_probs = softmax(prediction)
    model = keras.Model(inputs=input, outputs=prediction_probs)
    model.summary()
    return model
def get_model():
    model = Sequential()
    model.add(Conv1D(filters=32, kernel_size=3, padding='same',
                     activation='relu', input_shape=(11, 1)))
    model.add(Conv1D(filters=32, kernel_size=3, padding='same',
                     activation='relu'))
    model.add(MaxPool1D(2))
    model.add(Dropout(0.25))
    model.add(BatchNormalization())
    model.add(Conv1D(filters=64, kernel_size=3, padding='same',
                     activation='relu'))
    model.add(Conv1D(filters=64, kernel_size=3, padding='same',
                     activation='relu'))
    model.add(MaxPool1D(2))
    model.add(Dropout(0.25))
    model.add(BatchNormalization())
    model.add(Conv1D(filters=128, kernel_size=3, padding='same',
                     activation='relu'))
    model.add(Conv1D(filters=128, kernel_size=3, padding='same',
                     activation='relu'))
    model.add(MaxPool1D(2))
    model.add(Dropout(0.25))
    model.add(BatchNormalization())
    # model.add(Conv1D(filters=256, kernel_size=3, padding='same', activation='relu'))
    # model.add(Conv1D(filters=256, kernel_size=3, padding='same', activation='relu'))
    # model.add(Dropout(0.25))
    # model.add(BatchNormalization())
    model.add(Flatten())
    model.add(Dense(32))
    model.add(Dropout(0.25))
    model.add(BatchNormalization())
    model.add(Dense(2, activation="softmax"))
    opt = keras.optimizers.Adam()
    model.compile(loss='categorical_crossentropy', optimizer=opt,
                  metrics=['accuracy'])
    model.summary()
    return model
def softwareDefectCNN1D():
    '''
    CNN model with 1D convolutional layers
    '''
    classifier = Sequential()
    classifier.add(Conv1D(96, 1, input_shape=(1200, 1), activation='relu'))
    classifier.add(MaxPool1D(pool_size=1, strides=2))
    # classifier.add(BatchNormalization())
    classifier.add(Conv1D(256, 1, activation='relu'))
    classifier.add(MaxPool1D(pool_size=1, strides=2))
    # classifier.add(BatchNormalization())
    classifier.add(Conv1D(384, 1, activation='relu'))
    classifier.add(MaxPool1D(pool_size=1, strides=2))
    # classifier.add(BatchNormalization())
    # classifier.add(Conv2D(384, 1, activation='relu'))
    # classifier.add(MaxPooling2D(pool_size=(1, 1), strides=2))
    # classifier.add(BatchNormalization())
    classifier.add(Conv1D(256, 1, activation='relu'))
    classifier.add(Flatten())
    classifier.add(Dense(1024, activation='relu'))
    classifier.add(Dropout(0.6))
    classifier.add(Dense(512, activation='relu'))
    classifier.add(Dropout(0.6))
    classifier.add(Dense(64, activation='relu'))
    classifier.add(Dropout(0.6))
    classifier.add(Dense(64, activation='relu'))
    classifier.add(Dropout(0.6))
    classifier.add(Dense(16, activation='relu'))
    classifier.add(Dropout(0.6))
    classifier.add(Dense(1, activation='sigmoid'))
    return classifier
def buildNets1DLSTM(self):
    """
    Using the lists below, build and compile several versions of a 1D
    convolutional neural network followed by an LSTM.
    """
    models = []
    denseLayers = [0]
    CNNLayers = [3]
    filters = [128]
    for dense in denseLayers:
        for CNNLayer in CNNLayers:
            for filt in filters:
                nameOfModel = "{}-conv-{}-filter-{}-dense-{}".format(
                    CNNLayer, filt, dense, int(time.time()))
                model = Sequential()
                model.add(Conv1D(input_shape=(128, 3), kernel_size=3,
                                 padding="valid", filters=filt))
                model.add(Activation("elu"))
                model.add(BatchNormalization())
                model.add(MaxPool1D(pool_size=2, padding="valid"))
                for _ in range(CNNLayer):
                    model.add(Conv1D(kernel_size=2, padding="valid",
                                     filters=filt))
                    model.add(Activation("elu"))
                    model.add(BatchNormalization())
                    model.add(MaxPool1D(pool_size=2, padding="valid"))
                # model.add(Flatten())
                model.add(LSTM(100))
                for _ in range(dense):
                    model.add(Dense(filt))
                    model.add(Activation("elu"))
                model.add(Dense(self.nClasses, activation="softmax"))
                model.compile(loss='categorical_crossentropy',
                              optimizer='adam', metrics=['accuracy'])
                # model.summary()
                keyVals = {"model": model, "name": nameOfModel}
                models.append(keyVals)
    self.models = models
def get_model(self):
    inp = Input(shape=(187, 1))
    img_1 = Convolution1D(16, kernel_size=5, activation=activations.relu,
                          padding="valid")(inp)
    img_1 = Convolution1D(16, kernel_size=5, activation=activations.relu,
                          padding="valid")(img_1)
    img_1 = MaxPool1D(pool_size=2)(img_1)
    img_1 = Dropout(rate=0.1)(img_1)
    img_1 = Convolution1D(32, kernel_size=3, activation=activations.relu,
                          padding="valid")(img_1)
    img_1 = Convolution1D(32, kernel_size=3, activation=activations.relu,
                          padding="valid")(img_1)
    img_1 = MaxPool1D(pool_size=2)(img_1)
    img_1 = Dropout(rate=0.1)(img_1)
    img_1 = Convolution1D(32, kernel_size=3, activation=activations.relu,
                          padding="valid")(img_1)
    img_1 = Convolution1D(32, kernel_size=3, activation=activations.relu,
                          padding="valid")(img_1)
    img_1 = MaxPool1D(pool_size=2)(img_1)
    img_1 = Dropout(rate=0.1)(img_1)
    img_1 = Convolution1D(256, kernel_size=3, activation=activations.relu,
                          padding="valid")(img_1)
    img_1 = Convolution1D(256, kernel_size=3, activation=activations.relu,
                          padding="valid")(img_1)
    img_1 = GlobalMaxPool1D()(img_1)
    img_1 = Dropout(rate=0.2)(img_1)
    dense_1 = Dense(64, activation=activations.relu, name="dense_1")(img_1)
    dense_1 = Dense(64, activation=activations.relu, name="dense_2")(dense_1)
    dense_1 = Dense(self.output_dim, activation=self.last_activation,
                    name="dense_3_mitbih")(dense_1)
    model = models.Model(inputs=inp, outputs=dense_1, name=self.name)
    return model
def build_deep_enhancers(window_size: int, nucleotides_number: int = 4) -> Sequential:
    """Return the fixed DeepEnhancer model.

    Parameters
    --------------------------
    window_size: int,
        Window size of the nucleotide windows.
    nucleotides_number: int,
        Number of nucleotides considered in each window.
        By default, the value is 4.

    Returns
    --------------------------
    DeepEnhancer model.

    References
    --------------------------
    https://www.nature.com/articles/nmeth.2987
    """
    model = Sequential([
        Input((window_size, nucleotides_number)),
        Conv1D(filters=128, kernel_size=8),
        BatchNormalization(),
        Activation("relu"),
        Conv1D(filters=128, kernel_size=8),
        BatchNormalization(),
        Activation("relu"),
        MaxPool1D(pool_size=2),
        Conv1D(filters=64, kernel_size=3),
        BatchNormalization(),
        Activation("relu"),
        Conv1D(filters=64, kernel_size=3),
        BatchNormalization(),
        Activation("relu"),
        MaxPool1D(pool_size=2),
        Flatten(),
        Dense(units=256, activation="relu"),
        Dropout(rate=0.1),
        Dense(units=128, activation="relu"),
        Dropout(rate=0.1),
        Dense(units=1, activation="sigmoid"),
    ], name="DeepEnhancer")
    model.compile(optimizer="nadam", loss="binary_crossentropy",
                  metrics=get_model_metrics())
    return model
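# --- Usage sketch (illustrative, not from the source) ---
# One-hot encoded DNA windows (A/C/G/T -> 4 channels); the window size, dummy
# labels and the get_model_metrics stand-in below are assumptions
# (get_model_metrics is defined elsewhere in the source).
def get_model_metrics():  # hypothetical stand-in
    return ["accuracy", keras.metrics.AUC(name="auprc", curve="PR")]


def _demo_deep_enhancers(window_size=200):
    import numpy as np
    model = build_deep_enhancers(window_size)
    X = np.eye(4)[np.random.randint(0, 4, size=(16, window_size))]  # one-hot
    y = np.random.randint(0, 2, size=(16, 1))  # enhancer / non-enhancer
    model.fit(X, y, epochs=1, verbose=0)
    return model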
def buildNets1D(self):
    models = []
    denseLayers = [0, 1, 2]
    CNNLayers = [1, 2, 3, 4, 5]
    filters = [2, 4, 8, 16, 32, 64]
    for dense in denseLayers:
        for CNNLayer in CNNLayers:
            for filt in filters:
                nameOfModel = "{}-conv-{}-filter-{}-dense-{}".format(
                    CNNLayer, filt, dense, int(time.time()))
                model = Sequential()
                model.add(Conv1D(input_shape=(128, 3), kernel_size=3,
                                 padding="valid", filters=filt))
                model.add(Activation("elu"))
                model.add(BatchNormalization())
                model.add(MaxPool1D(pool_size=2, padding="valid"))
                for _ in range(CNNLayer):
                    model.add(Conv1D(kernel_size=2, padding="valid",
                                     filters=filt))
                    model.add(Activation("elu"))
                    model.add(BatchNormalization())
                    model.add(MaxPool1D(pool_size=2, padding="valid"))
                model.add(Flatten())
                for _ in range(dense):
                    model.add(Dense(filt))
                    model.add(Activation("elu"))
                model.add(Dense(self.nClasses, activation="softmax"))
                model.compile(loss='categorical_crossentropy',
                              optimizer='adam', metrics=['accuracy'])
                keyVals = {"model": model, "name": nameOfModel}
                models.append(keyVals)
    self.models = models
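# --- Usage sketch (illustrative, not from the source) ---
# The builders above hard-code a (128, 3) input; nClasses=3 and the dummy
# data are assumptions. A tiny stand-in object supplies `self`.
def _demo_grid():
    import numpy as np

    class Holder:
        nClasses = 3

    holder = Holder()
    buildNets1D(holder)  # populates holder.models with the whole grid
    X = np.random.randn(8, 128, 3).astype("float32")
    y = np.eye(3)[np.random.randint(0, 3, size=8)]  # one-hot labels
    for entry in holder.models[:2]:  # train a couple of the grid entries
        entry["model"].fit(X, y, epochs=1, verbose=0)
    return holder.models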
def build(self, input_shape) -> None:
    self.embedding: Embedding = Embedding(input_dim=self.vocabulary_size,
                                          output_dim=self.embedding_size,
                                          input_length=self.sentence_len,
                                          trainable=True)
    self.conv_1: Conv1D = Conv1D(filters=self.conv_filter, kernel_size=3,
                                 activation="relu", name="conv_1")
    self.conv_2: Conv1D = Conv1D(filters=self.conv_filter, kernel_size=4,
                                 activation="relu", name="conv_2")
    self.conv_3: Conv1D = Conv1D(filters=self.conv_filter, kernel_size=5,
                                 activation="relu", name="conv_3")
    if not self.global_max_pool:
        self.pool_1: MaxPool1D = MaxPool1D(pool_size=self.pool_size,
                                           strides=1, name="pool_1")
        self.pool_2: MaxPool1D = MaxPool1D(pool_size=self.pool_size,
                                           strides=1, name="pool_2")
        self.pool_3: MaxPool1D = MaxPool1D(pool_size=self.pool_size,
                                           strides=1, name="pool_3")
    else:
        self.pool_1: GlobalMaxPool1D = GlobalMaxPool1D(name="pool_1")
        self.pool_2: GlobalMaxPool1D = GlobalMaxPool1D(name="pool_2")
        self.pool_3: GlobalMaxPool1D = GlobalMaxPool1D(name="pool_3")
    self.concatenate: Concatenate = Concatenate(axis=1)
    self.flatten: Flatten = Flatten()
    self.dropout_1: Dropout = Dropout(self.drop_rate, name="dropout_1")
    self.dense1 = Dense(self.dense_size, activation="sigmoid",
                        kernel_regularizer=regularizers.l1_l2(
                            self.l1_regularization, self.l2_regularization))
    self.dropout_2: Dropout = Dropout(self.drop_rate, name="dropout_2")
    self.dense: Dense = Dense(self.class_num, activation="softmax",
                              kernel_regularizer=regularizers.l1_l2(
                                  self.l1_regularization,
                                  self.l2_regularization))
    super(TextCNN, self).build(input_shape)
def encoder(seq_dim=3):
    in_seq_input = Input((None, seq_dim), name='in_sequence')
    in_seq = Bidirectional(GRU(128, return_sequences=True),
                           name="gru_1")(in_seq_input)
    in_seq = MaxPool1D(pool_size=10)(in_seq)
    in_seq = Bidirectional(GRU(256, return_sequences=True),
                           name="gru_2")(in_seq)
    in_seq = MaxPool1D(pool_size=10)(in_seq)
    in_seq = Bidirectional(GRU(1024, return_sequences=True),
                           name="gru_3")(in_seq)
    model = Model(in_seq_input, in_seq)
    return model
def build_model(self):
    sequence_input = Input(shape=(self.seq, self.features), dtype='float32',
                           name='sequence_input')
    conv_a = Conv1D(self.params['conv1'], kernel_size=3, activation='relu',
                    padding='same')(sequence_input)
    conv_a = Conv1D(self.params['conv2'], kernel_size=3, activation='relu',
                    padding='same')(conv_a)
    conv_a = Conv1D(self.params['conv3'], kernel_size=3, activation='relu',
                    padding='same')(conv_a)
    maxp = MaxPool1D(2, 2)(conv_a)
    conv_a = Conv1D(self.params['conv4'], kernel_size=3, activation='relu',
                    padding='same')(maxp)
    conv_a = Conv1D(self.params['conv5'], kernel_size=3, activation='relu',
                    padding='same')(conv_a)
    conv_a = Conv1D(self.params['conv6'], kernel_size=3, activation='relu',
                    padding='same')(conv_a)
    maxp = MaxPool1D(2, 2)(conv_a)
    flt = Flatten()(maxp)
    fc1 = Dense(self.params['d1'], activation='relu')(flt)
    fc2 = Dense(self.params['d2'], activation='relu')(fc1)
    fc3 = Dense(self.params['classes'], activation='softmax')(fc2)
    model = Model(sequence_input, fc3)
    adam = optimizers.Adam(lr=self.params['lr'])
    model.compile(loss=self.loss.focal_loss(), optimizer=adam,
                  metrics=['acc'])
    return model
def create_cnn(self, params, index):
    input = keras.Input(batch_shape=(params[index]["batch_size"],
                                     params[index]["max_lines"],
                                     params[index]["max_line_length"],
                                     self.input_embedding_size),
                        name='place_holder_input')
    conv = Conv2D(128, [1, 30], strides=[1, 1], padding="same",
                  activation="relu", name='conv_1')(input)
    conv = Conv2D(128, [1, 15], strides=[1, 2], padding="same",
                  activation="relu", name='conv_2')(conv)
    conv = Conv2D(128, [1, 7], strides=[1, 2], padding="same",
                  activation="relu", name='conv_3')(conv)
    conv = Conv2D(128, [5, 30], strides=[1, 999], padding="same",
                  activation="relu", name='conv_4')(conv)
    # Drop the width axis collapsed by the stride-999 conv (older Keras
    # versions may require wrapping this backend op in a Lambda layer).
    conv = K.squeeze(conv, 2)
    if not params[index]['maxpool']:
        conv = Conv1D(128, 10, strides=1, padding="same", activation="relu",
                      name='conv_5')(conv)
        conv = Conv1D(128, 4, strides=2, padding="same", activation="relu",
                      name='conv_6')(conv)
        conv = Conv1D(128, 2, strides=1, padding="same", activation="relu",
                      name='conv_7')(conv)
    else:
        conv = MaxPool1D(2, padding="valid", name="max_pool")(conv)
    conv = Flatten()(conv)
    return keras.Model(input, conv, name="siamese_cnn")
def build_encoder(n_input_dim, n_encoding_dim, n_conv_block, n_conv_layers,
                  n_conv_filters, conv_filter_size, n_dense_layers,
                  n_dense_units, activation, batch_norm=False, l2_lambda=0,
                  dropout_prob=0):
    ## build model graph
    # convolution part
    input_op = Input([FEATURE_VEC_LEN, n_input_dim])
    x = input_op
    for i in range(n_conv_block):
        x = conv_block(n_conv_filters[i], conv_filter_size[i],
                       n_conv_layers[i], activation, batch_norm, l2_lambda,
                       dropout_prob)(x)
        x = MaxPool1D((2,))(x)
    x = GlobalAvgPool1D()(x)
    # dense part
    x = dense_block(n_dense_units, n_dense_layers, activation, batch_norm,
                    l2_lambda, dropout_prob)(x)
    # produce encoding
    encoding = Dense(n_encoding_dim)(x)
    return Model(input_op, encoding)
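# --- Sketch of the referenced helpers (assumptions, not from the source) ---
# FEATURE_VEC_LEN, conv_block and dense_block are used above but defined
# elsewhere; these stand-ins show one plausible shape for them.
FEATURE_VEC_LEN = 128  # hypothetical sequence length


def conv_block(n_filters, filter_size, n_layers, activation,
               batch_norm=False, l2_lambda=0, dropout_prob=0):
    def apply(x):
        for _ in range(n_layers):
            x = Conv1D(n_filters, filter_size, padding="same",
                       kernel_regularizer=l2(l2_lambda))(x)
            if batch_norm:
                x = BatchNormalization()(x)
            x = Activation(activation)(x)
            if dropout_prob:
                x = Dropout(dropout_prob)(x)
        return x
    return apply


def dense_block(n_units, n_layers, activation, batch_norm=False,
                l2_lambda=0, dropout_prob=0):
    def apply(x):
        for _ in range(n_layers):
            x = Dense(n_units, kernel_regularizer=l2(l2_lambda))(x)
            if batch_norm:
                x = BatchNormalization()(x)
            x = Activation(activation)(x)
            if dropout_prob:
                x = Dropout(dropout_prob)(x)
        return x
    return apply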
def block(layer, fs, ks, ps):
    layer = Conv1D(filters=fs, kernel_size=ks, padding="same")(layer)
    layer = BatchNormalization()(layer)
    layer = Activation('relu')(layer)
    layer = MaxPool1D(pool_size=ps, padding='same')(layer)
    layer = Dropout(0.25)(layer)
    return layer
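# --- Usage sketch (illustrative, not from the source) ---
# Stacking the conv/BN/ReLU/pool/dropout block into a small classifier; the
# input shape, filter counts and 3-class head are assumptions.
def _demo_block_stack(timesteps=128, channels=1, n_classes=3):
    inp = Input(shape=(timesteps, channels))
    x = block(inp, fs=32, ks=5, ps=2)
    x = block(x, fs=64, ks=3, ps=2)
    x = Flatten()(x)
    out = Dense(n_classes, activation="softmax")(x)
    return Model(inp, out)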