# Imports assumed to come from tensorflow.keras; in particular, 'inp' is
# taken here to alias the functional-API Input layer.
from tensorflow.keras import Model, Sequential, initializers
from tensorflow.keras.layers import (
    Input as inp, Conv2D, MaxPooling2D, BatchNormalization, Concatenate,
    Reshape, LSTM, Dropout, Dense, Flatten,
    GlobalAveragePooling1D, GlobalAveragePooling2D)


def create_model(input_shape, init):
    """
    CNN + stacked LSTM model.

    Arguments:
    input_shape -- the shape of our input
    init -- the weight initialization

    Returns:
    the Keras model
    """
    x = inp(shape=input_shape)

    x1 = Conv2D(32, 3, activation="relu", kernel_initializer=init,
                bias_regularizer='l2', padding='same')(x)
    x1 = BatchNormalization()(x1)
    x2 = Conv2D(32, 1, activation="relu", kernel_initializer=init,
                bias_regularizer='l2', padding='same')(x1)
    x2 = BatchNormalization()(x2)
    x3 = Concatenate()([x, x2])

    # Recurrent branch over the early feature maps
    l = Reshape((-1, 256))(x2)
    l1 = LSTM(256, return_sequences=True,
              kernel_initializer=initializers.RandomNormal(stddev=0.001),
              dropout=0.5, recurrent_dropout=0.5)(l)
    # l1 = Dropout(0.5)(l1)
    l2 = LSTM(191, return_sequences=False, go_backwards=True,
              kernel_initializer=initializers.RandomNormal(stddev=0.001),
              dropout=0.5, recurrent_dropout=0.5)(l1)
    l2 = Dropout(0.5)(l2)

    x4 = Conv2D(64, 3, activation="relu", kernel_initializer=init,
                bias_regularizer='l2', padding='same')(x3)
    x4 = BatchNormalization()(x4)
    x5 = Conv2D(64, 3, activation="relu", kernel_initializer=init,
                bias_regularizer='l2', padding='same')(x4)
    x5 = BatchNormalization()(x5)
    x6 = Concatenate()([x3, x5])

    x7 = Conv2D(96, 3, activation="relu", kernel_initializer=init,
                bias_regularizer='l2', padding='same')(x6)
    x7 = BatchNormalization()(x7)
    x8 = Conv2D(96, 3, activation="relu", kernel_initializer=init,
                bias_regularizer='l2', padding='same')(x7)
    x8 = BatchNormalization()(x8)
    x9 = Concatenate()([x6, x8])

    x10 = Conv2D(128, 3, activation="relu", kernel_initializer=init,
                 bias_regularizer='l2', padding='same')(x9)
    x10 = BatchNormalization()(x10)
    x11 = Conv2D(128, 3, activation="relu", kernel_initializer=init,
                 bias_regularizer='l2', padding='same')(x10)
    # x8 = Concatenate()([x4,x6])
    x11 = BatchNormalization()(x11)
    x12 = Concatenate()([x9, x11])

    # Merge the pooled conv features with the recurrent branch
    x13 = GlobalAveragePooling2D()(x12)
    x14 = Concatenate()([x13, l2])
    x14 = Reshape((-1, 128))(x14)
    x15 = LSTM(1024, return_sequences=True,
               kernel_initializer=initializers.RandomNormal(stddev=0.001),
               dropout=0.5, recurrent_dropout=0.5)(x14)
    # x15 = Dropout(0.5)(x15)
    x16 = LSTM(1024, go_backwards=True, return_sequences=False,
               kernel_initializer=initializers.RandomNormal(stddev=0.001),
               dropout=0.5, recurrent_dropout=0.5)(x15)
    x17 = Dropout(0.5)(x16)
    x18 = Dense(1, activation='sigmoid', kernel_initializer=init)(x17)

    model = Model(inputs=x, outputs=x18)
    return model
def create_model(input_shape, init):
    """
    CNN model.

    Arguments:
    input_shape -- the shape of our input
    init -- the weight initialization

    Returns:
    the Keras model
    """
    '''
    model = Sequential()
    model.add(Conv2D(16, kernel_size=(3, 3), activation='relu',
                     kernel_initializer=init, bias_regularizer='l2',
                     input_shape=input_shape))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(32, kernel_size=(3, 3), activation='relu',
                     kernel_initializer=init, bias_regularizer='l2'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(64, kernel_size=(3, 3), activation='relu',
                     kernel_initializer=init, bias_regularizer='l2'))
    model.add(Conv2D(32, kernel_size=(3, 3), activation='relu',
                     kernel_initializer=init, bias_regularizer='l2'))
    model.add(Conv2D(32, kernel_size=(1, 1), activation='relu',
                     kernel_initializer=init, bias_regularizer='l2'))
    model.add(Flatten())
    model.add(Dropout(0.5))
    model.add(Dense(128, kernel_regularizer='l2', activation='relu',
                    kernel_initializer=init))
    #model.add(Dense(32, kernel_regularizer='l2', activation='relu', kernel_initializer=init))
    #model.add(Dropout(0.2))
    model.add(Dense(1, activation='sigmoid', kernel_initializer=init))
    return model
    '''
    x = inp(shape=input_shape)

    x1 = Conv2D(16, 3, activation="relu", kernel_initializer=init,
                bias_regularizer='l2', padding='same')(x)
    x2 = Conv2D(16, 3, activation="relu", kernel_initializer=init,
                bias_regularizer='l2', padding='same')(x1)
    x3 = Concatenate()([x, x2])

    x4 = Conv2D(32, 3, activation="relu", kernel_initializer=init,
                bias_regularizer='l2', padding='same')(x3)
    x5 = Conv2D(32, 3, activation="relu", kernel_initializer=init,
                bias_regularizer='l2', padding='same')(x4)
    x6 = Concatenate()([x3, x5])

    x7 = Conv2D(32, 3, activation="relu", kernel_initializer=init,
                bias_regularizer='l2', padding='same')(x6)
    x8 = Conv2D(32, 3, activation="relu", kernel_initializer=init,
                bias_regularizer='l2', padding='same')(x7)
    x9 = Concatenate()([x6, x8])

    x10 = Conv2D(64, 3, activation="relu", kernel_initializer=init,
                 bias_regularizer='l2', padding='same')(x9)
    x11 = Conv2D(64, 3, activation="relu", kernel_initializer=init,
                 bias_regularizer='l2', padding='same')(x10)
    # x8 = Concatenate()([x4,x6])
    x12 = Concatenate()([x9, x11])

    x13 = GlobalAveragePooling2D()(x12)
    x14 = Flatten()(x13)
    # x9 = BatchNormalization()(x8)
    # x11 = Dropout(0.5)(x8)
    # x12 = Dense(128, kernel_regularizer='l2', activation='relu', kernel_initializer=init)(x10)
    # x11 = BatchNormalization()(x10)
    # x11 = Dropout(0.5)(x10)
    # x12 = Dense(512, kernel_regularizer='l2', activation='relu', kernel_initializer=init)(x11)
    # x13 = BatchNormalization()(x12)
    x15 = Dropout(0.5)(x14)
    x16 = Dense(1, activation='sigmoid', kernel_initializer=init)(x15)

    model = Model(inputs=x, outputs=x16)
    return model
def create_model(input_shape, init):
    """
    CNN + Transformer model.

    Arguments:
    input_shape -- the shape of our input
    init -- the weight initialization

    Returns:
    the Keras model
    """
    num_heads = 2  # Number of attention heads
    ff_dim = 1024  # Hidden layer size in feed-forward network inside transformer

    x = inp(shape=input_shape)
    x1 = Conv2D(32, 3, activation="relu", kernel_initializer=init,
                bias_regularizer='l2', padding='same')(x)
    x1 = BatchNormalization()(x1)
    x2 = Conv2D(32, 1, activation="relu", kernel_initializer=init,
                bias_regularizer='l2', padding='same')(x1)
    x2 = BatchNormalization()(x2)
    x3 = Concatenate()([x, x2])

    # Flatten the feature maps into a sequence of 126-dim tokens
    t3 = Reshape((-1, 126))(x3)
    transformer_block = TransformerBlock(126, num_heads, ff_dim)
    x4 = transformer_block(t3)
    x5 = GlobalAveragePooling1D()(x4)
    x6 = Dropout(0.5)(x5)
    x7 = Dense(1, activation='sigmoid', kernel_initializer=init)(x6)

    model = Model(inputs=x, outputs=x7)
    return model
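# TransformerBlock is used above but not defined in this file. The sketch
# below is an assumption: it mirrors the standard Keras-tutorial encoder
# block (multi-head self-attention plus a feed-forward network, each with a
# residual connection and layer normalization), which matches the
# (embed_dim, num_heads, ff_dim) constructor used above. The project's real
# implementation may differ.
from tensorflow.keras import Sequential, layers


class TransformerBlock(layers.Layer):
    def __init__(self, embed_dim, num_heads, ff_dim, rate=0.1):
        super().__init__()
        self.att = layers.MultiHeadAttention(num_heads=num_heads,
                                             key_dim=embed_dim)
        self.ffn = Sequential([
            layers.Dense(ff_dim, activation="relu"),
            layers.Dense(embed_dim),
        ])
        self.layernorm1 = layers.LayerNormalization(epsilon=1e-6)
        self.layernorm2 = layers.LayerNormalization(epsilon=1e-6)
        self.dropout1 = layers.Dropout(rate)
        self.dropout2 = layers.Dropout(rate)

    def call(self, inputs, training=False):
        # Self-attention sub-layer with residual connection
        attn_output = self.att(inputs, inputs)
        attn_output = self.dropout1(attn_output, training=training)
        out1 = self.layernorm1(inputs + attn_output)
        # Position-wise feed-forward sub-layer with residual connection
        ffn_output = self.ffn(out1)
        ffn_output = self.dropout2(ffn_output, training=training)
        return self.layernorm2(out1 + ffn_output)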
def create_model(input_shape, init):
    """
    CNN + LSTM model.

    Arguments:
    input_shape -- the shape of our input
    init -- the weight initialization

    Returns:
    the Keras model
    """
    x = inp(shape=input_shape)
    x1 = Conv2D(32, 3, activation="relu", kernel_initializer=init,
                bias_regularizer='l2', padding='same')(x)
    x1 = BatchNormalization()(x1)
    x2 = Conv2D(32, 1, activation="relu", kernel_initializer=init,
                bias_regularizer='l2', padding='same')(x1)
    x2 = BatchNormalization()(x2)
    x3 = Concatenate()([x, x2])

    x4 = Conv2D(64, 3, activation="relu", kernel_initializer=init,
                bias_regularizer='l2', padding='same')(x3)
    x4 = BatchNormalization()(x4)
    x5 = Conv2D(64, 3, activation="relu", kernel_initializer=init,
                bias_regularizer='l2', padding='same')(x4)
    x5 = BatchNormalization()(x5)
    x6 = Concatenate()([x3, x5])

    x7 = Conv2D(96, 3, activation="relu", kernel_initializer=init,
                bias_regularizer='l2', padding='same')(x6)
    x7 = BatchNormalization()(x7)
    x8 = Conv2D(96, 3, activation="relu", kernel_initializer=init,
                bias_regularizer='l2', padding='same')(x7)
    x8 = BatchNormalization()(x8)
    x9 = Concatenate()([x6, x8])

    x10 = Conv2D(128, 3, activation="relu", kernel_initializer=init,
                 bias_regularizer='l2', padding='same')(x9)
    x10 = BatchNormalization()(x10)
    x11 = Conv2D(128, 3, activation="relu", kernel_initializer=init,
                 bias_regularizer='l2', padding='same')(x10)
    # x8 = Concatenate()([x4,x6])
    x11 = BatchNormalization()(x11)
    x12 = Concatenate()([x9, x11])

    x13 = GlobalAveragePooling2D()(x12)
    x14 = Flatten()(x13)
    x14 = Reshape((-1, 107))(x14)
    x15 = LSTM(1024, return_sequences=True,
               kernel_initializer=initializers.RandomNormal(stddev=0.001),
               dropout=0.5, recurrent_dropout=0.5)(x14)
    x16 = LSTM(1024, go_backwards=True, return_sequences=False,
               kernel_initializer=initializers.RandomNormal(stddev=0.001),
               dropout=0.5, recurrent_dropout=0.5)(x15)
    # model = Sequential()
    # model.add(Conv2D(32, kernel_size=(5, 5), activation='relu', kernel_initializer=init, bias_regularizer='l2', input_shape=input_shape))
    # model.add(BatchNormalization())
    # model.add(MaxPooling2D(pool_size=(2,2)))
    # model.add(Conv2D(64, kernel_size=(3, 3), activation='relu', kernel_initializer=init, bias_regularizer='l2'))
    # model.add(BatchNormalization())
    # model.add(Conv2D(64, kernel_size=(1, 1), activation='relu', kernel_initializer=init, bias_regularizer='l2'))
    # model.add(Conv2D(96, kernel_size=(3, 3), activation='relu', kernel_initializer=init, bias_regularizer='l2'))
    # model.add(BatchNormalization())
    # model.add(Conv2D(96, kernel_size=(1, 1), activation='relu', kernel_initializer=init, bias_regularizer='l2'))
    # model.add(GlobalAveragePooling2D())
    # model.add(Flatten())
    # model.add(Reshape((-1,8)))
    # model.add(Dropout(0.3))
    # model.add(LSTM(512, return_sequences=True,
    #                kernel_initializer=initializers.RandomNormal(stddev=0.001), dropout=0.5, recurrent_dropout=0.5))
    # model.add(LSTM(512, go_backwards=True, return_sequences=False,
    #                kernel_initializer=initializers.RandomNormal(stddev=0.001), dropout=0.5, recurrent_dropout=0.5))
    # model.add(LSTM(512, return_sequences=False,
    #                kernel_initializer=initializers.RandomNormal(stddev=0.001), dropout=0.5, recurrent_dropout=0.5))
    # model.add(LSTM(512, go_backwards=True, return_sequences=False,
    #                kernel_initializer=initializers.RandomNormal(stddev=0.001), dropout=0.5, recurrent_dropout=0.5))
    # model.add(BatchNormalization())
    x17 = Dropout(0.5)(x16)
    x18 = Dense(1, activation='sigmoid', kernel_initializer=init)(x17)

    model = Model(inputs=x, outputs=x18)
    return model
def create_model(input_shape, init):
    """
    DenseNet-style CNN model.

    Arguments:
    input_shape -- the shape of our input
    init -- the weight initialization

    Returns:
    the Keras model
    """
    '''
    model = Sequential()
    model.add(Conv2D(16, kernel_size=(3, 3), activation='relu',
                     kernel_initializer=init, bias_regularizer='l2',
                     input_shape=input_shape))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(32, kernel_size=(3, 3), activation='relu',
                     kernel_initializer=init, bias_regularizer='l2'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(64, kernel_size=(3, 3), activation='relu',
                     kernel_initializer=init, bias_regularizer='l2'))
    model.add(Conv2D(32, kernel_size=(3, 3), activation='relu',
                     kernel_initializer=init, bias_regularizer='l2'))
    model.add(Conv2D(32, kernel_size=(1, 1), activation='relu',
                     kernel_initializer=init, bias_regularizer='l2'))
    model.add(Flatten())
    model.add(Dropout(0.5))
    model.add(Dense(128, kernel_regularizer='l2', activation='relu',
                    kernel_initializer=init))
    #model.add(Dense(32, kernel_regularizer='l2', activation='relu', kernel_initializer=init))
    #model.add(Dropout(0.2))
    model.add(Dense(1, activation='sigmoid', kernel_initializer=init))
    return model
    '''
    x = inp(shape=input_shape)

    # Initial Layers
    x1 = Conv2D(128, 5, activation="relu", kernel_initializer=init,
                bias_regularizer='l2', padding='same')(x)
    x2 = MaxPooling2D(pool_size=(2, 2), strides=2)(x1)
    x3 = BatchNormalization()(x2)

    # Dense Block 1
    x3_input = x3
    x4 = Conv2D(256, 1, activation="relu", kernel_initializer=init,
                bias_regularizer='l2', padding='same')(x3_input)
    x5 = BatchNormalization()(x4)
    x6 = Conv2D(256, 3, activation="relu", kernel_initializer=init,
                bias_regularizer='l2', padding='same')(x5)
    x7 = BatchNormalization()(x6)
    x8 = Conv2D(256, 1, activation="relu", kernel_initializer=init,
                bias_regularizer='l2', padding='same')(x7)
    x9 = BatchNormalization()(x8)
    x10 = Conv2D(256, 3, activation="relu", kernel_initializer=init,
                 bias_regularizer='l2', padding='same')(x9)
    x11 = BatchNormalization()(x10)

    # Transition Layer 1
    x11_input = Concatenate()([x3, x11])
    x12 = Conv2D(512, 1, activation="relu", kernel_initializer=init,
                 bias_regularizer='l2', padding='same')(x11_input)
    x13 = MaxPooling2D(pool_size=(2, 2), strides=2)(x12)

    # Dense Block 2
    x14 = Conv2D(512, 1, activation="relu", kernel_initializer=init,
                 bias_regularizer='l2', padding='same')(x13)
    x15 = BatchNormalization()(x14)
    x16 = Conv2D(512, 3, activation="relu", kernel_initializer=init,
                 bias_regularizer='l2', padding='same')(x15)
    x17 = BatchNormalization()(x16)
    x18 = Conv2D(512, 1, activation="relu", kernel_initializer=init,
                 bias_regularizer='l2', padding='same')(x17)
    x19 = BatchNormalization()(x18)
    x20 = Conv2D(512, 3, activation="relu", kernel_initializer=init,
                 bias_regularizer='l2', padding='same')(x19)
    x21 = BatchNormalization()(x20)

    # Transition Layer 2
    x21_input = Concatenate()([x13, x21])
    x22 = Conv2D(1024, 1, activation="relu", kernel_initializer=init,
                 bias_regularizer='l2', padding='same')(x21_input)
    x23 = BatchNormalization()(x22)

    # Final Layer
    x24 = GlobalAveragePooling2D()(x23)
    x25 = Flatten()(x24)
    x26 = Dense(1, activation='sigmoid', kernel_initializer=init)(x25)

    model = Model(inputs=x, outputs=x26)
    return model
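# A minimal usage sketch for the last variant above. The input shape,
# initializer name, optimizer, and the random data are placeholders chosen
# for illustration, not values taken from this project.
if __name__ == "__main__":
    import numpy as np

    model = create_model(input_shape=(64, 64, 1), init='he_normal')
    model.compile(optimizer='adam', loss='binary_crossentropy',
                  metrics=['accuracy'])
    model.summary()

    # Dummy batch just to exercise the graph end to end.
    X = np.random.rand(8, 64, 64, 1).astype('float32')
    y = np.random.randint(0, 2, size=(8, 1))
    model.fit(X, y, epochs=1, batch_size=4)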