def create_keras_model(inputShape, nClasses, output_activation='linear'):
    """Build a SegNet-style encoder/decoder segmentation model.

    Parameters
    ----------
    inputShape : tuple
        Tuple with the dimensions of the input data (ny, nx, nBands).
    nClasses : int
        Number of classes (channels of the final 1x1 convolution).
    output_activation : str, optional
        Activation applied to the output layer (default 'linear').

    Returns
    -------
    Model
        Uncompiled Keras model named 'segnet' with input layer 'image'
        and output layer 'output'.
    """
    # Removed unused locals `filter_size` and `pad` from the original.
    kernel = (3, 3)
    pool_size = (2, 2)

    inputs = Input(shape=inputShape, name='image')

    # Encoder: three conv/BN/ReLU/pool stages (64 -> 128 -> 256 filters),
    # then a 512-filter bottleneck without pooling.
    x = Conv2D(64, kernel, padding='same')(inputs)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = MaxPooling2D(pool_size=pool_size)(x)
    x = Conv2D(128, kernel, padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = MaxPooling2D(pool_size=pool_size)(x)
    x = Conv2D(256, kernel, padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = MaxPooling2D(pool_size=pool_size)(x)
    x = Conv2D(512, kernel, padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    # Decoder: mirror of the encoder, upsampling back to input resolution.
    x = Conv2D(512, kernel, padding='same')(x)
    x = BatchNormalization()(x)
    x = UpSampling2D(size=pool_size)(x)
    x = Conv2D(256, kernel, padding='same')(x)
    x = BatchNormalization()(x)
    x = UpSampling2D(size=pool_size)(x)
    x = Conv2D(128, kernel, padding='same')(x)
    x = BatchNormalization()(x)
    x = UpSampling2D(size=pool_size)(x)
    x = Conv2D(64, kernel, padding='same')(x)
    x = BatchNormalization()(x)

    # Per-pixel class scores.
    x = Conv2D(nClasses, (1, 1), padding='valid')(x)
    outputs = Activation(output_activation, name='output')(x)

    model = Model(inputs=inputs, outputs=outputs, name='segnet')
    return model
def build_generator(channels, num_classes, latent_dim):
    """Build a class-conditional DCGAN generator.

    Parameters
    ----------
    channels : int
        Number of channels in the generated image.
    num_classes : int
        Number of label classes for the conditioning embedding.
    latent_dim : int
        Dimensionality of the input noise vector.

    Returns
    -------
    Model
        Maps [noise (latent_dim,), label (1,) int32] to a
        (28, 28, channels) image in [-1, 1] (tanh output).
    """
    model = Sequential()
    model.add(Dense(128 * 7 * 7, activation="relu", input_dim=latent_dim))
    model.add(Reshape((7, 7, 128)))
    model.add(BatchNormalization(momentum=0.8))
    model.add(UpSampling2D())  # 7x7x128 -> 14x14x128
    model.add(Conv2D(128, kernel_size=3, padding="same"))
    model.add(Activation("relu"))
    model.add(BatchNormalization(momentum=0.8))
    model.add(UpSampling2D())  # 14x14x128 -> 28x28x128
    model.add(Conv2D(64, kernel_size=3, padding="same"))
    model.add(Activation("relu"))
    model.add(BatchNormalization(momentum=0.8))  # 28x28x64
    model.add(Conv2D(channels, kernel_size=3, padding='same'))
    model.add(Activation("tanh"))  # 28x28xchannels
    model.summary()

    # Condition on the class label by element-wise multiplying the noise
    # with a learned per-class embedding of the same dimensionality.
    noise = Input(shape=(latent_dim, ))
    label = Input(shape=(1, ), dtype='int32')
    label_embedding = Flatten()(Embedding(num_classes, latent_dim)(label))
    model_input = multiply([noise, label_embedding])
    img = model(model_input)
    return Model([noise, label], img)
def modelDecode(cae, filterSize, poolSize, gpus):
    """Build a stand-alone decoder initialised from a trained CAE's weights.

    Parameters
    ----------
    cae : Sequential
        Trained convolutional autoencoder (layout produced by modelCAE) to
        copy decoder weights from.
    filterSize : int
        Convolution kernel size (square).
    poolSize : int
        Upsampling factor per stage.
    gpus : int
        When > 1 the decoder is wrapped with multi_gpu_model.

    Returns
    -------
    Sequential
        Compiled (mse / adam) decoder mapping a 1024-d code to a
        3-channel image.
    """
    if gpus > 1:
        # multi_gpu_model wraps the template model; recover the original
        # single-GPU model from the wrapper's second-to-last layer.
        cae = cae.layers[-2]
    # initialize decoder
    decode = Sequential()
    # NOTE(review): layer indices 18/23/26/29/32/35 are hard-coded to the
    # exact layer ordering built by modelCAE — re-check if that changes.
    decode.add(Dense(128 * 4 * 4, input_dim=(1024), weights=cae.layers[18].get_weights()))
    decode.add(Activation('relu'))
    # Shape assumes channels-first data format: (channels=128, 4, 4).
    decode.add(Reshape((128, 4, 4)))
    decode.add(Activation('relu'))
    # Unpooling + deconvolution stages mirror the encoder: 64 -> 32 -> 16 -> 8 -> 3.
    decode.add(UpSampling2D(size=(poolSize, poolSize)))
    decode.add(Convolution2D(64, (filterSize, filterSize), padding='same', weights=cae.layers[23].get_weights()))
    decode.add(Activation('relu'))
    decode.add(UpSampling2D(size=(poolSize, poolSize)))
    decode.add(Convolution2D(32, (filterSize, filterSize), padding='same', weights=cae.layers[26].get_weights()))
    decode.add(Activation('relu'))
    decode.add(UpSampling2D(size=(poolSize, poolSize)))
    decode.add(Convolution2D(16, (filterSize, filterSize), padding='same', weights=cae.layers[29].get_weights()))
    decode.add(Activation('relu'))
    decode.add(UpSampling2D(size=(poolSize, poolSize)))
    decode.add(Convolution2D(8, (filterSize, filterSize), padding='same', weights=cae.layers[32].get_weights()))
    decode.add(Activation('relu'))
    decode.add(UpSampling2D(size=(poolSize, poolSize)))
    decode.add(Convolution2D(3, (filterSize, filterSize), padding='same', weights=cae.layers[35].get_weights()))
    decode.add(Activation('sigmoid'))
    if gpus > 1:
        decode = multi_gpu_model(decode, gpus=gpus)
    decode.compile(loss='mse', optimizer='adam')
    return decode
def UpSampling(ndim=2, *args, **kwargs):
    """Dimension-dispatching upsampling factory.

    Returns an UpSampling2D layer when ndim == 2, an UpSampling3D layer
    when ndim == 3; any other value raises ValueError. Remaining
    positional and keyword arguments are forwarded to the layer.
    """
    dispatch = {2: UpSampling2D, 3: UpSampling3D}
    if ndim not in dispatch:
        raise ValueError("ndim must be 2 or 3")
    return dispatch[ndim](*args, **kwargs)
def generator_model(noise_dim, feature_dim):
    """Build a conditional generator combining noise and EEG features.

    The noise passes through a MoGLayer, is concatenated with the EEG
    feature vector, and is decoded into a 28x28x1 image via two dense
    layers and two 2x upsampling stages. Output is tanh-activated.
    """
    noise_input = keras.layers.Input(shape=(noise_dim,))
    eeg_input = keras.layers.Input(shape=(feature_dim,))

    # Mixture-of-Gaussians transform of the raw noise.
    mog = MoGLayer(
        kernel_initializer=RandomUniform(minval=-0.2, maxval=0.2),
        bias_initializer=RandomUniform(minval=-1.0, maxval=1.0),
        kernel_regularizer=l2(0.01),
    )(noise_input)

    net = keras.layers.concatenate([mog, eeg_input])
    net = Dense(1024, activation="tanh")(net)
    net = BatchNormalization(momentum=0.8)(net)
    net = Dense(128 * 7 * 7, activation="tanh")(net)
    net = Reshape((7, 7, 128))(net)
    net = UpSampling2D()(net)
    net = BatchNormalization(momentum=0.8)(net)
    net = Conv2D(64, kernel_size=5, padding="same", activation="tanh")(net)
    net = UpSampling2D()(net)
    net = Conv2D(1, kernel_size=3, padding="same")(net)
    generated = Activation("tanh")(net)

    return Model(inputs=[noise_input, eeg_input], outputs=[generated])
def build_generator(self):
    """Build a U-Net generator.

    Seven strided-conv downsampling stages followed by six skip-connected
    upsampling stages and a final 2x upsample; output uses tanh with
    self.channels channels. Expects self.img_shape and base filter
    count self.gf.
    """
    def conv2d(layer_input, filters, f_size=4, bn=True):
        """Downsampling block: strided conv -> LeakyReLU -> optional BN."""
        d = Conv2D(filters, kernel_size=f_size, strides=2, padding='same')(layer_input)
        d = LeakyReLU(alpha=0.2)(d)
        if bn:
            d = BatchNormalization(momentum=0.8)(d)
        return d

    def deconv2d(layer_input, skip_input, filters, f_size=4, dropout_rate=0):
        """Upsampling block: 2x upsample -> conv -> [dropout] -> BN -> skip concat."""
        u = UpSampling2D(size=2)(layer_input)
        u = Conv2D(filters, kernel_size=f_size, strides=1, padding='same', activation='relu')(u)
        if dropout_rate:
            u = Dropout(dropout_rate)(u)
        u = BatchNormalization(momentum=0.8)(u)
        u = Concatenate()([u, skip_input])
        return u

    # Image input
    d0 = Input(shape=self.img_shape)

    # Downsampling: no BN on the first stage (common GAN practice).
    d1 = conv2d(d0, self.gf, bn=False)
    d2 = conv2d(d1, self.gf * 2)
    d3 = conv2d(d2, self.gf * 4)
    d4 = conv2d(d3, self.gf * 8)
    d5 = conv2d(d4, self.gf * 8)
    d6 = conv2d(d5, self.gf * 8)
    d7 = conv2d(d6, self.gf * 8)

    # Upsampling with skip connections to the mirrored encoder outputs.
    u1 = deconv2d(d7, d6, self.gf * 8)
    u2 = deconv2d(u1, d5, self.gf * 8)
    u3 = deconv2d(u2, d4, self.gf * 8)
    u4 = deconv2d(u3, d3, self.gf * 4)
    u5 = deconv2d(u4, d2, self.gf * 2)
    u6 = deconv2d(u5, d1, self.gf)

    u7 = UpSampling2D(size=2)(u6)
    output_img = Conv2D(self.channels, kernel_size=4, strides=1, padding='same', activation='tanh')(u7)

    return Model(d0, output_img)
def build_autoencoder(self):
    """Build a convolutional autoencoder for 128x128 RGB images.

    Bug fixed: the 16-filter convolution was applied to ``input_img``
    instead of the previous layer's output, disconnecting the first
    conv/pool/BN stage from the graph entirely.

    Returns
    -------
    Model
        Uncompiled model mapping (128, 128, 3) -> (128, 128, 3) with a
        sigmoid-activated output.
    """
    input_img = Input(shape=(128, 128, 3))

    # Encoder: five conv/pool stages, 128 -> 4 spatial resolution.
    x = Conv2D(8, (3, 3), activation='relu', padding='same')(input_img)
    x = MaxPooling2D((2, 2), padding='same')(x)
    x = BatchNormalization(momentum=0.8)(x)
    x = Conv2D(16, (3, 3), activation='relu', padding='same')(x)  # was (input_img): skipped stage 1
    x = MaxPooling2D((2, 2), padding='same')(x)
    x = Dropout(0.2)(x)
    x = BatchNormalization(momentum=0.8)(x)
    x = Conv2D(32, (3, 3), activation='relu', padding='same')(x)
    x = MaxPooling2D((2, 2), padding='same')(x)
    x = BatchNormalization(momentum=0.8)(x)
    x = Conv2D(64, (3, 3), activation='relu', padding='same')(x)
    x = MaxPooling2D((2, 2), padding='same')(x)
    x = BatchNormalization(momentum=0.8)(x)
    x = Conv2D(128, (3, 3), activation='relu', padding='same')(x)
    x = MaxPooling2D((2, 2), padding='same')(x)
    x = BatchNormalization(momentum=0.8)(x)
    x = Dropout(0.2)(x)
    x = Flatten()(x)

    # Bottleneck: dense 8192-dimensional code.
    encoded = Dense(8192)(x)

    # Decoder: dense back to 8x8x128, then four 2x upsampling stages
    # (8 -> 128 spatial resolution).
    x = Dense(8192)(encoded)
    x = Reshape((8, 8, 128))(x)
    x = Conv2D(128, (3, 3), activation='relu', padding='same')(x)
    x = UpSampling2D((2, 2))(x)
    x = BatchNormalization(momentum=0.8)(x)
    x = Conv2D(64, (3, 3), activation='relu', padding='same')(x)
    x = UpSampling2D((2, 2))(x)
    x = BatchNormalization(momentum=0.8)(x)
    x = Dropout(0.2)(x)
    x = Conv2D(32, (3, 3), activation='relu', padding='same')(x)
    x = UpSampling2D((2, 2))(x)
    x = BatchNormalization(momentum=0.8)(x)
    x = Conv2D(16, (3, 3), activation='relu', padding='same')(x)
    x = UpSampling2D((2, 2))(x)
    x = BatchNormalization(momentum=0.8)(x)
    x = Dropout(0.2)(x)
    decoded = Conv2D(3, (3, 3), activation='sigmoid', padding='same')(x)

    autoencoder = Model(input_img, decoded)
    return autoencoder
def up_sampling_block(model, kernal_size, filters, strides):
    """Upsampling block: Conv2D -> 2x UpSampling2D -> LeakyReLU(0.2).

    Conv2D + UpSampling2D is used in place of Conv2DTranspose.
    """
    out = Conv2D(filters=filters,
                 kernel_size=kernal_size,
                 strides=strides,
                 padding="same")(model)
    out = UpSampling2D(size=2)(out)
    return LeakyReLU(alpha=0.2)(out)
def up_sampling_block(model, kernal_size, filters, strides):
    """Deconvolution block built from Conv2D + UpSampling2D + LeakyReLU.

    Conv2DTranspose (or a custom deconvolution helper) could replace the
    Conv2D/UpSampling2D pair; this variant keeps them separate.
    """
    conv = Conv2D(filters=filters, kernel_size=kernal_size,
                  strides=strides, padding="same")
    upsample = UpSampling2D(size=2)
    activate = LeakyReLU(alpha=0.2)
    return activate(upsample(conv(model)))
def G_block(x, w, noise_inp, filter):
    """StyleGAN-style generator block.

    Upsamples `x` by 2, then applies two (conv -> add noise -> AdaIN ->
    relu) stages modulated by the style vector `w`, and produces an RGB
    branch upsampled to the final output resolution.

    Parameters
    ----------
    x : tensor
        Feature map from the previous (lower) resolution.
    w : tensor
        Style vector consumed by A_block to produce AdaIN scale/bias.
    noise_inp : tensor
        Noise source consumed by B_block.
    filter : int
        Number of convolution filters for this block.

    Returns
    -------
    tuple (x_out, to_rgb)
        Block output features and the 3-channel RGB contribution at
        256x256 resolution.
    """
    hidden = UpSampling2D()(x)
    hidden = Conv2D(filter, (3, 3), padding='same')(hidden)
    # hidden.shape[1] is used as the spatial resolution — assumes
    # channels-last data format; TODO confirm.
    noise = B_block(noise_inp, filter, hidden.shape[1])
    y_s, y_b = A_block(w, filter)
    hidden = Add()([hidden, noise])
    hidden = AdaIN()([hidden, y_b, y_s])
    hidden = Activation('relu')(hidden)
    # Second conv / noise / AdaIN stage at the same resolution.
    hidden = Conv2D(filter, (3, 3), padding='same')(hidden)
    noise = B_block(noise_inp, filter, hidden.shape[1])
    y_s, y_b = A_block(w, filter)
    hidden = Add()([hidden, noise])
    hidden = AdaIN()([hidden, y_b, y_s])
    x_out = Activation('relu')(hidden)
    # RGB branch: 1x1 conv, then upsample so every block contributes at
    # a fixed 256x256 target resolution.
    to_rgb = Conv2D(3, (1, 1), padding='same')(x_out)
    to_rgb = UpSampling2D(size=(int(256 / hidden.shape[1]), int(256 / hidden.shape[1])))(to_rgb)
    return x_out, to_rgb
def deconv2d(layer_input, skip_input, filters, f_size=4, dropout_rate=0):
    """Upsampling block: 2x upsample, conv, optional dropout,
    instance-normalize, then concatenate the skip connection."""
    x = UpSampling2D(size=2)(layer_input)
    x = Conv2D(filters,
               kernel_size=f_size,
               strides=1,
               padding='same',
               activation='relu')(x)
    if dropout_rate:
        x = layers.Dropout(dropout_rate)(x)
    normalized = InstanceNormalization()(x)
    return layers.Concatenate()([normalized, skip_input])
def build_generator(self):
    """Build the GAN generator as a Sequential model.

    Maps a latent vector of size self.latent_dim to a generated sample
    with self.channels channels: the (2, 235, 1) map is upsampled twice
    and then reduced by the final strided convolution.

    Returns
    -------
    Model
        Functional wrapper mapping noise (latent_dim,) -> generated sample.
    """
    model = Sequential()
    model.add(Dense(235 * 2 * 1, activation="relu", input_dim=self.latent_dim))
    model.add(Reshape((2, 235, 1)))
    model.add(UpSampling2D())
    model.add(Conv2D(128, kernel_size=4, padding="same"))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Activation("relu"))
    model.add(UpSampling2D())
    model.add(Conv2D(64, kernel_size=4, padding="same"))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Activation("relu"))
    # NOTE(review): the output stage uses a stride-4 conv with kernel 2
    # and a 'relu' output activation — GAN generators commonly end in
    # 'tanh'; confirm the intended output range.
    model.add(Conv2D(self.channels, 2, strides=4))
    model.add(Activation("relu"))
    model.summary()

    noise = Input(shape=(self.latent_dim, ))
    img = model(noise)
    return Model(noise, img)
def deconv2d(layer_input, skip_input, filters, f_size=4, dropout_rate=0):
    """Decoder block: 2x upsample -> conv(relu) -> [dropout] -> BN ->
    concatenate skip connection."""
    upsampled = UpSampling2D(size=2)(layer_input)
    conv = Conv2D(filters,
                  kernel_size=f_size,
                  strides=1,
                  padding='same',
                  activation='relu')(upsampled)
    if dropout_rate:
        conv = Dropout(dropout_rate)(conv)
    normed = BatchNormalization(momentum=0.8)(conv)
    return Concatenate()([normed, skip_input])
def build_generator(self):
    """Build a U-Net generator: four downsampling stages, three
    skip-connected upsampling stages, a final 2x upsample, and a
    tanh-activated output with self.channels channels."""
    img_in = layers.Input(shape=(self.img_rows, self.img_cols, self.channels))

    # Encoder path — each self.conv2d halves the spatial resolution.
    enc1 = self.conv2d(img_in, self.generator_filters)
    enc2 = self.conv2d(enc1, self.generator_filters * 2)
    enc3 = self.conv2d(enc2, self.generator_filters * 4)
    enc4 = self.conv2d(enc3, self.generator_filters * 8)

    # Decoder path with skip connections to mirrored encoder outputs.
    dec1 = self.deconv2d(enc4, enc3, self.generator_filters * 4)
    dec2 = self.deconv2d(dec1, enc2, self.generator_filters * 2)
    dec3 = self.deconv2d(dec2, enc1, self.generator_filters)

    final_up = UpSampling2D(size=2)(dec3)
    out_img = Conv2D(self.channels,
                     kernel_size=4,
                     strides=1,
                     padding='same',
                     activation='tanh')(final_up)
    return Model(img_in, out_img)
def modelCAE(filterSize, poolSize, sampSize, gpus, weights=None):
    """Build (and optionally load weights into) a convolutional autoencoder.

    Parameters
    ----------
    filterSize : int
        Convolution kernel size (square).
    poolSize : int
        Pooling/upsampling factor per stage.
    sampSize : int
        Input image height/width; input shape is channels-first
        (3, sampSize, sampSize).
    gpus : int
        When > 1 the model is wrapped with multi_gpu_model.
    weights : str, optional
        Path to pretrained weights to load after compiling.

    Returns
    -------
    Sequential
        Compiled (mse / Adam) autoencoder. NOTE(review): modelDecode
        copies weights out of this model by hard-coded layer index —
        keep the layer ordering in sync.
    """
    #strategy = tf.distribute.MirroredStrategy()
    #with strategy.scope():
    # initialize cae
    cae = Sequential()
    # Encoder: five conv + pool + relu stages (8 -> 16 -> 32 -> 64 -> 128 filters).
    # convolution + pooling 1
    cae.add(Convolution2D(8, (filterSize, filterSize), input_shape=(3, sampSize, sampSize), padding='same'))
    cae.add(MaxPooling2D(pool_size=(poolSize, poolSize)))
    cae.add(Activation('relu'))
    # convolution + pooling 2
    cae.add(Convolution2D(16, (filterSize, filterSize), padding='same'))
    cae.add(MaxPooling2D(pool_size=(poolSize, poolSize)))
    cae.add(Activation('relu'))
    # convolution + pooling 3
    cae.add(Convolution2D(32, (filterSize, filterSize), padding='same'))
    cae.add(MaxPooling2D(pool_size=(poolSize, poolSize)))
    cae.add(Activation('relu'))
    # convolution + pooling 4
    cae.add(Convolution2D(64, (filterSize, filterSize), padding='same'))
    cae.add(MaxPooling2D(pool_size=(poolSize, poolSize)))
    cae.add(Activation('relu'))
    # convolution + pooling 5
    cae.add(Convolution2D(128, (filterSize, filterSize), padding='same'))
    cae.add(MaxPooling2D(pool_size=(poolSize, poolSize)))
    cae.add(Activation('relu'))
    # dense network: 1024-d bottleneck, then expand back to 128x4x4
    cae.add(Flatten())
    cae.add(Dense(1024))
    cae.add(Activation('relu'))
    cae.add(Dense(128 * 4 * 4))
    cae.add(Activation('relu'))
    cae.add(Reshape((128, 4, 4)))
    cae.add(Activation('relu'))
    # Decoder: five upsample + conv + activation stages mirroring the encoder.
    # unpooling + deconvolution 1
    cae.add(UpSampling2D(size=(poolSize, poolSize)))
    cae.add(Convolution2D(64, (filterSize, filterSize), padding='same'))
    cae.add(Activation('relu'))
    # unpooling + deconvolution 2
    cae.add(UpSampling2D(size=(poolSize, poolSize)))
    cae.add(Convolution2D(32, (filterSize, filterSize), padding='same'))
    cae.add(Activation('relu'))
    # unpooling + deconvolution 3
    cae.add(UpSampling2D(size=(poolSize, poolSize)))
    cae.add(Convolution2D(16, (filterSize, filterSize), padding='same'))
    cae.add(Activation('relu'))
    # unpooling + deconvolution 4
    cae.add(UpSampling2D(size=(poolSize, poolSize)))
    cae.add(Convolution2D(8, (filterSize, filterSize), padding='same'))
    cae.add(Activation('relu'))
    # final unpooling + deconvolution
    cae.add(UpSampling2D(size=(poolSize, poolSize)))
    cae.add(Convolution2D(3, (filterSize, filterSize), padding='same'))
    cae.add(Activation('sigmoid'))  # ADDITION -DM
    # compile and load pretrained weights
    if gpus > 1:
        cae = multi_gpu_model(cae, gpus=gpus)
    cae.compile(loss='mse', optimizer=Adam(lr=0.0005, decay=1e-5))
    if weights:
        #print('loading pretrained weights')
        cae.load_weights(weights)
    return cae
def get_model(img_size, num_classes, nrInputChannels):
    """Build a 4-level U-Net for segmentation.

    Parameters
    ----------
    img_size : tuple
        Spatial dimensions (height, width) of the input.
    num_classes : int
        Number of output channels (final 1x1 conv, sigmoid-activated).
    nrInputChannels : int
        Number of input channels; must match the data generator.

    Returns
    -------
    Model
        Uncompiled U-Net with 32 base filters doubling per level.
    """
    inputs = keras.Input(shape=img_size + (nrInputChannels, ))  # Must match the number of channels the generator yields
    nr_kernels_first_layer = 32
    kernel_init = 'GlorotUniform'

    # downsampling/encoder: double-conv blocks with 2x max pooling between levels
    ec1 = layers.Conv2D(nr_kernels_first_layer, (3, 3), activation='relu', padding='same', kernel_initializer=kernel_init)(inputs)
    ec1 = layers.Conv2D(nr_kernels_first_layer, (3, 3), activation='relu', padding='same', kernel_initializer=kernel_init)(ec1)
    p1 = layers.MaxPooling2D((2, 2))(ec1)

    ec2 = layers.Conv2D(nr_kernels_first_layer * 2, (3, 3), activation='relu', padding='same', kernel_initializer=kernel_init)(p1)
    ec2 = layers.Conv2D(nr_kernels_first_layer * 2, (3, 3), activation='relu', padding='same', kernel_initializer=kernel_init)(ec2)
    p2 = layers.MaxPooling2D((2, 2))(ec2)

    ec3 = layers.Conv2D(nr_kernels_first_layer * 4, (3, 3), activation='relu', padding='same', kernel_initializer=kernel_init)(p2)
    ec3 = layers.Conv2D(nr_kernels_first_layer * 4, (3, 3), activation='relu', padding='same', kernel_initializer=kernel_init)(ec3)
    p3 = layers.MaxPooling2D((2, 2))(ec3)

    ec4 = layers.Conv2D(nr_kernels_first_layer * 8, (3, 3), activation='relu', padding='same', kernel_initializer=kernel_init)(p3)
    ec4 = layers.Conv2D(nr_kernels_first_layer * 8, (3, 3), activation='relu', padding='same', kernel_initializer=kernel_init)(ec4)
    ## extra level
    p4 = layers.MaxPooling2D((2, 2))(ec4)

    # bottleneck
    ec5 = layers.Conv2D(nr_kernels_first_layer * 16, (3, 3), activation='relu', padding='same', kernel_initializer=kernel_init)(p4)
    ec5 = layers.Conv2D(nr_kernels_first_layer * 16, (3, 3), activation='relu', padding='same', kernel_initializer=kernel_init)(ec5)

    ## Upsampling/decoder: conv + 2x upsample, concat with mirrored encoder
    ## output, then double-conv.
    u4 = layers.Conv2D(nr_kernels_first_layer * 8, (3, 3), padding='same', kernel_initializer=kernel_init)(ec5)
    u4 = UpSampling2D((2, 2))(u4)
    u4 = layers.concatenate([u4, ec4])
    dc4 = layers.Conv2D(nr_kernels_first_layer * 8, (3, 3), activation='relu', padding='same', kernel_initializer=kernel_init)(u4)
    dc4 = layers.Conv2D(nr_kernels_first_layer * 8, (3, 3), activation='relu', padding='same', kernel_initializer=kernel_init)(dc4)

    u3 = layers.Conv2D(nr_kernels_first_layer * 4, (3, 3), padding='same', kernel_initializer=kernel_init)(dc4)
    u3 = UpSampling2D((2, 2))(u3)
    u3 = layers.concatenate([u3, ec3])
    dc3 = layers.Conv2D(nr_kernels_first_layer * 4, (3, 3), activation='relu', padding='same', kernel_initializer=kernel_init)(u3)
    dc3 = layers.Conv2D(nr_kernels_first_layer * 4, (3, 3), activation='relu', padding='same', kernel_initializer=kernel_init)(dc3)

    #convolution and upsampling block
    u2 = layers.Conv2D(nr_kernels_first_layer * 2, (3, 3), padding='same', kernel_initializer=kernel_init)(dc3)
    u2 = UpSampling2D((2, 2))(u2)
    u2 = layers.concatenate([u2, ec2])
    dc2 = layers.Conv2D(nr_kernels_first_layer * 2, (3, 3), activation='relu', padding='same', kernel_initializer=kernel_init)(u2)
    dc2 = layers.Conv2D(nr_kernels_first_layer * 2, (3, 3), activation='relu', padding='same', kernel_initializer=kernel_init)(dc2)

    #convolution and upsampling block
    u1 = layers.Conv2D(nr_kernels_first_layer, (3, 3), padding='same', kernel_initializer=kernel_init)(dc2)
    u1 = UpSampling2D((2, 2))(u1)
    u1 = layers.concatenate([u1, ec1])
    dc1 = layers.Conv2D(nr_kernels_first_layer, (3, 3), activation='relu', padding='same', kernel_initializer=kernel_init)(u1)
    dc1 = layers.Conv2D(nr_kernels_first_layer, (3, 3), activation='relu', padding='same', kernel_initializer=kernel_init)(dc1)

    # Per-pixel class probabilities (sigmoid: independent per class).
    outputs = layers.Conv2D(num_classes, (1, 1), activation='sigmoid')(dc1)
    model = Model(inputs=[inputs], outputs=[outputs])
    return model
def segnet(inputShape, nClasses, learning_rate):
    """Build and compile a SegNet segmentation model.

    Parameters
    ----------
    inputShape : tuple
        Tuple with the dimensions of the input data (ny, nx, nBands).
    nClasses : int
        Number of classes.
    learning_rate : float
        Learning rate for the Adam optimizer.

    Returns
    -------
    Model
        Compiled model named 'segnet' with softmax output.
    """
    # Removed unused locals `filter_size` and `pad` from the original.
    kernel = (3, 3)
    pool_size = (2, 2)

    inputs = Input(shape=inputShape)

    # Encoder: three conv/BN/ReLU/pool stages, then a 512-filter bottleneck.
    x = Conv2D(64, kernel, padding='same')(inputs)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = MaxPooling2D(pool_size=pool_size)(x)
    x = Conv2D(128, kernel, padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = MaxPooling2D(pool_size=pool_size)(x)
    x = Conv2D(256, kernel, padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = MaxPooling2D(pool_size=pool_size)(x)
    x = Conv2D(512, kernel, padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    # Decoder: mirror of the encoder, upsampling back to input resolution.
    x = Conv2D(512, kernel, padding='same')(x)
    x = BatchNormalization()(x)
    x = UpSampling2D(size=pool_size)(x)
    x = Conv2D(256, kernel, padding='same')(x)
    x = BatchNormalization()(x)
    x = UpSampling2D(size=pool_size)(x)
    x = Conv2D(128, kernel, padding='same')(x)
    x = BatchNormalization()(x)
    x = UpSampling2D(size=pool_size)(x)
    x = Conv2D(64, kernel, padding='same')(x)
    x = BatchNormalization()(x)

    # Per-pixel class probabilities.
    x = Conv2D(nClasses, (1, 1), padding='valid')(x)
    outputs = Activation('softmax')(x)

    model = Model(inputs=inputs, outputs=outputs, name='segnet')

    ## Compile Keras model
    # NOTE(review): 'mse' loss with a softmax output is unusual —
    # 'categorical_crossentropy' is the conventional pairing; kept as-is
    # to preserve behavior.
    model.compile(loss='mse', optimizer=Adam(lr=learning_rate), metrics=['accuracy'])
    return model
def __init__(self, filters, kernel_size, octave=2, ratio_out=0.5, strides=(1, 1), data_format=None, dilation_rate=(1, 1), activation=None, use_bias=True, kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, **kwargs):
    """Initialize an octave convolution layer.

    Splits `filters` into a high-frequency and a low-frequency group
    (ratio_out controls the low-frequency share) and builds up to four
    sub-convolutions for the high->high, low->high, low->low and
    high->low paths, plus the pooling/upsampling ops that move features
    between the two resolutions.

    Parameters mirror keras Conv2D; `octave` is the resolution factor
    between the high and low branches, `ratio_out` the fraction of
    output filters assigned to the low-frequency branch.
    """
    super(OctaveConv2D, self).__init__(**kwargs)
    self.filters = filters
    self.kernel_size = kernel_size
    self.octave = octave
    self.ratio_out = ratio_out
    self.strides = strides
    self.data_format = conv_utils.normalize_data_format(data_format)
    self.dilation_rate = dilation_rate
    self.activation = activations.get(activation)
    self.use_bias = use_bias
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.bias_constraint = constraints.get(bias_constraint)
    # Partition the output filters between the low and high branches.
    self.filters_low = int(filters * self.ratio_out)
    self.filters_high = filters - self.filters_low
    self.conv_high_to_high, self.conv_low_to_high = None, None
    if self.filters_high > 0:
        self.conv_high_to_high = self._init_conv(self.filters_high, name='{}-Conv2D-HH'.format(self.name))
        self.conv_low_to_high = self._init_conv(self.filters_high, name='{}-Conv2D-LH'.format(self.name))
    self.conv_low_to_low, self.conv_high_to_low = None, None
    if self.filters_low > 0:
        # NOTE(review): the name suffixes here look swapped (low->low gets
        # '-HL', high->low gets '-LL'). Renaming would change layer names
        # that saved weights may key on — confirm before changing.
        self.conv_low_to_low = self._init_conv(self.filters_low, name='{}-Conv2D-HL'.format(self.name))
        self.conv_high_to_low = self._init_conv(self.filters_low, name='{}-Conv2D-LL'.format(self.name))
    # Resolution-change ops between the high and low branches.
    self.pooling = AveragePooling2D(
        pool_size=self.octave,
        padding='valid',
        data_format=data_format,
        name='{}-AveragePooling2D'.format(self.name),
    )
    self.up_sampling = UpSampling2D(
        size=self.octave,
        data_format=data_format,
        interpolation='nearest',
        name='{}-UpSampling2D'.format(self.name),
    )
def CAC_2(input_shape=(1, 1024, 4)):
    """Build, train and visualize a 1-D (sequence) convolutional autoencoder.

    Side effects: builds a throwaway Sequential prototype (summary only),
    trains the functional autoencoder on simulated data from
    get_simulated_dataset, and shows a t-SNE scatter plot of the encoder
    output. NOTE(review): this function mixes model definition, training
    and plotting and returns nothing.

    Parameters
    ----------
    input_shape : tuple
        Input tensor shape; defaults to (1, 1024, 4) (one-hot sequence).
    """
    # --- Prototype: Sequential sketch of the architecture. Only its
    # summary is printed; the model is never trained or returned.
    model = Sequential()
    model.add(Conv2D(10, 11, strides=1, padding='same', activation='relu', name='conv1', input_shape=input_shape))
    #model.add(MaxPooling2D(pool_size=(1,4)))
    model.add(Conv2D(10, 11, strides=1, padding='same', activation='relu'))
    model.add(MaxPooling2D(pool_size=(1, 64)))
    model.add(Conv2D(10, 11, strides=1, padding='same', activation='relu'))
    model.add(MaxPooling2D(pool_size=(1, 4)))
    model.add(Flatten())
    model.add(Dense(units=10))
    model.add(Dense(units=80, activation='relu'))
    model.add(Reshape((1, 16, 5)))
    model.add(UpSampling2D(size=(1, 4)))
    model.add(Conv2D(5, 11, strides=(1, 4), padding='same', activation='relu', name='deconv3'))
    #model.add(UpSampling2D(size=(1,4)))
    #model.add( Conv2DTranspose(5, 11, strides=(1,4), padding='same', activation='relu') )
    #model.add(UpSampling2D(size=(1,4)))
    #model.add(Conv2DTranspose(4, 11, strides=(1,4), padding='same', activation='relu'))
    model.summary()
    #return 0

    # --- Functional autoencoder actually used below.
    input_layer = Input(shape=input_shape)
    x = Conv2D(10, 11, strides=1, padding='same', activation='relu', input_shape=input_shape)(input_layer)
    #x=BatchNormalization(axis= -1)(x)
    #x=Activation('relu')(x)
    #x=MaxPooling2D(pool_size=(1,4))(x)
    x = Conv2D(10, 11, strides=1, padding='same', activation='relu')(x)
    #x=BatchNormalization(axis= -1)(x)
    #x=Activation('relu')(x)
    x = MaxPooling2D(pool_size=(1, 64))(x)
    #x = Conv2D(5, 11, strides=1, padding='same', activation='relu', input_shape=input_shape)(x)
    #x=BatchNormalization(axis= -1)(x)
    #x=Activation('relu')(x)
    #x=MaxPooling2D(pool_size=(1,4))(x)
    x = Flatten()(x)
    # 10-dimensional bottleneck code.
    encoded = Dense(units=10)(x)
    x = Dense(units=40, activation='relu')(encoded)
    x = Reshape((1, 16, 10))(x)
    #x=UpSampling2D(size=(1,4))(x)
    x = Conv2DTranspose(10, 11, strides=(1, 64), padding='same', activation='relu')(x)
    #x=UpSampling2D(size=(1,4))(x)
    #x = Conv2DTranspose(5, 11, strides=(1,1), padding='same', activation='relu')(x)
    #x=UpSampling2D(size=(1,4))(x)
    #x = Conv2DTranspose(5, 11, strides=(1,1), padding='same', activation='relu')(x)
    decoded = Conv2DTranspose(4, 11, strides=(1, 1), padding='same', activation='sigmoid')(x)
    autoencoder = Model(input_layer, decoded)
    autoencoder.summary()
    encoder = Model(input_layer, encoded, name='encoder')
    '''
    input_layer = Input(shape=input_shape)
    x = Conv2D(5, 11, strides=4, padding='same', activation='relu', name='conv1', input_shape=input_shape)(input_layer)
    x = Conv2D(64, 15, strides=4, padding='same', activation='relu', name='conv2', input_shape=input_shape)(x)
    x = Conv2D(128, 11, strides=4, padding='same', activation='relu', name='conv3', input_shape=input_shape)(x)
    x = Flatten()(x)
    encoded = Dense(units=10, name='embedding')(x)
    ###
    x = Dense(units=2048, activation='relu')(encoded)
    x = Reshape( (1, 16, 128) )(x)
    x = Conv2DTranspose(64, 15, strides=(1,4), padding='same', activation='relu', name='deconv3')(x)
    x = Conv2DTranspose(32, 15, strides=(1,4), padding='same', activation='relu', name='deconv2')(x)
    decoded = Conv2DTranspose(4, 15, strides=(1,4), padding='same', name='deconv1')(x)
    ###
    autoencoder = Model(input_layer, decoded)
    autoencoder.summary()
    encoder = Model(input_layer, encoded, name='encoder')
    encoder.summary()
    '''
    # Simulated training data built from two JASPAR motifs.
    simutation_parameters = {
        "PWM_file_1": "./JASPAR/MA0835.1.jaspar",
        "PWM_file_2": "./JASPAR/MA0515.1.jaspar",
        "seq_length": 1024,
        "center_pos": 100,
        "interspace": 10
    }
    [train_X, train_Y, test_X, test_Y] = get_simulated_dataset(parameters=simutation_parameters, train_size=20000, test_size=5000)
    print(train_X.shape)
    #################################
    # build autoencoder model: self-supervised reconstruction (x == y).
    autoencoder.compile(optimizer='adam', loss='binary_crossentropy')
    history_autoencoder = autoencoder.fit(x=train_X,
                                          y=train_X,
                                          batch_size=32,
                                          epochs=13,
                                          verbose=1,
                                          callbacks=[History()],
                                          validation_data=(test_X, test_X))
    # Visualize the learned 10-d codes with t-SNE, colored by test label.
    encoded_imgs = encoder.predict(test_X)
    print(encoded_imgs.shape)
    colors = ['#e41a1c', '#377eb8', '#4daf4a']
    X_embedded = TSNE(n_components=2).fit_transform(encoded_imgs)
    plt.scatter(X_embedded[:, 0], X_embedded[:, 1], c=np.array(colors)[test_Y.flatten()])
    plt.colorbar()
    plt.show()
def example_generator():
    """Split a BED dataset by chromosome, train a small convolutional
    autoencoder from data generators, and checkpoint both models.

    Side effects: writes valid.bed / test.bed / train.bed, trains for up
    to 600 epochs, and saves ATAC_peak_autoencoder_32.h5 and
    ATAC_peak_encoder_32.h5. Returns nothing.
    """
    # Chromosome-based split: chr1 -> validation, chr2/chr19 -> test,
    # the rest -> training.
    separate_dataset("True_target_with_labels_128.bed", ["chr1"], "valid.bed")
    separate_dataset("True_target_with_labels_128.bed", ["chr2", "chr19"], "test.bed")
    separate_dataset("True_target_with_labels_128.bed", [
        "chr3", "chr4", "chr5", "chr6", "chr7", "chr8", "chr9", "chr10",
        "chr11", "chr12", "chr13", "chr14", "chr15", "chr16", "chr17",
        "chr18", "chr20", "chr21", "chr22"
    ], "train.bed")
    train_gen = DataGenerator(
        data_path="train.bed",
        ref_fasta=
        "../GSM1865005_allC.MethylC-seq_WT_rods_rep1.tsv/GRCm38.primary_assembly.genome.fa.gz",
        genome_size_file="./mm10.genome.size",
        epi_track_files=None,
        tasks=["TARGET"],
        upsample=False)
    valid_gen = DataGenerator(
        data_path="valid.bed",
        ref_fasta=
        "../GSM1865005_allC.MethylC-seq_WT_rods_rep1.tsv/GRCm38.primary_assembly.genome.fa.gz",
        genome_size_file="./mm10.genome.size",
        epi_track_files=None,
        tasks=["TARGET"],
        upsample=False)
    #model = initialize_model()
    # add functional models here
    # Single conv/pool encoder over one-hot 128-bp sequences; the code is
    # the flattened pooled feature map.
    input_shape = (1, 128, 4)
    input_layer = Input(shape=input_shape)
    x = Conv2D(40, 11, strides=1, padding='same', input_shape=input_shape)(input_layer)
    x = BatchNormalization(axis=-1)(x)
    x = Activation('relu')(x)
    x = MaxPooling2D(pool_size=(1, 32))(x)
    encoded = Flatten()(x)
    # Decoder mirrors the encoder: reshape, 32x upsample, sigmoid conv.
    x = Reshape((1, 4, 40))(encoded)
    x = UpSampling2D(size=(1, 32))(x)
    decoded = Conv2D(4, 11, padding='same', activation='sigmoid')(x)
    ###
    autoencoder = Model(input_layer, decoded)
    autoencoder.summary()
    encoder = Model(input_layer, encoded, name='encoder')
    encoder.summary()
    autoencoder.compile(optimizer='adam', loss='mse')
    encoder.compile(optimizer='adam', loss='mse')
    trainning_history = autoencoder.fit_generator(
        train_gen,
        validation_data=valid_gen,
        #steps_per_epoch=5000,
        #validation_steps=500,
        epochs=600,
        verbose=1,
        use_multiprocessing=True,
        workers=6,
        max_queue_size=200,
        callbacks=[
            History(),
            ModelCheckpoint("ATAC_peak_autoencoder_32.h5",
                            monitor='val_loss',
                            verbose=1,
                            save_best_only=True,
                            mode='min',
                            save_weights_only=False),
            CustomCheckpoint('ATAC_peak_encoder_32.h5', encoder)
        ])
def resblock(x, kernel_size, resample, nfilters, name, norm=BatchNormalization, is_first=False, conv_layer=Conv2D):
    """Pre-activation residual block with optional up/down resampling.

    Parameters
    ----------
    x : tensor
        Input feature map.
    kernel_size : tuple
        Kernel size of the two main convolutions.
    resample : str
        One of "UP" (2x upsample), "DOWN" (2x average pool), "SAME".
    nfilters : int
        Number of output filters.
    name : str
        Prefix for layer names.
    norm : callable or None
        Normalization layer factory; None disables normalization.
    is_first : bool
        When True, skips the leading norm/activation (input is raw) and
        resamples the shortcut before projecting it.
    conv_layer : callable
        Convolution layer class to use.

    Returns
    -------
    tensor
        shortcut + conv path.
    """
    assert resample in ["UP", "SAME", "DOWN"]
    feature_axis = 1 if K.image_data_format() == 'channels_first' else -1
    identity = lambda x: x
    # With norm=None, substitute a factory returning the identity.
    if norm is None:
        norm = lambda axis, name: identity
    if resample == "UP":
        resample_op = UpSampling2D(size=(2, 2), name=name + '_up')
    elif resample == "DOWN":
        resample_op = AveragePooling2D(pool_size=(2, 2), name=name + '_pool')
    else:
        resample_op = identity
    in_filters = K.int_shape(x)[feature_axis]
    # 1x1 projection shortcut only when shape changes; identity otherwise.
    if resample == "SAME" and in_filters == nfilters:
        shortcut_layer = identity
    else:
        shortcut_layer = conv_layer(kernel_size=(1, 1), filters=nfilters, kernel_initializer=he_init, name=name + 'shortcut')

    ### SHORTCUT PATH
    # Order of resample vs. projection depends on is_first.
    if is_first:
        shortcut = resample_op(x)
        shortcut = shortcut_layer(shortcut)
    else:
        shortcut = shortcut_layer(x)
        shortcut = resample_op(shortcut)

    ### CONV PATH (pre-activation: norm -> relu -> conv, twice)
    convpath = x
    if not is_first:
        convpath = norm(axis=feature_axis, name=name + '_bn1')(convpath)
        convpath = Activation('relu')(convpath)
    # Upsample before the first conv; downsample after the second conv.
    if resample == "UP":
        convpath = resample_op(convpath)
    convpath = conv_layer(filters=nfilters, kernel_size=kernel_size, kernel_initializer=he_init, use_bias=True, padding='same', name=name + '_conv1')(convpath)
    convpath = norm(axis=feature_axis, name=name + '_bn2')(convpath)
    convpath = Activation('relu')(convpath)
    convpath = conv_layer(filters=nfilters, kernel_size=kernel_size, kernel_initializer=he_init, use_bias=True, padding='same', name=name + '_conv2')(convpath)
    if resample == "DOWN":
        convpath = resample_op(convpath)
    y = Add()([shortcut, convpath])
    return y
def MixModel(input_size=INPUT_SIZE, kernel_size=KERNEL_SIZE, activation=ACTIVATION, padding=PADDING, strides=STRIDES, kernel_initializer=KERNEL_INITIALIZER, pool_size=POOL_SIZE, model_depth=3, metrics=dice):
    """Build and compile a U-Net-like model with loop-generated depth.

    Parameters
    ----------
    input_size : tuple
        Input tensor shape.
    kernel_size, activation, padding, strides, kernel_initializer,
    pool_size :
        Forwarded to ConvBlock / upConvBlock.
    model_depth : int
        Number of encoder/decoder levels generated by the loops.
    metrics :
        Metric or list of metrics (normalized to a list but the compile
        call below uses [dice] directly).

    Returns
    -------
    Model
        Compiled (adam / dice_loss) model with a 3-channel relu output.
    """
    inputs = tf.keras.Input(input_size, name="inputs")
    layer_list = list()
    conv2 = None
    current_filters = 32
    # down sampling: depth 0 is a plain stem conv; later depths are
    # double ConvBlocks with pooling. conv1/conv2/pol1 carry over between
    # iterations (loop-carried wiring).
    for depth in range(model_depth):
        if depth == 0:
            inputs2 = Conv2D(64, 3, padding="same", activation="relu", kernel_initializer="he_normal")(inputs)
            continue
        # resnet block
        if depth > 1:
            # cut = pol1
            current_filters = current_filters * (2 * depth)
        # if depth >1 and depth%2 == 0:
        # cut2 = pol1
        # print("depth and num_filters:",(depth,current_filters))
        # First conv block takes the stem; later ones take the previous pool.
        if conv2 != None:
            conv1 = ConvBlock(pol1, num_filters=current_filters, kernel_size=kernel_size, activation=activation, padding=padding, strides=strides, kernel_initializer=kernel_initializer, BN=True)
        else:
            conv1 = ConvBlock(inputs2, num_filters=current_filters, kernel_size=kernel_size, activation=activation, padding=padding, strides=strides, kernel_initializer=kernel_initializer, BN=True)
        # pol1 = MaxPooling2D(pool_size=POOL_SIZE)(conv1)
        # current_filters = current_filters * (depth**2)
        conv2 = ConvBlock(conv1, num_filters=current_filters * 2, kernel_size=kernel_size, activation=activation, padding=padding, strides=strides, kernel_initializer=kernel_initializer, BN=True)
        # print("conv1:",conv1.shape)
        # print("conv2:", conv2.shape)
        if depth == model_depth - 1:
            # Dropout regularization at the deepest encoder level.
            conv2 = tf.keras.layers.Dropout(0.5)(conv2)
        pol1 = MaxPooling2D(pool_size=POOL_SIZE)(conv2)
        # print("polshape:",pol1.shape)
        layer_list.append([conv1, conv2, pol1])
    # print(layer_list)
    # Bottleneck: two 1024-filter convs around one extra pool/upsample pair.
    current_filters = pol1.shape[3]
    conv3 = Conv2D(1024, 3, padding="same", kernel_initializer="he_normal", activation="relu")(pol1)
    pol2 = MaxPooling2D(pool_size=POOL_SIZE)(conv3)
    conv4 = Conv2D(1024, 3, padding="same", kernel_initializer="he_normal", activation="relu")(pol2)
    up1 = UpSampling2D(size=POOL_SIZE)(conv4)
    conv5 = Conv2D(512, 3, padding="same", kernel_initializer="he_normal", activation="relu")(up1)
    # upsampling: mirror of the encoder loop, concatenating with the
    # stored encoder conv1 outputs (skip connections).
    for depth in range(model_depth):
        if depth == 0:
            continue
        if depth == 1:
            up1 = UpSampling2D(size=POOL_SIZE)(conv5)
        if depth > 1:
            current_filters = int(current_filters / (2 * depth))
            up1 = UpSampling2D(size=POOL_SIZE)(up_conv2)
        # print("up1shape:",up1.shape)
        # print("num_filters:",(current_filters,depth))
        up_conv1 = upConvBlock(up1, num_filters=current_filters, kernel_size=kernel_size, activation=activation, padding=padding, strides=strides, kernel_initializer=kernel_initializer, BN=True)
        # print(layer_list)
        # print("compare:",(up_conv1.shape,layer_list[model_depth-depth-1][1].shape))
        merge = tf.keras.layers.concatenate([up_conv1, layer_list[model_depth - depth - 1][0]], axis=3)
        up_conv2 = upConvBlock(merge, num_filters=current_filters, kernel_size=kernel_size, activation=activation, padding=padding, strides=strides, kernel_initializer=kernel_initializer, BN=True)
        # if depth>1:
        # up_conv2 += cut
        # if depth>1 and cut2 != None:
        # conv2 += cut2
        # cut2 = None
    # Output head: 64-filter conv then 3-channel relu conv.
    up_conv2 = Conv2D(64, 3, padding="same", kernel_initializer="he_normal", activation="relu")(up_conv2)
    up_conv2 = Conv2D(3, 3, padding="same", kernel_initializer="he_normal", activation="relu")(up_conv2)
    model = Model(inputs, up_conv2)
    # Normalized metrics list is currently unused by compile (kept as-is).
    if not isinstance(metrics, list):
        metrics = [metrics]
    # label_wise_dice_metrics = [get_label_dice_coefficient_function(index) for index in range(BATCH_SIZE)]
    # if metrics:
    # metrics = metrics + label_wise_dice_metrics
    # else:
    # metrics = label_wise_dice_metrics
    # with tf.device('/cpu:0'):
    # parallel_model = multi_gpu_model(model, gpus=2)
    # model.compile(optimizer="adam",loss=dice_coefficient_loss, metrics=metrics)
    model.compile(optimizer="adam", loss=dice_loss, metrics=[dice])
    # model.summary()
    return model