def __init__(self):
    super().__init__()
    self.model = Sequential([
        Block(64),
        Block(64),
        SpatialDropout2D(0.3),
        MaxPooling2D((2, 2)),
        Block(128),
        Block(128),
        SpatialDropout2D(0.3),
        MaxPooling2D((2, 2)),
        Block(256),
        Block(256),
        Block(256),
        SpatialDropout2D(0.4),
        MaxPooling2D((2, 2)),
        Block(512),
        Block(512),
        SpatialDropout2D(0.4),
        MaxPooling2D((2, 2)),
        Block(512),
        GlobalAveragePooling2D(),
        Flatten(),
        Dropout(0.5),
        Dense(10, activation="softmax"),
    ])
def makeDropoutModel(img_height, img_width):
    model = Sequential([
        Conv2D(32, 5, padding='same', activation='relu',
               input_shape=(img_height, img_width, 3)),
        MaxPooling2D(2, 2),
        SpatialDropout2D(0.25),
        Conv2D(64, 3, padding='same', activation='relu'),
        MaxPooling2D(2, 2),
        SpatialDropout2D(0.25),
        Conv2D(64, 3, padding='same', activation='relu'),
        MaxPooling2D(2, 2),
        SpatialDropout2D(0.25),
        Flatten(),
        Dropout(0.25),
        Dense(128, activation='relu'),
        Dense(3)
    ])
    model.compile(
        optimizer='adam',
        loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
        metrics=['accuracy'])
    return model
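# Hedged usage sketch for makeDropoutModel above: the 64x64 image size and the
# random dummy batch are illustrative assumptions, not values from the original code.
import numpy as np
import tensorflow as tf

model = makeDropoutModel(img_height=64, img_width=64)
x_dummy = np.random.rand(8, 64, 64, 3).astype("float32")  # hypothetical input batch
y_dummy = np.random.randint(0, 3, size=(8,))               # 3 classes, integer labels
model.fit(x_dummy, y_dummy, epochs=1, verbose=0)            # SparseCategoricalCrossentropy expects integer labels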
def build(self, input_shape):
    self.model = keras.Sequential([
        Conv2D(self.config["conv1"], self.config["kernel1"],
               kernel_regularizer=l2(self.config["l2_rate"]),
               input_shape=input_shape),
        LeakyReLU(alpha=self.config["alpha"]),
        BatchNormalization(),
        SpatialDropout2D(self.config["drop1"]),
        Conv2D(self.config["conv2"], self.config["kernel2"],
               kernel_regularizer=l2(self.config["l2_rate"])),
        LeakyReLU(alpha=self.config["alpha"]),
        BatchNormalization(),
        MaxPooling2D(self.config["pool1"], padding='same'),
        SpatialDropout2D(self.config["drop1"]),
        Conv2D(self.config["conv3"], self.config["kernel3"],
               kernel_regularizer=l2(self.config["l2_rate"])),
        LeakyReLU(alpha=self.config["alpha"]),
        BatchNormalization(),
        SpatialDropout2D(self.config["drop2"]),
        Conv2D(self.config["conv4"], self.config["kernel4"],
               kernel_regularizer=l2(self.config["l2_rate"])),
        LeakyReLU(alpha=self.config["alpha"]),
        BatchNormalization(),
        GlobalAveragePooling2D(),
        Dense(2, activation='softmax')
    ])
def conv2d_block(inputs, norm='batch', dropout=0., filters=16, kernel_size=(3, 3),
                 kernel_initializer='zeros', padding='same', activation=LeakyReLU,
                 max_dropout=0.5):
    # Clamp the dropout rate to the range [0, max_dropout].
    dropout = min(dropout, max_dropout)
    dropout = max(dropout, 0.)
    c = Conv2D(filters, kernel_size, activation='linear',
               kernel_initializer=kernel_initializer, padding=padding)(inputs)
    c = norm_and_activation(c, norm, activation)
    if dropout > 0.0:
        c = SpatialDropout2D(dropout)(c)
    c = Conv2D(filters, kernel_size, activation='linear',
               kernel_initializer=kernel_initializer, padding=padding)(c)
    c = norm_and_activation(c, norm, activation)
    if dropout > 0.0:
        c = SpatialDropout2D(dropout)(c)
    return c
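# Hedged usage sketch for conv2d_block above: the input shape is an illustrative
# assumption, and norm_and_activation is assumed to be defined elsewhere in this code.
inputs = Input(shape=(128, 128, 1))
features = conv2d_block(inputs, norm='batch', dropout=0.1, filters=32,
                        kernel_initializer='he_normal')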
def model_task3_part1(Base, img_ch, img_width, img_height):
    model_task3_part1 = Sequential()
    model_task3_part1.add(Conv2D(filters=Base,
                                 input_shape=(img_width, img_height, img_ch),
                                 kernel_size=(3, 3), strides=(1, 1), padding='same'))
    model_task3_part1.add(Activation('relu'))
    model_task3_part1.add(SpatialDropout2D(0.1))
    model_task3_part1.add(MaxPooling2D(pool_size=(2, 2)))
    model_task3_part1.add(Conv2D(filters=Base * 2, kernel_size=(3, 3),
                                 strides=(1, 1), padding='same'))
    model_task3_part1.add(Activation('relu'))
    model_task3_part1.add(SpatialDropout2D(0.4))
    model_task3_part1.add(MaxPooling2D(pool_size=(2, 2)))
    model_task3_part1.add(Conv2D(filters=Base * 4, kernel_size=(3, 3),
                                 strides=(1, 1), padding='same'))
    model_task3_part1.add(Activation('relu'))
    model_task3_part1.add(SpatialDropout2D(0.4))
    model_task3_part1.add(Conv2D(filters=Base * 4, kernel_size=(3, 3),
                                 strides=(1, 1), padding='same'))
    model_task3_part1.add(Activation('relu'))
    model_task3_part1.add(SpatialDropout2D(0.4))
    model_task3_part1.add(Conv2D(filters=Base * 2, kernel_size=(3, 3),
                                 strides=(1, 1), padding='same'))
    model_task3_part1.add(Activation('relu'))
    model_task3_part1.add(SpatialDropout2D(0.4))
    model_task3_part1.add(MaxPooling2D(pool_size=(2, 2)))
    model_task3_part1.add(Flatten())
    model_task3_part1.add(Dense(64))
    model_task3_part1.add(Activation('relu'))
    model_task3_part1.add(Dense(64))
    model_task3_part1.add(Activation('relu'))
    model_task3_part1.add(Dense(1))
    model_task3_part1.add(Activation('sigmoid'))
    model_task3_part1.summary()
    return model_task3_part1
def model_VGG16(Base, img_ch, img_width, img_height):
    model_VGG16 = Sequential()
    model_VGG16.add(Conv2D(Base, input_shape=(img_width, img_height, img_ch),
                           kernel_size=(3, 3), padding='same', activation='relu'))
    model_VGG16.add(Conv2D(Base, (3, 3), activation='relu', padding='same'))
    model_VGG16.add(BatchNormalization())
    model_VGG16.add(SpatialDropout2D(0.1))
    model_VGG16.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    model_VGG16.add(Conv2D(Base * 2, (3, 3), activation='relu', padding='same'))
    model_VGG16.add(Conv2D(Base * 2, (3, 3), activation='relu', padding='same'))
    model_VGG16.add(BatchNormalization())
    model_VGG16.add(SpatialDropout2D(0.4))
    model_VGG16.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    model_VGG16.add(Conv2D(Base * 4, (3, 3), activation='relu', padding='same'))
    model_VGG16.add(Conv2D(Base * 4, (3, 3), activation='relu', padding='same'))
    model_VGG16.add(Conv2D(Base * 4, (3, 3), activation='relu', padding='same'))
    model_VGG16.add(BatchNormalization())
    model_VGG16.add(SpatialDropout2D(0.4))
    model_VGG16.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    model_VGG16.add(Conv2D(Base * 8, (3, 3), activation='relu', padding='same'))
    model_VGG16.add(Conv2D(Base * 8, (3, 3), activation='relu', padding='same'))
    model_VGG16.add(Conv2D(Base * 8, (3, 3), activation='relu', padding='same'))
    model_VGG16.add(BatchNormalization())
    model_VGG16.add(SpatialDropout2D(0.4))
    model_VGG16.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    model_VGG16.add(Conv2D(Base * 8, (3, 3), activation='relu', padding='same'))
    model_VGG16.add(Conv2D(Base * 8, (3, 3), activation='relu', padding='same'))
    model_VGG16.add(Conv2D(Base * 8, (3, 3), activation='relu', padding='same'))
    model_VGG16.add(BatchNormalization())
    model_VGG16.add(SpatialDropout2D(0.4))
    model_VGG16.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    model_VGG16.add(Flatten())
    model_VGG16.add(Dense(Base * 64, activation='relu'))
    model_VGG16.add(Dense(Base * 64, activation='relu'))
    model_VGG16.add(Dense(10, activation='softmax'))
    model_VGG16.summary()
    return model_VGG16
def Dense_ResNet_Atrous(input_shape=(HEIGHT, WIDTH, CHANNELS), num_class=NUM_CLASS,
                        ks1=KS1, ks2=KS2, ks3=KS3, dl1=DL1, dl2=DL2, dl3=DL3,
                        filters=NF, resblock1=NR1, r_filters=NFL, resblock2=NR2,
                        dil_mode=DIL_MODE, sp_dropout=DR1, re_dropout=DR2):
    # tf.debugging.set_log_device_placement(True)
    strategy = tf.distribute.MirroredStrategy()
    with strategy.scope():
        inputs = Input(shape=input_shape)
        for cycle in range(resblock1):
            if cycle == 0:
                d1 = stem_split_3k(inputs, filters, mode=dil_mode,
                                   kernel_size_1=ks1, kernel_size_2=ks2, kernel_size_3=ks3,
                                   dilation_1=dl1, dilation_2=dl2, dilation_3=dl3)
            elif cycle == 1:
                d2 = residual_block_split_3k(d1, filters, mode=dil_mode,
                                             kernel_size_1=ks1, kernel_size_2=ks2, kernel_size_3=ks3,
                                             dilation_1=dl1, dilation_2=dl2, dilation_3=dl3)
                d2 = SpatialDropout2D(sp_dropout)(d2)
                dsum = Add()([d1, d2])
            else:
                d2 = residual_block_split_3k(d2, filters, mode=dil_mode,
                                             kernel_size_1=ks1, kernel_size_2=ks2, kernel_size_3=ks3,
                                             dilation_1=dl1, dilation_2=dl2, dilation_3=dl3)
                d2 = SpatialDropout2D(sp_dropout)(d2)
                d2 += dsum
                dsum = d2 + 0
        if resblock2 > 0:
            for cycle in range(resblock2):
                if cycle == 0:
                    d3 = residual_convLSTM2D_block(d2, r_filters, num_class, rd=re_dropout)
                else:
                    d3 = residual_convLSTM2D_block(d3, r_filters, num_class, rd=re_dropout)
        else:
            d3 = shrink_block(d2, num_class)
        outputs = Activation("softmax", name='softmax')(d3)
        # Optionally use sigmoid activation.
        # outputs = Activation("sigmoid", name='sigmoid')(d3)
        model = Model(inputs, outputs, name='Res-CRD-Net')
    return model
def get_unet_sd(base, img_size, img_ch, batch_norm, dropout, dr_rate):
    layer_inp = Input(shape=(img_size, img_size, img_ch))

    layer_b1 = conv_block(base, layer_inp, batch_norm)
    layer_mp1 = MaxPooling2D(pool_size=(2, 2))(layer_b1)
    if dropout:
        layer_d1 = SpatialDropout2D(dr_rate)(layer_mp1)
        layer_b2 = conv_block(base * 2, layer_d1, batch_norm)
    else:
        layer_b2 = conv_block(base * 2, layer_mp1, batch_norm)
    layer_mp2 = MaxPooling2D(pool_size=(2, 2))(layer_b2)
    if dropout:
        layer_d2 = SpatialDropout2D(dr_rate)(layer_mp2)
        layer_b3 = conv_block(base * 4, layer_d2, batch_norm)
    else:
        layer_b3 = conv_block(base * 4, layer_mp2, batch_norm)
    layer_mp3 = MaxPooling2D(pool_size=(2, 2))(layer_b3)
    if dropout:
        layer_d3 = SpatialDropout2D(dr_rate)(layer_mp3)
        layer_b4 = conv_block(base * 8, layer_d3, batch_norm)
    else:
        layer_b4 = conv_block(base * 8, layer_mp3, batch_norm)
    layer_mp4 = MaxPooling2D(pool_size=(2, 2))(layer_b4)

    # Bottle-neck
    layer_b5 = conv_block(base * 16, layer_mp4, batch_norm)

    layer_db1 = deconv_block_sd(base * 8, layer_b4, layer_b5, batch_norm, dropout, dr_rate)
    layer_db2 = deconv_block_sd(base * 4, layer_b3, layer_db1, batch_norm, dropout, dr_rate)
    layer_db3 = deconv_block_sd(base * 2, layer_b2, layer_db2, batch_norm, dropout, dr_rate)
    layer_db4 = deconv_block_sd(base, layer_b1, layer_db3, batch_norm, dropout, dr_rate)

    layer_conv2 = Conv2D(filters=1, kernel_size=(1, 1), strides=(1, 1),
                         padding='same')(layer_db4)
    layer_out = Activation('sigmoid')(layer_conv2)

    model = Model(inputs=layer_inp, outputs=layer_out)
    model.summary()
    return model
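# Hedged usage sketch for get_unet_sd above: the argument values are illustrative
# assumptions; conv_block and deconv_block_sd are assumed to be defined elsewhere
# in this collection (deconv_block_sd appears further below).
unet = get_unet_sd(base=16, img_size=128, img_ch=1,
                   batch_norm=True, dropout=True, dr_rate=0.2)
unet.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])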
def __init__(self, learning_rate=0.003):
    x_input = tf.keras.Input(shape=(35, 35, 3), name="x_input_node")
    x = SeparableConv2D(32, (3, 3), padding='same')(x_input)
    x = BatchNormalization()(x)
    x = SpatialDropout2D(rate=0.2)(x)
    x = self.residual_layer(x, 32)
    x = SpatialDropout2D(rate=0.2)(x)
    x = MaxPool2D()(x)
    x = SeparableConv2D(32, (3, 3), padding='same')(x)
    x = BatchNormalization()(x)
    x = SpatialDropout2D(rate=0.2)(x)
    x = self.residual_layer(x, 32)
    x = SpatialDropout2D(rate=0.2)(x)
    x = MaxPool2D()(x)
    x = SeparableConv2D(128, (3, 3), padding='same')(x)
    x = BatchNormalization()(x)
    x = SpatialDropout2D(rate=0.2)(x)
    x = self.residual_layer(x, 128)
    x = SpatialDropout2D(rate=0.2)(x)
    x = MaxPool2D()(x)
    x = SeparableConv2D(128, (3, 3), padding='same')(x)
    x = BatchNormalization()(x)
    x = SpatialDropout2D(rate=0.2)(x)
    x = self.residual_layer(x, 128)
    x = SpatialDropout2D(rate=0.2)(x)
    x = SeparableConv2D(2, (3, 3), padding='same')(x)
    x = BatchNormalization()(x)
    x = SpatialDropout2D(rate=0.2)(x)
    x = GlobalAveragePooling2D()(x)
    x = Softmax()(x)
    self.model = Model(inputs=x_input, outputs=x)
    self.optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)
    self.model.summary()
def repeat_block(inp, out_filters, dropout=0.2):
    """
    Recurrent conv block with decreasing dilation rate (i.e. shrinking effective
    kernel size). Makes use of atrous convolutions to make large effective kernel
    sizes computationally feasible.
    """
    skip = inp
    c1 = ConvBnElu(inp, out_filters, dilation_rate=4)
    c1 = SpatialDropout2D(dropout)(c1)
    c2 = ConvBnElu(add([skip, c1]), out_filters, dilation_rate=3)
    c2 = SpatialDropout2D(dropout)(c2)
    c3 = ConvBnElu(c2, out_filters, dilation_rate=2)
    c3 = SpatialDropout2D(dropout)(c3)
    c4 = ConvBnElu(add([c2, c3]), out_filters, dilation_rate=1)
    return c4
def create_model(input_shape, spatial_dropout_rate_1=0, spatial_dropout_rate_2=0, l2_rate=0):
    # Create a Sequential model object.
    model = Sequential()
    for i in range(2):
        model.add(Conv2D(filters=32, kernel_size=(3, 3),
                         kernel_regularizer=l2(l2_rate),
                         input_shape=input_shape))
        model.add(BatchNormalization())
        model.add(LeakyReLU(alpha=0.1))
        model.add(SpatialDropout2D(spatial_dropout_rate_1))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    for i in range(2):
        model.add(Conv2D(filters=64, kernel_size=(3, 3),
                         kernel_regularizer=l2(l2_rate)))
        model.add(BatchNormalization())
        model.add(LeakyReLU(alpha=0.1))
        model.add(SpatialDropout2D(spatial_dropout_rate_2))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    for i in range(2):
        model.add(Conv2D(filters=128, kernel_size=(3, 3),
                         kernel_regularizer=l2(l2_rate)))
        model.add(BatchNormalization())
        model.add(LeakyReLU(alpha=0.1))
        model.add(SpatialDropout2D(spatial_dropout_rate_2))
    # Reduces each h x w feature map to a single number by averaging over all h, w positions.
    model.add(GlobalAveragePooling2D())
    model.add(Dense(256, activation='relu'))
    model.add(Dropout(0.4))
    # Softmax output
    model.add(Dense(10, activation='softmax'))
    return model
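# Hedged usage sketch for create_model above: the spectrogram-like input shape,
# the dropout/L2 values, and the compile settings are illustrative assumptions.
model = create_model(input_shape=(40, 174, 1),
                     spatial_dropout_rate_1=0.07,
                     spatial_dropout_rate_2=0.14,
                     l2_rate=0.0005)
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.summary()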
def bottleneck_upsampling(x, output_depth, internal_scale=4, Momentum=0.1):
    internal_depth = int(output_depth / internal_scale)

    x_conv = Conv2D(internal_depth, (1, 1))(x)
    x_conv = BatchNormalization(momentum=Momentum)(x_conv)
    x_conv = Activation("relu")(x_conv)

    x_conv = Conv2DTranspose(internal_depth, (3, 3), strides=(2, 2), padding="same")(x_conv)
    x_conv = BatchNormalization(momentum=Momentum)(x_conv)
    x_conv = Activation("relu")(x_conv)

    x_conv = Conv2D(output_depth, (1, 1), use_bias=False)(x_conv)
    # x_conv = Conv2D(output_depth, (1, 1))(x_conv)
    x_conv = BatchNormalization(momentum=Momentum)(x_conv)
    x_conv = SpatialDropout2D(0.01)(x_conv)

    x_pool = Conv2D(output_depth, (1, 1), use_bias=False)(x)
    # x_pool = Conv2D(output_depth, (1, 1))(x)
    x_pool = BatchNormalization(momentum=Momentum)(x_pool)  # normalize the skip branch (was applied to x_conv twice)
    x_pool = UpSampling2D(size=(2, 2))(x_pool)

    x = Concatenate()([x_conv, x_pool])
    x = Conv2D(output_depth, (1, 1))(x)
    x = BatchNormalization(momentum=Momentum)(x)
    y = Activation("relu")(x)
    return y
def bottleneck_asymmetric(x, asymmetric, output_depth, internal_scale=4, Momentum=0.1):
    internal_depth = int(output_depth / internal_scale)
    pad_half = int(asymmetric / 2)

    x_conv = Conv2D(internal_depth, (1, 1))(x)
    x_conv = BatchNormalization(momentum=Momentum)(x_conv)
    x_conv = Activation("relu")(x_conv)

    x_conv = ZeroPadding2D(padding=((0, 0), (pad_half, pad_half)))(x_conv)
    x_conv = Conv2D(internal_depth, (1, asymmetric), use_bias=False)(x_conv)
    x_conv = ZeroPadding2D(padding=((pad_half, pad_half), (0, 0)))(x_conv)
    x_conv = Conv2D(internal_depth, (asymmetric, 1))(x_conv)
    x_conv = BatchNormalization(momentum=Momentum)(x_conv)
    x_conv = Activation("relu")(x_conv)

    x_conv = Conv2D(output_depth, (1, 1), use_bias=False)(x_conv)
    x_conv = BatchNormalization(momentum=Momentum)(x_conv)
    x_conv = SpatialDropout2D(0.01)(x_conv)

    x_pool = MaxPooling2D(pool_size=(2, 2))(x)
    x_pool = UpSampling2D(size=(2, 2))(x_pool)

    x = Concatenate()([x_conv, x_pool])
    x = Conv2D(output_depth, (1, 1))(x)
    x = BatchNormalization(momentum=Momentum)(x)
    y = Activation("relu")(x)
    return y
def bottleneck_dilated(x, dilated, output_depth, internal_scale=4, Momentum=0.1):
    internal_depth = int(output_depth / internal_scale)

    x_conv = Conv2D(internal_depth, (1, 1))(x)
    x_conv = BatchNormalization(momentum=Momentum)(x_conv)
    x_conv = Activation("relu")(x_conv)

    # dilation_rate = 2 : padding = 2
    # dilation_rate = 4 : padding = 4
    # dilation_rate = 8 : padding = 8
    # dilation_rate = 16: padding = 16
    x_conv = Conv2D(internal_depth, (3, 3), dilation_rate=(dilated, dilated),
                    padding="same")(x_conv)
    x_conv = BatchNormalization(momentum=Momentum)(x_conv)
    x_conv = Activation("relu")(x_conv)

    x_conv = Conv2D(output_depth, (1, 1), use_bias=False)(x_conv)
    x_conv = BatchNormalization(momentum=Momentum)(x_conv)
    x_conv = SpatialDropout2D(0.01)(x_conv)

    x_pool = MaxPooling2D(pool_size=(2, 2))(x)
    x_pool = UpSampling2D(size=(2, 2))(x_pool)

    x = Concatenate()([x_conv, x_pool])
    x = Conv2D(output_depth, (1, 1))(x)
    x = BatchNormalization(momentum=Momentum)(x)
    y = Activation("relu")(x)
    return y
def inception_unet(input_shape):
    inception_base = InceptionV3Same(input_shape=input_shape)
    conv1 = inception_base.get_layer("activation_3").output
    conv2 = inception_base.get_layer("activation_5").output
    conv3 = inception_base.get_layer("activation_29").output
    conv4 = inception_base.get_layer("activation_75").output
    conv5 = inception_base.get_layer("mixed10").output

    up6 = concatenate([UpSampling2D()(conv5), conv4], axis=-1)
    conv6 = conv_bn_relu(up6, 256, "conv6_1")
    conv6 = conv_bn_relu(conv6, 256, "conv6_2")

    up7 = concatenate([UpSampling2D()(conv6), conv3], axis=-1)
    conv7 = conv_bn_relu(up7, 192, "conv7_1")
    conv7 = conv_bn_relu(conv7, 192, "conv7_2")

    up8 = concatenate([UpSampling2D()(conv7), conv2], axis=-1)
    conv8 = conv_bn_relu(up8, 128, "conv8_1")
    conv8 = conv_bn_relu(conv8, 128, "conv8_2")

    up9 = concatenate([UpSampling2D()(conv8), conv1], axis=-1)
    conv9 = conv_bn_relu(up9, 64, "conv9_1")
    conv9 = conv_bn_relu(conv9, 64, "conv9_2")

    up10 = UpSampling2D()(conv9)
    conv10 = conv_bn_relu(up10, 32, "conv10_1")
    conv10 = conv_bn_relu(conv10, 32, "conv10_2")

    x = SpatialDropout2D(0.5)(conv10)
    x = Conv2D(1, (1, 1), activation="sigmoid", name="mask")(x)
    model = Model(inception_base.input, x)
    return model
def lflbblock(x, filters, pool_kernel, pool_stride):
    x = Conv2D(filters=filters, kernel_size=3, padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('elu')(x)
    x = SpatialDropout2D(0.5)(x)
    x = MaxPooling2D(pool_size=pool_kernel, strides=pool_stride)(x)
    return x
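# Hedged usage sketch for lflbblock above: the spectrogram-like input shape and
# the pooling parameters are illustrative assumptions.
inp = Input(shape=(128, 128, 1))
x = lflbblock(inp, filters=64, pool_kernel=(2, 2), pool_stride=(2, 2))
x = lflbblock(x, filters=128, pool_kernel=(4, 4), pool_stride=(4, 4))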
def resnet_50_unet(input_shape):
    resnet_base = resnet_50(input_shape=input_shape)
    conv1 = resnet_base.get_layer("activation_1").output
    conv2 = resnet_base.get_layer("activation_10").output
    conv3 = resnet_base.get_layer("activation_22").output
    conv4 = resnet_base.get_layer("activation_40").output
    conv5 = resnet_base.get_layer("activation_49").output

    up6 = concatenate([UpSampling2D()(conv5), conv4], axis=-1)
    conv6 = conv_bn_relu(up6, 256, "conv6_1")
    conv6 = conv_bn_relu(conv6, 256, "conv6_2")

    up7 = concatenate([UpSampling2D()(conv6), conv3], axis=-1)
    conv7 = conv_bn_relu(up7, 192, "conv7_1")
    conv7 = conv_bn_relu(conv7, 192, "conv7_2")

    up8 = concatenate([UpSampling2D()(conv7), conv2], axis=-1)
    conv8 = conv_bn_relu(up8, 128, "conv8_1")
    conv8 = conv_bn_relu(conv8, 128, "conv8_2")

    up9 = concatenate([UpSampling2D()(conv8), conv1], axis=-1)
    conv9 = conv_bn_relu(up9, 64, "conv9_1")
    conv9 = conv_bn_relu(conv9, 64, "conv9_2")

    up10 = concatenate([UpSampling2D()(conv9), resnet_base.input], axis=-1)
    conv10 = conv_bn_relu(up10, 32, "conv10_1")
    conv10 = conv_bn_relu(conv10, 32, "conv10_2")

    x = SpatialDropout2D(0.5)(conv10)
    x = Conv2D(1, (1, 1), activation="sigmoid")(x)
    model = Model(resnet_base.input, x)
    return model
def deconv_block_lstm_sd(base, conc_layer, layer, batch_norm, dropout, img_size, dr_rate):
    layer_conv_transpose = Conv2DTranspose(filters=base, kernel_size=(3, 3),
                                           strides=(2, 2), padding='same')(layer)
    x1 = Reshape(target_shape=(1, np.int32(img_size), np.int32(img_size), base))(conc_layer)
    x2 = Reshape(target_shape=(1, np.int32(img_size), np.int32(img_size), base))(layer_conv_transpose)
    layer_conc = concatenate([x1, x2], axis=1)
    if dropout:
        layer_lstm = ConvLSTM2D(np.int32(base / 2), (3, 3), padding='same',
                                return_sequences=False, go_backwards=True)(layer_conc)
        layer_d = SpatialDropout2D(dr_rate)(layer_lstm)
        layer_b = conv_block(base, layer_d, batch_norm)
    else:
        layer_lstm = ConvLSTM2D(np.int32(base / 2), (3, 3), padding='same',
                                return_sequences=False, go_backwards=True)(layer_conc)
        layer_b = conv_block(base, layer_lstm, batch_norm)
    return layer_b
def simple_cnn(input_shape, conv_layers, kernel_size=3, pool_size=2,
               regularization='dropout', reg_rate=0.2, hidden_dense=100,
               num_classes=5) -> Model:
    """
    Creates a CNN model with the specified number of convolutional layers.

    Parameters
    ----------
    input_shape : 2 integer tuple
        Shape of the input for the first convolutional layer
    conv_layers : list or array
        Vector of filters for convolutional layers. The number of layers
        depends on the length of the vector
    kernel_size : int
        Kernel size for convolutional layers
    pool_size : int
        Window size for pooling layers
    regularization : str
        Type of regularization layer: 'dropout' or 'spatial'
    reg_rate : float
        Regularization rate
    hidden_dense : int
        Number of units in the hidden dense layer
    num_classes : int
        Number of classes for the classification problem. Needed for the output layer

    Returns
    -------
    keras.models.Model
    """
    model = Sequential()

    # A layer instance should not be reused, so build a fresh regularization
    # layer for every block according to the requested type.
    def make_reg_layer():
        if regularization == 'spatial':
            return SpatialDropout2D(reg_rate)
        return Dropout(reg_rate)

    model.add(Conv2D(conv_layers[0], kernel_size, activation='relu',
                     input_shape=input_shape + (1, )))
    model.add(MaxPooling2D(pool_size))
    model.add(make_reg_layer())
    for filters in conv_layers[1:]:
        model.add(Conv2D(filters, kernel_size, activation='relu'))
        model.add(MaxPooling2D(pool_size))
        model.add(make_reg_layer())
    model.add(Flatten())
    model.add(Dense(hidden_dense, activation='relu'))
    model.add(Dense(units=num_classes, activation='softmax'))
    return model
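# Hedged usage sketch for simple_cnn above: the input shape, filter counts, and
# compile settings are illustrative assumptions.
cnn = simple_cnn(input_shape=(28, 28), conv_layers=[16, 32],
                 regularization='spatial', reg_rate=0.25, num_classes=5)
cnn.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])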
def _end_block(x, spatial_dropout=0.0):
    if spatial_dropout > 0.0:
        x = SpatialDropout2D(spatial_dropout)(x)
    x = Conv2D(1, (1, 1))(x)
    x = BatchNormalization(axis=-1)(x)
    x = Activation('sigmoid')(x)
    return x
def model_AlexNet(hyperparameters):
    model = Sequential()
    model.add(Conv2D(filters=hyperparameters['base'],
                     input_shape=hyperparameters['input_shape'],
                     kernel_size=(3, 3), strides=(1, 1), padding='same'))
    if hyperparameters['batch_norm']:
        model.add(BatchNormalization())
    model.add(Activation('relu'))
    if len(hyperparameters['spatial_dropout']) > 0:
        model.add(SpatialDropout2D(hyperparameters['spatial_dropout'][0]))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Conv2D(filters=hyperparameters['base'] * 2, kernel_size=(3, 3),
                     strides=(1, 1), padding='same'))
    if hyperparameters['batch_norm']:
        model.add(BatchNormalization())
    model.add(Activation('relu'))
    if len(hyperparameters['spatial_dropout']) > 1:
        model.add(SpatialDropout2D(hyperparameters['spatial_dropout'][1]))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Conv2D(filters=hyperparameters['base'] * 4, kernel_size=(3, 3),
                     strides=(1, 1), padding='same'))
    if hyperparameters['batch_norm']:
        model.add(BatchNormalization())
    model.add(Activation('relu'))

    model.add(Conv2D(filters=hyperparameters['base'] * 4, kernel_size=(3, 3),
                     strides=(1, 1), padding='same'))
    if hyperparameters['batch_norm']:
        model.add(BatchNormalization())
    model.add(Activation('relu'))

    model.add(Conv2D(filters=hyperparameters['base'] * 2, kernel_size=(3, 3),
                     strides=(1, 1), padding='same'))
    if hyperparameters['batch_norm']:
        model.add(BatchNormalization())
    model.add(Activation('relu'))
    if len(hyperparameters['spatial_dropout']) > 2:
        model.add(SpatialDropout2D(hyperparameters['spatial_dropout'][2]))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Flatten())
    for i, unit in enumerate(hyperparameters['dense_units']):
        model.add(Dense(unit))
        model.add(Activation(hyperparameters['dense_activation'][i]))
        if len(hyperparameters['dropout']) > i:
            model.add(Dropout(hyperparameters['dropout'][i]))

    print(model.summary())
    model.compile(loss=hyperparameters['loss'],
                  optimizer=hyperparameters['optimizer'](lr=hyperparameters['lr']),
                  metrics=hyperparameters['metrics'])
    return model
def build_myronenko_cae(input_shape, batch_size=None, ker_reg=False):
    # INPUT
    input_img = Input(shape=input_shape, batch_size=batch_size)  # 128x128x1
    ker_reg = l2(1e-5) if ker_reg else None

    # ENCODER
    bname = 'GB'
    # Convolution applied to the input
    x = Conv2D(32, (3, 3), strides=1, padding="same", name='Conv1')(input_img)  # 128x128x32
    x = SpatialDropout2D(0.1)(x)

    # FPRB1 (Out: 64x64x32)
    y = full_pre_residual_block(x, 32, ker_reg=ker_reg, name=bname + '_32_1')  # 128x128x32
    y = Conv2D(filters=32, kernel_size=(3, 3), strides=2, padding="same",
               kernel_regularizer=ker_reg, name='Conv_Downsample_1')(y)  # 64x64x32

    # GFPRB2 (Out: 32x32x64)
    y = full_pre_residual_block(y, 64, ker_reg=ker_reg, name=bname + '_64_1')  # 64x64x64
    y = full_pre_residual_block(y, 64, ker_reg=ker_reg, name=bname + '_64_2')  # 64x64x64
    y = Conv2D(filters=64, kernel_size=(3, 3), strides=2, padding="same",
               kernel_regularizer=ker_reg, name='Conv_Downsample_2')(y)  # 32x32x64

    # GFPRB3 (Out: 16x16x128)
    y = full_pre_residual_block(y, 128, ker_reg=ker_reg, name=bname + '_128_1')  # 32x32x128
    y = full_pre_residual_block(y, 128, ker_reg=ker_reg, name=bname + '_128_2')  # 32x32x128
    y = Conv2D(filters=128, kernel_size=(3, 3), strides=2, padding="same",
               kernel_regularizer=ker_reg, name='Conv_Downsample_3')(y)  # 16x16x128
    # y = full_pre_residual_block(y, 256, ker_reg=ker_reg, name=bname + '_256_1')  # 16x16x256

    # DECODER
    y = upsampling_block(y, 64, name='UP1')  # /4 * /4 * 64
    y = upsampling_block(y, 32, name='UP2')  # /2 * /2 * 32
    y = upsampling_block(y, 16, name='UP3')  # 1. * 1. * 16
    decoded = Conv2D(1, (3, 3), activation='sigmoid', padding='same', name='OUTPUT')(y)

    return Model(input_img, decoded)
def create_model(x_train, y_train, pooler=MaxPooling2D):
    print('constructing model... ', end='', flush=True)
    model = Sequential()
    model.add(Conv2D(64, (3, 3), padding='same', input_shape=x_train.shape[1:]))
    model.add(Activation('relu'))
    model.add(Conv2D(64, (5, 5)))
    model.add(Activation('relu'))
    if POOL:
        model.add(pooler(pool_size=(3, 3)))
    else:
        model.add(Conv2D(64, (3, 3), strides=(2, 2)))
        model.add(Activation('relu'))
    model.add(SpatialDropout2D(0.25))

    model.add(Conv2D(128, (5, 5), padding='same'))
    model.add(Activation('relu'))
    model.add(Conv2D(128, (7, 7)))
    model.add(Activation('relu'))
    if POOL:
        model.add(pooler(pool_size=(2, 2)))
    else:
        model.add(convolver2(window=(8, 8), strides=(7, 7)))
        model.add(Activation('relu'))
    model.add(SpatialDropout2D(0.25))

    model.add(Flatten())
    model.add(Dense(512))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(y_train.shape[1]))
    model.add(Activation(final_activator))

    # RMSprop optimizer
    opt = optimizers.RMSprop(learning_rate=0.0001, decay=1e-6)

    # train the model using RMSprop
    model.compile(loss=loss, optimizer=opt, metrics=['accuracy'])
    print('done!', flush=True)
    print()
    model.summary()
    print()
    return model
def residual_layer(self, x_input, filters=32):
    x = SeparableConv2D(kernel_size=(3, 3), filters=filters, padding='same',
                        use_bias=False)(x_input)
    x = BatchNormalization()(x)
    x = SpatialDropout2D(rate=0.2)(x)
    x = PReLU()(x)
    skip = x + x_input
    return skip
def get_dropout(model_name, input_layer, name, dropout_rate, time_dist=True):
    if dropout_rate > 0:
        if 'spatial' in model_name:
            if time_dist:
                output_layer = TimeDistributed(SpatialDropout2D(dropout_rate),
                                               name=name)(input_layer)
            else:
                output_layer = SpatialDropout2D(dropout_rate, name=name)(input_layer)
        else:
            if time_dist:
                output_layer = TimeDistributed(Dropout(dropout_rate),
                                               name=name)(input_layer)
            else:
                output_layer = Dropout(dropout_rate, name=name)(input_layer)
    else:
        output_layer = input_layer
    return output_layer
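# Hedged usage sketch for get_dropout above: the frame shape, the layer name, and
# the model_name string are illustrative assumptions. With 'spatial' in the model
# name and time_dist=True, a SpatialDropout2D layer is wrapped in TimeDistributed.
frames = Input(shape=(10, 64, 64, 3))                        # (time, H, W, C)
x = TimeDistributed(Conv2D(16, (3, 3), padding='same'))(frames)
x = get_dropout('cnn_spatial_lstm', x, name='sd_1',
                dropout_rate=0.3, time_dist=True)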
def unet_resnet(input_shape=(256, 256, 3), num_classes=8, encoder_weights='imagenet'):
    base_model = resnet50.ResNet50(input_shape=input_shape, include_top=False,
                                   weights=encoder_weights)
    for l in base_model.layers:
        l.trainable = True

    conv0 = base_model.get_layer("activation").output
    conv1 = base_model.get_layer("activation_1").output
    conv2 = base_model.get_layer("activation_10").output
    conv3 = base_model.get_layer("activation_22").output
    conv4 = base_model.get_layer("activation_40").output
    conv5 = base_model.get_layer("activation_48").output
    # (None, 128, 128, 64) (None, 64, 64, 128) (None, 32, 32, 256) (None, 16, 16, 512) (None, 16, 16, 2048)
    # print(conv1.shape, conv2.shape, conv3.shape, conv4.shape, conv5.shape)

    up6 = K.concatenate([conv5, conv4], axis=-1)
    conv6 = conv_block_simple(up6, 256, "conv6_1")
    conv6 = conv_block_simple(conv6, 256, "conv6_2")

    up7 = K.concatenate([UpSampling2D()(conv6), conv3], axis=-1)
    conv7 = conv_block_simple(up7, 192, "conv7_1")
    conv7 = conv_block_simple(conv7, 192, "conv7_2")

    up8 = K.concatenate([UpSampling2D()(conv7), conv2], axis=-1)
    conv8 = conv_block_simple(up8, 128, "conv8_1")
    conv8 = conv_block_simple(conv8, 128, "conv8_2")

    up9 = K.concatenate([UpSampling2D()(conv8), conv1], axis=-1)
    conv9 = conv_block_simple(up9, 64, "conv9_1")
    conv9 = conv_block_simple(conv9, 64, "conv9_2")

    up9x = K.concatenate([UpSampling2D()(conv9), conv0], axis=-1)
    conv9x = conv_block_simple(up9x, 64, "conv9x_1")
    conv9x = conv_block_simple(conv9x, 64, "conv9x_2")

    vgg = vgg16.VGG16(input_shape=input_shape, input_tensor=base_model.input,
                      include_top=False)
    for l in vgg.layers:
        l.trainable = False
    vgg_first_conv = vgg.get_layer("block1_conv2").output

    up10 = K.concatenate([UpSampling2D()(conv9x), vgg_first_conv], axis=-1)
    conv10 = conv_block_simple(up10, 32, "conv10_1")
    conv10 = conv_block_simple(conv10, 32, "conv10_2")
    conv10 = SpatialDropout2D(0.2)(conv10)

    x = Conv2D(num_classes, (1, 1), activation=None, name="prediction")(conv10)
    model = Model(base_model.input, x)
    return model
def f(x):
    x = Conv2D(num_filters, filter_size, name=name, padding=padding,
               kernel_initializer='he_normal')(x)
    x = BatchNormalization()(x, training=training)
    x = LeakyReLU(alpha=0.3)(x)
    if dropout_rate is not None:
        x = SpatialDropout2D(dropout_rate)(x)
    return x
def deconv_block_sd(base, conc_layer, layer, batch_norm, dropout, dr_rate):
    layer_conv_transpose = Conv2DTranspose(filters=base, kernel_size=(3, 3),
                                           strides=(2, 2), padding='same')(layer)
    layer_conc = concatenate([conc_layer, layer_conv_transpose])
    if dropout:
        layer_d = SpatialDropout2D(dr_rate)(layer_conc)
        layer_b = conv_block(base, layer_d, batch_norm)
    else:
        layer_b = conv_block(base, layer_conc, batch_norm)
    return layer_b
def f(x):
    x = Conv2DTranspose(num_filters, filter_size, strides=strides, padding=padding,
                        kernel_initializer='he_normal')(x)
    x = Concatenate(axis=3)([x, skip_layer])
    x = Conv2D(int(num_filters), filter_size, name=name, padding=padding)(x)
    x = BatchNormalization()(x, training=training)
    x = LeakyReLU(alpha=0.3)(x)
    if dropout_rate is not None:
        x = SpatialDropout2D(dropout_rate)(x)
    return x
def residual_layer(self, x_input, filters=32):
    layer1 = BatchNormalization()(x_input)
    # x = Conv2D(kernel_size=(3, 3), filters=filters, padding='same', use_bias=False)(layer1)
    x = SeparableConv2D(kernel_size=(3, 3), filters=filters, padding='same',
                        use_bias=False)(layer1)
    x = BatchNormalization()(x)
    x = ReLU()(x)
    x = SpatialDropout2D(0.3)(x)
    skip = x + layer1
    return skip