def UResNet34(input_shape=(128, 128, 1), classes=1, decoder_filters=16,
              decoder_block_type='upsampling', encoder_weights="imagenet",
              input_tensor=None, activation='sigmoid', **kwargs):
    """Build and compile a U-Net-style segmentation model on a ResNet-34 encoder.

    Parameters
    ----------
    input_shape : tuple
        Shape of the input images; default ``(128, 128, 1)``.
    input_tensor : tensor, optional
        Existing tensor to use as the model input, forwarded to the encoder
        builder.
    classes, decoder_filters, decoder_block_type, encoder_weights, activation, **kwargs
        Accepted for signature compatibility but currently UNUSED by this
        implementation — the decoder is hard-wired by ``build_model(.., 16, 0.5)``.
        TODO(review): either honour these parameters or remove them.

    Returns
    -------
    Model
        Compiled Keras model (binary cross-entropy loss, ``my_iou_metric``
        metric) named ``'u-resnet34'``.
    """
    backbone = ResnetBuilder.build_resnet_34(input_shape=input_shape,
                                             input_tensor=input_tensor)
    input_layer = backbone.input
    # Decoder head: 16 starting filters, 0.5 dropout (see build_model).
    output_layer = build_model(input_layer, 16, 0.5)
    model = Model(input_layer, output_layer)
    # NOTE(review): lowercase `optimizers.adam` and the `lr` keyword are legacy
    # Keras spellings; newer tf.keras expects `Adam(learning_rate=...)` and
    # treats `model.name` as read-only — confirm against the installed version.
    c = optimizers.adam(lr=0.01)
    model.compile(loss="binary_crossentropy", optimizer=c,
                  metrics=[my_iou_metric])
    model.name = 'u-resnet34'
    return model
def UXception(input_shape=(None, None, 3)):
    """Build a U-Net-style segmentation model on an ImageNet-pretrained Xception encoder.

    Skip connections are taken from fixed layer indices of the Xception
    backbone; two of them are zero-padded so their spatial dimensions line up
    with the upsampled decoder tensors before concatenation.

    Returns an uncompiled Keras ``Model`` named ``'u-xception'`` with a
    single-channel sigmoid output.
    """
    backbone = Xception(input_shape=input_shape, weights='imagenet', include_top=False)
    input_layer = backbone.input
    start_neurons = 16

    # Deepest encoder feature map, downsampled once more before the bottleneck.
    conv4 = backbone.layers[121].output
    conv4 = LeakyReLU(alpha=0.1)(conv4)
    pool4 = MaxPooling2D((2, 2))(conv4)
    pool4 = Dropout(0.1)(pool4)

    # Bottleneck: two residual blocks at the widest channel count.
    convm = Conv2D(start_neurons * 32, (3, 3), activation=None, padding="same")(pool4)
    convm = residual_block(convm, start_neurons * 32)
    convm = residual_block(convm, start_neurons * 32)
    convm = LeakyReLU(alpha=0.1)(convm)

    # Decoder stage 4: upsample and fuse with the deepest skip.
    deconv4 = Conv2DTranspose(start_neurons * 16, (3, 3), strides=(2, 2), padding="same")(convm)
    uconv4 = concatenate([deconv4, conv4])
    uconv4 = Dropout(0.1)(uconv4)
    uconv4 = Conv2D(start_neurons * 16, (3, 3), activation=None, padding="same")(uconv4)
    uconv4 = residual_block(uconv4, start_neurons * 16)
    uconv4 = residual_block(uconv4, start_neurons * 16)
    uconv4 = LeakyReLU(alpha=0.1)(uconv4)

    # Decoder stage 3.
    deconv3 = Conv2DTranspose(start_neurons * 8, (3, 3), strides=(2, 2), padding="same")(uconv4)
    conv3 = backbone.layers[31].output
    uconv3 = concatenate([deconv3, conv3])
    uconv3 = Dropout(0.1)(uconv3)
    uconv3 = Conv2D(start_neurons * 8, (3, 3), activation=None, padding="same")(uconv3)
    uconv3 = residual_block(uconv3, start_neurons * 8)
    uconv3 = residual_block(uconv3, start_neurons * 8)
    uconv3 = LeakyReLU(alpha=0.1)(uconv3)

    # Decoder stage 2: the skip tensor is one pixel short on each of the first
    # two spatial axes, so pad before concatenating.
    deconv2 = Conv2DTranspose(start_neurons * 4, (3, 3), strides=(2, 2), padding="same")(uconv3)
    conv2 = backbone.layers[21].output
    conv2 = ZeroPadding2D(((1, 0), (1, 0)))(conv2)
    uconv2 = concatenate([deconv2, conv2])
    uconv2 = Dropout(0.1)(uconv2)
    uconv2 = Conv2D(start_neurons * 4, (3, 3), activation=None, padding="same")(uconv2)
    uconv2 = residual_block(uconv2, start_neurons * 4)
    uconv2 = residual_block(uconv2, start_neurons * 4)
    uconv2 = LeakyReLU(alpha=0.1)(uconv2)

    # Decoder stage 1: likewise, pad the shallow skip by three pixels.
    deconv1 = Conv2DTranspose(start_neurons * 2, (3, 3), strides=(2, 2), padding="same")(uconv2)
    conv1 = backbone.layers[11].output
    conv1 = ZeroPadding2D(((3, 0), (3, 0)))(conv1)
    uconv1 = concatenate([deconv1, conv1])
    uconv1 = Dropout(0.1)(uconv1)
    uconv1 = Conv2D(start_neurons * 2, (3, 3), activation=None, padding="same")(uconv1)
    uconv1 = residual_block(uconv1, start_neurons * 2)
    uconv1 = residual_block(uconv1, start_neurons * 2)
    uconv1 = LeakyReLU(alpha=0.1)(uconv1)

    # Final upsample back to input resolution; no encoder skip at this scale.
    uconv0 = Conv2DTranspose(start_neurons * 1, (3, 3), strides=(2, 2), padding="same")(uconv1)
    uconv0 = Dropout(0.1)(uconv0)
    uconv0 = Conv2D(start_neurons * 1, (3, 3), activation=None, padding="same")(uconv0)
    uconv0 = residual_block(uconv0, start_neurons * 1)
    uconv0 = residual_block(uconv0, start_neurons * 1)
    uconv0 = LeakyReLU(alpha=0.1)(uconv0)

    # Light dropout before the 1x1 sigmoid head.
    uconv0 = Dropout(0.1 / 2)(uconv0)
    output_layer = Conv2D(1, (1, 1), padding="same", activation="sigmoid")(uconv0)

    model = Model(input_layer, output_layer)
    model.name = 'u-xception'
    return model
def UEfficientNet(input_shape=(None, None, 3), dropout_rate=0.1):
    """Build a U-Net-style segmentation model on an ImageNet-pretrained EfficientNet-B4 encoder.

    Parameters
    ----------
    input_shape : tuple
        Shape of the input images.
    dropout_rate : float
        Dropout probability used throughout the decoder (half rate before the
        final head). Fix: the original hard-coded ``0.1`` in the two shallow
        decoder stages, silently ignoring this parameter; all stages now honour
        it (identical behaviour at the default ``0.1``).

    Returns
    -------
    Model
        Uncompiled Keras model with a single-channel sigmoid output, named
        ``'u-efficientnet'`` (fixed: was mislabelled ``'u-xception'``).
    """
    backbone = EfficientNetB4(weights='imagenet', include_top=False, input_shape=input_shape)
    input_layer = backbone.input
    start_neurons = 16

    # Deepest encoder feature map, downsampled once more before the bottleneck.
    # NOTE(review): skip indices (342/154/92/30) are tied to this exact
    # EfficientNetB4 implementation — confirm if the library is upgraded.
    conv4 = backbone.layers[342].output
    conv4 = LeakyReLU(alpha=0.1)(conv4)
    pool4 = MaxPooling2D((2, 2))(conv4)
    pool4 = Dropout(dropout_rate)(pool4)

    # Bottleneck: two residual blocks at the widest channel count.
    convm = Conv2D(start_neurons * 32, (3, 3), activation=None, padding="same")(pool4)
    convm = residual_block(convm, start_neurons * 32)
    convm = residual_block(convm, start_neurons * 32)
    convm = LeakyReLU(alpha=0.1)(convm)

    # Decoder stage 4.
    deconv4 = Conv2DTranspose(start_neurons * 16, (3, 3), strides=(2, 2), padding="same")(convm)
    uconv4 = concatenate([deconv4, conv4])
    uconv4 = Dropout(dropout_rate)(uconv4)
    uconv4 = Conv2D(start_neurons * 16, (3, 3), activation=None, padding="same")(uconv4)
    uconv4 = residual_block(uconv4, start_neurons * 16)
    uconv4 = residual_block(uconv4, start_neurons * 16)
    uconv4 = LeakyReLU(alpha=0.1)(uconv4)

    # Decoder stage 3.
    deconv3 = Conv2DTranspose(start_neurons * 8, (3, 3), strides=(2, 2), padding="same")(uconv4)
    conv3 = backbone.layers[154].output
    uconv3 = concatenate([deconv3, conv3])
    uconv3 = Dropout(dropout_rate)(uconv3)
    uconv3 = Conv2D(start_neurons * 8, (3, 3), activation=None, padding="same")(uconv3)
    uconv3 = residual_block(uconv3, start_neurons * 8)
    uconv3 = residual_block(uconv3, start_neurons * 8)
    uconv3 = LeakyReLU(alpha=0.1)(uconv3)

    # Decoder stage 2.
    deconv2 = Conv2DTranspose(start_neurons * 4, (3, 3), strides=(2, 2), padding="same")(uconv3)
    conv2 = backbone.layers[92].output
    uconv2 = concatenate([deconv2, conv2])
    uconv2 = Dropout(dropout_rate)(uconv2)
    uconv2 = Conv2D(start_neurons * 4, (3, 3), activation=None, padding="same")(uconv2)
    uconv2 = residual_block(uconv2, start_neurons * 4)
    uconv2 = residual_block(uconv2, start_neurons * 4)
    uconv2 = LeakyReLU(alpha=0.1)(uconv2)

    # Decoder stage 1.
    deconv1 = Conv2DTranspose(start_neurons * 2, (3, 3), strides=(2, 2), padding="same")(uconv2)
    conv1 = backbone.layers[30].output
    uconv1 = concatenate([deconv1, conv1])
    uconv1 = Dropout(dropout_rate)(uconv1)
    uconv1 = Conv2D(start_neurons * 2, (3, 3), activation=None, padding="same")(uconv1)
    uconv1 = residual_block(uconv1, start_neurons * 2)
    uconv1 = residual_block(uconv1, start_neurons * 2)
    uconv1 = LeakyReLU(alpha=0.1)(uconv1)

    # Final upsample back to input resolution; no encoder skip at this scale.
    uconv0 = Conv2DTranspose(start_neurons * 1, (3, 3), strides=(2, 2), padding="same")(uconv1)
    uconv0 = Dropout(dropout_rate)(uconv0)
    uconv0 = Conv2D(start_neurons * 1, (3, 3), activation=None, padding="same")(uconv0)
    uconv0 = residual_block(uconv0, start_neurons * 1)
    uconv0 = residual_block(uconv0, start_neurons * 1)
    uconv0 = LeakyReLU(alpha=0.1)(uconv0)

    # Light dropout before the 1x1 sigmoid head.
    uconv0 = Dropout(dropout_rate / 2)(uconv0)
    output_layer = Conv2D(1, (1, 1), padding="same", activation="sigmoid")(uconv0)

    model = Model(input_layer, output_layer)
    # Bug fix: was 'u-xception', copy-pasted from UXception above.
    model.name = 'u-efficientnet'
    return model
def init_resnet_baseline(input_shape, num_classes):
    """Build a 3-block 1-D ResNet baseline classifier.

    Each block is three Conv1D+BN layers (kernels 8, 5, 3) with a residual
    shortcut; channel counts are 64, 128, 128. A global average pool feeds a
    softmax prediction layer.

    Parameters
    ----------
    input_shape : tuple
        Shape of a single input sequence (timesteps, channels).
    num_classes : int
        Number of output classes for the softmax layer.

    Returns
    -------
    Model
        Uncompiled Keras model named ``'ResNet'``.
    """
    num_hidden = 64
    inp = Input(input_shape)

    # Block 1: expand channels, so the shortcut needs a 1x1 conv projection.
    conv_x = Conv1D(num_hidden, 8, padding='same')(inp)
    conv_x = BatchNormalization()(conv_x)
    conv_x = Activation('relu')(conv_x)
    conv_y = Conv1D(num_hidden, 5, padding='same')(conv_x)
    conv_y = BatchNormalization()(conv_y)
    conv_y = Activation('relu')(conv_y)
    conv_z = Conv1D(num_hidden, 3, padding='same')(conv_y)
    conv_z = BatchNormalization()(conv_z)
    shortcut_1 = Conv1D(num_hidden, 1, padding='same')(inp)
    shortcut_1 = BatchNormalization()(shortcut_1)
    outp_1 = Add()([shortcut_1, conv_z])
    outp_1 = Activation('relu')(outp_1)

    # Block 2: expand channels again, projected shortcut.
    conv_x = Conv1D(2 * num_hidden, 8, padding='same')(outp_1)
    conv_x = BatchNormalization()(conv_x)
    conv_x = Activation('relu')(conv_x)
    conv_y = Conv1D(2 * num_hidden, 5, padding='same')(conv_x)
    conv_y = BatchNormalization()(conv_y)
    conv_y = Activation('relu')(conv_y)
    conv_z = Conv1D(2 * num_hidden, 3, padding='same')(conv_y)
    conv_z = BatchNormalization()(conv_z)
    shortcut_2 = Conv1D(2 * num_hidden, 1, padding='same')(outp_1)
    shortcut_2 = BatchNormalization()(shortcut_2)
    outp_2 = Add()([shortcut_2, conv_z])
    outp_2 = Activation('relu')(outp_2)

    # Block 3: channel counts already match, so the shortcut is BN only.
    # (Also normalized `keras.layers.Conv1D` to the bare `Conv1D` used everywhere else.)
    conv_x = Conv1D(2 * num_hidden, 8, padding='same')(outp_2)
    conv_x = BatchNormalization()(conv_x)
    conv_x = Activation('relu')(conv_x)
    conv_y = Conv1D(2 * num_hidden, 5, padding='same')(conv_x)
    conv_y = BatchNormalization()(conv_y)
    conv_y = Activation('relu')(conv_y)
    conv_z = Conv1D(2 * num_hidden, 3, padding='same')(conv_y)
    conv_z = BatchNormalization()(conv_z)
    shortcut_y = BatchNormalization()(outp_2)
    # Bug fix: the original added `shortcut_2` (block 2's shortcut) here,
    # leaving `shortcut_y` dead and wiring block 3's residual to the wrong tensor.
    outp_3 = Add()([shortcut_y, conv_z])
    outp_3 = Activation('relu')(outp_3)

    # Pooling and prediction layer.
    gap = GlobalAveragePooling1D()(outp_3)
    outp = Dense(num_classes, activation='softmax')(gap)

    model = Model(inp, outp)
    model.name = 'ResNet'
    return model
def DeconvNet(input_shape, num_classes):
    """Thin factory: wrap the tensors produced by ``build_deconvnet`` in a Keras Model.

    Parameters
    ----------
    input_shape : tuple
        Shape of a single input sample, forwarded to ``build_deconvnet``.
    num_classes : int
        Number of output classes, forwarded to ``build_deconvnet``.

    Returns
    -------
    Model
        Uncompiled Keras model named ``'DeconvNet'``.
    """
    net_in, net_out = build_deconvnet(input_shape, num_classes)
    net = Model(inputs=net_in, outputs=net_out)
    net.name = 'DeconvNet'
    return net