def conv_t_block(size, input_1, input_2, kernel, stride, activation, kinit, padding, axis, batch_normalization=False):
    """Transposed-conv upsampling block: 2x upsample, optional skip merge, two convs.

    Upsamples ``input_1`` with a 2x2 stride-2 transposed convolution (always
    batch-normalized), concatenates ``input_2`` along ``axis`` when given,
    then applies two 256-filter convolutions; only the first of those is
    batch-normalized, and only when ``batch_normalization`` is True.

    NOTE(review): ``size`` is accepted but never used, and this definition is
    shadowed by a later ``conv_t_block`` in this module — confirm which one
    callers intend.
    """
    up = Convolution2DTranspose(256, (2, 2), strides=(2, 2), padding=padding)(input_1)
    # The upsampling path is normalized unconditionally, unlike the convs below.
    up = BatchNormalization()(up)
    if input_2 is not None:
        up = concatenate([up, input_2], axis=axis)
    mid = Convolution2D(256, kernel_size=kernel, strides=stride,
                        activation=activation, kernel_initializer=kinit,
                        padding=padding)(up)
    if batch_normalization:
        mid = BatchNormalization()(mid)
    out = Convolution2D(256, kernel_size=kernel, strides=stride,
                        activation=activation, kernel_initializer=kinit,
                        padding=padding)(mid)
    return out
def f(input):
    """Upsampling block: ConvTranspose -> BatchNorm -> ReLU -> optional Dropout.

    Relies on enclosing-scope variables ``nb_filter``, ``rk``, ``ck``,
    ``name`` and ``boolean_dropout`` — this function is only meaningful
    inside the scope that defines them.

    NOTE(review): the parameter shadows the builtin ``input``; kept as-is
    for interface compatibility.
    """
    x = Convolution2DTranspose(
        nb_filter,
        kernel_size=(rk, ck),
        strides=(2, 2),
        padding="same",
        use_bias=False,  # bias is redundant: BatchNormalization follows
        kernel_initializer="Orthogonal",
        name='conv-' + name,
    )(input)
    x = BatchNormalization(name='BN-' + name)(x)
    x = Activation(activation="relu", name='Relu-' + name)(x)
    if boolean_dropout:
        x = Dropout(0.1)(x)
    return x
def conv_t_block(size, input_1, input_2, kernel, stride, activation, kinit,
                 padding, axis, batch_normalization=False, dropout=None):
    """Transposed-conv upsampling block with pre-activation-style BN placement.

    Upsamples ``input_1`` 2x (ConvTranspose -> BN -> activation), optionally
    batch-normalizes and concatenates the skip tensor ``input_2`` along
    ``axis``, then applies two 256-filter convolutions with explicit
    Activation layers; their BatchNormalization is gated on
    ``batch_normalization``, and ``dropout`` (a rate, e.g. 0.25) inserts a
    Dropout layer between them when not None.

    NOTE(review): ``size`` is accepted but unused; this definition shadows
    the earlier ``conv_t_block`` in this module.
    """
    # Bias is redundant immediately before a BatchNormalization layer.
    use_bias = not batch_normalization
    conv1 = Convolution2DTranspose(256, (2, 2), strides=(2, 2),
                                   padding=padding, use_bias=use_bias)(input_1)
    conv1 = BatchNormalization()(conv1)  # always normalized on the upsample path
    conv1 = Activation(activation)(conv1)
    if input_2 is not None:
        # Skip connection is normalized before the merge.
        input_2 = BatchNormalization()(input_2)
        conv1 = concatenate([conv1, input_2], axis=axis)
    conv2 = Convolution2D(256, kernel_size=kernel, strides=stride,
                          kernel_initializer=kinit, padding=padding,
                          use_bias=use_bias)(conv1)
    if batch_normalization:
        conv2 = BatchNormalization()(conv2)
    conv2 = Activation(activation)(conv2)
    if dropout is not None:
        conv2 = Dropout(rate=dropout)(conv2)
    # NOTE(review): use_bias is hard-coded False here even when no BN follows
    # (batch_normalization=False) — confirm this is intentional.
    conv3 = Convolution2D(256, kernel_size=kernel, strides=stride,
                          kernel_initializer=kinit, padding=padding,
                          use_bias=False)(conv2)
    if batch_normalization:
        # BUG FIX: the original ternary fell back to ``conv2`` when
        # batch_normalization was False, silently discarding the third
        # convolution above. The no-BN path must keep conv3.
        conv3 = BatchNormalization()(conv3)
    conv3 = Activation(activation)(conv3)
    return conv3
def up_conv(filters, kernel_size, stride):
    """Create (but do not apply) a 2-D transposed-convolution upsampling layer.

    ``stride`` fills Convolution2DTranspose's third parameter, ``strides``.
    """
    layer = Convolution2DTranspose(filters, kernel_size, strides=stride)
    return layer
def unet_v14(
    name,
    input_shapes,
    output_shapes,
    kernel=3,
    stride=1,
    activation='elu',
    output_channels=2,
    kinit='RandomUniform',
    batch_norm=True,
    padding='same',
    axis=3,
    crop=0,
    mpadd=0,
):
    """Build a 5-level U-Net Keras Model with a softmax 1x1 output head.

    The encoder (32/64/128/256/512 filters) always batch-normalizes after
    each convolution; the decoder's batch normalization is gated on
    ``batch_norm``. ``crop``/``mpadd`` apply symmetric ZeroPadding2D before
    the first conv and Cropping2D before the output head.

    NOTE(review): ``name`` and ``output_shapes`` are accepted but unused —
    presumably kept for a common model-factory signature; confirm.
    """
    nr_classes = output_channels

    shape = input_shapes["input_1"]
    if len(shape) == 3:
        height, width, channels = shape
    else:
        # 4-tuple form carries a leading timestamp dimension that is ignored.
        _, height, width, channels = shape

    def _double_conv(filters, tensor, encoder):
        """Two conv layers; BN is unconditional on the encoder path only."""
        for _ in range(2):
            tensor = Convolution2D(filters, kernel_size=kernel, strides=stride,
                                   activation=activation,
                                   kernel_initializer=kinit,
                                   padding=padding)(tensor)
            if encoder or batch_norm:
                tensor = BatchNormalization()(tensor)
        return tensor

    def _up_merge(filters, below, skip):
        """2x transposed-conv upsample of ``below``, concatenated with ``skip``."""
        up = Convolution2DTranspose(filters, (2, 2), strides=(2, 2),
                                    padding=padding)(below)
        return concatenate([up, skip], axis=axis)

    inputs = Input((height, width, channels))

    # ---- contracting path ----
    enc1 = _double_conv(32, ZeroPadding2D((crop, crop))(inputs), True)
    enc2 = _double_conv(64, MaxPooling2D(pool_size=(2, 2))(enc1), True)
    enc3 = _double_conv(128, MaxPooling2D(pool_size=(2, 2))(enc2), True)
    enc4 = _double_conv(256, MaxPooling2D(pool_size=(2, 2))(enc3), True)
    bottom = _double_conv(512, MaxPooling2D(pool_size=(2, 2))(enc4), True)

    # ---- expanding path ----
    dec = _double_conv(256, _up_merge(256, bottom, enc4), False)
    dec = _double_conv(128, _up_merge(128, dec, enc3), False)
    dec = _double_conv(64, _up_merge(64, dec, enc2), False)
    dec = _double_conv(32, _up_merge(32, dec, enc1), False)

    dec = Cropping2D((mpadd, mpadd))(dec)
    head = Convolution2D(nr_classes, (1, 1), activation='softmax',
                         name="output_1")(dec)
    return Model(inputs=[inputs], outputs=[head])
def Net(n_classes, input):
    """U-Net-style segmentation head built from the project's ``conv_block``.

    Four encoder stages (32/64/128/256 filters, each halving resolution
    256->128->64->32->16), a 512-filter bottom with a dilated middle conv,
    and four decoder stages (transposed conv + BN, skip concat, two
    ``conv_block``s). Returns per-pixel softmax scores over ``n_classes``.

    NOTE(review): the parameter shadows the builtin ``input``; kept for
    interface compatibility. ``conv_block`` is project-defined — presumably
    conv + BN + activation; confirm.
    """
    # ---- left (contracting) branch ----
    skips = []
    x = input
    for stage, filters in enumerate((32, 64, 128, 256), start=1):
        x = conv_block(x, filters, (3, 3), strides=1,
                       name='L_conv%d-1' % stage)
        x = conv_block(x, filters, (3, 3), strides=1,
                       name='L_conv%d-2' % stage)
        skips.append(x)  # pre-pool tensor feeds the matching decoder stage
        x = MaxPooling2D((2, 2), strides=(2, 2), padding='same')(x)

    # ---- bottom (16x16): dilated middle conv widens the receptive field ----
    x = conv_block(x, 512, (3, 3), strides=1, name='bottom-1')
    x = conv_block(x, 512, (3, 3), strides=1, dila=2, name='bottom-2')
    x = conv_block(x, 512, (3, 3), strides=1, name='bottom-3')

    # ---- right (expanding) branch ----
    for stage, (filters, skip) in enumerate(
            zip((256, 128, 64, 32), reversed(skips)), start=1):
        x = Convolution2DTranspose(filters, kernel_size=2, strides=2,
                                   padding='same',
                                   name='R_conv%d-1' % stage)(x)
        x = BatchNormalization(name='R_conv%d-1_bn' % stage)(x)
        x = conv_block(Concatenate(axis=-1)([x, skip]), filters, (3, 3),
                       strides=1, name='R_conv%d-2' % stage)
        x = conv_block(x, filters, (3, 3), strides=1,
                       name='R_conv%d-3' % stage)

    final = Conv2D(n_classes, (1, 1), name='final_out')(x)
    return Activation('softmax', name='softmax_1')(final)
def build(self):
    """Assemble the network graph and stash every intermediate tensor on ``self``.

    Builds a residual backbone (stages of 32/64/128/256 filters, each stage
    entered via ``residual_block_id``), a pooled/depthwise-conv merge, then
    two upsampling branch points (``residualb_38/39/40``) feeding two output
    heads that are concatenated along axis 1. Calls ``self.init_model()``
    and returns ``(self.concat_1, self.concat_2)``.

    NOTE(review): ``residual_block`` / ``residual_block_id`` are defined
    elsewhere in this class — presumably identity vs. downsampling residual
    blocks; confirm. Every intermediate is kept as an attribute, so later
    code may depend on these side effects.
    """
    # 256x256 RGB input; stride-2 stem conv halves resolution.
    self.inputs = Input(shape=(256, 256, 3), name="img")
    self.conv_1 = Conv2D(32, 3, strides=(2, 2), use_bias=True, padding='same', name="conv_1")(self.inputs)
    self.relu_1 = ReLU()(self.conv_1)
    # Stage 1: seven 32-filter residual blocks.
    self.residualb_1 = self.residual_block(self.relu_1, 32, "residualb_1")
    self.residualb_2 = self.residual_block(self.residualb_1, 32, "residualb_2")
    self.residualb_3 = self.residual_block(self.residualb_2, 32, "residualb_3")
    self.residualb_4 = self.residual_block(self.residualb_3, 32, "residualb_4")
    self.residualb_5 = self.residual_block(self.residualb_4, 32, "residualb_5")
    self.residualb_6 = self.residual_block(self.residualb_5, 32, "residualb_6")
    self.residualb_7 = self.residual_block(self.residualb_6, 32, "residualb_7")
    # Stage 2: transition to 64 filters, then seven residual blocks.
    self.residualb_8 = self.residual_block_id(self.residualb_7, 64, "residualb_8")
    self.residualb_9 = self.residual_block(self.residualb_8, 64, "residualb_9")
    self.residualb_10 = self.residual_block(self.residualb_9, 64, "residualb_10")
    self.residualb_11 = self.residual_block(self.residualb_10, 64, "residualb_11")
    self.residualb_12 = self.residual_block(self.residualb_11, 64, "residualb_12")
    self.residualb_13 = self.residual_block(self.residualb_12, 64, "residualb_13")
    self.residualb_14 = self.residual_block(self.residualb_13, 64, "residualb_14")
    self.residualb_15 = self.residual_block(self.residualb_14, 64, "residualb_15")
    # Stage 3: transition to 128 filters, then seven residual blocks.
    self.residualb_16 = self.residual_block_id(self.residualb_15, 128, "residualb_16")
    self.residualb_17 = self.residual_block(self.residualb_16, 128, "residualb_17")
    self.residualb_18 = self.residual_block(self.residualb_17, 128, "residualb_18")
    self.residualb_19 = self.residual_block(self.residualb_18, 128, "residualb_19")
    self.residualb_20 = self.residual_block(self.residualb_19, 128, "residualb_20")
    self.residualb_21 = self.residual_block(self.residualb_20, 128, "residualb_21")
    self.residualb_22 = self.residual_block(self.residualb_21, 128, "residualb_22")
    self.residualb_23 = self.residual_block(self.residualb_22, 128,
                                            "residualb_23")
    # Stage 4: transition to 256 filters, then seven residual blocks.
    self.residualb_24 = self.residual_block_id(self.residualb_23, 256, "residualb_24")
    self.residualb_25 = self.residual_block(self.residualb_24, 256, "residualb_25")
    self.residualb_26 = self.residual_block(self.residualb_25, 256, "residualb_26")
    self.residualb_27 = self.residual_block(self.residualb_26, 256, "residualb_27")
    self.residualb_28 = self.residual_block(self.residualb_27, 256, "residualb_28")
    self.residualb_29 = self.residual_block(self.residualb_28, 256, "residualb_29")
    self.residualb_30 = self.residual_block(self.residualb_29, 256, "residualb_30")
    self.residualb_31 = self.residual_block(self.residualb_30, 256, "residualb_31")
    # Two stride-2 downsampling paths from residualb_31 (max-pool vs.
    # depthwise conv + 1x1 conv) are summed, then re-activated.
    self.max_pool_1 = MaxPool2D(pool_size=(3, 3), strides=(2, 2), padding='same')(self.residualb_31)
    self.dconv_1 = DepthwiseConv2D(kernel_size=(3, 3), strides=(2, 2), padding='same', name="dconv_1")(self.residualb_31)
    self.conv_2 = Conv2D(filters=256, kernel_size=(1, 1), strides=(1, 1), padding='valid', name="conv_2")(self.dconv_1)
    self.add_1 = Add()([self.conv_2, self.max_pool_1])
    self.relu_2 = ReLU()(self.add_1)
    self.residualb_32 = self.residual_block(self.relu_2, 256, "residualb_32")
    self.residualb_33 = self.residual_block(self.residualb_32, 256, "residualb_33")
    self.residualb_34 = self.residual_block(self.residualb_33, 256, "residualb_34")
    self.residualb_35 = self.residual_block(self.residualb_34, 256, "residualb_35")
    self.residualb_36 = self.residual_block(self.residualb_35, 256, "residualb_36")
    self.residualb_37 = self.residual_block(self.residualb_36, 256, "residualb_37")
    #split key
    # residualb_38/39/40 are the three feature maps the output heads read.
    self.residualb_38 = self.residual_block(self.residualb_37, 256, "residualb_38")
    #BRANCH 1
    # 2x upsample and fuse with the stage-4 output (residualb_31).
    self.conv_transpose_1 = Convolution2DTranspose(filters=256, kernel_size=(2, 2), strides=(2, 2), name="convt_1")(
        self.residualb_38)
    self.relu_3 = ReLU()(self.conv_transpose_1)
    self.add_2 = Add()([self.residualb_31, self.relu_3])
    #split key
    self.residualb_39 = self.residual_block(self.add_2, 256, "residualb_39")
    # 2x upsample again and fuse with the stage-3 output (residualb_23).
    self.conv_transpose_2 = Convolution2DTranspose(filters=128, kernel_size=(2, 2), strides=(2, 2), name="convt_2")(
        self.residualb_39)
    self.relu_4 = ReLU()(self.conv_transpose_2)
    self.add_3 = Add()([self.residualb_23, self.relu_4])
    #split key
    self.residualb_40 = self.residual_block(self.add_3, 128, "residualb_40")
    # output block 1
    # Head 1: 1x1 convs on the three branch tensors, flattened to (-1, 1)
    # and concatenated along axis 1. Commented tf.reshape lines record the
    # expected flattened sizes (2048 / 512 / 384) for a 256x256 input.
    self.conv_3 = Conv2D(filters=2, kernel_size=(1, 1), strides=(1, 1), padding='same', name="conv_3")(self.residualb_40)
    #self.reshape_1 = tf.reshape(self.conv_3,[1,2048,1])
    self.reshape_1 = Reshape([-1, 1])(self.conv_3)
    self.conv_4 = Conv2D(filters=2, kernel_size=(1, 1), strides=(1, 1), name="conv_4")(self.residualb_39)
    #self.reshape_2 = tf.reshape(self.conv_4,[1,512,1])
    self.reshape_2 = Reshape([-1, 1])(self.conv_4)
    self.conv_5 = Conv2D(filters=6, kernel_size=(1, 1), strides=(1, 1), name="conv_5")(self.residualb_38)
    #self.reshape_3 = tf.reshape(self.conv_5, [1,384,1])
    self.reshape_3 = Reshape([-1, 1])(self.conv_5)
    self.concat_1 = Concatenate(axis=1)(
        [self.reshape_1, self.reshape_2, self.reshape_3])
    #output block 2
    # Head 2: same three tensors, 1x1 convs with 18x the channel counts of
    # head 1 (36/36/108), reshaped to (-1, 18) — presumably 18 values per
    # prediction site; confirm against the loss/decoder code.
    self.conv_6 = Conv2D(filters=36, kernel_size=(1, 1), strides=(1, 1), padding='same', name="conv_6")(self.residualb_40)
    #self.reshape_4 = tf.reshape(self.conv_6,[1,2048,18])
    self.reshape_4 = Reshape([-1, 18])(self.conv_6)
    self.conv_7 = Conv2D(filters=36, kernel_size=(1, 1), strides=(1, 1), padding='same', name="conv_7")(self.residualb_39)
    #self.reshape_5 = tf.reshape(self.conv_7,[1,512,18])
    self.reshape_5 = Reshape([-1, 18])(self.conv_7)
    self.conv_8 = Conv2D(filters=108, kernel_size=(1, 1), strides=(1, 1), name="conv_8")(self.residualb_38)
    #self.reshape_6 = tf.reshape(self.conv_8,[1,384,18])
    self.reshape_6 = Reshape([-1, 18])(self.conv_8)
    self.concat_2 = Concatenate(axis=1)(
        [self.reshape_4, self.reshape_5, self.reshape_6])
    self.init_model()
    return self.concat_1, self.concat_2