Example #1
 def upproject(tensor, filters, name, concat_with):
     up_i = BilinearUpSampling2D((2, 2), name=name+'_upsampling2d')(tensor)
     up_i = Concatenate(name=name+'_concat')([up_i, base_model.get_layer(concat_with).output]) # Skip connection
     up_i = Conv2D(filters=filters, kernel_size=3, strides=1, padding='same', name=name+'_convA')(up_i)
     up_i = Conv2D(filters=filters, kernel_size=3, strides=1, padding='same', name=name+'_convB')(up_i)
     up_i = LeakyReLU(alpha=0.2)(up_i)
     return up_i
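A minimal sketch of how such an up-projection block is typically chained into a decoder. This is an illustrative assumption rather than part of the snippet: base_model stands for the pretrained encoder, and the concat_with names ('skip_1', 'skip_2', 'skip_3') are placeholders for whatever skip-connection layers that encoder exposes.

 decode_filters = 256
 x = Conv2D(decode_filters, kernel_size=1, padding='same', name='conv_bridge')(base_model.output)
 x = upproject(x, decode_filters // 2, 'up1', concat_with='skip_3')  # placeholder layer name
 x = upproject(x, decode_filters // 4, 'up2', concat_with='skip_2')  # placeholder layer name
 x = upproject(x, decode_filters // 8, 'up3', concat_with='skip_1')  # placeholder layer name
 output = Conv2D(1, kernel_size=3, padding='same', name='conv_out')(x)  # e.g. a single-channel depth map
 model = Model(inputs=base_model.input, outputs=output)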
Example #2
 def upproject(tensor, filters, name, concat_with):
     up_i = BilinearUpSampling2D((2, 2),
                                 name=name + '_upsampling2d')(tensor)
     up_i = Concatenate(name=name + '_concat')(
         [up_i,
          base_model.get_layer(concat_with).output])  # Skip Connection
     up_i = SeparableConv2D(filters=filters,
                            kernel_size=3,
                            strides=1,
                            padding='same',
                            name=name + '_convA')(up_i)  # Separable Convolution
     up_i = BatchNormalization(
         axis=-1,
         momentum=0.99,
         epsilon=0.001,
         center=True,
         scale=True,
         beta_initializer="zeros",
         gamma_initializer="ones")(
             up_i)  # Batch Normalization to Avoid Overfitting
     up_i = LeakyReLU(alpha=0.2)(up_i)  # Leaky version of a Rectified Linear Unit
     up_i = SeparableConv2D(filters=filters,
                            kernel_size=3,
                            strides=1,
                            padding='same',
                            name=name + '_convB')(up_i)  # Separable Convolution
     up_i = LeakyReLU(alpha=0.2)(up_i)
     return up_i
Example #3
    def out_block(self, y, aux_1, aux_2):
        if self.training_phase:
            out = Conv2D(self.n_classes, 1, activation='softmax', name='out')(y)  # conv6_cls
            aux_1 = Conv2D(self.n_classes, 1, activation='softmax', name='sub4_out')(aux_1)
            aux_2 = Conv2D(self.n_classes, 1, activation='softmax', name='sub24_out')(aux_2)

            return [out, aux_2, aux_1]
        else:
            out = Conv2D(self.n_classes, 1, activation='softmax', name='out')(y)  # conv6_cls
            out = BilinearUpSampling2D(size=(4, 4), name='out_full')(out)

            return [out]
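A hedged usage sketch (not taken from the source): in the training phase the three softmax heads are typically supervised jointly, for example by compiling the model with per-output loss weights. The constructor name, optimizer, and weight values below are illustrative assumptions.

model = some_icnet_like_builder()  # hypothetical constructor whose outputs come from out_block(...)
model.compile(optimizer='sgd',
              loss='categorical_crossentropy',
              loss_weights=[1.0, 0.4, 0.16])  # weights for [out, sub24_out, sub4_out]; values are illustrative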
Example #4
    def _create_model(self):
        inp = Input(shape=self.target_size + (3,))
        x = inp

        # (1/2)
        branch_half = self.branch_half(self.input_shape)
        z = branch_half(x)

        # (1/4)
        branch_quarter = self.branch_quarter(branch_half.output_shape[1:])
        y = branch_quarter(z)

        pyramid_block = self.pyramid_block(branch_quarter.output_shape[1:])
        aux_1 = pyramid_block(y)

        y = ZeroPadding2D(padding=2, name='padding17')(aux_1)
        y = Conv2D(128, 3, dilation_rate=2, name='conv_sub4')(y)
        y = BatchNormalization(name='conv_sub4_bn')(y)
        y_ = Conv2D(128, 1, name='conv3_1_sub2_proj')(z)
        y_ = BatchNormalization(name='conv3_1_sub2_proj_bn')(y_)

        y = Add(name='sub24_sum')([y, y_])
        y = Activation('relu', name='sub24_sum/relu')(y)

        aux_2 = BilinearUpSampling2D(name='sub24_sum_interp')(y)
        y = ZeroPadding2D(padding=2, name='padding18')(aux_2)
        y_ = Conv2D(128, 3, dilation_rate=2, name='conv_sub2')(y)
        y_ = BatchNormalization(name='conv_sub2_bn')(y_)

        # (1)
        block_0 = self.block_0(self.input_shape)
        y = block_0(x)

        y = Add(name='sub12_sum')([y, y_])
        y = Activation('relu', name='sub12_sum/relu')(y)
        y = BilinearUpSampling2D(name='sub12_sum_interp')(y)

        outputs = self.out_block(y, aux_1, aux_2)

        return Model(inputs=inp, outputs=outputs)
Example #5
    def _create_model(self):
        img_old = Input(shape=self.target_size + (3, ), name='data_old')
        img_new = Input(shape=self.target_size + (3, ), name='data_new')
        flo = Input(shape=self.target_size + (2, ), name='data_flow')

        all_inputs = [img_old, img_new, flo]
        transformed_flow = flow_cnn(self.target_size)(all_inputs)

        # -------- OLD FRAME BRANCH
        self.old_b00, self.old_b01, self.old_b03, self.old_b05, self.old_b11, self.old_b13 = self.frame_branch(
            img_old, prefix='old_')

        # -------- ACTUAL FRAME BRANCH
        self.b00, self.b01, self.b03, self.b05, self.b11, self.b13 = self.frame_branch(
            img_new)

        # -------- WARPING
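        # The custom Warp layer resamples the previous-frame feature maps along the
        # transformed optical flow; only the decoder stages listed in
        # self.warp_decoder receive these motion-compensated features.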
        if -1 in self.warp_decoder:
            self.warped_inp = Warp(name="warp_inp")(
                [img_old, transformed_flow])
        if 0 in self.warp_decoder:
            self.warped0 = Warp(name="warp0")([self.old_b00, transformed_flow])
        if 1 in self.warp_decoder:
            self.warped1 = Warp(name="warp1")([self.old_b01, transformed_flow])
        if 2 in self.warp_decoder:
            self.warped2 = Warp(name="warp2")([self.old_b03, transformed_flow])
        if 3 in self.warp_decoder:
            self.warped3 = Warp(name="warp3")([self.old_b05, transformed_flow])
        if 4 in self.warp_decoder:
            self.warped4 = Warp(name="warp4")([self.old_b11, transformed_flow])
        if 5 in self.warp_decoder:
            self.warped5 = Warp(name="warp5")([self.old_b13, transformed_flow])

        # -------- DECODER
        x = self.decoder()

        x = Conv2D(self.n_classes, (1, 1),
                   kernel_initializer='he_normal',
                   activation='linear')(x)
        x = BilinearUpSampling2D()(x)
        x = Activation('softmax')(x)

        return Model(all_inputs, x)
Example #6
 def upsample2d(tensor, filters, name, concat_with):
     upsampled_layer = BilinearUpSampling2D(
         (2, 2), name=name + '_upsampling2d')(tensor)
     # Concatenated skip connection (concatenation with the encoder feature map, as opposed to an element-wise summation skip)
     upsampled_layer = Concatenate(name=name + '_concat')(
         [upsampled_layer,
          base_model.get_layer(concat_with).output])
     upsampled_layer = Conv2D(filters=filters,
                              kernel_size=3,
                              strides=1,
                              padding='same',
                              name=name + '_conv2A')(upsampled_layer)
     upsampled_layer = LeakyReLU(alpha=0.2)(upsampled_layer)
     upsampled_layer = Conv2D(filters=filters,
                              kernel_size=3,
                              strides=1,
                              padding='same',
                              name=name + '_conv2B')(upsampled_layer)
     upsampled_layer = LeakyReLU(alpha=0.2)(upsampled_layer)
     return upsampled_layer
Example #7
    def pyramid_block(self, input_shape, prefix=''):
        input = Input(input_shape)
        h, w = input.shape[1:3].as_list()
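        # Pyramid pooling (as in PSPNet/ICNet): average-pool the feature map over
        # several grid sizes, resize each pooled map back to (h, w) bilinearly,
        # then fuse all scales by addition.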
        pool1 = AveragePooling2D(pool_size=(h, w), strides=(h, w), name=prefix + 'conv5_3_pool1')(input)
        pool1 = ResizeBilinear(out_size=(h, w), name=prefix + 'conv5_3_pool1_interp')(pool1)

        pool2 = AveragePooling2D(pool_size=(h // 2, w // 2), strides=(h // 2, w // 2), name=prefix + 'conv5_3_pool2')(input)
        pool2 = ResizeBilinear(out_size=(h, w), name=prefix + 'conv5_3_pool2_interp')(pool2)

        pool3 = AveragePooling2D(pool_size=(h // 3, w // 3), strides=(h // 3, w // 3), name=prefix + 'conv5_3_pool3')(input)
        pool3 = ResizeBilinear(out_size=(h, w), name=prefix + 'conv5_3_pool3_interp')(pool3)

        pool6 = AveragePooling2D(pool_size=(h // 4, w // 4), strides=(h // 4, w // 4), name=prefix + 'conv5_3_pool6')(input)
        pool6 = ResizeBilinear(out_size=(h, w), name=prefix + 'conv5_3_pool6_interp')(pool6)

        y = Add(name=prefix + 'conv5_3_sum')([input, pool1, pool2, pool3, pool6])
        y = Conv2D(256, 1, activation='relu', name=prefix + 'conv5_4_k1')(y)
        y = BatchNormalization(name=prefix + 'conv5_4_k1_bn')(y)

        aux_1 = BilinearUpSampling2D(name=prefix + 'conv5_4_interp')(y)
        return Model(input, aux_1, name='pyramid_block')
Example #8
    def _create_model(self):
        img_old = Input(shape=self.input_shape, name='data_old')
        img_new = Input(shape=self.input_shape, name='data_new')
        flo = Input(shape=self.target_size + (2, ), name='data_flow')

        all_inputs = [img_old, img_new, flo]

        transformed_flow = flow_cnn(self.target_size)(all_inputs)

        x = img_new
        x_old = img_old

        # (1/2)
        branch_half = self.branch_half(self.input_shape)
        z = branch_half(x)
        z_old = branch_half(x_old)
        branch_half_out = z

        # (1/4)
        branch_quarter = self.branch_quarter(branch_half.output_shape[1:])
        y = branch_quarter(z)
        branch_quarter_out = y

        if 2 in self.warp_decoder:
            if self.training_phase:
                y_old = branch_quarter(z_old)
            else:
                input_branch_quarter = Input(branch_quarter.output_shape[1:],
                                             name='prev_branch_14')
                y_old = input_branch_quarter
            y = netwarp(y_old, y, transformed_flow)

        pyramid_block = self.pyramid_block(branch_quarter.output_shape[1:])
        aux_1 = pyramid_block(y)

        y = ZeroPadding2D(padding=2, name='padding17')(aux_1)
        y = Conv2D(128, 3, dilation_rate=2, name='conv_sub4')(y)
        y = BatchNormalization(name='conv_sub4_bn')(y)

        conv3_1_sub2_proj = Conv2D(128, 1, name='conv3_1_sub2_proj')
        conv3_1_sub2_proj_bn = BatchNormalization(name='conv3_1_sub2_proj_bn')

        y_ = conv3_1_sub2_proj_bn(conv3_1_sub2_proj(z))

        if 1 in self.warp_decoder:
            if self.training_phase:
                y_old_ = conv3_1_sub2_proj_bn(conv3_1_sub2_proj(z_old))
            else:
                input_branch_half = Input(
                    conv3_1_sub2_proj_bn.output_shape[1:],
                    name='prev_conv3_1_sub2_proj_bn')
                y_old_ = input_branch_half

            y_ = netwarp(y_old_, y_, transformed_flow)

        y = Add(name='sub24_sum')([y, y_])
        y = Activation('relu', name='sub24_sum/relu')(y)

        aux_2 = BilinearUpSampling2D(name='sub24_sum_interp')(y)
        y = ZeroPadding2D(padding=2, name='padding18')(aux_2)
        y_ = Conv2D(128, 3, dilation_rate=2, name='conv_sub2')(y)
        y_ = BatchNormalization(name='conv_sub2_bn')(y_)

        # (1)
        block_0 = self.block_0(self.input_shape)
        y = block_0(x)
        branch_full_out = y

        if 0 in self.warp_decoder:
            if self.training_phase:
                y_old = block_0(x_old)
            else:
                input_branch_full = Input(block_0.output_shape[1:],
                                          name='prev_branch_1')
                y_old = input_branch_full

            y = netwarp(y_old, y, transformed_flow)

        y = Add(name='sub12_sum')([y, y_])
        y = Activation('relu', name='sub12_sum/relu')(y)
        y = BilinearUpSampling2D(name='sub12_sum_interp')(y)

        outputs = self.out_block(y, aux_1, aux_2)

        if not self.training_phase:
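            # In inference mode the previous frame's intermediate feature maps are
            # passed in as extra inputs and returned as extra outputs, so the caller
            # can feed them back on the next frame instead of recomputing them.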
            if 0 in self.warp_decoder:
                all_inputs.append(input_branch_full)
                outputs.append(branch_full_out)
            if 1 in self.warp_decoder:
                all_inputs.append(input_branch_half)
                outputs.append(branch_half_out)
            if 2 in self.warp_decoder:
                all_inputs.append(input_branch_quarter)
                outputs.append(branch_quarter_out)

        return Model(inputs=all_inputs, outputs=outputs)
Example #9
def BisenetV2(include_top=True,
              input_tensor=None,
              input_shape=(224, 224, 3),
              weights=None
              ):
    if K.backend() != 'tensorflow':
        raise RuntimeError('Only tensorflow supported for now')
    name = "bisenetv2"
    input_shape = _obtain_input_shape(input_shape, default_size=224, min_size=28, require_flatten=include_top,
                                      data_format=K.image_data_format())
    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor

    img_input1 = Conv2D(16, kernel_size=(3, 3), strides=2, padding="same", use_bias=False, name="stem_block/conv_block_1")(img_input)
    img_input1 = BatchNormalization(axis=-1, name="stem_block/conv_block_1/bn_1")(img_input1)
    img_input1 = Activation(activation="relu", name="stem_block/conv_block_1/activate_1")(img_input1)

    branch_left_output = Conv2D(int(16/2), kernel_size=(1, 1), strides=1, padding="same", use_bias=False, name="stem_block/downsample_branch_left/1x1_conv_block")(img_input1)
    branch_left_output = BatchNormalization(axis=-1, name="stem_block/downsample_branch_left/1x1_conv_block/bn_1")(branch_left_output)
    branch_left_output = Activation(activation="relu", name="stem_block/downsample_branch_left/1x1_conv_block/activate_1")(branch_left_output)


    branch_left_output = Conv2D(16, kernel_size=(3, 3), strides=2, padding="same", use_bias=False,
                                name="stem_block/downsample_branch_left/3x3_conv_block")(branch_left_output)
    branch_left_output = BatchNormalization(axis=-1, name="stem_block/downsample_branch_left/3x3_conv_block/bn_1")(branch_left_output)
    branch_left_output = Activation(activation="relu", name="stem_block/downsample_branch_left/3x3_conv_block/activate_1")(branch_left_output)


    branch_right_output = MaxPool2D(pool_size=(3, 3), strides=2, padding='same', name="stem_block/downsample_branch_right/maxpooling_block")(img_input1)
    stem_result = Concatenate(axis=-1, name="stem_block/concate_features")([branch_left_output, branch_right_output])
    stem_result = Conv2D(16, kernel_size=(3, 3), strides=1, padding="same", use_bias=False, name="stem_block/final_conv_block")(stem_result)
    stem_result = BatchNormalization(axis=-1, name="stem_block/final_conv_block/bn_1")(stem_result)
    stem_result = Activation(activation="relu", name="stem_block/final_conv_block/activate_1")(stem_result)

    # k_reduce_mean = Lambda(lambda x: tf.reduce_mean(x, axis=[1, 2], keepdims=True, name='global_avg_pooling'))
    # embedding_result=k_reduce_mean(stem_result)
    # embedding_result = K.mean(stem_result, axis=[1, 2], keepdims=True)
    embedding_result = KerasReduceMean(axis=(1, 2), keep_dim=True, name="global_avg_pooling")(stem_result)

    embedding_result = BatchNormalization(axis=-1, name="context_embedding_block/bn")(embedding_result)
    output_channels = stem_result.get_shape().as_list()[-1]
    embedding_result = Conv2D(output_channels, kernel_size=(1, 1), strides=1, padding="same", use_bias=False,
                              name="context_embedding_block/conv_block_1")(embedding_result)
    embedding_result = BatchNormalization(axis=-1, name="context_embedding_block/conv_block_1/bn_1")(embedding_result)
    embedding_result = Activation(activation="relu", name="context_embedding_block/conv_block_1/activate_1")(embedding_result)
    embedding_result = Add(name="context_embedding_block/fused_features")([embedding_result, stem_result])
    embedding_result = Conv2D(output_channels, kernel_size=(3, 3), strides=1, padding="same", use_bias=False, name="context_embedding_block/final_conv_block")(embedding_result)


    output_channels = embedding_result.get_shape().as_list()[-1]
    gather_expansion_result = Conv2D(output_channels, kernel_size=(3, 3), strides=1, padding="same", use_bias=False,
                                     name="ge_block_with_stride_1/stride_equal_one_module/3x3_conv_block")(embedding_result)
    gather_expansion_result = BatchNormalization(axis=-1, name="ge_block_with_stride_1/stride_equal_one_module/3x3_conv_block/bn_1")(gather_expansion_result)
    gather_expansion_result = Activation(activation="relu", name="ge_block_with_stride_1/stride_equal_one_module/3x3_conv_block/activate_1")(gather_expansion_result)

    gather_expansion_result = DepthwiseConv2D(kernel_size=3, strides=1, depth_multiplier=6, padding='same',
                                              name="ge_block_with_stride_1/stride_equal_one_module/depthwise_conv_block")(gather_expansion_result)
    gather_expansion_result = BatchNormalization(axis=-1, name="ge_block_with_stride_1/stride_equal_one_module/dw_bn")(gather_expansion_result)

    gather_expansion_result = Conv2D(output_channels, kernel_size=(1, 1), strides=1, padding="same", use_bias=False,
                                     name="ge_block_with_stride_1/stride_equal_one_module/1x1_conv_block")(gather_expansion_result)
    gather_expansion_result = Add(name="ge_block_with_stride_1/stride_equal_one_module/fused_features")([embedding_result, gather_expansion_result])
    gather_expansion_result = Activation(activation="relu", name="ge_block_with_stride_1/stride_equal_one_module/ge_output")(gather_expansion_result)

    gather_expansion_proj_result = DepthwiseConv2D(kernel_size=3, depth_multiplier=1, strides=2, padding="same",
                                                   name="ge_block_with_stride_2/stride_equal_two_module/input_project_dw_conv_block")(gather_expansion_result)
    gather_expansion_proj_result = BatchNormalization(axis=-1, name="ge_block_with_stride_2/stride_equal_two_module/input_project_bn")(gather_expansion_proj_result)
    gather_expansion_proj_result = Conv2D(128, kernel_size=(1, 1), strides=1, padding="same", use_bias=False, activation=None)(gather_expansion_proj_result)
    input_tensor_channels = gather_expansion_result.get_shape().as_list()[-1]
    gather_expansion_stride2_result = Conv2D(input_tensor_channels, kernel_size=(3, 3), strides=1, padding="same",
                                             use_bias=False, name="ge_block_with_stride_2/stride_equal_two_module/3x3_conv_block")(gather_expansion_result)
    gather_expansion_stride2_result = BatchNormalization(axis=-1, name="ge_block_with_stride_2/stride_equal_two_module/3x3_conv_block/bn_1")(gather_expansion_stride2_result)
    gather_expansion_stride2_result = Activation(activation="relu", name="ge_block_with_stride_2/stride_equal_two_module/3x3_conv_block/activate_1")(gather_expansion_stride2_result)

    gather_expansion_stride2_result = DepthwiseConv2D(kernel_size=3, depth_multiplier=6, strides=2, padding="same",
                                                      name="ge_block_with_stride_2/stride_equal_two_module/depthwise_conv_block_1")(gather_expansion_stride2_result)
    gather_expansion_stride2_result = BatchNormalization(axis=-1, name="ge_block_with_stride_2/stride_equal_two_module/dw_bn_1")(gather_expansion_stride2_result)
    gather_expansion_stride2_result = DepthwiseConv2D(kernel_size=3, depth_multiplier=1, strides=1, padding="same",
                                                      name="ge_block_with_stride_2/stride_equal_two_module/depthwise_conv_block_2")(gather_expansion_stride2_result)
    gather_expansion_stride2_result = BatchNormalization(axis=-1, name="ge_block_with_stride_2/stride_equal_two_module/dw_bn_2")(gather_expansion_stride2_result)
    gather_expansion_stride2_result = Conv2D(128, kernel_size=(1, 1), strides=1, padding="same",
                                             use_bias=False, activation=None, name="ge_block_with_stride_2/stride_equal_two_module/1x1_conv_block")(gather_expansion_stride2_result)
    gather_expansion_total_result = Add(name="ge_block_with_stride_2/stride_equal_two_module/fused_features")([gather_expansion_proj_result, gather_expansion_stride2_result])
    gather_expansion_total_result = Activation(activation="relu", name="ge_block_with_stride_2/stride_equal_two_module/ge_output")(gather_expansion_total_result)


    gather_expansion_proj2_result = DepthwiseConv2D(kernel_size=3, depth_multiplier=1, strides=2, padding="same",
                                                   name="ge_block_with_stride_2_repeat/stride_equal_two_module/input_project_dw_conv_block")(gather_expansion_total_result)
    gather_expansion_proj2_result = BatchNormalization(axis=-1, name="ge_block_with_stride_2_repeat/stride_equal_two_module/input_project_bn")(gather_expansion_proj2_result)
    gather_expansion_proj2_result = Conv2D(128, kernel_size=(1, 1), strides=1, padding="same", use_bias=False, activation=None)(gather_expansion_proj2_result)
    input_tensor_channels = gather_expansion_total_result.get_shape().as_list()[-1]
    gather_expansion_stride2_result_repeat = Conv2D(input_tensor_channels, kernel_size=(3, 3), strides=1,  padding="same",
                                             use_bias=False, name="ge_block_with_stride_2_repeat/stride_equal_two_module/3x3_conv_block")(gather_expansion_total_result)
    gather_expansion_stride2_result_repeat = BatchNormalization(axis=-1, name="ge_block_with_stride_2_repeat/stride_equal_two_module/3x3_conv_block/bn_1")(gather_expansion_stride2_result_repeat)
    gather_expansion_stride2_result_repeat = Activation(activation="relu", name="ge_block_with_stride_2_repeat/stride_equal_two_module/3x3_conv_block/activate_1")(gather_expansion_stride2_result_repeat)

    gather_expansion_stride2_result_repeat = DepthwiseConv2D(kernel_size=3, depth_multiplier=6, strides=2, padding="same",
                                                      name="ge_block_with_stride_2_repeat/stride_equal_two_module/depthwise_conv_block_1")(gather_expansion_stride2_result_repeat)
    gather_expansion_stride2_result_repeat = BatchNormalization(axis=-1, name="ge_block_with_stride_2_repeat/stride_equal_two_module/dw_bn_1")(gather_expansion_stride2_result_repeat)
    gather_expansion_stride2_result_repeat = DepthwiseConv2D(kernel_size=3, depth_multiplier=1, strides=1, padding="same",
                                                      name="ge_block_with_stride_2_repeat/stride_equal_two_module/depthwise_conv_block_2")(gather_expansion_stride2_result_repeat)
    gather_expansion_stride2_result_repeat = BatchNormalization(axis=-1, name="ge_block_with_stride_2_repeat/stride_equal_two_module/dw_bn_2")(gather_expansion_stride2_result_repeat)
    gather_expansion_stride2_result_repeat = Conv2D(128, kernel_size=(1, 1), strides=1, padding="same",
                                             use_bias=False, activation=None, name="ge_block_with_stride_2_repeat/stride_equal_two_module/1x1_conv_block")(gather_expansion_stride2_result_repeat)
    gather_expansion_total_result_repeat = Add(name="ge_block_with_stride_2_repeat/stride_equal_two_module/fused_features")([gather_expansion_proj2_result, gather_expansion_stride2_result_repeat])
    gather_expansion_total_result_repeat = Activation(activation="relu", name="ge_block_with_stride_2_repeat/stride_equal_two_module/ge_output")(gather_expansion_total_result_repeat)
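
    # Guided aggregation: fuse the high-resolution detail branch (stem output) with
    # the low-resolution semantic branch (the gather-and-expansion output).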

    detail_input_tensor = stem_result
    semantic_input_tensor = gather_expansion_total_result_repeat
    output_channels = stem_result.get_shape().as_list()[-1]
    detail_branch_remain = DepthwiseConv2D(kernel_size=3, strides=1, padding="same", depth_multiplier=1,
                                           name="guided_aggregation_block/detail_branch/3x3_dw_conv_block")(detail_input_tensor)
    detail_branch_remain = BatchNormalization(axis=-1, name="guided_aggregation_block/detail_branch/bn_1")(detail_branch_remain)
    detail_branch_remain = Conv2D(output_channels, kernel_size=(1, 1), padding="same", strides=1, use_bias=False,
                                  name="guided_aggregation_block/detail_branch/1x1_conv_block")(detail_branch_remain)

    detail_branch_downsample = Conv2D(output_channels, kernel_size=(3, 3), strides=2, use_bias=False, activation=None,
                                      padding="same", name="guided_aggregation_block/detail_branch/3x3_conv_block")(detail_input_tensor)

    detail_branch_downsample = AveragePooling2D(pool_size=(3, 3), strides=2, padding="same", name="guided_aggregation_block/detail_branch/avg_pooling_block")(detail_branch_downsample)

    semantic_branch_remain = DepthwiseConv2D(kernel_size=3, strides=1, padding="same", depth_multiplier=1,
                                             name="guided_aggregation_block/semantic_branch/3x3_dw_conv_block")(semantic_input_tensor)
    semantic_branch_remain = BatchNormalization(axis=-1, name="guided_aggregation_block/semantic_branch/bn_1")(semantic_branch_remain)
    semantic_branch_remain = Conv2D(output_channels, kernel_size=(1, 1), strides=1, use_bias=False, activation=None, padding="same",
                                    name="guided_aggregation_block/semantic_branch/1x1_conv_block")(semantic_branch_remain)
    # semantic_branch_remain = sigmoid(semantic_branch_remain)
    # keras_sigmoid = Lambda(lambda x: tf.nn.sigmoid(x, name="guided_aggregation_block/semantic_branch/semantic_remain_sigmoid"))
    # semantic_branch_remain = keras_sigmoid(semantic_branch_remain)
    semantic_branch_remain = Activation("sigmoid", name="guided_aggregation_block/semantic_branch/semantic_remain_sigmoid")(semantic_branch_remain)

    semantic_branch_upsample = Conv2D(output_channels, kernel_size=(3, 3), strides=1, padding="same", use_bias=False,
                                      activation=None, name="guided_aggregation_block/semantic_branch/3x3_conv_block")(semantic_input_tensor)
    # semantic_branch_upsample = resize_images(semantic_branch_upsample, 4, 4, data_format="channels_last", interpolation='bilinear')

    # upsample_bilinear0 = Lambda(lambda x: tf.image.resize_bilinear(x, size=stem_result.get_shape().as_list()[1:3],
    #                                                               name="guided_aggregation_block/semantic_branch/semantic_upsample_features"))
    # semantic_branch_upsample = upsample_bilinear0(semantic_branch_upsample)
    semantic_branch_upsample = BilinearUpSampling2D((4, 4), name="guided_aggregation_block/semantic_branch/semantic_upsample_features")(semantic_branch_upsample)
    semantic_branch_upsample = Activation("sigmoid", name="guided_aggregation_block/semantic_branch/semantic_branch_upsample_sigmoid")(semantic_branch_upsample)
    # keras_sigmoid_1 = Lambda(lambda x: tf.nn.sigmoid(x, name="guided_aggregation_block/semantic_branch/semantic_branch_upsample_sigmoid"))
    # semantic_branch_upsample = keras_sigmoid_1(semantic_branch_upsample)
    # semantic_branch_upsample = sigmoid(semantic_branch_upsample)

    guided_features_remain = Multiply(name="guided_aggregation_block/aggregation_features/guided_detail_features")([detail_branch_remain, semantic_branch_upsample])
    guided_features_downsample = Multiply(name="guided_aggregation_block/aggregation_features/guided_semantic_features")([detail_branch_downsample, semantic_branch_remain])

    # upsample_bilinear1 = Lambda(lambda x: tf.image.resize_bilinear(x, size=stem_result.get_shape().as_list()[1:3],
    #                                        name="guided_aggregation_block/aggregation_features/guided_upsample_features"))
    #
    # guided_features_upsample = upsample_bilinear1(guided_features_downsample)
    guided_features_upsample = BilinearUpSampling2D((4, 4), name="guided_aggregation_block/aggregation_features/guided_upsample_features")(guided_features_downsample)
    # guided_features_upsample = resize_images(guided_features_downsample, 4, 4, data_format="channels_last", interpolation='bilinear')

    guided_features = Add(name="guided_aggregation_block/aggregation_features/fused_features")([guided_features_remain, guided_features_upsample])
    guided_features = Conv2D(output_channels, kernel_size=(3, 3), strides=1, use_bias=False, padding="same",
                             name="guided_aggregation_block/aggregation_features/aggregation_feature_output")(guided_features)
    guided_features = BatchNormalization(axis=-1, name="guided_aggregation_block/aggregation_features/aggregation_feature_output/bn_1")(guided_features)
    guided_features = Activation(activation="relu", name="guided_aggregation_block/aggregation_features/aggregation_feature_output/activate_1")(guided_features)

    # input_tensor_size = [int(tmp * 4)for tmp in guided_features.get_shape().as_list()[1:3]]
    result = Conv2D(8, kernel_size=(3, 3), strides=1, use_bias=False, padding="same", name="seg_head_block/3x3_conv_block")(guided_features)
    result = BatchNormalization(axis=-1, name="seg_head_block/bn_1")(result)
    result = Activation("relu", name="seg_head_block/activate_1")(result)

    # upsample_bilinear2 = Lambda(lambda x: tf.image.resize_bilinear(x, size=input_tensor_size, name="seg_head_block/segmentation_head_logits"))
    # result = upsample_bilinear2(result)
    result = BilinearUpSampling2D((4, 4), name="seg_head_block/segmentation_head_upsample")(result)
    # result = resize_images(result, 4, 4, data_format="channels_last", interpolation='bilinear')

    result = Conv2D(1, kernel_size=(1, 1), strides=1, use_bias=False, padding="same",
                    name="seg_head_block/1x1_conv_block")(result)
    if input_tensor is not None:
        inputs = get_source_inputs(input_tensor)
    else:
        inputs = img_input

    model = Model(inputs, result, name=name)

    if weights:
        model.load_weights(weights, by_name=True)

    return model
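A minimal usage sketch, assuming the custom layers referenced above (e.g. BilinearUpSampling2D, KerasReduceMean) are importable in the current scope:

model = BisenetV2(input_shape=(224, 224, 3), weights=None)
model.summary()
# masks = model.predict(images)  # images shaped (N, 224, 224, 3); output is a single-channel map at input resolution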