Code Example #1
 def wrapper(x):
     if not only_resize:
         # Segmentation head: 3x3 conv widens the features before classification.
         x = Conv2dBn(
             channels * 8,
             kernel_size=3,
             activation='relu',
             kernel_initializer='he_uniform',
             padding='same',
             strides=1,
             use_batchnorm=True,
             use_bias=False,
             name=name + 'seghead_conv3_1',
             kernel_regularizer=l2(wd)
         )(x)

     # 1x1 conv maps to one channel per class; no batch norm on the logits.
     x = Conv2dBn(
         classes,
         kernel_size=1,
         activation='relu',
         kernel_initializer='he_uniform',
         padding='same',
         strides=1,
         use_batchnorm=False,
         use_bias=False,
         name=name + 'seghead_conv_end',
         kernel_regularizer=l2(wd)
     )(x)

     # Bilinear upsampling back to the target resolution (h, w).
     x = resize_bilinear(size=(h, w), name=name + 'resize_bil_end')(x)
     return x
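All of these snippets lean on a Conv2dBn helper and a resize_bilinear layer factory that are not shown on this page (Conv2dBn follows the conv + batch-norm + activation helper style of qubvel/segmentation_models). A minimal sketch of what they might look like, assuming TensorFlow/Keras; the exact definitions in BisenetV3.py may differ:

 import tensorflow as tf
 from tensorflow.keras import layers, backend
 from tensorflow.keras.regularizers import l2

 def Conv2dBn(filters, kernel_size, activation=None, use_batchnorm=False,
              name=None, **conv_kwargs):
     # Conv2D optionally followed by BatchNormalization and an activation.
     def layer(x):
         x = layers.Conv2D(filters, kernel_size, name=name, **conv_kwargs)(x)
         if use_batchnorm:
             x = layers.BatchNormalization(
                 name=None if name is None else name + '_bn')(x)
         if activation is not None:
             x = layers.Activation(activation)(x)
         return x
     return layer

 def resize_bilinear(size, name=None):
     # Lambda layer around tf.image.resize; size is (height, width).
     return layers.Lambda(
         lambda t: tf.image.resize(t, size, method='bilinear'), name=name)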
Code Example #2
 def wrapper(input_tensor):
     # Global average pooling, kept 4-D so the result broadcasts back.
     x = backend.mean(input_tensor, axis=[1, 2], keepdims=True)
     x = Conv2dBn(
         out_channels,
         kernel_size=1,
         activation='relu',
         kernel_initializer='he_uniform',
         padding='same',
         strides=1,
         use_batchnorm=True,
         name=name + "ce_block_cnn1_1",
         use_bias=False,
         kernel_regularizer=l2(wd)
     )(x)
     # Broadcast-add the global context onto every spatial position.
     x = layers.Add()([x, input_tensor])
     # Final 3x3 conv fuses the context-enriched features.
     x = Conv2dBn(
         out_channels,
         kernel_size=3,
         activation=None,
         kernel_initializer='he_uniform',
         padding='same',
         strides=1,
         use_batchnorm=False,
         use_bias=False,
         name=name + "ce_block_cnn3_1",
         kernel_regularizer=l2(wd)
     )(x)
     return x
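Each wrapper on this page is a closure: names like out_channels, wd, and name are free variables captured from an enclosing factory function that the listing does not show. A hypothetical factory for this context-embedding block (the name ContextEmbedding is illustrative, not from the source):

 def ContextEmbedding(out_channels, wd, name):
     # Returns the wrapper from Code Example #2 with its free variables bound.
     def wrapper(input_tensor):
         ...  # body as shown above
     return wrapper

 # Usage: x = ContextEmbedding(128, wd=1e-4, name='ce1_')(features)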
Code Example #3
File: BisenetV3.py Project: ixtiyoruz/PSPnet
 def wrapper(input_tensor):
     # Gather-and-expansion block: 3x3 conv, depthwise expansion, 1x1 projection.
     x = Conv2dBn(out_channels,
                  kernel_size=3,
                  activation='relu',
                  kernel_initializer='he_uniform',
                  padding='same',
                  strides=1,
                  use_batchnorm=True,
                  use_bias=False,
                  name=name + "ge_block_cnn3_1",
                  kernel_regularizer=l2(wd))(input_tensor)
     # Depthwise conv expands channels by a factor of e and handles downsampling.
     x = layers.DepthwiseConv2D(3,
                                strides=(stride, stride),
                                padding='same',
                                depth_multiplier=e,
                                use_bias=False,
                                name=name + "geblock_dwconv3_1")(x)
     if stride == 2:
         # The downsampling variant adds a second depthwise conv.
         x = layers.DepthwiseConv2D(3,
                                    strides=(1, 1),
                                    padding='same',
                                    use_bias=False,
                                    name=name + "geblock_dwconv3_2",
                                    depthwise_regularizer=l2(wd))(x)
     # 1x1 projection back to out_channels; no activation before the residual add.
     x = Conv2dBn(out_channels,
                  kernel_size=1,
                  activation=None,
                  kernel_initializer='he_uniform',
                  padding='same',
                  strides=1,
                  use_batchnorm=True,
                  use_bias=False,
                  name=name + "ge_block_cnn1_1",
                  kernel_regularizer=l2(wd))(x)
     if stride == 2:
         # Shortcut path: depthwise downsampling plus a 1x1 projection.
         x1 = layers.DepthwiseConv2D(
             3,
             strides=(stride, stride),
             padding='same',
             use_bias=False,
             name=name + "geblock_dwconv3_3_shortcut")(input_tensor)
         x1 = Conv2dBn(out_channels,
                       kernel_size=1,
                       activation=None,
                       kernel_initializer='he_uniform',
                       padding='same',
                       strides=1,
                       use_batchnorm=True,
                       name=name + "ge_block_cnn1_2_shortcut",
                       use_bias=False,
                       kernel_regularizer=l2(wd))(x1)
         x = layers.Add()([x, x1])
     else:
         # Identity shortcut when the spatial size is unchanged.
         x = layers.Add()([x, input_tensor])
     return x
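The block's contract: stride=1 preserves the spatial size and adds the input back directly, while stride=2 halves it and routes the input through the projection shortcut. A hypothetical shape check, assuming a GatherExpansion(out_channels, stride, e, wd, name) factory wraps the function above (the factory name is illustrative):

 inp = tf.keras.Input((64, 64, 128))
 y1 = GatherExpansion(128, stride=1, e=6, wd=1e-4, name='ge1_')(inp)  # (None, 64, 64, 128)
 y2 = GatherExpansion(128, stride=2, e=6, wd=1e-4, name='ge2_')(inp)  # (None, 32, 32, 128)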
Code Example #4
File: BisenetV3.py Project: ixtiyoruz/PSPnet
 def wrapper(input_tensor):
     # Stem block: a stride-2 conv, then two parallel downsampling paths.
     x = Conv2dBn(out_channels,
                  kernel_size=3,
                  activation='relu',
                  kernel_initializer='he_uniform',
                  padding='same',
                  strides=2,
                  use_batchnorm=True,
                  use_bias=False,
                  name="stem_block_cnn",
                  kernel_regularizer=l2(wd))(input_tensor)
     # Conv path: 1x1 bottleneck followed by a stride-2 3x3 conv.
     x1 = Conv2dBn(out_channels // 2,
                   kernel_size=1,
                   activation='relu',
                   kernel_initializer='he_uniform',
                   padding='same',
                   strides=1,
                   use_batchnorm=True,
                   use_bias=False,
                   name="stem_block_cnndown_cnn1",
                   kernel_regularizer=l2(wd))(x)
     x1 = Conv2dBn(out_channels,
                   kernel_size=3,
                   activation='relu',
                   kernel_initializer='he_uniform',
                   padding='same',
                   strides=2,
                   use_batchnorm=True,
                   use_bias=False,
                   name="stem_block_cnndown_cnn3",
                   kernel_regularizer=l2(wd))(x1)
     # Pooling path: stride-2 max pooling over the same features.
     x2 = layers.MaxPool2D(pool_size=(3, 3),
                           strides=2,
                           padding='same',
                           name="stem_block_maxpool")(x)
     # Concatenate both paths and fuse with a 1x1 conv.
     x = layers.Concatenate(axis=-1)([x1, x2])
     x = Conv2dBn(out_channels,
                  kernel_size=1,
                  activation='relu',
                  kernel_initializer='he_uniform',
                  padding='same',
                  strides=1,
                  use_batchnorm=True,
                  use_bias=False,
                  name="stem_block_cnn_out",
                  kernel_regularizer=l2(wd))(x)
     return x
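The two stride-2 stages leave the stem output at 1/4 of the input resolution. A minimal sketch, assuming a StemBlock(out_channels, wd) factory (illustrative name) around the wrapper:

 inp = tf.keras.Input((224, 224, 3))
 y = StemBlock(64, wd=1e-4)(inp)  # two stride-2 stages: (None, 56, 56, 64)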
Code Example #5
 def wrapper(input_tensor):
     # Plain 1x1 conv + optional batch norm + ReLU projection block.
     return Conv2dBn(
         filters,
         kernel_size=1,
         activation='relu',
         kernel_initializer='he_uniform',
         padding='same',
         use_batchnorm=use_batchnorm,
         name=name,
     )(input_tensor)
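This matches the 1x1 conv + BN + ReLU building block that qubvel/segmentation_models exposes as Conv1x1BnReLU; usage is a one-liner (the factory name is assumed from that library, not shown on this page):

 x = Conv1x1BnReLU(256, use_batchnorm=True, name='decoder_fuse')(x)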
Code Example #6
 def wrapper(detailed_in, semantic_in):
     # -------- prepare the detail branch --------
     _, h, w, _ = backend.int_shape(detailed_in)
     x_d = layers.DepthwiseConv2D(
         3, strides=(1, 1), padding='same', depth_multiplier=1,
         use_bias=False, name=name + "agglayer_dwconv1_1"
     )(detailed_in)
     x_d = Conv2dBn(
         out_channels,
         kernel_size=1,
         activation=None,
         kernel_initializer='he_uniform',
         padding='same',
         strides=1,
         use_batchnorm=False,
         name=name + "agglayer_cnn1_1",
         use_bias=False,
         kernel_regularizer=l2(wd)
     )(x_d)
     # Downsample the detail features so they can gate the semantic branch.
     x_d2s = Conv2dBn(
         out_channels,
         kernel_size=3,
         activation=None,
         kernel_initializer='he_uniform',
         padding='same',
         strides=2,
         use_batchnorm=True,
         use_bias=False,
         name=name + "agglayer_cnn3_1",
         kernel_regularizer=l2(wd)
     )(detailed_in)
     x_d2s = layers.AveragePooling2D(
         pool_size=(3, 3), strides=(2, 2), padding='same'
     )(x_d2s)
     # -------- prepare the semantic branch --------
     x_s = layers.DepthwiseConv2D(
         3, strides=(1, 1), padding='same', depth_multiplier=1,
         use_bias=False, name=name + "agglayer_dwconv1_2"
     )(semantic_in)
     x_s = Conv2dBn(
         out_channels,
         kernel_size=1,
         activation=None,
         kernel_initializer='he_uniform',
         padding='same',
         strides=1,
         use_batchnorm=False,
         name=name + "agglayer_cnn1_2",
         use_bias=False,
         kernel_regularizer=l2(wd)
     )(x_s)
     # Upsample the semantic features so they can gate the detail branch.
     x_s2d = Conv2dBn(
         out_channels,
         kernel_size=3,
         activation=None,
         kernel_initializer='he_uniform',
         padding='same',
         strides=1,
         use_batchnorm=True,
         use_bias=False,
         name=name + "agglayer_cnn3_2",
         kernel_regularizer=l2(wd)
     )(semantic_in)
     x_s2d = resize_bilinear(size=(h, w), name=name + 'resize_bil_1')(x_s2d)
     # -------- bidirectional gating and fusion --------
     x_d = layers.Multiply()([x_d, x_s2d])
     x_s = layers.Multiply()([x_s, x_d2s])
     x_s = resize_bilinear(size=(h, w), name=name + 'resize_bil_2')(x_s)
     x = layers.Add()([x_s, x_d])

     x = Conv2dBn(
         out_channels,
         kernel_size=3,
         activation=None,
         kernel_initializer='he_uniform',
         padding='same',
         strides=1,
         use_batchnorm=True,
         use_bias=False,
         name=name + "agglayer_output",
         kernel_regularizer=l2(wd)
     )(x)
     return x
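The downsampling path (stride-2 conv plus stride-2 average pooling) shrinks the detail features by 4x, so the semantic input is expected at one quarter of the detail resolution, as in BiSeNetV2's bilateral guided aggregation. A hypothetical call, assuming an AggregationLayer(out_channels, wd, name) factory (illustrative name):

 detail   = tf.keras.Input((64, 64, 128))   # e.g. 1/8 of a 512x512 input
 semantic = tf.keras.Input((16, 16, 128))   # 4x smaller than the detail features
 out = AggregationLayer(128, wd=1e-4, name='agg_')(detail, semantic)  # (None, 64, 64, 128)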
Code Example #7
 def wrapper(input_tensor):
     # Detail branch stage 1: downsample to 1/2 resolution.
     x = Conv2dBn(
         64,
         kernel_size=3,
         activation='relu',
         kernel_initializer='he_uniform',
         padding='same',
         strides=2,
         use_batchnorm=True,
         use_bias=False,
         name="detail_branch_cnn_1_1",
         kernel_regularizer=l2(wd),
     )(input_tensor)
     x = Conv2dBn(
         64,
         kernel_size=3,
         activation='relu',
         kernel_initializer='he_uniform',
         padding='same',
         strides=1,
         use_batchnorm=True,
         use_bias=False,
         name="detail_branch_cnn_1_2",
         kernel_regularizer=l2(wd)
     )(x)
     # Stage 2: downsample to 1/4 resolution.
     x = Conv2dBn(
         64,
         kernel_size=3,
         activation='relu',
         kernel_initializer='he_uniform',
         padding='same',
         strides=2,
         use_batchnorm=True,
         use_bias=False,
         name="detail_branch_cnn_2_1",
         kernel_regularizer=l2(wd)
     )(x)
     x = Conv2dBn(
         64,
         kernel_size=3,
         activation='relu',
         kernel_initializer='he_uniform',
         padding='same',
         strides=1,
         use_batchnorm=True,
         use_bias=False,
         name="detail_branch_cnn_2_2",
         kernel_regularizer=l2(wd)
     )(x)
     x = Conv2dBn(
         64,
         kernel_size=3,
         activation='relu',
         kernel_initializer='he_uniform',
         padding='same',
         strides=1,
         use_batchnorm=True,
         use_bias=False,
         name="detail_branch_cnn_2_3",
         kernel_regularizer=l2(wd)
     )(x)
     # Stage 3: downsample to 1/8 resolution and widen to 128 channels.
     x = Conv2dBn(
         128,
         kernel_size=3,
         activation='relu',
         kernel_initializer='he_uniform',
         padding='same',
         strides=2,
         use_batchnorm=True,
         use_bias=False,
         name="detail_branch_cnn_3_1",
         kernel_regularizer=l2(wd)
     )(x)
     x = Conv2dBn(
         128,
         kernel_size=3,
         activation='relu',
         kernel_initializer='he_uniform',
         padding='same',
         strides=1,
         use_batchnorm=True,
         use_bias=False,
         name="detail_branch_cnn_3_2",
         kernel_regularizer=l2(wd)
     )(x)
     x = Conv2dBn(
         128,
         kernel_size=3,
         activation='relu',
         kernel_initializer='he_uniform',
         padding='same',
         strides=1,
         use_batchnorm=True,
         use_bias=False,
         name="detail_branch_cnn_3_3",
         kernel_regularizer=l2(wd)
     )(x)
     return x
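Taken together, the snippets follow a BiSeNetV2-style layout: this detail branch ends at 1/8 resolution with 128 channels, a semantic branch is built from the stem, gather-and-expansion, and context-embedding blocks, and the two are fused by the aggregation layer before the segmentation head upsamples to full resolution. A hypothetical end-to-end wiring, with every factory name (DetailBranch, SemanticBranch, SegmentationHead) illustrative rather than taken from the source:

 inp = tf.keras.Input((512, 512, 3))
 d = DetailBranch(wd=1e-4)(inp)                                # (None, 64, 64, 128)
 s = SemanticBranch(wd=1e-4)(inp)                              # stem + GE + CE blocks -> (None, 16, 16, 128)
 x = AggregationLayer(128, wd=1e-4, name='agg_')(d, s)         # (None, 64, 64, 128)
 out = SegmentationHead(classes=19, wd=1e-4, name='head_')(x)  # bilinear resize to (512, 512)
 model = tf.keras.Model(inp, out)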