Example #1
def sub_net_1(input):
    with scope("conv1_sub1"):
        tmp = conv(input, 32, 3, 2, padding=1)
        tmp = bn(tmp, act='relu')
    with scope("conv2_sub1"):
        tmp = conv(tmp, 32, 3, 2, padding=1)
        tmp = bn(tmp, act='relu')
    with scope("conv3_sub1"):
        tmp = conv(tmp, 64, 3, 2, padding=1)
        tmp = bn(tmp, act='relu')
    with scope("conv3_sub1_proj"):
        tmp = conv(tmp, 128, 1, 1)
        tmp = bn(tmp)
    return tmp
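
sub_net_1 is the full-resolution branch of an ICNet-style model: three stride-2 conv + BN + ReLU stages downsample the image by a factor of 8, and a final 1x1 projection raises the result to 128 channels. A minimal usage sketch under the PaddlePaddle 1.x static graph; the commented import path is a guess, since scope, conv and bn are helpers from the surrounding project:

import paddle.fluid as fluid
# from models.libs.model_libs import scope, conv, bn  # hypothetical path

image = fluid.data(name='image', shape=[-1, 3, 512, 1024], dtype='float32')
feat = sub_net_1(image)  # three stride-2 convs -> [-1, 128, 64, 128]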
Example #2
def psp_module(input, out_features):

    cat_layers = []
    sizes = (1, 2, 3, 6)
    for size in sizes:
        psp_name = "psp" + str(size)
        with scope(psp_name):
            pool = fluid.layers.adaptive_pool2d(input,
                                                pool_size=[size, size],
                                                pool_type='avg',
                                                name=psp_name + '_adapool')
            data = conv(pool,
                        out_features,
                        filter_size=1,
                        bias_attr=False,
                        name=psp_name + '_conv')
            data_bn = bn(data, act='relu')
            interp = fluid.layers.resize_bilinear(data_bn,
                                                  out_shape=input.shape[2:],
                                                  name=psp_name + '_interp',
                                                  align_mode=0)
        cat_layers.append(interp)
    cat_layers = [input] + cat_layers
    out = fluid.layers.concat(cat_layers, axis=1, name='psp_cat')

    return out
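
The module pools the input to 1x1, 2x2, 3x3 and 6x6 grids, projects each pooled map to out_features channels with a bias-free 1x1 conv + BN + ReLU, resizes it back to the input's spatial size, and concatenates all branches with the input along the channel axis, turning an [N, C, H, W] input into [N, C + 4 * out_features, H, W]. A hedged sketch under the same helper assumptions as above:

import paddle.fluid as fluid

feat = fluid.data(name='feat', shape=[-1, 2048, 64, 128], dtype='float32')
out = psp_module(feat, out_features=512)  # -> [-1, 2048 + 4 * 512, 64, 128]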
Example #3
def sub_net_4(input, input_shape):
    tmp = pyramis_pooling(input, input_shape)
    with scope("conv5_4_k1"):
        tmp = conv(tmp, 256, 1, 1)
        tmp = bn(tmp, act='relu')
    tmp = interp(tmp, out_shape=np.ceil(input_shape / 16))
    return tmp
Example #4
def CCF24(sub2_out, sub4_out, input_shape):
    with scope("conv_sub4"):
        tmp = conv(sub4_out, 128, 3, dilation=2, padding=2)
        tmp = bn(tmp)
    tmp = tmp + sub2_out
    tmp = fluid.layers.relu(tmp)
    tmp = interp(tmp, np.ceil(input_shape / 8))
    return tmp
Example #5
def CCF124(sub1_out, sub24_out, input_shape):
    tmp = zero_padding(sub24_out, padding=2)
    with scope("conv_sub2"):
        tmp = conv(tmp, 128, 3, dilation=2)
        tmp = bn(tmp)
    tmp = tmp + sub1_out
    tmp = fluid.layers.relu(tmp)
    tmp = interp(tmp, input_shape // 4)
    return tmp
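
Examples #3 through #5 form the cascade-fusion tail of an ICNet-style model (pyramis_pooling, interp and zero_padding are helpers defined elsewhere in the source project, and np is NumPy, so import numpy as np is required). sub_net_4 compresses the pyramid-pooled 1/32 features to 256 channels and upsamples them to 1/16 scale, CCF24 fuses them with the 1/16 branch and upsamples to 1/8, and CCF124 fuses the result with the 1/8-scale output of sub_net_1 and upsamples to 1/4. A hedged sketch of the chain, assuming sub1_out, sub2_out and sub4_out are the three branch outputs:

import numpy as np

input_shape = np.array([1024, 2048])          # original image [H, W]
tmp = sub_net_4(sub4_out, input_shape)        # 1/32 features, upsampled to 1/16
tmp = CCF24(sub2_out, tmp, input_shape)       # fused with the 1/16 branch -> 1/8
out = CCF124(sub1_out, tmp, input_shape)      # fused with the 1/8 branch -> 1/4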
Example #6
    def net(self, higher_res_feature, lower_res_feature):
        h, w = higher_res_feature.shape[2:]
        lower_res_feature = fluid.layers.resize_bilinear(lower_res_feature,
                                                         [h, w],
                                                         align_mode=0)

        with scope('dwconv'):
            lower_res_feature = relu(
                bn(conv(lower_res_feature, self.out_channels, 1)))
        with scope('conv_lower_res'):
            lower_res_feature = bn(
                conv(lower_res_feature, self.out_channels, 1, bias_attr=True))
        with scope('conv_higher_res'):
            higher_res_feature = bn(
                conv(higher_res_feature, self.out_channels, 1, bias_attr=True))
        out = higher_res_feature + lower_res_feature

        return relu(out)
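
This is a Fast-SCNN-style feature-fusion module: the lower-resolution input is bilinearly upsampled to the higher-resolution size, each input is projected to self.out_channels by a 1x1 conv + BN, and the sum is passed through ReLU. A hedged one-line sketch, where ffm stands for an instance of the surrounding class:

fused = ffm.net(high_res_feat, low_res_feat)  # NCHW, ffm.out_channels channels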
Example #7
    def xception_block(self,
                       input,
                       channels,
                       strides=1,
                       filters=3,
                       dilation=1,
                       skip_conv=True,
                       has_skip=True,
                       activation_fn_in_separable_conv=False):
        repeat_number = 3
        channels = check_data(channels, repeat_number)
        filters = check_data(filters, repeat_number)
        strides = check_data(strides, repeat_number)
        data = input
        results = []
        for i in range(repeat_number):
            with scope('separable_conv' + str(i + 1)):
                if not activation_fn_in_separable_conv:
                    data = relu(data)
                    data = separate_conv(
                        data,
                        channels[i],
                        strides[i],
                        filters[i],
                        dilation=dilation)
                else:
                    data = separate_conv(
                        data,
                        channels[i],
                        strides[i],
                        filters[i],
                        dilation=dilation,
                        act=relu)
                results.append(data)
        if not has_skip:
            return data, results
        if skip_conv:
            param_attr = fluid.ParamAttr(
                name=name_scope + 'weights',
                regularizer=None,
                initializer=fluid.initializer.TruncatedNormal(
                    loc=0.0, scale=0.09))
            with scope('shortcut'):
                skip = bn(
                    conv(
                        input,
                        channels[-1],
                        1,
                        strides[-1],
                        groups=1,
                        padding=0,
                        param_attr=param_attr))
        else:
            skip = input
        return data + skip, results
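
A hedged sketch of one way to call the block, e.g. as an Xception entry-flow stage in a DeepLab-style backbone (check_data broadcasts the scalar or list arguments to the three repeats; the values below are illustrative, not taken from the source):

# three separable convs to 256 channels, stride 2 on the last repeat,
# with a strided 1x1-conv shortcut (skip_conv=True by default)
data, intermediates = self.xception_block(
    data, channels=256, strides=[1, 1, 2], filters=3)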
Example #8
def psp_module(input, out_features):
    # Pyramid Scene Parsing (PSP) pooling module
    # Input:  the feature map produced by the backbone
    # Output: the input pooled at several scales, convolved, interpolated
    #         back to the original size and concatenated, followed by a
    #         final conv + BN

    cat_layers = []
    sizes = (1, 2, 3, 6)
    for size in sizes:
        psp_name = "psp" + str(size)
        with scope(psp_name):
            pool = fluid.layers.adaptive_pool2d(
                input,
                pool_size=[size, size],
                pool_type='avg',
                name=psp_name + '_adapool')
            data = conv(
                pool,
                out_features,
                filter_size=1,
                bias_attr=True,
                name=psp_name + '_conv')
            data_bn = bn(data, act='relu')
            interp = fluid.layers.resize_bilinear(
                data_bn, out_shape=input.shape[2:], name=psp_name + '_interp')
        cat_layers.append(interp)
    cat_layers = [input] + cat_layers[::-1]
    cat = fluid.layers.concat(cat_layers, axis=1, name='psp_cat')

    psp_end_name = "psp_end"
    with scope(psp_end_name):
        data = conv(
            cat,
            out_features,
            filter_size=3,
            padding=1,
            bias_attr=True,
            name=psp_end_name)
        out = bn(data, act='relu')

    return out
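
Compared with Example #2, this variant adds a bias to the branch convolutions (bias_attr=True), leaves resize_bilinear at its default align_mode, concatenates the pyramid branches in reverse order (cat_layers[::-1]), and appends a psp_end stage, a 3x3 conv + BN + ReLU, that compresses the concatenation back to out_features channels.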
Example #9
    def net(self, x):
        x, _ = inverted_blocks(x, self.in_channels, self.t,
                               self.block_channels[0], self.num_blocks[0], 2,
                               'inverted_block_1')
        x, _ = inverted_blocks(x, self.block_channels[0], self.t,
                               self.block_channels[1], self.num_blocks[1], 2,
                               'inverted_block_2')
        x, _ = inverted_blocks(x, self.block_channels[1], self.t,
                               self.block_channels[2], self.num_blocks[2], 1,
                               'inverted_block_3')
        x = psp_module(x, self.block_channels[2] // 4)
        with scope('out'):
            x = relu(bn(conv(x, self.out_channels, 1)))
        return x
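
Read together with Examples #8 and #10, this is the global feature extractor of a Fast-SCNN-style network: three stacks of inverted-residual bottlenecks (expansion factor self.t, strides 2, 2 and 1), the PSP module from Example #8 at a quarter of the final channel width, and a 1x1 conv + BN + ReLU projection to self.out_channels.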
Example #10
def learning_to_downsample(x,
                           dw_channels1=32,
                           dw_channels2=48,
                           out_channels=64):
    x = relu(bn(conv(x, dw_channels1, 3, 2)))
    with scope('dsconv1'):
        x = separate_conv(x,
                          dw_channels2,
                          stride=2,
                          filter=3,
                          act=fluid.layers.relu)
    with scope('dsconv2'):
        x = separate_conv(x,
                          out_channels,
                          stride=2,
                          filter=3,
                          act=fluid.layers.relu)
    return x
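
The learning-to-downsample head of a Fast-SCNN-style network: one standard stride-2 conv followed by two stride-2 depthwise-separable convs, for an overall downsampling factor of 8. A hedged sketch (PaddlePaddle 1.x static graph; conv, bn, relu, separate_conv and scope are project helpers):

import paddle.fluid as fluid

image = fluid.data(name='image', shape=[-1, 3, 1024, 2048], dtype='float32')
x = learning_to_downsample(image)  # -> [-1, 64, 128, 256]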
Example #11
def sub_net_2(input):
    with scope("conv3_1_sub2_proj"):
        tmp = conv(input, 128, 1, 1)
        tmp = bn(tmp)
    return tmp
Example #12
def aux_layer(x, num_classes):
    x = relu(bn(conv(x, 32, 3, padding=1)))
    x = dropout2d(x, 0.1, is_train=(cfg.PHASE == 'train'))
    with scope('logit'):
        x = conv(x, num_classes, 1, bias_attr=True)
    return x
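
This auxiliary head applies a 3x3 conv + BN + ReLU, 2-D dropout that is active only when cfg.PHASE == 'train' (cfg and dropout2d are defined elsewhere in the source project), and a 1x1 conv that produces per-class logits. A hedged one-line usage sketch:

logits = aux_layer(mid_features, num_classes=19)  # e.g. the 19 Cityscapes classes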