def __init__(self, dim, use_bias):
        """Channel-preserving residual conv body: two reflection-padded
        3x3 conv + InstanceNorm units; only the first ends in ReLU.

        Args:
            dim: number of input and output channels.
            use_bias: bias flag forwarded to both convolutions.
        """
        super(ResnetBlock, self).__init__()

        def _unit(with_relu):
            # One reflection-padded 3x3 conv followed by instance norm.
            layers = [
                ReflectionPad2d([1, 1, 1, 1]),
                Conv2D(dim,
                       dim,
                       filter_size=3,
                       stride=1,
                       padding=0,
                       bias_attr=use_bias),
                InstanceNorm(dim),
            ]
            if with_relu:
                layers.append(ReLU(True))
            return layers

        # TODO: what is the purpose of the second unit having no ReLU?
        self.conv_block = Sequential(*(_unit(True) + _unit(False)))
    def __init__(self, num_channels, num_filters):
        """U-Net style encoder stage: two 3x3 conv + BN(relu) pairs, then
        a 2x2 max pool.

        Args:
            num_channels: input channel count for the first conv.
            num_filters: output channel count of both convs.
        """
        super(Encoder, self).__init__()
        #TODO: encoder contains:
        #       1 3x3conv + 1bn + relu +
        #       1 3x3conc + 1bn + relu +
        #       1 2x2 pool
        # return features before and after pool
        self.conv1 = Conv2D(num_channels,
                            num_filters,
                            filter_size=3,
                            stride=1,
                            padding=1)  # 'same' padding: keeps spatial size
        self.bn1 = BatchNorm(num_filters, act='relu')  # BN applies the ReLU

        self.conv2 = Conv2D(num_filters,
                            num_filters,
                            filter_size=3,
                            stride=1,
                            padding=1)
        self.bn2 = BatchNorm(num_filters, act='relu')

        # ceil_mode=True so odd spatial sizes round up instead of dropping
        # the last row/column.
        self.pool = Pool2D(pool_size=2,
                           pool_stride=2,
                           pool_type='max',
                           ceil_mode=True)
Beispiel #3
0
    def __init__(self, dim, use_bias):
        """Residual conv body: two reflection-padded 3x3 convs with
        InstanceNorm; only the first is followed by ReLU.

        Args:
            dim: input/output channel count (channel-preserving).
            use_bias: bias flag forwarded to both convolutions.
        """
        super(ResnetBlock, self).__init__()
        conv_block = []
        conv_block += [
            ReflectionPad2d(pad=1),
            Conv2D(num_channels=dim,
                   num_filters=dim,
                   filter_size=3,
                   stride=1,
                   padding=0,  # padding is done by the reflection pad above
                   bias_attr=use_bias),
            InstanceNorm(dim),
            ReLU(inplace=True)
        ]

        # Second unit: identical shape, but no activation afterwards.
        conv_block += [
            ReflectionPad2d(pad=1),
            Conv2D(num_channels=dim,
                   num_filters=dim,
                   filter_size=3,
                   stride=1,
                   padding=0,
                   bias_attr=use_bias),
            InstanceNorm(dim)
        ]

        self.conv_block = Sequential(*conv_block)
Beispiel #4
0
    def __init__(self,
                 input_feature,
                 block,
                 num_blocks,
                 intermediate_feature=64,
                 dense=True):
        """Stack of up to four intermediate blocks behind a 7x7 stem conv.

        Args:
            input_feature: channel count of the incoming feature map.
            block: callable that builds one intermediate block; invoked as
                block(dim, dim, dilation=1).
            num_blocks: values >= 2/3/4 enable block2/block3/block4;
                unused slots are left as None.
            intermediate_feature: channel width between blocks.
            dense: stored flag; used outside this constructor.
        """
        super(MultipleBasicBlock, self).__init__()
        self.dense = dense
        self.num_block = num_blocks
        self.intermediate_feature = intermediate_feature

        # Normal(0, 1) weight init with a fixed seed for reproducibility.
        param_attr = fluid.ParamAttr(
            initializer=fluid.initializer.NormalInitializer(
                loc=0.0, scale=1.0, seed=0))

        self.block1 = Conv2D(input_feature,
                             intermediate_feature,
                             filter_size=7,
                             stride=1,
                             padding=3,  # 'same' padding for a 7x7 kernel
                             bias_attr=True,
                             param_attr=param_attr)

        dim = intermediate_feature
        self.block2 = block(dim, dim, dilation=1) if num_blocks >= 2 else None
        self.block3 = block(dim, dim, dilation=1) if num_blocks >= 3 else None
        self.block4 = block(dim, dim, dilation=1) if num_blocks >= 4 else None
        # Final 3x3 conv projects down to 3 output channels.
        self.block5 = Conv2D(dim, 3, 3, 1, 1)
Beispiel #5
0
    def __init__(self, dim, use_bias):
        """Residual conv body: two reflection-padded 3x3 convs with
        InstanceNorm; ReLU only after the first.

        NOTE(review): `use_bias` is accepted but not used — both convs
        hard-code bias_attr=True; confirm whether that is intentional.
        """
        super(ResnetBlock, self).__init__()
        conv_block = []
        conv_block += [
            ReflectionPad2D(1),
            Conv2D(dim,
                   dim,
                   filter_size=3,
                   stride=1,
                   padding=0,  # padding handled by ReflectionPad2D above
                   bias_attr=True),
            InstanceNorm(dim),
            ReLU(inplace=True)
        ]

        # Second unit: same shape, no trailing activation.
        conv_block += [
            ReflectionPad2D(1),
            Conv2D(dim,
                   dim,
                   filter_size=3,
                   stride=1,
                   padding=0,
                   bias_attr=True),
            InstanceNorm(dim)
        ]

        self.conv_block = fluid.dygraph.Sequential(*conv_block)
Beispiel #6
0
    def __init__(self, dim, use_bias):
        """Residual conv body using PReLU: two reflection-padded 3x3 convs
        with InstanceNorm; a PReLU activation follows only the first.

        Args:
            dim: input/output channel count (channel-preserving).
            use_bias: bias flag forwarded to both convolutions.
        """
        super(ResnetBlock, self).__init__()
        conv_block = []
        conv_block += [
            ReflectionPad2d(1),
            Conv2D(dim,
                   dim,
                   filter_size=3,
                   stride=1,
                   padding=0,  # padding is done by the reflection pad
                   bias_attr=use_bias),
            InstanceNorm(dim),
            PRelu(mode="all")  # PReLU; mode="all" per framework semantics
        ]

        # Second unit: identical shape, no activation afterwards.
        conv_block += [
            ReflectionPad2d(1),
            Conv2D(dim,
                   dim,
                   filter_size=3,
                   stride=1,
                   padding=0,
                   bias_attr=use_bias),
            InstanceNorm(dim)
        ]

        self.conv_block = Sequential(*conv_block)
Beispiel #7
0
    def __init__(self, dim, use_bias):
        """Residual conv body: two reflection-padded 3x3 convs; ReLU only
        after the first.

        NOTE(review): this variant uses `Instancenorm()` (lowercase 'n',
        no channel argument) unlike sibling blocks that use
        `InstanceNorm(dim)` — confirm the helper's signature.
        """
        super(ResnetBlock, self).__init__()
        conv_block = []

        conv_block += [
            ReflectionPad2D(1),
            Conv2D(dim,
                   num_filters=dim,
                   filter_size=3,
                   stride=1,
                   bias_attr=use_bias),
            Instancenorm(),
            ReLU()
        ]

        # Second unit: same shape, no trailing activation.
        conv_block += [
            ReflectionPad2D(1),
            Conv2D(dim,
                   num_filters=dim,
                   filter_size=3,
                   stride=1,
                   bias_attr=use_bias),
            Instancenorm()
        ]

        self.conv_block = Sequential(*conv_block)
Beispiel #8
0
    def __init__(self, input_nc, ndf=64, n_layers=5):
        """PatchGAN-style discriminator with a Class Activation Map head
        (UGATIT-style: gap_fc/gmp_fc + 1x1 conv), ending in a 4x4 conv
        that produces a 1-channel map.

        Args:
            input_nc: channels of the input image.
            ndf: base filter count; doubled at each strided stage.
            n_layers: total depth; controls how many strided stages run.
        """
        super(Discriminator, self).__init__()
        model = [ReflectionPad2d([1,1,1,1]),
                # TODO: spectral normalization
                 Conv2D(input_nc, ndf, filter_size=4, stride=2, padding=0, bias_attr=True),
                 LeakyReLU(0.2, True)]

        # Downsampling stages: channels double, spatial size halves.
        for i in range(1, n_layers - 2):
            mult = 2 ** (i - 1)
            model += [ReflectionPad2d([1,1,1,1]),
                      Conv2D(ndf * mult, ndf * mult * 2, filter_size=4, stride=2, padding=0, bias_attr=True),
                      LeakyReLU(0.2, True)]

        # One extra stage at stride 1 (doubles channels, keeps size).
        mult = 2 ** (n_layers - 2 - 1)
        model += [ReflectionPad2d([1,1,1,1]),
                  Conv2D(ndf * mult, ndf * mult * 2, filter_size=4, stride=1, padding=0, bias_attr=True),
                  LeakyReLU(0.2, True)]

        # Class Activation Map
        mult = 2 ** (n_layers - 2)
        self.gap_fc = Linear(ndf * mult, 1, bias_attr=False)  # global avg pool branch
        self.gmp_fc = Linear(ndf * mult, 1, bias_attr=False)  # global max pool branch
        self.conv1x1 = Conv2D(ndf * mult * 2, ndf * mult, filter_size=1, stride=1, bias_attr=True)
        self.leaky_relu = LeakyReLU(0.2, True)

        self.pad = ReflectionPad2d([1,1,1,1])
        self.conv = Conv2D(ndf * mult, 1, filter_size=4, stride=1, padding=0, bias_attr=False)

        self.model = Sequential(*model)
Beispiel #9
0
    def __init__(self, num_classes=1):
        """CNN classifier: four conv+BN stages, two ConcatConv blocks,
        global average pooling and a final FC head.

        Args:
            num_classes: output dimension of the final Linear layer.
        """
        super(CNN_AllTricks, self).__init__()

        # Stem: channels 3 -> 64 -> 128 -> 256 -> 512, leaky_relu applied
        # inside the conv, BatchNorm applied afterwards.
        self.conv1 = Conv2D(3, 64, 3, padding=1, stride=1, act='leaky_relu')
        self.bn1 = BatchNorm(64)
        self.conv2 = Conv2D(64, 128, 3, padding=1, stride=1, act='leaky_relu')
        self.bn2 = BatchNorm(128)
        self.conv3 = Conv2D(128, 256, 3, padding=1, stride=1, act='leaky_relu')
        self.bn3 = BatchNorm(256)
        self.conv4 = Conv2D(256, 512, 3, padding=1, stride=1, act='leaky_relu')
        self.bn4 = BatchNorm(512)
        # Multi-branch blocks; BN widths (1024) must match ConcatConv's
        # concatenated output — defined elsewhere in the project.
        self.block5 = ConcatConv((512, 384), (384, 256), (256, 256),
                                 128,
                                 act_fun='leaky_relu')
        self.bn5 = BatchNorm(1024)
        self.block6 = ConcatConv((1024, 384), (384, 256), (256, 256),
                                 128,
                                 act_fun='leaky_relu')
        self.bn6 = BatchNorm(1024)

        self.pool_global = Pool2D(pool_stride=1,
                                  global_pooling=True,
                                  pool_type='avg')
        self.fc = Linear(input_dim=1024, output_dim=num_classes)

        self.pool_down = Pool2D(pool_size=2, pool_stride=2, pool_type='max')
        self.dropout = Dropout(p=0.5)
Beispiel #10
0
    def __init__(self,
                 num_channels,
                 num_filters,
                 filter_size=3,
                 stride=1,
                 groups=1,
                 use_bn=True,
                 act='relu',
                 name=None):
        """Conv2D optionally followed by BatchNorm.

        When use_bn is True the activation is applied by the BatchNorm
        layer and the conv stays linear; otherwise the conv applies the
        activation itself and no BN layer is created.
        """
        super(ConvBNLayer, self).__init__(name)

        self.use_bn = use_bn
        same_pad = (filter_size - 1) // 2  # 'same' padding for odd kernels
        self.conv = Conv2D(num_channels=num_channels,
                           num_filters=num_filters,
                           filter_size=filter_size,
                           stride=stride,
                           padding=same_pad,
                           groups=groups,
                           act=None if use_bn else act,
                           bias_attr=None)
        if use_bn:
            self.bn = BatchNorm(num_filters, act=act)
Beispiel #11
0
    def __init__(self, dim, use_bias=True):
        """Residual conv body with explicit weight/bias initializers
        (init_w / init_bias are project helpers): two reflection-padded
        3x3 convs with InstanceNorm, ReLU only after the first.

        Args:
            dim: input/output channel count (channel-preserving).
            use_bias: forwarded to init_bias() to build the bias attr.
        """
        super(ResnetBlock, self).__init__()
        conv_block = []
        conv_block += [
            ReflectionPad2d(1),
            Conv2D(dim,
                   dim,
                   filter_size=3,
                   stride=1,
                   padding=0,  # padding handled by the reflection pad
                   param_attr=init_w(),
                   bias_attr=init_bias(use_bias)),
            InstanceNorm(dim),
            ReLU()
        ]

        # Second unit: identical shape, no trailing activation.
        conv_block += [
            ReflectionPad2d(1),
            Conv2D(dim,
                   dim,
                   filter_size=3,
                   stride=1,
                   padding=0,
                   param_attr=init_w(),
                   bias_attr=init_bias(use_bias)),
            InstanceNorm(dim)
        ]

        self.conv_block = Sequential(*conv_block)
Beispiel #12
0
    def __init__(self, dim, use_bias):
        """Residual block built around adaILN normalisation: two
        reflection-padded 3x3 convs; ReLU(False) after the first only.

        Args:
            dim: channel count preserved through both convs.
            use_bias: bias flag forwarded to both convs.
        """
        super(ResnetAdaILNBlock, self).__init__()

        self.pad1 = ReflectionPad2d(1)
        self.conv1 = Conv2D(dim,
                            dim,
                            filter_size=3,
                            stride=1,
                            padding=0,  # padding done by the reflection pad
                            bias_attr=use_bias)
        self.norm1 = adaILN(dim)
        self.relu1 = ReLU(False)  # non-inplace ReLU

        self.pad2 = ReflectionPad2d(1)
        self.conv2 = Conv2D(dim,
                            dim,
                            filter_size=3,
                            stride=1,
                            padding=0,
                            bias_attr=use_bias)
        self.norm2 = adaILN(dim)
Beispiel #13
0
 def __init__(self, num_channels, num_classes):
     """DeepLab segmentation head: ASPP, then 3x3 conv + BN(relu), then a
     1x1 conv to per-class logits. Layers are passed straight to the
     (Sequential-style) superclass constructor.

     Args:
         num_channels: channels of the backbone feature map fed to ASPP.
         num_classes: output channel count of the final 1x1 conv.
     """
     super(DeepLabHead, self).__init__(
             ASPPModule(num_channels, 256, [12, 24, 36]),
             Conv2D(256, 256, 3, padding=1),
             BatchNorm(256, act='relu'),
             Conv2D(256, num_classes, 1)
             )
Beispiel #14
0
    def __init__(self, dim, use_bias):
        """Channel-preserving residual conv body: two reflection-padded
        3x3 conv + InstanceNorm units, with a (non-inplace) ReLU only
        between them.

        Args:
            dim: number of input and output channels.
            use_bias: bias flag forwarded to both convolutions.
        """
        super(ResnetBlock, self).__init__()

        def _unit():
            # One reflection-padded 3x3 conv with instance normalisation.
            return [
                ReflectionPad2d(1),
                Conv2D(dim,
                       dim,
                       filter_size=3,
                       stride=1,
                       padding=0,
                       bias_attr=use_bias),
                InstanceNorm(dim),
            ]

        layers = _unit() + [ReLU(False)] + _unit()
        self.conv_block = Sequential(*layers)
Beispiel #15
0
    def __init__(self, num_channels, num_filters):
        """U-Net style decoder stage: 2x2 transpose conv upsamples 2x,
        then two 3x3 conv + BN(relu) pairs.

        NOTE(review): conv1 takes `num_channels` inputs — this only lines
        up if the forward pass concatenates the upsampled features with a
        skip connection back to `num_channels`; confirm against forward().
        """
        super(Decoder, self).__init__()
        # TODO: decoder contains:
        #       1 2x2 transpose conv (makes feature map 2x larger)
        #       1 3x3 conv + 1bn + 1relu +
        #       1 3x3 conv + 1bn + 1relu

        self.up = Conv2DTranspose(num_channels=num_channels,
                                  num_filters=num_filters,
                                  filter_size=2,
                                  stride=2)  # doubles spatial resolution

        self.conv1 = Conv2D(num_channels,
                            num_filters,
                            filter_size=3,
                            stride=1,
                            padding=1)  # 'same' padding
        self.bn1 = BatchNorm(num_filters, act='relu')

        self.conv2 = Conv2D(num_filters,
                            num_filters,
                            filter_size=3,
                            stride=1,
                            padding=1)
        self.bn2 = BatchNorm(num_filters, act='relu')
Beispiel #16
0
    def __init__(self, name_scope):
        """DCGAN-style generator G.

        Pipeline: two fully-connected layers, then (after a reshape done
        in forward) two 5x5 convs; every layer except the last is
        followed by BatchNorm; the final conv uses tanh.
        """
        super(G, self).__init__(name_scope)
        name_scope = self.full_name()
        # Code for My_G
        # (Original note, translated): model flow is 2 fully-connected
        # layers + upsample + conv + upsample + conv; every output except
        # the last conv goes through a normalisation layer; leaky_relu
        # elsewhere. NOTE(review): the code below actually uses 'relu' in
        # the BatchNorm layers, not leaky_relu — confirm which is intended.
        """
        模型流程:2次全连接+1向上采样+1卷积+1向上采样+1卷积
        注意:除最后一次卷积运算后,其余的输出做一次归一化层;其余用 leaky_relu
        """
        self.fc1 = Linear(input_dim=100, output_dim=1024)  # latent z is 100-d
        self.bn1 = fluid.dygraph.BatchNorm(num_channels=1024, act='relu')

        self.fc2 = Linear(input_dim=1024, output_dim=128 * 8 * 8)  # reshaped to (128, 8, 8)
        self.bn2 = fluid.dygraph.BatchNorm(num_channels=128 * 8 * 8,
                                           act='relu')

        self.conv1 = Conv2D(num_channels=128,
                            num_filters=64,
                            filter_size=5,
                            padding=2)  # 'same' padding for 5x5 kernel
        self.bn3 = fluid.dygraph.BatchNorm(num_channels=64, act='relu')

        self.conv2 = Conv2D(num_channels=64,
                            num_filters=3,
                            filter_size=5,
                            padding=2,
                            act='tanh')  # output image in [-1, 1]
    def __init__(self, num_classes=59, backbone='resnet50'):
        """PSPNet: ResNet50 backbone, pyramid pooling module, conv head.

        Args:
            num_classes: channels of the final 1x1 classifier conv.
            backbone: accepted but unused — ResNet50 is hard-coded below.
        """
        super(PSPNet, self).__init__()

        res = ResNet50(pretrained=False)
        # stem: res.conv, res.pool2d_max
        self.layer0 = fluid.dygraph.Sequential(
            res.conv,
            res.pool2d_max
        )
        self.layer1 = res.layer1
        self.layer2 = res.layer2
        self.layer3 = res.layer3
        self.layer4 = res.layer4

        num_channels = 2048
        # psp: 2048 -> 2048*2 (PSP concatenates pooled branches onto input)
        self.pspmodule = PSPModule(num_channels, [1, 2, 3, 6])
        num_channels *= 2

        # cls: 2048*2 -> 512 -> num_classes
        self.classifier = fluid.dygraph.Sequential(
            Conv2D(num_channels, num_filters=512, filter_size=3, padding=1),
            BatchNorm(512, act='relu'),
            Dropout(0.1),
            Conv2D(512, num_classes, filter_size=1)
        )
Beispiel #18
0
 def __init__(self, in_chs, inter_chs=None):
     """Position attention module (PAM): 1x1 convs build query/key/value
     projections plus a learnable scalar blend weight gamma.

     Args:
         in_chs: input channel count.
         inter_chs: projection width; defaults to in_chs when falsy.

     NOTE(review): conv_value projects to inter_chs here; many PAM
     implementations keep the value branch at in_chs — confirm intended.
     """
     self.in_chs = in_chs
     self.inter_chs = inter_chs if inter_chs else in_chs
     self.conv_query = Conv2D(self.in_chs, self.inter_chs, 1)
     self.conv_key = Conv2D(self.in_chs, self.inter_chs, 1)
     self.conv_value = Conv2D(self.in_chs, self.inter_chs, 1)
     self.gamma = create_parameter([1], dtype='float32')  # starts as a scalar parameter
Beispiel #19
0
    def _build_weights(self, dim_in, dim_out, style_dim=64):
        """Create the layers of a style-conditioned residual block.

        Two 3x3 convs with AdaIN normalisation conditioned on a style
        vector; a 1x1 conv shortcut is added only when self.learned_sc
        indicates the channel count changes.

        Args:
            dim_in: input channels.
            dim_out: output channels.
            style_dim: dimensionality of the style code fed to AdaIN.
        """
        self.conv1 = Conv2D(dim_in, dim_out, 3, 1, 1)
        self.conv2 = Conv2D(dim_out, dim_out, 3, 1, 1)
        self.norm1 = AdaIN(style_dim, dim_in)   # normalises conv1's input
        self.norm2 = AdaIN(style_dim, dim_out)
        if self.learned_sc:
            self.conv1x1 = Conv2D(dim_in, dim_out, 1, 1, 0)  # learned shortcut
 def __init__(self):
     """LeNet-variant for 1-channel input and 65 classes.

     Conv/pool stack followed by a softmax Linear; the 32*10*10 input of
     the final Linear assumes a fixed input image size — confirm against
     the forward pass.
     """
     super(MyLeNet, self).__init__()
     self.hidden1_1 = Conv2D(1, 28, 5, 1)   # 1 -> 28 channels, 5x5 kernel
     self.hidden1_2 = Pool2D(pool_size=2, pool_type='max', pool_stride=1)
     self.hidden2_1 = Conv2D(28, 32, 3, 1)  # 28 -> 32 channels, 3x3 kernel
     self.hidden2_2 = Pool2D(pool_size=2, pool_type='max', pool_stride=1)
     self.hidden3 = Conv2D(32, 32, 3, 1)
     self.hidden4 = Linear(32 * 10 * 10, 65, act='softmax')
Beispiel #21
0
 def __init__(self):
     """Skeleton of a LeNet-style model.

     NOTE(review): every layer constructor below is called with no
     arguments — channel counts, kernel sizes and pool settings appear to
     be missing, and these calls likely fail at construction time.
     The intended values cannot be determined from this snippet alone;
     confirm against the framework API and the original model.
     """
     super(MyLeNet,self).__init__()
     self.hidden1_1 = Conv2D()
     self.hidden1_2 = Pool2D()
     self.hidden2_1 = Conv2D()
     self.hidden2_2 = Pool2D()
     self.hidden3 = Conv2D()
     self.hidden4 = Linear()
Beispiel #22
0
    def __init__(self, input_nc, ndf=64, n_layers=5):
        """Spectral-normalised PatchGAN discriminator with a CAM head
        (UGATIT-style: gap_fc/gmp_fc + 1x1 conv + final 4x4 conv).

        Args:
            input_nc: channels of the input image.
            ndf: base filter count; doubled at each strided stage.
            n_layers: total depth; controls the number of strided stages.
        """
        super(Discriminator, self).__init__()
        model = [
            ReflectionPad2D(1),
            Spectralnorm(layer=Conv2D(input_nc,
                                      num_filters=ndf,
                                      filter_size=4,
                                      stride=2,
                                      bias_attr=True)),
            LeakyReLU(alpha=0.2)
        ]

        # Downsampling stages: channels double, spatial size halves.
        for i in range(1, n_layers - 2):
            mult = 2**(i - 1)
            model += [
                ReflectionPad2D(1),
                Spectralnorm(layer=Conv2D(ndf * mult,
                                          num_filters=ndf * mult * 2,
                                          filter_size=4,
                                          stride=2,
                                          bias_attr=True)),
                LeakyReLU(alpha=0.2)
            ]

        # Extra stride-1 stage: doubles channels, keeps spatial size.
        mult = 2**(n_layers - 2 - 1)
        model += [
            ReflectionPad2D(1),
            Spectralnorm(layer=Conv2D(ndf * mult,
                                      num_filters=ndf * mult * 2,
                                      filter_size=4,
                                      stride=1,
                                      bias_attr=True)),
            LeakyReLU(alpha=0.2)
        ]

        # Class Activation Map
        mult = 2**(n_layers - 2)
        self.gap_fc = Spectralnorm(layer=Linear(ndf *
                                                mult, 1, bias_attr=False))  # avg-pool branch
        self.gmp_fc = Spectralnorm(layer=Linear(ndf *
                                                mult, 1, bias_attr=False))  # max-pool branch
        self.conv1x1 = Conv2D(ndf * mult * 2,
                              num_filters=ndf * mult,
                              filter_size=1,
                              stride=1,
                              bias_attr=True)
        self.leaky_relu = LeakyReLU(alpha=0.2)

        self.pad = ReflectionPad2D(1)

        # Final 1-channel patch output.
        self.conv = Spectralnorm(layer=Conv2D(ndf * mult,
                                              num_filters=1,
                                              filter_size=4,
                                              stride=1,
                                              bias_attr=False))

        self.model = Sequential(*model)
Beispiel #23
0
    def __init__(self, in_channels, out_channels, filter_size=1, stride=1, dilation=1, act=None):
        """Depthwise-separable conv with BatchNorm.

        A depthwise conv (groups == in_channels) followed by a 1x1
        pointwise conv to out_channels, then BN which applies `act`.
        """
        super(SeparateConvBN, self).__init__()

        # Depthwise: one filter per input channel; padding scaled by
        # dilation to keep the spatial size.
        self.conv = Conv2D(num_channels=in_channels, num_filters=in_channels,
            filter_size=filter_size, stride=stride, padding=(filter_size // 2) * dilation,
            groups=in_channels, dilation=dilation)
        # Pointwise: mixes channels, 1x1.
        self.pointwise = Conv2D(num_channels=in_channels, num_filters=out_channels,
            filter_size=1, stride=1, padding=0, groups=1, dilation=1)
        self.bn = BatchNorm(out_channels, act=act)
Beispiel #24
0
 def __init__(self):
     """Classic LeNet-5 for 3-channel input and 10 classes.

     c1/s2/c3/s4/c5 follow the original LeNet layer naming; f6 and f7
     are the fully-connected head (relu, then softmax).
     """
     super(MyLeNet, self).__init__()
     self.c1 = Conv2D(3, 6, 5, 1)    # 3 -> 6 channels, 5x5
     self.s2 = Pool2D(pool_size=2, pool_type='max', pool_stride=2)
     self.c3 = Conv2D(6, 16, 5, 1)   # 6 -> 16 channels, 5x5
     self.s4 = Pool2D(pool_size=2, pool_type='max', pool_stride=2)
     self.c5 = Conv2D(16, 120, 5, 1)
     self.f6 = Linear(120, 84, act='relu')
     self.f7 = Linear(84, 10, act='softmax')
Beispiel #25
0
 def __init__(self):
     """Convolutional encoder ('Invertor'): four stride-2 convs that
     downsample a 1-channel input while widening channels
     (1 -> 64 -> 128 -> 192 -> 256), then a tanh Linear to a 64-d code.

     The first conv applies leaky_relu itself; later stages apply it via
     BatchNorm. The fc input of 1024 assumes a fixed input resolution —
     confirm against the forward pass.
     """
     super(Invertor,self).__init__()
     self.conv1 = Conv2D(num_channels=1,num_filters=64,filter_size=3,padding=1,stride=2,act='leaky_relu')
     self.conv2 = Conv2D(num_channels=64,num_filters=128,filter_size=3,padding=1,stride=2)
     self.bn2 = BatchNorm(num_channels=128,act='leaky_relu')
     self.conv3 = Conv2D(num_channels=128,num_filters=192,filter_size=3,padding=1,stride=2)
     self.bn3 = BatchNorm(num_channels=192,act='leaky_relu')
     self.conv4 = Conv2D(num_channels=192,num_filters=256,filter_size=3,padding=1,stride=2)
     self.bn4 = BatchNorm(num_channels=256,act='leaky_relu')
     self.fc = Linear(input_dim=1024,output_dim=64,act='tanh')
Beispiel #26
0
    def __init__(self, dim, use_bias):
        """Residual block with adaILN normalisation: two reflection-padded
        3x3 convs; ReLU after the first only.

        Args:
            dim: channel count preserved through both convs.
            use_bias: bias flag forwarded to both convs.
        """
        super(ResnetAdaILNBlock, self).__init__()
        self.pad1 = ReflectionPad2D(1)
        self.conv1 = Conv2D(num_channels=dim, num_filters=dim, filter_size=3, stride=1, padding=0, bias_attr=use_bias)
        self.norm1 = adaILN(dim)
        self.relu1 = Relu()

        self.pad2 = ReflectionPad2D(1)
        self.conv2 = Conv2D(num_channels=dim, num_filters=dim, filter_size=3, stride=1, padding=0, bias_attr=use_bias)
        self.norm2 = adaILN(dim)
Beispiel #27
0
    def __init__(self, dim, use_bias):
        """Residual block with adaILN normalisation: two reflection-padded
        3x3 convs; an inplace ReLU follows only the first.

        Args:
            dim: channel count preserved through both convs.
            use_bias: bias flag forwarded to both convs.
        """
        super(ResnetAdaILNBlock, self).__init__()

        def _conv():
            # Channel-preserving 3x3 conv; padding done by the pad layers.
            return Conv2D(dim,
                          dim,
                          filter_size=3,
                          stride=1,
                          padding=0,
                          bias_attr=use_bias)

        self.pad1 = ReflectionPad2d([1, 1, 1, 1])
        self.conv1 = _conv()
        self.norm1 = adaILN(dim)
        self.relu1 = ReLU(True)

        self.pad2 = ReflectionPad2d([1, 1, 1, 1])
        self.conv2 = _conv()
        self.norm2 = adaILN(dim)
Beispiel #28
0
    def __init__(self):
        """GoogLeNet (Inception v1) with a single scalar output.

        Five stages: a 7x7 stem, 1x1+3x3 convs, then three groups of
        Inception blocks separated by max pools, ending in global average
        pooling and a Linear head (no activation — raw score).
        """
        super(GoogLeNet, self).__init__()
        # 1
        self.conv1 = Conv2D(num_channels=3,
                            num_filters=64,
                            filter_size=7,
                            padding=3,
                            act='relu')
        self.pool1 = Pool2D(pool_size=3,
                            pool_stride=2,
                            pool_padding=1,
                            pool_type='max')

        # 2
        self.conv2_1 = Conv2D(num_channels=64,
                              num_filters=64,
                              filter_size=1,
                              act='relu')
        self.conv2_2 = Conv2D(num_channels=64,
                              num_filters=192,
                              filter_size=3,
                              padding=1,
                              act='relu')
        self.pool2 = Pool2D(pool_size=3,
                            pool_stride=2,
                            pool_padding=1,
                            pool_type='max')

        # 3 — Inception(in_ch, 1x1, (3x3 reduce, 3x3), (5x5 reduce, 5x5), pool-proj)
        self.block3_1 = Inception(192, 64, (96, 128), (16, 32), 32)
        self.block3_2 = Inception(256, 128, (128, 192), (32, 96), 64)
        self.pool3 = Pool2D(pool_size=3,
                            pool_stride=2,
                            pool_padding=1,
                            pool_type='max')

        # 4
        self.block4_1 = Inception(480, 192, (96, 208), (16, 48), 64)
        self.block4_2 = Inception(512, 160, (112, 224), (24, 64), 64)
        self.block4_3 = Inception(512, 128, (128, 256), (24, 64), 64)
        self.block4_4 = Inception(512, 112, (144, 288), (32, 64), 64)
        self.block4_5 = Inception(528, 256, (160, 320), (32, 128), 128)
        self.pool4 = Pool2D(pool_size=3,
                            pool_stride=2,
                            pool_padding=1,
                            pool_type='max')

        # 5
        self.block5_1 = Inception(832, 256, (160, 320), (32, 128), 128)
        self.block5_2 = Inception(832, 384, (192, 384), (48, 128), 128)
        self.pool5 = Pool2D(pool_stride=1,
                            global_pooling=True,
                            pool_type='avg')

        self.fc = Linear(input_dim=1024, output_dim=1, act=None)
Beispiel #29
0
 def __init__(self, inplanes, planes, stride=1, downsample=None):
     """ResNet bottleneck block: 1x1 reduce, 3x3 (optionally strided),
     1x1 expand by self.expansion; BN (linear) after each conv.

     Args:
         inplanes: input channel count.
         planes: bottleneck width; output is planes * self.expansion.
         stride: stride of the middle 3x3 conv.
         downsample: optional layer matching the shortcut's shape.
     """
     super(Bottleneck, self).__init__()
     self.conv1 = Conv2D(inplanes, planes, filter_size=1, bias_attr=False)
     self.bn1 = BatchNorm(planes)
     self.conv2 = Conv2D(planes, planes, filter_size=3, stride=stride,
                            padding=1, bias_attr=False)
     self.bn2 = BatchNorm(planes)
     # self.expansion is a class attribute defined on Bottleneck elsewhere.
     self.conv3 = Conv2D(planes, planes * self.expansion, filter_size=1, bias_attr=False)
     self.bn3 = BatchNorm(planes * self.expansion)
     self.relu = fluid.layers.relu  #nn.ReLU(inplace=True)
     self.downsample = downsample
     self.stride = stride
Beispiel #30
0
class UNet(Layer):
    self.down1 = Encoder(num_channels = 3, num_filter=64)
    self.down2 = Encoder(num_channels = 64, num_filter=64)
    self.down3 = Encoder(num_channels = 128, num_filter=64)
    self.down4 = Encoder(num_channels = 256, num_filter=64)

    self.mid_conv1 = Conv2D(512, 1024, filter_size=1, paddle=0, stride=1)
    self.mid_bn1 = BatchNorm(1024, act="relu")
    self.mid_conv2 = Conv2D(1024, 1024, filter_size=1, paddle=0, stride=1)
    self.mid_bn2 = BatchNorm(1024, act="relu")

    self.down1 = Encoder(num_channels=3, num_filter=64)