Example 1
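An M-Net segmentation network: three average-pooling layers make downscaled copies of the input that are injected into encoder blocks 2-4, a conv/max-pool encoder meets a four-stage Conv2DTranspose decoder, and 1x1 convolutions with sigmoid activation emit a two-channel side output at every decoder scale.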
    def __init__(self):
        super(M_net, self).__init__()
        self.scale1 = Pool2D(pool_size=2, pool_type='avg', pool_stride=2, pool_padding=0)
        self.scale2 = Pool2D(pool_size=2, pool_type='avg', pool_stride=2, pool_padding=0)
        self.scale3 = Pool2D(pool_size=2, pool_type='avg', pool_stride=2, pool_padding=0)

        self.block1_conv1 = Conv_Block(3, 32, 3, 1)
        self.block1_conv2 = Conv_Block(32, 32, 3, 1)
        self.block1_pool1 = Pool2D(pool_size=2, pool_type='max', pool_stride=2, pool_padding=0)

        self.block2_input1 = Conv_Block(3, 64, 3, 1)
        self.block2_conv1 = Conv_Block(96, 64, 3, 1)
        self.block2_conv2 = Conv_Block(64, 64, 3, 1)
        self.block2_pool1 = Pool2D(pool_size=2, pool_type='max', pool_stride=2, pool_padding=0)

        self.block3_input1 = Conv_Block(3, 128, 3, 1)
        self.block3_conv1 = Conv_Block(192, 128, 3, 1)
        self.block3_conv2 = Conv_Block(128, 128, 3, 1)
        self.block3_pool1 = Pool2D(pool_size=2, pool_type='max', pool_stride=2, pool_padding=0)

        self.block4_input1 = Conv_Block(3, 256, 3, 1)
        self.block4_conv1 = Conv_Block(384, 256, 3, 1)
        self.block4_conv2 = Conv_Block(256, 256, 3, 1)
        self.block4_pool1 = Pool2D(pool_size=2, pool_type='max', pool_stride=2, pool_padding=0)

        self.block5_conv1 = Conv_Block(256, 512, 3, 1)
        self.block5_conv2 = Conv_Block(512, 512, 3, 1)

        self.block6_dconv = Conv2DTranspose(512, 256, 2, stride=2)
        self.block6_conv1 = Conv_Block(512, 256, 3, 1)
        self.block6_conv2 = Conv_Block(256, 256, 3, 1)

        self.block7_dconv = Conv2DTranspose(256, 128, 2, stride=2)
        self.block7_conv1 = Conv_Block(256, 128, 3, 1)
        self.block7_conv2 = Conv_Block(128, 128, 3, 1)

        self.block8_dconv = Conv2DTranspose(128, 64, 2, stride=2)
        self.block8_conv1 = Conv_Block(128, 64, 3, 1)
        self.block8_conv2 = Conv_Block(64, 64, 3, 1)

        self.block9_dconv = Conv2DTranspose(64, 32, 2, stride=2)
        self.block9_conv1 = Conv_Block(64, 32, 3, 1)
        self.block9_conv2 = Conv_Block(32, 32, 3, 1)

        self.side63 = Conv2D(256, 2, 1, act='sigmoid')
        self.side73 = Conv2D(128, 2, 1, act='sigmoid')
        self.side83 = Conv2D(64, 2, 1, act='sigmoid')
        self.side93 = Conv2D(32, 2, 1, act='sigmoid')
Example 2
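A pix2pix-style U-Net generator: eight downsample blocks into a bottleneck, seven upsample blocks (the first three with the dropout flag set), and a final Conv2DTranspose with tanh activation mapping back to a 3-channel image.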
    def __init__(self, name_scope):
        super(generator, self).__init__(name_scope)

        self.down1 = downsample(self.full_name(), 64, 4, False)
        self.down2 = downsample(self.full_name(), 128, 4)
        self.down3 = downsample(self.full_name(), 256, 4)
        self.down4 = downsample(self.full_name(), 512, 4)
        self.down5 = downsample(self.full_name(), 512, 4)
        self.down6 = downsample(self.full_name(), 512, 4)
        self.down7 = downsample(self.full_name(), 512, 4)
        self.down8 = downsample(self.full_name(), 512, 4)

        self.up1 = upsample(self.full_name(), 512, 4, True)
        self.up2 = upsample(self.full_name(), 512, 4, True)
        self.up3 = upsample(self.full_name(), 512, 4, True)
        self.up4 = upsample(self.full_name(), 512, 4)
        self.up5 = upsample(self.full_name(), 256, 4)
        self.up6 = upsample(self.full_name(), 128, 4)
        self.up7 = upsample(self.full_name(), 64, 4)
        self.last = Conv2DTranspose(
            self.full_name(),
            num_filters=3,
            filter_size=4,
            stride=2,
            padding=1,
            use_cudnn=use_cudnn,
            param_attr=fluid.initializer.Normal(0.0, 0.2),
            act='tanh')
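The example ends at the constructor; the forward pass is not shown. Below is a minimal sketch of how such a U-Net is typically wired. The method body, the loop over blocks, and the use of fluid.layers.concat for the skip connections are my assumptions, not part of the quoted source (it also assumes the usual paddle.fluid imports used throughout these snippets):

    def forward(self, x):
        # Encoder: keep every intermediate activation as a skip connection.
        skips = []
        for down in [self.down1, self.down2, self.down3, self.down4,
                     self.down5, self.down6, self.down7, self.down8]:
            x = down(x)
            skips.append(x)
        # Decoder: concatenate each upsampled tensor with its mirror-image
        # skip; the bottleneck output itself has no skip partner.
        skips = skips[-2::-1]
        for up, skip in zip([self.up1, self.up2, self.up3, self.up4,
                             self.up5, self.up6, self.up7], skips):
            x = fluid.layers.concat([up(x), skip], axis=1)
        return self.last(x)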
Example 3
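A convolutional variational autoencoder for 1-channel inputs: two strided Conv2D layers encode, two parallel Linear heads of size 12544 -> 1024 presumably produce the latent mean and variance, and a Linear layer plus two Conv2DTranspose layers decode. The commented-out lines preserve earlier layer sizes the author experimented with.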
    def __init__(self, name_scope):
        super(VAE_CNN, self).__init__(name_scope)

        self.conv1 = Conv2D(num_channels=1,
                            num_filters=16,
                            filter_size=4,
                            stride=2,
                            padding=(15, 15),
                            act="relu")
        self.conv2 = Conv2D(num_channels=16,
                            num_filters=16,
                            filter_size=4,
                            stride=2,
                            padding=(15, 15),
                            act="relu")
        # self.conv3 = Conv2D(num_channels=64, num_filters=64, filter_size=4, stride=2, padding=(15,15), act="relu")
        self.fc_encode1 = Linear(12544, 1024, act="relu")
        self.fc_encode2 = Linear(12544, 1024, act="relu")

        self.fc_decode1 = Linear(1024, 12544, act="relu")
        # self.fc_decode1 = Linear(784, 25088, act="relu")
        # self.fc_decode2 = Linear(25088, 25088, act="relu")

        self.dconv1 = Conv2DTranspose(num_channels=16,
                                      num_filters=16,
                                      filter_size=4,
                                      stride=2,
                                      padding=(15, 15),
                                      act="relu")
        self.dconv2 = Conv2DTranspose(num_channels=16,
                                      num_filters=1,
                                      filter_size=4,
                                      stride=2,
                                      padding=(15, 15),
                                      act="relu")

        self.fc_decode3 = Linear(12544, 12544, act="relu")
        self.fc_decode4 = Linear(784, 784, act="sigmoid")
Example 4
A SubMobileResnetGenerator: the channel-configurable counterpart of the MobileResnetGenerator in Example 7, where every layer width is read from config['channels'] so the same constructor can build pruned sub-networks.
    def __init__(self, input_channel, output_nc, config, norm_layer=InstanceNorm, dropout_rate=0, n_blocks=9, padding_type='reflect'):
        super(SubMobileResnetGenerator, self).__init__()
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func == InstanceNorm
        else:
            use_bias = norm_layer == InstanceNorm

        self.model = fluid.dygraph.LayerList([Pad2D(paddings=[3, 3, 3, 3], mode="reflect"),
                           Conv2D(input_channel, config['channels'][0], filter_size=7, padding=0, use_cudnn=use_cudnn, bias_attr=use_bias),
                           norm_layer(config['channels'][0]),
                           ReLU()])

        n_downsampling = 2
        for i in range(n_downsampling):
            mult = 2 ** i
            in_c = config['channels'][i]
            out_c = config['channels'][i + 1]
            self.model.extend([Conv2D(in_c * mult, out_c * mult * 2, filter_size=3, stride=2, padding=1, use_cudnn=use_cudnn, bias_attr=use_bias),
                               norm_layer(out_c * mult * 2),
                               ReLU()])

        mult = 2 ** n_downsampling

        in_c = config['channels'][2]
        for i in range(n_blocks):
            if len(config['channels']) == 6:
                offset = 0
            else:
                offset = i // 3
            out_c = config['channels'][offset + 3]
            self.model.extend([MobileResnetBlock(in_c * mult, out_c * mult, padding_type=padding_type, norm_layer=norm_layer, dropout_rate=dropout_rate, use_bias=use_bias)])

        if len(config['channels']) == 6:
            offset = 4
        else:
            offset = 6
        for i in range(n_downsampling):
            out_c = config['channels'][offset + i]
            mult = 2 ** (n_downsampling - i)
            output_size = (i + 1) * 128
            self.model.extend([Conv2DTranspose(in_c * mult, int(out_c * mult / 2), filter_size=3, output_size=output_size, stride=2, padding=1, use_cudnn=use_cudnn, bias_attr=use_bias),
#            self.model.extend([Conv2DTranspose(in_c * mult, int(out_c * mult / 2), filter_size=3, stride=2, padding=1, use_cudnn=use_cudnn, bias_attr=use_bias),
#                               Pad2D(paddings=[0, 1, 0, 1], mode='constant', pad_value=0.0),
                               norm_layer(int(out_c * mult / 2)),
                               ReLU()])
            in_c = out_c

        self.model.extend([Pad2D(paddings=[3, 3, 3, 3], mode="reflect")])
        self.model.extend([Conv2D(in_c, output_nc, filter_size=7, padding=0)])
Example 5
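A DeConv2D wrapper in the older name-scope dygraph style: a normal-initialized Conv2DTranspose with optional constant-initialized bias, an optional BatchNorm, and bookkeeping attributes (outpadding, relufactor, and so on) stored for the forward pass.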
    def __init__(self,
                 name_scope,
                 num_filters=64,
                 filter_size=7,
                 stride=1,
                 stddev=0.02,
                 padding=[0, 0],
                 outpadding=[0, 0, 0, 0],
                 relu=True,
                 norm=True,
                 relufactor=0.0,
                 use_bias=False):
        super(DeConv2D, self).__init__(name_scope)

        if not use_bias:
            de_bias_attr = False
        else:
            de_bias_attr = fluid.ParamAttr(
                name="de_bias", initializer=fluid.initializer.Constant(0.0))

        self._deconv = Conv2DTranspose(
            self.full_name(),
            num_filters,
            filter_size=filter_size,
            stride=stride,
            padding=padding,
            param_attr=fluid.ParamAttr(
                name="this_is_deconv_weights",
                initializer=fluid.initializer.NormalInitializer(loc=0.0,
                                                                scale=stddev)),
            bias_attr=de_bias_attr)

        if norm:
            self.bn = BatchNorm(
                self.full_name(),
                num_channels=num_filters,
                param_attr=fluid.ParamAttr(
                    name="de_wights",
                    initializer=fluid.initializer.NormalInitializer(1.0,
                                                                    0.02)),
                bias_attr=fluid.ParamAttr(
                    name="de_bn_bias",
                    initializer=fluid.initializer.Constant(0.0)),
                trainable_statistics=True)
        self.outpadding = outpadding
        self.relufactor = relufactor
        self.use_bias = use_bias
        self.norm = norm
        self.relu = relu
Example 6
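The same DeConv2D pattern without name scopes. Note that normalization is disabled outright when Paddle is compiled with CUDA, and otherwise BatchNorm runs with use_global_stats=True for determinism.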
    def __init__(self,
                 num_channels,
                 num_filters=64,
                 filter_size=7,
                 stride=1,
                 stddev=0.02,
                 padding=[0, 0],
                 outpadding=[0, 0, 0, 0],
                 relu=True,
                 norm=True,
                 relufactor=0.0,
                 use_bias=False):
        super(DeConv2D, self).__init__()

        if not use_bias:
            de_bias_attr = False
        else:
            de_bias_attr = fluid.ParamAttr(
                initializer=fluid.initializer.Constant(0.0))

        self._deconv = Conv2DTranspose(
            num_channels,
            num_filters,
            filter_size=filter_size,
            stride=stride,
            padding=padding,
            use_cudnn=use_cudnn,
            param_attr=fluid.ParamAttr(
                initializer=fluid.initializer.NormalInitializer(loc=0.0,
                                                                scale=stddev)),
            bias_attr=de_bias_attr)
        if fluid.is_compiled_with_cuda():
            norm = False
        if norm:
            self.bn = BatchNorm(
                use_global_stats=True,  # set True to use deterministic algorithm
                num_channels=num_filters,
                param_attr=fluid.ParamAttr(initializer=fluid.initializer.
                                           NormalInitializer(1.0, 0.02)),
                bias_attr=fluid.ParamAttr(
                    initializer=fluid.initializer.Constant(0.0)),
                trainable_statistics=True)

        self.outpadding = outpadding
        self.relufactor = relufactor
        self.use_bias = use_bias
        self.norm = norm
        self.relu = relu
Example 7
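A MobileResnetGenerator in the CycleGAN mold: a reflection-padded 7x7 stem, two stride-2 downsampling convolutions, n_blocks MobileResnetBlocks split into three roughly equal groups, and two Conv2DTranspose upsampling stages with explicit output_size before the final 7x7 projection.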
    def __init__(self, input_channel, output_nc, ngf, norm_layer=InstanceNorm, dropout_rate=0, n_blocks=9, padding_type='reflect'):
        super(MobileResnetGenerator, self).__init__()
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func == InstanceNorm
        else:
            use_bias = norm_layer == InstanceNorm

        self.model = fluid.dygraph.LayerList([Pad2D(paddings=[3, 3, 3, 3], mode="reflect"),
                           Conv2D(input_channel, int(ngf), filter_size=7, padding=0, use_cudnn=use_cudnn, bias_attr=use_bias),
                           norm_layer(ngf),
                           ReLU()])

        n_downsampling = 2
        for i in range(n_downsampling):
            mult = 2 ** i
            self.model.extend([Conv2D(ngf * mult, ngf * mult * 2, filter_size=3, stride=2, padding=1, use_cudnn=use_cudnn, bias_attr=use_bias),
                               norm_layer(ngf * mult * 2),
                               ReLU()])

        mult = 2 ** n_downsampling

        n_blocks1 = n_blocks // 3
        n_blocks2 = n_blocks1
        n_blocks3 = n_blocks - n_blocks1 - n_blocks2

        for i in range(n_blocks1):
            self.model.extend([MobileResnetBlock(ngf * mult, ngf * mult, padding_type=padding_type, norm_layer=norm_layer, dropout_rate=dropout_rate, use_bias=use_bias)])

        for i in range(n_blocks2):
            self.model.extend([MobileResnetBlock(ngf * mult, ngf * mult, padding_type=padding_type, norm_layer=norm_layer, dropout_rate=dropout_rate, use_bias=use_bias)])

        for i in range(n_blocks3):
            self.model.extend([MobileResnetBlock(ngf * mult, ngf * mult, padding_type=padding_type, norm_layer=norm_layer, dropout_rate=dropout_rate, use_bias=use_bias)])

        for i in range(n_downsampling):
            mult = 2 ** (n_downsampling - i)
            output_size = (i + 1) * 128
            self.model.extend([Conv2DTranspose(ngf * mult, int(ngf * mult / 2), filter_size=3, output_size=output_size, stride=2, padding=1, use_cudnn=use_cudnn, bias_attr=use_bias),
                               norm_layer(int(ngf * mult / 2)),
                               ReLU()])

        self.model.extend([Pad2D(paddings=[3, 3, 3, 3], mode="reflect")])
        self.model.extend([Conv2D(ngf, output_nc, filter_size=7, padding=0)])
Example 8
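Another DeConv2D variant, here Xavier-initialized throughout, with BatchNorm using momentum 0.99 and an is_test flag driven by the testing argument.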
    def __init__(self,
                 name_scope,
                 num_filters=64,
                 filter_size=5,
                 stride=1,
                 padding=[0, 0],
                 outpadding=[0, 0, 0, 0],
                 relu=False,
                 norm=False,
                 relufactor=0.3,
                 testing=False,
                 use_bias=True):
        super(DeConv2D, self).__init__(name_scope)

        if not use_bias:
            de_bias_attr = False
        else:
            de_bias_attr = fluid.initializer.Xavier()

        self._deconv = Conv2DTranspose(self.full_name(),
                                       num_filters,
                                       filter_size=filter_size,
                                       stride=stride,
                                       padding=padding,
                                       param_attr=fluid.initializer.Xavier(),
                                       bias_attr=de_bias_attr)

        if norm:
            self.bn = BatchNorm(self.full_name(),
                                momentum=0.99,
                                is_test=testing,
                                num_channels=num_filters,
                                param_attr=fluid.initializer.Xavier(),
                                bias_attr=fluid.initializer.Xavier(),
                                trainable_statistics=True)

        self.outpadding = outpadding
        self.relufactor = relufactor
        self.use_bias = use_bias
        self.norm = norm
        self.relu = relu
        self.testing = testing
Example 9
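The upsample block used by the generator in Example 2: a bias-free, normal-initialized Conv2DTranspose followed by BatchNorm, with an apply_dropout flag stored for the forward pass.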
    def __init__(self, name_scope, filters, size, apply_dropout=False):
        super(upsample, self).__init__(name_scope)

        self.deconv1 = Conv2DTranspose(
            self.full_name(),
            num_filters=filters,
            filter_size=size,
            stride=2,
            padding=1,
            use_cudnn=use_cudnn,
            param_attr=fluid.initializer.Normal(),
            bias_attr=False)

        self.bn1 = BatchNorm(
            self.full_name(), 
            num_channels=filters)

        self.filters = filters
        self.size = size
        self.apply_dropout = apply_dropout
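Again only the constructor is quoted. A plausible forward for this block follows the usual pix2pix upsample recipe (deconv, then batch norm, then optional dropout, then ReLU); the method body below is my assumption, not part of the source:

    def forward(self, x):
        x = self.deconv1(x)
        x = self.bn1(x)
        if self.apply_dropout:
            x = fluid.layers.dropout(x, dropout_prob=0.5)
        return fluid.layers.relu(x)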
Example 10
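A DeConvBN block: normal-initialized Conv2DTranspose, optional BatchNorm with normal-initialized scale and zero-initialized shift, and a stored activation name (leaky_relu by default) for the forward pass.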
    def __init__(self,
                 num_channels,
                 num_filters,
                 filter_size,
                 stride=1,
                 padding=[0, 0],
                 outpadding=[0, 0, 0, 0],
                 stddev=0.02,
                 act='leaky_relu',
                 norm=True,
                 is_test=False,
                 relufactor=0.0,
                 use_bias=False):
        super(DeConvBN, self).__init__()

        pattr = fluid.ParamAttr(
            initializer=fluid.initializer.NormalInitializer(loc=0.0,
                                                            scale=stddev))
        self._deconv = Conv2DTranspose(num_channels,
                                       num_filters,
                                       filter_size=filter_size,
                                       stride=stride,
                                       padding=padding,
                                       param_attr=pattr,
                                       bias_attr=use_bias)
        if norm:
            self.bn = BatchNorm(
                num_filters,
                param_attr=fluid.ParamAttr(initializer=fluid.initializer.
                                           NormalInitializer(1.0, 0.02)),
                bias_attr=fluid.ParamAttr(
                    initializer=fluid.initializer.Constant(0.0)),
                is_test=is_test,
                trainable_statistics=True)
        self.outpadding = outpadding
        self.relufactor = relufactor
        self.use_bias = use_bias
        self.norm = norm
        self.act = act
Example 11
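A DeConv2D supporting both InstanceNorm (with non-trainable affine parameters) and BatchNorm, applying outpadding through an explicit Pad2D layer and choosing between ReLU and leaky ReLU at construction time.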
    def __init__(self,
                 num_channels,
                 num_filters=64,
                 filter_size=7,
                 stride=1,
                 stddev=0.02,
                 padding=[0, 0],
                 outpadding=[0, 0, 0, 0],
                 relu=True,
                 norm=True,
                 norm_layer=InstanceNorm,
                 relufactor=0.0,
                 use_bias=False):
        super(DeConv2D, self).__init__()

        if not use_bias:
            de_bias_attr = False
        else:
            de_bias_attr = fluid.ParamAttr(
                initializer=fluid.initializer.Constant(0.0))

        self._deconv = Conv2DTranspose(
            num_channels,
            num_filters,
            filter_size=filter_size,
            stride=stride,
            padding=padding,
            param_attr=fluid.ParamAttr(
                initializer=fluid.initializer.NormalInitializer(loc=0.0,
                                                                scale=stddev)),
            bias_attr=de_bias_attr)

        self.pad = Pad2D(paddings=outpadding, mode='constant', pad_value=0.0)
        if norm_layer == InstanceNorm:
            self.bn = InstanceNorm(
                num_channels=num_filters,
                param_attr=fluid.ParamAttr(
                    initializer=fluid.initializer.Constant(1.0),
                    trainable=False),
                bias_attr=fluid.ParamAttr(
                    initializer=fluid.initializer.Constant(0.0),
                    trainable=False),
            )
        elif norm_layer == BatchNorm:
            self.bn = BatchNorm(
                num_channels=num_filters,
                param_attr=fluid.ParamAttr(initializer=fluid.initializer.
                                           NormalInitializer(1.0, 0.02)),
                bias_attr=fluid.ParamAttr(
                    initializer=fluid.initializer.Constant(0.0)),
            )
        else:
            raise NotImplementedError

        self.outpadding = outpadding
        self.relufactor = relufactor
        self.use_bias = use_bias
        self.norm = norm
        self.relu = relu
        if relu:
            if relufactor == 0.0:
                self.lrelu = ReLU()
            else:
                self.lrelu = Leaky_ReLU(self.relufactor)
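The forward pass is once more omitted. Based on the attributes this constructor stores, it plausibly looks like the sketch below; the exact order of padding, normalization, and activation is an assumption on my part:

    def forward(self, x):
        x = self._deconv(x)
        x = self.pad(x)        # apply outpadding via the Pad2D layer
        if self.norm:
            x = self.bn(x)     # InstanceNorm or BatchNorm, per norm_layer
        if self.relu:
            x = self.lrelu(x)  # ReLU, or leaky ReLU when relufactor != 0
        return x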