Example #1
    def __init__(self, conv_dim=64, repeat_num=3, w=0.01):
        super(MANet, self).__init__()
        self.encoder = TNetDown(conv_dim=conv_dim, repeat_num=repeat_num)
        curr_dim = conv_dim * 4
        self.w = w
        self.beta = nn.Conv2D(curr_dim, curr_dim, kernel_size=3, padding=1)
        self.gamma = nn.Conv2D(curr_dim, curr_dim, kernel_size=3, padding=1)
        self.simple_spade = GetMatrix(curr_dim, 1)  # get the makeup matrix
        self.repeat_num = repeat_num
        for i in range(repeat_num):
            setattr(self, "bottlenecks_" + str(i),
                    ResidualBlock(dim_in=curr_dim, dim_out=curr_dim, mode='t'))
        # Up-Sampling
        self.upsamplers = []
        self.up_betas = []
        self.up_gammas = []
        self.up_acts = []
        y_dim = curr_dim
        for i in range(2):
            layers = []
            layers.append(
                nn.Conv2DTranspose(curr_dim,
                                   curr_dim // 2,
                                   kernel_size=4,
                                   stride=2,
                                   padding=1,
                                   bias_attr=False))
            layers.append(
                nn.InstanceNorm2D(curr_dim // 2,
                                  weight_attr=False,
                                  bias_attr=False))

            setattr(self, "up_acts_" + str(i), nn.ReLU())
            setattr(
                self, "up_betas_" + str(i),
                nn.Conv2DTranspose(y_dim,
                                   curr_dim // 2,
                                   kernel_size=4,
                                   stride=2,
                                   padding=1))
            setattr(
                self, "up_gammas_" + str(i),
                nn.Conv2DTranspose(y_dim,
                                   curr_dim // 2,
                                   kernel_size=4,
                                   stride=2,
                                   padding=1))
            setattr(self, "up_samplers_" + str(i), nn.Sequential(*layers))
            curr_dim = curr_dim // 2
        self.img_reg = [
            nn.Conv2D(curr_dim,
                      3,
                      kernel_size=7,
                      stride=1,
                      padding=3,
                      bias_attr=False)
        ]
        self.img_reg = nn.Sequential(*self.img_reg)
Example #2
 def __init__(self, in_channels=3, num_classes=2):
     super().__init__()
     self.backbone = Vgg16Base(in_channels=in_channels)
     self.sa1 = SAM()
     self.sa2 = SAM()
     self.sa3 = SAM()
     self.sa4 = SAM()
     self.sa5 = SAM()
     # branch1
     self.ca1 = CAM(in_channels=1024, ratio=8)
     self.bn_ca1 = nn.BatchNorm(1024)
     self.o1_conv1 = CPBD(1024, 512)
     self.o1_conv2 = CPBD(512, 512)
     self.bn_sa1 = nn.BatchNorm(512)
     self.o1_conv3 = nn.Conv2D(512, num_classes, 1)
     self.trans_conv1 = nn.Conv2DTranspose(512,
                                           512,
                                           kernel_size=2,
                                           stride=2)
     # branch 2
     self.ca2 = CAM(in_channels=1536, ratio=8)
     self.bn_ca2 = nn.BatchNorm(1536)
     self.o2_conv1 = CPBD(1536, 512)
     self.o2_conv2 = CPBD(512, 256)
     self.o2_conv3 = CPBD(256, 256)
     self.bn_sa2 = nn.BatchNorm(256)
     self.o2_conv4 = nn.Conv2D(256, num_classes, 1)
     self.trans_conv2 = nn.Conv2DTranspose(256,
                                           256,
                                           kernel_size=2,
                                           stride=2)
     # branch 3
     self.ca3 = CAM(in_channels=768, ratio=8)
     self.o3_conv1 = CPBD(768, 256)
     self.o3_conv2 = CPBD(256, 128)
     self.o3_conv3 = CPBD(128, 128)
     self.bn_sa3 = nn.BatchNorm(128)
     self.o3_conv4 = nn.Conv2D(128, num_classes, 1)
     self.trans_conv3 = nn.Conv2DTranspose(128,
                                           128,
                                           kernel_size=2,
                                           stride=2)
     # branch 4
     self.ca4 = CAM(in_channels=384, ratio=8)
     self.o4_conv1 = CPBD(384, 128)
     self.o4_conv2 = CPBD(128, 64)
     self.o4_conv3 = CPBD(64, 64)
     self.bn_sa4 = nn.BatchNorm(64)
     self.o4_conv4 = nn.Conv2D(64, num_classes, 1)
     self.trans_conv4 = nn.Conv2DTranspose(64, 64, kernel_size=2, stride=2)
     # branch 5
     self.ca5 = CAM(in_channels=192, ratio=8)
     self.o5_conv1 = CPBD(192, 64)
     self.o5_conv2 = CPBD(64, 32)
     self.o5_conv3 = CPBD(32, 16)
     self.bn_sa5 = nn.BatchNorm(16)
     self.o5_conv4 = nn.Conv2D(16, num_classes, 1)
Example #3
 def __init__(self):
     super(Generator, self).__init__()
     self.gen = nn.Sequential(
         nn.Conv2DTranspose(100, 64 * 4, 4, 1, 0, bias_attr=False),
         nn.BatchNorm2D(64 * 4), nn.ReLU(True),
         nn.Conv2DTranspose(64 * 4, 64 * 2, 4, 2, 1, bias_attr=False),
         nn.BatchNorm2D(64 * 2), nn.ReLU(True),
         nn.Conv2DTranspose(64 * 2, 64, 4, 2, 1, bias_attr=False),
         nn.BatchNorm2D(64), nn.ReLU(True),
         nn.Conv2DTranspose(64, 1, 4, 2, 1, bias_attr=False), nn.Tanh())
Example #4
 def __init__(self, in_channels, name_list):
     super(Head, self).__init__()
     self.conv1 = nn.Conv2D(in_channels=in_channels,
                            out_channels=in_channels // 4,
                            kernel_size=3,
                            padding=1,
                            weight_attr=ParamAttr(name=name_list[0] +
                                                  '.w_0'),
                            bias_attr=False)
     self.conv_bn1 = nn.BatchNorm(
         num_channels=in_channels // 4,
         param_attr=ParamAttr(
             name=name_list[1] + '.w_0',
             initializer=paddle.nn.initializer.Constant(value=1.0)),
         bias_attr=ParamAttr(
             name=name_list[1] + '.b_0',
             initializer=paddle.nn.initializer.Constant(value=1e-4)),
         moving_mean_name=name_list[1] + '.w_1',
         moving_variance_name=name_list[1] + '.w_2',
         act='relu')
     self.conv2 = nn.Conv2DTranspose(
         in_channels=in_channels // 4,
         out_channels=in_channels // 4,
         kernel_size=2,
         stride=2,
         weight_attr=ParamAttr(
             name=name_list[2] + '.w_0',
             initializer=paddle.nn.initializer.KaimingUniform()),
         bias_attr=get_bias_attr(in_channels // 4, name_list[-1] + "conv2"))
     self.conv_bn2 = nn.BatchNorm(
         num_channels=in_channels // 4,
         param_attr=ParamAttr(
             name=name_list[3] + '.w_0',
             initializer=paddle.nn.initializer.Constant(value=1.0)),
         bias_attr=ParamAttr(
             name=name_list[3] + '.b_0',
             initializer=paddle.nn.initializer.Constant(value=1e-4)),
         moving_mean_name=name_list[3] + '.w_1',
         moving_variance_name=name_list[3] + '.w_2',
         act="relu")
     self.conv3 = nn.Conv2DTranspose(
         in_channels=in_channels // 4,
         out_channels=1,
         kernel_size=2,
         stride=2,
         weight_attr=ParamAttr(
             name=name_list[4] + '.w_0',
             initializer=paddle.nn.initializer.KaimingUniform()),
         bias_attr=get_bias_attr(in_channels // 4, name_list[-1] + "conv3"),
     )
Example #5
    def __init__(self,
                 num_classes,
                 in_channels=3,
                 level2_depth=2,
                 level3_depth=3,
                 pretrained=None):
        super().__init__()
        self.encoder = ESPNetEncoder(num_classes, in_channels, level2_depth,
                                     level3_depth)

        self.level3_up = nn.Conv2DTranspose(num_classes,
                                            num_classes,
                                            2,
                                            stride=2,
                                            padding=0,
                                            output_padding=0,
                                            bias_attr=False)
        self.br3 = layers.SyncBatchNorm(num_classes)
        self.level2_proj = nn.Conv2D(in_channels + 128,
                                     num_classes,
                                     1,
                                     bias_attr=False)
        self.combine_l2_l3 = nn.Sequential(
            BNPReLU(2 * num_classes),
            DilatedResidualBlock(2 * num_classes, num_classes, residual=False),
        )
        self.level2_up = nn.Sequential(
            nn.Conv2DTranspose(num_classes,
                               num_classes,
                               2,
                               stride=2,
                               padding=0,
                               output_padding=0,
                               bias_attr=False),
            BNPReLU(num_classes),
        )
        self.out_proj = layers.ConvBNPReLU(16 + in_channels + num_classes,
                                           num_classes,
                                           3,
                                           padding='same',
                                           stride=1)
        self.out_up = nn.Conv2DTranspose(num_classes,
                                         num_classes,
                                         2,
                                         stride=2,
                                         padding=0,
                                         output_padding=0,
                                         bias_attr=False)
        self.pretrained = pretrained
Example #6
    def __init__(self):
        super(ModelConv, self).__init__()
        with supernet(kernel_size=(3, 5, 7),
                      channel=((4, 8, 12), (8, 12, 16), (8, 12, 16),
                               (8, 12, 16))) as ofa_super:
            models = []
            models += [nn.Conv2D(3, 4, 3, padding=1)]
            models += [nn.InstanceNorm2D(4)]
            models += [ReLU()]
            models += [nn.Conv2D(4, 4, 3, groups=4)]
            models += [nn.InstanceNorm2D(4)]
            models += [ReLU()]
            models += [nn.Conv2DTranspose(4, 4, 3, groups=4, padding=1)]
            models += [nn.BatchNorm2D(4)]
            models += [ReLU()]
            models += [nn.Conv2D(4, 3, 3)]
            models += [ReLU()]
            models = ofa_super.convert(models)

        models += [
            Block(SuperSeparableConv2D(3,
                                       6,
                                       1,
                                       padding=1,
                                       candidate_config={'channel': (3, 6)}),
                  fixed=True)
        ]
        with supernet(kernel_size=(3, 5, 7),
                      expand_ratio=(1, 2, 4)) as ofa_super:
            models1 = []
            models1 += [nn.Conv2D(6, 4, 3)]
            models1 += [nn.BatchNorm2D(4)]
            models1 += [ReLU()]
            models1 += [nn.Conv2D(4, 4, 3, groups=2)]
            models1 += [nn.InstanceNorm2D(4)]
            models1 += [ReLU()]
            models1 += [nn.Conv2DTranspose(4, 4, 3, groups=2)]
            models1 += [nn.BatchNorm2D(4)]
            models1 += [ReLU()]
            models1 += [nn.Conv2DTranspose(4, 4, 3)]
            models1 += [nn.BatchNorm2D(4)]
            models1 += [ReLU()]
            models1 += [nn.Conv2DTranspose(4, 4, 1)]
            models1 += [nn.BatchNorm2D(4)]
            models1 += [ReLU()]
            models1 = ofa_super.convert(models1)

        models += models1
        self.models = paddle.nn.Sequential(*models)
Example #7
    def __init__(
            self,
            in_channels,
            out_channel,
            up_f,  # todo: what is up_f here?
            norm_func):
        super().__init__()

        for i in range(1, len(in_channels)):
            in_channel = in_channels[i]
            f = int(up_f[i])

            #USE_DEFORMABLE_CONV = False

            # so far only supports normal convolution
            proj = NormalConv(in_channel, out_channel, norm_func)
            node = NormalConv(out_channel, out_channel, norm_func)

            up = nn.Conv2DTranspose(out_channel,
                                    out_channel,
                                    kernel_size=f * 2,
                                    stride=f,
                                    padding=f // 2,
                                    output_padding=0,
                                    groups=out_channel,
                                    bias_attr=False)
            # todo: uncomment later
            # _fill_up_weights(up)

            setattr(self, 'proj_' + str(i), proj)
            setattr(self, 'up_' + str(i), up)
            setattr(self, 'node_' + str(i), node)
Example #8
    def __init__(self, input_nc=3, output_nc=3, ngf=64):
        super(UnetGenerator, self).__init__()

        self.down1 = nn.Conv2D(input_nc, ngf, kernel_size=4, stride=2, padding=1)
        self.down2 = Downsample(ngf, ngf*2)
        self.down3 = Downsample(ngf*2, ngf*4)
        self.down4 = Downsample(ngf*4, ngf*8)
        self.down5 = Downsample(ngf*8, ngf*8)
        self.down6 = Downsample(ngf*8, ngf*8)
        self.down7 = Downsample(ngf*8, ngf*8)

        self.center = Downsample(ngf*8, ngf*8)

        self.up7 = Upsample(ngf*8, ngf*8, use_dropout=True)
        self.up6 = Upsample(ngf*8*2, ngf*8, use_dropout=True)
        self.up5 = Upsample(ngf*8*2, ngf*8, use_dropout=True)
        self.up4 = Upsample(ngf*8*2, ngf*8)
        self.up3 = Upsample(ngf*8*2, ngf*4)
        self.up2 = Upsample(ngf*4*2, ngf*2)
        self.up1 = Upsample(ngf*2*2, ngf)

        self.output_block = nn.Sequential(
            nn.ReLU(),
            nn.Conv2DTranspose(ngf*2, output_nc, kernel_size=4, stride=2, padding=1),
            nn.Tanh()
        )
Example #9
    def __init__(self, num_convs=0, in_channels=2048, out_channels=256):
        super(MaskFeat, self).__init__()
        self.num_convs = num_convs
        self.in_channels = in_channels
        self.out_channels = out_channels
        fan_conv = out_channels * 3 * 3
        fan_deconv = out_channels * 2 * 2

        mask_conv = nn.Sequential()
        for i in range(self.num_convs):
            conv_name = 'mask_inter_feat_{}'.format(i + 1)
            mask_conv.add_sublayer(
                conv_name,
                nn.Conv2D(in_channels=in_channels if i == 0 else out_channels,
                          out_channels=out_channels,
                          kernel_size=3,
                          padding=1,
                          weight_attr=paddle.ParamAttr(
                              initializer=KaimingNormal(fan_in=fan_conv))))
            mask_conv.add_sublayer(conv_name + 'act', nn.ReLU())
        mask_conv.add_sublayer(
            'conv5_mask',
            nn.Conv2DTranspose(
                in_channels=self.in_channels,
                out_channels=self.out_channels,
                kernel_size=2,
                stride=2,
                weight_attr=paddle.ParamAttr(initializer=KaimingNormal(
                    fan_in=fan_deconv))))
        mask_conv.add_sublayer('conv5_mask' + 'act', nn.ReLU())
        self.upsample = mask_conv
Example #10
    def __init__(self, ch_in, ch_out, norm_type='bn'):
        super(DeConv, self).__init__()
        self.deconv = nn.Sequential()
        conv1 = ConvNormLayer(ch_in=ch_in,
                              ch_out=ch_out,
                              stride=1,
                              filter_size=1,
                              norm_type=norm_type,
                              initializer=XavierUniform())
        conv2 = nn.Conv2DTranspose(
            in_channels=ch_out,
            out_channels=ch_out,
            kernel_size=4,
            padding=1,
            stride=2,
            groups=ch_out,
            weight_attr=ParamAttr(initializer=XavierUniform()),
            bias_attr=False)
        bn = batch_norm(ch_out, norm_type=norm_type, norm_decay=0.)
        conv3 = ConvNormLayer(ch_in=ch_out,
                              ch_out=ch_out,
                              stride=1,
                              filter_size=1,
                              norm_type=norm_type,
                              initializer=XavierUniform())

        self.deconv.add_sublayer('conv1', conv1)
        self.deconv.add_sublayer('relu6_1', nn.ReLU6())
        self.deconv.add_sublayer('conv2', conv2)
        self.deconv.add_sublayer('bn', bn)
        self.deconv.add_sublayer('relu6_2', nn.ReLU6())
        self.deconv.add_sublayer('conv3', conv3)
        self.deconv.add_sublayer('relu6_3', nn.ReLU6())
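
Note on the block above: since conv2 sets groups=ch_out with matching input and output channels, the transposed convolution is depthwise (one 4 x 4 filter per channel, no cross-channel mixing), while the 1 x 1 ConvNormLayer before and after it handle the channel mixing; kernel_size=4 with stride=2 and padding=1 doubles the spatial size, since (H - 1) * 2 - 2 + 4 = 2H.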
Example #11
def ConvTranspose2d(in_channels,
                    out_channels,
                    kernel_size,
                    stride=1,
                    padding=0,
                    output_padding=0,
                    groups=1,
                    bias=True,
                    dilation=1,
                    weight_init=Normal(std=0.001),
                    bias_init=Constant(0.)):
    weight_attr = paddle.framework.ParamAttr(initializer=weight_init)
    if bias:
        bias_attr = paddle.framework.ParamAttr(initializer=bias_init)
    else:
        bias_attr = False
    conv = nn.Conv2DTranspose(in_channels,
                              out_channels,
                              kernel_size,
                              stride=stride,
                              padding=padding,
                              output_padding=output_padding,
                              dilation=dilation,
                              groups=groups,
                              weight_attr=weight_attr,
                              bias_attr=bias_attr)
    return conv
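
A minimal usage sketch of the factory above (hypothetical shapes; assumes paddle is importable and ConvTranspose2d is in scope):

import paddle

# 16 -> 8 channels; spatial size doubles: (32 - 1) * 2 - 2 * 1 + 4 = 64
deconv = ConvTranspose2d(16, 8, kernel_size=4, stride=2, padding=1, bias=False)
x = paddle.randn([2, 16, 32, 32])
y = deconv(x)
print(y.shape)  # [2, 8, 64, 64]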
Example #12
 def __init__(self, z_dim, channels_img, features_g):
     super(Generator, self).__init__()
     self.gen = nn.Sequential(
         # Input: N x z_dim x 1 x 1
         self._block(z_dim, features_g * 64, 4, 1, 0),            # N x f_g*64 x 4 x 4
         self._block(features_g * 64, features_g * 32, 4, 2, 1),  # N x f_g*32 x 8 x 8
         self._block(features_g * 32, features_g * 16, 4, 2, 1),  # N x f_g*16 x 16 x 16
         self._block(features_g * 16, features_g * 8, 4, 2, 1),   # N x f_g*8 x 32 x 32
         self._block(features_g * 8, features_g * 4, 4, 2, 1),    # N x f_g*4 x 64 x 64
         self._block(features_g * 4, features_g * 2, 4, 2, 1),    # N x f_g*2 x 128 x 128
         nn.Conv2DTranspose(
             features_g * 2,
             channels_img,
             kernel_size=4,
             stride=2,
             padding=1,
             bias_attr=False,
             weight_attr=paddle.ParamAttr(initializer=conv_initializer())),
         nn.Tanh()  # [-1, 1]
     )
Example #13
 def __init__(self,
              in_channels,
              out_channels,
              kernel_size,
              stride,
              padding,
              groups=1,
              if_act=True,
              act=None,
              name=None):
     super(DeConvBNLayer, self).__init__()
     self.if_act = if_act
     self.act = act
     self.deconv = nn.Conv2DTranspose(
         in_channels=in_channels,
         out_channels=out_channels,
         kernel_size=kernel_size,
         stride=stride,
         padding=padding,
         groups=groups,
         weight_attr=ParamAttr(name=name + '_weights'),
         bias_attr=False)
     self.bn = nn.BatchNorm(
         num_channels=out_channels,
         act=act,
         param_attr=ParamAttr(name="bn_" + name + "_scale"),
         bias_attr=ParamAttr(name="bn_" + name + "_offset"),
         moving_mean_name="bn_" + name + "_mean",
         moving_variance_name="bn_" + name + "_variance")
Example #14
 def __init__(self, in_channels, bilinear=False):
     super(UpSample, self).__init__()
     if bilinear:
         self.up = nn.Upsample(scale_factor=2,
                               mode='bilinear',
                               align_corners=True)
     else:
         self.up = nn.Conv2DTranspose(in_channels, in_channels, kernel_size=2, stride=2)
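
Both branches above double the spatial resolution: nn.Upsample with scale_factor=2 maps H x W to 2H x 2W, and the transposed convolution with kernel_size=2 and stride=2 (no padding) does the same, since (H - 1) * 2 + 2 = 2H; the learned path simply adds trainable weights.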
Example #15
    def __init__(self):
        super(ModelConv2, self).__init__()
        with supernet(expand_ratio=(1, 2, 4)) as ofa_super:
            models = []
            models += [
                nn.Conv2DTranspose(
                    4, 4, 3, weight_attr=paddle.ParamAttr(name='conv1_w'))
            ]
            models += [
                nn.BatchNorm2D(
                    4,
                    weight_attr=paddle.ParamAttr(name='bn1_w'),
                    bias_attr=paddle.ParamAttr(name='bn1_b'))
            ]
            models += [ReLU()]
            models += [nn.Conv2D(4, 4, 3)]
            models += [nn.BatchNorm2D(4)]
            models += [ReLU()]
            models = ofa_super.convert(models)

        with supernet(channel=((4, 6, 8), (4, 6, 8))) as ofa_super:
            models1 = []
            models1 += [nn.Conv2DTranspose(4, 4, 3)]
            models1 += [nn.BatchNorm2D(4)]
            models1 += [ReLU()]
            models1 += [nn.Conv2DTranspose(4, 4, 3)]
            models1 += [nn.BatchNorm2D(4)]
            models1 += [ReLU()]
            models1 = ofa_super.convert(models1)
        models += models1

        with supernet(kernel_size=(3, 5, 7)) as ofa_super:
            models2 = []
            models2 += [nn.Conv2D(4, 4, 3)]
            models2 += [nn.BatchNorm2D(4)]
            models2 += [ReLU()]
            models2 += [nn.Conv2DTranspose(4, 4, 3)]
            models2 += [nn.BatchNorm2D(4)]
            models2 += [ReLU()]
            models2 += [nn.Conv2D(4, 4, 3)]
            models2 += [nn.BatchNorm2D(4)]
            models2 += [ReLU()]
            models2 = ofa_super.convert(models2)

        models += models2
        self.models = paddle.nn.Sequential(*models)
Example #16
 def __init__(self, ):
     super(Generator, self).__init__()
     self.gen = nn.Sequential(
         # input is Z, [B, 100, 1, 1] -> [B, 64 * 4, 4, 4]
         nn.Conv2DTranspose(100, 64 * 4, 4, 1, 0, bias_attr=False),
         nn.BatchNorm2D(64 * 4),
         nn.ReLU(True),
         # state size. [B, 64 * 4, 4, 4] -> [B, 64 * 2, 8, 8]
         nn.Conv2DTranspose(64 * 4, 64 * 2, 4, 2, 1, bias_attr=False),
         nn.BatchNorm2D(64 * 2),
         nn.ReLU(True),
         # state size. [B, 64 * 2, 8, 8] -> [B, 64, 16, 16]
         nn.Conv2DTranspose(64 * 2, 64, 4, 2, 1, bias_attr=False),
         nn.BatchNorm2D(64),
         nn.ReLU(True),
         # state size. [B, 64, 16, 16] -> [B, 1, 32, 32]
         nn.Conv2DTranspose(64, 1, 4, 2, 1, bias_attr=False),
         nn.Tanh())
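
The shape comments above follow the transposed-convolution output size out = (in - 1) * stride - 2 * padding + kernel_size (dilation 1, no output padding): 4, then 8, 16, and 32. A quick check, assuming paddle is importable and the Generator class above is in scope:

import paddle

g = Generator()
z = paddle.randn([8, 100, 1, 1])
out = g.gen(z)
print(out.shape)  # [8, 1, 32, 32]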
Example #17
 def __init__(self, in_channels, n_filters):
     super(DecoderBlock, self).__init__()
     self.conv1 = nn.Conv2D(in_channels, in_channels//4, 1)
     self.norm1 = nn.BatchNorm(in_channels//4)
     self.scse = SCSEBlock(in_channels//4)
     self.deconv2 = nn.Conv2DTranspose(in_channels//4, in_channels//4, 3, stride=2, padding=1, output_padding=1)
     self.norm2 = nn.BatchNorm(in_channels//4)
     self.conv3 = nn.Conv2D(in_channels//4, n_filters, 1)
     self.norm3 = nn.BatchNorm(n_filters)
Example #18
 def test_case(self):
     paddle.disable_static(paddle.NPUPlace(0))
     x_var = paddle.uniform((2, 4, 8, 8), dtype='float32', min=-1., max=1.)
     conv = nn.Conv2DTranspose(4, 6, (3, 3), output_padding=1, stride=2)
     print(conv)
     y_var = conv(x_var)
     y_np = y_var.numpy()
     self.assertIsNotNone(y_np)
     paddle.enable_static()
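
For the test above, the expected output spatial size is (8 - 1) * 2 + (3 - 1) + 1 + 1 = 18 with the default dilation of 1, so y_np should have shape (2, 6, 18, 18); without output_padding=1 it would be 17 x 17.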
Example #19
    def __init__(self, input_nc, output_nc, ngf=64, norm_type='instance', use_dropout=False, n_blocks=6, padding_type='reflect'):
        """Construct a Resnet-based generator

        Args:
            input_nc (int)      -- the number of channels in input images
            output_nc (int)     -- the number of channels in output images
            ngf (int)           -- the number of filters in the last conv layer
            norm_type (str)     -- the type of normalization layer
            use_dropout (bool)  -- if use dropout layers
            n_blocks (int)      -- the number of ResNet blocks
            padding_type (str)  -- the name of padding layer in conv layers: reflect | replicate | zero
        """
        assert(n_blocks >= 0)
        super(ResnetGenerator, self).__init__()

        norm_layer = build_norm_layer(norm_type)
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func == nn.InstanceNorm
        else:
            use_bias = norm_layer == nn.InstanceNorm

        model = [ReflectionPad2d(3),
                 nn.Conv2D(input_nc, ngf, filter_size=7, padding=0, bias_attr=use_bias),
                 norm_layer(ngf),
                 nn.ReLU()]

        n_downsampling = 2
        for i in range(n_downsampling):  # add downsampling layers
            mult = 2 ** i
            model += [
                      nn.Conv2D(ngf * mult, ngf * mult * 2, filter_size=3, stride=2, padding=1, bias_attr=use_bias),
                      norm_layer(ngf * mult * 2),
                      nn.ReLU()]

        mult = 2 ** n_downsampling
        for i in range(n_blocks):       # add ResNet blocks

            model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)]

        for i in range(n_downsampling):  # add upsampling layers
            mult = 2 ** (n_downsampling - i)
            model += [
                      nn.Conv2DTranspose(ngf * mult, int(ngf * mult / 2),
                                         filter_size=3, stride=2,
                                         padding=1, 
                                         bias_attr=use_bias),
                      Pad2D(paddings=[0, 1, 0, 1], mode='constant', pad_value=0.0),
                      norm_layer(int(ngf * mult / 2)),
                      nn.ReLU()]
        model += [ReflectionPad2d(3)]
        model += [nn.Conv2D(ngf, output_nc, filter_size=7, padding=0)]
        model += [Tanh()]

        self.model = nn.Sequential(*model)
Example #20
    def __init__(self,
                 num_convs=4,
                 in_channels=256,
                 out_channels=256,
                 norm_type=None):
        super(MaskFeat, self).__init__()
        self.num_convs = num_convs
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.norm_type = norm_type
        fan_conv = out_channels * 3 * 3
        fan_deconv = out_channels * 2 * 2

        mask_conv = nn.Sequential()
        if norm_type == 'gn':
            for i in range(self.num_convs):
                conv_name = 'mask_inter_feat_{}'.format(i + 1)
                mask_conv.add_sublayer(
                    conv_name,
                    ConvNormLayer(
                        ch_in=in_channels if i == 0 else out_channels,
                        ch_out=out_channels,
                        filter_size=3,
                        stride=1,
                        norm_type=self.norm_type,
                        norm_name=conv_name + '_norm',
                        initializer=KaimingNormal(fan_in=fan_conv),
                        name=conv_name))
                mask_conv.add_sublayer(conv_name + 'act', nn.ReLU())
        else:
            for i in range(self.num_convs):
                conv_name = 'mask_inter_feat_{}'.format(i + 1)
                mask_conv.add_sublayer(
                    conv_name,
                    nn.Conv2D(
                        in_channels=in_channels if i == 0 else out_channels,
                        out_channels=out_channels,
                        kernel_size=3,
                        padding=1,
                        weight_attr=paddle.ParamAttr(initializer=KaimingNormal(
                            fan_in=fan_conv))))
                mask_conv.add_sublayer(conv_name + 'act', nn.ReLU())
        mask_conv.add_sublayer(
            'conv5_mask',
            nn.Conv2DTranspose(
                in_channels=self.in_channels,
                out_channels=self.out_channels,
                kernel_size=2,
                stride=2,
                weight_attr=paddle.ParamAttr(initializer=KaimingNormal(
                    fan_in=fan_deconv))))
        mask_conv.add_sublayer('conv5_mask' + 'act', nn.ReLU())
        self.upsample = mask_conv
Example #21
    def __init__(self, input_channels=1):
        super(GGCNN, self).__init__()

        self.conv1 = nn.Conv2D(input_channels, filter_sizes[0], kernel_sizes[0], stride=strides[0], padding=3,
                               weight_attr=paddle.ParamAttr(initializer=nn.initializer.XavierUniform()))
        self.conv2 = nn.Conv2D(filter_sizes[0], filter_sizes[1], kernel_sizes[1], stride=strides[1], padding=2,
                               weight_attr=paddle.ParamAttr(initializer=nn.initializer.XavierUniform()))
        self.conv3 = nn.Conv2D(filter_sizes[1], filter_sizes[2], kernel_sizes[2], stride=strides[2], padding=1,
                               weight_attr=paddle.ParamAttr(initializer=nn.initializer.XavierUniform()))

        self.convt1 = nn.Conv2DTranspose(filter_sizes[2], filter_sizes[3], kernel_sizes[3], stride=strides[3], padding=1,
                                         output_padding=1,
                                         weight_attr=paddle.ParamAttr(initializer=nn.initializer.XavierUniform()))
        self.convt2 = nn.Conv2DTranspose(filter_sizes[3], filter_sizes[4], kernel_sizes[4], stride=strides[4], padding=2,
                                         output_padding=1,
                                         weight_attr=paddle.ParamAttr(initializer=nn.initializer.XavierUniform()))
        self.convt3 = nn.Conv2DTranspose(filter_sizes[4], filter_sizes[5], kernel_sizes[5], stride=strides[5], padding=3,
                                         output_padding=1,
                                         weight_attr=paddle.ParamAttr(initializer=nn.initializer.XavierUniform()))

        self.pos_output = nn.Conv2D(filter_sizes[5], 1, kernel_size=2,
                                    weight_attr=paddle.ParamAttr(initializer=nn.initializer.XavierUniform()))
        self.cos_output = nn.Conv2D(filter_sizes[5], 1, kernel_size=2,
                                    weight_attr=paddle.ParamAttr(initializer=nn.initializer.XavierUniform()))
        self.sin_output = nn.Conv2D(filter_sizes[5], 1, kernel_size=2,
                                    weight_attr=paddle.ParamAttr(initializer=nn.initializer.XavierUniform()))
        self.width_output = nn.Conv2D(filter_sizes[5], 1, kernel_size=2,
                                      weight_attr=paddle.ParamAttr(initializer=nn.initializer.XavierUniform()))
Example #22
 def __init__(self, in_channels, block, layers, num_classes=2):
     super(CDNet, self).__init__()
     filters = [64, 128, 256, 512]
     self.in_planes = 64
     self.firstconv = nn.Conv2D(in_channels, 64, kernel_size=7, stride=2, padding=3)
     self.firstbn = nn.BatchNorm(64)
     self.firstmaxpool = nn.MaxPool2D(kernel_size=3, stride=2, padding=1)
     # encode
     self.encoder1 = self._make_layer(block, 64, layers[0])
     self.encoder2 = self._make_layer(block, 128, layers[1], stride=2)
     self.encoder3 = self._make_layer(block, 256, layers[2], stride=2)
     self.encoder4 = self._make_layer(block, 512, layers[3], stride=2)
     # decode
     self.decoder4 = DecoderBlock(filters[3], filters[2])
     self.decoder3 = DecoderBlock(filters[2], filters[1])
     self.decoder2 = DecoderBlock(filters[1], filters[0])
     self.decoder1 = DecoderBlock(filters[0], filters[0])
     # --
     self.dblock_master = Dblock(512)
     self.dblock = Dblock(512)
     self.decoder4_master = DecoderBlock(filters[3], filters[2])
     self.decoder3_master = DecoderBlock(filters[2], filters[1])
     self.decoder2_master = DecoderBlock(filters[1], filters[0])
     self.decoder1_master = DecoderBlock(filters[0], filters[0])
     # final
     self.finaldeconv1_master = nn.Conv2DTranspose(filters[0], 32, 4, 2, 1)
     self.finalconv2_master = nn.Conv2D(32, 32, 3, padding=1)
     self.finalconv3_master = nn.Conv2D(32, num_classes, 3, padding=1)
     self.finaldeconv1 = nn.Conv2DTranspose(filters[0], 32, 4, 2, 1)
     self.finalconv2 = nn.Conv2D(32, 32, 3, padding=1)
     self.finalconv3 = nn.Conv2D(32, num_classes, 3, padding=1)
     # init
     for sublayer in self.sublayers():
         if isinstance(sublayer, nn.Conv2D):
             n = sublayer._kernel_size[0] * sublayer._kernel_size[1] * sublayer._out_channels
             normal_init(sublayer.weight, mean=0, std=math.sqrt(2. / n))
         elif isinstance(sublayer, nn.BatchNorm):
             constant_init(sublayer.weight, value=0)
             constant_init(sublayer.bias, value=1)
Example #23
 def __init__(self, in_channels, name_list):
     super(Head, self).__init__()
     self.conv1 = nn.Conv2D(in_channels=in_channels,
                            out_channels=in_channels // 4,
                            kernel_size=3,
                            padding=1,
                            weight_attr=ParamAttr(),
                            bias_attr=False)
     self.conv_bn1 = nn.BatchNorm(
         num_channels=in_channels // 4,
         param_attr=ParamAttr(initializer=paddle.nn.initializer.Constant(
             value=1.0)),
         bias_attr=ParamAttr(initializer=paddle.nn.initializer.Constant(
             value=1e-4)),
         act='relu')
     self.conv2 = nn.Conv2DTranspose(
         in_channels=in_channels // 4,
         out_channels=in_channels // 4,
         kernel_size=2,
         stride=2,
         weight_attr=ParamAttr(
             initializer=paddle.nn.initializer.KaimingUniform()),
         bias_attr=get_bias_attr(in_channels // 4))
     self.conv_bn2 = nn.BatchNorm(
         num_channels=in_channels // 4,
         param_attr=ParamAttr(initializer=paddle.nn.initializer.Constant(
             value=1.0)),
         bias_attr=ParamAttr(initializer=paddle.nn.initializer.Constant(
             value=1e-4)),
         act="relu")
     self.conv3 = nn.Conv2DTranspose(
         in_channels=in_channels // 4,
         out_channels=1,
         kernel_size=2,
         stride=2,
         weight_attr=ParamAttr(
             initializer=paddle.nn.initializer.KaimingUniform()),
         bias_attr=get_bias_attr(in_channels // 4),
     )
Example #24
    def __init__(self, in_dim, out_dim, kernel_size=4, stride=2, padding=1, use_dropout=False):
        super(Upsample, self).__init__()

        sequence = [
            nn.ReLU(),                    # ReLU
            nn.Conv2DTranspose(in_dim, out_dim, kernel_size, stride, padding, bias_attr=False),                    # Conv2DTranspose
            nn.BatchNorm2D(out_dim)                    # nn.BatchNorm2D
        ]

        if use_dropout:
            sequence.append(nn.Dropout(p=0.5))

        self.layers = nn.Sequential(*sequence)
Example #25
 def _block(self, in_channels, out_channels, kernel_size, stride, padding):
     return nn.Sequential(
         nn.Conv2DTranspose(
             in_channels, out_channels, kernel_size, stride, padding, bias_attr=False, 
             weight_attr=paddle.ParamAttr(initializer=conv_initializer() )
         ),
         nn.BatchNorm2D(
             out_channels, 
             weight_attr=paddle.ParamAttr(initializer=bn_initializer() ) ,
             momentum=0.8
         ),
         nn.ReLU(),
     )
Example #26
 def __init__(self):
     super(Generator, self).__init__()
     self.conv_1 = nn.Conv2DTranspose(
         100, 512, 4, 1, 0, bias_attr=False,
         weight_attr=paddle.ParamAttr(name="g_dconv_weight_1_", initializer=conv_initializer))
     self.bn_1 = nn.BatchNorm2D(
         512,
         weight_attr=paddle.ParamAttr(name="g_1_bn_weight_", initializer=bn_initializer),
         momentum=0.8)
     self.conv_2 = nn.Conv2DTranspose(
         512, 256, 4, 2, 1, bias_attr=False,
         weight_attr=paddle.ParamAttr(name="g_dconv_weight_2_", initializer=conv_initializer))
     self.bn_2 = nn.BatchNorm2D(
         256,
         weight_attr=paddle.ParamAttr(name="g_2_bn_weight_", initializer=bn_initializer),
         momentum=0.8)
     self.conv_3 = nn.Conv2DTranspose(
         256, 128, 4, 2, 1, bias_attr=False,
         weight_attr=paddle.ParamAttr(name="g_dconv_weight_3_", initializer=conv_initializer))
     self.bn_3 = nn.BatchNorm2D(
         128,
         weight_attr=paddle.ParamAttr(name="g_3_bn_weight_", initializer=bn_initializer),
         momentum=0.8)
     self.conv_4 = nn.Conv2DTranspose(
         128, 64, 4, 2, 1, bias_attr=False,
         weight_attr=paddle.ParamAttr(name="g_dconv_weight_4_", initializer=conv_initializer))
     self.bn_4 = nn.BatchNorm2D(
         64,
         weight_attr=paddle.ParamAttr(name="g_4_bn_weight_", initializer=bn_initializer),
         momentum=0.8)
     self.conv_5 = nn.Conv2DTranspose(
         64, 3, 4, 2, 1, bias_attr=False,
         weight_attr=paddle.ParamAttr(name="g_dconv_weight_5_", initializer=conv_initializer))
     self.tanh = paddle.nn.Tanh()
Example #27
    def __init__(self,
                 in_channels,
                 out_channels,
                 internal_ratio=4,
                 dropout_prob=0,
                 bias=False,
                 relu=True):
        super(UpsamplingBottleneck, self).__init__()

        if internal_ratio <= 1 or internal_ratio > in_channels:
            raise RuntimeError(
                "Value out of range. Expected value in the "
                "interval [1, {0}], got internal_scale={1}. ".format(
                    in_channels, internal_ratio))

        internal_channels = in_channels // internal_ratio

        if relu:
            activation = nn.ReLU
        else:
            activation = nn.PReLU

        self.main_conv1 = nn.Sequential(
            nn.Conv2D(in_channels, out_channels, kernel_size=1,
                      bias_attr=bias), layers.SyncBatchNorm(out_channels))

        self.ext_conv1 = nn.Sequential(
            nn.Conv2D(in_channels,
                      internal_channels,
                      kernel_size=1,
                      bias_attr=bias), layers.SyncBatchNorm(internal_channels),
            activation())

        self.ext_tconv1 = nn.Conv2DTranspose(internal_channels,
                                             internal_channels,
                                             kernel_size=2,
                                             stride=2,
                                             bias_attr=bias)
        self.ext_tconv1_bnorm = layers.SyncBatchNorm(internal_channels)
        self.ext_tconv1_activation = activation()

        self.ext_conv2 = nn.Sequential(
            nn.Conv2D(internal_channels,
                      out_channels,
                      kernel_size=1,
                      bias_attr=bias), layers.SyncBatchNorm(out_channels))

        self.ext_regul = nn.Dropout2D(p=dropout_prob)

        self.out_activation = activation()
Example #28
 def __init__(self,
              cin,
              cout,
              kernel_size,
              stride,
              padding,
              output_padding=0,
              *args,
              **kwargs):
     super().__init__(*args, **kwargs)
     self.conv_block = nn.Sequential(
         nn.Conv2DTranspose(cin, cout, kernel_size, stride, padding,
                            output_padding), nn.BatchNorm2D(cout))
     self.act = nn.ReLU()
Example #29
    def __init__(self, upscale_factors=[16, 16]):
        super(UpsampleNet, self).__init__()
        self.upscale_factors = list(upscale_factors)
        self.upscale_factor = 1
        for item in upscale_factors:
            self.upscale_factor *= item

        for factor in self.upscale_factors:
            self.append(
                nn.utils.weight_norm(
                    nn.Conv2DTranspose(1,
                                       1,
                                       kernel_size=(3, 2 * factor),
                                       stride=(1, factor),
                                       padding=(1, factor // 2))))
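
Each layer above keeps the height unchanged (kernel height 3, padding 1) and, for an even factor, scales the width exactly by that factor, since (W - 1) * factor - 2 * (factor // 2) + 2 * factor = W * factor. A hypothetical shape check, assuming UpsampleNet extends nn.LayerList (the snippet relies on self.append):

import paddle

net = UpsampleNet(upscale_factors=[16, 16])
x = paddle.randn([4, 1, 80, 30])  # e.g. [batch, 1, n_mels, frames]
for layer in net:
    x = layer(x)
print(net.upscale_factor)  # 256
print(x.shape)             # [4, 1, 80, 7680]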
Example #30
 def __init__(self):
     super(ModelCase1, self).__init__()
     models = [SuperConv2D(3, 4, 3, bias_attr=False)]
     models += [
         SuperConv2D(
             4,
             4,
             7,
             candidate_config={
                 'expand_ratio': (0.5, 1.0),
                 'kernel_size': (3, 5, 7)
             },
             transform_kernel=True)
     ]
     models += [SuperConv2D(4, 4, 3, groups=4)]
     models += [SuperConv2D(4, 4, 3, groups=2)]
     models += [SuperBatchNorm(4)]
     models += [SuperConv2DTranspose(4, 4, 3, bias_attr=False)]
     models += [
         SuperConv2DTranspose(
             4,
             4,
             7,
             candidate_config={
                 'expand_ratio': (0.5, 1.0),
                 'kernel_size': (3, 5, 7)
             },
             transform_kernel=True)
     ]
     models += [SuperConv2DTranspose(4, 4, 3, groups=4)]
     models += [SuperInstanceNorm(4)]
     models += [nn.Conv2DTranspose(4, 4, 3, groups=2)]
     models += [SuperConv2DTranspose(4, 4, 3, groups=2)]
     models += [
         SuperSeparableConv2D(
             4,
             4,
             1,
             padding=1,
             bias_attr=False,
             candidate_config={'expand_ratio': (0.5, 1.0)}),
     ]
     models += [
         SuperSeparableConv2D(
             4, 4, 1, padding=1, candidate_config={'channel': (2, 4)}),
     ]
     self.models = paddle.nn.Sequential(*models)