Example no. 1
    def __init__(self, n_gen):
        super(MNISTDiscriminator, self).__init__(n_gen)

        self.latent = nn.Sequential(
            sn(nn.Conv2d(1, 64, 4, 2, 1)),  # B X 64 X 14 X 14
            nn.BatchNorm2d(64),
            nn.LeakyReLU(0.2),
            sn(nn.Conv2d(64, 128, 4, 2, 1)),  # B X 128 X 7 X 7
            nn.BatchNorm2d(128),
            nn.LeakyReLU(0.2),
            Lambda(lambda x: x.view(-1, 128 * 7 * 7)),
        )

        self.score = nn.Sequential(
            sn(nn.Linear(128 * 7 * 7, 1024)),
            nn.BatchNorm1d(1024),
            nn.LeakyReLU(0.2),
            sn(nn.Linear(1024, 1)),
        )

        self.posterior = nn.Sequential(
            nn.Linear(128 * 7 * 7, 1024),
            nn.BatchNorm1d(1024),
            nn.LeakyReLU(0.2),
            nn.Linear(1024, n_gen),
        )
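Every example on this page wraps layers in sn(...) without showing the import; a reasonable reading (an assumption, since the snippets omit their headers) is that sn aliases torch.nn.utils.spectral_norm and nn is torch.nn, with Lambda a small function-to-module wrapper like the one used in Examples 1 and 3. A minimal self-contained sketch under those assumptions:

    import torch
    import torch.nn as nn
    from torch.nn.utils import spectral_norm as sn  # assumed alias

    class Lambda(nn.Module):
        # Assumed helper: exposes a plain function as an nn.Module.
        def __init__(self, fn):
            super().__init__()
            self.fn = fn

        def forward(self, x):
            return self.fn(x)

    # Spectral norm reparametrizes the weight so that its largest singular
    # value stays near 1, the usual trick for stabilizing GAN discriminators.
    conv = sn(nn.Conv2d(1, 64, 4, 2, 1))
    x = torch.randn(8, 1, 28, 28)
    print(conv(x).shape)  # torch.Size([8, 64, 14, 14]), matching the shape comment in Example 1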
Example no. 2
 def __init__(self, ch):
     super().__init__()
     # Channel multiplier
     self.ch = ch
     self.theta = sn(
         nn.Conv2d(self.ch,
                   self.ch // 8,
                   kernel_size=1,
                   padding=0,
                   bias=False))
     self.phi = sn(
         nn.Conv2d(self.ch,
                   self.ch // 8,
                   kernel_size=1,
                   padding=0,
                   bias=False))
     self.g = sn(
         nn.Conv2d(self.ch,
                   self.ch // 2,
                   kernel_size=1,
                   padding=0,
                   bias=False))
     self.o = sn(
         nn.Conv2d(self.ch // 2,
                   self.ch,
                   kernel_size=1,
                   padding=0,
                   bias=False))
     # Learnable gain parameter
     self.gamma = nn.Parameter(torch.tensor(0.), requires_grad=True)
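The four 1x1 convolutions above are the standard SAGAN/BigGAN self-attention projections: theta and phi produce queries and keys at ch // 8 channels, g produces values at ch // 2, o maps back to ch, and gamma gates a residual connection (starting at 0, so the block is an identity at initialization). The forward pass is not shown in the example; the sketch below follows the usual formulation, and the max-pooling of keys and values is an assumption:

    import torch
    import torch.nn.functional as F

    def attention_forward(self, x):
        b, c, h, w = x.shape
        theta = self.theta(x).view(b, c // 8, h * w)             # queries: B x C/8 x HW
        phi = F.max_pool2d(self.phi(x), 2).view(b, c // 8, -1)   # keys, downsampled: B x C/8 x HW/4
        g = F.max_pool2d(self.g(x), 2).view(b, c // 2, -1)       # values, downsampled: B x C/2 x HW/4
        beta = F.softmax(torch.bmm(theta.transpose(1, 2), phi), dim=-1)  # B x HW x HW/4
        o = self.o(torch.bmm(g, beta.transpose(1, 2)).view(b, c // 2, h, w))
        return self.gamma * o + x                                # gamma == 0 at init -> identity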
Example no. 3
    def __init__(self, n_gen):
        super(ChairsDiscriminator, self).__init__(n_gen)

        self.latent = nn.Sequential(
            sn(nn.Conv2d(1, 64, 4, 2, 1)),  # B X 64 X 32 X 32
            nn.BatchNorm2d(64),
            nn.LeakyReLU(0.2),
            sn(nn.Conv2d(64, 128, 4, 2, 1)),  # B X 128 X 16 X 16
            nn.BatchNorm2d(128),
            nn.LeakyReLU(0.2),
            sn(nn.Conv2d(128, 128, 4, 2, 1)),  # B X 128 X 8 X 8
            nn.BatchNorm2d(128),
            nn.LeakyReLU(0.2),
            Lambda(lambda x: x.view(-1, 128 * 8 * 8)),
        )

        self.score = nn.Sequential(
            sn(nn.Linear(128 * 8 * 8, 1024)),
            nn.BatchNorm1d(1024),
            nn.LeakyReLU(0.2),
            sn(nn.Linear(1024, 1)),
        )

        self.posterior = nn.Sequential(
            nn.Linear(128 * 8 * 8, 1024),
            nn.BatchNorm1d(1024),
            nn.LeakyReLU(0.2),
            nn.Linear(1024, n_gen),
        )
Example no. 4
    def __init__(self, dim, padding_type, norm_layer, use_bias, nz=6):
        """Initialize the Resnet block
        A resnet block is a conv block with skip connections
        We construct a conv block with build_conv_block function,
        and implement skip connections in <forward> function.
        Original Resnet paper: https://arxiv.org/pdf/1512.03385.pdf
        """
        super(ResnetBlock, self).__init__()
        p = 0
        if padding_type == 'reflect':
            self.replication_pad = nn.ReflectionPad2d(1)
        elif padding_type == 'replicate':
            self.replication_pad = nn.ReplicationPad2d(1)
        elif padding_type == 'zero':
            p = 1
            self.replication_pad = nn.Identity()  # no-op: zero padding comes from p = 1 in the convs below (nn.Sequential([]) would raise a TypeError)
        else:
            raise NotImplementedError('padding [%s] is not implemented' %
                                      padding_type)

        self.conv1 = sn(
            nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias))
        self.norm1 = nn.BatchNorm2d(dim)
        # self.norm1 = Norm(dim, nz)
        self.conv2 = sn(
            nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias))
        # self.norm2 = Norm(dim, nz)
        self.norm2 = nn.BatchNorm2d(dim)
Example no. 5
 def __init__(self, num_features):
     super().__init__()
     self.norm = nn.BatchNorm2d(num_features, affine=False)
     hidden_features = num_features // 4
     self.gamma_f1 = sn(nn.Linear(64, hidden_features))
     self.beta_f1 = sn(nn.Linear(64, hidden_features))
     self.gamma_f2 = sn(nn.Linear(hidden_features, num_features))
     self.beta_f2 = sn(nn.Linear(hidden_features, num_features))
Example no. 6
 def __init__(self, n_features):
     super().__init__()
     self.layers = nn.Sequential(
         sn(nn.Conv2d(in_channels=n_features, out_channels=n_features, kernel_size=3, stride=1, padding=1)),
         nn.BatchNorm2d(num_features=n_features),
         nn.PReLU(),
         sn(nn.Conv2d(in_channels=n_features, out_channels=n_features, kernel_size=3, stride=1, padding=1)),
         nn.BatchNorm2d(num_features=n_features))
Example no. 7
    def __init__(self):
        super().__init__()
        main = torch.nn.Sequential()

        # We need to know how many layers we will use at the beginning
        mult = 64 // 8

        ### Start block
        # Z_size random numbers
        main.add_module(
            'Start-ConvTranspose2d',
            sn(
                torch.nn.ConvTranspose2d(100,
                                         128 * mult,
                                         kernel_size=4,
                                         stride=1,
                                         padding=0,
                                         bias=False)))
        main.add_module('Start-BatchNorm2d', torch.nn.BatchNorm2d(128 * mult))
        main.add_module('Start-ReLU', torch.nn.ReLU())
        # Size = (G_h_size * mult) x 4 x 4

        ### Middle block (Done until we reach ? x image_size/2 x image_size/2)
        i = 1
        while mult > 1:
            main.add_module(
                'Middle-ConvTranspose2d [%d]' % i,
                sn(
                    torch.nn.ConvTranspose2d(128 * mult,
                                             128 * (mult // 2),
                                             kernel_size=4,
                                             stride=2,
                                             padding=1,
                                             bias=False)))
            main.add_module('Middle-BatchNorm2d [%d]' % i,
                            torch.nn.BatchNorm2d(128 * (mult // 2)))
            main.add_module('Middle-ReLU [%d]' % i, torch.nn.ReLU())
            # Size = (G_h_size * (mult/(2*i))) x 8 x 8
            mult = mult // 2
            i += 1

        ### End block
        # Size = G_h_size x image_size/2 x image_size/2
        main.add_module(
            'End-ConvTranspose2d',
            sn(
                torch.nn.ConvTranspose2d(128,
                                         3,
                                         kernel_size=4,
                                         stride=2,
                                         padding=1,
                                         bias=False)))
        main.add_module('End-TanH', torch.nn.Tanh())
        # Size = n_colors x image_size x image_size
        self.main = main
        self.reshape = layer.Reshape((-1, 100, 1, 1))
Example no. 8
    def __init__(self):
        super().__init__()
        main = torch.nn.Sequential()

        ### Start block
        # Size = n_colors x image_size x image_size
        main.add_module(
            'Start-Conv2d',
            sn(
                torch.nn.Conv2d(3,
                                128,
                                kernel_size=4,
                                stride=2,
                                padding=1,
                                bias=False)))
        main.add_module('Start-LeakyReLU', torch.nn.LeakyReLU(0.2,
                                                              inplace=True))
        image_size_new = 64 // 2
        # Size = D_h_size x image_size/2 x image_size/2

        ### Middle block (Done until we reach ? x 4 x 4)
        mult = 1
        i = 0
        while image_size_new > 4:
            main.add_module(
                'Middle-Conv2d [%d]' % i,
                sn(
                    torch.nn.Conv2d(128 * mult,
                                    128 * (2 * mult),
                                    kernel_size=4,
                                    stride=2,
                                    padding=1,
                                    bias=False)))
            main.add_module('Middle-BatchNorm2d [%d]' % i,
                            torch.nn.BatchNorm2d(128 * (2 * mult)))
            main.add_module('Middle-LeakyReLU [%d]' % i,
                            torch.nn.LeakyReLU(0.2, inplace=True))
            # Size = (D_h_size*(2*i)) x image_size/(2*i) x image_size/(2*i)
            image_size_new = image_size_new // 2
            mult *= 2
            i += 1

        ### End block
        # Size = (D_h_size * mult) x 4 x 4
        main.add_module(
            'End-Conv2d',
            sn(
                torch.nn.Conv2d(128 * mult,
                                1,
                                kernel_size=4,
                                stride=1,
                                padding=0,
                                bias=False)))
        # Size = 1 x 1 x 1 (Is a real cat or not?)
        self.main = main
Example no. 9
 def __init__(self, ni, no, z_dim, upsample=False):
     super().__init__()
     self.bn0 = ConditionalBatchNorm(ni, z_dim)
     self.conv0 = torch.nn.Conv2d(ni, no, 3, 1, 1, bias=False)
     self.conv0 = sn(self.conv0)
     self.bn1 = ConditionalBatchNorm(no, z_dim)
     self.conv1 = torch.nn.Conv2d(no, no, 3, 1, 1, bias=False)
     self.conv1 = sn(self.conv1)
     self.upsample = upsample
     self.reduce = ni != no
     if self.reduce:
         self.conv_short = sn(torch.nn.Conv2d(ni, no, 1, 1, 0, bias=False))
Example no. 10
    def __init__(self, nf, up=False):
        super(ResBlock_G, self).__init__()

        self.nf = nf
        self.up = up

        self.SubBlock1 = nn.Sequential(nn.ReLU(True))

        self.SubBlock2 = nn.Sequential(
            sn(nn.Conv2d(nf, nf, 3, 1, 1, bias=False), n_power_iterations=5),
            nn.BatchNorm2d(nf), nn.ReLU(True),
            sn(nn.Conv2d(nf, nf, 3, 1, 1, bias=False), n_power_iterations=5))

        self.conv_shortcut = sn(nn.Conv2d(nf, nf, 1, 1, 0, bias=False),
                                n_power_iterations=5)
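Examples 10, 11, 14 and 19 pass n_power_iterations=5 to sn; in torch.nn.utils.spectral_norm this is the number of power-iteration steps run per forward pass to estimate the weight's largest singular value (the default is 1, so 5 trades a little extra compute for a tighter estimate). A quick check, again assuming the sn alias:

    import torch.nn as nn
    from torch.nn.utils import spectral_norm as sn  # assumed alias

    conv = sn(nn.Conv2d(128, 128, 3, 1, 1, bias=False), n_power_iterations=5)
    # The wrapper keeps the raw weight as weight_orig and recomputes the
    # normalized conv.weight before each forward pass during training.
    print(hasattr(conv, 'weight_orig'), hasattr(conv, 'weight_u'))  # True True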
Example no. 11
    def __init__(self, nc=3, ngf=128, nz=128):
        super(G_resnet, self).__init__()

        self.nc = nc
        self.ngf = ngf
        self.nz = nz

        self.linear = sn(nn.Linear(nz, 16 * ngf), n_power_iterations=5)
        self.block1 = ResBlock_G(ngf, True)
        self.block2 = ResBlock_G(ngf, True)
        self.block3 = ResBlock_G(ngf, True)
        self.block4 = nn.Sequential(
            nn.ReLU(True),
            sn(nn.Conv2d(ngf, nc, 3, 1, 1, bias=False), n_power_iterations=5),
            nn.Tanh())
Example no. 12
    def add_norm_layer(layer):
        nonlocal norm_type
        subnorm_type = norm_type
        if norm_type.startswith('spectral'):
            layer = sn(layer)
            subnorm_type = norm_type[len('spectral'):]

        if subnorm_type == 'none' or len(subnorm_type) == 0:
            return layer

        # remove bias in the previous layer, which is meaningless
        # since it has no effect after normalization
        if getattr(layer, 'bias', None) is not None:
            delattr(layer, 'bias')
            layer.register_parameter('bias', None)

        if subnorm_type == 'batch':
            norm_layer = nn.BatchNorm2d(get_out_channel(layer), affine=True)
        elif subnorm_type == 'syncbatch':
            norm_layer = SynchronizedBatchNorm2d(get_out_channel(layer), affine=True)
        elif subnorm_type == 'instance':
            norm_layer = nn.InstanceNorm2d(get_out_channel(layer), affine=False)
        else:
            raise ValueError('normalization layer %s is not recognized' % subnorm_type)

        return nn.Sequential(layer, norm_layer)
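add_norm_layer relies on a get_out_channel helper that is not part of the snippet; a minimal compatible version (an assumption, inferred from how it is called above) simply reads the wrapped layer's output width:

    def get_out_channel(layer):
        # Conv layers expose out_channels; for anything else (e.g. nn.Linear)
        # fall back to the first dimension of the weight tensor.
        if hasattr(layer, 'out_channels'):
            return layer.out_channels
        return layer.weight.size(0)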
Example no. 13
    def __init__(self, in_dim, out_dim, id=0):
        super().__init__()
        self.id = id

        self.in_dim = in_dim
        self.out_dim = out_dim

        hidden_dim = in_dim // 4
        self.conv1 = sn(torch.nn.Conv2d(in_dim, hidden_dim, 1))
        self.conv2 = sn(torch.nn.Conv2d(hidden_dim, hidden_dim, 3, 1, 1))
        self.conv3 = sn(torch.nn.Conv2d(hidden_dim, hidden_dim, 3, 1, 1))
        self.conv4 = sn(torch.nn.Conv2d(hidden_dim, out_dim, 1))

        self.act = nn.LeakyReLU(0.2, inplace=True)
        self.downsample = nn.AvgPool2d(2)

        self.conv_sc = sn(torch.nn.Conv2d(in_dim, out_dim - in_dim, 1))
Example no. 14
    def __init__(self, nf, down=False, nc=3, first=False):
        super(ResBlock_D, self).__init__()

        self.nf = nf
        self.down = down
        self.nc = nc
        self.first = first
        nf_in = nc if first else nf

        self.relu1 = nn.ReLU(True)
        self.conv1 = sn(nn.Conv2d(nf_in, nf, 3, 1, 1, bias=False),
                        n_power_iterations=5)
        self.relu2 = nn.ReLU(True)
        self.conv2 = sn(nn.Conv2d(nf, nf, 3, 1, 1, bias=False),
                        n_power_iterations=5)

        self.conv_shortcut = sn(nn.Conv2d(nf_in, nf, 1, 1, 0, bias=False),
                                n_power_iterations=5)
Example no. 15
    def __init__(self, img_size, base_hidden=16, pack=2, attention=64):
        super().__init__()
        self.pack = pack

        self.attention = False
        if attention is not None:
            self.attention = True
            self.attention_resolution = attention

        _block = block_d

        main = torch.nn.Sequential()
        ### Start block
        # Size = n_colors x image_size x image_size
        main.add_module('Start-block',
                        sn(nn.Conv2d(3 * pack, base_hidden, 3, 1, 1)))
        image_size_new = img_size
        # Size = D_h_size x image_size/2 x image_size/2

        ### Middle block (Done until we reach ? x 4 x 4)
        mult = 1
        i = 1
        while image_size_new > 1:
            if self.attention and image_size_new == self.attention_resolution:
                print('Attention!')
                main.add_module('Start-Attention',
                                SelfAttention(base_hidden * mult))
            main.add_module(
                'Middle-block [%d]' % i,
                _block(base_hidden * mult, base_hidden * (2 * mult), id=i))
            # Size = (D_h_size*(2*i)) x image_size/(2*i) x image_size/(2*i)
            image_size_new = image_size_new // 2
            mult *= 2
            i += 1
        ### End block
        # Size = (D_h_size * mult) x 4 x 4
        main.add_module('End-Feature', ToFeature())
        # Size = (bs, base_hidden * mult)
        self.out = nn.Sequential(*[
            #MinibatchDiscrimination(base_hidden*mult, base_hidden*mult+128),
            sn(nn.Linear(base_hidden * mult, 1))
        ])

        self.main = main
Example no. 16
 def __init__(self,
              ni,
              no,
              stride,
              activation,
              spectral_norm=False,
              dp_prob=.3):
     super().__init__()
     self.activation = activation
     self.bn0 = torch.nn.BatchNorm2d(ni)
     self.conv0 = torch.nn.Conv2d(ni,
                                  no,
                                  3,
                                  stride=1,
                                  padding=1,
                                  bias=False)
     self.bn1 = torch.nn.BatchNorm2d(no)
     self.conv1 = torch.nn.Conv2d(no,
                                  no,
                                  3,
                                  stride=1,
                                  padding=1,
                                  bias=False)
     self.dropout1 = torch.nn.Dropout2d(dp_prob)
     if stride > 1:
         self.downsample = True
     else:
         self.downsample = False
     if ni != no:
         self.reduce = True
         self.conv_reduce = torch.nn.Conv2d(ni,
                                            no,
                                            1,
                                            stride=1,
                                            padding=0,
                                            bias=False)
     else:
         self.reduce = False
     if spectral_norm:
         self.conv0 = sn(self.conv0)
         self.conv1 = sn(self.conv1)
         if self.reduce:
             self.conv_reduce = sn(self.conv_reduce)
Example no. 17
 def __init__(self, n_in, n_out, stride):
     super().__init__()
     self.layers = nn.Sequential(
         sn(
             nn.Conv2d(in_channels=n_in,
                       out_channels=n_out,
                       kernel_size=3,
                       stride=stride,
                       padding=1)), nn.BatchNorm2d(num_features=n_out),
         nn.LeakyReLU())
Example no. 18
    def __init__(self,
                 dim,
                 padding_type,
                 norm_layer,
                 use_bias,
                 nz=6,
                 type_up='nearest',
                 type_norm='batch'):
        """Initialize the Resnet block
        A resnet block is a conv block with skip connections
        We construct a conv block with build_conv_block function,
        and implement skip connections in <forward> function.
        Original Resnet paper: https://arxiv.org/pdf/1512.03385.pdf
        """
        super().__init__()

        p = 0
        if padding_type == 'reflect':
            self.replication_pad = nn.ReflectionPad2d(1)
        elif padding_type == 'replicate':
            self.replication_pad = nn.ReplicationPad2d(1)
        elif padding_type == 'zero':
            p = 1
            self.replication_pad = nn.Identity()  # no-op placeholder (nn.Sequential([]) would raise a TypeError)
        else:
            raise NotImplementedError('padding [%s] is not implemented' %
                                      padding_type)

        self.conv1 = UpsampleConvLayer(dim,
                                       int(dim / 2),
                                       kernel_size=3,
                                       stride=2,
                                       padding=0,
                                       bias=use_bias,
                                       upsample=2,
                                       type_up=type_up)
        self.conv_up = UpsampleConvLayer(dim,
                                         int(dim / 2),
                                         kernel_size=3,
                                         stride=2,
                                         padding=0,
                                         bias=use_bias,
                                         upsample=2,
                                         type_up=type_up)
        # self.norm1 = BatchNormZ(int(dim / 2), nz)
        # self.norm1 = Norm(int(dim / 2), nz)
        self.norm1 = nn.BatchNorm2d(int(dim / 2))  # nz dropped: BatchNorm2d's second positional argument is eps, not a latent size
        self.conv2 = sn(
            nn.Conv2d(int(dim / 2),
                      int(dim / 2),
                      kernel_size=3,
                      padding=1,
                      bias=use_bias))
        # self.norm2 = Norm(int(dim / 2), nz)
        self.norm2 = nn.BatchNorm2d(int(dim / 2))  # nz dropped, as for norm1 above
Example no. 19
    def __init__(self, nc, ndf):
        super(D_resnet, self).__init__()
        self.nc = nc
        self.ndf = ndf

        self.block1 = ResBlock_D(ndf, True, nc, True)
        self.block2 = ResBlock_D(ndf, True)
        self.block3 = ResBlock_D(ndf)
        self.block4 = ResBlock_D(ndf)
        self.relu = nn.ReLU(True)
        self.linear = sn(nn.Linear(ndf, 1), n_power_iterations=5)
Example no. 20
 def __init__(self, n_blocks, n_features_block, n_features_last, list_scales, use_sn=False, input_channels=3):
     """n_blocks, n_features: roughly the expressiveness of the model
     input_channels: number of color channels in the input and output
     scale_twice: False: x4 pixels, True: x16 pixels"""
     super().__init__()
     
     assert n_features_last % 4 == 0
     self.n_features_last = n_features_last
     
     self.first_layers = nn.Sequential(
         sn(nn.Conv2d(in_channels=input_channels, out_channels=n_features_block, kernel_size=9, stride=1, padding=4)),
         nn.PReLU())
     
     self.block_list = nn.Sequential(*[BasicBlock(n_features_block) for _ in range(n_blocks)])
     
     self.block_list_end = nn.Sequential(
         sn(nn.Conv2d(in_channels=n_features_block, out_channels=n_features_block, kernel_size=3, stride=1, padding=1)),
         nn.BatchNorm2d(num_features=n_features_block),
     )
     
     if use_sn:
         self.upscale = nn.Sequential(*[
                     nn.Sequential(sn(nn.Conv2d(in_channels=n_features_block if i==0 else n_features_last//list_scales[i-1]**2,
                                            out_channels=n_features_last, kernel_size=3, stride=1, padding=1)),
                                 nn.PixelShuffle(upscale_factor=list_scales[i]),
                                 nn.PReLU())
                 for i in range(len(list_scales))])
         self.end = nn.Sequential(
                  # output
                 sn(nn.Conv2d(in_channels=n_features_last//list_scales[-1]**2, out_channels=input_channels, kernel_size=3, stride=1, padding=1)),
                 nn.Tanh())
     else:
         self.upscale = nn.Sequential(*[
                     nn.Sequential(nn.Conv2d(in_channels=n_features_block if i==0 else n_features_last//list_scales[i-1]**2,
                                            out_channels=n_features_last, kernel_size=3, stride=1, padding=1),
                                 nn.PixelShuffle(upscale_factor=list_scales[i]),
                                 nn.PReLU())
                 for i in range(len(list_scales))])
         self.end = nn.Sequential(
                 nn.Conv2d(in_channels=n_features_last//list_scales[-1]**2, out_channels=input_channels, kernel_size=3, stride=1, padding=1),
                 nn.Tanh())
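The upscale stages above rely on how nn.PixelShuffle trades channels for resolution: after a conv to n_features_last channels, PixelShuffle(s) divides the channel count by s**2 and multiplies height and width by s, which is why the next stage's in_channels is n_features_last // list_scales[i-1]**2. A one-line check:

    import torch
    import torch.nn as nn

    x = torch.randn(1, 256, 16, 16)     # e.g. n_features_last = 256
    print(nn.PixelShuffle(2)(x).shape)  # torch.Size([1, 64, 32, 32]) -> 256 // 2**2 channels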
Example no. 21
    def __init__(self, in_channel, out_channel, kernel_size=[3, 3],
                 padding=1, stride=1, n_class=None, bn=False,
                 activation=F.relu, upsample=True, downsample=False, SN=False, emb=None, rate=1.0):
        super().__init__()

        gain = 2 ** 0.5

        self.emb = emb

        if SN:
            self.conv1 = sn(nn.Conv2d(in_channel, out_channel,
                                                 kernel_size, stride, padding,
                                                 bias=False if bn else True))
            self.conv2 = sn(nn.Conv2d(out_channel, out_channel,
                                                 kernel_size, stride, padding,
                                                 bias=False if bn else True))
        else:
            self.conv1 = nn.Conv2d(in_channel, out_channel,
                                                 kernel_size, stride, padding,
                                                 bias=False if bn else True)
            self.conv2 = nn.Conv2d(out_channel, out_channel,
                                                 kernel_size, stride, padding,
                                                 bias=False if bn else True)

        self.skip_proj = False
        if in_channel != out_channel or upsample or downsample:
            if SN == True:
                self.conv_skip = sn(nn.Conv2d(in_channel, out_channel,
                                                         1, 1, 0))
            else:
                self.conv_skip = nn.Conv2d(in_channel, out_channel,
                                                         1, 1, 0)

            self.skip_proj = True

        self.upsample = upsample
        self.downsample = downsample
        self.activation = activation
        self.bn = bn
Example no. 22
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride,
                 padding,
                 bias,
                 upsample=None,
                 type_up='transpose'):
        super(UpsampleConvLayer, self).__init__()
        self.upsample = upsample

        self.reflection_pad = nn.ReflectionPad2d(1)
        self.type_up = type_up

        if type_up == 'pixel':
            in_channels = in_channels // 4

        if type_up == 'transpose':
            self.conv = sn(
                nn.ConvTranspose2d(in_channels,
                                   out_channels,
                                   kernel_size=3,
                                   stride=2,
                                   padding=1,
                                   output_padding=1,
                                   bias=bias))
        else:
            self.conv = sn(
                nn.Conv2d(in_channels,
                          out_channels,
                          3,
                          stride=1,
                          padding=0,
                          bias=bias))

        if type_up == 'pixel':
            self.interp = nn.PixelShuffle(upscale_factor=2)
Example no. 23
 def __init__(self, prefix, freeze_prefix=False, **kwargs):
     super().__init__()
     self.base = prefix
     self.n_features_last = prefix.n_features_last
     self.upscale = nn.Sequential(*[
                     sn(nn.Conv2d(in_channels=self.n_features_last // 4, out_channels=self.n_features_last,
                                  kernel_size=3, stride=1, padding=1)),
                     nn.PixelShuffle(upscale_factor=2),
                     nn.PReLU()])
     # hide the parameter in a plain list so it is only seen once (i.e. not registered again as a submodule)
     self.end = [prefix.end[0] if type(prefix.end)==list else prefix.end]
     
     if freeze_prefix:
         prefix.freeze(**kwargs)
Example no. 24
    def __init__(self,
                 in_dim,
                 out_dim,
                 id=0,
                 do_upsample=True,
                 inject_noise=False):
        super().__init__()
        self.id = id
        self.inject_noise = inject_noise
        self.in_dim = in_dim
        self.out_dim = out_dim

        hidden_dim = in_dim // 4
        self.conv1 = sn(torch.nn.Conv2d(in_dim, hidden_dim, 1, bias=False))
        self.conv2 = sn(
            torch.nn.Conv2d(hidden_dim, hidden_dim, 3, 1, 1, bias=False))
        self.conv3 = sn(
            torch.nn.Conv2d(hidden_dim, hidden_dim, 3, 1, 1, bias=False))
        self.conv4 = sn(torch.nn.Conv2d(hidden_dim, out_dim, 1, bias=False))

        self.n1 = NoiseInjection(in_dim, inject_noise)
        self.n2 = NoiseInjection(hidden_dim, inject_noise)
        self.n3 = NoiseInjection(hidden_dim, inject_noise)
        self.n4 = NoiseInjection(hidden_dim, inject_noise)

        #         self.bn1 = SelfModNorm2d(in_dim)
        #         self.bn2 = SelfModNorm2d(hidden_dim)
        #         self.bn3 = SelfModNorm2d(hidden_dim)
        #         self.bn4 = SelfModNorm2d(hidden_dim)
        self.bn1 = AdaIn2d(in_dim)
        self.bn2 = AdaIn2d(hidden_dim)
        self.bn3 = AdaIn2d(hidden_dim)
        self.bn4 = AdaIn2d(hidden_dim)

        self.act = nn.LeakyReLU(0.2, inplace=True)
        self.upsample = nn.Upsample(scale_factor=2, mode='nearest')
        self.do_upsample = do_upsample
Example no. 25
    def __init__(self, ch_hidden, embed_ks, spade_ks, n_fc_layers,
                 n_adaptive_layers, ch, adap_embed):
        super().__init__()

        # parameters for model
        for i in range(n_adaptive_layers):
            ch_in, ch_out = ch[i], ch[i + 1]
            embed_ks2 = embed_ks**2
            spade_ks2 = spade_ks**2
            ch_h = ch_hidden[i][0]

            fc_names, fc_outs = [], []
            fc0_out = fcs_out = (ch_h * spade_ks2 + 1) * 2
            fc1_out = (ch_h * spade_ks2 + 1) * (1 if ch_in != ch_out else 2)
            fc_names += ['fc_spade_0', 'fc_spade_1', 'fc_spade_s']
            fc_outs += [fc0_out, fc1_out, fcs_out]
            if adap_embed:
                fc_names += ['fc_spade_e']
                fc_outs += [ch_in * embed_ks2 + 1]

            # define weight for fully connected layers
            for n, l in enumerate(fc_names):
                fc_in = ch_out
                fc_layer = [sn(nn.Linear(fc_in, ch_out))]
                for k in range(1, n_fc_layers):
                    fc_layer += [sn(nn.Linear(ch_out, ch_out))]
                fc_layer += [sn(nn.Linear(ch_out, fc_outs[n]))]
                setattr(self, '%s_%d' % (l, i), nn.Sequential(*fc_layer))

        # other parameters
        self.ch = ch
        self.ch_hidden = ch_hidden
        self.embed_ks = embed_ks
        self.spade_ks = spade_ks
        self.adap_embed = adap_embed
        self.n_adaptive_layers = n_adaptive_layers
Example no. 26
    def __init__(self,
                 fin,
                 fout,
                 norm='batch',
                 hidden_nc=0,
                 kernel_size=3,
                 padding=1,
                 stride=1):
        super().__init__()
        self.conv = sn(
            nn.Conv2d(fin,
                      fout,
                      kernel_size=kernel_size,
                      stride=stride,
                      padding=padding))

        Norm = generalNorm(norm)
        self.bn = Norm(fout, hidden_nc=hidden_nc, norm=norm, ks=3)
Example no. 27
    def __init__(self, hdim, kernel_size, dilation=1, adj_dim=False):
        super(BVAE_layer, self).__init__()
        self.softplus = CustomSoftplus()

        ####################### BOTTOM_UP #########################
        if adj_dim == True:
            self.pre_conv = Conv1d(2 * hdim,
                                   hdim,
                                   kernel_size,
                                   activation=F.elu,
                                   dilation=dilation)
        else:
            self.pre_conv = Conv1d(hdim,
                                   hdim,
                                   kernel_size,
                                   activation=F.elu,
                                   dilation=dilation)

        self.up_conv_a = nn.ModuleList([
            sn(Conv1d(hdim, hdim, kernel_size, activation=F.elu)),
            sn(Conv1d(hdim, 3 * hdim, kernel_size, bias=False))
        ])
        self.up_conv_b = sn(Conv1d(hdim, hdim, kernel_size, activation=F.elu))

        ######################## TOP_DOWN ##########################
        self.down_conv_a = nn.ModuleList([
            sn(Conv1d(hdim, hdim, kernel_size, activation=F.elu)),
            sn(Conv1d(hdim, 5 * hdim, kernel_size, bias=False))
        ])
        self.down_conv_b = nn.ModuleList([
            sn(Conv1d(2 * hdim, hdim, kernel_size, bias=False)),
            sn(Conv1d(hdim, hdim, kernel_size, activation=F.elu))
        ])

        if adj_dim == True:
            self.post_conv = Conv1d(hdim,
                                    2 * hdim,
                                    kernel_size,
                                    activation=F.elu,
                                    dilation=dilation)
        else:
            self.post_conv = Conv1d(hdim,
                                    hdim,
                                    kernel_size,
                                    activation=F.elu,
                                    dilation=dilation)
Example no. 28
    def __init__(self, input_shape, list_n_features, list_stride):
        """
        features and strides used in SRGAN:
            [64, 64, 128, 128, 256, 256, 512, 512],
            [1,   2,   1,   2,   1,   2,   1,   2]"""
        super().__init__()
        w = input_shape[1]
        h = input_shape[2]
        for x in list_stride:
            assert x in (1, 2), "the paper only uses strides of 1 or 2"
        assert w * h % 4 ** (sum(list_stride) - len(list_stride)) == 0, \
            "each stride of 2 halves the spatial size, so w*h must be divisible by the total reduction"

        assert len(list_n_features) == len(list_stride)

        # size of the input vectors to the FC layer
        self.fc_in = w * h * list_n_features[-1] // (4**(sum(list_stride) -
                                                         len(list_stride)))
        self.fc_mid = list_n_features[-1] * 2
        self.conv = nn.Sequential(
            # input
            sn(
                nn.Conv2d(in_channels=input_shape[0],
                          out_channels=list_n_features[0],
                          kernel_size=3,
                          stride=list_stride[0],
                          padding=1)),
            nn.LeakyReLU(),

            # list of blocks
            nn.Sequential(*[
                BasicBlock(list_n_features[i - 1], list_n_features[i],
                           list_stride[i])
                for i in range(1, len(list_n_features))
            ]))

        self.fc = nn.Sequential(
            # output
            nn.Linear(self.fc_in, self.fc_mid),
            nn.LeakyReLU(),
            nn.Linear(self.fc_mid, 1),
            nn.Sigmoid())
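For the SRGAN configuration quoted in the docstring (features [64, ..., 512], strides [1, 2, 1, 2, 1, 2, 1, 2]) and a 3 x 64 x 64 input, sum(list_stride) - len(list_stride) = 4, so the spatial area shrinks by 4**4 = 256 and the fully connected head sees fc_in = 64*64*512 // 256 = 8192 features, with fc_mid = 1024. A quick check of that arithmetic:

    list_n_features = [64, 64, 128, 128, 256, 256, 512, 512]
    list_stride = [1, 2, 1, 2, 1, 2, 1, 2]
    w = h = 64
    fc_in = w * h * list_n_features[-1] // (4 ** (sum(list_stride) - len(list_stride)))
    print(fc_in, list_n_features[-1] * 2)  # 8192 1024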
Example no. 29
def snlinear(eps=1e-12, **kwargs):
    return sn(nn.Linear(**kwargs), eps=eps)
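Since snlinear forwards everything through **kwargs, nn.Linear must be configured with keyword arguments. A short usage sketch, repeating the assumed sn alias so it runs on its own:

    import torch.nn as nn
    from torch.nn.utils import spectral_norm as sn  # assumed alias

    def snlinear(eps=1e-12, **kwargs):
        return sn(nn.Linear(**kwargs), eps=eps)

    fc = snlinear(in_features=1024, out_features=1)  # keyword args only: they pass through **kwargs
    print(hasattr(fc, 'weight_orig'))                # True: the spectral-norm reparametrization is attached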
Example no. 30
    def __init__(self,
                 input_nc,
                 ngf=64,
                 add_input=False,
                 nz=0,
                 n_blocks=9,
                 n_down=1,
                 padding_type='reflect',
                 norm='batch',
                 self_attention=False,
                 type_up='nearest',
                 type_z='concat'):
        """Construct a Resnet-based generator
        Parameters:
            input_nc (int)      -- the number of channels in input images
            output_nc (int)     -- the number of channels in output images
            ngf (int)           -- the number of filters in the last conv layer
            norm_layer          -- normalization layer
            n_blocks (int)      -- the number of ResNet blocks
            padding_type (str)  -- the name of padding layer in conv layers: reflect | replicate | zero
        """
        assert (n_blocks >= 0)
        norm_layer = get_norm_layer(norm_type=norm)
        self.n_down = n_down

        use_bias = False

        self.nz = nz
        self.num_classes = 0
        output_nc = input_nc
        self.self_attention = self_attention
        self.type_up = type_up
        self.type_z = type_z
        if type_z == 'concat':
            input_nc = input_nc + nz

        super(ResnetGenerator, self).__init__()
        self.add_input = add_input

        # self.replic1 = nn.ReflectionPad2d(3)
        # self.down1 = sn(nn.Conv2d(input_nc + nz, ngf, kernel_size=7, padding=0, bias=use_bias))
        # self.norm1 = norm_layer(ngf)

        model_down = [
            nn.ReflectionPad2d(3),
            sn(
                nn.Conv2d(input_nc,
                          ngf,
                          kernel_size=7,
                          padding=0,
                          bias=use_bias)),
            norm_layer(ngf),
            nn.ReLU(True)
        ]

        # mult = 1
        # self.down2 = sn(nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1, bias=use_bias))
        # self.norm2 = norm_layer(ngf * mult * 2)

        n_downsampling = n_down
        for i in range(n_downsampling):  # add downsampling layers
            mult = 2**i
            model_down += [
                sn(
                    nn.Conv2d(ngf * mult,
                              ngf * mult * 2,
                              kernel_size=3,
                              stride=2,
                              padding=1,
                              bias=use_bias)),
                norm_layer(ngf * mult * 2),
                nn.ReLU(True)
            ]

        self.model_down = nn.Sequential(*model_down)
        model_block = []
        mult = 2**n_downsampling
        self.block1 = ResnetBlock(ngf * mult,
                                  padding_type=padding_type,
                                  norm_layer=norm_layer,
                                  use_bias=use_bias,
                                  nz=nz)
        self.block2 = ResnetBlock(ngf * mult,
                                  padding_type=padding_type,
                                  norm_layer=norm_layer,
                                  use_bias=use_bias,
                                  nz=nz)
        self.block3 = ResnetBlock(ngf * mult,
                                  padding_type=padding_type,
                                  norm_layer=norm_layer,
                                  use_bias=use_bias,
                                  nz=nz)
        self.block4 = ResnetBlock(ngf * mult,
                                  padding_type=padding_type,
                                  norm_layer=norm_layer,
                                  use_bias=use_bias,
                                  nz=nz)
        if self_attention:
            self.attention = SelfAttention(ngf * mult)
        self.block5 = ResnetBlock(ngf * mult,
                                  padding_type=padding_type,
                                  norm_layer=norm_layer,
                                  use_bias=use_bias,
                                  nz=nz)
        self.block6 = ResnetBlock(ngf * mult,
                                  padding_type=padding_type,
                                  norm_layer=norm_layer,
                                  use_bias=use_bias,
                                  nz=nz)

        self.up_1 = UpResnetBlock(ngf * mult,
                                  padding_type=padding_type,
                                  norm_layer=norm_layer,
                                  use_bias=use_bias,
                                  nz=nz,
                                  type_up=type_up)
        self.up_2 = UpResnetBlock(ngf * mult,
                                  padding_type=padding_type,
                                  norm_layer=norm_layer,
                                  use_bias=use_bias,
                                  nz=nz,
                                  type_up=type_up)
        model_up = []
        model_up += [nn.ReflectionPad2d(3)]
        model_up += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]
        model_up += [nn.Tanh()]

        self.model_up = nn.Sequential(*model_up)