Code Example #1
File: biggan.py Project: sholderbach/torchsupport
  def __init__(self, in_size, out_size, cond_size,
               up=2, activation=func.relu_,
               normalization=nn.BatchNorm2d):
    r"""Single residual block of a BigGAN generator.

    Args:
      in_size (int): number of input features.
      out_size (int): number of output features.
      cond_size (int): number of condition features.
      up (int or None): upsampling scale. If None, does
        not perform upsampling.
      activation (function): nonlinear activation function.
        Defaults to ReLU.
      normalization (function): normalization function.
        Defaults to BatchNorm2d.
    """
    super().__init__()
    self.up = up
    self.activation = activation
    self.in_mod = spectral_norm(nn.Linear(cond_size, 2 * in_size, bias=False))
    self.out_mod = spectral_norm(nn.Linear(cond_size, 2 * in_size, bias=False))
    self.in_norm = normalization(in_size, affine=False)
    self.out_norm = normalization(in_size, affine=False)
    self.in_conv = spectral_norm(nn.Conv2d(in_size, in_size, 3, padding=1, bias=False))
    self.out_conv = spectral_norm(nn.Conv2d(in_size, out_size, 3, padding=1, bias=False))
    self.skip_conv = spectral_norm(nn.Conv2d(in_size, out_size, 1, bias=False))
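
A hedged forward-pass sketch for this block. The repository's actual forward is not shown in the snippet, so the ordering below (conditional scale/shift from in_mod/out_mod, optional upsampling, the two 3x3 convolutions, and the 1x1 skip) is an assumption based on the layers defined in __init__; like all examples on this page, it assumes from torch.nn.utils import spectral_norm, import torch.nn as nn, and import torch.nn.functional as func.

  def forward(self, inputs, condition):
    # first conditional normalization: project the condition into a
    # per-channel scale and shift over the in_size features
    scale, shift = self.in_mod(condition).chunk(2, dim=1)
    out = self.in_norm(inputs) * (1 + scale[:, :, None, None]) + shift[:, :, None, None]
    out = self.activation(out)
    if self.up is not None:
      out = func.interpolate(out, scale_factor=self.up)
      inputs = func.interpolate(inputs, scale_factor=self.up)
    out = self.in_conv(out)
    # second conditional normalization, still over in_size channels
    scale, shift = self.out_mod(condition).chunk(2, dim=1)
    out = self.out_norm(out) * (1 + scale[:, :, None, None]) + shift[:, :, None, None]
    out = self.activation(out)
    out = self.out_conv(out)
    return out + self.skip_conv(inputs)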
Code Example #2
    def __init__(self,
                 input_size,
                 hidden_sizes,
                 output_size,
                 hidden_act=nn.ReLU,
                 output_act=None,
                 hid_layer_norm=False,
                 hid_spectral_norm=False,
                 out_layer_norm=False,
                 out_spectral_norm=False):
        super().__init__()
        if not isinstance(hidden_sizes, list):
            raise TypeError('hidden_sizes should be a list')
        in_size = input_size
        self.fcs = nn.ModuleList()
        for i, hid_size in enumerate(hidden_sizes):
            fc = nn.Linear(in_size, hid_size)
            if hid_spectral_norm:
                fc = spectral_norm(fc)
            in_size = hid_size
            self.fcs.append(fc)
            if hid_layer_norm:
                self.fcs.append(nn.LayerNorm(hid_size))
            self.fcs.append(hidden_act())

        last_fc = nn.Linear(in_size, output_size)
        if out_spectral_norm:
            last_fc = spectral_norm(last_fc)
        self.fcs.append(last_fc)
        if out_layer_norm:
            self.fcs.append(nn.LayerNorm(output_size))
        if output_act is not None:
            self.fcs.append(output_act())
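
The snippet stops at __init__; a minimal forward-pass sketch, assuming the layers are meant to be applied in exactly the order they were appended to self.fcs:

    def forward(self, x):
        # linear layers, optional LayerNorm, and activations were appended
        # in application order, so the forward pass simply chains them
        for layer in self.fcs:
            x = layer(x)
        return x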
Code Example #3
 def __init__(self,
              input_nc=3,
              nef=64,
              n_style=1,
              n_blocks=4,
              norm_type='cbin'):
     super(StyleEncoder, self).__init__()
     norm_layer, _ = get_norm_layer(layer_type=norm_type)
     nl_layer = get_nl_layer(layer_type='lrelu')
     block = [
         Conv2dBlock(input_nc,
                     nef,
                     kernel_size=4,
                     stride=2,
                     padding=1,
                     bias=True)
     ]
     for n in range(1, n_blocks):
         input_nef = min(nef * n, 256)
         output_nef = min(nef * (n + 1), 256)
         block.append(
             DownResidualBlock(input_nef, output_nef, norm_layer, nl_layer))
     block += [nl_layer(), nn.AdaptiveAvgPool2d(1)]
     self.encode = nn.Sequential(*block)
     self.fc = spectral_norm(nn.Linear(output_nef, n_style))
     self.fcVar = spectral_norm(nn.Linear(output_nef, n_style))
Code Example #4
File: networks.py Project: touyu/CartoonSNGAN2
    def __init__(self, in_nc, out_nc, nf=32):
        super(Discriminator, self).__init__()
        self.input_nc = in_nc
        self.output_nc = out_nc
        self.nf = nf
        self.activation = nn.LeakyReLU(0.2, True)

        self.conv1 = nn.Sequential(nn.Conv2d(in_nc, nf, 3, 1, 1),
                                   nn.LeakyReLU(0.2, True))

        self.conv2 = nn.Sequential(
            nn.Conv2d(nf, nf * 2, 3, 2, 1), nn.LeakyReLU(0.2, True),
            spectral_norm(nn.Conv2d(nf * 2, nf * 4, 3, 1, 1)),
            nn.LeakyReLU(0.2, True))

        self.conv3 = nn.Sequential(
            nn.Conv2d(nf * 4, nf * 4, 3, 2, 1), nn.LeakyReLU(0.2, True),
            spectral_norm(nn.Conv2d(nf * 4, nf * 8, 3, 1, 1)),
            nn.LeakyReLU(0.2, True))

        self.conv4 = nn.Sequential(
            spectral_norm(nn.Conv2d(nf * 8, nf * 8, 3, 1, 1)),
            nn.LeakyReLU(0.2, True))

        self.conv5 = nn.Sequential(nn.Conv2d(nf * 8, out_nc, 3, 1, 1))
Code Example #5
File: blocks.py Project: savadikarc/ISIC2019
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel=3,
                 use_spectral_norm=True):

        super(OptimizedD_Block, self).__init__()

        self.relu0 = nn.ReLU()

        conv1 = conv_layer(in_channels,
                           out_channels,
                           kernel,
                           padding=1,
                           stride=2)
        self.conv1 = spectral_norm(conv1) if use_spectral_norm else conv1
        self.act1 = nn.ReLU()

        # same i/p -> o/p dimensions
        conv = conv_layer(out_channels,
                          out_channels,
                          kernel,
                          padding=1,
                          stride=1)
        self.conv = spectral_norm(conv) if use_spectral_norm else conv

        conv2 = conv_layer(in_channels,
                           out_channels,
                           kernel,
                           padding=1,
                           stride=2)
        self.conv2 = spectral_norm(conv2) if use_spectral_norm else conv2
Code Example #6
def conv_block(in_channels,
               out_channels,
               kernel_size=3,
               stride=1,
               dilation=1,
               bias=True):
    """Conv block used in MSDilationBlock."""

    return nn.Sequential(
        spectral_norm(
            nn.Conv2d(in_channels,
                      out_channels,
                      kernel_size=kernel_size,
                      stride=stride,
                      dilation=dilation,
                      padding=((kernel_size - 1) // 2) * dilation,
                      bias=bias)),
        nn.LeakyReLU(0.2),
        spectral_norm(
            nn.Conv2d(out_channels,
                      out_channels,
                      kernel_size=kernel_size,
                      stride=stride,
                      dilation=dilation,
                      padding=((kernel_size - 1) // 2) * dilation,
                      bias=bias)),
    )
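
A hedged usage sketch for this helper (sizes chosen only for illustration; assumes import torch). With stride=1, the dilation-aware padding keeps the spatial resolution unchanged:

block = conv_block(64, 64, kernel_size=3, dilation=2)
out = block(torch.randn(1, 64, 32, 32))  # -> (1, 64, 32, 32)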
Code Example #7
    def __init__(self, in_channels, out_channels, stride=1):
        super(ResBlockDiscriminator, self).__init__()

        self.conv1 = nn.Conv2d(in_channels, out_channels, 3, 1, padding=1)
        self.conv2 = nn.Conv2d(out_channels, out_channels, 3, 1, padding=1)
        nn.init.xavier_uniform_(self.conv1.weight.data, 1.)
        nn.init.xavier_uniform_(self.conv2.weight.data, 1.)

        if stride == 1:
            self.model = nn.Sequential(nn.ReLU(), spectral_norm(self.conv1),
                                       nn.ReLU(), spectral_norm(self.conv2))
        else:
            self.model = nn.Sequential(
                nn.ReLU(), spectral_norm(self.conv1), nn.ReLU(),
                spectral_norm(self.conv2),
                nn.AvgPool2d(2, stride=stride, padding=0))
        self.bypass = nn.Sequential()
        if stride != 1:

            self.bypass_conv = nn.Conv2d(in_channels,
                                         out_channels,
                                         1,
                                         1,
                                         padding=0)
            nn.init.xavier_uniform_(self.bypass_conv.weight.data, np.sqrt(2))

            self.bypass = nn.Sequential(
                spectral_norm(self.bypass_conv),
                nn.AvgPool2d(2, stride=stride, padding=0))
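
The forward pass is not part of the snippet; in this SN-GAN style residual discriminator block it would typically be the sum of the two paths. A minimal sketch:

    def forward(self, x):
        # main path plus the (optionally downsampled) bypass
        return self.model(x) + self.bypass(x)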
Code Example #8
File: blocks.py Project: savadikarc/ISIC2019
    def __init__(self,
                 in_channels,
                 out_channels,
                 n_classes,
                 kernel=3,
                 use_spectral_norm=True,
                 upsample=True,
                 negative_slope=0.1):

        super(res_block_g, self).__init__()

        # self.bn0 = nn.BatchNorm2d(in_channels) # ConditionalBN(n_classes, in_channels)
        self.bn0 = ConditionalBN(n_classes, in_channels)
        self.relu0 = nn.ReLU()

        conv1 = conv_layer(in_channels, out_channels, kernel, padding=1)
        self.conv1 = spectral_norm(conv1) if use_spectral_norm else conv1
        # self.bn1 = nn.BatchNorm2d(out_channels) # ConditionalBN(n_classes, num_features=out_channels)
        self.bn1 = ConditionalBN(n_classes, num_features=out_channels)
        self.act1 = nn.ReLU()

        conv2 = conv_layer(out_channels, out_channels, kernel, padding=1)
        self.conv2 = spectral_norm(conv2) if use_spectral_norm else conv2

        self.learnable_sc = (in_channels != out_channels) or upsample
        if self.learnable_sc:
            conv_sc = conv_layer(in_channels, out_channels, 1, padding=0)
            self.conv_sc = spectral_norm(
                conv_sc) if use_spectral_norm else conv_sc

        if upsample:
            self.upsample0 = nn.Upsample(scale_factor=2)
            self.upsample1 = nn.Upsample(scale_factor=2)

        self.upsample = upsample
Code Example #9
File: blocks.py Project: savadikarc/ISIC2019
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel=3,
                 use_spectral_norm=True,
                 downsample=True,
                 negative_slope=0.1):

        super(res_block_d, self).__init__()

        self.relu0 = nn.ReLU()

        conv1 = conv_layer(in_channels, out_channels, kernel, padding=1)
        self.conv1 = spectral_norm(conv1) if use_spectral_norm else conv1
        self.act1 = nn.ReLU()

        conv2 = conv_layer(out_channels, out_channels, 1, padding=0)
        self.conv2 = spectral_norm(conv2) if use_spectral_norm else conv2

        self.learnable_sc = (in_channels != out_channels) or downsample
        if self.learnable_sc:
            conv_sc = conv_layer(in_channels, out_channels, kernel, padding=1)
            self.conv_sc = spectral_norm(
                conv_sc) if use_spectral_norm else conv_sc

        if downsample:
            self.downsample0 = nn.AvgPool2d(kernel_size=2)
            self.downsample1 = nn.AvgPool2d(kernel_size=2)

        self.downsample = downsample
Code Example #10
File: blocks.py Project: savadikarc/ISIC2019
    def __init__(self, C, use_spectral_norm=True, downsample=True):
        super(SelfAttention, self).__init__()
        self.C = C

        self.downsample = downsample

        self.gamma = nn.Parameter(torch.zeros(1), requires_grad=True)
        theta_conv = conv_layer(self.C, self.C // 8, kernel_size=1)
        self.theta_conv = spectral_norm(
            theta_conv) if use_spectral_norm else theta_conv

        phi_conv = conv_layer(self.C, self.C // 8, kernel_size=1)

        if self.downsample:
            self.mPool1 = nn.MaxPool2d(2)
            self.mPool2 = nn.MaxPool2d(2)

        self.phi_conv = spectral_norm(
            phi_conv) if use_spectral_norm else phi_conv

        g_conv = conv_layer(self.C, self.C // 2, kernel_size=1)
        self.g_conv = spectral_norm(g_conv) if use_spectral_norm else g_conv

        self.softmax = nn.Softmax(dim=-1)

        final_conv = conv_layer(self.C // 2, self.C, kernel_size=1)
        self.final_conv = spectral_norm(
            final_conv) if use_spectral_norm else final_conv
Code Example #11
File: main.py Project: shilongshen/DCGAN
 def __init__(self, ngpu):
     super(Generator, self).__init__()
     self.ngpu = ngpu
     self.main = nn.Sequential(
         # input is Z, going into a convolution
         spectral_norm(nn.ConvTranspose2d(nz, ngf * 8, 4, 1, 0,
                                          bias=False)),
         nn.BatchNorm2d(ngf * 8),
         nn.ReLU(True),
         # state size. (ngf*8) x 4 x 4
         spectral_norm(
             nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False)),
         nn.BatchNorm2d(ngf * 4),
         nn.ReLU(True),
         # state size. (ngf*4) x 8 x 8
         spectral_norm(
             nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False)),
         nn.BatchNorm2d(ngf * 2),
         nn.ReLU(True),
         # state size. (ngf*2) x 16 x 16
         spectral_norm(nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1,
                                          bias=False)),
         nn.BatchNorm2d(ngf),
         nn.ReLU(True),
         # state size. (ngf) x 32 x 32
         spectral_norm(nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False)),
         nn.Tanh()
         # state size. (nc) x 64 x 64
     )
Code Example #12
    def __init__(self,
                 num_classes,
                 use_spectral_norm=True,
                 use_attention=False,
                 base_filters=64):

        super(Discriminator32_FakeClass, self).__init__()

        self.num_classes = num_classes
        self.use_attention = use_attention

        self.block1 = res_block_d(3,
                                  base_filters,
                                  use_spectral_norm=use_spectral_norm)  # 16x16
        self.block2 = res_block_d(base_filters,
                                  base_filters,
                                  use_spectral_norm=use_spectral_norm)  # 8x8
        self.block3 = res_block_d(base_filters,
                                  base_filters,
                                  use_spectral_norm=use_spectral_norm)  # 4x4

        final_linear = linear_layer(in_features=base_filters, out_features=1)
        self.final_linear = spectral_norm(
            final_linear) if use_spectral_norm else final_linear

        embedding = embedding_layer(num_embeddings=num_classes,
                                    embedding_dim=base_filters)
        self.embedding = spectral_norm(
            embedding) if use_spectral_norm else embedding

        if use_attention:

            self.att = SelfAttention(base_filters)
Code Example #13
 def __init__(self, depth=4, down=2, shape=64):
     super().__init__()
     self.preprocess = spectral_norm(
         nn.Conv2d(1 + 1 + 42, 128, 3, padding=1))
     self.blocks = nn.ModuleList([
         nn.Sequential(
             spectral_norm(
                 nn.Conv2d(128 * 2**(idx // down),
                           128 * 2**(idx // down),
                           3,
                           padding=1)), nn.LeakyReLU(),
             spectral_norm(
                 nn.Conv2d(128 * 2**(idx // down),
                           128 * 2**(idx // down),
                           3,
                           padding=1)), nn.LeakyReLU(),
             spectral_norm(
                 nn.Conv2d(128 * 2**(idx // down),
                           128 * 2**((idx + 1) // down),
                           3,
                           padding=1)), nn.LeakyReLU())
         for idx in range(depth)
     ])
     self.predict = spectral_norm(nn.Linear(128 * 2**(depth // down), 1))
     self.shape = shape
     self.down = down
Code Example #14
    def __init__(self, ndf=512, nc=3):
        super(NetD, self).__init__()

        self.layer1 = self.layer(3, ndf // 8)
        self.layer2 = self.layer(ndf // 8, ndf // 4)
        self.layer3 = self.layer(ndf // 4, ndf // 2)
        self.layer4 = spectral_norm(nn.Conv2d(ndf // 2, ndf, nc, 1, 1))
        self.linear = spectral_norm(nn.Linear(ndf * 4 * 4, 1))
Code Example #15
 def __init__(self, in_size, size):
     super().__init__()
     self.preprocess = spectral_norm(nn.Conv1d(in_size, size, 3, padding=1))
     self.blocks = nn.ModuleList([
         spectral_norm(
             nn.Conv1d(size, size, 3, dilation=2**idx, padding=2**idx))
         for idx in range(4)
     ])
Code Example #16
    def __init__(self,
                 input_nc,
                 ndf=64,
                 n_layers=3,
                 norm_layer=nn.BatchNorm2d,
                 use_sigmoid=False,
                 getIntermFeat=False):
        super(NLayerDiscriminator, self).__init__()
        self.getIntermFeat = getIntermFeat
        self.n_layers = n_layers

        kw = 4
        padw = int(np.ceil((kw - 1.0) / 2))
        sequence = [[
            nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw),
            nn.LeakyReLU(0.2, True)
        ]]

        nf = ndf
        for n in range(1, n_layers):
            nf_prev = nf
            nf = min(nf * 2, 512)
            sequence += [[
                spectral_norm(
                    nn.Conv2d(nf_prev,
                              nf,
                              kernel_size=kw,
                              stride=2,
                              padding=padw)),
                norm_layer(nf),
                nn.LeakyReLU(0.2, True)
            ]]

        nf_prev = nf
        nf = min(nf * 2, 512)
        sequence += [[
            spectral_norm(
                nn.Conv2d(nf_prev, nf, kernel_size=kw, stride=1,
                          padding=padw)),
            norm_layer(nf),
            nn.LeakyReLU(0.2, True)
        ]]

        sequence += [[
            nn.Conv2d(nf, 1, kernel_size=kw, stride=1, padding=padw)
        ]]

        if use_sigmoid:
            sequence += [[nn.Sigmoid()]]

        if getIntermFeat:
            for n in range(len(sequence)):
                setattr(self, 'model' + str(n), nn.Sequential(*sequence[n]))
        else:
            sequence_stream = []
            for n in range(len(sequence)):
                sequence_stream += sequence[n]
            self.model = nn.Sequential(*sequence_stream)
Code Example #17
    def build_discriminator(self):

        D = nn.Sequential(
            # 3 -> 8
            spectral_norm(
                nn.Conv2d(3,
                          self.ndf * 2,
                          kernel_size=3,
                          stride=2,
                          padding=1,
                          bias=False)),
            nn.BatchNorm2d(self.ndf * 2),
            nn.LeakyReLU(0.2, inplace=True),

            # 8 -> 16
            spectral_norm(
                nn.Conv2d(self.ndf * 2,
                          self.ndf * 4,
                          kernel_size=3,
                          stride=2,
                          padding=1,
                          bias=False)),
            nn.BatchNorm2d(self.ndf * 4),
            nn.LeakyReLU(0.2, inplace=True),

            # 16 -> 32
            spectral_norm(
                nn.Conv2d(self.ndf * 4,
                          self.ndf * 8,
                          kernel_size=3,
                          stride=2,
                          padding=1,
                          bias=False)),
            nn.BatchNorm2d(self.ndf * 8),
            nn.LeakyReLU(0.2, inplace=True),
            SelfAttention(C=self.ndf * 8),

            # 32 -> 64
            spectral_norm(
                nn.Conv2d(self.ndf * 8,
                          self.ndf * 16,
                          kernel_size=3,
                          stride=2,
                          padding=1,
                          bias=False)),
            nn.BatchNorm2d(self.ndf * 16),
            nn.LeakyReLU(0.2, inplace=True),
            SelfAttention(C=self.ndf * 16),
            spectral_norm(
                nn.Conv2d(self.ndf * 16,
                          1,
                          kernel_size=2,
                          stride=1,
                          padding=0,
                          bias=True)))

        return D
Code Example #18
    def build_generator(self):
        # merged_x -> (B, nZ, 1, 1)

        nZ = self.nZ

        self.conv1 = spectral_norm(
            nn.ConvTranspose2d(nZ,
                               self.ndf * 32,
                               kernel_size=4,
                               stride=2,
                               bias=False,
                               padding=0))
        self.bn1 = ConditionalBN(self.num_classes, self.ndf * 32)
        self.act1 = nn.ReLU()

        self.conv2 = spectral_norm(
            nn.ConvTranspose2d(self.ndf * 32,
                               self.ndf * 16,
                               kernel_size=4,
                               stride=2,
                               bias=False,
                               padding=1))
        self.bn2 = ConditionalBN(self.num_classes, self.ndf * 16)
        self.act2 = nn.ReLU()

        self.attn1 = SelfAttention(C=self.ndf * 16)

        self.conv3 = spectral_norm(
            nn.ConvTranspose2d(self.ndf * 16,
                               self.ndf * 8,
                               kernel_size=4,
                               stride=2,
                               padding=1,
                               bias=False))
        self.bn3 = ConditionalBN(self.num_classes, self.ndf * 8)
        self.act3 = nn.ReLU()

        self.attn2 = SelfAttention(C=self.ndf * 8)

        self.conv4 = spectral_norm(
            nn.ConvTranspose2d(self.ndf * 8,
                               self.ndf * 4,
                               kernel_size=4,
                               stride=2,
                               padding=1,
                               bias=False))
        self.bn4 = ConditionalBN(self.num_classes, self.ndf * 4)
        self.act4 = nn.ReLU()

        self.conv5 = nn.Conv2d(self.ndf * 4,
                               3,
                               kernel_size=1,
                               stride=1,
                               padding=0,
                               bias=True)
        self.tanh = nn.Tanh()
Code Example #19
def build_norm_layer(norm_type, param=None, num_feats=None):
    if norm_type == 'bnorm':
        return nn.BatchNorm1d(num_feats)
    elif norm_type == 'snorm':
        spectral_norm(param)
        return None
    elif norm_type is None:
        return None
    else:
        raise TypeError('Unrecognized norm type: ', norm_type)
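
A hedged usage sketch. Note that for 'snorm' the function applies spectral_norm to the module passed as param in place and returns None, while 'bnorm' returns a new BatchNorm1d layer:

fc = nn.Linear(128, 128)
build_norm_layer('snorm', param=fc)            # fc now carries spectral normalization
bn = build_norm_layer('bnorm', num_feats=128)  # separate layer to place after fc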
Code Example #20
 def __init__(self, in_channels, out_channels):
     super().__init__()
     self.shortcut = nn.Sequential(
         nn.AvgPool2d(2),
         spectral_norm(nn.Conv2d(in_channels, out_channels, 1, 1, 0)))
     self.residual = nn.Sequential(
         spectral_norm(nn.Conv2d(in_channels, out_channels, 3, 1, 1)),
         nn.ReLU(),
         spectral_norm(nn.Conv2d(out_channels, out_channels, 3, 1, 1)),
         nn.AvgPool2d(2))
Code Example #21
 def initialize(self):
     for m in self.residual.modules():
         if isinstance(m, nn.Conv2d):
             init.xavier_uniform_(m.weight, math.sqrt(2))
             init.zeros_(m.bias)
             spectral_norm(m)
     for m in self.shortcut.modules():
         if isinstance(m, nn.Conv2d):
             init.xavier_uniform_(m.weight)
             init.zeros_(m.bias)
             spectral_norm(m)
Code Example #22
    def __init__(self, NUM_CLASSES=10):
        super(ProjectionDiscriminator_nobn, self).__init__()

        self.ndf = 64

        self.embedding = spectral_norm(nn.Embedding(NUM_CLASSES,
                                                    self.ndf * 16))

        self.discriminator = self.build_discriminator()
        self.final_linear = spectral_norm(
            nn.Linear(self.ndf * 16, 1, bias=True))
Code Example #23
 def use_s_norm(self, inp, outp, k, s, p, b = None):
     if self.s_norm:
         if b is None:
             return spectral_norm(nn.Conv2d(inp, outp, kernel_size=k, stride=s, padding=p))
         else:
             return spectral_norm(nn.Conv2d(inp, outp, kernel_size=k, stride=s, padding=p, bias = b))
     else:
         if b is None:
             return nn.Conv2d(inp, outp, kernel_size=k, stride=s, padding=p)
         else:
             return nn.Conv2d(inp, outp, kernel_size=k, stride=s, padding=p, bias = b)
Code Example #24
    def build_generator(self):
        # merged_x -> (B, nZ, 1, 1)

        nZ = self.nZ

        G = nn.Sequential(
            # IN: (-1, nZ, 1, 1)
            spectral_norm(
                nn.ConvTranspose2d(nZ,
                                   self.ndf * 32,
                                   kernel_size=4,
                                   stride=2,
                                   bias=False,
                                   padding=0)),
            nn.BatchNorm2d(self.ndf * 32),
            nn.ReLU(inplace=True),
            spectral_norm(
                nn.ConvTranspose2d(self.ndf * 32,
                                   self.ndf * 16,
                                   kernel_size=4,
                                   stride=2,
                                   bias=False,
                                   padding=1)),
            nn.BatchNorm2d(self.ndf * 16),
            nn.ReLU(inplace=True),
            SelfAttention(C=self.ndf * 16),
            spectral_norm(
                nn.ConvTranspose2d(self.ndf * 16,
                                   self.ndf * 8,
                                   kernel_size=4,
                                   stride=2,
                                   padding=1,
                                   bias=False)),
            nn.BatchNorm2d(self.ndf * 8),
            nn.ReLU(inplace=True),
            SelfAttention(C=self.ndf * 8),
            spectral_norm(
                nn.ConvTranspose2d(self.ndf * 8,
                                   self.ndf * 4,
                                   kernel_size=4,
                                   stride=2,
                                   padding=1,
                                   bias=False)),
            nn.BatchNorm2d(self.ndf * 4),
            nn.ReLU(inplace=True),
            nn.Conv2d(self.ndf * 4,
                      3,
                      kernel_size=1,
                      stride=1,
                      padding=0,
                      bias=True),
            nn.Tanh())

        return G
Code Example #25
    def __init__(self,
                 num_classes,
                 z_dim=64,
                 base_width=8,
                 base_filters=4,
                 use_spectral_norm=True,
                 use_attention=False):

        super(Generator, self).__init__()

        self.num_classes = num_classes
        self.base_width = base_width
        self.base_filters = base_filters

        self.use_attention = use_attention

        l1 = linear_layer(in_features=z_dim,
                          out_features=(base_width**2) * 16 * base_filters)
        self.l1 = spectral_norm(l1) if use_spectral_norm else l1

        self.block0 = res_block_g(16 * base_filters,
                                  8 * base_filters,
                                  num_classes,
                                  use_spectral_norm=use_spectral_norm)  # 16x16
        self.block1 = res_block_g(8 * base_filters,
                                  4 * base_filters,
                                  num_classes,
                                  use_spectral_norm=use_spectral_norm)  # 32x32
        self.block2 = res_block_g(4 * base_filters,
                                  4 * base_filters,
                                  num_classes,
                                  use_spectral_norm=use_spectral_norm)  # 64x64
        self.block3 = res_block_g(
            4 * base_filters,
            2 * base_filters,
            num_classes,
            use_spectral_norm=use_spectral_norm)  # 128x128
        self.block4 = res_block_g(
            2 * base_filters,
            base_filters,
            num_classes,
            use_spectral_norm=use_spectral_norm)  # 256x256

        # self.bn = ConditionalBN(num_classes, base_filters)# batch_norm(4*base_filters)
        self.bn = batch_norm(base_filters)
        self.act = nn.ReLU()

        conv_l = conv_layer(base_filters, 3, 3, padding=1)
        self.conv_l = spectral_norm(conv_l) if use_spectral_norm else conv_l
        self.tanh = nn.Tanh()

        if use_attention:
            # self.att = SelfAttention(4*base_filters, downsample=True)
            self.att2 = SelfAttention(4 * base_filters, downsample=True)
Code Example #26
 def __init__(self,
              num_features,
              num_con=8,
              eps=1e-5,
              momentum=0.1,
              affine=False,
              track_running_stats=False):
     # affine=False: the scale and shift come from the condition
     # projections (ConAlpha/ConBeta) rather than learned affine parameters
     super(_AdaINorm, self).__init__(num_features, eps, momentum, affine,
                                     track_running_stats)
     self.num_con = num_con
     if num_con > 0:
         self.ConAlpha = spectral_norm(nn.Linear(num_con, num_features))
         self.ConBeta = spectral_norm(nn.Linear(num_con, num_features))
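
A hedged sketch of how ConAlpha and ConBeta would typically be applied. The base class and the real forward are not shown above, so this assumes the parent normalization layer exposes the usual forward(input) and that the features are 4D:

 def forward(self, x, con):
     # normalize without affine parameters, then apply the
     # condition-dependent per-channel scale and shift
     out = super(_AdaINorm, self).forward(x)
     if self.num_con > 0:
         alpha = self.ConAlpha(con)[:, :, None, None]
         beta = self.ConBeta(con)[:, :, None, None]
         out = alpha * out + beta
     return out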
Code Example #27
    def __init__(self, M=32):
        super().__init__()
        self.M = M

        self.main = nn.Sequential(
            # M
            spectral_norm(nn.Conv2d(
                3, 64, kernel_size=3, stride=1, padding=1, bias=False)),
            nn.LeakyReLU(0.1, inplace=True),
            spectral_norm(nn.Conv2d(
                64, 64, kernel_size=4, stride=2, padding=1, bias=False)),
            nn.LeakyReLU(0.1, inplace=True),
            # M / 2
            spectral_norm(nn.Conv2d(
                64, 128, kernel_size=3, stride=1, padding=1, bias=False)),
            nn.LeakyReLU(0.1, inplace=True),
            spectral_norm(nn.Conv2d(
                128, 128, kernel_size=4, stride=2, padding=1, bias=False)),
            nn.LeakyReLU(0.1, inplace=True),
            # M / 4
            spectral_norm(nn.Conv2d(
                128, 256, kernel_size=3, stride=1, padding=1, bias=False)),
            nn.LeakyReLU(0.1, inplace=True),
            spectral_norm(nn.Conv2d(
                256, 256, kernel_size=4, stride=2, padding=1, bias=False)),
            nn.LeakyReLU(0.1, inplace=True),
            # M / 8
            spectral_norm(nn.Conv2d(
                256, 512, kernel_size=3, stride=1, padding=1, bias=False)),
            nn.LeakyReLU(0.1, inplace=True))

        self.linear = spectral_norm(
            nn.Linear(M // 8 * M // 8 * 512, 1, bias=False))
        res_arch_init(self)
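
The forward pass is omitted from the snippet; a minimal sketch, assuming the flatten-then-linear head implied by the M // 8 * M // 8 * 512 input dimension of self.linear:

    def forward(self, x):
        x = self.main(x)                   # (B, 512, M/8, M/8)
        x = torch.flatten(x, start_dim=1)  # (B, M // 8 * M // 8 * 512)
        return self.linear(x)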
Code Example #28
    def __init__(self, embed_dim, max_seq_len, num_rep, vocab_size, padding_idx, norm='none',gpu=False, dropout=0.25):
        super(RelGAN_D, self).__init__(embed_dim, vocab_size, dis_filter_sizes, dis_num_filters, padding_idx,
                                       gpu, dropout)

        self.embed_dim = embed_dim
        self.max_seq_len = max_seq_len
        self.feature_dim = sum(dis_num_filters)
        self.emb_dim_single = int(embed_dim / num_rep)
        self.num_rep = num_rep

        self.embeddings = nn.Linear(vocab_size, embed_dim, bias=False)
        if norm == 'spectral':
            print('use spectral')
            self.convs = nn.ModuleList([
                spectral_norm(nn.Conv2d(1, n, (f, self.emb_dim_single), stride=(1, self.emb_dim_single))) for (n, f) in
                zip(dis_num_filters, dis_filter_sizes)
            ])        
        else:
            self.convs = nn.ModuleList([
                nn.Sequential(
                    nn.Conv2d(1, n, (f, self.emb_dim_single), stride=(1, self.emb_dim_single)),
                )
                     for (n, f) in
                zip(dis_num_filters, dis_filter_sizes)
            ])

        self.highway = nn.Linear(self.feature_dim, self.feature_dim)
        self.feature2out = nn.Linear(self.feature_dim, 100)
        self.out2logits = nn.Linear(100, 1)
        self.dropout = nn.Dropout(dropout)

        self.init_params()
Code Example #29
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 output_padding=0,
                 dilation=1,
                 groups=1,
                 bias=True,
                 padding_mode='zeros'):
        super(MeanSpectralNormTransConv2d, self).__init__()

        self.conv = spectral_norm(
            nn.ConvTranspose2d(in_channels,
                               out_channels,
                               kernel_size=kernel_size,
                               stride=stride,
                               padding=padding,
                               dilation=dilation,
                               groups=groups,
                               bias=bias,
                               padding_mode=padding_mode,
                               output_padding=output_padding))

        self.bias = nn.Parameter(torch.zeros(out_channels, 1))
        self.register_buffer('running_mean', torch.zeros(out_channels))
        self.momentum = 0.1
Code Example #30
 def __init__(self, nZ=10, NUM_CLASSES=10):
     super(ConditionalGenerator, self).__init__()
     self.nZ = nZ
     self.ndf = 64
     self.embedding = spectral_norm(
         nn.Embedding(num_embeddings=NUM_CLASSES, embedding_dim=self.nZ))
     self.generator = self.build_generator()