Example #1
 def __init__(self, code_size, weight_init=None, weight_norm=None):
     super(Generator, self).__init__()
     self.inplane = 8
     self.input_linear = nn.Linear(code_size, 4 * 4 * self.inplane * 32)
     self.model = nn.Sequential(
         *conv2d_trans_bn_lrelu_block(self.inplane * 32,
                                      self.inplane * 16,
                                      4,
                                      2,
                                      1,
                                      normalize=True,
                                      bias=False,
                                      weight_norm=weight_norm),
         *conv2d_trans_bn_lrelu_block(self.inplane * 16,
                                      self.inplane * 8,
                                      4,
                                      2,
                                      1,
                                      normalize=True,
                                      bias=False,
                                      weight_norm=weight_norm),
         *conv2d_trans_bn_lrelu_block(self.inplane * 8,
                                      self.inplane * 4,
                                      4,
                                      2,
                                      1,
                                      normalize=True,
                                      bias=False,
                                      weight_norm=weight_norm),
         apply_weight_norm(
             nn.ConvTranspose2d(self.inplane * 4, 1, 3, 1, 1, bias=False),
             weight_norm), nn.Tanh())
     if weight_init:
         self.apply(weight_init)
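Every example on this page wraps its convolutions in apply_weight_norm, whose definition is not shown here. A minimal sketch of what such a helper could look like, assuming weight_norm is either None or a wrapper such as torch.nn.utils.weight_norm / torch.nn.utils.spectral_norm (this is an assumption, not the original implementation):

def apply_weight_norm(module, weight_norm=None):
    # Hypothetical helper, not the original code: apply the given weight
    # normalization wrapper (e.g. nn.utils.weight_norm or nn.utils.spectral_norm)
    # to the module, or return the module unchanged when none is requested.
    if weight_norm is None:
        return module
    return weight_norm(module)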
Example #2
 def __init__(self, code_size, weight_init=None, weight_norm=None):
     super(DCGenerator, self).__init__()
     self.label_embedding = nn.Embedding(10, 10)
     self.input_linear = nn.Linear(code_size + 10, 512 * 4 * 4)
     self.model = nn.Sequential(
         *conv2d_trans_bn_relu_block(512,
                                     256,
                                     4,
                                     2,
                                     1,
                                     normalize=True,
                                     bias=True,
                                     weight_norm=weight_norm),
         *conv2d_trans_bn_relu_block(256,
                                     128,
                                     4,
                                     2,
                                     1,
                                     normalize=True,
                                     bias=True,
                                     weight_norm=weight_norm),
         *conv2d_trans_bn_relu_block(128,
                                     64,
                                     4,
                                     2,
                                     1,
                                     normalize=True,
                                     bias=True,
                                     weight_norm=weight_norm),
         apply_weight_norm(nn.ConvTranspose2d(64, 3, 3, 1, 1), weight_norm),
         nn.Tanh())
     if weight_init:
         self.apply(weight_init)
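The snippet above stops at __init__. For illustration only (the forward pass is not shown in the source), a plausible forward for this conditional generator concatenates the noise with the embedded label, projects it through input_linear, and reshapes the result into 512-channel 4x4 feature maps before the transposed-conv stack:

 def forward(self, noise, labels):
     # Illustrative sketch, not the original code; assumes `import torch`.
     x = torch.cat([noise, self.label_embedding(labels)], dim=1)
     x = self.input_linear(x)
     x = x.view(x.size(0), 512, 4, 4)  # matches nn.Linear(code_size + 10, 512 * 4 * 4)
     return self.model(x)  # Tanh output in [-1, 1]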
Example #3
 def __init__(self, latent_lst, code_size, weight_init=None, weight_norm=None):
     super(Generator, self).__init__()
     self.inplane = 8
     self.label_embedding = nn.ModuleList()
     input_size = code_size
     for latent in latent_lst:
         if isinstance(latent, int):
             self.label_embedding.append(nn.Embedding(latent, latent))
             input_size += latent
         else:
             self.label_embedding.append(None)
             input_size += 1
     self.input_linear = nn.Linear(input_size, 4 * 4 * self.inplane * 32)
     self.model = nn.Sequential(
         *conv2d_trans_bn_relu_block(self.inplane * 32, self.inplane * 16, 4, 2, 1, normalize=True, bias=False,
                                     weight_norm=weight_norm),
         *conv2d_trans_bn_relu_block(self.inplane * 16, self.inplane * 8, 4, 2, 1, normalize=True, bias=False,
                                     weight_norm=weight_norm),
         *conv2d_trans_bn_relu_block(self.inplane * 8, self.inplane * 4, 4, 2, 1, normalize=True, bias=False,
                                     weight_norm=weight_norm),
         apply_weight_norm(nn.ConvTranspose2d(self.inplane * 4, 1, 3, 1, 1, bias=False), weight_norm),
         nn.Tanh()
     )
     if weight_init:
         self.apply(weight_init)
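Here an int entry in latent_lst adds a categorical code with its own nn.Embedding of that many classes, while any other entry contributes a single continuous input dimension, so the linear layer's input size grows accordingly. A hypothetical construction (the concrete values are assumptions, not from the source):

# InfoGAN-style latent layout: one 10-way categorical code and two
# continuous codes on top of a 62-dim noise vector.
# input_size = 62 + 10 + 1 + 1 = 74.
gen = Generator(latent_lst=[10, 'uniform', 'uniform'], code_size=62)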
Example #4
def conv1d_bn_lrelu_block(in_channels, out_channels, kernel_size, stride, padding, alpha=0.2, normalize=True, bias=True,
                          weight_norm=None):
    conv = apply_weight_norm(nn.Conv1d(in_channels, out_channels, kernel_size, stride, padding, bias=bias), weight_norm)
    layers = [conv]
    if normalize:
        layers.append(nn.BatchNorm1d(out_channels))
    layers.append(nn.LeakyReLU(alpha, inplace=True))
    return layers
Example #5
def conv2d_bn_relu_block(in_channels, out_channels, kernel_size, stride, padding, normalize=True, bias=True,
                         weight_norm=None):
    """ conv2d + batchnorm (optional) + relu """
    conv = apply_weight_norm(nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, bias=bias), weight_norm)
    layers = [conv]
    if normalize:
        layers.append(nn.BatchNorm2d(out_channels))
    layers.append(nn.ReLU(inplace=True))
    return layers
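These block builders return plain lists of layers, so they are meant to be unpacked into nn.Sequential, just as the generators above do with their transposed-conv counterparts. A minimal usage sketch (the channel sizes and the choice to skip weight normalization are arbitrary, for illustration only):

encoder = nn.Sequential(
    # Two stride-2 conv blocks: 3 -> 64 -> 128 channels, spatial size halved twice.
    *conv2d_bn_relu_block(3, 64, 4, 2, 1, normalize=False, weight_norm=None),
    *conv2d_bn_relu_block(64, 128, 4, 2, 1, normalize=True, weight_norm=None),
)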