Example #1
def __init__(self, latent_dim, n_c, x_shape, verbose=False):
    super(Generator_CNN, self).__init__()
    self.name = 'generator'
    self.latent_dim = latent_dim
    self.n_c = n_c
    self.x_shape = x_shape
    self.ishape = (128, 7, 7)
    self.iels = int(np.prod(self.ishape))
    self.verbose = verbose
    # Fully connected stack maps the (latent + cluster) vector up to a 128x7x7 feature map
    self.model0 = nn.Sequential(
        nn.Linear(self.latent_dim + self.n_c, 1024))
    self.model1 = nn.Sequential(nn.BatchNorm1d(1024), nn.Leaky_relu(0.2))
    self.model2 = nn.Sequential(nn.Linear(1024, self.iels),
                                nn.BatchNorm1d(self.iels), nn.Leaky_relu(0.2))
    # Transposed convolutions upsample 7x7 -> 14x14 -> 28x28
    self.model3 = nn.Sequential(
        Reshape(self.ishape),  # Reshape is a helper module defined elsewhere in the source
        nn.ConvTranspose(128, 64, 4, stride=2, padding=1, bias=True),
        nn.BatchNorm(64), nn.Leaky_relu(0.2))
    self.model4 = nn.Sequential(
        nn.ConvTranspose(64, 1, 4, stride=2, padding=1, bias=True))
    self.sigmoid = nn.Sigmoid()
    initialize_weights(self)
    if self.verbose:
        print('Setting up {}...\n'.format(self.name))
        print(self)  # the layers live in model0..model4, so print the whole module
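For context, a forward pass would simply chain the five sub-models; in Jittor the forward method is named execute. A minimal sketch under that assumption:

    def execute(self, zn_zc):
        # zn_zc: concatenation of the continuous latent vector and the one-hot cluster code
        x = self.model0(zn_zc)
        x = self.model1(x)
        x = self.model2(x)
        x = self.model3(x)
        x = self.model4(x)
        return self.sigmoid(x)  # pixel intensities in (0, 1)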
Example #2
def __init__(self):
    super(Encoder, self).__init__()
    # img_shape and opt are globals defined by the surrounding script
    self.model = nn.Sequential(nn.Linear(int(np.prod(img_shape)), 512),
                               nn.Leaky_relu(0.2), nn.Linear(512, 512),
                               nn.BatchNorm1d(512), nn.Leaky_relu(0.2))
    self.mu = nn.Linear(512, opt.latent_dim)
    self.logvar = nn.Linear(512, opt.latent_dim)
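Since the encoder outputs a mean and a log-variance, its forward pass would typically sample a latent code via the reparameterization trick. A minimal sketch, assuming import jittor as jt and import numpy as np, with Jittor's execute in place of forward:

    def execute(self, img):
        x = self.model(img.reshape((img.shape[0], -1)))  # flatten the image
        mu, logvar = self.mu(x), self.logvar(x)
        # Reparameterization trick: z = mu + sigma * eps with eps ~ N(0, 1)
        std = jt.exp(logvar / 2)
        eps = jt.array(np.random.normal(0, 1, (mu.shape[0], opt.latent_dim)).astype(np.float32))
        return mu + std * eps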
Example #3
def __init__(self):
    super(Decoder, self).__init__()
    self.model = nn.Sequential(nn.Linear(opt.latent_dim, 512),
                               nn.Leaky_relu(0.2), nn.Linear(512, 512),
                               nn.BatchNorm1d(512), nn.Leaky_relu(0.2),
                               nn.Linear(512, int(np.prod(img_shape))),
                               nn.Tanh())  # Tanh keeps outputs in (-1, 1) to match normalized images
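The decoder's forward pass then only needs to fold the flat output back into an image; img_shape is the same global (channels, height, width) tuple used above. A minimal sketch:

    def execute(self, z):
        img_flat = self.model(z)
        return img_flat.reshape((img_flat.shape[0], *img_shape))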
Example #4
def __init__(self, in_size, out_size, normalize=True, dropout=0.0):
    super(UNetDown, self).__init__()
    layers = [nn.Conv(in_size, out_size, 3, stride=2, padding=1, bias=False)]
    if normalize:
        # Note: the second positional argument of BatchNorm is eps, so this sets eps=0.8
        layers.append(nn.BatchNorm(out_size, 0.8))
    layers.append(nn.Leaky_relu(0.2))
    if dropout:
        layers.append(nn.Dropout(dropout))  # apply dropout when requested
    self.model = nn.Sequential(*layers)
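A typical pix2pix-style encoder stacks these blocks with the first one unnormalized; the channel sizes below are illustrative:

    down1 = UNetDown(3, 64, normalize=False)  # first block usually skips normalization
    down2 = UNetDown(64, 128)
    down3 = UNetDown(128, 256, dropout=0.5)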
Example #5
def discriminator_block(in_filters, out_filters, normalize=True):
    """Returns the downsampling layers of each discriminator block."""
    layers = [nn.Conv(in_filters, out_filters, 4, stride=2, padding=1)]
    if normalize:
        layers.append(nn.BatchNorm(out_filters, 0.8))  # 0.8 is passed positionally as eps
    layers.append(nn.Leaky_relu(0.2))
    return layers
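Because the helper returns a plain list, blocks can be unpacked straight into one nn.Sequential. A sketch with illustrative channel counts (opt.channels assumed from the surrounding script):

    model = nn.Sequential(
        *discriminator_block(opt.channels, 16, normalize=False),
        *discriminator_block(16, 32),
        *discriminator_block(32, 64),
        *discriminator_block(64, 128),
    )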
Example #6
def discriminator_block(in_filters, out_filters, stride, normalize):
    """Returns the layers of each discriminator block."""
    layers = [nn.Conv(in_filters, out_filters, 3, stride=stride, padding=1)]
    if normalize:
        layers.append(nn.InstanceNorm2d(out_filters))  # instance norm instead of batch norm
    layers.append(nn.Leaky_relu(scale=0.2))
    return layers
Example #7
def __init__(self):
    super(Generator, self).__init__()
    self.init_size = opt.img_size // 4  # two 2x upsamplings restore the full resolution
    self.l1 = nn.Sequential(nn.Linear(opt.latent_dim, 128 * self.init_size ** 2))
    self.conv_blocks = nn.Sequential(
        nn.Upsample(scale_factor=2),
        nn.Conv(128, 128, 3, stride=1, padding=1),
        nn.BatchNorm(128, 0.8),
        nn.Leaky_relu(0.2),
        nn.Upsample(scale_factor=2),
        nn.Conv(128, 64, 3, stride=1, padding=1),
        nn.BatchNorm(64, 0.8),
        nn.Leaky_relu(0.2),
        nn.Conv(64, opt.channels, 3, stride=1, padding=1),
        nn.Tanh()
    )
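The matching forward pass reshapes the linear output into a 128-channel map of size init_size x init_size before the conv stack; a minimal sketch using Jittor's execute:

    def execute(self, z):
        out = self.l1(z)
        out = out.reshape((out.shape[0], 128, self.init_size, self.init_size))
        return self.conv_blocks(out)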
Example #8
def __init__(self, wass_metric=False, verbose=False):
    super(Discriminator_CNN, self).__init__()
    self.name = 'discriminator'
    self.channels = 1
    self.cshape = (128, 5, 5)
    self.iels = int(np.prod(self.cshape))
    self.lshape = (self.iels,)
    self.wass = wass_metric
    self.verbose = verbose
    self.model = nn.Sequential(
        nn.Conv(self.channels, 64, 4, stride=2, bias=True),
        nn.Leaky_relu(0.2), nn.Conv(64, 128, 4, stride=2, bias=True),
        nn.Leaky_relu(0.2), Reshape(self.lshape),  # Reshape is a helper module defined elsewhere
        nn.Linear(self.iels, 1024), nn.Leaky_relu(0.2), nn.Linear(1024, 1))
    if not self.wass:
        # Standard GAN loss squashes the score to a probability;
        # a Wasserstein critic keeps the raw linear output
        self.model = nn.Sequential(self.model, nn.Sigmoid())
    initialize_weights(self)
    if self.verbose:
        print('Setting up {}...\n'.format(self.name))
        print(self.model)
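The forward pass is then a single call into the stacked model. A minimal execute sketch:

    def execute(self, img):
        # Probability in (0, 1) for the standard GAN loss,
        # unbounded critic score when wass_metric=True
        return self.model(img)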
Example #9
def __init__(self, latent_dim, n_c, verbose=False):
    super(Encoder_CNN, self).__init__()
    self.name = 'encoder'
    self.channels = 1
    self.latent_dim = latent_dim
    self.n_c = n_c
    self.cshape = (128, 5, 5)
    self.iels = int(np.prod(self.cshape))
    self.lshape = (self.iels,)
    self.verbose = verbose
    self.model = nn.Sequential(
        nn.Conv(self.channels, 64, 4, stride=2, bias=True),
        nn.Leaky_relu(0.2), nn.Conv(64, 128, 4, stride=2, bias=True),
        nn.Leaky_relu(0.2), Reshape(self.lshape),
        nn.Linear(self.iels, 1024), nn.Leaky_relu(0.2),
        nn.Linear(1024, latent_dim + n_c))  # continuous latent part plus cluster logits
    initialize_weights(self)
    if self.verbose:
        print('Setting up {}...\n'.format(self.name))
        print(self.model)
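The encoder's output concatenates a continuous code of length latent_dim with n_c cluster logits, so a forward pass would split it accordingly. A sketch, assuming Jittor's nn.softmax:

    def execute(self, img):
        z = self.model(img)
        zn = z[:, :self.latent_dim]         # continuous latent variables
        zc_logits = z[:, self.latent_dim:]  # cluster logits
        zc = nn.softmax(zc_logits, dim=1)   # soft cluster assignment
        return zn, zc, zc_logits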
Example #10
def __init__(self, dim=3):
    super(discriminator, self).__init__()
    self.conv1 = nn.Conv(dim, 64, 5, 2, 2)  # kernel 5, stride 2, padding 2
    self.conv2 = nn.Conv(64, 128, 5, 2, 2)
    self.conv2_bn = nn.BatchNorm(128)
    self.conv3 = nn.Conv(128, 256, 5, 2, 2)
    self.conv3_bn = nn.BatchNorm(256)
    self.conv4 = nn.Conv(256, 512, 5, 2, 2)
    self.conv4_bn = nn.BatchNorm(512)
    self.fc = nn.Linear(512 * 7 * 7, 1)  # assumes 112x112 inputs: four stride-2 convs leave 7x7
    self.leaky_relu = nn.Leaky_relu()    # default negative slope (0.01); GANs often use 0.2
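A forward pass would interleave the convolutions with batch norm and the shared activation, then flatten for the final score. A minimal execute sketch:

    def execute(self, x):
        x = self.leaky_relu(self.conv1(x))
        x = self.leaky_relu(self.conv2_bn(self.conv2(x)))
        x = self.leaky_relu(self.conv3_bn(self.conv3(x)))
        x = self.leaky_relu(self.conv4_bn(self.conv4(x)))
        x = x.reshape((x.shape[0], -1))  # flatten to (batch, 512*7*7)
        return self.fc(x)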
Example #11
def __init__(self):
    super(Discriminator, self).__init__()
    self.model = nn.Sequential(nn.Linear(opt.latent_dim, 512),
                               nn.Leaky_relu(0.2), nn.Linear(512, 256),
                               nn.Leaky_relu(0.2), nn.Linear(256, 1),
                               nn.Sigmoid())
Example #12
def downsample(in_feat, out_feat, normalize=True):
    layers = [nn.Conv(in_feat, out_feat, 4, stride=2, padding=1)]
    if normalize:
        layers.append(nn.BatchNorm(out_feat, eps=0.8))
    layers.append(nn.Leaky_relu(scale=0.2))
    return layers
Example #13
def __init__(self):
    super(Discriminator, self).__init__()
    self.model = nn.Sequential(nn.Linear(int(np.prod(img_shape)), 512),
                               nn.Leaky_relu(0.2), nn.Linear(512, 256),
                               nn.Leaky_relu(0.2), nn.Linear(256, 1),
                               nn.Sigmoid())
Example #14
def block(in_feat, out_feat, normalize=True):
    layers = [nn.Linear(in_feat, out_feat)]
    if normalize:
        layers.append(nn.BatchNorm1d(out_feat, 0.8))  # 0.8 is passed positionally as eps
    layers.append(nn.Leaky_relu(0.2))
    return layers
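This helper is the classic building block of an MLP generator: unpacked blocks feed a final projection to the flattened image size. A sketch with the usual channel progression (opt and img_shape assumed from the surrounding script):

    self.model = nn.Sequential(
        *block(opt.latent_dim, 128, normalize=False),
        *block(128, 256),
        *block(256, 512),
        *block(512, 1024),
        nn.Linear(1024, int(np.prod(img_shape))),
        nn.Tanh()
    )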