# Encoder: either the morph-network variant (which can block morph-loss
# gradients from reaching Gz) or the plain variance-capped encoder.
if args.use_morph_network:
    Gz = EncoderMorphNet28(
        args.l_size,
        args.h_size,
        args.use_mish,
        n_channels=1,
        cap_variance=True,
        block_Gz_morph_grads=args.no_morph_loss_on_Gz,
    )
else:
    Gz = Encoder28(
        args.l_size,
        args.h_size,
        args.use_mish,
        n_channels=1,
        cap_variance=True,
    )

# Generator/decoder producing sigmoid-activated single-channel images.
Gx = Generator28(
    args.l_size,
    args.h_size,
    args.use_mish,
    n_channels=1,
    sigmoid_out=True,
)

# Joint (x, z) discriminator for the ALI objective.
D = ALIDiscriminator28(
    args.l_size,
    args.h_size,
    use_bn=args.use_batchnorm_in_D,
    use_mish=args.use_mish,
    n_channels=1,
    dropout=args.dropout_rate,
    fc_h_size=args.fc_h_size,
)

if args.use_morph_network:
    # Generator optimizer over Gx plus only Gz's non-morph parameters.
    generator_params = list(Gz.Gz_params()) + list(Gx.parameters())
    G_no_mn_optimizer = torch.optim.Adam(
        generator_params, lr=args.lr, betas=(0.5, 0.999)
    )
# Beispiel #2
# 0
# Cropped CelebA training set, resized to 28x28 and rescaled to [-1, 1].
celeba_transform = transforms.Compose([
    transforms.Resize(28),
    transforms.ToTensor(),
    transforms.Lambda(lambda img: img * 2 - 1),
])
dataset = CelebaCropped(split="train", download=True, transform=celeba_transform)
dataloader = torch.utils.data.DataLoader(
    dataset,
    batch_size=args.batch_size,
    shuffle=True,
    num_workers=12,
)

# GAN pair for 3-channel 28x28 images; generator bias is CLI-controlled.
G = Generator28(
    args.l_size,
    args.h_size,
    args.use_mish,
    not args.no_bias_in_G,
    n_channels=3,
)
D = Discriminator28(
    args.h_size,
    use_bn=args.use_batchnorm_in_D,
    use_mish=args.use_mish,
    n_channels=3,
    dropout=args.dropout_rate,
)

# One Adam optimizer per network with the usual GAN betas.
G_optimizer = torch.optim.Adam(G.parameters(), lr=args.lr, betas=(0.5, 0.999))
D_optimizer = torch.optim.Adam(D.parameters(), lr=args.lr, betas=(0.5, 0.999))

if args.cuda:
    G = G.cuda()
    D = D.cuda()

# NOTE(review): only D is explicitly re-initialized here; G keeps its
# construction-time weights — confirm that is intentional.
D.init_weights()
# Beispiel #3
# 0
# `dataset` is assumed to be defined earlier in this snippet's source file.
dataloader = torch.utils.data.DataLoader(
    dataset,
    batch_size=args.batch_size,
    shuffle=True,
    num_workers=4,
)

print("Dataset length: ", len(dataset))

# ALI triple for 3-channel input: encoder Gz, generator Gx and the joint
# (x, z) discriminator D; lr-norm usage is CLI-controlled on Gz/Gx.
Gz = Encoder28(
    args.l_size,
    args.h_size,
    args.use_mish,
    n_channels=3,
    cap_variance=True,
    use_lr_norm=args.use_lr_norm,
)
Gx = Generator28(
    args.l_size,
    args.h_size,
    args.use_mish,
    n_channels=3,
    sigmoid_out=True,
    use_lr_norm=args.use_lr_norm,
)
D = ALIDiscriminator28(
    args.l_size,
    args.h_size,
    use_bn=args.use_batchnorm_in_D,
    use_mish=args.use_mish,
    n_channels=3,
    dropout=args.dropout_rate,
    fc_h_size=args.fc_h_size,
)

# Both generator halves share a single Adam optimizer; D gets its own.
generator_params = list(Gz.parameters()) + list(Gx.parameters())
G_optimizer = torch.optim.Adam(generator_params, lr=args.lr, betas=(0.5, 0.999))
D_optimizer = torch.optim.Adam(D.parameters(), lr=args.lr, betas=(0.5, 0.999))

# Code for loading frs model when frs based reconstruction loss is used
# Beispiel #4
# 0
# ]))  — truncated fragment: the opening of this call was lost when the snippet was scraped

# Held-out image pairs used for validation, resized to 28x28 in [0, 1].
valid_pairs_dataset = CelebaCroppedPairs(
    split="valid",
    download=True,
    transform=transforms.Compose([
        transforms.Resize(28),
        transforms.ToTensor(),
    ]),
)

# `dataset` is assumed to be defined earlier in this snippet's source file.
dataloader = torch.utils.data.DataLoader(
    dataset,
    batch_size=args.batch_size,
    shuffle=True,
    num_workers=4,
)

# VAE/GAN trio: encoder, sigmoid-output decoder, plain image discriminator.
Gz = Encoder28(args.l_size, args.h_size, n_channels=3)
Gx = Generator28(args.l_size, args.h_size, n_channels=3, sigmoid_out=True)
D = VAEGANDiscriminator28(
    args.h_size,
    use_bn=args.use_batchnorm_in_D,
    n_channels=3,
    dropout=args.dropout_rate,
)

# Separate Adam optimizers for encoder, decoder and discriminator.
adam_kwargs = dict(lr=args.lr, betas=(0.5, 0.999))
Gz_optimizer = torch.optim.Adam(Gz.parameters(), **adam_kwargs)
Gx_optimizer = torch.optim.Adam(Gx.parameters(), **adam_kwargs)
D_optimizer = torch.optim.Adam(D.parameters(), **adam_kwargs)

if args.cuda:
    # NOTE(review): D is not moved to CUDA here — the original snippet is
    # visibly truncated at this point; confirm against the upstream source.
    Gz = Gz.cuda()
    Gx = Gx.cuda()
#                                                      args)  — truncated fragment: the call this closed was lost when the snippet was scraped

# Cropped CelebA training set in [0, 1] (ToTensor only, no rescaling).
dataset = CelebaCropped(
    split="train",
    download=True,
    transform=transforms.Compose([
        transforms.Resize(28),
        transforms.ToTensor(),
    ]),
)
dataloader = torch.utils.data.DataLoader(
    dataset,
    batch_size=args.batch_size,
    shuffle=True,
    num_workers=12,
)

# Bias-free generator with sigmoid outputs paired with a logits-output
# discriminator that has batch-norm disabled.
G = Generator28(
    args.l_size,
    args.h_size,
    args.use_mish,
    bias=False,
    n_channels=3,
    sigmoid_out=True,
)
D = Discriminator28(
    args.h_size,
    use_bn=False,
    use_mish=args.use_mish,
    n_channels=3,
    dropout=args.dropout_rate,
    use_logits=True,
)

# One Adam optimizer per network with the usual GAN betas.
G_optimizer = torch.optim.Adam(G.parameters(), lr=args.lr, betas=(0.5, 0.999))
D_optimizer = torch.optim.Adam(D.parameters(), lr=args.lr, betas=(0.5, 0.999))

if args.cuda:
    G = G.cuda()
    D = D.cuda()
# Beispiel #6
# 0
# Directory that collects all artifacts of this MNIST GAN experiment.
output_path = util.output.init_experiment_output_dir("mnist", "gan", args)

# MNIST digits, rescaled from [0, 1] to [-1, 1].
mnist_transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Lambda(lambda img: img * 2 - 1),
])
dataset = data.MNIST(
    "data/downloads/mnist",
    train=True,
    download=True,
    transform=mnist_transform,
)
dataloader = torch.utils.data.DataLoader(
    dataset,
    batch_size=args.batch_size,
    shuffle=True,
    num_workers=12,
)

# Generator/discriminator pair using the networks' default channel count.
G = Generator28(args.l_size, args.h_size, args.use_mish)
D = Discriminator28(
    args.h_size,
    use_bn=args.use_batchnorm_in_D,
    use_mish=args.use_mish,
)

if args.cuda:
    G = G.cuda()
    D = D.cuda()

# Re-initialize both networks after the (optional) device move.
G.init_weights()
D.init_weights()

G_optimizer = torch.optim.Adam(G.parameters(), lr=args.lr, betas=(0.5, 0.999))
D_optimizer = torch.optim.Adam(D.parameters(), lr=args.lr, betas=(0.5, 0.999))

listeners = [
# Beispiel #7
# 0
# MNIST validation split, resized to 28x28 and rescaled to [-1, 1].
valid_dataset = MNIST(
    "data/downloads/mnist",
    train=False,
    download=True,
    transform=transforms.Compose([
        transforms.Resize(28),
        transforms.ToTensor(),
        transforms.Lambda(lambda img: img * 2 - 1),
    ]),
)

# `dataset` is assumed to be defined earlier in this snippet's source file.
dataloader = torch.utils.data.DataLoader(
    dataset,
    batch_size=args.batch_size,
    shuffle=True,
    num_workers=12,
)

# ALI networks for single-channel input.
Gz = Encoder28(args.l_size, args.h_size, args.use_mish, n_channels=1)
Gx = Generator28(args.l_size, args.h_size, args.use_mish, False, n_channels=1)
D = ALIDiscriminator28(
    args.l_size,
    args.h_size,
    use_bn=args.use_batchnorm_in_D,
    use_mish=args.use_mish,
    n_channels=1,
    dropout=args.dropout_rate,
    fc_h_size=args.fc_h_size,
)

# One optimizer covers both generator halves; D is optimized separately.
generator_params = list(Gz.parameters()) + list(Gx.parameters())
G_optimizer = torch.optim.Adam(generator_params, lr=args.lr, betas=(0.5, 0.999))
D_optimizer = torch.optim.Adam(D.parameters(), lr=args.lr, betas=(0.5, 0.999))

if args.cuda:
    # NOTE(review): D is not moved to CUDA here — the snippet appears
    # truncated at this point; confirm against the upstream source.
    Gz = Gz.cuda()
    Gx = Gx.cuda()
# Train and validation triplet datasets from cropped CelebA, 28x28 in [0, 1].
dataset = CelebaCroppedTriplets(
    split="train",
    download=True,
    transform=transforms.Compose([
        transforms.Resize(28),
        transforms.ToTensor(),
    ]),
)

valid_dataset = CelebaCroppedTriplets(
    split="valid",
    download=True,
    transform=transforms.Compose([
        transforms.Resize(28),
        transforms.ToTensor(),
    ]),
)

dataloader = torch.utils.data.DataLoader(
    dataset,
    batch_size=args.batch_size,
    shuffle=True,
    num_workers=4,
)

print("Dataset length: ", len(dataset))

# Deterministic, dense-layer-free encoder; matching sigmoid-output generator;
# joint (x, z) ALI discriminator.
Gz = Encoder28(
    args.l_size,
    args.h_size,
    args.use_mish,
    n_channels=3,
    cap_variance=True,
    add_dense_layer=False,
    deterministic=True,
)
Gx = Generator28(
    args.l_size,
    args.h_size,
    args.use_mish,
    n_channels=3,
    sigmoid_out=True,
    add_dense_layer=False,
)
D = ALIDiscriminator28(
    args.l_size,
    args.h_size,
    use_bn=args.use_batchnorm_in_D,
    use_mish=args.use_mish,
    n_channels=3,
    dropout=args.dropout_rate,
    fc_h_size=args.fc_h_size,
)

# One optimizer over both generator halves, one for the discriminator.
G_optimizer = torch.optim.Adam(
    list(Gz.parameters()) + list(Gx.parameters()),
    lr=args.lr,
    betas=(0.5, 0.999),
)
D_optimizer = torch.optim.Adam(D.parameters(), lr=args.lr, betas=(0.5, 0.999))

if args.cuda:
    Gz = Gz.cuda()
    Gx = Gx.cuda()
    D = D.cuda()

# Re-initialize all three networks after the (optional) device move.
Gz.init_weights()
Gx.init_weights()
D.init_weights()

listeners = [
    LossReporter(),