Example 1
                      hidden_init=args.hidden_init,
                      dropout=args.dropout,
                      gpu=args.cuda)

gan_gen = MLP_G(ninput=args.z_size, noutput=args.nhidden, layers=args.arch_g)
gan_disc = MLP_D(ninput=args.nhidden, noutput=1, layers=args.arch_d)

print(autoencoder)
print(gan_gen)
print(gan_disc)

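# SGD for the autoencoder; Adam (with a shared beta1) for the GAN generator and discriminator.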
optimizer_ae = optim.SGD(autoencoder.parameters(), lr=args.lr_ae)
optimizer_gan_g = optim.Adam(gan_gen.parameters(),
                             lr=args.lr_gan_g,
                             betas=(args.beta1, 0.999))
optimizer_gan_d = optim.Adam(gan_disc.parameters(),
                             lr=args.lr_gan_d,
                             betas=(args.beta1, 0.999))

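# Token-level cross-entropy, used as the autoencoder's reconstruction criterion.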
criterion_ce = nn.CrossEntropyLoss()

if args.cuda:
    autoencoder = autoencoder.cuda()
    gan_gen = gan_gen.cuda()
    gan_disc = gan_disc.cuda()
    criterion_ce = criterion_ce.cuda()

###############################################################################
# Training code
###############################################################################
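The listing ends where the training code begins. Purely as an orientation, one iteration in an ARAE-style loop typically alternates an autoencoder reconstruction step with discriminator and generator updates. The sketch below is an assumption built on the objects defined above: the batch variables (source, target, lengths), the ntokens vocabulary size, the autoencoder's forward signature, and the mean-based WGAN-style losses do not appear in the original snippet.

import torch

def train_step(source, target, lengths, ntokens):
    # Hypothetical single iteration; the autoencoder forward signatures below are assumed.

    # 1) Autoencoder reconstruction step (SGD).
    optimizer_ae.zero_grad()
    output = autoencoder(source, lengths, noise=True)   # assumed signature
    ae_loss = criterion_ce(output.reshape(-1, ntokens), target.reshape(-1))
    ae_loss.backward()
    optimizer_ae.step()

    # 2) Discriminator step: real codes from the encoder, fake codes from the generator.
    optimizer_gan_d.zero_grad()
    real_code = autoencoder(source, lengths, noise=False, encode_only=True).detach()
    z = torch.randn(real_code.size(0), args.z_size)
    if args.cuda:
        z = z.cuda()
    fake_code = gan_gen(z).detach()
    d_loss = -(gan_disc(real_code).mean() - gan_disc(fake_code).mean())
    d_loss.backward()
    optimizer_gan_d.step()

    # 3) Generator step: push generated codes toward the real-code distribution.
    optimizer_gan_g.zero_grad()
    g_loss = -gan_disc(gan_gen(z)).mean()
    g_loss.backward()
    optimizer_gan_g.step()

    return ae_loss.item(), d_loss.item(), g_loss.item()
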
Example 2
                  ntokens=word_args.ntokens,
                  nlayers=word_args.nlayers,
                  noise_r=word_args.noise_r,
                  hidden_init=word_args.hidden_init,
                  dropout=word_args.dropout)

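# Restore the pretrained word-level autoencoder weights.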
word_ae.load_state_dict(word_ae_params)

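# The discriminator scores nhidden-dimensional codes; the generator maps z_size noise vectors into that code space.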
D = MLP_D(input_dim=args.nhidden, output_dim=1, arch_layers=args.arch_d)
G = MLP_G(input_dim=args.nhidden,
          output_dim=args.nhidden,
          noise_dim=args.z_size,
          arch_layers=args.arch_g)
if args.finetune_ae:
    logger.info("AE will be fine-tuned")
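    # Register both autoencoders' parameters with the GAN optimizers so adversarial updates also fine-tune them.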
    optimizer_D = optim.Adam(list(D.parameters()) +
                             list(char_ae.parameters()) +
                             list(word_ae.parameters()),
                             lr=args.lr_gan_d,
                             betas=(args.beta1, 0.999))
    optimizer_G = optim.Adam(list(G.parameters()) +
                             list(char_ae.parameters()) +
                             list(word_ae.parameters()),
                             lr=args.lr_gan_g,
                             betas=(args.beta1, 0.999))
else:
    logger.info("AE will not be fine-tuned")
    optimizer_D = optim.Adam(D.parameters(),
                             lr=args.lr_gan_d,
                             betas=(args.beta1, 0.999))
    optimizer_G = optim.Adam(G.parameters(),
                             lr=args.lr_gan_g,
                             betas=(args.beta1, 0.999))