Example #1
# Assumed context for this snippet: `to_gpu`, the model classes used below,
# `args`, and `corpus` are defined elsewhere in the surrounding project.
import torch.optim as optim

# Move the pretrained embedding matrix onto the GPU (when enabled) before it
# is handed to the autoencoder.
corpus.dictionary.weights_matrix = to_gpu(args.cuda,
                                          corpus.dictionary.weights_matrix)
autoencoder = Seq2Seq2Decoder(emsize=args.emsize,
                              nhidden=args.nhidden,
                              ntokens=ntokens,
                              nlayers=args.nlayers,
                              noise_r=args.noise_r,
                              hidden_init=args.hidden_init,
                              dropout=args.dropout,
                              gpu=args.cuda,
                              weights_matrix=corpus.dictionary.weights_matrix)

# The generator maps noise of size z_size to a latent code of size nhidden;
# the discriminator and the binary classifier both operate on latent codes.
gan_gen = MLP_G(ninput=args.z_size, noutput=args.nhidden, layers=args.arch_g)
gan_disc = MLP_D(ninput=args.nhidden, noutput=1, layers=args.arch_d)
classifier = MLP_Classify(ninput=args.nhidden,
                          noutput=1,
                          layers=args.arch_classify)
g_factor = None

print(autoencoder)
print(gan_gen)
print(gan_disc)
print(classifier)

optimizer_ae = optim.SGD(autoencoder.parameters(), lr=args.lr_ae)
optimizer_gan_g = optim.Adam(gan_gen.parameters(),
                             lr=args.lr_gan_g,
                             betas=(args.beta1, 0.999))
optimizer_gan_d = optim.Adam(gan_disc.parameters(),
                             lr=args.lr_gan_d,
                             betas=(args.beta1, 0.999))
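A minimal sketch (not part of the snippet above) of how optimizer_ae might drive one reconstruction update; the forward signature autoencoder(decoder_idx, source, lengths, noise=...) and the flattened-logits shape are assumptions about the surrounding project, not confirmed by the example.

import torch
import torch.nn as nn

criterion_ce = nn.CrossEntropyLoss()

def train_ae_step(decoder_idx, source, target, lengths):
    # One reconstruction step: encode with input noise, decode, and fit the
    # target tokens with cross-entropy. Gradient clipping is a common choice
    # when training recurrent autoencoders with SGD.
    autoencoder.train()
    optimizer_ae.zero_grad()
    logits = autoencoder(decoder_idx, source, lengths, noise=True)  # assumed signature
    loss = criterion_ce(logits.reshape(-1, ntokens), target.reshape(-1))
    loss.backward()
    torch.nn.utils.clip_grad_norm_(autoencoder.parameters(), max_norm=1.0)
    optimizer_ae.step()
    return loss.item()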
Example #2
###############################################################################

# `args` and `corpus` are defined elsewhere in the project; the optimizer
# import below is assumed and not part of the original snippet.
import torch.optim as optim

ntokens = len(corpus.dictionary.word2idx)
autoencoder = Seq2Seq2Decoder(emsize=args.emsize,
                              nhidden=args.nhidden,
                              ntokens=ntokens,
                              nlayers=args.nlayers,
                              noise_r=args.noise_r,
                              hidden_init=args.hidden_init,
                              dropout=args.dropout,
                              gpu=args.cuda)

gan_gen = MLP_G(ninput=args.z_size, noutput=args.nhidden, layers=args.arch_g)
gan_disc = MLP_D(ninput=args.nhidden, noutput=1, layers=args.arch_d)
classifier = MLP_Classify(ninput=args.nhidden,
                          noutput=1,
                          layers=args.arch_classify)
g_factor = None

print(autoencoder)
print(gan_gen)
print(gan_disc)
print(classifier)

optimizer_ae = optim.SGD(autoencoder.parameters(), lr=args.lr_ae)
optimizer_gan_g = optim.Adam(gan_gen.parameters(),
                             lr=args.lr_gan_g,
                             betas=(args.beta1, 0.999))
optimizer_gan_d = optim.Adam(gan_disc.parameters(),
                             lr=args.lr_gan_d,
                             betas=(args.beta1, 0.999))
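As a hedged illustration (not taken from the original project), the two Adam optimizers above would typically alternate between a critic step and a generator step over latent codes; the WGAN-style scoring and the per-example critic output below are assumptions.

import torch

def train_gan_d(real_hidden):
    # Critic step: raise the score of real latent codes, lower it for
    # generated ones. Assumes gan_disc returns one score per example.
    optimizer_gan_d.zero_grad()
    batch_size = real_hidden.size(0)
    noise = torch.randn(batch_size, args.z_size, device=real_hidden.device)
    fake_hidden = gan_gen(noise)
    loss_d = gan_disc(fake_hidden.detach()).mean() - gan_disc(real_hidden.detach()).mean()
    loss_d.backward()
    optimizer_gan_d.step()
    return loss_d.item()

def train_gan_g(batch_size, device):
    # Generator step: push generated latent codes toward higher critic scores.
    optimizer_gan_g.zero_grad()
    noise = torch.randn(batch_size, args.z_size, device=device)
    loss_g = -gan_disc(gan_gen(noise)).mean()
    loss_g.backward()
    optimizer_gan_g.step()
    return loss_g.item()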
Example #3
# The snippet starts mid-call: the first branch of this if/else (an encoder
# variant configured with pooling_enc) is truncated, leaving only its trailing
# keyword arguments.
                                     pooling_enc=args.pooling_enc,
                                     gpu=args.cuda)
else:
    autoencoder = Seq2Seq2Decoder(emsize=args.emsize,
                                  nhidden=args.nhidden,
                                  ntokens=ntokens,
                                  nlayers=args.nlayers,
                                  noise_r=args.noise_r,
                                  hidden_init=args.hidden_init,
                                  dropout=args.dropout,
                                  gpu=args.cuda)

gan_gen = MLP_G(ninput=args.z_size, noutput=args.nhidden, layers=args.arch_g)
gan_disc = MLP_D(ninput=args.nhidden, noutput=1, layers=args.arch_d)
classifier = MLP_Classify(ninput=args.nhidden,
                          noutput=1,
                          layers=args.arch_classify)
g_factor = None

print(autoencoder)
print(gan_gen)
print(gan_disc)
print(classifier)

if args.cuda:
    autoencoder = autoencoder.cuda()
    gan_gen = gan_gen.cuda()
    gan_disc = gan_disc.cuda()
    classifier = classifier.cuda()
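The explicit .cuda() calls above pin the modules to the default GPU. A device-agnostic alternative (my own variant, not part of the snippet) would be:

import torch

device = torch.device("cuda" if args.cuda and torch.cuda.is_available() else "cpu")
autoencoder = autoencoder.to(device)
gan_gen = gan_gen.to(device)
gan_disc = gan_disc.to(device)
classifier = classifier.to(device)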