Code example #1
0
def load_models(load_path):
    """Rebuild a saved ARAE run: autoencoder, GAN generator and discriminator.

    Args:
        load_path: directory containing the serialized 'options.json'
            hyperparameter file from training.

    Returns:
        Tuple of (model_args, idx2word, autoencoder, gan_gen, gan_disc),
        where model_args is the restored hyperparameter dict and idx2word
        maps vocabulary indices back to tokens.
    """
    # Restore training-time hyperparameters and merge them into the global
    # `args` namespace so the constructors below see the saved values.
    with open(os.path.join(load_path, 'options.json'), 'r') as f:
        model_args = json.load(f)
    vars(args).update(model_args)

    autoencoder = Seq2Seq(emsize=args.emsize,
                          nhidden=args.nhidden,
                          ntokens=args.ntokens,
                          nlayers=args.nlayers,
                          noise_r=args.noise_r,
                          hidden_init=args.hidden_init,
                          dropout=args.dropout,
                          gpu=args.cuda)
    gan_gen = MLP_G(ninput=args.z_size,
                    noutput=args.nhidden,
                    layers=args.arch_g)
    gan_disc = MLP_D(ninput=args.nhidden, noutput=1, layers=args.arch_d)

    # Move to GPU only when requested; the original called .cuda()
    # unconditionally, which fails on CPU-only machines even though the
    # autoencoder was already built with gpu=args.cuda.
    if args.cuda:
        autoencoder = autoencoder.cuda()
        gan_gen = gan_gen.cuda()
        gan_disc = gan_disc.cuda()

    # NOTE(review): vocab and weights are read from args.save while the
    # options come from load_path — presumably these point at the same
    # directory; confirm against the caller before relying on it.
    with open(os.path.join(args.save, 'vocab.json'), 'r') as f:
        word2idx = json.load(f)
    idx2word = {v: k for k, v in word2idx.items()}

    print('Loading models from {}'.format(args.save))
    loaded = torch.load(os.path.join(args.save, "model.pt"))
    autoencoder.load_state_dict(loaded.get('ae'))
    gan_gen.load_state_dict(loaded.get('gan_g'))
    gan_disc.load_state_dict(loaded.get('gan_d'))
    return model_args, idx2word, autoencoder, gan_gen, gan_disc
Code example #2
0
File: main.py — Project: nkasmanoff/HIGAN
    # Choose the architecture pair: plain MLP generator/discriminator or the
    # DCGAN convolutional variants, built from the same size hyperparameters.
    if opt.MLP == True:
        netG = MLP_G(s_sample, nz, nc, ngf, ngpu)
        netD = MLP_D(s_sample, nz, nc, ngf, ngpu)

    else:
        netG = DCGAN_G(s_sample, nz, nc, ngf, ngpu)
        # NOTE(review): the DCGAN discriminator uses ndf while the MLP branch
        # above passes ngf for both nets — confirm this asymmetry is intended.
        netD = DCGAN_D(s_sample, nz, nc, ndf, ngpu)

    #experiments/ch128_lr0005_tanh/netD_epoch_47.pth
    # Resume from the epoch just before the requested starting epoch.
    epoch_load = opt.epoch_st - 1
    wass_loss = []

    # Optionally restore checkpointed weights and the recorded Wasserstein
    # loss history so training/plotting continues where it left off.
    if opt.load_weights == True:
        netG.load_state_dict(
            torch.load(opt.experiment + 'netG_epoch_' + str(epoch_load) +
                       '.pth'))
        netD.load_state_dict(
            torch.load(opt.experiment + 'netD_epoch_' + str(epoch_load) +
                       '.pth'))

        # loss.csv has no header row; take the first column as a plain list.
        wass_loss = pd.read_csv(opt.experiment + 'loss.csv', header=None)
        wass_loss = wass_loss[wass_loss.columns[0]].tolist()

    device = torch.device("cuda" if opt.cuda else "cpu")

    #part, datapath, s_sample, nsamples, transform, d2

    # Single 'train' partition covering every sample index.
    partition = {'train': [x for x in range(0, n_samples)]}

    dataset = HydrogenDataset(part=partition,
Code example #3
0
        torch.load('{}/autoencoder_model.pt'.format(args.outf),
                   map_location=lambda storage, loc: storage))

# Resume from checkpointed weights when present.  Each model is optional so a
# partially saved run can still be restored; map_location keeps the loaded
# tensors on CPU, so restoring works on machines without the original GPU.
# (The triplicated if/load blocks are collapsed into one data-driven loop.)
_checkpointed_models = [
    (classifier, 'classifier_model.pt'),
    (gan_disc, 'gan_disc_model.pt'),
    (gan_gen, 'gan_gen_model.pt'),
]
for _model, _fname in _checkpointed_models:
    _ckpt_path = '{}/{}'.format(args.outf, _fname)
    if os.path.isfile(_ckpt_path):
        _model.load_state_dict(
            torch.load(_ckpt_path,
                       map_location=lambda storage, loc: storage))

# Move every module to the GPU when CUDA is requested; criterion_ce is
# included too (presumably it carries class-weight tensors — confirm).
if args.cuda:
    autoencoder = autoencoder.cuda()
    gan_gen = gan_gen.cuda()
    gan_disc = gan_disc.cuda()
    classifier = classifier.cuda()
    criterion_ce = criterion_ce.cuda()

###############################################################################
# Training code
###############################################################################


def save_model():