Code example #1
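This snippet resumes training from a saved checkpoint when one exists, then sets up an L1 reconstruction loss and two Adam optimizers over a shared encoder. It assumes `args` and `model` are defined earlier in the file, along with imports such as:

import os

import torch
import torch.nn as nn
import torch.optim as optim
import torch.backends.cudnn as cudnn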
print('===> Try resume from checkpoint')
if os.path.isdir('checkpoint'):
    try:
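        # The checkpoint is expected to be a dict with a 'state' entry
        # (the model's state_dict) and an 'epoch' entry.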
        checkpoint = torch.load('./checkpoint/autoencoder.t7')
        model.load_state_dict(checkpoint['state'])
        start_epoch = checkpoint['epoch']
        print('===> Load last checkpoint data')
    except FileNotFoundError:
        # Fall back to a fresh start if the checkpoint file is missing,
        # so start_epoch is always defined before training begins.
        start_epoch = 0
        print('Can\'t find autoencoder.t7')
else:
    start_epoch = 0
    print('===> Start from scratch')

if args.cuda:
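    # Move the model to the GPU; cudnn.benchmark lets cuDNN autotune
    # convolution algorithms, which helps when input sizes are fixed.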
    model.cuda()
    cudnn.benchmark = True

criterion = nn.L1Loss()
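# Two optimizers share the encoder's parameters but each updates a different
# decoder (one shared encoder, two domain-specific decoders).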
optimizer_1 = optim.Adam([{'params': model.encoder.parameters()},
                          {'params': model.decoder_A.parameters()}]
                         , lr=5e-5, betas=(0.5, 0.999))
optimizer_2 = optim.Adam([{'params': model.encoder.parameters()},
                          {'params': model.decoder_B.parameters()}]
                         , lr=5e-5, betas=(0.5, 0.999))

# print the number of parameters in the model
# s = sum([np.prod(list(p.size())) for p in model.parameters()])
# print('Number of params: %d' % s)

if __name__ == "__main__":
Code example #2
File: train_gan.py  Project: shreydesai/latext-gan
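This snippet, taken from the script's `__main__` block, loads a pretrained autoencoder and sets up a generator and a critic for adversarial training. It assumes `args`, `load`, `Autoencoder`, `Generator`, `Critic`, and `train` are defined elsewhere in the project, along with imports such as `numpy as np`, `torch`, and `torch.optim as optim`.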
    train_loader, vocab = load(args.batch_size, args.seq_len)

    autoencoder = Autoencoder(args.enc_hidden_dim, args.dec_hidden_dim,
                              args.embedding_dim, args.latent_dim,
                              vocab.size(), args.dropout, args.seq_len)
    # Load the pretrained weights onto the CPU regardless of the device
    # they were saved from; .cuda() below moves them if requested.
    autoencoder.load_state_dict(
        torch.load('autoencoder.th', map_location=lambda storage, loc: storage))
    generator = Generator(args.n_layers, args.block_dim)
    critic = Critic(args.n_layers, args.block_dim)

    # Separate Adam optimizers for the generator and the critic, which are
    # stepped independently during adversarial training.
    g_optimizer = optim.Adam(generator.parameters(), lr=args.lr)
    c_optimizer = optim.Adam(critic.parameters(), lr=args.lr)

    if args.cuda:
        autoencoder = autoencoder.cuda()
        generator = generator.cuda()
        critic = critic.cuda()

    print('G Parameters:', sum(p.numel() for p in generator.parameters()
                               if p.requires_grad))
    print('C Parameters:', sum(p.numel() for p in critic.parameters()
                               if p.requires_grad))

    best_loss = np.inf

    for epoch in range(1, args.epochs + 1):
        g_loss, c_loss = train(epoch)
        loss = g_loss + c_loss
        if loss < best_loss:
            best_loss = loss
Code example #3
File: train_ae.py  Project: shreydesai/latext-gan
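This snippet trains the text autoencoder itself: it parses the command-line arguments, seeds PyTorch for reproducibility, builds the model from the vocabulary, and tracks the best loss across epochs. It assumes `parser`, `load`, `Autoencoder`, and `train` are defined earlier in the file, along with imports such as `argparse`, `numpy as np`, `torch`, `torch.nn as nn`, and `torch.optim as optim`.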
    # Note: argparse's type=bool treats any non-empty string as True;
    # in practice only the default (CUDA availability) matters here.
    parser.add_argument('--cuda', type=bool, default=torch.cuda.is_available())
    args = parser.parse_args()

    print(args)

    # Seed PyTorch and force deterministic cuDNN kernels for reproducibility.
    torch.manual_seed(args.seed)
    torch.backends.cudnn.deterministic = True

    train_loader, vocab = load(args.batch_size, args.seq_len)

    model = Autoencoder(args.enc_hidden_dim, args.dec_hidden_dim,
                        args.embedding_dim, args.latent_dim, vocab.size(),
                        args.dropout, args.seq_len)

    if args.cuda:
        model = model.cuda()

    print('Parameters:', sum(p.numel() for p in model.parameters()
                             if p.requires_grad))
    print('Vocab size:', vocab.size())

    # Reconstruction is scored with cross-entropy over the vocabulary,
    # i.e. the decoder presumably emits per-token logits.
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=args.lr)

    best_loss = np.inf

    for epoch in range(1, args.epochs + 1):
        loss = train(epoch)
        if loss < best_loss:
            best_loss = loss
            print('* Saved')