Example #1
import os

import torch
import torch.optim as optim

if opt.load_pretrained_D:
    D_name = os.path.join(opt.save_folder, opt.pretrained_D)
    if os.path.exists(D_name):
        # torch.load returns the saved state dict; map_location keeps the
        # tensors on the CPU so the weights load even without a GPU.
        D.load_state_dict(
            torch.load(D_name, map_location=lambda storage, loc: storage))
        print('Pre-trained Discriminator model is loaded.')

if cuda:
    model = model.cuda(gpus_list[0])
    D = D.cuda(gpus_list[0])
    feature_extractor = feature_extractor.cuda(gpus_list[0])
    MSE_loss = MSE_loss.cuda(gpus_list[0])
    BCE_loss = BCE_loss.cuda(gpus_list[0])

optimizer = optim.Adam(model.parameters(),
                       lr=opt.lr,
                       betas=(0.9, 0.999),
                       eps=1e-8)
D_optimizer = optim.Adam(D.parameters(),
                         lr=opt.lr,
                         betas=(0.9, 0.999),
                         eps=1e-8)

# Pretraining phase
if opt.pretrained:
    print('Pre-training starts.')
    for epoch in range(1, opt.pretrained_iter + 1):
        train_pretrained(epoch)
    print('Pre-training finished.')
    checkpoint(epoch, pretrained_flag=True)
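
Example #1 ends by calling a checkpoint helper that the excerpt does not show. A minimal sketch of what such a helper could look like, assuming a pretrained_flag keyword and a filename scheme built from opt.save_folder (both the naming and the flag handling are guesses, not the original implementation):

def checkpoint(epoch, pretrained_flag=False):
    # Hypothetical reconstruction: save only the state dict, matching how
    # the scripts above reload weights with load_state_dict().
    tag = 'pretrained_' if pretrained_flag else ''
    save_path = os.path.join(opt.save_folder,
                             '{}model_epoch_{}.pth'.format(tag, epoch))
    torch.save(model.state_dict(), save_path)
    print('Checkpoint saved to {}'.format(save_path))

Saving state_dict() rather than the whole module keeps the file loadable with the load_state_dict calls shown in both examples.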
Example #2
import os

import torch
import torch.optim as optim

print('---------- Networks architecture -------------')
print_network(model)
print('----------------------------------------------')

if opt.pretrained:
    model_name = os.path.join(opt.save_folder, opt.pretrained_sr)
    if os.path.exists(model_name):
        # torch.load returns the saved state dict; map_location keeps the
        # tensors on the CPU so the weights load even without a GPU.
        model.load_state_dict(
            torch.load(model_name, map_location=lambda storage, loc: storage))
        print('Pre-trained SR model is loaded.')

if cuda:
    model = model.cuda(gpus_list[0])
    criterion = criterion.cuda(gpus_list[0])

optimizer = optim.Adam(model.parameters(), lr=opt.lr, betas=(0.9, 0.999), eps=1e-8)

for epoch in range(1, opt.nEpochs + 1):
    train(epoch)
    #test()

    # learning rate is decayed by a factor of 10 every half of total epochs
    if epoch % (opt.nEpochs // 2) == 0:
        for param_group in optimizer.param_groups:
            param_group['lr'] /= 10.0
        print('Learning rate decay: lr={}'.format(optimizer.param_groups[0]['lr']))
            
    if epoch % opt.snapshots == 0:
        checkpoint(epoch)
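
Example #2 also relies on a print_network helper that is defined elsewhere in the source file. In training scripts of this style it usually prints the module and its total parameter count; a minimal sketch under that assumption:

def print_network(net):
    # Hypothetical reconstruction: report the architecture and the number
    # of trainable parameters by summing the element count of each tensor.
    num_params = sum(p.numel() for p in net.parameters())
    print(net)
    print('Total number of parameters: {}'.format(num_params))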