Example #1
0
                    default='73_LapSRN_R_epochs100_HR2.tif',
                    help='where to save the output image')
parser.add_argument('--outputHR4',
                    type=str,
                    default='73_LapSRN_R_epochs100_HR4.tif',
                    help='where to save the output image')
parser.add_argument('--cuda', action='store_true', help='use cuda')

opt = parser.parse_args()

print(opt)

# One independent LapSRN network per color channel (R, G, B).
# NOTE(review): .cuda() is called unconditionally even though a --cuda flag
# exists above — presumably CUDA is assumed available; confirm before running
# on CPU-only hosts.
model_r = LapSRN().cuda()
model_g = LapSRN().cuda()
model_b = LapSRN().cuda()
optimizer_r = optim.Adagrad(model_r.parameters(), lr=1e-3, weight_decay=1e-5)
optimizer_g = optim.Adagrad(model_g.parameters(), lr=1e-3, weight_decay=1e-5)
optimizer_b = optim.Adagrad(model_b.parameters(), lr=1e-3, weight_decay=1e-5)

# Restore each channel's trained weights/optimizer state from its checkpoint.
model_r, optimizer_r, epochs_r = load_model(model_r, optimizer_r, opt.model_r)
model_g, optimizer_g, epochs_g = load_model(model_g, optimizer_g, opt.model_g)
model_b, optimizer_b, epochs_b = load_model(model_b, optimizer_b, opt.model_b)

# Split the input image into its three channels and preprocess them.
img = Image.open(opt.input).convert('RGB')
LR_r, LR_g, LR_b = img.split()

LR_r, LR_g, LR_b = pre_deal(LR_r, LR_g, LR_b)

# Run each channel through its own network to get the 2x and 4x outputs.
HR_2_r, HR_4_r = testing(model_r, LR_r)
HR_2_g, HR_4_g = testing(model_g, LR_g)
# BUG FIX: the blue channel was previously fed through the green model
# (testing(model_g, LR_b)); use model_b so each channel is upscaled by the
# network trained for it.
HR_2_b, HR_4_b = testing(model_b, LR_b)
Example #2
0
        # NOTE(review): fragment — these lines belong to a validation function
        # whose def is outside this excerpt. They record the per-epoch average
        # PSNR (rounded to 2 decimals) of the 2x and 4x outputs.
        results['Avg. PSNR1'].append(float('%.2f'%(avg_psnr1 / len(val_data_loader))))
        results['Avg. PSNR2'].append(float('%.2f'%(avg_psnr2 / len(val_data_loader))))


def checkpoint(epoch):
    """Save the current training state to LapSRN_model_epoch_g_<epoch>.pth.

    Bundles the module-level ``model`` weights, ``optimizer`` state, the
    epoch number, and the current ``lr`` so training can be resumed later.
    """
    out_path = "LapSRN_model_epoch_g_{}.pth".format(epoch)
    snapshot = {
        'model': model.state_dict(),
        'optimizer': optimizer.state_dict(),
        'epoch': epoch,
        'lr': lr,
    }
    # Legacy (non-zipfile) serialization keeps the file loadable by older
    # torch versions.
    torch.save(snapshot, out_path, _use_new_zipfile_serialization=False)
    print("Checkpoint saved to {}".format(out_path))


lr = opt.lr

for epoch in range(1, opt.nEpochs + 1):

    # Rebuild the optimizer every epoch so the periodically halved learning
    # rate (every 50 epochs, below) actually takes effect.
    optimizer = optim.Adagrad(model.parameters(), lr=lr, weight_decay=1e-5)

    train(epoch)
    val()

    # Checkpoint every 10 epochs; halve the learning rate every 50.
    if epoch % 10 == 0:
        checkpoint(epoch)
    if epoch % 50 == 0:
        lr = lr / 2

    # Dump the running metrics to CSV after every epoch (overwrites the file
    # so a crash loses at most one epoch of logging).
    metrics = {
        'Avg. Loss': results['Avg. Loss'],
        'Avg. PSNR1': results['Avg. PSNR1'],
        'Avg. PSNR2': results['Avg. PSNR2'],
    }
    data_frame = pd.DataFrame(data=metrics, index=range(1, epoch + 1))
    data_frame.to_csv('./result-g.csv', index_label='Epoch')
Example #3
0
        # NOTE(review): fragment — tail of a dual-checkpoint function; its def
        # line, the red-channel save, and the opening of this dict literal are
        # outside this excerpt.
        'model': model_g.state_dict(),
        'optimizer': optimizer_g.state_dict(),
        'epoch': epoch,
        'lr': lr_g
    }
    # Legacy serialization for compatibility with older torch loaders.
    torch.save(state_g, model_out_g_path, _use_new_zipfile_serialization=False)

    print("Checkpoint saved to {} and {}".format(model_out_r_path,
                                                 model_out_g_path))


# Independent learning rates for the red and green channel networks, both
# seeded from the same CLI value.
lr_r, lr_g = opt.lr, opt.lr

for epoch in range(1, opt.nEpochs + 1):

    # Optimizers are rebuilt each epoch so the halved learning rates
    # (every 50 epochs, below) actually take effect.
    optimizer_r = optim.Adagrad(model_r.parameters(),
                                lr=lr_r,
                                weight_decay=1e-5)
    optimizer_g = optim.Adagrad(model_g.parameters(),
                                lr=lg_placeholder if False else lr_g,
                                weight_decay=1e-5)

    train(epoch)
    val()
    if epoch % 10 == 0:
        checkpoint(epoch)
    if epoch % 50 == 0:
        lr_r, lr_g = lr_r / 2, lr_g / 2

    # NOTE(review): this DataFrame construction is truncated mid-dict in the
    # source fragment — the remaining metric columns and the to_csv call are
    # missing from this excerpt.
    data_frame = pd.DataFrame(data={
        'R_Avg. Loss': results['R_Avg. Loss'],
    # NOTE(review): fragment — the enclosing checkpoint function's def line and
    # the construction of state_r / model_out_r_path are outside this excerpt.
    torch.save(state_r, model_out_r_path, _use_new_zipfile_serialization=False)
    
    model_out_g_path = "LapSRN_model_epoch_g_{}.pth".format(epoch)
    # Bundle everything needed to resume training the green-channel model.
    state_g = {'model': model_g.state_dict(), 'optimizer': optimizer_g.state_dict(), 'epoch':epoch, 'lr':lr_g}
    torch.save(state_g, model_out_g_path, _use_new_zipfile_serialization=False)
    
    print("Checkpoint saved to {} and {}".format(model_out_r_path, model_out_g_path))


# Resume two-channel (R/G) training from saved checkpoints, if present.
if os.path.exists(opt.pre_model_r):
    model_r = LapSRN().to(device)
    checkpoints_r = torch.load(opt.pre_model_r)
    model_r.load_state_dict(checkpoints_r['model'])
    model_r.train()
    epoch_continue_r = checkpoints_r['epoch']
    # Placeholder optimizer; the saved state is restored on the first resumed
    # epoch inside the loop below.
    optimizer_r = optim.Adagrad(model_r.parameters())


    model_g = LapSRN().to(device)
    checkpoints_g = torch.load(opt.pre_model_g)
    model_g.load_state_dict(checkpoints_g['model'])
    model_g.train()
    epoch_continue_g = checkpoints_g['epoch']
    optimizer_g = optim.Adagrad(model_g.parameters())

    # Continue epoch numbering from where the green checkpoint stopped
    # (presumably both checkpoints were saved at the same epoch — verify).
    for epoch in range(epoch_continue_g + 1, opt.nEpochs + 1):

        if epoch == epoch_continue_g + 1:
            # First resumed epoch: restore optimizer state and learning rates
            # exactly as saved.
            optimizer_r.load_state_dict(checkpoints_r['optimizer'])
            optimizer_g.load_state_dict(checkpoints_g['optimizer'])
            lr_r = checkpoints_r['lr']
    # NOTE(review): the excerpt splices here into the tail of an unrelated
    # single-model checkpoint function; its def line and the opening of the
    # state_r dict are missing from this fragment.
        'optimizer': optimizer.state_dict(),
        'epoch': epoch,
        'lr': lr
    }
    torch.save(state_r, model_out_r_path, _use_new_zipfile_serialization=False)
    print("Checkpoint saved to {}".format(model_out_r_path))


# Resume single-model training from a saved checkpoint, if present.
if os.path.exists(opt.pre_model):
    model = LapSRN().cuda()
    checkpoints = torch.load(opt.pre_model)
    model.load_state_dict(checkpoints['model'])
    # Back to training mode before continuing optimization.
    model.train()
    epoch_continue = checkpoints['epoch']

    # Placeholder optimizer; the saved state is restored on the first resumed
    # epoch inside the loop below.
    optimizer = optim.Adagrad(model.parameters())

    for epoch in range(epoch_continue + 1, opt.nEpochs + 1):

        if epoch == epoch_continue + 1:
            # First resumed epoch: restore Adagrad state and the saved lr.
            optimizer.load_state_dict(checkpoints['optimizer'])
            lr = checkpoints['lr']

        else:
            # Later epochs: rebuild the optimizer with the current lr
            # (presumably halved periodically further down — the loop body is
            # truncated in this excerpt).
            optimizer = optim.Adagrad(model.parameters(),
                                      lr=lr,
                                      weight_decay=1e-5)

        train(epoch)
        val()