Code Example #1
def main():
    # build the model
    net = VDN(_C, slope=args.slope, wf=args.wf, dep_U=args.depth)
    # move the model to GPU
    net = nn.DataParallel(net).cuda()

    # optimizer
    optimizer = optim.Adam(net.parameters(), lr=args.lr)
    args.milestones = [10, 20, 25, 30, 35, 40, 45, 50]
    scheduler = optim.lr_scheduler.MultiStepLR(optimizer, args.milestones, args.gamma)

    if args.resume:
        if os.path.isfile(args.resume):
            print('=> Loading checkpoint {:s}'.format(args.resume))
            checkpoint = torch.load(args.resume)
            args.epoch_start = checkpoint['epoch']
            args.step = checkpoint['step']
            args.step_img = checkpoint['step_img']
            optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
            scheduler.load_state_dict(checkpoint['lr_scheduler_state_dict'])
            net.load_state_dict(checkpoint['model_state_dict'])
            args.clip_grad_D = checkpoint['grad_norm_D']
            args.clip_grad_S = checkpoint['grad_norm_S']
            print('=> Loaded checkpoint {:s} (epoch {:d})'.format(args.resume, checkpoint['epoch']))
        else:
            sys.exit('Please provide a valid model path!')
    else:
        args.epoch_start = 0
        if os.path.isdir(args.log_dir):
            shutil.rmtree(args.log_dir)
        os.makedirs(args.log_dir)
        if os.path.isdir(args.model_dir):
            shutil.rmtree(args.model_dir)
        os.makedirs(args.model_dir)

    # print the arg parameters
    for arg in vars(args):
        print('{:<15s}: {:s}'.format(arg,  str(getattr(args, arg))))

    # make the training data list
    simulate_dir = Path(args.simulate_dir)
    train_im_list = list(simulate_dir.glob('*.jpg')) + list(simulate_dir.glob('*.png')) + \
                    list(simulate_dir.glob('*.bmp'))
    train_im_list = sorted([str(x) for x in train_im_list])
    # make the testing data list
    test_case1_h5 = Path('test_data').joinpath('noise_niid', 'CBSD68_niid_case1.hdf5')
    test_case2_h5 = Path('test_data').joinpath('noise_niid', 'CBSD68_niid_case2.hdf5')
    test_case3_h5 = Path('test_data').joinpath('noise_niid', 'CBSD68_niid_case3.hdf5')
    test_im_list = (Path('test_data') / 'CBSD68').glob('*.png')
    test_im_list = sorted([str(x) for x in test_im_list])
    datasets = {'train': DenoisingDatasets.SimulateTrain(train_im_list, 5000*args.batch_size,
                                                         args.patch_size, radius=args.radius,
                                                         noise_estimate=True),
                'test_cbsd681': DenoisingDatasets.SimulateTest(test_im_list, test_case1_h5),
                'test_cbsd682': DenoisingDatasets.SimulateTest(test_im_list, test_case2_h5),
                'test_cbsd683': DenoisingDatasets.SimulateTest(test_im_list, test_case3_h5)}
    # train model
    print('\nBegin training with GPU: ' + str(args.gpu_id))
    train_model(net, datasets, optimizer, scheduler, loss_fn)
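
For context, the resume branch above expects a checkpoint dictionary containing exactly the keys it reads back. Below is a minimal sketch of a compatible save call, assumed to run at the end of each epoch inside train_model; only the dictionary keys come from the excerpt, while the epoch variable and the output file name are illustrative assumptions.

# Hedged sketch: write a checkpoint that the resume branch above can load.
# Only the keys are taken from the excerpt; 'epoch' and the file name are assumptions.
checkpoint = {'epoch': epoch + 1,
              'step': args.step,
              'step_img': args.step_img,
              'model_state_dict': net.state_dict(),
              'optimizer_state_dict': optimizer.state_dict(),
              'lr_scheduler_state_dict': scheduler.state_dict(),
              'grad_norm_D': args.clip_grad_D,
              'grad_norm_S': args.clip_grad_S}
torch.save(checkpoint, os.path.join(args.model_dir, 'model_{:d}'.format(epoch + 1)))
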
Code Example #2
def main():
    # build the model
    net = VDN(_C, wf=args.wf, slope=args.slope, dep_U=args.depth)
    # move the model to GPU (multi-GPU via DataParallel)
    net = nn.DataParallel(net).cuda()

    # optimizer
    optimizer = optim.Adam(net.parameters(), lr=args.lr)
    args.milestones = [10, 20, 25, 30, 35, 40, 45, 50]
    scheduler = optim.lr_scheduler.MultiStepLR(optimizer, args.milestones, args.gamma)

    if args.resume:
        if os.path.isfile(args.resume):
            print('=> Loading checkpoint {:s}'.format(args.resume))
            checkpoint = torch.load(args.resume)
            args.epoch_start = checkpoint['epoch']
            args.step = checkpoint['step']
            args.step_img = checkpoint['step_img']
            optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
            scheduler.load_state_dict(checkpoint['lr_scheduler_state_dict'])
            net.load_state_dict(checkpoint['model_state_dict'])
            args.clip_grad_D = checkpoint['grad_norm_D']
            args.clip_grad_S = checkpoint['grad_norm_S']
            print('=> Loaded checkpoint {:s} (epoch {:d})'.format(args.resume, checkpoint['epoch']))
        else:
            sys.exit('Please provide a valid model path!')
    else:
        net = weight_init_kaiming(net)
        args.epoch_start = 0
        if os.path.isdir(args.log_dir):
            shutil.rmtree(args.log_dir)
        os.makedirs(args.log_dir)
        if os.path.isdir(args.model_dir):
            shutil.rmtree(args.model_dir)
        os.makedirs(args.model_dir)

    for arg in vars(args):
        print('{:<15s}: {:s}'.format(arg,  str(getattr(args, arg))))

    # train dataset
    path_SIDD_train = os.path.join(args.SIDD_dir, 'small_imgs_train.hdf5')
    # test dataset
    path_SIDD_test = os.path.join(args.SIDD_dir, 'small_imgs_test.hdf5')
    datasets = {'train': DenoisingDatasets.BenchmarkTrain(path_SIDD_train, 5000*args.batch_size,
                                                          args.patch_size, radius=args.radius,
                                                          eps2=args.eps2, noise_estimate=True),
                'test_SIDD': DenoisingDatasets.BenchmarkTest(path_SIDD_test)}
    # train model
    print('\nBegin training with GPU: ' + str(args.gpu_id))
    train_model(net, datasets, optimizer, scheduler, loss_fn)
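
Both main() excerpts rely on module-level names (args, _C, loss_fn, train_model, DenoisingDatasets, plus the standard os/sys/shutil/torch imports) that are defined elsewhere in the training scripts. Below is a minimal sketch of the argument parsing they assume; the option names mirror the args.* attributes accessed above, while the defaults are purely illustrative.

import argparse

# Hedged sketch of the argparse setup the excerpts rely on.
# Option names mirror the args.* attributes used above; defaults are illustrative guesses.
parser = argparse.ArgumentParser()
parser.add_argument('--batch_size', type=int, default=64)
parser.add_argument('--patch_size', type=int, default=128)
parser.add_argument('--lr', type=float, default=2e-4)
parser.add_argument('--gamma', type=float, default=0.5)
parser.add_argument('--slope', type=float, default=0.2)
parser.add_argument('--wf', type=int, default=64)
parser.add_argument('--depth', type=int, default=4)
parser.add_argument('--radius', type=int, default=3)
parser.add_argument('--eps2', type=float, default=1e-6)
parser.add_argument('--resume', type=str, default='')
parser.add_argument('--log_dir', type=str, default='./log')
parser.add_argument('--model_dir', type=str, default='./model')
parser.add_argument('--simulate_dir', type=str, default='./train_data')
parser.add_argument('--SIDD_dir', type=str, default='./SIDD')
parser.add_argument('--gpu_id', type=str, default='0')
args = parser.parse_args()

_C = 3  # number of image channels (RGB); an assumption consistent with code example #3

if __name__ == '__main__':
    main()
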
Code Example #3
import torch
from networks import VDN
from skimage.measure import compare_psnr, compare_ssim  # removed in scikit-image >= 0.18
from skimage import img_as_float, img_as_ubyte
from utils import load_state_dict_cpu
from matplotlib import pyplot as plt
import time
from scipy.io import loadmat

use_gpu = True
C = 3
dep_U = 4

# load the pretrained model
print('Loading the Model')
checkpoint = torch.load('./model_state/model_state_SIDD')
net = VDN(C, dep_U=dep_U, wf=64)
if use_gpu:
    net = torch.nn.DataParallel(net).cuda()
    net.load_state_dict(checkpoint)
else:
    load_state_dict_cpu(net, checkpoint)
net.eval()

im_noisy = loadmat('./test_data/DND/1.mat')['InoisySRGB']
H, W, _ = im_noisy.shape
if H % 2**dep_U != 0:
    H -= H % 2**dep_U
if W % 2**dep_U != 0:
    W -= W % 2**dep_U
im_noisy = im_noisy[:H, :W, ]
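
The snippet ends right after cropping the noisy image so that both sides are multiples of 2**dep_U. Below is a hedged sketch of the inference step that would typically follow; the net(x, 'test') calling convention and the assumption that the first C output channels hold the predicted noise are not confirmed by the excerpt.

import numpy as np

# Hedged sketch of running the loaded model on the cropped noisy image.
# The forward signature and the output channel layout are assumptions.
inputs = torch.from_numpy(img_as_float(im_noisy).transpose([2, 0, 1])[np.newaxis, ]).float()
if use_gpu:
    inputs = inputs.cuda()
with torch.no_grad():
    tic = time.time()
    phi_Z = net(inputs, 'test')
    toc = time.time() - tic
pred_noise = phi_Z.cpu().numpy()[0, :C].transpose([1, 2, 0])
im_denoise = np.clip(im_noisy - pred_noise, 0.0, 1.0)
print('Inference time: {:.4f}s'.format(toc))
plt.imshow(np.concatenate([im_noisy, im_denoise], axis=1))
plt.show()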