def main():
    """Train a VDN-U model on the SIDD benchmark and evaluate on its test split."""
    # Build the network and replicate it across all visible GPUs.
    net = VDN.VDNU(_C, wf=args.wf)
    net = nn.DataParallel(net).cuda()

    # Adam with a fixed multi-step learning-rate decay schedule.
    optimizer = optim.Adam(net.parameters(), lr=args.lr)
    args.milestones = [10, 20, 25, 30, 35, 40, 45, 50]
    scheduler = optim.lr_scheduler.MultiStepLR(optimizer, args.milestones, args.gamma)

    if args.resume:
        # Resume: restore model/optimizer/scheduler state plus the step counters.
        if not os.path.isfile(args.resume):
            sys.exit('Please provide corrected model path!')
        print('=> Loading checkpoint {:s}'.format(args.resume))
        ckpt = torch.load(args.resume)
        args.epoch_start = ckpt['epoch']
        args.step = ckpt['step']
        args.step_img = ckpt['step_img']
        optimizer.load_state_dict(ckpt['optimizer_state_dict'])
        scheduler.load_state_dict(ckpt['lr_scheduler_state_dict'])
        net.load_state_dict(ckpt['model_state_dict'])
        args.clip_grad_D = ckpt['grad_norm_D']
        args.clip_grad_S = ckpt['grad_norm_S']
        print('=> Loaded checkpoint {:s} (epoch {:d})'.format(
            args.resume, ckpt['epoch']))
    else:
        # Fresh run: Kaiming init, then wipe any stale log/model directories.
        net = VDN.weight_init_kaiming(net, args.activation)
        args.epoch_start = 0
        for folder in (args.log_dir, args.model_dir):
            if os.path.isdir(folder):
                shutil.rmtree(folder)
            os.makedirs(folder)

    # Echo every configured option for the training log.
    for name in vars(args):
        print('{:<15s}: {:s}'.format(name, str(getattr(args, name))))

    # SIDD train/test archives live next to each other in args.SIDD_dir.
    path_SIDD_train = os.path.join(args.SIDD_dir, 'small_imgs_train.hdf5')
    path_SIDD_test = os.path.join(args.SIDD_dir, 'small_imgs_test.hdf5')
    datasets = {
        'train': DenoisingDatasets.BenchmarkTrain(path_SIDD_train,
                                                  5000 * args.batch_size,
                                                  args.patch_size,
                                                  radius=args.radius,
                                                  eps2=args.eps2,
                                                  noise_estimate=True),
        'test_SIDD': DenoisingDatasets.BenchmarkTest(path_SIDD_test),
    }

    # Hand everything to the training loop.
    print('\nBegin training with GPU: ' + str(args.gpu_id))
    train_model(net, datasets, optimizer, scheduler, loss_fn)
def main():
    """Train VDN on simulated non-i.i.d. Gaussian noise; test on CBSD68 cases."""
    # Network, spread over all visible GPUs.
    net = VDN(_C, slope=args.slope, wf=args.wf, dep_U=args.depth)
    net = nn.DataParallel(net).cuda()

    # Adam with a fixed multi-step learning-rate decay schedule.
    optimizer = optim.Adam(net.parameters(), lr=args.lr)
    args.milestones = [10, 20, 25, 30, 35, 40, 45, 50]
    scheduler = optim.lr_scheduler.MultiStepLR(optimizer, args.milestones, args.gamma)

    if args.resume:
        # Resume: restore model/optimizer/scheduler state plus step counters.
        if not os.path.isfile(args.resume):
            sys.exit('Please provide corrected model path!')
        print('=> Loading checkpoint {:s}'.format(args.resume))
        ckpt = torch.load(args.resume)
        args.epoch_start = ckpt['epoch']
        args.step = ckpt['step']
        args.step_img = ckpt['step_img']
        optimizer.load_state_dict(ckpt['optimizer_state_dict'])
        scheduler.load_state_dict(ckpt['lr_scheduler_state_dict'])
        net.load_state_dict(ckpt['model_state_dict'])
        args.clip_grad_D = ckpt['grad_norm_D']
        args.clip_grad_S = ckpt['grad_norm_S']
        print('=> Loaded checkpoint {:s} (epoch {:d})'.format(args.resume, ckpt['epoch']))
    else:
        # Fresh run: clear stale log/model directories before training.
        args.epoch_start = 0
        for folder in (args.log_dir, args.model_dir):
            if os.path.isdir(folder):
                shutil.rmtree(folder)
            os.makedirs(folder)

    # Echo every configured option for the training log.
    for name in vars(args):
        print('{:<15s}: {:s}'.format(name, str(getattr(args, name))))

    # Training images: every jpg/png/bmp under the simulate dir, sorted by path.
    sim_dir = Path(args.simulate_dir)
    collected = []
    for pattern in ('*.jpg', '*.png', '*.bmp'):
        collected.extend(sim_dir.glob(pattern))
    train_im_list = sorted(str(p) for p in collected)

    # Pre-generated non-i.i.d. noise archives for the three CBSD68 test cases.
    niid_dir = Path('test_data') / 'noise_niid'
    test_case1_h5 = niid_dir / 'CBSD68_niid_case1.hdf5'
    test_case2_h5 = niid_dir / 'CBSD68_niid_case2.hdf5'
    test_case3_h5 = niid_dir / 'CBSD68_niid_case3.hdf5'
    test_im_list = sorted(str(p) for p in (Path('test_data') / 'CBSD68').glob('*.png'))

    datasets = {
        'train': DenoisingDatasets.SimulateTrain(train_im_list,
                                                 5000 * args.batch_size,
                                                 args.patch_size,
                                                 radius=args.radius,
                                                 noise_estimate=True),
        'test_cbsd681': DenoisingDatasets.SimulateTest(test_im_list, test_case1_h5),
        'test_cbsd682': DenoisingDatasets.SimulateTest(test_im_list, test_case2_h5),
        'test_cbsd683': DenoisingDatasets.SimulateTest(test_im_list, test_case3_h5),
    }

    # Hand everything to the training loop.
    print('\nBegin training with GPU: ' + str(args.gpu_id))
    train_model(net, datasets, optimizer, scheduler, loss_fn)
from networks import VDN
from skimage.measure import compare_psnr, compare_ssim
from skimage import img_as_float, img_as_ubyte
from utils import load_state_dict_cpu
from matplotlib import pyplot as plt
import time
from scipy.io import loadmat

# Demo: denoise one DND benchmark image with a model pretrained on SIDD.
use_gpu = True
C = 3      # number of image channels (RGB)
dep_U = 4  # U-Net depth: H and W are cropped to multiples of 2**dep_U below

# load the pretrained model
print('Loading the Model')
checkpoint = torch.load('./model_state/model_state_SIDD')
net = VDN(C, dep_U=dep_U, wf=64)
if use_gpu:
    net = torch.nn.DataParallel(net).cuda()
    net.load_state_dict(checkpoint)
else:
    # NOTE(review): presumably remaps a DataParallel-saved checkpoint
    # (strips 'module.' prefixes) for a CPU model -- confirm in utils
    load_state_dict_cpu(net, checkpoint)
net.eval()

# Load the noisy sRGB image from the first DND .mat file.
im_noisy = loadmat('./test_data/DND/1.mat')['InoisySRGB']
H, W, _ = im_noisy.shape
# Crop so the spatial dims divide evenly through dep_U down/up-sampling stages.
if H % 2**dep_U != 0:
    H -= H % 2**dep_U
if W % 2**dep_U != 0:
    W -= W % 2**dep_U
im_noisy = im_noisy[:H, :W, ]
def main():
    """Build the selected VDN variant, restore state when resuming, assemble the
    benchmark datasets (SIDD and/or Renoir) and launch training.

    Reads all configuration from the module-level ``args`` namespace and stores
    derived values (epoch_start, clip_grad_*) back onto it for ``train_model``.
    """
    # ---- network selection -------------------------------------------------
    if args.net.lower() == 'vdn':
        net = VDN.VDNU(_C, args.activation, args.relu_init, wf=args.wf,
                       batch_norm=args.bn_UNet)
    elif args.net.lower() == 'vdnrd':
        net = VDN.VDNRD(_C, args.activation, args.relu_init, num_RDB=args.num_RDB,
                        num_conv=args.num_conv, growth_rate=args.growth_rate)
        # FIX: originally `clip_grad_D = 5e3` bound a local that was never read
        # (dead code). The clip values flow through args everywhere else in this
        # function, so store the RDB override there.
        args.clip_grad_D = 5e3
    elif args.net.lower() == 'vdnrdu':
        net = VDN.VDNRDU(_C, args.activation, args.relu_init, num_RDB=args.num_RDBU,
                         num_conv=args.num_conv, wf=args.wf)
    else:
        sys.exit('Please input the corrected network type')

    # multi GPU setting
    net = nn.DataParallel(net).cuda()

    # optimizer: step decay for plain VDN, multi-step decay for the RDB variants
    optimizer = optim.Adam(net.parameters(), lr=args.lr)
    if args.net.lower() == 'vdn':
        scheduler = optim.lr_scheduler.StepLR(optimizer, args.step_size, args.gamma)
    else:
        scheduler = optim.lr_scheduler.MultiStepLR(optimizer, args.multi_steps, args.gamma)

    if args.resume:
        if os.path.isfile(args.resume):
            # Restore model/optimizer/scheduler state and the step counters.
            print('=> Loading checkpoint {:s}'.format(args.resume))
            checkpoint = torch.load(args.resume)
            args.epoch_start = checkpoint['epoch']
            args.step = checkpoint['step']
            args.step_img = checkpoint['step_img']
            optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
            scheduler.load_state_dict(checkpoint['lr_scheduler_state_dict'])
            net.load_state_dict(checkpoint['model_state_dict'])
            args.clip_grad_D = checkpoint['grad_norm_D']
            args.clip_grad_S = checkpoint['grad_norm_S']
            if args.activation.lower() == 'prelu':
                # FIX: these were stored into locals that immediately went out
                # of scope; keep them on args like the other clip values.
                args.clip_grad_D_relu = checkpoint['grad_norm_D_relu']
                args.clip_grad_S_relu = checkpoint['grad_norm_S_relu']
            print('=> Loaded checkpoint {:s} (epoch {:d})'.format(
                args.resume, checkpoint['epoch']))
        else:
            sys.exit('Please provide corrected model path!')
    else:
        # Fresh run: clear any stale log/model directories.
        args.epoch_start = 0
        if os.path.isdir(args.log_dir):
            shutil.rmtree(args.log_dir)
        os.makedirs(args.log_dir)
        if os.path.isdir(args.model_dir):
            shutil.rmtree(args.model_dir)
        os.makedirs(args.model_dir)

    # print the arg parameters
    for arg in vars(args):
        print('{:<15s}: {:s}'.format(arg, str(getattr(args, arg))))

    # train dataset paths
    path_Renoir_train = os.path.join(args.Renoir_dir, 'small_imgs_all.hdf5')
    path_SIDD_train = os.path.join(args.SIDD_dir, 'small_imgs_train.hdf5')
    # test dataset path
    path_SIDD_test = os.path.join(args.SIDD_dir, 'small_imgs_test.hdf5')

    # data_case selects the training corpus: 1 = SIDD+Renoir, 2 = SIDD, 3 = Renoir
    if args.data_case == 1:
        dataset_Renoir_train = DenoisingDatasets.BenchmarkTrain(
            path_Renoir_train, 1500 * args.batch_size, args.patch_size,
            radius=args.radius, eps2=args.eps2, noise_estimate=True)
        dataset_SIDD_train = DenoisingDatasets.BenchmarkTrain(
            path_SIDD_train, 3500 * args.batch_size, args.patch_size,
            radius=args.radius, eps2=args.eps2, noise_estimate=True)
        datasets = {
            'train': uData.ConcatDataset((dataset_SIDD_train, dataset_Renoir_train)),
            'test_SIDD': DenoisingDatasets.BenchmarkTest(path_SIDD_test)
        }
    elif args.data_case == 2:
        datasets = {
            'train': DenoisingDatasets.BenchmarkTrain(
                path_SIDD_train, 5000 * args.batch_size, args.patch_size,
                radius=args.radius, eps2=args.eps2, noise_estimate=True),
            'test_SIDD': DenoisingDatasets.BenchmarkTest(path_SIDD_test)
        }
    elif args.data_case == 3:
        datasets = {
            'train': DenoisingDatasets.BenchmarkTrain(
                path_Renoir_train, 5000 * args.batch_size, args.patch_size,
                radius=args.radius, eps2=args.eps2, noise_estimate=True),
            'test_SIDD': DenoisingDatasets.BenchmarkTest(path_SIDD_test)
        }
    else:
        sys.exit('Please input the correct data case: 1, 2 and 3')

    # train model
    print('\nBegin training with GPU: ' + str(args.gpu_id))
    train_model(net, datasets, optimizer, scheduler, loss_fn)
from networks import VDN
from skimage.measure import compare_psnr, compare_ssim
from skimage import img_as_float, img_as_ubyte
from utils import load_state_dict_cpu
from matplotlib import pyplot as plt
import time
from scipy.io import loadmat

# Demo: denoise one DND benchmark image with a VDN-U model pretrained on SIDD.
use_gpu = True
C = 3      # number of image channels (RGB)
dep_U = 4  # U-Net depth: H and W are cropped to multiples of 2**dep_U below

# load the pretrained model
print('Loading the Model')
checkpoint = torch.load('./model_state/model_state_SIDD')
net = VDN.VDNU(C, dep_U=dep_U, wf=64, batch_norm=True)
if use_gpu:
    net = torch.nn.DataParallel(net).cuda()
    net.load_state_dict(checkpoint)
else:
    # NOTE(review): presumably remaps a DataParallel-saved checkpoint
    # (strips 'module.' prefixes) for a CPU model -- confirm in utils
    load_state_dict_cpu(net, checkpoint)
net.eval()

# NOTE(review): official DND .mat files use the key 'InoisySRGB'; this variant
# reads 'im_noisy' -- verify the .mat files shipped with this project.
im_noisy = loadmat('./test_data/DND/1.mat')['im_noisy']
H, W, _ = im_noisy.shape
# Crop so the spatial dims divide evenly through dep_U down/up-sampling stages.
if H % 2**dep_U != 0:
    H -= H % 2**dep_U
if W % 2**dep_U != 0:
    W -= W % 2**dep_U
im_noisy = im_noisy[:H, :W, ]
from pathlib import Path
from utils import peaks, load_state_dict_cpu
from matplotlib import pyplot as plt
import time

# Demo: denoise a random CBSD68 image with a model pretrained on
# non-i.i.d. Gaussian noise. Runs on CPU by default.
use_gpu = False
C = 3      # number of image channels (RGB)
dep_U = 4  # U-Net depth: H and W are cropped to multiples of 2**dep_U below

# Collect the CBSD68 ground-truth test images.
data_path = Path('test_data') / 'CBSD68'
im_list = [str(x) for x in data_path.glob('*.png')]

# load the pretrained model
print('Loading the Model')
checkpoint = torch.load('./model_state/model_state_niidgauss')
net = VDN.VDNU(C, dep_U=dep_U, wf=64)
if use_gpu:
    net = torch.nn.DataParallel(net).cuda()
    net.load_state_dict(checkpoint)
else:
    # NOTE(review): presumably remaps a DataParallel-saved checkpoint
    # (strips 'module.' prefixes) for a CPU model -- confirm in utils
    load_state_dict_cpu(net, checkpoint)
net.eval()

# Pick one test image at random.
im_path = im_list[np.random.randint(0, len(im_list))]
im_name = im_path.split('/')[-1]
# cv2 reads BGR; reverse the channel axis to RGB, scale to float in [0, 1].
im_gt = img_as_float(cv2.imread(im_path)[:, :, ::-1])
H, W, _ = im_gt.shape
# Crop so the spatial dims divide evenly through dep_U down/up-sampling stages.
if H % 2**dep_U != 0:
    H -= H % 2**dep_U
if W % 2**dep_U != 0:
    W -= W % 2**dep_U