def optimizeMAP(data_list, scale, lr, net, config):
    """MAP denoising over data_list for a given prior scale and learning rate;
    logs PSNR/SSIM per optimization step and returns the best step on average."""
    # reproducibility
    torch.manual_seed(1)
    np.random.seed(1)
    torch.backends.cudnn.deterministic = True

    # Device for computation (CPU or GPU)
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    best_psnr_sum = 0
    best_ssim_sum = 0
    cnt = 0
    step = 0

    description = 'Evaluation_parameter_scale=' + str(scale) + '_lr' + str(lr)
    logfile = open(config.directory.joinpath(description + '.txt'), 'w+')

    # writer_tensorboard = SummaryWriter(comment=description)
    writer_tensorboard = SummaryWriter(config.directory.joinpath(description))
    writer_tensorboard.add_text('Config parameters', config.config_string)

    # PSNR/SSIM per optimization step (one row per image)
    psnr_per_step = np.zeros((len(data_list), config.n_epochs))
    ssim_per_step = np.zeros((len(data_list), config.n_epochs))
    image_list = torch.zeros((len(data_list), config.n_epochs, 1, 1,
                              config.crop_size, config.crop_size))

    # Iterate through dataset
    for cnt, (image, y) in enumerate(data_list):
        image = image.detach().clone().to(torch.float32)

        # Initialization of the parameter to optimize
        x = y.detach().clone().to(device).requires_grad_(True)

        # Optimizing parameters
        sigma = torch.tensor(config.sigma * 2 / 255, dtype=torch.float32).to(device)
        alpha = torch.tensor(scale, dtype=torch.float32).to(device)  # config.alpha
        y = y.to(device)

        params = [x]

        # Initialize measurement parameters
        conv_cnt = 0
        best_psnr = 0
        best_ssim = 0
        psnr_ssim = 0

        if config.linesearch:
            optimizer = config.optimizer(params, lr=config.lr, history_size=10,
                                         line_search='Wolfe', debug=True)
        else:
            optimizer = config.optimizer(params, lr=lr, betas=[0.9, 0.8])  # , momentum=0.88

        scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=config.lr_decay)

        denoise = Denoising(optimizer, config.linesearch, scheduler, config.continuous_logistic,
                            image, y, net, sigma, alpha, net_interval=1, writer_tensorboard=None)

        for i in range(config.n_epochs):
            x, gradient, loss = denoise(x, y, image, i)

            psnr = PSNR(x[0, :, :, :].cpu(), image[0, :, :, :].cpu())
            ssim = c_ssim(((x.data[0, 0, :, :] + 1) / 2).cpu().detach().clamp(min=-1, max=1).numpy(),
                          ((image.data[0, 0, :, :] + 1) / 2).cpu().numpy(),
                          data_range=1, gaussian_weights=True)

            # Save PSNR/SSIM of this step
            psnr_per_step[cnt, i] = psnr.detach().numpy()
            ssim_per_step[cnt, i] = ssim

            # tensorboard
            writer_tensorboard.add_scalar('Optimize/PSNR_of_Image' + str(cnt), psnr, i)
            writer_tensorboard.add_scalar('Optimize/SSIM_of_Image' + str(cnt), ssim, i)
            writer_tensorboard.add_scalar('Optimize/Loss_of_Image' + str(cnt), loss, i)

            # Save best SSIM and PSNR
            if ssim >= best_ssim:
                best_ssim = ssim
            if psnr >= best_psnr:
                best_psnr = psnr
                step = i + 1
                psnr_ssim = ssim
                conv_cnt = 0
            else:
                conv_cnt += 1

            # Save the current reconstruction
            image_list[cnt, i] = (x.detach().cpu() + 1) / 2

            # if conv_cnt > config.control_epochs: break

        psnr = PSNR(x[0, :, :, :].cpu(), image[0, :, :, :].cpu())
        ssim = c_ssim(((x.data[0, 0, :, :] + 1) / 2).cpu().detach().clamp(min=-1, max=1).numpy(),
                      ((image.data[0, 0, :, :] + 1) / 2).cpu().numpy(),
                      data_range=1, gaussian_weights=True)

        # tensorboard
        writer_tensorboard.add_scalar('Optimize/Best_PSNR', best_psnr, cnt)
        writer_tensorboard.add_scalar('Optimize/Best_SSIM', best_ssim, cnt)
        writer_tensorboard.add_scalar('Optimize/SSIM_to_best_PSNR', psnr_ssim, cnt)

        print('Image ', cnt, ': ', psnr, '-', ssim)
        logfile.write('PSNR_each: %f - step %d\r\n' % (psnr, step))
        logfile.write('PSNR_best: %f\r\n' % best_psnr)
        logfile.write('SSIM_each: %f\r\n' % ssim)
        logfile.write('SSIM_best: %f\r\n' % best_ssim)

        best_psnr_sum += best_psnr
        best_ssim_sum += best_ssim

        # if cnt == 1: break

    psnr_avg = best_psnr_sum / (cnt + 1)
    ssim_avg = best_ssim_sum / (cnt + 1)

    logfile.write('Best_PSNR_avg: %f\r\n' % psnr_avg)
    logfile.write('Best_SSIM_avg: %f\r\n' % ssim_avg)

    # Logging of average PSNR and SSIM per step
    log_psnr_per_step = open(config.directory.joinpath(description + '_psnr_per_step.txt'), 'w+')
    log_ssim_per_step = open(config.directory.joinpath(description + '_ssim_per_step.txt'), 'w+')

    psnr_avg_step = np.mean(psnr_per_step, 0)
    ssim_avg_step = np.mean(ssim_per_step, 0)

    for n in range(psnr_avg_step.shape[0]):
        log_psnr_per_step.write('Step %d: %f\r\n' % (n + 1, psnr_avg_step[n]))
        log_ssim_per_step.write('Step %d: %f\r\n' % (n + 1, ssim_avg_step[n]))

    best_step = np.argmax(psnr_avg_step) + 1
    log_psnr_per_step.write('Best PSNR: %f\r\n' % np.max(psnr_avg_step))
    log_psnr_per_step.write('Step to best PSNR: %d\r\n' % best_step)

    log_psnr_per_step.close()
    log_ssim_per_step.close()
    logfile.close()

    # Save the reconstructions at the best step in tensorboard
    for i in range(len(data_list)):
        image_grid = make_grid(image_list[i, best_step - 1], normalize=True, scale_each=True)
        writer_tensorboard.add_image('Image', image_grid, i)
    writer_tensorboard.close()

    return np.max(psnr_avg_step), ssim_avg_step[best_step - 1], best_step
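# =============================================================================
# Hedged sketch: the objective that the Denoising step above is assumed to
# minimise.  Denoising() returns a loss that appears to be a negative
# log-posterior of the form  ||x - y||^2 / (2 * sigma^2) - alpha * log p(x),
# with log p(x) coming from the learned prior network `net`.  The helper below
# is an illustration only: it swaps the learned prior for a total-variation
# term so the example stays self-contained; `_map_step_sketch` is not part of
# the original code.
# =============================================================================
def _map_step_sketch(x, y, sigma, alpha, optimizer):
    """One gradient step on a stand-in MAP objective (illustration only)."""
    optimizer.zero_grad()
    # Data (likelihood) term for Gaussian noise with standard deviation sigma.
    data_term = ((x - y) ** 2).sum() / (2 * sigma ** 2)
    # Stand-in prior: anisotropic total variation instead of the learned log-likelihood.
    tv = (x[..., 1:, :] - x[..., :-1, :]).abs().sum() + \
         (x[..., :, 1:] - x[..., :, :-1]).abs().sum()
    loss = data_term + alpha * tv
    loss.backward()
    optimizer.step()
    return loss.item()

# Example usage (hypothetical shapes and values):
#   y = torch.randn(1, 1, 64, 64)
#   x = y.clone().requires_grad_(True)
#   opt = torch.optim.Adam([x], lr=0.05)
#   for _ in range(10):
#       _map_step_sketch(x, y, sigma=0.2, alpha=1e-3, optimizer=opt)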
def patch_denoising(dataset, config, net):
    """Denoise full-size images patch-wise: split each noisy image into patches,
    run the MAP optimization per patch, and reassemble the result."""
    # reproducibility
    torch.manual_seed(1)
    np.random.seed(1)

    # Device for computation (CPU or GPU)
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # Load datasetloader
    # test_loader = get_loader_cifar('../../../datasets/CIFAR10', 1, train=False, num_workers=0)
    # test_loader = get_loader_bsds('../../../datasets/BSDS/pixelcnn_data/train', 1, train=False, crop_size=[32, 32])
    test_loader = get_loader_denoising('../../../datasets/' + dataset, 1, train=False,
                                       gray_scale=True,
                                       crop_size=None)  # e.g. [140, 140] or [config.crop_size, config.crop_size]

    psnr_sum = 0
    ssim_sum = 0
    cnt = 0
    step = 0

    description = 'Denoising_dataset_' + dataset
    logfile = open(config.directory.joinpath(description + '.txt'), 'w+')

    # writer_tensorboard = SummaryWriter(comment=description)
    writer_tensorboard = SummaryWriter(config.directory.joinpath(description))
    writer_tensorboard.add_text('Config parameters', config.config_string)

    # Iterate through dataset
    for image, label in test_loader:
        cnt += 1
        image = image.detach().clone().to(torch.float32)
        img_size = image.size()

        # Add noise to image
        sigma = torch.tensor(config.sigma)
        mean = torch.tensor(0.)
        noisy_img = add_noise(image, sigma, mean)

        # Size of patches
        patch_size = [256, 256]

        # Crop and create array of patches
        noisy_patches, upper_borders, left_borders = patchify(noisy_img, patch_size)
        image_patches, _, _ = patchify(image, patch_size)
        print(image_patches.size())

        # Optimizing parameters
        sigma = torch.tensor(sigma * 2 / 255, dtype=torch.float32).to(device)
        alpha = torch.tensor(config.alpha, dtype=torch.float32).to(device)

        denoised_patches = torch.zeros(noisy_patches.size())

        for i in range(noisy_patches.size(0)):
            # Initialization of the parameter to optimize
            x = noisy_patches[i].detach().clone().to(device).requires_grad_(True)
            img = image_patches[i]
            y = noisy_patches[i].to(device)

            params = [x]

            if config.linesearch:
                optimizer = config.optimizer(params, lr=config.lr, history_size=10,
                                             line_search='Wolfe', debug=True)
            else:
                optimizer = config.optimizer(params, lr=config.lr, betas=[0.9, 0.8])

            scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=config.lr_decay)

            denoise = Denoising(optimizer, config.linesearch, scheduler, config.continuous_logistic,
                                image, y, net, sigma, alpha, net_interval=1, writer_tensorboard=None)

            conv_cnt = 0.
            best_psnr = 0.
            best_ssim = 0.
            psnr_ssim = 0.

            for j in range(config.n_epochs):
                x, gradient, loss = denoise(x, y, img, j)

                psnr = PSNR(x[0, :, :, :].cpu(), img[0, :, :, :].cpu())
                ssim = c_ssim(((x.data[0, 0, :, :] + 1) / 2).cpu().detach().clamp(min=-1, max=1).numpy(),
                              ((img.data[0, 0, :, :] + 1) / 2).cpu().numpy(),
                              data_range=1, gaussian_weights=True)
                print('SSIM: ', ssim)

                # Save best SSIM and PSNR
                if ssim >= best_ssim:
                    best_ssim = ssim
                if psnr >= best_psnr:
                    best_psnr = psnr
                    step = j + 1
                    psnr_ssim = ssim
                    conv_cnt = 0
                else:
                    conv_cnt += 1

                if keyboard.is_pressed('*'):
                    break

            # x_plt = (x + 1) / 2
            denoised_patches[i] = x.detach().cpu()

        img_denoised = aggregate(denoised_patches, upper_borders, left_borders, img_size)

        psnr = PSNR(img_denoised[0, :, :, :].cpu().clamp(min=-1, max=1), image[0, :, :, :].cpu())
        ssim = c_ssim(((img_denoised.data[0, 0, :, :] + 1) / 2).cpu().detach().clamp(min=-1, max=1).numpy(),
                      ((image.data[0, 0, :, :] + 1) / 2).cpu().numpy(),
                      data_range=1, gaussian_weights=True)

        # tensorboard
        img_denoised_plt = (img_denoised + 1) / 2
        writer_tensorboard.add_scalar('Optimize/Best_PSNR', psnr, cnt)
        writer_tensorboard.add_scalar('Optimize/Best_SSIM', ssim, cnt)
        image_grid = make_grid(img_denoised_plt, normalize=True, scale_each=True)
        writer_tensorboard.add_image('Image', image_grid, cnt)

        print('Image ', cnt, ': ', psnr, '-', ssim)
        logfile.write('PSNR_each: %f - step %d\r\n' % (psnr, step))
        logfile.write('SSIM_each: %f\r\n' % ssim)

        psnr_sum += psnr
        ssim_sum += ssim

        # Plotting
        fig, axs = plt.subplots(2, 1, figsize=(8, 8))
        count = 0
        for i in range(0, 1):
            axs[count].imshow(((denoised_patches[i][0, 0] + 1) / 2).cpu().detach().numpy(), cmap='gray')
            count += 1
            # fig.colorbar(im, ax=axs[i])

        if cnt > 7:
            break

    psnr_avg = psnr_sum / cnt
    ssim_avg = ssim_sum / cnt

    print('PSNR_Avg: ', psnr_avg)
    print('SSIM_Avg: ', ssim_avg)
    logfile.write('PSNR_avg: %f\r\n' % psnr_avg)
    logfile.write('SSIM_avg: %f\r\n' % ssim_avg)

    logfile.close()
    writer_tensorboard.close()

    print(img.size())
    axs[1].imshow(((img_denoised[0, 0, :, :] + 1) / 2).cpu().detach().numpy(), cmap='gray')
    print(PSNR(img_denoised[0, :, :, :].cpu(), image[0, :, :, :].cpu()))

    return psnr_avg, ssim_avg
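# =============================================================================
# Hedged sketch of the patchify()/aggregate() contract used above.  Both
# helpers come from the project's utilities; judging by how they are called,
# patchify() returns a tensor of patches plus the top/left offsets needed to
# put them back, and aggregate() is its inverse.  The minimal, non-overlapping
# versions below (`_patchify_sketch`, `_aggregate_sketch`) are assumptions made
# for illustration only and expect image dimensions that are multiples of the
# patch size.
# =============================================================================
def _patchify_sketch(img, patch_size):
    """Split a (1, C, H, W) image into non-overlapping patches.

    Returns an (N, 1, C, ph, pw) tensor plus the top and left offset of each patch."""
    _, _, h, w = img.shape
    ph, pw = patch_size
    patches, tops, lefts = [], [], []
    for top in range(0, h - ph + 1, ph):
        for left in range(0, w - pw + 1, pw):
            patches.append(img[:, :, top:top + ph, left:left + pw])
            tops.append(top)
            lefts.append(left)
    return torch.stack(patches, dim=0), tops, lefts


def _aggregate_sketch(patches, tops, lefts, img_size):
    """Paste patches back at their recorded offsets (inverse of _patchify_sketch)."""
    out = torch.zeros(img_size)
    ph, pw = patches.shape[-2:]
    for k, (top, left) in enumerate(zip(tops, lefts)):
        out[:, :, top:top + ph, left:left + pw] = patches[k]
    return out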
def optimizeMAP(data_list, scale, net, config):
    """Parameter-training variant: evaluate a prior weight alpha (= scale) on data_list
    and return the average best PSNR/SSIM and the average optimal step."""
    # reproducibility
    torch.manual_seed(1)
    np.random.seed(1)

    # Device for computation (CPU or GPU)
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    psnr_sum = 0.
    ssim_sum = 0.
    step_sum = 0.
    cnt = 0

    description = 'Parametertraining_alpha_' + str(scale)

    # writer_tensorboard = SummaryWriter(comment=description)
    writer_tensorboard = SummaryWriter(config.directory.joinpath(description))
    writer_tensorboard.add_text('Config parameters', config.config_string)

    logfile = open(config.directory.joinpath(description + '.txt'), 'w+')

    for image, y in data_list:
        cnt += 1
        image = image.detach().clone().to(torch.float32)

        # Initialization of the parameter to optimize
        x = y.detach().clone().to(device).requires_grad_(True)

        # Optimizing parameters
        sigma = torch.tensor(config.sigma * 2 / 255, dtype=torch.float32).to(device)
        alpha = torch.tensor(scale, dtype=torch.float32).to(device)
        y = y.to(device)

        params = [x]

        optimizer = config.optimizer(params, lr=config.lr)
        scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=config.lr_decay)

        denoise = Denoising(optimizer, scheduler, image, y, net, sigma, alpha,
                            net_interval=1, writer_tensorboard=None)

        conv_cnt = 0.
        best_psnr = 0.
        best_ssim = 0.
        worst_psnr = 0.
        optimal_step = 0

        for i in range(config.n_epochs):
            x, gradient = denoise(x, y, image, i)

            psnr = PSNR(x[0, :, :, :].cpu(), image[0, :, :, :].cpu())
            ssim = c_ssim(((x.data[0, 0, :, :] + 1) / 2).cpu().detach().clamp(min=-1, max=1).numpy(),
                          ((image.data[0, 0, :, :] + 1) / 2).cpu().numpy(),
                          data_range=1, gaussian_weights=True)
            print('SSIM: ', ssim)

            # Save best SSIM and PSNR
            if ssim >= best_ssim:
                best_ssim = ssim
            if psnr >= best_psnr:
                best_psnr = psnr
                psnr_ssim = ssim
                x_plt = (x + 1) / 2
                optimal_step = i + 1
            else:
                conv_cnt += 1

            if psnr < worst_psnr:
                worst_psnr = psnr

            # if keyboard.is_pressed('s'): break
            if conv_cnt > 2:
                break

        psnr = PSNR(x[0, :, :, :].cpu(), image[0, :, :, :].cpu())
        ssim = c_ssim(((x.data[0, 0, :, :] + 1) / 2).cpu().detach().clamp(min=-1, max=1).numpy(),
                      ((image.data[0, 0, :, :] + 1) / 2).cpu().numpy(),
                      data_range=1, gaussian_weights=True)

        # tensorboard
        writer_tensorboard.add_scalar('Optimize/PSNR', best_psnr, cnt)
        writer_tensorboard.add_scalar('Optimize/SSIM', best_ssim, cnt)
        writer_tensorboard.add_scalar('Optimize/SSIM_to_best_PSNR', psnr_ssim, cnt)
        image_grid = make_grid(x_plt, normalize=True, scale_each=True)
        writer_tensorboard.add_image('Image', image_grid, cnt)

        print('Image ', cnt, ': ', psnr, '-', ssim)
        logfile.write('PSNR_each: %f - step %d\r\n' % (psnr, optimal_step))
        logfile.write('PSNR_best: %f\r\n' % best_psnr)
        logfile.write('SSIM_each: %f\r\n' % ssim)
        logfile.write('SSIM_best: %f\r\n' % best_ssim)

        psnr_sum += best_psnr
        ssim_sum += best_ssim
        step_sum += optimal_step

        # if cnt == 1: break

    psnr_avg = psnr_sum / cnt
    ssim_avg = ssim_sum / cnt
    step_avg = step_sum / cnt

    print(psnr_avg)
    print(ssim_avg)
    logfile.write('PSNR_avg: %f\r\n' % psnr_avg)
    logfile.write('SSIM_avg: %f\r\n' % ssim_avg)

    logfile.close()
    writer_tensorboard.close()

    return psnr_avg, ssim_avg, step_avg
def denoise_parameter(par, config, net):
    """Evaluate a single parameter value `par` on the parameter-evaluation set and
    log PSNR/SSIM per optimization step."""
    # reproducibility
    torch.manual_seed(1)
    np.random.seed(1)

    # Load datasetloader
    # test_loader = get_loader_cifar('../../../datasets/CIFAR10', 1, train=False, gray_scale=False, num_workers=0)
    test_loader = get_loader_denoising('../../../datasets/Parameterevaluation', 1, train=False,
                                       gray_scale=True,
                                       crop_size=[config.crop_size, config.crop_size])

    # Device for computation (CPU or GPU)
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    best_psnr_sum = 0
    best_ssim_sum = 0
    cnt = 0
    step = 0

    description = 'Evaluation_parameter_par=' + str(par)
    logfile = open(config.directory.joinpath(description + '.txt'), 'w+')

    # writer_tensorboard = SummaryWriter(comment=description)
    writer_tensorboard = SummaryWriter(config.directory.joinpath(description))
    writer_tensorboard.add_text('Config parameters', config.config_string)

    # PSNR/SSIM per optimization step (one row per image)
    psnr_per_step = np.zeros((len(test_loader), config.n_epochs))
    ssim_per_step = np.zeros((len(test_loader), config.n_epochs))

    # Iterate through dataset
    for cnt, (image, label) in enumerate(test_loader):
        image = image.detach().clone().to(torch.float32)
        y = add_noise(image, torch.tensor(25.), torch.tensor(0.))

        # Initialization of the parameter to optimize
        x = y.detach().clone().to(device).requires_grad_(True)

        # Optimizing parameters
        sigma = torch.tensor(25. * 2 / 255, dtype=torch.float32).to(device)
        alpha = torch.tensor(config.alpha, dtype=torch.float32).to(device)
        y = y.to(device)

        params = [x]

        # Initialize measurement parameters
        conv_cnt = 0
        best_psnr = 0
        best_ssim = 0
        psnr_ssim = 0
        prev_psnr = 0
        prev_x = x.data

        if config.linesearch:
            optimizer = config.optimizer(params, lr=config.lr, history_size=10,
                                         line_search='Wolfe', debug=True)
        else:
            optimizer = config.optimizer(params, lr=config.lr)

        scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=config.lr_decay)

        denoise = Denoising(optimizer, config.linesearch, scheduler, config.continuous_logistic,
                            image, y, net, sigma, alpha, net_interval=1, writer_tensorboard=None)

        for i in range(config.n_epochs):
            x, gradient = denoise(x, y, image, i)

            psnr = PSNR(x[0, :, :, :].cpu(), image[0, :, :, :].cpu())
            ssim = c_ssim(((x.data[0, 0, :, :] + 1) / 2).cpu().detach().clamp(min=-1, max=1).numpy(),
                          ((image.data[0, 0, :, :] + 1) / 2).cpu().numpy(),
                          data_range=1, gaussian_weights=True)

            # Save PSNR/SSIM of this step
            psnr_per_step[cnt, i] = psnr.detach().numpy()
            ssim_per_step[cnt, i] = ssim

            # tensorboard
            writer_tensorboard.add_scalar('Optimize/PSNR_of_Image' + str(cnt), psnr, i)
            writer_tensorboard.add_scalar('Optimize/SSIM_of_Image' + str(cnt), ssim, i)

            # Save best SSIM and PSNR
            if ssim >= best_ssim:
                best_ssim = ssim
            if psnr >= best_psnr:
                best_psnr = psnr
                step = i + 1
                psnr_ssim = ssim
                x_plt = (x + 1) / 2
                conv_cnt = 0
            else:
                conv_cnt += 1

            # Reset
            # if psnr - prev_psnr < -1.: ...

            # if conv_cnt > config.control_epochs: break

        psnr = PSNR(x[0, :, :, :].cpu(), image[0, :, :, :].cpu())
        ssim = c_ssim(((x.data[0, 0, :, :] + 1) / 2).cpu().detach().clamp(min=-1, max=1).numpy(),
                      ((image.data[0, 0, :, :] + 1) / 2).cpu().numpy(),
                      data_range=1, gaussian_weights=True)

        # tensorboard
        writer_tensorboard.add_scalar('Optimize/Best_PSNR', best_psnr, cnt)
        writer_tensorboard.add_scalar('Optimize/Best_SSIM', best_ssim, cnt)
        writer_tensorboard.add_scalar('Optimize/SSIM_to_best_PSNR', psnr_ssim, cnt)
        image_grid = make_grid(x_plt, normalize=True, scale_each=True)
        writer_tensorboard.add_image('Image', image_grid, cnt)

        print('Image ', cnt, ': ', psnr, '-', ssim)
        logfile.write('PSNR_each: %f - step %d\r\n' % (psnr, step))
        logfile.write('PSNR_best: %f\r\n' % best_psnr)
        logfile.write('SSIM_each: %f\r\n' % ssim)
        logfile.write('SSIM_best: %f\r\n' % best_ssim)

        best_psnr_sum += best_psnr
        best_ssim_sum += best_ssim

        # if cnt == 1: break

    psnr_avg = best_psnr_sum / (cnt + 1)
    ssim_avg = best_ssim_sum / (cnt + 1)

    logfile.write('Best_PSNR_avg: %f\r\n' % psnr_avg)
    logfile.write('Best_SSIM_avg: %f\r\n' % ssim_avg)

    # Logging of average PSNR and SSIM per step
    log_psnr_per_step = open(config.directory.joinpath(description + '_psnr_per_step.txt'), 'w+')
    log_ssim_per_step = open(config.directory.joinpath(description + '_ssim_per_step.txt'), 'w+')

    psnr_avg_step = np.mean(psnr_per_step, 0)
    ssim_avg_step = np.mean(ssim_per_step, 0)

    for n in range(psnr_avg_step.shape[0]):
        log_psnr_per_step.write('Step %d: %f\r\n' % (n, psnr_avg_step[n]))
        log_ssim_per_step.write('Step %d: %f\r\n' % (n, ssim_avg_step[n]))

    print(psnr_avg_step.shape)
    print(psnr_per_step.shape)

    best_step = np.argmax(psnr_avg_step)
    log_psnr_per_step.write('Best PSNR: %f\r\n' % np.max(psnr_avg_step))
    log_psnr_per_step.write('Step to best PSNR: %d\r\n' % best_step)

    log_psnr_per_step.close()
    log_ssim_per_step.close()
    logfile.close()
    writer_tensorboard.close()

    return np.max(psnr_avg_step), ssim_avg_step[best_step], best_step


# =============================================================================
#     # Plotting
#     fig, axs = plt.subplots(3, 1, figsize=(8, 8))
#     axs[0].imshow(y[0, 0, :, :].cpu().detach().numpy(), cmap='gray')
#     axs[1].imshow(x[0, 0, :, :].cpu().detach().numpy(), cmap='gray')
#     axs[2].imshow(image[0, 0, :, :].cpu().detach().numpy(), cmap='gray')
#
#     res = x[0, 0, :, :].cpu().detach().numpy()
#     orig = image[0, 0, :, :].cpu().detach().numpy()
#
#     # plt.imshow(x[0, 0, :, :].cpu().detach().numpy(), cmap='gray')
#     # plt.colorbar()
#     print('Noisy_Image: ', PSNR(y[0, 0, :, :].cpu(), image[0, 0, :, :].cpu()))
#     print('Denoised_Image: ', PSNR(x[0, 0, :, :].cpu(), image[0, 0, :, :].cpu()))
#
#     # save_image(x, 'Denoised.png')
# =============================================================================
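# =============================================================================
# Hedged sketch of the PSNR metric used throughout this module.  PSNR() is
# imported from the project's utilities and is fed tensors in the [-1, 1]
# range of the prior network; the stand-in below (`_psnr_sketch`, not part of
# the original code) assumes that convention and rescales to [0, 1] before
# computing the peak signal-to-noise ratio in dB.
# =============================================================================
def _psnr_sketch(x, ref):
    """PSNR in dB between two tensors given in [-1, 1]."""
    x01 = ((x + 1) / 2).clamp(0, 1)
    ref01 = ((ref + 1) / 2).clamp(0, 1)
    mse = torch.mean((x01 - ref01) ** 2)
    return 10 * torch.log10(1.0 / mse)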
def denoise_dataset(dataset, config, net):
    """Denoise every (cropped) image of a dataset with the MAP optimization and
    report the average PSNR/SSIM."""
    # reproducibility
    torch.manual_seed(1)
    np.random.seed(1)

    # Load datasetloader
    # test_loader = get_loader_cifar('../../../datasets/CIFAR10', 1, train=False, gray_scale=False, num_workers=0)
    test_loader = get_loader_denoising('../../../datasets/' + dataset, 1, train=False,
                                       gray_scale=True,
                                       crop_size=[config.crop_size, config.crop_size])

    # Device for computation (CPU or GPU)
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    psnr_sum = 0
    ssim_sum = 0
    cnt = 0
    step = 0

    description = 'Denoising_dataset_' + dataset
    logfile = open(config.directory.joinpath(description + '.txt'), 'w+')

    # writer_tensorboard = SummaryWriter(comment=description)
    writer_tensorboard = SummaryWriter(config.directory.joinpath(description))
    writer_tensorboard.add_text('Config parameters', config.config_string)

    # Iterate through dataset
    for image, label in test_loader:
        cnt += 1
        image = image.detach().clone().to(torch.float32)
        y = add_noise(image, torch.tensor(config.sigma), torch.tensor(0.))

        # Initialization of the parameter to optimize
        x = y.detach().clone().to(device).requires_grad_(True)

        # Optimizing parameters
        sigma = torch.tensor(config.sigma * 2 / 255, dtype=torch.float32).to(device)
        alpha = torch.tensor(config.alpha, dtype=torch.float32).to(device)
        y = y.to(device)

        params = [x]

        # Initialize measurement parameters
        conv_cnt = 0
        best_psnr = 0
        best_ssim = 0
        psnr_ssim = 0

        if config.linesearch:
            optimizer = config.optimizer(params, lr=config.lr, history_size=10,
                                         line_search='Wolfe', debug=True)
        else:
            optimizer = config.optimizer(params, lr=config.lr, betas=[0.9, 0.8])

        scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=config.lr_decay)

        denoise = Denoising(optimizer, config.linesearch, scheduler, config.continuous_logistic,
                            image, y, net, sigma, alpha, net_interval=1, writer_tensorboard=None)

        for i in range(config.n_epochs):
            x, gradient, loss = denoise(x, y, image, i)

            psnr = PSNR(x[0, :, :, :].cpu(), image[0, :, :, :].cpu())
            ssim = c_ssim(((x.data[0, 0, :, :] + 1) / 2).cpu().detach().clamp(min=-1, max=1).numpy(),
                          ((image.data[0, 0, :, :] + 1) / 2).cpu().numpy(),
                          data_range=1, gaussian_weights=True)
            # print('SSIM: ', ssim)

            # Save best SSIM and PSNR
            if ssim >= best_ssim:
                best_ssim = ssim
            if psnr >= best_psnr:
                best_psnr = psnr
                step = i + 1
                psnr_ssim = ssim
                conv_cnt = 0
            else:
                conv_cnt += 1

            # if conv_cnt > config.control_epochs: break
            # if x.grad.sum().abs() < 10 and i > 50: break

        psnr = PSNR(x[0, :, :, :].cpu().clamp(min=-1, max=1), image[0, :, :, :].cpu())
        ssim = c_ssim(((x.data[0, 0, :, :] + 1) / 2).cpu().detach().clamp(min=0, max=1).numpy(),
                      ((image.data[0, 0, :, :] + 1) / 2).cpu().numpy(),
                      data_range=1, gaussian_weights=True)

        # tensorboard
        x_plt = (x + 1) / 2
        writer_tensorboard.add_scalar('Optimize/Best_PSNR', best_psnr, cnt)
        writer_tensorboard.add_scalar('Optimize/Best_SSIM', best_ssim, cnt)
        writer_tensorboard.add_scalar('Optimize/SSIM_to_best_PSNR', psnr_ssim, cnt)
        image_grid = make_grid(x_plt, normalize=True, scale_each=True)
        writer_tensorboard.add_image('Image', image_grid, cnt)

        print('Image ', cnt, ': ', psnr, '-', ssim)
        logfile.write('PSNR_each: %f - step %d\r\n' % (psnr, step))
        logfile.write('PSNR_best: %f\r\n' % best_psnr)
        logfile.write('SSIM_each: %f\r\n' % ssim)
        logfile.write('SSIM_best: %f\r\n' % best_ssim)

        psnr_sum += psnr
        ssim_sum += ssim

        # if cnt == 1: break

    psnr_avg = psnr_sum / cnt
    ssim_avg = ssim_sum / cnt

    print(psnr_avg)
    print(ssim_avg)
    logfile.write('PSNR_avg: %f\r\n' % psnr_avg)
    logfile.write('SSIM_avg: %f\r\n' % ssim_avg)

    logfile.close()
    writer_tensorboard.close()

    return psnr_avg, ssim_avg
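# =============================================================================
# Hedged sketch of the add_noise() helper used above.  add_noise() comes from
# the project's utilities; since the likelihood later rescales sigma as
# sigma * 2 / 255, the helper is assumed to add Gaussian noise whose standard
# deviation is specified on the 0..255 grey-level scale while the image itself
# lives in [-1, 1].  `_add_noise_sketch` below only illustrates that assumption
# and is not part of the original utilities.
# =============================================================================
def _add_noise_sketch(image, sigma, mean):
    """Add Gaussian noise (sigma and mean given in 0..255 units) to an image in [-1, 1]."""
    noise = torch.randn_like(image) * (sigma * 2 / 255) + mean * 2 / 255
    return image + noise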
                                 dtype=torch.float32, debug=True)
else:
    optimizer = config.optimizer(params, lr=config.lr,
                                 betas=[0.9, 0.8])  # , tolerance_grad=1, tolerance_change=1

scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=config.lr_decay)

denoise = Denoising(optimizer, config.linesearch, scheduler, config.continuous_logistic,
                    image, y, net, sigma, alpha, net_interval=1,
                    writer_tensorboard=writer_tensorboard)

conv_cnt = 0.
best_psnr = 0.
best_ssim = 0.
worst_psnr = 0.

img_list = []

start = time.time()
for i in range(1000):
    # with torch.no_grad():
# denoise = Denoising(optimizer, scheduler, image, y, net, sigma, alpha, net_interval=1, writer_tensorboard=writer_tensorboard)

# Turn the images
if n > 0:
    with torch.no_grad():
        x = img_rot90(x)
        y = img_rot90(y)
        image = img_rot90(image)

    x = x.detach().clone().to(device).requires_grad_(True)
    params = [x]

    optimizer = config.optimizer(params, lr=config.lr)  # 0.05
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=config.lr_decay)

    denoise = Denoising(optimizer, scheduler, image, y, net, sigma, alpha,
                        net_interval=1, writer_tensorboard=writer_tensorboard)

for i in range(5):
    x, gradient = denoise(x, y, image, i)

    psnr = PSNR(x[0, :, :, :].cpu(), image[0, :, :, :].cpu())
    ssim = c_ssim(((x.data[0, 0, :, :] + 1) / 2).cpu().detach().clamp(min=-1, max=1).numpy(),
                  ((image.data[0, 0, :, :] + 1) / 2).cpu().numpy(),
                  data_range=1, gaussian_weights=True)
    print('SSIM: ', ssim)

    # Save best SSIM and PSNR
    if ssim >= best_ssim:
        best_ssim = ssim
    if psnr >= best_psnr:
        best_psnr = psnr
    else: