def main():
    # Experiment specifications
    imagename = 'image_Lena512rgb.png'

    # Load noise-free image
    y = np.array(Image.open(imagename)) / 255

    # Possible noise types to be generated 'gw', 'g1', 'g2', 'g3', 'g4', 'g1w',
    # 'g2w', 'g3w', 'g4w'.
    noise_type = 'g3'
    noise_var = 0.02  # Noise variance
    seed = 0  # seed for pseudorandom noise realization

    # Generate noise with given PSD
    noise, psd, kernel = get_experiment_noise(noise_type, noise_var, seed, y.shape)
    # N.B.: For the sake of simulating a more realistic acquisition scenario,
    # the generated noise is *not* circulant. Therefore there is a slight
    # discrepancy between PSD and the actual PSD computed from infinitely many
    # realizations of this noise with different seeds.

    # Generate noisy image corrupted by additive spatially correlated noise
    # with noise power spectrum PSD
    z = np.atleast_3d(y) + np.atleast_3d(noise)

    # Call BM3D with the default settings.
    y_est = bm3d_rgb(z, psd)

    # To include refiltering:
    # y_est = bm3d_rgb(z, psd, 'refilter')

    # For other settings, use BM3DProfile.
    # profile = BM3DProfile()  # equivalent to profile = BM3DProfile('np')
    # profile.gamma = 6  # redefine value of gamma parameter
    # y_est = bm3d_rgb(z, psd, profile)

    # Note: For white noise, you may instead of the PSD
    # also pass a standard deviation
    # y_est = bm3d_rgb(z, sqrt(noise_var))

    # If the different channels have varying PSDs, you can supply an MxNx3 PSD or a list of 3 STDs:
    # y_est = bm3d_rgb(z, np.concatenate((psd1, psd2, psd3), 2))
    # y_est = bm3d_rgb(z, [sigma1, sigma2, sigma3])

    psnr = get_psnr(y, y_est)
    print("PSNR:", psnr)

    # PSNR ignoring 16-pixel wide borders (as used in the paper), due to refiltering potentially leaving artifacts
    # on the pixels near the boundary of the image when noise is not circulant
    psnr_cropped = get_cropped_psnr(y, y_est, [16, 16])
    print("PSNR cropped:", psnr_cropped)

    # Ignore values outside range for display (or plt gives an error for multichannel input)
    y_est = np.minimum(np.maximum(y_est, 0), 1)
    z_rang = np.minimum(np.maximum(z, 0), 1)
    plt.title("y, z, y_est")
    plt.imshow(np.concatenate((y, np.squeeze(z_rang), y_est), axis=1))
    plt.show()
def add_noise(image, sigma):
    """Corrupt the noise-free image at path `image` with white Gaussian noise of
    standard deviation `sigma` (0..255 scale) and save the noisy copy."""
    from experiment_funcs import get_experiment_noise

    y = np.array(Image.open(image)) / 255

    noise_type = "gw"
    noise_var = (sigma / 255) ** 2  # Noise variance (sigma is given on the 0..255 scale)
    seed = 0  # seed for pseudorandom noise realization

    noise, psd, kernel = get_experiment_noise(noise_type, noise_var, seed, y.shape)
    z = np.atleast_3d(y) + np.atleast_3d(noise)
    z_rang = np.minimum(np.maximum(z, 0), 1)

    # Save the noisy image next to the input, following the "<name>_g.bmp"
    # naming convention used in main() below.
    noisyimagename = os.path.splitext(image)[0] + "_g.bmp"
    plt.imsave(noisyimagename, z_rang)
    return noisyimagename
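# Hedged usage sketch for add_noise(): the file name and sigma below are
# illustrative only, and the module-level imports used by the demos in this
# section (numpy as np, PIL.Image, matplotlib.pyplot as plt) are assumed.
def _demo_add_noise():
    # sigma = 25 on the 0..255 intensity scale
    noisy_path = add_noise('image_Lena512rgb.png', 25)
    print('Noisy image written to', noisy_path)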
def GPP_Color_solve(test_img='color_tiger', savedir='outs_gpp_color', USE_BM3D=True):
    # I_x = I_y = 512  # size of images (can be varied)
    I_x = 768
    I_y = 512
    d_x = d_y = 32  # size of patches (can be varied)
    n_measure = 0.1  # measurement rate (can be varied)
    nIter = 5001

    if USE_BM3D:
        from bm3d import bm3d, BM3DProfile
        from experiment_funcs import get_experiment_noise
        noise_type = 'g0'
        noise_var = 0.01  # Noise variance
        seed = 0  # seed for pseudorandom noise realization
        # Generate noise with given PSD
        noise, psd, kernel = get_experiment_noise(noise_type, noise_var, seed, [256, 256])
    ###

    if not os.path.exists(savedir):
        os.makedirs(savedir)

    dim_x = d_x * d_y
    batch_size = (I_x * I_y) // (dim_x)
    nz = 100  # latent dimensionality of GAN (fixed)
    dim_phi = int(n_measure * dim_x)
    n_img_plot_x = I_x // d_x
    n_img_plot_y = I_y // d_y
    workers = 2
    ngpu = 1
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    ## measurement operator
    phi_np = np.random.randn(dim_x, dim_phi)
    phi_test = torch.Tensor(phi_np)

    fname = '../test_images/{}.jpg'.format(test_img)
    image = io.imread(fname)
    x_test = resize(image, (I_x, I_y), anti_aliasing=True, preserve_range=True, mode='reflect')
    x_test_ = np.array(x_test) / 255.
    print(x_test_.shape)

    # Split the ground-truth image into non-overlapping d_x x d_y patches (row-major order)
    x_test = []
    for i in range(n_img_plot_x):
        for j in range(n_img_plot_y):
            _x = x_test_[i * d_x:d_x * (i + 1), j * d_y:d_y * (j + 1)]
            x_test.append(_x)
    x_test = np.array(x_test)
    print(x_test.shape)
    test_images = torch.Tensor(np.transpose(x_test[:batch_size, :, :, :], [0, 3, 1, 2]))

    # vutils.imsave(test_images, [n_img_plot_x, n_img_plot_y], 'cs_outs/gt_sample.png')
    # img_gt = vutils.make_grid(test_images, nrow=n_img_plot_y, padding=0, normalize=True)
    # vutils.save_image(img_gt, 'outs/gt_sample.png')
    io.imsave('{}/gt.png'.format(savedir), (255 * x_test_).astype(np.uint8))

    genPATH = './all_models/generator.pt'
    netG = Generator(ngpu=ngpu, nc=3).to(device)
    netG.apply(weights_init)
    if (device.type == 'cuda') and (ngpu > 1):
        netG = nn.DataParallel(netG, list(range(ngpu)))
    if os.path.isfile(genPATH):
        if device.type == 'cuda':
            netG.load_state_dict(torch.load(genPATH))
        elif device.type == 'cpu':
            netG.load_state_dict(torch.load(genPATH, map_location=torch.device('cpu')))
        else:
            raise Exception("Unable to load model to specified device")
        print("************ Generator weights restored! **************")
    netG.eval()

    criterion = nn.MSELoss()
    z_prior = torch.zeros(batch_size, nz, 1, 1, requires_grad=True, device=device)
    optimizerZ = optim.RMSprop([z_prior], lr=5e-3)
    real_cpu = test_images.to(device)

    for iters in range(nIter):
        optimizerZ.zero_grad()
        z2 = torch.clamp(z_prior, -1., 1.)
        fake = 0.5 * netG(z2) + 0.5
        fake = nnf.interpolate(fake, size=(d_x, d_y), mode='bilinear', align_corners=False)
        cost = 0.
        for i in range(3):
            y_gt, y_est = cs_measure(real_cpu[:, i, :, :], fake[:, i, :, :], phi_test.to(device))
            cost += criterion(y_gt, y_est)
        cost.backward()
        optimizerZ.step()

        if (iters % 50 == 0):
            with torch.no_grad():
                z2 = torch.clamp(z_prior, -1., 1.)
                fake = 0.5 * netG(z2).detach().cpu() + 0.5
                fake = nnf.interpolate(fake, size=(d_x, d_y), mode='bilinear', align_corners=False)
                G_imgs = np.transpose(fake.detach().cpu().numpy(), [0, 2, 3, 1])
                imgest = merge(G_imgs, [n_img_plot_x, n_img_plot_y])
                psnr = compare_psnr(x_test_, imgest, data_range=1.0)
                if USE_BM3D:
                    merged_clean = bm3d(imgest, psd)
                    psnr1 = compare_psnr(x_test_, merged_clean, data_range=1.0)
                    print('Iter: {:d}, Error: {:.3f}, PSNR-raw: {:.3f}, PSNR-bm3d: {:.3f}'
                          .format(iters, cost.item(), psnr, psnr1))
                    io.imsave('{}/inv_solution_bm3d_iters_{}.png'.format(savedir, str(iters).zfill(4)),
                              (255 * merged_clean).astype(np.uint8))
                else:
                    print('Iter: {:d}, Error: {:.3f}, PSNR-raw: {:.3f}'.format(iters, cost.item(), psnr))
                    io.imsave('{}/inv_solution_iters_{}.png'.format(savedir, str(iters).zfill(4)),
                              (255 * imgest).astype(np.uint8))
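# merge() is called by both GPP solvers but is not defined in this section.
# A minimal sketch of the patch-tiling helper it is assumed to be: patches in
# `images` (shape [N, d_x, d_y] or [N, d_x, d_y, C]) are laid out row by row
# into a grid of size[0] x size[1] tiles, matching the row-major order in which
# the patches were extracted above.
def merge(images, size):
    d_x, d_y = images.shape[1], images.shape[2]
    channels = images.shape[3] if images.ndim == 4 else 1
    out = np.zeros((size[0] * d_x, size[1] * d_y, channels), dtype=images.dtype)
    for idx in range(images.shape[0]):
        i, j = idx // size[1], idx % size[1]
        out[i * d_x:(i + 1) * d_x, j * d_y:(j + 1) * d_y, :] = images[idx].reshape(d_x, d_y, channels)
    return np.squeeze(out)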
def main(t, sigma, args=None):
    # Experiment specifications
    # imagename = 'image_Lena512rgb.png'
    imagepath = args.path + '\\'
    imagename = imagepath + 'ref\\' + t + '_r.bmp'

    # Load noise-free image
    y = np.array(Image.open(imagename)) / 255

    # Possible noise types to be generated 'gw', 'g1', 'g2', 'g3', 'g4', 'g1w',
    # 'g2w', 'g3w', 'g4w'.
    noise_type = 'gw'
    noise_var = (sigma / 255) ** 2  # Noise variance (sigma is given on the 0..255 scale)
    seed = 0  # seed for pseudorandom noise realization

    # Generate noise with given PSD
    noise, psd, kernel = get_experiment_noise(noise_type, noise_var, seed, y.shape)
    # N.B.: For the sake of simulating a more realistic acquisition scenario,
    # the generated noise is *not* circulant. Therefore there is a slight
    # discrepancy between PSD and the actual PSD computed from infinitely many
    # realizations of this noise with different seeds.

    # Generate noisy image corrupted by additive spatially correlated noise
    # with noise power spectrum PSD
    z = np.atleast_3d(y) + np.atleast_3d(noise)
    z_rang = np.minimum(np.maximum(z, 0), 1)
    noisyimagename = imagepath + 'noisy\\' + t + '_g.bmp'
    plt.imsave(noisyimagename, z_rang)
    # Reload the noisy image so that denoising operates on the quantized, saved data
    z = np.array(Image.open(noisyimagename)) / 255

    # Call BM3D with the default settings.
    y_est = bm3d_rgb(z, psd)

    # To include refiltering:
    # y_est = bm3d_rgb(z, psd, 'refilter')

    # For other settings, use BM3DProfile.
    # profile = BM3DProfile()  # equivalent to profile = BM3DProfile('np')
    # profile.gamma = 6  # redefine value of gamma parameter
    # y_est = bm3d_rgb(z, psd, profile)

    # Note: For white noise, you may instead of the PSD
    # also pass a standard deviation
    # y_est = bm3d_rgb(z, sqrt(noise_var))

    # If the different channels have varying PSDs, you can supply an MxNx3 PSD or a list of 3 STDs:
    # y_est = bm3d_rgb(z, np.concatenate((psd1, psd2, psd3), 2))
    # y_est = bm3d_rgb(z, [sigma1, sigma2, sigma3])

    psnr = get_psnr(y, y_est)
    print("PSNR:", psnr)

    # PSNR ignoring 16-pixel wide borders (as used in the paper), due to refiltering potentially leaving artifacts
    # on the pixels near the boundary of the image when noise is not circulant
    psnr_cropped = get_cropped_psnr(y, y_est, [16, 16])
    print("PSNR cropped:", psnr_cropped)

    # Ignore values outside range for display (or plt gives an error for multichannel input)
    y_est = np.minimum(np.maximum(y_est, 0), 1)
    z_rang = np.minimum(np.maximum(z, 0), 1)

    # Save the estimate and reload it, so the reported metrics match the 8-bit file on disk
    opath = 'bm3d_{0}.bmp'.format(t)
    plt.imsave(opath, y_est)
    y_est = np.array(Image.open(opath)) / 255
    psnr = get_psnr(y, y_est)
    print("PSNR 2:", psnr)
    mse = ((y_est - y) ** 2).mean() * 255
    print("MSE:", mse)
    # plt.imsave(imagepath + 'noisy\\' + t + '_g.bmp', z_rang)

    # TEST CV2 PSNR
    try:
        from skimage.metrics import structural_similarity as compare_ssim
    except Exception:
        from skimage.measure import compare_ssim
    import cv2

    argref = imagename  # path of the noise-free reference image
    d = cv2.imread(opath)
    tref = cv2.imread(argref)
    (score, diff) = compare_ssim(tref, d, full=True, multichannel=True)
    psnr2 = cv2.PSNR(tref, d)
    print('#######################')
    print('CV2 PSNR, SSIM: {:.2f}, {:.2f}'.format(psnr2, score))
    print('#######################')
    print('')

    # plt.title("y, z, y_est")
    # plt.imshow(np.concatenate((y, np.squeeze(z_rang), y_est), axis=1))
    # plt.show()
    return psnr, mse
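# Hedged driver sketch for main(t, sigma, args): the argparse layout and the
# image tags below are illustrative only; main() expects args.path to contain
# ref\ and noisy\ subfolders with <t>_r.bmp reference images.
def run_bm3d_experiments():
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--path', required=True,
                        help='folder containing the ref\\ and noisy\\ subfolders')
    cli_args = parser.parse_args()
    for tag in ['kodim01', 'kodim02']:  # hypothetical image tags
        psnr, mse = main(tag, 25, args=cli_args)
        print(tag, psnr, mse)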
def GPP_solve():
    USE_BM3D = True
    savedir = './outs_pt'
    genPATH = './all_models/grayscale_generator.pt'
    # genPATH = './all_models/grayscale_generator_v1-4-0.model'
    filename = 'Monarch'
    fname = '../test_images/{}.tif'.format(filename)

    if not os.path.exists(savedir):
        os.makedirs(savedir)

    I_y = 256
    I_x = 256
    d_x = d_y = 32
    dim_x = d_x * d_y
    batch_size = (I_x * I_y) // (dim_x)
    n_measure = 0.1
    nz = 100
    dim_phi = int(n_measure * dim_x)
    nIter = 5001
    n_img_plot_x = I_x // d_x
    n_img_plot_y = I_y // d_y
    workers = 2
    ngpu = 1
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    ## measurement operator
    phi_np = np.random.randn(dim_x, dim_phi)
    phi_test = torch.Tensor(phi_np)

    if USE_BM3D:
        from bm3d import bm3d, BM3DProfile
        from experiment_funcs import get_experiment_noise
        noise_type = 'g0'
        noise_var = 0.02  # Noise variance
        seed = 0  # seed for pseudorandom noise realization
        # Generate noise with given PSD
        noise, psd, kernel = get_experiment_noise(noise_type, noise_var, seed, [256, 256])
    ###

    def cs_measure(gt, est, phi):
        # Project ground truth and estimate through the same random measurement matrix
        n_dim = gt.shape[2] * gt.shape[3]
        y_gt = torch.matmul(gt.view(-1, n_dim), phi)
        y_est = torch.matmul(est.view(-1, n_dim), phi)
        return y_gt, y_est

    x_test = Image.open(fname).convert(mode='L').resize((I_x, I_y))
    x_test_ = np.expand_dims(np.array(x_test) / 255., axis=2)
    torch_gt = torch.Tensor(np.transpose(x_test_, [2, 0, 1]))
    io.imsave('{}/gt.png'.format(savedir), (255 * x_test_[:, :, 0]).astype(np.uint8))
    # x_test_ = 2*x_test_-1

    x_test = []
    for i in range(n_img_plot_x):
        for j in range(n_img_plot_y):
            _x = x_test_[i * d_x:d_x * (i + 1), j * d_y:d_y * (j + 1)]
            x_test.append(_x)
    x_test = np.array(x_test)
    test_images = torch.Tensor(np.transpose(x_test[:batch_size, :, :, :], [0, 3, 1, 2]))

    netG = Generator(ngpu=ngpu, nc=1).to(device)
    if (device.type == 'cuda') and (ngpu > 1):
        netG = nn.DataParallel(netG, list(range(ngpu)))
    netG.apply(weights_init)

    if os.path.isfile(genPATH):
        if device.type == 'cuda':
            netG.load_state_dict(torch.load(genPATH))
        elif device.type == 'cpu':
            netG.load_state_dict(torch.load(genPATH, map_location=torch.device('cpu')))
        else:
            raise Exception("Unable to load model to specified device")
        print("************ Generator weights restored! **************")

    netG.eval()
    for param in netG.parameters():
        param.requires_grad = False

    criterion = nn.MSELoss()
    z_prior = torch.zeros(batch_size, nz, 1, 1, requires_grad=True, device=device)
    optimizerZ = optim.RMSprop([z_prior], lr=5e-3)
    lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer=optimizerZ, gamma=0.9999)

    real_cpu = test_images.to(device)
    for iters in range(nIter):
        # z2 = torch.clamp(z_prior, -1., 1.)
        fake = 0.5 * netG(z_prior) + 0.5
        fake = nnf.interpolate(fake, size=(d_x, d_y), mode='bilinear', align_corners=False)
        y_gt, y_est = cs_measure(real_cpu, fake, phi_test.to(device))
        cost = criterion(y_gt, y_est)
        optimizerZ.zero_grad()
        cost.backward()
        optimizerZ.step()
        if (iters + 1) % 100 == 0:
            lr_scheduler.step()

        if (iters % 500 == 0):
            with torch.no_grad():
                fake = 0.5 * netG(z_prior).detach().cpu() + 0.5
                fake = nnf.interpolate(fake, size=(d_x, d_y), mode='bilinear', align_corners=False)
                G_imgs = np.transpose(fake.detach().cpu().numpy(), [0, 2, 3, 1])
                merged = merge(G_imgs, [n_img_plot_x, n_img_plot_y])
                psnr0 = compare_psnr(x_test_[:, :, 0], merged, data_range=1.0)
                if USE_BM3D:
                    merged_clean = bm3d(merged, psd)
                    # merged_clean = pybm3d.bm3d.bm3d(merged, 0.25)  # alternative: requires the separate pybm3d package
                    psnr1 = compare_psnr(x_test_[:, :, 0], merged_clean, data_range=1.0)
                    display = merged_clean
                    print('Iter: {:d}, Error: {:.3f}, PSNR: {:.3f}, PSNR-bm3d: {:.3f}, Current LR: {:.5f}'
                          .format(iters, cost.item(), psnr0, psnr1, lr_scheduler.get_last_lr()[0]))
                    io.imsave('{}/inv_solution_bm3d_iters_{}.png'.format(savedir, str(iters).zfill(4)),
                              (255 * merged_clean).astype(np.uint8))
                else:
                    print('PSNR and reconstructions are not processed with BM3D')
                    display = merged
                    psnr1 = psnr0
                    print('Iter: {:d}, Error: {:.3f}, PSNR: {:.3f}, Current LR: {:.5f}'
                          .format(iters, cost.item(), psnr0, lr_scheduler.get_last_lr()[0]))
                    io.imsave('{}/inv_solution_iters_{}.png'.format(savedir, str(iters).zfill(4)),
                              (255 * display).astype(np.uint8))
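# weights_init() is applied to the generator in both GPP solvers but is not
# defined in this section. A sketch of the standard DCGAN-style initialization
# it is assumed to be: conv weights ~ N(0, 0.02), batch-norm scales ~ N(1, 0.02),
# batch-norm biases zeroed.
def weights_init(m):
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        nn.init.normal_(m.weight.data, 0.0, 0.02)
    elif classname.find('BatchNorm') != -1:
        nn.init.normal_(m.weight.data, 1.0, 0.02)
        nn.init.constant_(m.bias.data, 0)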
import numpy as np
from bm3d import bm3d
from experiment_funcs import get_experiment_noise, get_psnr, get_cropped_psnr

bv = np.load('/Users/mskirk/data/Bradshaw/bv_simulation_high_fn_171.step1.npz', allow_pickle=True)
imcube = bv['arr_0']
y = np.sqrt(imcube[:, :, -1])
y_cube = imcube / imcube.max()
y = y / y.max()

noise_type = 'g4w'
noise_var = y.min() / 4.  # Noise variance
noise_cube_var = y_cube.min() / 4
seed = 0  # seed for pseudorandom noise realization

noise, psd, kernel = get_experiment_noise(noise_type, noise_var, seed, y.shape)
noise_cube, psd_cube, kernel_cube = get_experiment_noise(noise_type, noise_cube_var, seed, y_cube.shape)

# Generate noisy image corrupted by additive spatially correlated noise
# with noise power spectrum PSD
z = np.atleast_3d(y) + np.atleast_3d(noise)

# Call BM3D with the default settings.
y_est = bm3d(z, psd)

# Note: For white noise, you may instead of the PSD
# also pass a standard deviation
# y_est = bm3d(z, sqrt(noise_var))

psnr = get_psnr(y, y_est)
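# Hedged follow-up, mirroring the other demos in this section: report the PSNR
# of the BM3D estimate, including the border-cropped variant used in the paper.
print("PSNR:", psnr)
print("PSNR cropped:", get_cropped_psnr(y, y_est, [16, 16]))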
def main():
    # Experiment specifications
    # The multichannel example data is acquired from: http://www.bic.mni.mcgill.ca/brainweb/
    # (C.A. Cocosco, V. Kollokian, R.K.-S. Kwan, A.C. Evans,
    # "BrainWeb: Online Interface to a 3D MRI Simulated Brain Database",
    # NeuroImage, vol. 5, no. 4, part 2/4, S425, 1997
    # -- Proceedings of 3rd International Conference on Functional Mapping of the Human Brain, Copenhagen, May 1997.)
    data_name = 'brainslice.mat'
    table_name = 'slice_sample'

    # Load noise-free image
    # Data should be in the same shape as with Image.open, but the channel count can be any (M x N x channels)
    # Noise-free data should be between 0 and 1.
    y = loadmat(data_name)[table_name]

    # Possible noise types to be generated 'gw', 'g1', 'g2', 'g3', 'g4', 'g1w',
    # 'g2w', 'g3w', 'g4w'.
    noise_type = 'g2'
    noise_var = 0.02  # Noise variance
    seed = 0  # seed for pseudorandom noise realization

    # Generate noise with given PSD
    noise, psd, kernel = get_experiment_noise(noise_type, noise_var, seed, y.shape)
    # N.B.: For the sake of simulating a more realistic acquisition scenario,
    # the generated noise is *not* circulant. Therefore there is a slight
    # discrepancy between PSD and the actual PSD computed from infinitely many
    # realizations of this noise with different seeds.

    # Generate noisy image corrupted by additive spatially correlated noise
    # with noise power spectrum PSD
    z = np.atleast_3d(y) + np.atleast_3d(noise)

    # Call BM3D with the default settings.
    # The call is identical to that of the grayscale BM3D.
    y_est = bm3d(z, psd)

    # To include refiltering:
    # y_est = bm3d(z, psd, 'refilter')

    # For other settings, use BM3DProfile.
    # profile = BM3DProfile()  # equivalent to profile = BM3DProfile('np')
    # profile.gamma = 6  # redefine value of gamma parameter
    # y_est = bm3d(z, psd, profile)

    # Note: For white noise, you may instead of the PSD
    # also pass a standard deviation
    # y_est = bm3d(z, sqrt(noise_var))

    # Instead of passing a single PSD, you may also pass one PSD (or STD) per channel:
    # y_est = bm3d(z, np.concatenate((psd1, psd2, psd3, psd4, psd5), 2))
    # y_est = bm3d(z, [sigma1, sigma2, sigma3, sigma4, sigma5])

    psnr = get_psnr(y, y_est)
    print("PSNR:", psnr)

    # PSNR ignoring 16-pixel wide borders (as used in the paper), due to refiltering potentially leaving artifacts
    # on the pixels near the boundary of the image when noise is not circulant
    psnr_cropped = get_cropped_psnr(y, y_est, [16, 16])
    print("PSNR cropped:", psnr_cropped)

    # Ignore values outside range for display (or plt gives an error for multichannel input)
    y_est = np.minimum(np.maximum(y_est, 0), 1)
    z_rang = np.minimum(np.maximum(z, 0), 1)
    plt.title("y, z, y_est")
    disp_mat = np.concatenate([
        np.concatenate((y[:, :, i], np.squeeze(z_rang[:, :, i]), y_est[:, :, i]), axis=1)
        for i in range(y_est.shape[2])
    ], axis=0)
    plt.imshow(disp_mat)
    plt.show()