# ---------------------------------------------------------------
# Patch extraction: build a pooled training matrix of m*m patches
# from 10 recto/verso image pairs, then standardise it.
# ---------------------------------------------------------------

# parameters
n = 256
m = 8
image_size = (n, n)
patch_size = (m, m)
step = 4

patches_recto = []
# NOTE(review): patches_verso is never filled in this section -- patches from
# both pictures are pooled into patches_recto below; confirm this is intended.
patches_verso = []

# Extract reference patches from the images
print('Extracting reference patches...')
for i in range(10):
    image1, image2 = load_images('../images/set' + str(i + 1) + '_pic1.png',
                                 '../images/set' + str(i + 1) + '_pic2.png',
                                 256, show_images=False)
    patches1 = patchify(image1, patch_size, step)
    initial_patch_size = patches1.shape  # kept for later un-patchifying
    patches2 = patchify(image2, patch_size, step)
    # Flatten each m x m patch into one row and pool both pictures together.
    patches_recto.append(patches1.reshape(-1, patch_size[0] * patch_size[1]))
    patches_recto.append(patches2.reshape(-1, patch_size[0] * patch_size[1]))

# Stack everything into one (n_patches, m*m) matrix and standardise it
# column-wise (zero mean, unit variance per pixel position).
patches_recto = np.reshape(patches_recto, (-1, m * m))
patches_recto -= np.mean(patches_recto, axis=0)  # remove the mean
patches_recto /= np.std(patches_recto, axis=0)  # normalize each patch
# ---------------------------------------------------------------
# Training-set construction for single-dictionary learning:
# pool m*m patches from 15 BM3D-denoised image pairs, standardise.
# ---------------------------------------------------------------

# experiment configuration
t = np.linspace(1, max_it, int(max_it / 5))
d = 25
method = 'single_dictionary_learning_filtered'
# f.write(f'\nsigma = {sigma}, degrees = {d}, iterations = {max_it}\n method = {method}')
# f.write(f'\n\nLearned on 15 images from images, tested on images hard set 3')

patches_recto = []
# NOTE(review): patches_verso stays empty here -- verso patches are pooled
# into patches_recto (single dictionary for both sides); confirm intended.
patches_verso = []

# Extract reference patches from the first images
print('Extracting reference patches...')
for i in range(15):
    image1, image2 = load_images(f'./train_images_bm3d_v/set{i+1}_pic1.png',
                                 f'./train_images_bm3d_v/set{i+1}_pic2.png',
                                 256, show_images=False)
    flat_len = patch_size[0] * patch_size[1]
    patches1 = patchify(image1, patch_size, step)
    initial_patch_size = patches1.shape  # kept for later un-patchifying
    patches2 = patchify(image2, patch_size, step)
    # One flattened patch per row; both pictures feed the same pool.
    patches_recto.append(patches1.reshape(-1, flat_len))
    patches_recto.append(patches2.reshape(-1, flat_len))

# (n_patches, m*m) matrix, standardised column-wise.
patches_recto = np.reshape(patches_recto, (-1, m * m))
patches_recto -= np.mean(patches_recto, axis=0)  # remove the mean
patches_recto /= np.std(patches_recto, axis=0)  # normalize each patch
print('Learning the recto dictionary...')
This function does the whitening projection with PCA """
    # Sample covariance of the mixtures
    # (assumes S holds one flattened signal per row -- TODO confirm with caller)
    R = np.dot(S, S.T)
    # Whitening matrix: inverse matrix square root of the covariance
    W = la.sqrtm(np.linalg.inv(R))
    # Return the whitening matrix and the whitened signals
    return W, np.dot(W, S)


# ---------------------------------------------------------------
# Mixing / whitening experiment on one hard image pair.
# ---------------------------------------------------------------
n = 256
image_size = (n, n)
max_it = 100
d = 45  # mixing angle in degrees
i = 4  # image-set index (loads set5 from disk)
source1, source2 = load_images(f'../images_hard/set{i+1}_pic1.png',
                               f'../images_hard/set{i+1}_pic2.png',
                               256, show_images=False)
theta = np.radians(d)
# 2x2 rotation matrix used as the (invertible) mixing matrix
mixing_matrix = [[np.cos(theta), -np.sin(theta)],
                 [np.sin(theta), np.cos(theta)]]
sources, mixtures = linear_mixture(source1, source2,
                                   mixing_matrix=mixing_matrix,
                                   show_images=False)
# Whiten the mixtures before separation
W_whiten, mixtures = whiten_projection(mixtures)
mix1, mix2 = unflatten(mixtures, image_size)
plt.figure()
plt.suptitle(f'Mixtures, theta = {d}')
from mixing_models import load_images, linear_mixture
from functions import TV_proj, flatten, unflatten, whiten_projection, learn_dictionary, dictionary_projection
from skimage.metrics import structural_similarity as ssim
from skimage.util import random_noise
from skimage.restoration import denoise_tv_chambolle

# ---------------------------------------------------------------
# Noising experiment: add Gaussian noise to one hard image pair
# and report SSIM between clean and noisy versions.
# ---------------------------------------------------------------
n = 256
m = 8
image_size = (n, n)
patch_size = (m, m)
step = 4
n_coef = 2

# 2 images
source1, source2 = load_images('../../images_hard/set2_pic1.png',
                               '../../images_hard/set2_pic2.png',
                               n=256, show_images=False)
# Sanity check: SSIM of an image with itself should be 1.0
print('SSIM of the clean image with itself =', ssim(source1, source1))

# add gaussian noise
sigma = 0.1
noisy_image1 = random_noise(source1, var=sigma**2)  # var = 0.01 by default
noisy_image2 = random_noise(source2, var=sigma**2)  # var = 0.01 by default
print(
    '\nSSIM of source 1 with its noisy one =',
    ssim(source1, noisy_image1, data_range=noisy_image1.max() - noisy_image1.min()))
print(
    'SSIM of source 2 with its noisy one =',