def test_extract_pixel_feature():
    """Check full and half pixel features on a hand-built 2D image pair."""
    # Small/large pyramid levels with one marked pixel each at (0, 0).
    small = 0.5 * np.ones((4, 5))
    small[0, 0] = 0
    large = 0.3 * np.ones((7, 10))
    large[0, 0] = 1
    c.num_ch, c.padding_sm, c.padding_lg, c.weights = c.setup_vars(large)

    # Expected 3x3 neighborhood around (0, 0) in the padded small level.
    expected_sm = np.array([[0, 0, 0.5],
                            [0, 0, 0.5],
                            [0.5, 0.5, 0.5]])
    # Expected 5x5 neighborhood around (0, 0) in the padded large level.
    expected_lg = np.array([[0.3, 0.3, 0.3, 0.3, 0.3],
                            [0.3, 1, 1, 0.3, 0.3],
                            [0.3, 1, 1, 0.3, 0.3],
                            [0.3, 0.3, 0.3, 0.3, 0.3],
                            [0.3, 0.3, 0.3, 0.3, 0.3]])

    padded = pad_img_pair(small, large, c)

    # Full feature: small neighborhood followed by the entire large one.
    expected = np.hstack([expected_sm.flatten(), expected_lg.flatten()])
    feat = extract_pixel_feature(padded, (0, 0), c, full_feat=True)
    assert np.allclose(feat, expected)

    # Half feature: large neighborhood truncated to its first n_half values.
    expected = np.hstack([expected_sm.flatten(),
                          expected_lg.flatten()[:c.n_half]])
    feat = extract_pixel_feature(padded, (0, 0), c, full_feat=False)
    assert np.allclose(feat, expected)
def test_extract_pixel_feature():
    """Verify extract_pixel_feature for both the full and the half feature."""
    # Build a 2D image pair with one distinguished pixel at (0, 0) each.
    sm = 0.5 * np.ones((4, 5))
    sm[0, 0] = 0
    lg = 0.3 * np.ones((7, 10))
    lg[0, 0] = 1
    c.num_ch, c.padding_sm, c.padding_lg, c.weights = c.setup_vars(lg)

    # Neighborhoods expected around (0, 0) after padding.
    sm_0 = np.array([[0, 0, 0.5],
                     [0, 0, 0.5],
                     [0.5, 0.5, 0.5]])
    lg_0 = np.array([[0.3, 0.3, 0.3, 0.3, 0.3],
                     [0.3, 1, 1, 0.3, 0.3],
                     [0.3, 1, 1, 0.3, 0.3],
                     [0.3, 0.3, 0.3, 0.3, 0.3],
                     [0.3, 0.3, 0.3, 0.3, 0.3]])

    im_padded = pad_img_pair(sm, lg, c)

    # (full_feat flag, expected concatenated feature) pairs.
    cases = [
        (True, np.concatenate([sm_0.flatten(), lg_0.flatten()])),
        (False, np.concatenate([sm_0.flatten(), lg_0.flatten()[:c.n_half]])),
    ]
    for full_feat, expected in cases:
        feat = extract_pixel_feature(im_padded, (0, 0), c, full_feat=full_feat)
        assert np.allclose(feat, expected)
def test_best_coherence_match():
    """Compare best_coherence_match against the reference implementation.

    Builds A/Ap pyramids from fixture images, seeds the source list ``s``
    so the causal neighbor (row-1, col-1) maps to itself, and checks that
    both implementations recover (row, col) as the coherent match for all
    four corners and the middle pixel.
    """
    A_orig = plt.imread('./test_images/test_best_coherence_match_A.jpg')
    Ap_orig = plt.imread('./test_images/test_best_coherence_match_Ap.jpg')

    A = convert_to_YIQ(A_orig / 255.)[:, :, 0]
    Ap = convert_to_YIQ(Ap_orig / 255.)[:, :, 0]
    A_pyr = compute_gaussian_pyramid(A, min_size=3)
    Ap_pyr = compute_gaussian_pyramid(Ap, min_size=3)
    imh, imw = A.shape[:2]

    c.num_ch, c.padding_sm, c.padding_lg, c.weights = c.setup_vars(A)
    c.max_levels = len(A_pyr)

    A_pd = pad_img_pair(A_pyr[-2], A_pyr[-1], c)
    Ap_pd = pad_img_pair(Ap_pyr[-2], Ap_pyr[-1], c)

    flann, flann_params, As, As_size = create_index(A_pyr, Ap_pyr, c)

    # BBp_feat cases: all corners and middle.
    indices = [(1, 1), (1, imw - 1), (imh - 1, 1), (imh - 1, imw - 1),
               (np.floor(imh / 2.).astype(int), np.floor(imw / 2.).astype(int))]

    for row, col in indices:
        num_px = row * imw + col
        # Random already-synthesized sources in [0, num_px - 1].
        # (np.random.randint replaces the removed random_integers API.)
        s_rows = np.random.randint(0, num_px, size=num_px)
        s_cols = np.random.randint(0, num_px, size=num_px)
        s = [(rr, cc) for rr, cc in zip(s_rows, s_cols)]
        # Force the causal neighbor to map to itself so the coherent
        # candidate for (row, col) is exactly (row, col).
        s[(row - 1) * imw + col - 1] = (row - 1, col - 1)

        Bs_feat = np.hstack([
            extract_pixel_feature(A_pd, (row, col), c, full_feat=True),
            extract_pixel_feature(Ap_pd, (row, col), c, full_feat=False)
        ])

        p_coh_orig, r_star_orig = best_coherence_match_orig(
            A_pd, Ap_pd, Bs_feat, s, (row, col, imw), c)
        p_coh_new, r_star_new = best_coherence_match(
            As[-1], A.shape, Bs_feat, s, (row, col, imw), c)

        try:
            assert p_coh_orig == (row, col)
            assert p_coh_new == (row, col)
        except AssertionError:  # narrowed from bare except: only diagnose failures
            print('row, col, p_coh_orig, p_coh_new, s',
                  row, col, p_coh_orig, p_coh_new, s)
            # Bug fix: extract_pixel_feature takes a single coordinate;
            # the original passed p_coh_orig and p_coh_new together.
            As_feat = np.hstack([
                extract_pixel_feature(A_pd, p_coh_orig, c, full_feat=True),
                extract_pixel_feature(Ap_pd, p_coh_orig, c, full_feat=False)
            ])
            print('As_feat', As_feat)
            print('Bs_feat', Bs_feat)
            raise
def test_compute_feature_array():
    """Check compute_feature_array feature layout for 2D and 3D inputs."""
    num_px = 7 * 10  # pixel count of the large level

    def verify(feat, width, first_feat):
        # Two pyramid levels: the coarsest carries no features; the finest
        # is one row per pixel, each of `width` feature values.
        assert len(feat) == 2
        assert feat[0] == []
        assert feat[1].shape == (num_px, width)
        assert np.allclose(feat[1][0], first_feat)

    # 2D image pair with a marked pixel at (0, 0).
    sm = 0.5 * np.ones((4, 5))
    sm[0, 0] = 0
    lg = 0.3 * np.ones((7, 10))
    lg[0, 0] = 1

    # Expected neighborhoods around (0, 0).
    sm_0 = np.array([[0, 0, 0.5],
                     [0, 0, 0.5],
                     [0.5, 0.5, 0.5]])
    lg_0 = np.array([[0.3, 0.3, 0.3, 0.3, 0.3],
                     [0.3, 1, 1, 0.3, 0.3],
                     [0.3, 1, 1, 0.3, 0.3],
                     [0.3, 0.3, 0.3, 0.3, 0.3],
                     [0.3, 0.3, 0.3, 0.3, 0.3]])

    # 2D: full feature, then half feature.
    c.num_ch, c.padding_sm, c.padding_lg, c.weights = c.setup_vars(lg)
    verify(compute_feature_array([sm, lg], c, full_feat=True),
           9 + 25,
           np.hstack([sm_0.flatten(), lg_0.flatten()]))
    verify(compute_feature_array([sm, lg], c, full_feat=False),
           9 + c.n_half,
           np.hstack([sm_0.flatten(), lg_0.flatten()[:c.n_half]]))

    # 3-channel versions of the same images.
    sm = 0.5 * np.ones((4, 5, 3))
    sm[0, 0, :] = 0
    lg = 0.3 * np.ones((7, 10, 3))
    lg[0, 0] = 1
    sm_3d_0 = np.dstack([sm_0, sm_0, sm_0])
    lg_3d_0 = np.dstack([lg_0, lg_0, lg_0])

    # 3D: full feature, then half feature.
    c.num_ch, c.padding_sm, c.padding_lg, c.weights = c.setup_vars(lg)
    verify(compute_feature_array([sm, lg], c, full_feat=True),
           (9 + 25) * 3,
           np.hstack([sm_3d_0.flatten(), lg_3d_0.flatten()]))
    verify(compute_feature_array([sm, lg], c, full_feat=False),
           (9 + c.n_half) * 3,
           np.hstack([sm_3d_0.flatten(), lg_3d_0.flatten()[:c.n_half * 3]]))
def test_best_coherence_match():
    """Check best_coherence_match against best_coherence_match_orig.

    For each corner and the middle pixel, the source map ``s`` is random
    except that the causal neighbor (row-1, col-1) maps to itself, so both
    implementations must return (row, col) as the coherent match.
    """
    # make A_pd, Ap_pd, BBp_feat, s
    A_orig = plt.imread('./test_images/test_best_coherence_match_A.jpg')
    Ap_orig = plt.imread('./test_images/test_best_coherence_match_Ap.jpg')

    A = convert_to_YIQ(A_orig / 255.)[:, :, 0]
    Ap = convert_to_YIQ(Ap_orig / 255.)[:, :, 0]
    A_pyr = compute_gaussian_pyramid(A, min_size=3)
    Ap_pyr = compute_gaussian_pyramid(Ap, min_size=3)
    imh, imw = A.shape[:2]

    c.num_ch, c.padding_sm, c.padding_lg, c.weights = c.setup_vars(A)
    c.max_levels = len(A_pyr)

    A_pd = pad_img_pair(A_pyr[-2], A_pyr[-1], c)
    Ap_pd = pad_img_pair(Ap_pyr[-2], Ap_pyr[-1], c)

    flann, flann_params, As, As_size = create_index(A_pyr, Ap_pyr, c)

    # BBp_feat cases: all corners and middle
    indices = [(1, 1), (1, imw - 1), (imh - 1, 1), (imh - 1, imw - 1),
               (np.floor(imh / 2.).astype(int),
                np.floor(imw / 2.).astype(int))]

    for row, col in indices:
        num_px = row * imw + col
        # Draw random sources in [0, num_px - 1]; np.random.randint
        # replaces np.random.random_integers, removed from NumPy.
        s_rows = np.random.randint(0, num_px, size=num_px)
        s_cols = np.random.randint(0, num_px, size=num_px)
        s = [(rr, cc) for rr, cc in zip(s_rows, s_cols)]
        # Pin the causal neighbor so (row, col) is the coherent answer.
        s[(row - 1) * imw + col - 1] = (row - 1, col - 1)

        Bs_feat = np.hstack([
            extract_pixel_feature(A_pd, (row, col), c, full_feat=True),
            extract_pixel_feature(Ap_pd, (row, col), c, full_feat=False)
        ])

        p_coh_orig, r_star_orig = best_coherence_match_orig(
            A_pd, Ap_pd, Bs_feat, s, (row, col, imw), c)
        p_coh_new, r_star_new = best_coherence_match(As[-1], A.shape, Bs_feat,
                                                     s, (row, col, imw), c)

        try:
            assert p_coh_orig == (row, col)
            assert p_coh_new == (row, col)
        except AssertionError:  # was a bare except; only assertion failures matter
            print('row, col, p_coh_orig, p_coh_new, s', row, col, p_coh_orig,
                  p_coh_new, s)
            # Bug fix: the original passed p_coh_orig AND p_coh_new to
            # extract_pixel_feature, which takes one coordinate.
            As_feat = np.hstack([
                extract_pixel_feature(A_pd, p_coh_orig, c, full_feat=True),
                extract_pixel_feature(Ap_pd, p_coh_orig, c, full_feat=False)
            ])
            print('As_feat', As_feat)
            print('Bs_feat', Bs_feat)
            raise
def img_setup(A_fname, Ap_fname_list, B_fname, out_path, c):
    """Load, normalize, and pyramid-ize the analogy input images.

    Reads A, B, and the Ap images, scales them to [0, 1], optionally
    converts to YIQ and remaps luminance, then builds Gaussian pyramids.

    Returns:
        (A_pyr, Ap_pyr_list, B_pyr, Bp_pyr, color_pyr_list, c)
    """
    if not os.path.exists(out_path):
        os.makedirs(out_path)

    A_orig = plt.imread(A_fname)
    B_orig = plt.imread(B_fname)

    # Same number of channels (for now).
    assert len(A_orig.shape) == len(B_orig.shape)

    Ap_orig_list = []
    for Ap_fname in Ap_fname_list:
        Ap_orig = plt.imread(Ap_fname)
        assert A_orig.shape == Ap_orig.shape  # src alignment
        Ap_orig_list.append(Ap_orig)

    # Make sure all images are floats on a 0-to-1 scale.
    # Bug fix: probe the first Ap *image* (Ap_orig_list[0]); the original
    # used Ap_orig[0], the first row of the last image read.
    scales = []
    for img in [A_orig, B_orig, Ap_orig_list[0]]:
        if np.max(img) > 1.0:
            scales.append(255.)
        else:
            scales.append(1.0)

    # Do conversions
    if c.convert:
        A_yiq = convert_to_YIQ(A_orig / scales[0])
        B_yiq = convert_to_YIQ(B_orig / scales[1])
        A = A_yiq[:, :, 0]
        B = B_yiq[:, :, 0]
        Ap_yiq_list = []
        Ap_list = []
        for Ap_orig in Ap_orig_list:
            Ap_yiq_list.append(convert_to_YIQ(Ap_orig / scales[2]))
            Ap_list.append(Ap_yiq_list[-1][:, :, 0])
    else:
        A = A_orig / scales[0]
        B = B_orig / scales[1]
        Ap_list = []
        for Ap_orig in Ap_orig_list:
            Ap_list.append(Ap_orig / scales[2])

    # Process input images
    if c.remap_lum:
        A, Ap_list = remap_luminance(A, Ap_list, B)
    if not c.init_rand:
        B_orig_pyr = compute_gaussian_pyramid(B, c.n_sm)
    A, B = compress_values(A, B, c.AB_weight)
    c.num_ch, c.padding_sm, c.padding_lg, c.weights = setup_vars(A)

    # Create Pyramids
    A_pyr = compute_gaussian_pyramid(A, c.n_sm)
    B_pyr = compute_gaussian_pyramid(B, c.n_sm)
    Ap_pyr_list = []
    for Ap in Ap_list:
        Ap_pyr_list.append(compute_gaussian_pyramid(Ap, c.n_sm))

    if c.convert:
        color_pyr_list = [compute_gaussian_pyramid(B_yiq, c.n_sm)]
    else:
        color_pyr_list = [compute_gaussian_pyramid(Ap_orig, c.n_sm)
                          for Ap_orig in Ap_list]

    if len(A_pyr) != len(B_pyr):
        c.max_levels = min(len(A_pyr), len(B_pyr))
        # Bug fix: the warning string literal was broken across a raw
        # newline, which is a syntax error.
        warnings.warn('Warning: input images are very different sizes! '
                      'The minimum number of levels will be used.')
    else:
        c.max_levels = len(B_pyr)

    # Create Random Initialization of Bp
    if c.init_rand:
        Bp_pyr = initialize_Bp(B_pyr, init_rand=True)
    else:
        Bp_pyr = initialize_Bp(B_orig_pyr, init_rand=False)

    return A_pyr, Ap_pyr_list, B_pyr, Bp_pyr, color_pyr_list, c
def test_compute_feature_array():
    """Exercise compute_feature_array on 2D and 3-channel synthetic pairs."""
    # Make and test a 2D image pair with one marked pixel each.
    sm = 0.5 * np.ones((4, 5))
    sm[0, 0] = 0
    lg = 0.3 * np.ones((7, 10))
    lg[0, 0] = 1

    # Expected neighborhoods around (0, 0) after padding.
    sm_0 = np.array([[0, 0, 0.5],
                     [0, 0, 0.5],
                     [0.5, 0.5, 0.5]])
    lg_0 = np.array([[0.3, 0.3, 0.3, 0.3, 0.3],
                     [0.3, 1, 1, 0.3, 0.3],
                     [0.3, 1, 1, 0.3, 0.3],
                     [0.3, 0.3, 0.3, 0.3, 0.3],
                     [0.3, 0.3, 0.3, 0.3, 0.3]])

    # 2D, full feature.
    c.num_ch, c.padding_sm, c.padding_lg, c.weights = c.setup_vars(lg)
    feat = compute_feature_array([sm, lg], c, full_feat=True)
    expected = np.concatenate([sm_0.flatten(), lg_0.flatten()])
    assert len(feat) == 2
    assert feat[0] == []
    assert feat[1].shape == (7 * 10, 9 + 25)
    assert np.allclose(feat[1][0], expected)

    # 2D, half feature.
    feat = compute_feature_array([sm, lg], c, full_feat=False)
    expected = np.concatenate([sm_0.flatten(), lg_0.flatten()[:c.n_half]])
    assert len(feat) == 2
    assert feat[0] == []
    assert feat[1].shape == (7 * 10, 9 + c.n_half)
    assert np.allclose(feat[1][0], expected)

    # Make and test 3-channel versions of the same images.
    sm = 0.5 * np.ones((4, 5, 3))
    sm[0, 0, :] = 0
    lg = 0.3 * np.ones((7, 10, 3))
    lg[0, 0] = 1

    # 3D, full feature.
    c.num_ch, c.padding_sm, c.padding_lg, c.weights = c.setup_vars(lg)
    feat = compute_feature_array([sm, lg], c, full_feat=True)
    sm_3d_0 = np.dstack([sm_0] * 3)
    lg_3d_0 = np.dstack([lg_0] * 3)
    expected = np.concatenate([sm_3d_0.flatten(), lg_3d_0.flatten()])
    assert len(feat) == 2
    assert feat[0] == []
    assert feat[1].shape == (7 * 10, (9 + 25) * 3)
    assert np.allclose(feat[1][0], expected)

    # 3D, half feature.
    feat = compute_feature_array([sm, lg], c, full_feat=False)
    expected = np.concatenate([sm_3d_0.flatten(),
                               lg_3d_0.flatten()[:c.n_half * 3]])
    assert len(feat) == 2
    assert feat[0] == []
    assert feat[1].shape == (7 * 10, (9 + c.n_half) * 3)
    assert np.allclose(feat[1][0], expected)
def img_setup(A_fname, Ap_fname_list, B_fname, out_path, c):
    """Prepare all input images and pyramids for image-analogy synthesis.

    Loads A, B, and every Ap image, rescales each to [0, 1], optionally
    converts to YIQ / remaps luminance, and builds Gaussian pyramids.

    Returns:
        (A_pyr, Ap_pyr_list, B_pyr, Bp_pyr, color_pyr_list, c)
    """
    if not os.path.exists(out_path):
        os.makedirs(out_path)

    A_orig = plt.imread(A_fname)
    B_orig = plt.imread(B_fname)

    assert (len(A_orig.shape) == len(B_orig.shape)
            )  # same number of channels (for now)

    Ap_orig_list = []
    for Ap_fname in Ap_fname_list:
        Ap_orig = plt.imread(Ap_fname)
        assert (A_orig.shape == Ap_orig.shape)  # src alignment
        Ap_orig_list.append(Ap_orig)

    # Make sure all images are floats on 0 to 1 scale.
    # Bug fix: use the first Ap image (Ap_orig_list[0]); the original
    # probed Ap_orig[0], which is only the first row of the last image.
    scales = []
    for img in [A_orig, B_orig, Ap_orig_list[0]]:
        if np.max(img) > 1.0:
            scales.append(255.)
        else:
            scales.append(1.0)

    # Do conversions
    if c.convert:
        A_yiq = convert_to_YIQ(A_orig / scales[0])
        B_yiq = convert_to_YIQ(B_orig / scales[1])
        A = A_yiq[:, :, 0]
        B = B_yiq[:, :, 0]
        Ap_yiq_list = []
        Ap_list = []
        for Ap_orig in Ap_orig_list:
            Ap_yiq_list.append(convert_to_YIQ(Ap_orig / scales[2]))
            Ap_list.append(Ap_yiq_list[-1][:, :, 0])
    else:
        A = A_orig / scales[0]
        B = B_orig / scales[1]
        Ap_list = []
        for Ap_orig in Ap_orig_list:
            Ap_list.append(Ap_orig / scales[2])

    # Process input images
    if c.remap_lum:
        A, Ap_list = remap_luminance(A, Ap_list, B)
    if not c.init_rand:
        B_orig_pyr = compute_gaussian_pyramid(B, c.n_sm)
    A, B = compress_values(A, B, c.AB_weight)
    c.num_ch, c.padding_sm, c.padding_lg, c.weights = setup_vars(A)

    # Create Pyramids
    A_pyr = compute_gaussian_pyramid(A, c.n_sm)
    B_pyr = compute_gaussian_pyramid(B, c.n_sm)
    Ap_pyr_list = []
    for Ap in Ap_list:
        Ap_pyr_list.append(compute_gaussian_pyramid(Ap, c.n_sm))

    if c.convert:
        color_pyr_list = [compute_gaussian_pyramid(B_yiq, c.n_sm)]
    else:
        color_pyr_list = [
            compute_gaussian_pyramid(Ap_orig, c.n_sm) for Ap_orig in Ap_list
        ]

    if len(A_pyr) != len(B_pyr):
        c.max_levels = min(len(A_pyr), len(B_pyr))
        warnings.warn(
            'Warning: input images are very different sizes! The minimum number of levels will be used.'
        )
    else:
        c.max_levels = len(B_pyr)

    # Create Random Initialization of Bp
    if c.init_rand:
        Bp_pyr = initialize_Bp(B_pyr, init_rand=True)
    else:
        Bp_pyr = initialize_Bp(B_orig_pyr, init_rand=False)

    return A_pyr, Ap_pyr_list, B_pyr, Bp_pyr, color_pyr_list, c