def search_t_binary(haze_patch, comparator, start_t, end_t, A, step_tol):
    """Bisection search for the transmission value t within [start_t, end_t].

    The main difference from method2 is that the step size is halved after
    each iteration (classic binary search): the patch is dehazed at the
    interval midpoint and the comparator decides which half of the interval
    survives.

    Returns:
        (t, conf): midpoint of the final interval, and a confidence flag
        that is set to 0 if the interval ever inverted (lo > hi), else 1.
    """
    # Assumes start_t < end_t on entry.
    lo = start_t
    hi = end_t
    conf = 1
    # abs() is not needed here: hi stays above lo throughout the loop.
    while (hi - lo) > step_tol and hi > lo:
        mid = (lo + hi) / 2.0
        dehazed = dehaze_patch(haze_patch, mid, A)
        [score_hazy, score_dehazed] = comp_patches(comparator, haze_patch, dehazed)
        if score_hazy > score_dehazed:
            # The dehazed patch scored worse -> no need to search below mid.
            lo = mid
        else:
            hi = mid
    # Flag (rather than raise on) an inverted interval.
    if lo > hi:
        conf = 0
    # No out-of-range check for t here; search_t_binary2 performs that check.
    mid = (lo + hi) / 2.0
    return (mid, conf)
# the t_comp's should not be too close dist = pdist(np.reshape(t_comp, (-1, 1)), metric='minkowski', p=1) if np.any(dist < t_step): continue in_index = idx * n_in_bad_p[idx] bad_t[in_index:in_index + n_in_bad_p[idx]] = t_comp break # print bad_t assert (np.all(bad_t < t_g)) for t_good in good_t: d_p = dehaze_patch(haze_patch, t_good, A) add_patch(d_p, haze_patch, X_all, Y_all, X_idx) A_all[X_idx, :, :, :] = A X_idx += 1 assert (np.sum((haze_patch - d_p)**2) != 0) for t_bad in bad_t: d_p = dehaze_patch(haze_patch, t_bad, A) add_patch(haze_patch, d_p, X_all, Y_all, X_idx) A_all[X_idx, :, :, :] = A X_idx += 1 assert (np.sum((haze_patch - d_p)**2) != 0) t_all[i, j, 0] = t_g