# Imports used in this section; the patch-level helpers referenced below
# (get_patch_indices, patch_variance_test, patch_edge_test,
# patch_airlight_angle_test, search_t_binary, search_t_binary_batch,
# get_laplacian_4neigh) are assumed to be defined elsewhere in this module.
import numpy as np
from scipy import sparse
# import scipy.sparse.linalg as splinalg  # only needed for the spsolve fallback
from sksparse.cholmod import cholesky


def dehaze_comp_serial(im, A, comp_model, patch_X, patch_Y, stride_X, stride_Y,
                       var_thr, edge_thr, angle_thr, angle_test_pixel_frac,
                       alpha, start_t, end_t, t_tol):
    '''
    Return the predicted transmission map t(x) and the dehazed image.
    t is estimated per patch by binary search over the comparison model.
    '''
    [nrow, ncol, nch] = im.shape
    numpixel = nrow * ncol

    # preallocate
    t_est = np.zeros((nrow, ncol), dtype='float32')
    count = np.zeros((nrow, ncol), dtype='float32')
    conf = np.zeros((nrow, ncol), dtype='float32')

    [x, y] = get_patch_indices(nrow, ncol, patch_X, patch_Y, stride_X, stride_Y)

    # estimate t in each patch
    for idx in range(x.size):
        for idy in range(y.size):
            patch = im[x[idx]:x[idx] + patch_X,
                       y[idy]:y[idy] + patch_Y, :].copy()

            var_test = patch_variance_test(patch, var_thr)
            edge_test = patch_edge_test(patch, edge_thr)
            (airlight_angle_test, airlight_cos_dist) \
                = patch_airlight_angle_test(patch, var_thr, angle_thr, A,
                                            angle_test_pixel_frac)

            # keep a patch only if it passes the variance and airlight-angle
            # tests and fails the edge test
            if not (var_test and not edge_test and airlight_angle_test):
                continue

            (t_out, nconf) = search_t_binary(patch, comp_model, start_t,
                                             end_t, A, t_tol)
            # computed_conf = nconf * airlight_cos_dist
            computed_conf = nconf

            # accumulate the patch estimate into the running sum and count
            est_patch = t_est[x[idx]:x[idx] + patch_X,
                              y[idy]:y[idy] + patch_Y].copy()
            c_patch = count[x[idx]:x[idx] + patch_X,
                            y[idy]:y[idy] + patch_Y].copy()
            est_patch += t_out
            c_patch += 1
            t_est[x[idx]:x[idx] + patch_X, y[idy]:y[idy] + patch_Y] = est_patch
            count[x[idx]:x[idx] + patch_X, y[idy]:y[idy] + patch_Y] = c_patch

            # keep the best confidence seen so far at each pixel
            conf_patch = conf[x[idx]:x[idx] + patch_X,
                              y[idy]:y[idy] + patch_Y].copy()
            conf_patch = np.maximum(conf_patch, computed_conf)
            conf[x[idx]:x[idx] + patch_X, y[idy]:y[idy] + patch_Y] = conf_patch

    # average the estimates of overlapping patches
    nzidx = (count != 0)
    t_est[nzidx] = t_est[nzidx] / count[nzidx]

    # now interpolate t over the pixels without estimates
    L = get_laplacian_4neigh(im, longrange=True)  # L is sparse, not np.ndarray
    # conf is zero at places with no estimates, i.e. the discarded patches
    sigma = sparse.dia_matrix((conf.flatten(), [0]),
                              shape=(numpixel, numpixel), dtype='float32')
    # t_interp_col = splinalg.spsolve(sigma + alpha * L, sigma * t_est.flatten())
    # cholesky is faster than spsolve
    chol = cholesky(sigma + alpha * L)
    t_interp_col = chol.solve_A(sigma * t_est.flatten())
    t_interp = t_interp_col.reshape((nrow, ncol))
    t_interp = np.clip(t_interp, 0, 1)
    t_est = np.clip(t_est, 0, 1)

    # recover the scene radiance: J = A + (I - A) / t
    A = A.reshape((1, 1, 3))
    out_im = A + (im - A) / np.tile(t_interp[:, :, np.newaxis], (1, 1, 3))
    out_im = np.clip(out_im, 0, 1)
    return (out_im, t_est, t_interp)
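# t_interp_laplacian is called by dehaze_comp2 below but is not defined in
# this section. The following is a minimal sketch, assuming it simply factors
# out the confidence-weighted Laplacian interpolation step that is inlined in
# dehaze_comp_serial above; if the module defines it elsewhere, prefer that
# version.
def t_interp_laplacian(t_est, conf, im, alpha):
    '''
    Interpolate sparse transmission estimates over the whole image by
    solving (Sigma + alpha * L) t = Sigma * t_est, where Sigma is the
    diagonal confidence matrix and L the image Laplacian.
    '''
    [nrow, ncol] = t_est.shape
    numpixel = nrow * ncol
    L = get_laplacian_4neigh(im, longrange=True)
    sigma = sparse.dia_matrix((conf.flatten(), [0]),
                              shape=(numpixel, numpixel), dtype='float32')
    chol = cholesky(sigma + alpha * L)
    t_interp_col = chol.solve_A(sigma * t_est.flatten())
    t_interp = t_interp_col.reshape((nrow, ncol))
    # clip to a valid transmission range, mirroring the serial path
    return np.clip(t_interp, 0, 1)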
def dehaze_comp2(im, A, comp_model, patch_X, patch_Y, stride_X, stride_Y,
                 var_thr, edge_thr, angle_thr, angle_test_pixel_frac,
                 alpha, start_t, end_t, t_tol):
    '''
    Variant of dehaze_comp that runs the model inferences in batches
    for a speed-up.
    '''
    [nrow, ncol, nch] = im.shape

    # preallocate
    t_est = np.zeros((nrow, ncol), dtype='float32')
    count = np.zeros((nrow, ncol), dtype='float32')
    conf = np.zeros((nrow, ncol), dtype='float32')

    [x, y] = get_patch_indices(nrow, ncol, patch_X, patch_Y, stride_X, stride_Y)

    # total number of patches before discarding
    numpatch = x.size * y.size
    im_patches = np.zeros((numpatch, patch_X, patch_Y, nch), dtype='float32')
    patch_used = np.ones((numpatch,), dtype='bool')

    # dump the patches into a batch array
    sel_patch_idx = 0
    patch_idx = -1
    for idx in range(x.size):
        for idy in range(y.size):
            patch = im[x[idx]:x[idx] + patch_X,
                       y[idy]:y[idy] + patch_Y, :].copy()
            patch_idx += 1

            # discard patches here: keep a patch only if it passes the
            # variance test and fails the edge test
            var_test = patch_variance_test(patch, var_thr)
            edge_test = patch_edge_test(patch, edge_thr)
            discard = not var_test or edge_test
            if discard:
                patch_used[patch_idx] = False
                continue

            im_patches[sel_patch_idx, :, :, :] = patch
            sel_patch_idx += 1

    patches = im_patches[:sel_patch_idx, :, :, :]

    # find t's only in the selected patches
    (t_out_patches, nconf_patches) \
        = search_t_binary_batch(patches, comp_model, start_t, end_t, A, t_tol)

    # move from patches back to the image and aggregate
    sel_patch_idx = 0  # reusing
    patch_idx = -1
    for idx in range(x.size):
        for idy in range(y.size):
            patch_idx += 1
            if not patch_used[patch_idx]:
                continue

            est_patch = t_est[x[idx]:x[idx] + patch_X,
                              y[idy]:y[idy] + patch_Y].copy()
            c_patch = count[x[idx]:x[idx] + patch_X,
                            y[idy]:y[idy] + patch_Y].copy()
            conf_patch = conf[x[idx]:x[idx] + patch_X,
                              y[idy]:y[idy] + patch_Y].copy()

            # update
            est_patch += t_out_patches[sel_patch_idx]
            c_patch += 1
            conf_patch = np.maximum(conf_patch, nconf_patches[sel_patch_idx])
            sel_patch_idx += 1

            t_est[x[idx]:x[idx] + patch_X, y[idy]:y[idy] + patch_Y] = est_patch
            count[x[idx]:x[idx] + patch_X, y[idy]:y[idy] + patch_Y] = c_patch
            conf[x[idx]:x[idx] + patch_X, y[idy]:y[idy] + patch_Y] = conf_patch

    # average the aggregated t
    nzidx = (count != 0)
    t_est[nzidx] = t_est[nzidx] / count[nzidx]
    t_est = np.clip(t_est, 0, 1)

    # interpolate t
    t_interp = t_interp_laplacian(t_est, conf, im, alpha)

    # recover the scene radiance: J = A + (I - A) / t
    A = A.reshape((1, 1, 3))
    out_im = A + (im - A) / np.tile(t_interp[:, :, np.newaxis], (1, 1, 3))
    out_im = np.clip(out_im, 0, 1)
    return (out_im, t_est, t_interp)
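# A hypothetical usage sketch, not part of the original module: the file
# name, airlight vector, comp_model loader, and all parameter values below
# are illustrative placeholders, not tuned settings.
if __name__ == '__main__':
    import matplotlib.pyplot as plt

    im = plt.imread('hazy.png')[:, :, :3].astype('float32')  # PNG -> [0, 1]
    A = np.array([0.95, 0.97, 1.0], dtype='float32')  # estimated airlight
    comp_model = load_comp_model()  # hypothetical: trained comparison model

    (out_im, t_est, t_interp) = dehaze_comp2(
        im, A, comp_model,
        patch_X=16, patch_Y=16, stride_X=8, stride_Y=8,
        var_thr=2e-3, edge_thr=0.1, angle_thr=0.02,
        angle_test_pixel_frac=0.3, alpha=1e-4,
        start_t=0.05, end_t=1.0, t_tol=1e-2)

    plt.imsave('dehazed.png', out_im)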