import logging

from itertools import starmap
from time import time

import numpy as np
import spams

from joblib import Parallel, delayed
from numpy.testing import assert_allclose
from scipy.ndimage import zoom

# im2col_nd, col2im_nd, extract_patches, processer and proc_inner are
# package-local helpers assumed to be importable from elsewhere in the repo.

logger = logging.getLogger(__name__)


def test_reshaping():
    a = np.random.randint(0, 10000, size=(30, 30, 30))

    # im2col_nd followed by col2im_nd must reconstruct the original array,
    # with or without overlap between the blocks.
    assert_allclose(a, col2im_nd(im2col_nd(a, (2, 2, 2), (1, 1, 1)),
                                 (2, 2, 2), (30, 30, 30), (1, 1, 1)))
    assert_allclose(a, col2im_nd(im2col_nd(a, (3, 3, 3), (2, 2, 2)),
                                 (3, 3, 3), (30, 30, 30), (2, 2, 2)))

    b = np.random.randint(0, 10000, size=(30, 30, 30, 10))
    assert_allclose(b, col2im_nd(im2col_nd(b, (2, 2, 2, 10), (1, 1, 1, 1)),
                                 (2, 2, 2, 10), (30, 30, 30, 10), (1, 1, 1, 1)))
    assert_allclose(b, col2im_nd(im2col_nd(b, (3, 3, 3, 10), (2, 2, 2, 1)),
                                 (3, 3, 3, 10), (30, 30, 30, 10), (2, 2, 2, 1)))

    a = np.random.rand(1000).reshape(10, 10, 10)
    out = im2col_nd(a, (3, 3, 3), (2, 2, 2))
    redo = col2im_nd(out, (3, 3, 3), (10, 10, 10), (2, 2, 2))
    assert_allclose(a, redo)

    # extract_patches with unit steps yields the same columns as im2col_nd
    # with maximum overlap, so col2im_nd can invert it as well.
    out = extract_patches(a, (3, 3, 3), (1, 1, 1)).reshape(-1, 3**3).T
    redo = col2im_nd(out, (3, 3, 3), (10, 10, 10), (2, 2, 2))
    assert_allclose(a, redo)

    a = np.random.rand(100000).reshape(10, 100, 10, 10)
    out = extract_patches(a, (3, 3, 3, 10), (1, 1, 1, 10)).reshape(-1, 3**3 * 10).T
    redo = col2im_nd(out, (3, 3, 3, 10), (10, 100, 10, 10), (2, 2, 2, 0))
    assert_allclose(a, redo)

    out = im2col_nd(a, (3, 3, 3, 10), (2, 2, 2, 0))
    redo = col2im_nd(out, (3, 3, 3, 10), (10, 100, 10, 10), (2, 2, 2, 0))
    assert_allclose(a, redo)

    a = np.random.rand(1000).reshape(10, 10, 10)
    out = im2col_nd(a, (2, 2, 2), (0, 0, 0))
    redo = col2im_nd(out, (2, 2, 2), (10, 10, 10), (0, 0, 0))
    assert_allclose(a, redo)

    out = extract_patches(a, (2, 2, 2), (2, 2, 2)).reshape(-1, 2**3).T
    redo = col2im_nd(out, (2, 2, 2), (10, 10, 10), (0, 0, 0))
    assert_allclose(a, redo)

    a = np.random.rand(10000).reshape(10, 10, 10, 10)
    out = im2col_nd(a, (2, 2, 2, 10), (0, 0, 0, 0))
    redo = col2im_nd(out, (2, 2, 2, 10), (10, 10, 10, 10), (0, 0, 0, 0))
    assert_allclose(a, redo)

    out = extract_patches(a, (2, 2, 2, 10), (1, 1, 1, 10)).reshape(-1, 2**3 * 10).T
    redo = col2im_nd(out, (2, 2, 2, 10), (10, 10, 10, 10), (0, 0, 0, 0))
    assert_allclose(a, redo)
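# The round trips above work because col2im_nd accumulates every overlapping
# patch back into place and divides by the per-voxel overlap count. Below is
# a minimal 2D sketch of that accumulate-and-divide idea; _col2im_2d_sketch
# is a hypothetical name for illustration, not the actual implementation.
def _col2im_2d_sketch(cols, block_size, shape):
    out = np.zeros(shape)
    count = np.zeros(shape)
    k = 0
    # One fully overlapping block per column, positions in row-major order
    for i in range(shape[0] - block_size[0] + 1):
        for j in range(shape[1] - block_size[1] + 1):
            out[i:i + block_size[0], j:j + block_size[1]] += cols[:, k].reshape(block_size)
            count[i:i + block_size[0], j:j + block_size[1]] += 1
            k += 1
    return out / count

# Self-check of the sketch against numpy's own sliding window:
# a = np.random.rand(6, 6)
# cols = np.lib.stride_tricks.sliding_window_view(a, (2, 2)).reshape(-1, 4).T
# np.allclose(a, _col2im_2d_sketch(cols, (2, 2), (6, 6)))  # True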
def test_extract_patches_strided():
    image_shapes_1D = [(10,), (10,), (11,), (10,)]
    patch_sizes_1D = [(1,), (2,), (3,), (8,)]
    patch_steps_1D = [(1,), (1,), (4,), (2,)]
    expected_views_1D = [(10,), (9,), (3,), (2,)]
    last_patch_1D = [(10,), (8,), (8,), (2,)]

    image_shapes_2D = [(10, 20), (10, 20), (10, 20), (11, 20)]
    patch_sizes_2D = [(2, 2), (10, 10), (10, 11), (6, 6)]
    patch_steps_2D = [(5, 5), (3, 10), (3, 4), (4, 2)]
    expected_views_2D = [(2, 4), (1, 2), (1, 3), (2, 8)]
    last_patch_2D = [(5, 15), (0, 10), (0, 8), (4, 14)]

    image_shapes_3D = [(5, 4, 3), (3, 3, 3), (7, 8, 9), (7, 8, 9)]
    patch_sizes_3D = [(2, 2, 3), (2, 2, 2), (1, 7, 3), (1, 3, 3)]
    patch_steps_3D = [(1, 2, 10), (1, 1, 1), (2, 1, 3), (3, 3, 4)]
    expected_views_3D = [(4, 2, 1), (2, 2, 2), (4, 2, 3), (3, 2, 2)]
    last_patch_3D = [(3, 2, 0), (1, 1, 1), (6, 1, 6), (6, 3, 4)]

    image_shapes = image_shapes_1D + image_shapes_2D + image_shapes_3D
    patch_sizes = patch_sizes_1D + patch_sizes_2D + patch_sizes_3D
    patch_steps = patch_steps_1D + patch_steps_2D + patch_steps_3D
    expected_views = expected_views_1D + expected_views_2D + expected_views_3D
    last_patches = last_patch_1D + last_patch_2D + last_patch_3D

    for (image_shape, patch_size, patch_step, expected_view, last_patch) in zip(
            image_shapes, patch_sizes, patch_steps, expected_views, last_patches):
        image = np.arange(np.prod(image_shape)).reshape(image_shape)
        patches = extract_patches(image, patch_size, patch_step, flatten=False)
        ndim = len(image_shape)

        assert patches.shape[:ndim] == expected_view

        last_patch_slices = tuple(slice(i, i + j, None)
                                  for i, j in zip(last_patch, patch_size))
        assert (patches[(-1, None, None) * ndim] ==
                image[last_patch_slices].squeeze()).all()
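# The layout asserted above (ndim patch-position axes followed by ndim
# within-patch axes) can be produced with numpy stride tricks alone, without
# copying any data. A sketch of the idea behind extract_patches, with
# _patch_view as a hypothetical name for illustration:
from numpy.lib.stride_tricks import as_strided

def _patch_view(image, patch_size, patch_step):
    # Number of patch positions along each axis
    n_views = tuple((shape - size) // step + 1
                    for shape, size, step in zip(image.shape, patch_size, patch_step))
    # Jump patch_step elements between patches, one element within a patch
    strides = tuple(stride * step
                    for stride, step in zip(image.strides, patch_step)) + image.strides
    return as_strided(image, shape=n_views + tuple(patch_size), strides=strides)

# For example, _patch_view(np.arange(10), (2,), (1,)).shape == (9, 2)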
def local_denoise(data, block_size, overlap, variance, n_iter=10, mask=None,
                  dtype=np.float64, n_cores=-1, use_threading=False, verbose=False):
    if verbose:
        logger.setLevel(logging.INFO)

    if mask is None:
        mask = np.ones(data.shape[:-1], dtype=bool)

    X = extract_patches(data, block_size,
                        [1, 1, 1, block_size[-1]]).reshape(-1, np.prod(block_size)).T

    # Solving for D
    param_alpha = {}
    param_alpha['pos'] = True
    param_alpha['mode'] = 1

    param_D = {}
    param_D['verbose'] = False
    param_D['posAlpha'] = True
    param_D['posD'] = True
    param_D['mode'] = 2
    param_D['lambda1'] = 1.2 / np.sqrt(np.prod(block_size))
    param_D['K'] = int(2 * np.prod(block_size))
    param_D['iter'] = 150
    param_D['batchsize'] = 500
    param_D['numThreads'] = n_cores

    if 'D' in param_alpha:
        param_D['D'] = param_alpha['D']

    # Only train the dictionary on patches which are at least half inside the mask
    mask_col = extract_patches(mask, block_size[:-1], (1, 1, 1), flatten=False)
    axis = tuple(range(mask_col.ndim // 2, mask_col.ndim))
    train_idx = np.sum(mask_col, axis=axis).ravel() > (np.prod(block_size[:-1]) / 2.)

    train_data = np.asfortranarray(X[:, train_idx])
    train_data /= np.sqrt(np.sum(train_data**2, axis=0, keepdims=True), dtype=dtype)

    param_alpha['D'] = spams.trainDL(train_data, **param_D)
    param_alpha['D'] /= np.sqrt(np.sum(param_alpha['D']**2, axis=0,
                                       keepdims=True, dtype=dtype))
    param_D['D'] = param_alpha['D']

    del train_idx, train_data, X, mask_col

    if use_threading or (n_cores == 1):
        param_alpha['numThreads'] = n_cores
        param_D['numThreads'] = n_cores
    else:
        param_alpha['numThreads'] = 1
        param_D['numThreads'] = 1

    # Each worker denoises one slab of block_size[2] consecutive slices
    arglist = ((data[:, :, k:k + block_size[2]],
                mask[:, :, k:k + block_size[2]],
                variance[:, :, k:k + block_size[2]],
                block_size, overlap, param_alpha, param_D, dtype, n_iter)
               for k in range(data.shape[2] - block_size[2] + 1))

    if use_threading:
        data_denoised = starmap(processer, arglist)
    else:
        time_multi = time()
        data_denoised = Parallel(n_jobs=n_cores,
                                 verbose=verbose)(delayed(processer)(*args)
                                                  for args in arglist)
        logger.info('Multiprocessing done in {0:.2f} mins.'.format(
            (time() - time_multi) / 60.))

    # Put together the multiprocessed results
    data_subset = np.zeros_like(data, dtype=np.float32)
    divider = np.zeros_like(data, dtype=np.int16)

    for k, content in enumerate(data_denoised):
        data_subset[:, :, k:k + block_size[2]] += content
        divider[:, :, k:k + block_size[2]] += 1

    data_subset /= divider
    return data_subset
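# Hedged usage sketch for local_denoise. The array shapes, block size,
# overlap and the constant variance map below are illustrative values only:
# rng = np.random.default_rng()
# data = np.abs(rng.standard_normal((20, 20, 20, 5)))
# variance = np.full(data.shape[:-1], 0.5, dtype=np.float32)
# denoised = local_denoise(data, block_size=(3, 3, 3, 5), overlap=(2, 2, 2, 4),
#                          variance=variance, n_cores=1)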
def estimate_from_nmaps(data, size=5, return_mask=True, method='moments', full=False,
                        ncores=-1, use_rejection=False, verbose=0):
    '''Given the data, estimates parameters of the gamma distribution in small 3D windows.

    input
    -----
    data : noise maps to use for parameter estimation.

    optional
    --------
    size : size of the 3D windows (default 5)
    return_mask : bool, if True returns the identified noise voxels as a mask
    method='moments' or method='maxlk' : which algorithm to use to estimate sigma and N
    full : bool, if True estimates are made in overlapping windows
    ncores : int, number of cores to use for multiprocessing
    use_rejection : if True, iterate to reject voxels in each estimated window,
        but this is much slower than just using all of the data.
    verbose : print progress for joblib, can be an integer to increase verbosity

    output
    ------
    sigma, N, mask (optional)
    '''
    m_out = np.zeros(data.shape[:-1], dtype=bool)
    median = np.median(data)

    # On masked data the background is 0, so use the median of the non-zero voxels instead
    if median == 0:
        median = np.median(data[data > 0])

    if full:
        reshaped_maps = extract_patches(data, (size, size, size, data.shape[-1]),
                                        (1, 1, 1, data.shape[-1]), flatten=False)
        sigma = np.zeros(data.shape[:-1], dtype=np.float32)
        N = np.zeros(data.shape[:-1], dtype=np.float32)
        count = np.zeros(data.shape[:-1], dtype=np.int32)
        mask = np.zeros(data.shape[:-1], dtype=np.int32)

        indexer = list(np.ndindex(reshaped_maps.shape[:reshaped_maps.ndim // 2 - 1]))
        output = Parallel(n_jobs=ncores,
                          verbose=verbose)(delayed(proc_inner)(reshaped_maps[i], median,
                                                               size, method, use_rejection)
                                           for i in indexer)

        indexer = (np.index_exp[idx[0]:idx[0] + size,
                                idx[1]:idx[1] + size,
                                idx[2]:idx[2] + size] for idx in indexer)

        # We accumulate the value at each voxel then take the average over the overlap
        for idx, (s, n, m) in zip(indexer, output):
            sigma[idx] += s
            N[idx] += n
            mask[idx] = m.sum()
            count[idx] += 1

        sigma /= count
        N /= count

        if return_mask:
            return sigma, N, mask
        return sigma, N
    else:
        reshaped_maps = extract_patches(data, (size, size, size, data.shape[-1]),
                                        (size, size, size, data.shape[-1]))
        sigma = np.zeros(reshaped_maps.shape[0], dtype=np.float32)
        N = np.zeros(reshaped_maps.shape[0], dtype=np.float32)
        mask = np.zeros((reshaped_maps.shape[0], size**3), dtype=bool)

        output = Parallel(n_jobs=ncores,
                          verbose=verbose)(delayed(proc_inner)(reshaped_maps[i], median,
                                                               size, method, use_rejection)
                                           for i in range(reshaped_maps.shape[0]))

        for i, (s, n, m) in enumerate(output):
            sigma[i] = s
            N[i] = n
            mask[i] = np.squeeze(m).sum(axis=-1)

        s_out = sigma.reshape(data.shape[0] // size,
                              data.shape[1] // size,
                              data.shape[2] // size)
        N_out = N.reshape(data.shape[0] // size,
                          data.shape[1] // size,
                          data.shape[2] // size)

        for n, i in enumerate(np.ndindex(s_out.shape)):
            i = np.array(i) * size
            j = i + size
            m_out[i[0]:j[0], i[1]:j[1], i[2]:j[2]] = mask[n].reshape(size, size, size)

        # Upsample the block-wise estimates back to the original resolution
        x, y, z = np.array(s_out.shape) * size
        interpolated_sigma = np.zeros_like(data[..., 0], dtype=np.float32)
        interpolated_N = np.zeros_like(data[..., 0], dtype=np.float32)

        interpolated_sigma[:x, :y, :z] = zoom(s_out, size, order=1)
        interpolated_N[:x, :y, :z] = zoom(N_out, size, order=1)

        if return_mask:
            return interpolated_sigma, interpolated_N, m_out
        return interpolated_sigma, interpolated_N
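# Hedged usage sketch for estimate_from_nmaps. 'noise_maps' stands in for a
# hypothetical 4D array of stacked noise maps with shape (x, y, z, n_maps):
# sigma, N, mask = estimate_from_nmaps(noise_maps, size=5, method='moments',
#                                      full=False, ncores=4)
# Passing full=True instead estimates in every overlapping window, which is
# slower but averages the estimates voxelwise over the overlaps.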