def test_reshaping():
    """Round-trip checks: col2im_nd applied to im2col_nd output must give back
    the original array, for 3-D and 4-D inputs and several block/overlap sizes."""
    a = np.random.randint(0, 10000, size=(30, 30, 30))
    for block, over in (((2, 2, 2), (1, 1, 1)), ((3, 3, 3), (2, 2, 2))):
        recon = col2im_nd(im2col_nd(a, block, over), block, (30, 30, 30), over)
        assert_almost_equal(a, recon)

    b = np.random.randint(0, 10000, size=(30, 30, 30, 10))
    for block, over in (((2, 2, 2, 10), (1, 1, 1, 1)), ((3, 3, 3, 10), (2, 2, 2, 1))):
        recon = col2im_nd(im2col_nd(b, block, over), block, (30, 30, 30, 10), over)
        assert_almost_equal(b, recon)

    # Float round-trips: (n_samples, target shape, block shape, overlap).
    cases = (
        (1000, (10, 10, 10), (3, 3, 3), (2, 2, 2)),
        (100000, (10, 100, 10, 10), (3, 3, 3, 10), (2, 2, 2, 0)),
        (1000, (10, 10, 10), (2, 2, 2), (0, 0, 0)),
        (10000, (10, 10, 10, 10), (2, 2, 2, 10), (0, 0, 0, 0)),
    )
    for size, shape, block, over in cases:
        a = np.random.rand(size).reshape(shape)
        out = im2col_nd(a, block, over)
        redo = col2im_nd(out, block, shape, over)
        np.testing.assert_allclose(a, redo)
def test_reshaping():
    """Round-trip checks for im2col_nd/col2im_nd, plus extract_patches used as
    an alternative column extractor feeding col2im_nd."""

    def _roundtrip(arr, block, over):
        # im2col followed by col2im must reproduce the input exactly.
        cols = im2col_nd(arr, block, over)
        assert_allclose(arr, col2im_nd(cols, block, arr.shape, over))

    def _patches_roundtrip(arr, block, step, over):
        # extract_patches -> column matrix -> col2im must also invert.
        cols = extract_patches(arr, block, step).reshape(-1, int(np.prod(block))).T
        assert_allclose(arr, col2im_nd(cols, block, arr.shape, over))

    a = np.random.randint(0, 10000, size=(30, 30, 30))
    _roundtrip(a, (2, 2, 2), (1, 1, 1))
    _roundtrip(a, (3, 3, 3), (2, 2, 2))

    b = np.random.randint(0, 10000, size=(30, 30, 30, 10))
    _roundtrip(b, (2, 2, 2, 10), (1, 1, 1, 1))
    _roundtrip(b, (3, 3, 3, 10), (2, 2, 2, 1))

    a = np.random.rand(1000).reshape(10, 10, 10)
    _roundtrip(a, (3, 3, 3), (2, 2, 2))
    _patches_roundtrip(a, (3, 3, 3), (1, 1, 1), (2, 2, 2))

    a = np.random.rand(100000).reshape(10, 100, 10, 10)
    _patches_roundtrip(a, (3, 3, 3, 10), (1, 1, 1, 10), (2, 2, 2, 0))
    _roundtrip(a, (3, 3, 3, 10), (2, 2, 2, 0))

    a = np.random.rand(1000).reshape(10, 10, 10)
    _roundtrip(a, (2, 2, 2), (0, 0, 0))
    _patches_roundtrip(a, (2, 2, 2), (2, 2, 2), (0, 0, 0))

    a = np.random.rand(10000).reshape(10, 10, 10, 10)
    _roundtrip(a, (2, 2, 2, 10), (0, 0, 0, 0))
    _patches_roundtrip(a, (2, 2, 2, 10), (1, 1, 1, 10), (0, 0, 0, 0))
def test_reshaping():
    """im2col_nd and col2im_nd must be mutual inverses for 3-D and 4-D arrays."""
    shape3 = (30, 30, 30)
    a = np.random.randint(0, 10000, size=shape3)
    assert_almost_equal(
        a, col2im_nd(im2col_nd(a, (2, 2, 2), (1, 1, 1)), (2, 2, 2), shape3, (1, 1, 1)))
    assert_almost_equal(
        a, col2im_nd(im2col_nd(a, (3, 3, 3), (2, 2, 2)), (3, 3, 3), shape3, (2, 2, 2)))

    shape4 = (30, 30, 30, 10)
    b = np.random.randint(0, 10000, size=shape4)
    assert_almost_equal(
        b, col2im_nd(im2col_nd(b, (2, 2, 2, 10), (1, 1, 1, 1)),
                     (2, 2, 2, 10), shape4, (1, 1, 1, 1)))
    assert_almost_equal(
        b, col2im_nd(im2col_nd(b, (3, 3, 3, 10), (2, 2, 2, 1)),
                     (3, 3, 3, 10), shape4, (2, 2, 2, 1)))

    # Float data: each case is (n_samples, array shape, block shape, overlap).
    for size, shape, block, over in (
            (1000, (10, 10, 10), (3, 3, 3), (2, 2, 2)),
            (100000, (10, 100, 10, 10), (3, 3, 3, 10), (2, 2, 2, 0)),
            (1000, (10, 10, 10), (2, 2, 2), (0, 0, 0)),
            (10000, (10, 10, 10, 10), (2, 2, 2, 10), (0, 0, 0, 0))):
        a = np.random.rand(size).reshape(shape)
        np.testing.assert_allclose(
            a, col2im_nd(im2col_nd(a, block, over), block, shape, over))
def processer(data, mask, variance, block_size, overlap, param_alpha, param_D,
              dtype=np.float64, n_iter=10, gamma=3., tau=1., tolerance=1e-5):
    """Denoise one slab of `data` by iteratively reweighted positive lasso.

    Extracts overlapping blocks, sparse-codes the columns whose block lies at
    least half inside `mask` against the dictionary `param_alpha['D']` with
    spams.lasso, then reassembles the weighted reconstruction via col2im_nd.

    Parameters
    ----------
    data, mask, variance : ndarrays covering the same spatial extent
        `mask` and `variance` are spatial-only (block_size[:-1] is used).
    block_size, overlap : tuples
        Block shape and per-axis overlap passed to im2col_nd/col2im_nd.
    param_alpha : dict
        spams.lasso options; must contain the dictionary under key 'D'.
        Mutated in place ('L', 'lambda1').
    param_D : dict
        Unused here; kept so all workers share one signature.
    dtype : working floating dtype.
    n_iter : int, maximum number of reweighting iterations.
    gamma, tau : floats, regularization scale and reweighting exponent.
    tolerance : float, per-column convergence threshold on coefficient change.

    Returns
    -------
    ndarray with `data`'s shape: the denoised slab (zeros when the mask
    selects no block).
    """
    orig_shape = data.shape
    mask_array = im2col_nd(mask, block_size[:-1], overlap[:-1])
    # Train only on columns whose block is more than half inside the mask.
    train_idx = np.sum(mask_array, axis=0) > (mask_array.shape[0] / 2.)

    # If mask is empty, return a bunch of zeros as blocks
    if not np.any(train_idx):
        return np.zeros_like(data)

    X = im2col_nd(data, block_size, overlap)
    # Per-column noise level: median variance over each spatial block.
    var_mat = np.median(
        im2col_nd(variance, block_size[:-1], overlap[:-1])[:, train_idx], axis=0)
    X_full_shape = X.shape
    X = X[:, train_idx].astype(dtype)

    param_alpha['L'] = int(0.5 * X.shape[0])
    D = param_alpha['D']
    alpha = lil_matrix((D.shape[1], X.shape[1]))
    W = np.ones(alpha.shape, dtype=dtype, order='F')

    DtD = np.dot(D.T, D)
    DtX = np.dot(D.T, X)
    DtXW = np.empty_like(DtX, order='F')

    alpha_old = np.ones(alpha.shape, dtype=dtype)
    # FIX: np.bool was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin bool is the documented replacement.
    has_converged = np.zeros(alpha.shape[1], dtype=bool)
    arr = np.empty(alpha.shape)

    # eps is a data-driven floor for the reweighting: noise propagated
    # through D.T, taken column-wise.
    xi = np.random.randn(X.shape[0], X.shape[1]) * var_mat
    eps = np.max(np.abs(np.dot(D.T, xi)), axis=0)

    for _ in range(n_iter):
        not_converged = np.equal(has_converged, False)
        DtXW[:, not_converged] = DtX[:, not_converged] / W[:, not_converged]

        for i in range(alpha.shape[1]):
            if not has_converged[i]:
                param_alpha['lambda1'] = var_mat[i] * (
                    X.shape[0] + gamma * np.sqrt(2 * X.shape[0]))
                DtDW = (1. / W[..., None, i]) * DtD * (1. / W[:, i])
                alpha[:, i:i + 1] = spams.lasso(X[:, i:i + 1],
                                                Q=np.asfortranarray(DtDW),
                                                q=DtXW[:, i:i + 1],
                                                **param_alpha)

        alpha.toarray(out=arr)
        nonzero_ind = arr != 0
        # Undo the reweighting on the active coefficients.
        arr[nonzero_ind] /= W[nonzero_ind]
        has_converged = np.max(np.abs(alpha_old - arr), axis=0) < tolerance

        if np.all(has_converged):
            break

        alpha_old[:] = arr
        W[:] = 1. / (np.abs(alpha_old**tau) + eps)

    # Columns with many active atoms count less during reassembly.
    weights = np.ones(X_full_shape[1], dtype=dtype, order='F')
    weights[train_idx] = 1. / (alpha.getnnz(axis=0) + 1.)

    X = np.zeros(X_full_shape, dtype=dtype, order='F')
    X[:, train_idx] = np.dot(D, arr)
    return col2im_nd(X, block_size, orig_shape, overlap, weights)
def local_denoise(data, block_size, overlap, variance, n_iter=10, mask=None,
                  dtype=np.float64, n_cores=None, use_threading=False,
                  verbose=False, mp_method=None):
    """Train a dictionary on `data` and denoise it slab-by-slab with `processer`.

    Parameters
    ----------
    data : 4-D ndarray to denoise.
    block_size, overlap : tuples for block extraction.
    variance : ndarray, local noise variance (same spatial extent as data).
    n_iter : int, iterations handed to `processer`.
    mask : 3-D bool ndarray or None (None means process everything).
    dtype : working floating dtype.
    n_cores : worker count for multiprocessing / spams.
    use_threading : if True, run slabs serially via starmap instead of the
        multiprocessing helper.
    verbose : enable INFO logging.
    mp_method : multiprocessing start method forwarded to `multiprocesser`.

    Returns
    -------
    float32 ndarray of data's shape: the averaged denoised volume.
    """
    if verbose:
        logger.setLevel(logging.INFO)

    if mask is None:
        # FIX: np.bool was removed in NumPy 1.24 — use the builtin bool.
        mask = np.ones(data.shape[:-1], dtype=bool)

    # no overlapping blocks for training
    no_over = (0, 0, 0, 0)
    X = im2col_nd(data, block_size, no_over)

    # Solving for D: positivity-constrained dictionary learning with spams.
    param_alpha = {}
    param_alpha['pos'] = True
    param_alpha['mode'] = 1

    param_D = {}
    param_D['verbose'] = False
    param_D['posAlpha'] = True
    param_D['posD'] = True
    param_D['mode'] = 2
    param_D['lambda1'] = 1.2 / np.sqrt(np.prod(block_size))
    param_D['K'] = int(2 * np.prod(block_size))
    param_D['iter'] = 150
    param_D['batchsize'] = 500
    param_D['numThreads'] = n_cores

    # NOTE(review): param_alpha was created empty just above, so this branch
    # is currently dead; kept for parity with variants that accept a warm start.
    if 'D' in param_alpha:
        param_D['D'] = param_alpha['D']

    mask_col = im2col_nd(np.broadcast_to(mask[..., None], data.shape),
                         block_size, no_over)
    train_idx = np.sum(mask_col, axis=0) > (mask_col.shape[0] / 2.)

    # Keep mostly-inside-mask, non-empty columns, unit-normalized, Fortran order.
    train_data = X[:, train_idx]
    train_data = np.asfortranarray(train_data[:, np.any(train_data != 0, axis=0)],
                                   dtype=dtype)
    train_data /= np.sqrt(np.sum(train_data**2, axis=0, keepdims=True), dtype=dtype)

    param_alpha['D'] = spams.trainDL(train_data, **param_D)
    param_alpha['D'] /= np.sqrt(
        np.sum(param_alpha['D']**2, axis=0, keepdims=True, dtype=dtype))
    param_D['D'] = param_alpha['D']
    del train_data, X, mask_col

    # Threaded/serial mode keeps spams multi-threaded; with multiprocessing,
    # each worker runs single-threaded and parallelism comes from processes.
    if use_threading or (n_cores == 1):
        param_alpha['numThreads'] = n_cores
        param_D['numThreads'] = n_cores
    else:
        param_alpha['numThreads'] = 1
        param_D['numThreads'] = 1

    arglist = ((data[:, :, k:k + block_size[2]],
                mask[:, :, k:k + block_size[2]],
                variance[:, :, k:k + block_size[2]],
                block_size,
                overlap,
                param_alpha,
                param_D,
                dtype,
                n_iter)
               for k in range(data.shape[2] - block_size[2] + 1))

    if use_threading:
        data_denoised = starmap(processer, arglist)
    else:
        time_multi = time()
        parallel_processer = multiprocesser(processer, n_cores=n_cores,
                                            mp_method=mp_method)
        data_denoised = parallel_processer(arglist)
        logger.info('Multiprocessing done in {0:.2f} mins.'.format(
            (time() - time_multi) / 60.))

    # Put together the multiprocessed results
    data_subset = np.zeros_like(data, dtype=np.float32)
    divider = np.zeros_like(data, dtype=np.int16)

    for k, content in enumerate(data_denoised):
        data_subset[:, :, k:k + block_size[2]] += content
        divider[:, :, k:k + block_size[2]] += 1

    data_subset /= divider
    return data_subset
def _processer(data, mask, variance, block_size, overlap, param_alpha, param_D,
               dtype=np.float64, n_iter=10, gamma=3., tau=1., tolerance=1e-5):
    """Denoise one slab by iteratively reweighted positive lasso (worker body).

    Same contract as `processer`: blocks of `data` mostly inside `mask` are
    sparse-coded against `param_alpha['D']` with spams.lasso, then the
    weighted reconstruction is reassembled with col2im_nd.

    Parameters
    ----------
    data, mask, variance : ndarrays over the same spatial extent.
    block_size, overlap : tuples for im2col_nd / col2im_nd.
    param_alpha : dict of spams.lasso options, with dictionary under 'D'.
        Mutated in place ('L', 'mode', 'pos', 'lambda1').
    param_D : dict, unused here (uniform worker signature).
    dtype : working floating dtype.
    n_iter : int, maximum reweighting iterations.
    gamma, tau : floats, regularization scale and reweighting exponent.
    tolerance : float, convergence threshold (new keyword, default keeps the
        previous hard-coded 1e-5 so existing callers are unaffected).

    Returns
    -------
    ndarray with data's shape (zeros when the mask selects no block).
    """
    orig_shape = data.shape
    mask_array = im2col_nd(mask, block_size[:3], overlap[:3])
    # Train only on columns whose block is more than half inside the mask.
    train_idx = np.sum(mask_array, axis=0) > mask_array.shape[0] / 2

    # If mask is empty, return a bunch of zeros as blocks
    if not np.any(train_idx):
        return np.zeros_like(data)

    X = im2col_nd(data, block_size, overlap)
    # Per-column noise level: median variance over each block
    # (variance is cropped to data's last-axis length first).
    var_mat = np.median(
        im2col_nd(variance[..., 0:orig_shape[-1]], block_size,
                  overlap)[:, train_idx], axis=0).astype(dtype)
    X_full_shape = X.shape
    X = X[:, train_idx]

    param_alpha['L'] = int(0.5 * X.shape[0])
    D = param_alpha['D']
    alpha = lil_matrix((D.shape[1], X.shape[1]))
    W = np.ones(alpha.shape, dtype=dtype, order='F')

    DtD = np.dot(D.T, D)
    DtX = np.dot(D.T, X)
    DtXW = np.empty_like(DtX, order='F')

    alpha_old = np.ones(alpha.shape, dtype=dtype)
    # FIX: np.bool was removed in NumPy 1.24 — use the builtin bool.
    has_converged = np.zeros(alpha.shape[1], dtype=bool)

    # eps: data-driven floor for the reweighting, noise propagated through D.T.
    xi = np.random.randn(X.shape[0], X.shape[1]) * var_mat
    eps = np.max(np.abs(np.dot(D.T, xi)), axis=0)

    param_alpha['mode'] = 1
    param_alpha['pos'] = True

    for _ in range(n_iter):
        not_converged = np.equal(has_converged, False)
        DtXW[:, not_converged] = DtX[:, not_converged] / W[:, not_converged]

        for i in range(alpha.shape[1]):
            if not has_converged[i]:
                param_alpha['lambda1'] = var_mat[i] * (
                    X.shape[0] + gamma * np.sqrt(2 * X.shape[0]))
                DtDW = (1. / W[..., None, i]) * DtD * (1. / W[:, i])
                alpha[:, i:i + 1] = spams.lasso(X[:, i:i + 1],
                                                Q=np.asfortranarray(DtDW),
                                                q=DtXW[:, i:i + 1],
                                                **param_alpha)

        arr = alpha.toarray()
        nonzero_ind = arr != 0
        # Undo the reweighting on the active coefficients.
        arr[nonzero_ind] /= W[nonzero_ind]
        has_converged = np.max(np.abs(alpha_old - arr), axis=0) < tolerance

        if np.all(has_converged):
            break

        alpha_old = arr
        W[:] = 1. / (np.abs(alpha_old**tau) + eps)

    X = np.dot(D, arr)

    # Columns with many active atoms count less during reassembly.
    weights = np.ones(X_full_shape[1], dtype=dtype, order='F')
    weights[train_idx] = 1. / (alpha.getnnz(axis=0) + 1.)

    X2 = np.zeros(X_full_shape, dtype=dtype, order='F')
    X2[:, train_idx] = X
    return col2im_nd(X2, block_size, orig_shape, overlap, weights)
def denoise(data, block_size, overlap, param_alpha, param_D, variance,
            n_iter=10, mask=None, dtype=np.float64):
    """Train a positivity-constrained dictionary on `data`, then denoise it
    slab-by-slab with `processer` over a multiprocessing pool and average the
    overlapping slabs back together."""
    # no overlapping blocks for training
    no_over = (0, 0, 0, 0)
    X = im2col_nd(data, block_size, no_over)

    # Sparse-coding and dictionary-learning settings for spams.
    param_alpha['pos'] = True
    param_alpha['mode'] = 2
    param_alpha['lambda1'] = 1.2 / np.sqrt(np.prod(block_size))

    param_D['verbose'] = False
    param_D['posAlpha'] = True
    param_D['posD'] = True
    param_D['mode'] = 2
    param_D['lambda1'] = 1.2 / np.sqrt(np.prod(block_size))
    param_D['K'] = int(2 * np.prod(block_size))
    param_D['iter'] = 150
    param_D['batchsize'] = 500

    # Warm-start the dictionary when the caller supplies one.
    if 'D' in param_alpha:
        param_D['D'] = param_alpha['D']

    mask_col = im2col_nd(np.broadcast_to(mask[..., None], data.shape),
                         block_size, no_over)
    train_idx = np.sum(mask_col, axis=0) > mask_col.shape[0] / 2

    # Keep mostly-inside-mask, non-empty columns, unit-normalized, Fortran order.
    train_data = X[:, train_idx]
    train_data = np.asfortranarray(
        train_data[:, np.any(train_data != 0, axis=0)], dtype=dtype)
    train_data /= np.sqrt(np.sum(train_data**2, axis=0, keepdims=True),
                          dtype=dtype)

    param_alpha['D'] = spams.trainDL(train_data, **param_D)
    param_alpha['D'] /= np.sqrt(
        np.sum(param_alpha['D']**2, axis=0, keepdims=True, dtype=dtype))
    param_D['D'] = param_alpha['D']
    del train_data

    # Workers run single-threaded; parallelism comes from the process pool.
    n_cores = param_alpha['numThreads']
    param_alpha['numThreads'] = 1
    param_D['numThreads'] = 1

    time_multi = time()
    pool = Pool(processes=n_cores)

    arglist = [(data[:, :, k:k + block_size[2]],
                mask[:, :, k:k + block_size[2]],
                variance[:, :, k:k + block_size[2]],
                block_size,
                overlap,
                param_alpha,
                param_D,
                dtype,
                n_iter)
               for k in range(data.shape[2] - block_size[2] + 1)]

    data_denoised = pool.map(processer, arglist)
    pool.close()
    pool.join()

    param_alpha['numThreads'] = n_cores
    param_D['numThreads'] = n_cores

    print('Multiprocessing done in {0:.2f} mins.'.format((time() - time_multi) / 60.))

    # Recombine the overlapping z-slabs by averaging.
    # NOTE(review): np.zeros_like(data) inherits data's dtype — the in-place
    # division below presumably expects floating-point input; confirm callers.
    data_subset = np.zeros_like(data)
    divider = np.zeros_like(data, dtype=np.int16)

    for k, chunk in enumerate(data_denoised):
        data_subset[:, :, k:k + block_size[2]] += chunk
        divider[:, :, k:k + block_size[2]] += 1

    data_subset /= divider
    return data_subset
def local_denoise(data, block_size, overlap, variance, n_iter=10, mask=None,
                  dtype=np.float64, n_cores=None, verbose=False):
    """Train a dictionary on `data` and denoise it slab-by-slab over a
    multiprocessing pool, averaging the overlapping slabs.

    Parameters
    ----------
    data : 4-D ndarray to denoise.
    block_size, overlap : tuples for block extraction.
    variance : ndarray, local noise variance (same spatial extent as data).
    n_iter : int, iterations handed to `processer`.
    mask : 3-D bool ndarray or None (None means process everything).
    dtype : working floating dtype.
    n_cores : process count for the pool.
    verbose : enable INFO logging.

    Returns
    -------
    float32 ndarray of data's shape: the averaged denoised volume.
    """
    if verbose:
        logger.setLevel(logging.INFO)

    if mask is None:
        # FIX: np.bool was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin bool is the documented replacement.
        mask = np.ones(data.shape[:-1], dtype=bool)

    # no overlapping blocks for training
    no_over = (0, 0, 0, 0)
    X = im2col_nd(data, block_size, no_over)

    # Solving for D: positivity-constrained dictionary learning with spams.
    param_alpha = {}
    param_alpha['pos'] = True
    param_alpha['mode'] = 1

    param_D = {}
    param_D['verbose'] = False
    param_D['posAlpha'] = True
    param_D['posD'] = True
    param_D['mode'] = 2
    param_D['lambda1'] = 1.2 / np.sqrt(np.prod(block_size))
    param_D['K'] = int(2 * np.prod(block_size))
    param_D['iter'] = 150
    param_D['batchsize'] = 500

    # NOTE(review): param_alpha was created empty just above, so this branch
    # is currently dead; kept for parity with variants that accept a warm start.
    if 'D' in param_alpha:
        param_D['D'] = param_alpha['D']

    mask_col = im2col_nd(np.broadcast_to(mask[..., None], data.shape),
                         block_size, no_over)
    train_idx = np.sum(mask_col, axis=0) > (mask_col.shape[0] / 2.)

    # Keep mostly-inside-mask, non-empty columns, unit-normalized, Fortran order.
    train_data = X[:, train_idx]
    train_data = np.asfortranarray(train_data[:, np.any(train_data != 0, axis=0)],
                                   dtype=dtype)
    train_data /= np.sqrt(np.sum(train_data**2, axis=0, keepdims=True), dtype=dtype)

    param_alpha['D'] = spams.trainDL(train_data, **param_D)
    param_alpha['D'] /= np.sqrt(
        np.sum(param_alpha['D']**2, axis=0, keepdims=True, dtype=dtype))
    param_D['D'] = param_alpha['D']
    del train_data, X

    # Each worker runs single-threaded; parallelism comes from the pool.
    param_alpha['numThreads'] = 1
    param_D['numThreads'] = 1

    time_multi = time()
    pool = Pool(processes=n_cores)

    # Direct comprehension instead of the original zip(range, repeat, ...)
    # construction — same tuples, simpler to read.
    arglist = [(data[:, :, k:k + block_size[2]],
                mask[:, :, k:k + block_size[2]],
                variance[:, :, k:k + block_size[2]],
                block_size,
                overlap,
                param_alpha,
                param_D,
                dtype,
                n_iter)
               for k in range(data.shape[2] - block_size[2] + 1)]

    data_denoised = pool.map(processer, arglist)
    pool.close()
    pool.join()

    logger.info('Multiprocessing done in {0:.2f} mins.'.format(
        (time() - time_multi) / 60.))

    # Put together the multiprocessed results
    data_subset = np.zeros_like(data, dtype=np.float32)
    divider = np.zeros_like(data, dtype=np.int16)

    for k, content in enumerate(data_denoised):
        data_subset[:, :, k:k + block_size[2]] += content
        divider[:, :, k:k + block_size[2]] += 1

    data_subset /= divider
    return data_subset