def test_sharpness():
    # check the edge-preserving nature
    phantom = np.ones((30, 30, 30)) * 100
    phantom[10:20, 10:20, 10:20] = 50
    phantom[20:30, 20:30, 20:30] = 0
    noisy = phantom + 20 * np.random.standard_normal((30, 30, 30))

    # small patch/block: measure the residual deviation of the 50-unit step
    den_small = non_local_means(
        noisy, sigma=400, rician=False, patch_radius=1, block_radius=1)
    edg1 = np.abs(
        np.mean(den_small[8, 10:20, 10:20] - den_small[12, 10:20, 10:20]) - 50)
    print("Edge gradient smaller patch", edg1)

    # larger patch/block smooths more, so the edge should blur further
    den_large = non_local_means(
        noisy, sigma=400, rician=False, patch_radius=2, block_radius=2)
    edg2 = np.abs(
        np.mean(den_large[8, 10:20, 10:20] - den_large[12, 10:20, 10:20]) - 50)
    print("Edge gradient larger patch", edg2)

    # ASCM blends the two denoised volumes; its edge should stay close to
    # the sharper (small-patch) result
    blended = adaptive_soft_matching(phantom, den_small, den_large, 400)
    edg = np.abs(
        np.mean(blended[8, 10:20, 10:20] - blended[12, 10:20, 10:20]) - 50)
    print("Edge gradient ASCM", edg)

    assert_(edg2 > edg1)
    assert_(edg2 > edg)
    assert_(np.abs(edg1 - edg) < 1.5)
def test_sharpness():
    """Check the edge-preserving nature of ASCM versus plain NLMEANS."""

    def edge_step(vol):
        # residual deviation of the 50-unit intensity step across x=8..12
        return np.abs(
            np.mean(vol[8, 10:20, 10:20] - vol[12, 10:20, 10:20]) - 50)

    S0 = np.ones((30, 30, 30)) * 100
    S0[10:20, 10:20, 10:20] = 50
    S0[20:30, 20:30, 20:30] = 0
    S0_noise = S0 + 20 * np.random.standard_normal((30, 30, 30))

    S0n1 = non_local_means(S0_noise, sigma=400, rician=False,
                           patch_radius=1, block_radius=1)
    edg1 = edge_step(S0n1)
    print("Edge gradient smaller patch", edg1)

    S0n2 = non_local_means(S0_noise, sigma=400, rician=False,
                           patch_radius=2, block_radius=2)
    edg2 = edge_step(S0n2)
    print("Edge gradient larger patch", edg2)

    S0n = adaptive_soft_matching(S0, S0n1, S0n2, 400)
    edg = edge_step(S0n)
    print("Edge gradient ASCM", edg)

    assert_(edg2 > edg1)
    assert_(edg2 > edg)
    assert_(np.abs(edg1 - edg) < 1.5)
def test_ascm_accuracy():
    """Compare ASCM output against the stored reference volume.

    FIX: use the non-deprecated data-access API — ``dpd.get_fnames`` and
    ``np.asanyarray(img.dataobj)`` — instead of the legacy
    ``dpd.get_data`` / ``img.get_data()`` pair (removed from nibabel),
    matching the style of the other accuracy test in this file.
    """
    test_ascm_data_ref = np.asanyarray(
        nib.load(dpd.get_fnames("ascm_test")).dataobj)
    test_data = np.asanyarray(nib.load(dpd.get_fnames("aniso_vox")).dataobj)

    # the test data was constructed in this manner
    mask = test_data > 50
    sigma = estimate_sigma(test_data, N=4)

    den_small = non_local_means(test_data, sigma=sigma, mask=mask,
                                patch_radius=1, block_radius=1, rician=True)
    den_large = non_local_means(test_data, sigma=sigma, mask=mask,
                                patch_radius=2, block_radius=1, rician=True)

    S0n = np.array(
        adaptive_soft_matching(test_data, den_small, den_large, sigma[0]))
    assert_array_almost_equal(S0n, test_ascm_data_ref)
def test_ascm_rmse_with_nlmeans():
    # checks the smoothness
    S0 = np.ones((30, 30, 30)) * 100
    S0[10:20, 10:20, 10:20] = 50
    S0[20:30, 20:30, 20:30] = 0
    S0_noise = S0 + 20 * np.random.standard_normal((30, 30, 30))

    def rel_error(vol):
        # normalized L1 error against the clean phantom; hoisted into a
        # helper because the original repeated this expression seven times
        return np.sum(np.abs(S0 - vol)) / np.sum(S0)

    print("Original RMSE", rel_error(S0_noise))

    S0n1 = non_local_means(S0_noise, sigma=400, rician=False,
                           patch_radius=1, block_radius=1)
    print("Smaller patch RMSE", rel_error(S0n1))

    S0n2 = non_local_means(S0_noise, sigma=400, rician=False,
                           patch_radius=2, block_radius=2)
    print("Larger patch RMSE", rel_error(S0n2))

    S0n = adaptive_soft_matching(S0, S0n1, S0n2, 400)
    print("ASCM RMSE", rel_error(S0n))

    # ASCM must beat both the small-patch result and the raw noisy input
    assert_(rel_error(S0n) < rel_error(S0n1))
    assert_(rel_error(S0n) < rel_error(S0_noise))
    assert_(90 < np.mean(S0n) < 110)
def test_ascm_accuracy():
    """ASCM output must match the stored reference volume."""
    ref_img = nib.load(dpd.get_fnames("ascm_test"))
    test_ascm_data_ref = np.asanyarray(ref_img.dataobj)
    test_data = np.asanyarray(nib.load(dpd.get_fnames("aniso_vox")).dataobj)

    # the test data was constructed in this manner
    mask = test_data > 50
    sigma = estimate_sigma(test_data, N=4)

    den_small = non_local_means(
        test_data, sigma=sigma, mask=mask, patch_radius=1,
        block_radius=1, rician=True)
    den_large = non_local_means(
        test_data, sigma=sigma, mask=mask, patch_radius=2,
        block_radius=1, rician=True)

    S0n = np.array(
        adaptive_soft_matching(test_data, den_small, den_large, sigma[0]))
    assert_array_almost_equal(S0n, test_ascm_data_ref)
def test_ascm_rmse_with_nlmeans():
    # checks the smoothness
    S0 = np.ones((30, 30, 30)) * 100
    S0[10:20, 10:20, 10:20] = 50
    S0[20:30, 20:30, 20:30] = 0
    S0_noise = S0 + 20 * np.random.standard_normal((30, 30, 30))

    # normalized L1 errors against the clean phantom, named for readability
    total = np.sum(S0)
    err_noise = np.sum(np.abs(S0 - S0_noise)) / total
    print("Original RMSE", err_noise)

    S0n1 = non_local_means(S0_noise, sigma=400, rician=False,
                           patch_radius=1, block_radius=1)
    err_small = np.sum(np.abs(S0 - S0n1)) / total
    print("Smaller patch RMSE", err_small)

    S0n2 = non_local_means(S0_noise, sigma=400, rician=False,
                           patch_radius=2, block_radius=2)
    err_large = np.sum(np.abs(S0 - S0n2)) / total
    print("Larger patch RMSE", err_large)

    S0n = adaptive_soft_matching(S0, S0n1, S0n2, 400)
    err_ascm = np.sum(np.abs(S0 - S0n)) / total
    print("ASCM RMSE", err_ascm)

    assert_(err_ascm < err_small)
    assert_(err_ascm < err_noise)
    assert_(90 < np.mean(S0n) < 110)
def test_ascm_static():
    """A constant volume must come through ASCM essentially unchanged."""
    constant = 100 * np.ones((20, 20, 20), dtype='f8')
    den_sharp = non_local_means(constant, sigma=0, rician=False,
                                patch_radius=1, block_radius=1)
    den_smooth = non_local_means(constant, sigma=0, rician=False,
                                 patch_radius=2, block_radius=1)
    merged = adaptive_soft_matching(constant, den_sharp, den_smooth, 0)
    assert_array_almost_equal(constant, merged)
def test_nlmeans_boundary():
    # nlmeans preserves boundaries
    S0 = 100 + np.zeros((20, 20, 20))
    noise = 2 * np.random.standard_normal((20, 20, 20))
    S0 += noise
    S0[:10, :10, :10] = 300 + noise[:10, :10, :10]
    # BUG FIX: the original discarded the return value and asserted on the
    # (noisy) input, so the denoiser's output was never checked. Keep the
    # result and verify the intensity boundary on the denoised volume.
    S0n = non_local_means(S0, sigma=np.std(noise), rician=False)
    assert_(S0n[9, 9, 9] > 290)
    assert_(S0n[10, 10, 10] < 110)
def test_nlmeans_dtype():
    """non_local_means must return the same dtype as its input."""
    # 4D float32 input
    vol4d = 200 * np.ones((20, 20, 20, 3), dtype='f4')
    roi = np.zeros((20, 20, 20))
    roi[10:14, 10:14, 10:14] = 1
    den4d = non_local_means(vol4d, sigma=1, mask=roi, rician=True)
    assert_equal(vol4d.dtype, den4d.dtype)

    # 3D unsigned-integer input
    vol3d = 200 * np.ones((20, 20, 20), dtype=np.uint16)
    roi = np.zeros((20, 20, 20))
    roi[10:14, 10:14, 10:14] = 1
    den3d = non_local_means(vol3d, sigma=1, mask=roi, rician=True)
    assert_equal(vol3d.dtype, den3d.dtype)
def test_ascm_random_noise():
    """ASCM on pure noise shrinks the value range and preserves the mean."""
    noisy = 100 + 2 * np.random.standard_normal((22, 23, 30))

    den_small = non_local_means(noisy, sigma=1, rician=False,
                                patch_radius=1, block_radius=1)
    den_large = non_local_means(noisy, sigma=1, rician=False,
                                patch_radius=2, block_radius=1)
    out = adaptive_soft_matching(noisy, den_small, den_large, 1)

    print(noisy.mean(), noisy.min(), noisy.max())
    print(out.mean(), out.min(), out.max())

    assert_(out.min() > noisy.min())
    assert_(out.max() < noisy.max())
    assert_equal(np.round(out.mean()), 100)
def test_nlmeans_random_noise():
    """Denoising random noise, with and without a mask, shrinks the value
    range inside the (possibly trivial) mask and preserves the mean.

    FIX: the original loop body contained the identical
    denoise-and-assert sequence twice (copy-paste duplication) — the
    second run added no coverage and doubled the runtime; removed.
    """
    S0 = 100 + 2 * np.random.standard_normal((22, 23, 30))

    masker = np.zeros(S0.shape[:3]).astype(bool)
    masker[8:15, 8:15, 8:15] = 1

    for mask in [None, masker]:
        S0nb = non_local_means(S0, sigma=np.std(S0), rician=False, mask=mask)
        assert_(S0nb[mask].min() > S0[mask].min())
        assert_(S0nb[mask].max() < S0[mask].max())
        assert_equal(np.round(S0nb[mask].mean()), 100)
def test_nlmeans_4D_and_mask():
    """Single-voxel mask: denoised inside, zero outside, shape preserved."""
    vol = 200 * np.ones((20, 20, 20, 3), dtype='f8')
    mask = np.zeros((20, 20, 20))
    mask[10, 10, 10] = 1

    den = non_local_means(vol, sigma=1, mask=mask, rician=True)

    assert_equal(vol.shape, den.shape)
    assert_equal(np.round(den[10, 10, 10]), 200)
    assert_equal(den[8, 8, 8], 0)
def test_nlmeans_random_noise():
    """Voxel-wise sigma array: range shrinks, mean stays at 100."""
    noisy = 100 + 2 * np.random.standard_normal((22, 23, 30))
    sigma_vol = np.ones((22, 23, 30)) * np.std(noisy)
    den = non_local_means(noisy, sigma=sigma_vol, rician=False)

    print(noisy.mean(), noisy.min(), noisy.max())
    print(den.mean(), den.min(), den.max())

    assert_(den.min() > noisy.min())
    assert_(den.max() < noisy.max())
    assert_equal(np.round(den.mean()), 100)
def test_nlmeans_static():
    """A constant volume must be returned (almost) unchanged."""
    flat = 100 * np.ones((20, 20, 20), dtype='f8')
    denoised = non_local_means(flat, sigma=1.0, rician=False)
    assert_array_almost_equal(flat, denoised)
``non_local_means`` denoising. For ``non_local_means`` first we need to estimate the standard deviation of the noise. We use N=4 since the Sherbrooke dataset was acquired on a 1.5T Siemens scanner with a 4 array head coil. """ sigma = estimate_sigma(data, N=4) """ For the denoised version of the original data which preserves sharper features, we perform non-local means with smaller patch size. """ den_small = non_local_means( data, sigma=sigma, mask=mask, patch_radius=1, block_radius=1, rician=True) """ For the denoised version of the original data that implies more smoothing, we perform non-local means with larger patch size. """ den_large = non_local_means( data, sigma=sigma, mask=mask, patch_radius=2, block_radius=1,
""" In order to call ``non_local_means`` first you need to estimate the standard deviation of the noise. We use N=4 since the Sherbrooke dataset was acquired on a 1.5T Siemens scanner with a 4 array head coil. """ sigma = estimate_sigma(data, N=4) t = time() """ Calling the main function ``non_local_means`` """ den = non_local_means(data, sigma=sigma, mask=mask, patch_radius=1, block_radius=1, rician=True) print("total time", time() - t) t = time() den = nlmeans(data, sigma=sigma, mask=mask, patch_radius=1, block_radius=1, rician=True) print("total time", time() - t) """
deviation of the noise. We use N=4 since the Sherbrooke dataset was acquired on a 1.5T Siemens scanner with a 4 array head coil. """ sigma = estimate_sigma(data, N=4) t = time() """ Calling the main function ``non_local_means`` """ den = non_local_means( data, sigma=sigma, mask=mask, patch_radius=1, block_radius=1, rician=True) print("total time", time() - t) t = time() den = nlmeans(data, sigma=sigma, mask=mask, patch_radius= 1, block_radius = 1, rician= True) print("total time", time() - t) """ Let us plot the axial slice of the denoised output """ axial_middle = data.shape[2] / 2