def test_wavelet_denoising_nd():
    """Denoise simple 1-D..4-D step images with both wavelet threshold methods."""
    rstate = np.random.RandomState(1234)
    for method in ['VisuShrink', 'BayesShrink']:
        for ndim in range(1, 5):
            # Generate a very simple test image
            if ndim < 3:
                img = 0.2*np.ones((128, )*ndim)
            else:
                img = 0.2*np.ones((16, )*ndim)
            img[(slice(5, 13), ) * ndim] = 0.8

            sigma = 0.1
            noisy = img + sigma * rstate.randn(*(img.shape))
            noisy = np.clip(noisy, 0, 1)

            # Mark H. 2018.08:
            #   The issue arises because when ndim in [1, 2]
            #   ``waverecn`` calls ``_match_coeff_dims``
            #   Which includes a numpy 1.15 deprecation.
            #   for larger number of dimensions _match_coeff_dims isn't called
            #   for some reason.
            anticipated_warnings = (PYWAVELET_ND_INDEXING_WARNING
                                    if ndim < 3 else None)
            with expected_warnings([anticipated_warnings]):
                # Verify that SNR is improved with internally estimated sigma
                denoised = restoration.denoise_wavelet(noisy, method=method)
            psnr_noisy = compare_psnr(img, noisy)
            psnr_denoised = compare_psnr(img, denoised)
            assert_(psnr_denoised > psnr_noisy)
def test_clear_border_non_binary_3d():
    """Labels touching the 3-D border are cleared; the input is not mutated."""
    outer_plane = [[1, 2, 3, 1, 2],
                   [3, 3, 3, 4, 2],
                   [3, 4, 3, 4, 2],
                   [3, 3, 2, 1, 2]]
    inner_plane = [[1, 2, 3, 1, 2],
                   [3, 3, 5, 4, 2],
                   [3, 4, 5, 4, 2],
                   [3, 3, 2, 1, 2]]
    image3d = np.array([outer_plane, inner_plane, outer_plane])

    result = clear_border(image3d)

    # Only label 5 is fully interior (middle plane, middle rows, middle
    # column); every other label touches the volume border and is removed.
    expected = np.zeros_like(image3d)
    expected[1, 1:3, 2] = 5
    assert_array_equal(result, expected)
    # clear_border must return a new array, not modify its input in place.
    assert_(not np.all(image3d == result))
def test_wavelet_denoising_levels():
    """Multi-level denoising should beat single level; bad level counts raise."""
    rstate = np.random.RandomState(1234)
    ndim = 2
    N = 256
    wavelet = 'db1'
    # Generate a very simple test image
    img = 0.2*np.ones((N, )*ndim)
    # FIX: index with a *tuple* of slices. Indexing with a list of slices
    # was deprecated in NumPy 1.15 and later became an error.
    img[(slice(5, 13), ) * ndim] = 0.8

    sigma = 0.1
    noisy = img + sigma * rstate.randn(*(img.shape))
    noisy = np.clip(noisy, 0, 1)

    denoised = restoration.denoise_wavelet(noisy, wavelet=wavelet)
    denoised_1 = restoration.denoise_wavelet(noisy, wavelet=wavelet,
                                             wavelet_levels=1)
    psnr_noisy = compare_psnr(img, noisy)
    psnr_denoised = compare_psnr(img, denoised)
    psnr_denoised_1 = compare_psnr(img, denoised_1)

    # multi-level case should outperform single level case
    assert_(psnr_denoised > psnr_denoised_1 > psnr_noisy)

    # invalid number of wavelet levels results in a ValueError
    max_level = pywt.dwt_max_level(np.min(img.shape),
                                   pywt.Wavelet(wavelet).dec_len)
    with testing.raises(ValueError):
        restoration.denoise_wavelet(
            noisy, wavelet=wavelet, wavelet_levels=max_level+1)
    with testing.raises(ValueError):
        restoration.denoise_wavelet(
            noisy, wavelet=wavelet, wavelet_levels=-1)
def test_mask():
    """Masked ramps must unwrap correctly in 2-D and via the 3-D code path."""
    length = 100
    ramps = [np.linspace(0, 4 * np.pi, length),
             np.linspace(0, 8 * np.pi, length),
             np.linspace(0, 6 * np.pi, length)]
    image = np.vstack(ramps)
    # FIX: use the builtin ``bool`` — the ``np.bool`` alias was deprecated in
    # NumPy 1.20 and removed in 1.24.
    mask_1d = np.ones((length,), dtype=bool)
    mask_1d[0] = mask_1d[-1] = False
    for i in range(len(ramps)):
        # mask all ramps but the i'th one
        mask = np.zeros(image.shape, dtype=bool)
        mask |= mask_1d.reshape(1, -1)
        mask[i, :] = False  # unmask i'th ramp

        image_wrapped = np.ma.array(np.angle(np.exp(1j * image)), mask=mask)
        image_unwrapped = unwrap_phase(image_wrapped)
        image_unwrapped -= image_unwrapped[0, 0]  # remove phase shift

        # The end of the unwrapped array should have value equal to the
        # endpoint of the unmasked ramp
        assert_array_almost_equal_nulp(image_unwrapped[:, -1], image[i, -1])
        assert_(np.ma.isMaskedArray(image_unwrapped))

        # Same tests, but forcing use of the 3D unwrapper by reshaping
        with expected_warnings(['length 1 dimension']):
            shape = (1,) + image_wrapped.shape
            image_wrapped_3d = image_wrapped.reshape(shape)
            image_unwrapped_3d = unwrap_phase(image_wrapped_3d)
            # remove phase shift
            image_unwrapped_3d -= image_unwrapped_3d[0, 0, 0]
        assert_array_almost_equal_nulp(image_unwrapped_3d[:, :, -1],
                                       image[i, -1])
def test_wavelet_threshold():
    """Exercise _wavelet_threshold with a user threshold and its error paths."""
    rstate = np.random.RandomState(1234)

    img = astro_gray
    sigma = 0.1
    noisy = img + sigma * rstate.randn(*(img.shape))
    noisy = np.clip(noisy, 0, 1)

    # employ a single, user-specified threshold instead of BayesShrink sigmas
    with expected_warnings([PYWAVELET_ND_INDEXING_WARNING]):
        denoised = _wavelet_threshold(noisy, wavelet='db1', method=None,
                                      threshold=sigma)
    psnr_noisy = compare_psnr(img, noisy)
    psnr_denoised = compare_psnr(img, denoised)
    assert_(psnr_denoised > psnr_noisy)

    # either method or threshold must be defined
    with testing.raises(ValueError):
        _wavelet_threshold(noisy, wavelet='db1', method=None, threshold=None)

    # warns if a threshold is provided in a case where it would be ignored
    with expected_warnings(["Thresholding method ",
                            PYWAVELET_ND_INDEXING_WARNING]):
        _wavelet_threshold(noisy, wavelet='db1', method='BayesShrink',
                           threshold=sigma)
def check_wrap_around(ndim, axis):
    """Verify unwrap_phase with/without wrap-around along one axis of an n-D ramp."""
    # create a ramp, but with the last pixel along axis equalling the first
    elements = 100
    ramp = np.linspace(0, 12 * np.pi, elements)
    ramp[-1] = ramp[0]
    image = ramp.reshape(tuple([elements if n == axis else 1
                                for n in range(ndim)]))
    image_wrapped = np.angle(np.exp(1j * image))

    index_first = tuple([0] * ndim)
    index_last = tuple([-1 if n == axis else 0 for n in range(ndim)])
    # unwrap the image without wrap around
    with warnings.catch_warnings():
        # We do not want warnings about length 1 dimensions
        warnings.simplefilter("ignore")
        image_unwrap_no_wrap_around = unwrap_phase(image_wrapped, seed=0)
    print('endpoints without wrap_around:',
          image_unwrap_no_wrap_around[index_first],
          image_unwrap_no_wrap_around[index_last])
    # without wrap around, the endpoints of the image should differ
    assert_(abs(image_unwrap_no_wrap_around[index_first] -
                image_unwrap_no_wrap_around[index_last]) > np.pi)
    # unwrap the image with wrap around
    wrap_around = [n == axis for n in range(ndim)]
    with warnings.catch_warnings():
        # We do not want warnings about length 1 dimensions
        warnings.simplefilter("ignore")
        image_unwrap_wrap_around = unwrap_phase(image_wrapped, wrap_around,
                                                seed=0)
    print('endpoints with wrap_around:',
          image_unwrap_wrap_around[index_first],
          image_unwrap_wrap_around[index_last])
    # with wrap around, the endpoints of the image should be equal
    assert_almost_equal(image_unwrap_wrap_around[index_first],
                        image_unwrap_wrap_around[index_last])
def test_denoise_tv_chambolle_1d():
    """Apply the TV denoising algorithm on a 1D sinusoid."""
    x = 125 + 100*np.sin(np.linspace(0, 8*np.pi, 1000))
    x += 20 * np.random.rand(x.size)
    x = np.clip(x, 0, 255)
    res = restoration.denoise_tv_chambolle(x.astype(np.uint8), weight=0.1)
    # FIX: ``np.float`` (an alias of builtin ``float``) was removed in
    # NumPy 1.24; comparing against ``float`` is semantically identical.
    assert_(res.dtype == float)
    assert_(res.std() * 255 < x.std())
def test_no_denoising_for_small_h():
    """A tiny h keeps every patch essentially unchanged."""
    noisy = np.zeros((40, 40))
    noisy[10:-10, 10:-10] = 1.
    noisy += 0.3*np.random.randn(*noisy.shape)
    # very small h should result in no averaging with other patches
    for fast in (True, False):
        out = restoration.denoise_nl_means(noisy, 7, 5, 0.01,
                                           fast_mode=fast,
                                           multichannel=True)
        assert_(np.allclose(out, noisy))
def test_denoise_tv_chambolle_3d():
    """Apply the TV denoising algorithm on a 3D image representing a sphere."""
    x, y, z = np.ogrid[0:40, 0:40, 0:40]
    mask = (x - 22)**2 + (y - 20)**2 + (z - 17)**2 < 8**2
    # FIX: ``np.float`` was removed in NumPy 1.24; builtin ``float``
    # (== float64) is the drop-in replacement.
    mask = 100 * mask.astype(float)
    mask += 60
    mask += 20 * np.random.rand(*mask.shape)
    mask[mask < 0] = 0
    mask[mask > 255] = 255
    res = restoration.denoise_tv_chambolle(mask.astype(np.uint8), weight=0.1)
    assert_(res.dtype == float)
    assert_(res.std() * 255 < mask.std())
def test_denoise_tv_bregman_3d():
    """A stronger TV-Bregman weight smooths checkerboard cells more."""
    noisy = checkerboard.copy()
    # add some random noise
    noisy += 0.5 * noisy.std() * np.random.rand(*noisy.shape)
    noisy = np.clip(noisy, 0, 1)

    weak = restoration.denoise_tv_bregman(noisy, weight=10)
    strong = restoration.denoise_tv_bregman(noisy, weight=5)

    # make sure noise is reduced in the checkerboard cells
    cell = (slice(30, 45), slice(5, 15))
    assert_(noisy[cell].std() > weak[cell].std())
    assert_(weak[cell].std() > strong[cell].std())
def test_denoise_nl_means_multichannel():
    """True 3-D denoising beats treating the last axis as channels."""
    # for true 3D data, 3D denoising is better than denoising as 2D+channels
    clean = np.zeros((13, 10, 8))
    clean[6, 4:6, 2:-2] = 1.
    sigma = 0.3
    noisy = clean + sigma * np.random.randn(*clean.shape)
    as_channels = restoration.denoise_nl_means(
        noisy, 3, 4, 0.6 * sigma, fast_mode=True, multichannel=True)
    as_volume = restoration.denoise_nl_means(
        noisy, 3, 4, 0.6 * sigma, fast_mode=True, multichannel=False)
    assert_(compare_psnr(clean, as_volume) > compare_psnr(clean, as_channels))
def test_denoise_bilateral_color():
    """Larger bilateral sigmas smooth checkerboard cells more."""
    noisy = checkerboard.copy()[:50, :50]
    # add some random noise
    noisy += 0.5 * noisy.std() * np.random.rand(*noisy.shape)
    noisy = np.clip(noisy, 0, 1)

    mild = restoration.denoise_bilateral(noisy, sigma_color=0.1,
                                         sigma_spatial=10, multichannel=True)
    strong = restoration.denoise_bilateral(noisy, sigma_color=0.2,
                                           sigma_spatial=20, multichannel=True)

    # make sure noise is reduced in the checkerboard cells
    cell = (slice(30, 45), slice(5, 15))
    assert_(noisy[cell].std() > mild[cell].std())
    assert_(mild[cell].std() > strong[cell].std())
def test_wavelet_denoising(img, multichannel, convert2ycbcr):
    """Parametrized wavelet denoising: PSNR must improve under each setting."""
    rstate = np.random.RandomState(1234)
    sigma = 0.1
    noisy = img + sigma * rstate.randn(*(img.shape))
    noisy = np.clip(noisy, 0, 1)

    # Verify that SNR is improved when true sigma is used
    with expected_warnings([PYWAVELET_ND_INDEXING_WARNING]):
        denoised = restoration.denoise_wavelet(noisy, sigma=sigma,
                                               multichannel=multichannel,
                                               convert2ycbcr=convert2ycbcr,
                                               rescale_sigma=True)
    psnr_noisy = peak_signal_noise_ratio(img, noisy)
    psnr_denoised = peak_signal_noise_ratio(img, denoised)
    assert_(psnr_denoised > psnr_noisy)

    # Verify that SNR is improved with internally estimated sigma
    with expected_warnings([PYWAVELET_ND_INDEXING_WARNING]):
        denoised = restoration.denoise_wavelet(noisy,
                                               multichannel=multichannel,
                                               convert2ycbcr=convert2ycbcr,
                                               rescale_sigma=True)
    psnr_noisy = peak_signal_noise_ratio(img, noisy)
    psnr_denoised = peak_signal_noise_ratio(img, denoised)
    assert_(psnr_denoised > psnr_noisy)

    # SNR is improved less with 1 wavelet level than with the default.
    with expected_warnings([PYWAVELET_ND_INDEXING_WARNING]):
        denoised_1 = restoration.denoise_wavelet(noisy,
                                                 multichannel=multichannel,
                                                 wavelet_levels=1,
                                                 convert2ycbcr=convert2ycbcr,
                                                 rescale_sigma=True)
    psnr_denoised_1 = peak_signal_noise_ratio(img, denoised_1)
    assert_(psnr_denoised > psnr_denoised_1)
    assert_(psnr_denoised_1 > psnr_noisy)

    # Test changing noise_std (higher threshold, so less energy in signal)
    with expected_warnings([PYWAVELET_ND_INDEXING_WARNING]):
        res1 = restoration.denoise_wavelet(noisy, sigma=2 * sigma,
                                           multichannel=multichannel,
                                           rescale_sigma=True)
    with expected_warnings([PYWAVELET_ND_INDEXING_WARNING]):
        res2 = restoration.denoise_wavelet(noisy, sigma=sigma,
                                           multichannel=multichannel,
                                           rescale_sigma=True)
    assert_(np.sum(res1**2) <= np.sum(res2**2))
def test_wavelet_denoising_levels(rescale_sigma):
    """Multi-level beats single level; invalid level counts raise or warn."""
    rstate = np.random.RandomState(1234)
    ndim = 2
    N = 256
    wavelet = 'db1'
    # Generate a very simple test image
    img = 0.2*np.ones((N, )*ndim)
    img[(slice(5, 13), ) * ndim] = 0.8

    sigma = 0.1
    noisy = img + sigma * rstate.randn(*(img.shape))
    noisy = np.clip(noisy, 0, 1)

    with expected_warnings([PYWAVELET_ND_INDEXING_WARNING]):
        denoised = restoration.denoise_wavelet(noisy, wavelet=wavelet,
                                               rescale_sigma=rescale_sigma)
        denoised_1 = restoration.denoise_wavelet(noisy, wavelet=wavelet,
                                                 wavelet_levels=1,
                                                 rescale_sigma=rescale_sigma)
    psnr_noisy = peak_signal_noise_ratio(img, noisy)
    psnr_denoised = peak_signal_noise_ratio(img, denoised)
    psnr_denoised_1 = peak_signal_noise_ratio(img, denoised_1)

    # multi-level case should outperform single level case
    assert_(psnr_denoised > psnr_denoised_1 > psnr_noisy)

    # invalid number of wavelet levels results in a ValueError or UserWarning
    max_level = pywt.dwt_max_level(np.min(img.shape),
                                   pywt.Wavelet(wavelet).dec_len)
    # NOTE(review): comparing ``Version`` against a plain string works for
    # distutils-style version classes but raises TypeError with
    # ``packaging.version.Version`` — confirm which Version is imported.
    if Version(pywt.__version__) < '1.0.0':
        # exceeding max_level raises a ValueError in PyWavelets 0.4-0.5.2
        with testing.raises(ValueError):
            with expected_warnings([PYWAVELET_ND_INDEXING_WARNING]):
                restoration.denoise_wavelet(
                    noisy, wavelet=wavelet, wavelet_levels=max_level + 1,
                    rescale_sigma=rescale_sigma)
    else:
        # exceeding max_level raises a UserWarning in PyWavelets >= 1.0.0
        with expected_warnings([
                'all coefficients will experience boundary effects']):
            restoration.denoise_wavelet(
                noisy, wavelet=wavelet, wavelet_levels=max_level + 1,
                rescale_sigma=rescale_sigma)

    with testing.raises(ValueError):
        with expected_warnings([PYWAVELET_ND_INDEXING_WARNING]):
            restoration.denoise_wavelet(
                noisy, wavelet=wavelet, wavelet_levels=-1,
                rescale_sigma=rescale_sigma)
def test_no_denoising_for_small_h(fast_mode, dtype):
    """A tiny h leaves the image essentially unchanged (checked twice)."""
    noisy = np.zeros((40, 40))
    noisy[10:-10, 10:-10] = 1.
    noisy += 0.3*np.random.randn(*noisy.shape)
    noisy = noisy.astype(dtype)
    # very small h should result in no averaging with other patches;
    # the original issued the identical call twice, so keep two rounds.
    for _ in range(2):
        out = restoration.denoise_nl_means(noisy, 7, 5, 0.01,
                                           fast_mode=fast_mode,
                                           multichannel=True)
        assert_(np.allclose(out, noisy))
def test_wavelet_denoising(): rstate = np.random.RandomState(1234) # version with one odd-sized dimension astro_gray_odd = astro_gray[:, :-1] astro_odd = astro[:, :-1] for img, multichannel, convert2ycbcr in [(astro_gray, False, False), (astro_gray_odd, False, False), (astro_odd, True, False), (astro_odd, True, True)]: sigma = 0.1 noisy = img + sigma * rstate.randn(*(img.shape)) noisy = np.clip(noisy, 0, 1) # Verify that SNR is improved when true sigma is used with expected_warnings([PYWAVELET_ND_INDEXING_WARNING]): denoised = restoration.denoise_wavelet(noisy, sigma=sigma, multichannel=multichannel, convert2ycbcr=convert2ycbcr) psnr_noisy = compare_psnr(img, noisy) psnr_denoised = compare_psnr(img, denoised) assert_(psnr_denoised > psnr_noisy) # Verify that SNR is improved with internally estimated sigma with expected_warnings([PYWAVELET_ND_INDEXING_WARNING]): denoised = restoration.denoise_wavelet(noisy, multichannel=multichannel, convert2ycbcr=convert2ycbcr) psnr_noisy = compare_psnr(img, noisy) psnr_denoised = compare_psnr(img, denoised) assert_(psnr_denoised > psnr_noisy) # SNR is improved less with 1 wavelet level than with the default. denoised_1 = restoration.denoise_wavelet(noisy, multichannel=multichannel, wavelet_levels=1, convert2ycbcr=convert2ycbcr) psnr_denoised_1 = compare_psnr(img, denoised_1) assert_(psnr_denoised > psnr_denoised_1) assert_(psnr_denoised_1 > psnr_noisy) # Test changing noise_std (higher threshold, so less energy in signal) with expected_warnings([PYWAVELET_ND_INDEXING_WARNING]): res1 = restoration.denoise_wavelet(noisy, sigma=2 * sigma, multichannel=multichannel) with expected_warnings([PYWAVELET_ND_INDEXING_WARNING]): res2 = restoration.denoise_wavelet(noisy, sigma=sigma, multichannel=multichannel) assert_(np.sum(res1**2) <= np.sum(res2**2))
def test_denoise_nl_means_multichannel(fast_mode, dtype):
    """True 3-D denoising beats 2-D+channels on genuinely 3-D data."""
    # for true 3D data, 3D denoising is better than denoising as 2D+channels
    clean = np.zeros((13, 10, 8), dtype=dtype)
    clean[6, 4:6, 2:-2] = 1.
    sigma = 0.3
    noisy = (clean + sigma * np.random.randn(*clean.shape)).astype(dtype)
    as_channels = restoration.denoise_nl_means(
        noisy, 3, 4, 0.6 * sigma, fast_mode=fast_mode, multichannel=True)
    as_volume = restoration.denoise_nl_means(
        noisy, 3, 4, 0.6 * sigma, fast_mode=fast_mode, multichannel=False)
    assert_(peak_signal_noise_ratio(clean, as_volume) >
            peak_signal_noise_ratio(clean, as_channels))
def test_range():
    """Output of edge detection should be in [0, 1]"""
    image = np.random.random((100, 100))
    detectors = (filters.sobel, filters.scharr,
                 filters.prewitt, filters.roberts)
    for detector in detectors:
        edges = detector(image)
        name = detector.__name__
        assert_(edges.min() >= 0,
                "Minimum of `{0}` is smaller than zero".format(name))
        assert_(edges.max() <= 1,
                "Maximum of `{0}` is larger than 1".format(name))
def test_denoise_tv_chambolle_weighting():
    """A fixed weight must act the same on 2-D and tiled 4-D input."""
    # make sure a specified weight gives consistent results regardless of
    # the number of input image dimensions
    rng = np.random.RandomState(1234)
    plane = astro_gray.copy()
    plane += 0.15 * rng.standard_normal(plane.shape)
    plane = np.clip(plane, 0, 1)

    # generate 4D image by tiling
    volume = np.tile(plane[..., None, None], (1, 1, 2, 2))

    weight = 0.2
    out2d = restoration.denoise_tv_chambolle(plane, weight=weight)
    out4d = restoration.denoise_tv_chambolle(volume, weight=weight)
    assert_(structural_similarity(out2d, out4d[:, :, 0, 0]) > 0.99)
def test_denoise_nl_means_2d():
    """NL-means reduces noise in 2-D, with and without a supplied sigma."""
    noisy = np.zeros((40, 40))
    noisy[10:-10, 10:-10] = 1.
    sigma = 0.3
    noisy += sigma * np.random.randn(*noisy.shape)
    for s in [sigma, 0]:
        for fast in (True, False):
            out = restoration.denoise_nl_means(noisy, 7, 5, 0.2,
                                               fast_mode=fast,
                                               multichannel=True, sigma=s)
            # make sure noise is reduced
            assert_(noisy.std() > out.std())
def test_range():
    """Output of edge detection should be in [0, 1]"""
    image = np.random.random((100, 100))
    detectors = (filters.sobel, filters.scharr, filters.prewitt,
                 filters.roberts, filters.farid)
    for detector in detectors:
        edges = detector(image)
        name = detector.__name__
        assert_(edges.min() >= 0,
                "Minimum of `{0}` is smaller than zero".format(name))
        assert_(edges.max() <= 1,
                "Maximum of `{0}` is larger than 1".format(name))
def test_denoise_nl_means_3d():
    """NL-means lowers the std of a noisy 3-D volume."""
    noisy = np.zeros((20, 20, 10))
    noisy[5:-5, 5:-5, 3:-3] = 1.
    sigma = 0.3
    noisy += sigma * np.random.randn(*noisy.shape)
    for s in [sigma, 0]:
        for fast in (True, False):
            out = restoration.denoise_nl_means(noisy, 5, 4, 0.2,
                                               fast_mode=fast,
                                               multichannel=False, sigma=s)
            # make sure noise is reduced
            assert_(noisy.std() > out.std())
def test_denoise_nl_means_multichannel():
    """True 3-D denoising gives higher SNR than 2-D+channels on 3-D data."""
    noisy = np.zeros((21, 20, 10))
    noisy[10, 9:11, 2:-2] = 1.
    noisy += 0.3 * np.random.randn(*noisy.shape)
    as_channels = restoration.denoise_nl_means(
        noisy, 5, 4, 0.1, fast_mode=True, multichannel=True)
    as_volume = restoration.denoise_nl_means(
        noisy, 5, 4, 0.1, fast_mode=True, multichannel=False)

    def snr(estimate):
        # SNR against the (already noisy) reference image, in dB.
        return 10 * np.log10(1. / ((estimate - noisy) ** 2).mean())

    assert_(snr(as_volume) > snr(as_channels))
def test_laplacian_pyramid_max_layers():
    """The Laplacian pyramid stops exactly when every axis reaches size 1."""
    for downscale in [2, 3, 5, 7]:
        img = np.random.randn(32, 8)
        pyramid = pyramids.pyramid_laplacian(img, downscale=downscale,
                                             multichannel=False)
        max_layer = int(np.ceil(math.log(np.max(img.shape), downscale)))
        for layer, out in enumerate(pyramid):
            if layer < max_layer:
                # should not reach all axes as size 1 prior to final level
                assert_(np.max(out.shape) > 1)

        # total number of images is max_layer + 1
        assert_equal(max_layer, layer)

        # final layer should be size 1 on all axes
        assert_array_equal(out.shape, (1, 1))
def test_clear_border_non_binary_3d():
    """3-D clear_border removes border-touching labels without mutating input."""
    border_plane = [[1, 2, 3, 1, 2],
                    [3, 3, 3, 4, 2],
                    [3, 4, 3, 4, 2],
                    [3, 3, 2, 1, 2]]
    middle_plane = [[1, 2, 3, 1, 2],
                    [3, 3, 5, 4, 2],
                    [3, 4, 5, 4, 2],
                    [3, 3, 2, 1, 2]]
    image3d = np.array([border_plane, middle_plane, border_plane])

    result = clear_border(image3d)

    # Every label except the fully-interior 5s touches the border, so the
    # expected output keeps only the 5 voxels.
    expected = np.where(image3d == 5, image3d, 0)
    assert_array_equal(result, expected)
    # The input array must be left untouched.
    assert_(not np.all(image3d == result))
def test_unwrap_3d_all_masked():
    """unwrap_phase handles fully- and almost-fully-masked 3-D input."""
    # all elements masked
    image = np.ma.zeros((10, 10, 10))
    image[:] = np.ma.masked
    unwrapped = unwrap_phase(image)
    assert_(np.ma.isMaskedArray(unwrapped))
    assert_(np.all(unwrapped.mask))

    # 1 unmasked element, still zero edges
    image = np.ma.zeros((10, 10, 10))
    image[:] = np.ma.masked
    image[0, 0, 0] = 0
    unwrapped = unwrap_phase(image)
    assert_(np.ma.isMaskedArray(unwrapped))
    assert_(np.sum(unwrapped.mask) == 999)  # all but one masked
    assert_(unwrapped[0, 0, 0] == 0)
def test_denoise_tv_chambolle_weighting():
    """A fixed weight must act the same on 2-D and tiled 4-D input."""
    # make sure a specified weight gives consistent results regardless of
    # the number of input image dimensions
    rng = np.random.RandomState(1234)
    plane = astro_gray.copy()
    plane += 0.15 * rng.standard_normal(plane.shape)
    plane = np.clip(plane, 0, 1)

    # generate 4D image by tiling
    volume = np.tile(plane[..., None, None], (1, 1, 2, 2))

    weight = 0.2
    out2d = restoration.denoise_tv_chambolle(plane, weight=weight)
    out4d = restoration.denoise_tv_chambolle(volume, weight=weight)
    assert_(measure.compare_ssim(out2d, out4d[:, :, 0, 0]) > 0.99)
def test_wavelet_denoising(img, multichannel, convert2ycbcr):
    """Wavelet denoising must raise PSNR; deeper decompositions help more."""
    rstate = np.random.RandomState(1234)
    sigma = 0.1
    noisy = img + sigma * rstate.randn(*(img.shape))
    noisy = np.clip(noisy, 0, 1)

    channel_axis = -1 if multichannel else None
    common = dict(channel_axis=channel_axis, rescale_sigma=True)

    # Verify that SNR is improved when true sigma is used
    out_known_sigma = restoration.denoise_wavelet(
        noisy, sigma=sigma, convert2ycbcr=convert2ycbcr, **common)
    psnr_noisy = peak_signal_noise_ratio(img, noisy)
    assert_(peak_signal_noise_ratio(img, out_known_sigma) > psnr_noisy)

    # Verify that SNR is improved with internally estimated sigma
    out_auto_sigma = restoration.denoise_wavelet(
        noisy, convert2ycbcr=convert2ycbcr, **common)
    psnr_auto = peak_signal_noise_ratio(img, out_auto_sigma)
    assert_(psnr_auto > psnr_noisy)

    # SNR is improved less with 1 wavelet level than with the default.
    out_one_level = restoration.denoise_wavelet(
        noisy, wavelet_levels=1, convert2ycbcr=convert2ycbcr, **common)
    psnr_one_level = peak_signal_noise_ratio(img, out_one_level)
    assert_(psnr_auto > psnr_one_level)
    assert_(psnr_one_level > psnr_noisy)

    # Test changing noise_std (higher threshold, so less energy in signal)
    high_thresh = restoration.denoise_wavelet(noisy, sigma=2 * sigma, **common)
    low_thresh = restoration.denoise_wavelet(noisy, sigma=sigma, **common)
    assert_(np.sum(high_thresh**2) <= np.sum(low_thresh**2))
def test_denoise_nl_means_3d():
    """NL-means on a 3-D volume raises PSNR in fast and slow modes."""
    clean = np.zeros((12, 12, 8))
    clean[5:-5, 5:-5, 2:-2] = 1.
    sigma = 0.3
    noisy = clean + sigma * np.random.randn(*clean.shape)
    psnr_noisy = compare_psnr(clean, noisy)
    for s in [sigma, 0]:
        for fast in (True, False):
            out = restoration.denoise_nl_means(noisy, 3, 4, h=0.75 * sigma,
                                               fast_mode=fast,
                                               multichannel=False, sigma=s)
            # make sure noise is reduced
            assert_(compare_psnr(clean, out) > psnr_noisy)
def test_calibrate_denoiser_extra_output():
    """calibrate_denoiser's reported losses should rank parameters like true MSE."""
    parameter_ranges = {'sigma': np.linspace(0.1, 1, 5) / 2}
    _, (parameters_tested, losses) = calibrate_denoiser(
        noisy_img,
        _denoise_wavelet,
        denoise_parameters=parameter_ranges,
        extra_output=True)

    all_denoised = [
        _invariant_denoise(noisy_img, _denoise_wavelet,
                           denoiser_kwargs=denoiser_kwargs)
        for denoiser_kwargs in parameters_tested
    ]
    # NOTE(review): ``test_img`` is presumably the module-level clean
    # reference image — confirm against the surrounding test module.
    ground_truth_losses = [mse(img, test_img) for img in all_denoised]
    assert_(np.argmin(losses) == np.argmin(ground_truth_losses))
def test_denoise_nl_means_3d(fast_mode, dtype):
    """NL-means on a 3-D volume raises PSNR for the given dtype/mode."""
    clean = np.zeros((12, 12, 8), dtype=dtype)
    clean[5:-5, 5:-5, 2:-2] = 1.
    sigma = 0.3
    noisy = (clean + sigma * np.random.randn(*clean.shape)).astype(dtype)
    psnr_noisy = peak_signal_noise_ratio(clean, noisy)
    for s in [sigma, 0]:
        out = restoration.denoise_nl_means(noisy, 3, 4, h=0.75 * sigma,
                                           fast_mode=fast_mode,
                                           multichannel=False, sigma=s)
        # make sure noise is reduced
        assert_(peak_signal_noise_ratio(clean, out) > psnr_noisy)
def test_denoise_tv_bregman_float_result_range():
    """TV-Bregman on uint8 input must return floats within [0, 1]."""
    # astronaut image
    img = astro_gray.copy()
    int_astro = np.multiply(img, 255).astype(np.uint8)
    assert_(np.max(int_astro) > 1)
    denoised_int_astro = restoration.denoise_tv_bregman(int_astro, weight=60.0)
    # test if the value range of output float data is within [0.0:1.0]
    # FIX: ``np.float`` was removed in NumPy 1.24; builtin ``float`` is the
    # same type and matches a float64 dtype.
    assert_(denoised_int_astro.dtype == float)
    assert_(np.max(denoised_int_astro) <= 1.0)
    assert_(np.min(denoised_int_astro) >= 0.0)
def test_denoise_tv_chambolle_float_result_range():
    """Torch TV denoising of uint8 input must return floats within [0, 1]."""
    # astronaut image
    img = astro_grayT
    int_astroT = torch.mul(img, 255).type(torch.uint8)
    assert_(torch.max(int_astroT) > 1)
    denoised_int_astroT = denoise_tv_chambolle_torch(int_astroT, weight=0.1)
    # test if the value range of output float data is within [0.0:1.0]
    # FIX: ``np.float`` was removed in NumPy 1.24; builtin ``float`` compares
    # equal to the float64 dtype, preserving the original check.
    assert_((denoised_int_astroT.numpy()).dtype == float)
    assert_(torch.max(denoised_int_astroT) <= 1.0)
    assert_(torch.min(denoised_int_astroT) >= 0.0)
def test_wavelet_denoising_levels():
    """Multi-level beats single level; invalid level counts raise or warn."""
    rstate = np.random.RandomState(1234)
    ndim = 2
    N = 256
    wavelet = 'db1'
    # Generate a very simple test image
    img = 0.2*np.ones((N, )*ndim)
    img[(slice(5, 13), ) * ndim] = 0.8

    sigma = 0.1
    noisy = img + sigma * rstate.randn(*(img.shape))
    noisy = np.clip(noisy, 0, 1)

    with expected_warnings([PYWAVELET_ND_INDEXING_WARNING]):
        denoised = restoration.denoise_wavelet(noisy, wavelet=wavelet)
        denoised_1 = restoration.denoise_wavelet(noisy, wavelet=wavelet,
                                                 wavelet_levels=1)
    psnr_noisy = compare_psnr(img, noisy)
    psnr_denoised = compare_psnr(img, denoised)
    psnr_denoised_1 = compare_psnr(img, denoised_1)

    # multi-level case should outperform single level case
    assert_(psnr_denoised > psnr_denoised_1 > psnr_noisy)

    # invalid number of wavelet levels results in a ValueError or UserWarning
    max_level = pywt.dwt_max_level(np.min(img.shape),
                                   pywt.Wavelet(wavelet).dec_len)
    # NOTE(review): comparing ``Version`` against a plain string works for
    # distutils-style version classes but raises TypeError with
    # ``packaging.version.Version`` — confirm which Version is imported.
    if Version(pywt.__version__) < '1.0.0':
        # exceeding max_level raises a ValueError in PyWavelets 0.4-0.5.2
        with testing.raises(ValueError):
            with expected_warnings([PYWAVELET_ND_INDEXING_WARNING]):
                restoration.denoise_wavelet(
                    noisy, wavelet=wavelet, wavelet_levels=max_level + 1)
    else:
        # exceeding max_level raises a UserWarning in PyWavelets >= 1.0.0
        with expected_warnings([
                'all coefficients will experience boundary effects']):
            restoration.denoise_wavelet(
                noisy, wavelet=wavelet, wavelet_levels=max_level + 1)

    with testing.raises(ValueError):
        with expected_warnings([PYWAVELET_ND_INDEXING_WARNING]):
            restoration.denoise_wavelet(
                noisy, wavelet=wavelet, wavelet_levels=-1)
def test_unwrap_2d_all_masked():
    """unwrap_phase must survive fully- and almost-fully-masked 2-D input."""
    # Segmentation fault when image is masked array with a all elements masked
    # GitHub issue #1347
    # all elements masked
    image = np.ma.zeros((10, 10))
    image[:] = np.ma.masked
    unwrapped = unwrap_phase(image)
    assert_(np.ma.isMaskedArray(unwrapped))
    assert_(np.all(unwrapped.mask))

    # 1 unmasked element, still zero edges
    image = np.ma.zeros((10, 10))
    image[:] = np.ma.masked
    image[0, 0] = 0
    unwrapped = unwrap_phase(image)
    assert_(np.ma.isMaskedArray(unwrapped))
    assert_(np.sum(unwrapped.mask) == 99)  # all but one masked
    assert_(unwrapped[0, 0] == 0)
def test_denoise_nl_means_2drgb():
    """NL-means lowers the std of a noisy RGB crop."""
    # reduce image size because nl means is very slow
    noisy = np.copy(astro[:50, :50])
    # add some random noise
    sigma = 0.5
    noisy += sigma * noisy.std() * np.random.random(noisy.shape)
    noisy = np.clip(noisy, 0, 1)
    for s in [sigma, 0]:
        for fast in (True, False):
            out = restoration.denoise_nl_means(noisy, 7, 9, 0.3,
                                               fast_mode=fast,
                                               multichannel=True, sigma=s)
            # make sure noise is reduced
            assert_(noisy.std() > out.std())
def test_denoise_tv_chambolle_weighting():
    """Same weight must behave identically on 2-D and tiled 4-D tensors."""
    # make sure a specified weight gives consistent results regardless of
    # the number of input image dimensions
    rng = np.random.RandomState(1234)
    plane = astro_gray.copy()
    plane += 0.15 * rng.standard_normal(plane.shape)
    plane = np.clip(plane, 0, 1)

    # generate 4D image by tiling
    volume = np.tile(plane[..., None, None], (1, 1, 2, 2))

    weight = 0.2
    out2d = denoise_tv_chambolle_torch(torch.tensor(plane), weight=weight)
    out4d = denoise_tv_chambolle_torch(torch.tensor(volume), weight=weight)
    assert_(
        measure.compare_ssim(out2d.numpy(),
                             out4d[:, :, 0, 0].numpy()) > 0.99)
def test_wavelet_denoising_channel_axis(channel_axis, convert2ycbcr):
    """Denoising with a moved channel axis still improves PSNR."""
    rstate = np.random.RandomState(1234)
    sigma = 0.1
    clean = astro_odd
    noisy = clean + sigma * rstate.randn(*(clean.shape))
    noisy = np.clip(noisy, 0, 1)

    clean = np.moveaxis(clean, -1, channel_axis)
    noisy = np.moveaxis(noisy, -1, channel_axis)

    # Verify that SNR is improved when true sigma is used
    restored = restoration.denoise_wavelet(noisy, sigma=sigma,
                                           channel_axis=channel_axis,
                                           convert2ycbcr=convert2ycbcr,
                                           rescale_sigma=True)
    assert_(peak_signal_noise_ratio(clean, restored) >
            peak_signal_noise_ratio(clean, noisy))
def assert_phase_almost_equal(a, b, *args, **kwargs):
    """An assert_almost_equal insensitive to phase shifts of n*2*pi."""
    two_pi = 2 * np.pi
    shift = two_pi * np.round((b.mean() - a.mean()) / two_pi)
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        diff = a - (b - shift)
        print('assert_phase_allclose, abs', np.max(np.abs(diff)))
        print('assert_phase_allclose, rel', np.max(np.abs(diff / a)))
    if np.ma.isMaskedArray(a):
        assert_(np.ma.isMaskedArray(b))
        assert_array_equal(a.mask, b.mask)
        raw_a = np.asarray(a)
        raw_b = np.asarray(b)
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            raw_diff = raw_a - (raw_b - shift)
            print('assert_phase_allclose, no mask, abs',
                  np.max(np.abs(raw_diff)))
            print('assert_phase_allclose, no mask, rel',
                  np.max(np.abs(raw_diff / raw_a)))
    assert_array_almost_equal_nulp(a + shift, b, *args, **kwargs)
def test_denoise_bilateral_multichannel_deprecation():
    """`multichannel` still works but must emit a deprecation warning."""
    noisy = checkerboard.copy()[:50, :50]
    # add some random noise
    noisy += 0.5 * noisy.std() * np.random.rand(*noisy.shape)
    noisy = np.clip(noisy, 0, 1)

    deprecation = "`multichannel` is a deprecated argument"
    with expected_warnings([deprecation]):
        mild = restoration.denoise_bilateral(noisy, sigma_color=0.1,
                                             sigma_spatial=10,
                                             multichannel=True)
    with expected_warnings([deprecation]):
        strong = restoration.denoise_bilateral(noisy, sigma_color=0.2,
                                               sigma_spatial=20,
                                               multichannel=True)

    # make sure noise is reduced in the checkerboard cells
    cell = (slice(30, 45), slice(5, 15))
    assert_(noisy[cell].std() > mild[cell].std())
    assert_(mild[cell].std() > strong[cell].std())
def test_wavelet_denoising_nd():
    """Denoise simple 1-D..4-D step images with both threshold methods."""
    rstate = np.random.RandomState(1234)
    for method in ['VisuShrink', 'BayesShrink']:
        for ndim in range(1, 5):
            # Generate a very simple test image
            if ndim < 3:
                img = 0.2*np.ones((128, )*ndim)
            else:
                img = 0.2*np.ones((16, )*ndim)
            # FIX: index with a *tuple* of slices. Indexing with a list of
            # slices was deprecated in NumPy 1.15 and later became an error.
            img[(slice(5, 13), ) * ndim] = 0.8

            sigma = 0.1
            noisy = img + sigma * rstate.randn(*(img.shape))
            noisy = np.clip(noisy, 0, 1)

            # Verify that SNR is improved with internally estimated sigma
            denoised = restoration.denoise_wavelet(noisy, method=method)
            psnr_noisy = compare_psnr(img, noisy)
            psnr_denoised = compare_psnr(img, denoised)
            assert_(psnr_denoised > psnr_noisy)
def assert_phase_almost_equal(a, b, *args, **kwargs):
    """An assert_almost_equal insensitive to phase shifts of n*2*pi."""
    shift = 2 * np.pi * np.round((b.mean() - a.mean()) / (2 * np.pi))
    # The diagnostic divisions below may hit zeros/NaNs; each filter accepts
    # either the warning or silence (``\A\Z`` matches the empty string).
    with expected_warnings([r'invalid value encountered|\A\Z',
                            r'divide by zero encountered|\A\Z']):
        print('assert_phase_allclose, abs',
              np.max(np.abs(a - (b - shift))))
        print('assert_phase_allclose, rel',
              np.max(np.abs((a - (b - shift)) / a)))
    if np.ma.isMaskedArray(a):
        assert_(np.ma.isMaskedArray(b))
        assert_array_equal(a.mask, b.mask)
        assert_(a.fill_value == b.fill_value)
        au = np.asarray(a)
        bu = np.asarray(b)
        with expected_warnings([r'invalid value encountered|\A\Z',
                                r'divide by zero encountered|\A\Z']):
            print('assert_phase_allclose, no mask, abs',
                  np.max(np.abs(au - (bu - shift))))
            print('assert_phase_allclose, no mask, rel',
                  np.max(np.abs((au - (bu - shift)) / au)))
    assert_array_almost_equal_nulp(a + shift, b, *args, **kwargs)
def test_denoise_nl_means_2d_multichannel():
    """Multichannel NL-means improves PSNR for 2/3/6-channel crops."""
    # reduce image size because nl means is slow
    img = np.copy(astro[:50, :50])
    # NOTE(review): ``np.concatenate`` defaults to axis=0, which stacks rows,
    # yet the comment says "6 channels" — confirm whether axis=-1 was
    # intended here.
    img = np.concatenate((img, ) * 2, )  # 6 channels

    # add some random noise
    sigma = 0.1
    imgn = img + sigma * np.random.standard_normal(img.shape)
    imgn = np.clip(imgn, 0, 1)
    for fast_mode in [True, False]:
        for s in [sigma, 0]:
            for n_channels in [2, 3, 6]:
                psnr_noisy = compare_psnr(img[..., :n_channels],
                                          imgn[..., :n_channels])
                denoised = restoration.denoise_nl_means(
                    imgn[..., :n_channels], 3, 5, h=0.75 * sigma,
                    fast_mode=fast_mode, multichannel=True, sigma=s)
                psnr_denoised = compare_psnr(denoised[..., :n_channels],
                                             img[..., :n_channels])
                # make sure noise is reduced
                assert_(psnr_denoised > psnr_noisy)
def test_wavelet_threshold():
    """Exercise _wavelet_threshold with an explicit, user-chosen threshold.

    Also checks the error/warning paths: ValueError when neither a method
    nor a threshold is given, and a warning when a threshold would be
    ignored by the chosen method.
    """
    rng = np.random.RandomState(1234)
    img = astro_gray
    sigma = 0.1
    noisy = np.clip(img + sigma * rng.randn(*img.shape), 0, 1)

    # A single fixed threshold (no BayesShrink per-level sigmas) should
    # still improve PSNR over the noisy input.
    denoised = _wavelet_threshold(noisy, wavelet='db1', method=None,
                                  threshold=sigma)
    assert_(compare_psnr(img, denoised) > compare_psnr(img, noisy))

    # Supplying neither a method nor a threshold is an error.
    with testing.raises(ValueError):
        _wavelet_threshold(noisy, wavelet='db1', method=None, threshold=None)

    # A threshold combined with a method that ignores it triggers a warning.
    with expected_warnings(["Thresholding method "]):
        _wavelet_threshold(noisy, wavelet='db1', method='BayesShrink',
                           threshold=sigma)
def test_denoise_nl_means_2d(fast_mode):
    """Non-local means reduces the variance of a noisy 2D image.

    Runs in float64 and float32, with and without a user-supplied sigma,
    and checks that the single-precision result closely matches the
    double-precision one.
    """
    img = np.zeros((40, 40))
    img[10:-10, 10:-10] = 1.
    sigma = 0.3
    img += sigma * np.random.randn(*img.shape)
    img_f32 = img.astype('float32')

    for s in (sigma, 0):
        denoised = restoration.denoise_nl_means(img, 7, 5, 0.2,
                                                fast_mode=fast_mode,
                                                multichannel=False, sigma=s)
        # noise must be reduced
        assert_(img.std() > denoised.std())

        denoised_f32 = restoration.denoise_nl_means(img_f32, 7, 5, 0.2,
                                                    fast_mode=fast_mode,
                                                    multichannel=False,
                                                    sigma=s)
        # noise must be reduced in single precision as well
        assert_(img.std() > denoised_f32.std())

        # check the single-precision result against the double-precision one
        assert np.allclose(denoised_f32, denoised, atol=1e-2)
def test_denoise_tv_chambolle_2d():
    """TV-Chambolle denoising reduces the total variation of a 2D image."""
    # astronaut image
    img = astro_gray.copy()
    # add noise to astronaut
    img += 0.5 * img.std() * np.random.rand(*img.shape)
    # clip noise so that it does not exceed allowed range for float images.
    img = np.clip(img, 0, 1)
    # denoise
    denoised_astro = restoration.denoise_tv_chambolle(img, weight=0.1)
    # which dtype?  Use the ``float`` builtin instead of the ``np.float``
    # alias, which is deprecated and removed in numpy >= 1.24 (the alias
    # was identical to the builtin, so behavior is unchanged).
    assert_(denoised_astro.dtype in [float, np.float32, np.float64])

    from scipy import ndimage as ndi
    grad = ndi.morphological_gradient(img, size=((3, 3)))
    grad_denoised = ndi.morphological_gradient(denoised_astro, size=((3, 3)))
    # test if the total variation has decreased
    assert_(grad_denoised.dtype == float)
    assert_(np.sqrt((grad_denoised**2).sum()) < np.sqrt((grad**2).sum()))
def test_unwrap_3d_middle_wrap_around():
    """Regression test for GitHub issue #1171.

    3D phase unwrapping used to segfault when only the middle dimension
    was flagged as wrap-around; an all-zero input must stay all zero.
    """
    image = np.zeros((20, 30, 40), dtype=np.float32)
    result = unwrap_phase(image, wrap_around=[False, True, False])
    assert_(not np.any(result))
def test_unwrap_2d_compressed_mask():
    """Regression test for GitHub issue #1346.

    unwrap_phase used to raise ValueError for a masked array whose mask is
    compressed (i.e. contains no masked elements); a zero input must
    unwrap to zero.
    """
    image = np.ma.zeros((10, 10))
    result = unwrap_phase(image)
    assert_(not np.any(result))
def test_denoise_tv_chambolle_4d():
    """TV denoising for a 4D input: returns float output with reduced noise."""
    im = 255 * np.random.rand(8, 8, 8, 8)
    res = restoration.denoise_tv_chambolle(im.astype(np.uint8), weight=0.1)
    # Use the ``float`` builtin instead of the deprecated ``np.float``
    # alias (removed in numpy >= 1.24); the alias was identical to the
    # builtin, so the dtype check is unchanged.
    assert_(res.dtype == float)
    # Output is scaled to [0, 1]; rescale before comparing dispersion.
    assert_(res.std() * 255 < im.std())
def test_cycle_spinning_multichannel():
    """Cycle spinning around denoise_wavelet, gray and multichannel.

    Checks that max_shifts=0 reproduces the plain denoiser, that valid
    shift/step combinations improve PSNR over a single denoising pass,
    and that malformed shifts/steps raise ValueError.
    """
    sigma = 0.1
    rstate = np.random.RandomState(1234)
    for multichannel in True, False:
        if multichannel:
            img = astro
            # can either omit or be 0 along the channels axis
            valid_shifts = [1, (0, 1), (1, 0), (1, 1), (1, 1, 0)]
            # can either omit or be 1 on channels axis.
            valid_steps = [1, 2, (1, 2), (1, 2, 1)]
            # too few or too many shifts or non-zero shift on channels
            invalid_shifts = [(1, 1, 2), (1, ), (1, 1, 0, 1)]
            # too few or too many shifts or any shifts <= 0
            invalid_steps = [(1, ), (1, 1, 1, 1), (0, 1), (-1, -1)]
        else:
            img = astro_gray
            valid_shifts = [1, (0, 1), (1, 0), (1, 1)]
            valid_steps = [1, 2, (1, 2)]
            invalid_shifts = [(1, 1, 2), (1, )]
            invalid_steps = [(1, ), (1, 1, 1), (0, 1), (-1, -1)]

        noisy = img.copy() + 0.1 * rstate.randn(*(img.shape))

        denoise_func = restoration.denoise_wavelet
        func_kw = dict(sigma=sigma, multichannel=multichannel)

        # max_shifts=0 is equivalent to just calling denoise_func
        # (expected_warnings absorbs pywavelets indexing deprecation
        # warnings and the missing-dask warning, if either occurs).
        with expected_warnings([PYWAVELET_ND_INDEXING_WARNING,
                                DASK_NOT_INSTALLED_WARNING]):
            dn_cc = restoration.cycle_spin(noisy, denoise_func, max_shifts=0,
                                           func_kw=func_kw,
                                           multichannel=multichannel)
            dn = denoise_func(noisy, **func_kw)
        assert_equal(dn, dn_cc)

        # denoising with cycle spinning will give better PSNR than without
        for max_shifts in valid_shifts:
            with expected_warnings([PYWAVELET_ND_INDEXING_WARNING,
                                    DASK_NOT_INSTALLED_WARNING]):
                dn_cc = restoration.cycle_spin(noisy, denoise_func,
                                               max_shifts=max_shifts,
                                               func_kw=func_kw,
                                               multichannel=multichannel)
            assert_(compare_psnr(img, dn_cc) > compare_psnr(img, dn))

        for shift_steps in valid_steps:
            with expected_warnings([PYWAVELET_ND_INDEXING_WARNING,
                                    DASK_NOT_INSTALLED_WARNING]):
                dn_cc = restoration.cycle_spin(noisy, denoise_func,
                                               max_shifts=2,
                                               shift_steps=shift_steps,
                                               func_kw=func_kw,
                                               multichannel=multichannel)
            assert_(compare_psnr(img, dn_cc) > compare_psnr(img, dn))

        # malformed shifts must be rejected before any denoising happens
        for max_shifts in invalid_shifts:
            with testing.raises(ValueError):
                dn_cc = restoration.cycle_spin(noisy, denoise_func,
                                               max_shifts=max_shifts,
                                               func_kw=func_kw,
                                               multichannel=multichannel)
        # malformed step sizes must be rejected as well
        for shift_steps in invalid_steps:
            with testing.raises(ValueError):
                dn_cc = restoration.cycle_spin(noisy, denoise_func,
                                               max_shifts=2,
                                               shift_steps=shift_steps,
                                               func_kw=func_kw,
                                               multichannel=multichannel)