def _wavelet_threshold(img, wavelet, threshold=None, sigma=None, mode='soft'):
    """Performs wavelet denoising.

    Parameters
    ----------
    img : ndarray (2d or 3d) of ints, uints or floats
        Input data to be denoised. `img` can be of any numeric type,
        but it is cast into an ndarray of floats for the computation
        of the denoised image.
    wavelet : string
        The type of wavelet to perform. Can be any of the options
        pywt.wavelist outputs. For example, this may be any of ``{db1, db2,
        db3, db4, haar}``.
    sigma : float, optional
        The standard deviation of the noise. The noise is estimated when sigma
        is None (the default) by the method in [2]_.
    threshold : float, optional
        The thresholding value. All wavelet coefficients less than this value
        are set to 0. The default value (None) uses the BayesShrink method
        found in [1]_ to remove noise.
    mode : {'soft', 'hard'}, optional
        An optional argument to choose the type of denoising performed. It is
        noted that choosing soft thresholding given additive noise finds the
        best approximation of the original image.

    Returns
    -------
    out : ndarray
        Denoised image.

    References
    ----------
    .. [1] Chang, S. Grace, Bin Yu, and Martin Vetterli. "Adaptive wavelet
           thresholding for image denoising and compression." Image Processing,
           IEEE Transactions on 9.9 (2000): 1532-1546.
           DOI: 10.1109/83.862633
    .. [2] D. L. Donoho and I. M. Johnstone. "Ideal spatial adaptation by
           wavelet shrinkage." Biometrika 81.3 (1994): 425-455.
           DOI: 10.1093/biomet/81.3.425
    """
    coeffs = pywt.wavedecn(img, wavelet=wavelet)
    detail_coeffs = coeffs[-1]['d' * img.ndim]

    if sigma is None:
        # Estimate the noise via the method in [2]_
        sigma = np.median(np.abs(detail_coeffs)) / 0.67448975019608171

    if threshold is None:
        # The BayesShrink threshold from [1]_ in the docstring
        threshold = sigma**2 / np.sqrt(max(img.var() - sigma**2, 0))

    denoised_detail = [{key: pywt.threshold(level[key], value=threshold,
                                            mode=mode) for key in level}
                       for level in coeffs[1:]]
    denoised_root = pywt.threshold(coeffs[0], value=threshold, mode=mode)
    denoised_coeffs = [denoised_root] + denoised_detail
    return pywt.waverecn(denoised_coeffs, wavelet)
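A minimal usage sketch for the function above, assuming `numpy` and `pywt` are imported in the module where it is defined. The test image, noise level, and wavelet choice below are illustrative, not taken from the original project.

# Hedged usage sketch for _wavelet_threshold (synthetic data, illustrative parameters).
import numpy as np
import pywt

rng = np.random.RandomState(0)
clean = np.zeros((64, 64))
clean[16:48, 16:48] = 1.0
noisy = clean + 0.1 * rng.standard_normal(clean.shape)

# Let sigma and the threshold be estimated internally
# (MAD noise estimate + BayesShrink threshold).
denoised = _wavelet_threshold(noisy, wavelet='db2', mode='soft')
print(denoised.shape, float(np.abs(denoised - clean).mean()))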
def uncompress_image(compressed_image, level, im_shape):
    # Breaking the compressed wavelet image into R, G, B layers
    # (wavelet compression works on 2D arrays only)
    arr_r_compressed = compressed_image[:, :, 0]
    arr_g_compressed = compressed_image[:, :, 1]
    arr_b_compressed = compressed_image[:, :, 2]

    x_shape, y_shape = im_shape
    slices = generate_wavlet_slices(x_shape, y_shape, level=level)

    # Converting the compressed coefficients back into the original wavelet
    # form (which the pywt library can work with)
    r_coeff = pywt.array_to_coeffs(arr_r_compressed, slices,
                                   output_format='wavedecn')
    g_coeff = pywt.array_to_coeffs(arr_g_compressed, slices,
                                   output_format='wavedecn')
    b_coeff = pywt.array_to_coeffs(arr_b_compressed, slices,
                                   output_format='wavedecn')

    # Using pywt to reconstruct the wavelet coefficients back into image form
    r_reconstructed = pywt.waverecn(r_coeff, 'db2',
                                    mode='periodization').astype(np.uint8)
    g_reconstructed = pywt.waverecn(g_coeff, 'db2',
                                    mode='periodization').astype(np.uint8)
    b_reconstructed = pywt.waverecn(b_coeff, 'db2',
                                    mode='periodization').astype(np.uint8)

    # Stacking the reconstructed R, G, B channels into the final RGB image
    reconstructed_im = np.stack(
        (r_reconstructed, g_reconstructed, b_reconstructed), 2).astype(np.uint8)
    return reconstructed_im
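The helper `generate_wavlet_slices` is not shown in this listing. A plausible sketch, assuming it rebuilds the `coeffs_to_array` slice bookkeeping by decomposing a dummy array of the target shape with the same wavelet and mode used for reconstruction above:

# Hypothetical sketch of generate_wavlet_slices (helper name taken from the
# call above; 'db2'/'periodization' assumed to match the reconstruction).
import numpy as np
import pywt

def generate_wavlet_slices(x_shape, y_shape, level):
    dummy = np.zeros((x_shape, y_shape))
    coeffs = pywt.wavedecn(dummy, 'db2', mode='periodization', level=level)
    _, slices = pywt.coeffs_to_array(coeffs)
    return slices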
def wavelet_based_BG_subtraction(image, num_levels, noise_lvl):
    coeffs = wavedecn(image, 'db1', level=None)  # decomposition
    coeffs2 = coeffs.copy()

    for BGlvl in range(1, num_levels):
        # set finest detail levels to zero
        coeffs[-BGlvl] = {k: np.zeros_like(v) for k, v in coeffs[-BGlvl].items()}

    Background = waverecn(coeffs, 'db1')  # reconstruction
    del coeffs
    BG_unfiltered = Background
    # gaussian filter with sigma = 2^(number of levels)
    Background = gaussian_filter(Background, sigma=2**num_levels)

    coeffs2[0] = np.ones_like(coeffs2[0])  # set approx to one (constant)
    for lvl in range(1, np.size(coeffs2) - noise_lvl):
        # keep the first detail level only
        coeffs2[lvl] = {k: np.zeros_like(v) for k, v in coeffs2[lvl].items()}
    Noise = waverecn(coeffs2, 'db1')  # reconstruction
    del coeffs2

    return Background, Noise, BG_unfiltered
def test_waverecn_empty_coeff():
    coeffs = [np.ones((2, 2, 2)), {}, {}]
    assert_equal(pywt.waverecn(coeffs, 'db1').shape, (8, 8, 8))
    assert_equal(pywt.waverecn(coeffs, 'db1').shape, (8, 8, 8))

    coeffs = [np.ones((2, 2, 2)), {}, {'daa': np.ones((4, 4, 4))}]

    coeffs = [np.ones((2, 2, 2)), {}, {}, {'daa': np.ones((8, 8, 8))}]
    assert_equal(pywt.waverecn(coeffs, 'db1').shape, (16, 16, 16))
def inverse(self, m, wv='db4'):
    msyn = np.zeros(self.mesh.nC)
    coeff_wv = pywt.wavedecn(
        msyn.reshape(self.mesh.nCx, self.mesh.nCy, order='F'), wv, mode='per')
    array_wv = pywt.coeffs_to_array(coeff_wv)
    coeff_back = pywt.array_to_coeffs(
        m.reshape(array_wv[0].shape, order='F'), array_wv[1])
    coeff_m = pywt.waverecn(coeff_back, wv, mode='per')
    return Utils.mkvc(coeff_m)
def apply_dwt_filter(y, dwt_type, dwt_level, dwt_thresh_func, dwt_thresh_type):
    coeffs = pywt.wavedecn(y, dwt_type, level=dwt_level)
    for i in range(1, dwt_level + 1):
        coeffs[i]["d"] = pywt.threshold(
            coeffs[i]["d"],
            thselect(coeffs[i]["d"], dwt_thresh_type),
            dwt_thresh_func)
    return pywt.waverecn(coeffs, dwt_type)
def dwt(self):
    # imLists = [IMAGEPATH + "a01_1.tif", IMAGEPATH + "a01_2.tif"]
    start = time.time()
    self.show_Image_DWt(self.image1)
    self.show_Image_DWt(self.image2)
    # show_DWt1D()

    coeffs = self.calculate_coeffs(self.image1)
    arr, coeff_slices = pywt.coeffs_to_array(coeffs)
    # show_DWt1D(coeffs)
    # show_Image_DWt(coeffs, target)
    # self.show_DWt1D()

    coeffs2 = self.calculate_coeffs(self.image2)
    arr2, coeff_slices2 = pywt.coeffs_to_array(coeffs2)

    startPCA = time.time()
    # p = PCA()
    # array = p.PCA(arr, arr2)  # fusion method
    f = Fusion(arr, arr2)
    array = f.PCA()
    endPCA = time.time()
    print('Total PCA time: {:5.3f}s'.format(endPCA - startPCA))

    coeffs_from_arr = pywt.array_to_coeffs(array, coeff_slices,
                                           output_format='wavedecn')
    # self.show_DWt1D()
    self.target = pywt.waverecn(coeffs_from_arr, wavelet='db1')
    self.plot()
    end = time.time()
    print('Total DWT time: {:5.3f}s'.format((end - start) - (endPCA - startPCA)))
    print('Total DWT and PCA time: {:5.3f}s'.format(end - start))
    return self.target
def decomp(cA, wavelet, levels, mode='constant', omissions=([], False)):
    """n-dimensional discrete wavelet decomposition and reconstruction.

    Parameters
    ----------
    cA : array_like
        n-dimensional array with input data.
    wavelet : Wavelet object or name string
        Wavelet to use.
    levels : int
        The number of decomposition steps to perform.
    mode : string, optional
        The mode of signal padding, defaults to 'constant'.
    omissions : tuple(list, bool), optional
        List of DETAIL levels to omit; if the bool is True, omit cA as well.

    Returns
    -------
    nD array of reconstructed data.
    """
    if omissions[0] and max(omissions[0]) > levels:
        raise ValueError("Omission level %d is too high. Maximum allowed is %d."
                         % (max(omissions[0]), levels))

    coeffs = pywt.wavedecn(cA, wavelet, level=levels, mode=mode)
    coeffs = omit(coeffs, omissions)
    return pywt.waverecn(coeffs, wavelet, mode=mode)
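The `omit` helper used above is not shown. A plausible sketch, assuming it zeroes the listed detail levels (1 = finest) and, when the flag is set, the approximation coefficients; the level-indexing convention here is an assumption, not from the original project:

# Hypothetical sketch of the omit helper referenced in decomp.
import numpy as np

def omit(coeffs, omissions):
    levels_to_drop, drop_approx = omissions
    for lvl in levels_to_drop:
        # coeffs[-lvl] holds the detail dict for decomposition level `lvl`
        coeffs[-lvl] = {k: np.zeros_like(v) for k, v in coeffs[-lvl].items()}
    if drop_approx:
        coeffs[0] = np.zeros_like(coeffs[0])
    return coeffs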
def calc_wavelet_th(line, wavelet_name, count_process, max_process, sgm_n,
                    coef_m, smoothing):
    len_line = len(line)
    rep_line = line[::-1] + line[::] + line[::-1]
    coeffs = pywt.wavedecn(rep_line, wavelet_name[count_process])
    list_, sep_ = pywt.coeffs_to_array(coeffs)
    m_max = int(np.log2(len(list_)))

    ### m (frequency) filter ###
    for m in range(1, m_max + 1):
        for n in range(2**(m_max - m), 2**(m_max - m) * 2):
            if m > m_max * coef_m[count_process] and n != 0:
                list_[n] = 0

    lambda_ = np.sqrt(2.0 * np.log(len(rep_line))) * sgm_n[count_process]

    ### n (location) filter ###
    # plt.plot(np.arange(len(list_)), list_); plt.hlines(lambda_, xmin=0, xmax=len(list_))
    list_ = [x if (i < len(list_) * 0.01 or np.abs(x) > lambda_) else 0
             for i, x in enumerate(list_)]
    # plt.plot(np.arange(len(list_)), list_); plt.hlines(-lambda_, xmin=0, xmax=len(list_)); plt.show()

    coeffs = pywt.array_to_coeffs(list_, sep_)
    reline = pywt.waverecn(coeffs, wavelet_name[count_process])
    # plt.plot(np.arange(len(reline)), reline); plt.plot(np.arange(len(rep_line)), rep_line); plt.show()

    if count_process != max_process - 1:
        res = calc_wavelet_th(list(reline[len_line:len_line * 2]), wavelet_name,
                              count_process + 1, max_process, sgm_n, coef_m,
                              smoothing)
    else:
        if smoothing:
            res = calc_wavelet_smooth(list(reline[len_line:len_line * 2]),
                                      'Haar', 3)
        else:
            res = list(reline[len_line:len_line * 2])
    return res
def adj_op(self, coeffs):
    """Define the wavelet adjoint operator.

    This method returns the reconstructed image.

    Parameters
    ----------
    coeffs: np.ndarray
        the wavelet coefficients.

    Returns
    -------
    data: np.ndarray((m, n)) or np.ndarray((m, n, p))
        the 2D or 3D reconstructed data.
    """
    self.coeffs = coeffs
    if self.undecimated:
        coeffs_dict = self.unflatten(coeffs, self.coeffs_shape)
        data = pywt.iswtn(coeffs_dict, self.pywt_transform)
    else:
        coeffs_dict = self.unflatten(coeffs, self.coeffs_shape)
        data = pywt.waverecn(coeffs=coeffs_dict,
                             wavelet=self.pywt_transform,
                             mode=self.mode)
    return data
def test_wavedecn_coeff_reshape_axes_subset():
    # verify round trip is correct when only a subset of axes are transformed:
    # wavedecn -> coeffs_to_array -> array_to_coeffs -> waverecn
    # This is done for wavedec{1, 2, n}
    rng = np.random.RandomState(1234)
    mode = 'symmetric'
    w = pywt.Wavelet('db2')
    N = 16
    ndim = 3
    for axes in [(-1, ), (0, ), (1, ), (0, 1), (1, 2), (0, 2), None]:
        x1 = rng.randn(*([N] * ndim))
        coeffs = pywt.wavedecn(x1, w, mode=mode, axes=axes)
        coeff_arr, coeff_slices = pywt.coeffs_to_array(coeffs, axes=axes)
        if axes is not None:
            # if axes is not None, it must be provided to coeffs_to_array
            assert_raises(ValueError, pywt.coeffs_to_array, coeffs)

        # mismatched axes size
        assert_raises(ValueError, pywt.coeffs_to_array, coeffs,
                      axes=(0, 1, 2, 3))
        assert_raises(ValueError, pywt.coeffs_to_array, coeffs, axes=())

        coeffs2 = pywt.array_to_coeffs(coeff_arr, coeff_slices)
        x1r = pywt.waverecn(coeffs2, w, mode=mode, axes=axes)

        assert_allclose(x1, x1r, rtol=1e-4, atol=1e-4)
def _transform(self, m, wv='db4'):
    msyn = np.zeros(self.mesh.nC)
    coeff_map = pywt.wavedecn(
        msyn.reshape(self.mesh.nCx, self.mesh.nCy, order='F'), wv, mode='per')
    array_map = pywt.coeffs_to_array(coeff_map)
    coeff_map = pywt.array_to_coeffs(
        m.reshape(array_map[0].shape, order='F'), array_map[1])
    coeff_back_map = pywt.waverecn(coeff_map, wv, mode='per')
    return Utils.mkvc(coeff_back_map)
def test_waverecn():
    rstate = np.random.RandomState(1234)
    # test 1D through 4D cases
    for nd in range(1, 5):
        x = rstate.randn(*(4, ) * nd)
        coeffs = pywt.wavedecn(x, 'db1')
        assert_(len(coeffs) == 3)
        assert_allclose(pywt.waverecn(coeffs, 'db1'), x, rtol=tol_double)
def test_waverecn_axes_subsets():
    rstate = np.random.RandomState(0)
    data = rstate.standard_normal((8, 8, 8, 8))
    # test all combinations of 3 out of 4 axes transformed
    for axes in combinations((0, 1, 2, 3), 3):
        coefs = pywt.wavedecn(data, 'haar', axes=axes)
        rec = pywt.waverecn(coefs, 'haar', axes=axes)
        assert_allclose(rec, data, atol=1e-14)
def test_waverecn_int_axis():
    # waverecn should also work for axes as an integer
    rstate = np.random.RandomState(0)
    data = rstate.standard_normal((8, 8))
    for axis in [0, 1]:
        coefs = pywt.wavedecn(data, 'haar', axes=axis)
        rec = pywt.waverecn(coefs, 'haar', axes=axis)
        assert_allclose(rec, data, atol=1e-14)
def iwtn(data_to_iwt, wavelet='db4', mode='per', dims=None, dimOpt=None,
         dimLenOpt=None):
    return pywt.waverecn(mat2wvlt(data_to_iwt, dims, dimOpt, dimLenOpt),
                         wavelet, mode)
def test_per_axis_wavelets_and_modes():
    # tests separate wavelet and edge mode for each axis.
    rstate = np.random.RandomState(1234)
    data = rstate.randn(24, 24, 16)

    # wavelet can be a string or wavelet object
    wavelets = (pywt.Wavelet('haar'), 'sym2', 'db2')
    # The default number of levels should be the minimum over this list
    max_levels = [pywt._dwt.dwt_max_level(nd, nf)
                  for nd, nf in zip(data.shape, wavelets)]
    # mode can be a string or a Modes enum
    modes = ('symmetric', 'periodization',
             pywt._extensions._pywt.Modes.reflect)

    coefs = pywt.wavedecn(data, wavelets, modes)
    assert_allclose(pywt.waverecn(coefs, wavelets, modes), data, atol=1e-14)
    assert_equal(min(max_levels), len(coefs[1:]))

    coefs = pywt.wavedecn(data, wavelets[:1], modes)
    assert_allclose(pywt.waverecn(coefs, wavelets[:1], modes), data,
                    atol=1e-14)

    coefs = pywt.wavedecn(data, wavelets, modes[:1])
    assert_allclose(pywt.waverecn(coefs, wavelets, modes[:1]), data,
                    atol=1e-14)

    # length of wavelets or modes doesn't match the length of axes
    assert_raises(ValueError, pywt.wavedecn, data, wavelets[:2])
    assert_raises(ValueError, pywt.wavedecn, data, wavelets, mode=modes[:2])
    assert_raises(ValueError, pywt.waverecn, coefs, wavelets[:2])
    assert_raises(ValueError, pywt.waverecn, coefs, wavelets, mode=modes[:2])

    # dwt2/idwt2 also support per-axis wavelets/modes
    data2 = data[..., 0]
    coefs2 = pywt.wavedec2(data2, wavelets[:2], modes[:2])
    assert_allclose(pywt.waverec2(coefs2, wavelets[:2], modes[:2]), data2,
                    atol=1e-14)
    assert_equal(min(max_levels[:2]), len(coefs2[1:]))
def test_waverecn_all_wavelets_modes():
    # test 2D case using all wavelets and modes
    rstate = np.random.RandomState(1234)
    r = rstate.randn(80, 96)
    for wavelet in wavelist:
        for mode in pywt.Modes.modes:
            coeffs = pywt.wavedecn(r, wavelet, mode=mode)
            assert_allclose(pywt.waverecn(coeffs, wavelet, mode=mode),
                            r, rtol=tol_single, atol=tol_single)
def deriv(self, m, v=None, wv='db4'):
    if v is not None:
        coeff_wv = pywt.wavedecn(
            v.reshape(self.mesh.nCx, self.mesh.nCy, order='F'), wv, mode='per')
        array_wv = pywt.coeffs_to_array(coeff_wv)
        coeff_back = pywt.array_to_coeffs(v, array_wv[1])
        coeff_m = pywt.waverecn(coeff_back, wv, mode='per')
        return Utils.mkvc(coeff_m)
    else:
        print("not implemented")
def inverse_wavelet_transform(w_coeffs_rgb, coeff_slices, x_shape):
    x_hat = np.zeros(x_shape)
    for i in range(w_coeffs_rgb.shape[0]):
        w_coeffs_list = pywt.array_to_coeffs(w_coeffs_rgb[i, :, :],
                                             coeff_slices)
        x_hat[0, :, :, i] = pywt.waverecn(w_coeffs_list, wavelet='db4',
                                          mode='periodization')
    return x_hat
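An assumed forward counterpart to the reconstruction above, sketching how `w_coeffs_rgb` and `coeff_slices` could be produced per channel. The function name and the (1, H, W, C) image layout are inferred from the code above, not taken from the original project.

# Hedged sketch of a per-channel forward wavelet transform.
import numpy as np
import pywt

def wavelet_transform(x):
    w_coeffs_rgb = []
    coeff_slices = None
    for i in range(x.shape[-1]):
        coeffs = pywt.wavedecn(x[0, :, :, i], wavelet='db4',
                               mode='periodization')
        arr, coeff_slices = pywt.coeffs_to_array(coeffs)
        w_coeffs_rgb.append(arr)
    return np.stack(w_coeffs_rgb, axis=0), coeff_slices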
def reconstruct_local_maxima(frames, fraction_maxima=0.1, level_max=3,
                             wavelet='haar'):
    print('Decomposing frames ..')
    coeffs = pywt.wavedecn(frames, wavelet, level=level_max)
    filtered_coeffs = filter_local_maxima(coeffs, fraction_maxima)
    print('Reconstructing frames ..')
    reconstructed = pywt.waverecn(filtered_coeffs, wavelet)
    return reconstructed
def _rmatvec(self, x):
    if self.reshape:
        x = np.reshape(x, self.dimsd)
    x = pywt.array_to_coeffs(x, self.sl, output_format='wavedecn')
    y = pywt.waverecn(x, wavelet=self.waveletadj, mode='periodization',
                      axes=(self.dir, ))
    y = self.pad.rmatvec(y.ravel())
    return y
def test_waverecn_accuracies():
    # testing 3D only here
    rstate = np.random.RandomState(1234)
    x0 = rstate.randn(4, 4, 4)
    for dt, tol in dtypes_and_tolerances:
        x = x0.astype(dt)
        if np.iscomplexobj(x):
            x += 1j * rstate.randn(4, 4, 4).astype(x.real.dtype)
        coeffs = pywt.wavedecn(x.astype(dt), 'db1')
        assert_allclose(pywt.waverecn(coeffs, 'db1'), x, atol=tol, rtol=tol)
def waveletDenoise(data):
    # data is num_neurons x time_frames
    wavelet = pywt.Wavelet('db4')
    # Determine the maximum number of possible levels for the signal
    dlen = wavelet.dec_len
    wavelet_levels = pywt.dwt_max_level(data.shape[1], wavelet)
    # Skip coarsest wavelet scales (see Notes in docstring).
    wavelet_levels = max(wavelet_levels - 3, 1)

    data_denoise = np.zeros(np.shape(data))
    shift = 4
    for c in np.arange(-shift, shift + 1):
        data_shift = np.roll(data, c, 1)
        for i in range(np.shape(data)[0]):
            coeffs = pywt.wavedecn(data_shift[i, :], wavelet=wavelet,
                                   level=wavelet_levels)
            # Detail coefficients at each decomposition level
            dcoeffs = coeffs[1:]
            detail_coeffs = dcoeffs[-1]['d']

            # rescaling using a single estimation of level noise based on
            # first level coefficients.
            # Consider regions with detail coefficients exactly zero to be
            # masked out
            # detail_coeffs = detail_coeffs[np.nonzero(detail_coeffs)]

            # 75th quantile of the underlying, symmetric noise distribution
            denom = scipy.stats.norm.ppf(0.75)
            sigma = np.median(np.abs(detail_coeffs)) / denom
            np.shape(sigma)
            sigma_mat = np.tile(sigma, (wavelet_levels, 1))
            np.shape(sigma_mat)

            tot_num_coeffs = pywt.wavedecn_size(coeffs)

            # universal threshold
            threshold = np.sqrt(2 * np.log(tot_num_coeffs))
            threshold = sigma * threshold

            denoised_detail = [{key: pywt.threshold(level[key],
                                                    value=threshold,
                                                    mode='hard')
                                for key in level} for level in dcoeffs]
            # Dict of unique threshold coefficients for each detail
            # coeff. array
            denoised_coeffs = [coeffs[0]] + denoised_detail
            data_denoise[i, :] = data_denoise[i, :] + np.roll(
                pywt.waverecn(denoised_coeffs, wavelet), -c
            )[:data_denoise.shape[1]]

    data_denoise = data_denoise / (2 * shift + 1)
    return data_denoise
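The nested loops above combine a MAD noise estimate, the universal (VisuShrink-style) threshold, and shift averaging (cycle spinning). A compact, standalone sketch of just the estimate-and-threshold step on a single 1D signal, with illustrative sizes and noise level:

# Standalone sketch of the MAD sigma estimate + universal threshold used above.
import numpy as np
import scipy.stats
import pywt

rng = np.random.RandomState(0)
signal = np.sin(np.linspace(0, 8 * np.pi, 1024)) + 0.2 * rng.randn(1024)

coeffs = pywt.wavedecn(signal, wavelet='db4')
detail_coeffs = coeffs[-1]['d']
sigma = np.median(np.abs(detail_coeffs)) / scipy.stats.norm.ppf(0.75)
threshold = sigma * np.sqrt(2 * np.log(pywt.wavedecn_size(coeffs)))

denoised_coeffs = [coeffs[0]] + [
    {k: pywt.threshold(v, value=threshold, mode='hard') for k, v in d.items()}
    for d in coeffs[1:]
]
denoised = pywt.waverecn(denoised_coeffs, 'db4')[:signal.size]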
def calc_wavelet_smooth(line, wavelet_name, level=None):
    rep_line = line[::-1] + line[::] + line[::-1] + line[::]
    N = len(rep_line)
    coeffs = pywt.wavedecn(rep_line, wavelet_name, level=level)
    list_, sep_ = pywt.coeffs_to_array(coeffs)
    level = level if level is not None else int(np.log2(N))
    list_ = [x if j < N / 2**level else 0 for j, x in enumerate(list_)]
    coeffs = pywt.array_to_coeffs(list_, sep_)
    smooth = pywt.waverecn(coeffs, wavelet_name)
    # plt.plot(np.arange(N), smooth, np.arange(N), rep_line)
    # plt.title('name:{}, level:{}'.format(wavelet_name, level)); plt.show()
    len_line = len(line)
    return list(smooth[len_line:len_line * 2])
def task(dir, name):
    X, shape = load_images(dir + name + '/')
    arrays = []
    coeff_slices = []
    for (i, x) in enumerate(X):
        coeffs = pywt.wavedec(x, 'db14', level=3)
        arr, coeff_slice = pywt.coeffs_to_array(coeffs)
        arrays.append(arr)
        coeff_slices.append(coeff_slice)
    mer = marge(arrays)
    coeffs_from_arr = pywt.array_to_coeffs(mer, coeff_slices[0])
    cam_recon = pywt.waverecn(coeffs_from_arr, wavelet='db14')
    cv2.imwrite('out_' + name + '.jpg', cam_recon.reshape(shape))
def initialize_wl_operators(self):
    if self.use_decimated:
        H = lambda x: pywt.wavedecn(x, wavelet=self.wl_type, axes=self.axes,
                                    level=self.decomp_lvl)
        Ht = lambda x: pywt.waverecn(x, wavelet=self.wl_type, axes=self.axes)
    else:
        if use_swtn:
            H = lambda x: pywt.swtn(x, wavelet=self.wl_type, axes=self.axes,
                                    level=self.decomp_lvl)
            Ht = lambda x: pywt.iswtn(x, wavelet=self.wl_type, axes=self.axes)
        else:
            H = lambda x: pywt.swt2(np.squeeze(x), wavelet=self.wl_type,
                                    axes=self.axes, level=self.decomp_lvl)
            # Ht = lambda x: pywt.iswt2(x, wavelet=self.wl_type)
            Ht = lambda x: pywt.iswt2(x, wavelet=self.wl_type)[np.newaxis, ...]
    return (H, Ht)
def test_multilevel_dtypes_nd():
    wavelet = pywt.Wavelet('haar')
    for dt_in, dt_out in zip(dtypes_in, dtypes_out):
        # wavedecn, waverecn
        x = np.ones((8, 8), dtype=dt_in)
        errmsg = "wrong dtype returned for {0} input".format(dt_in)
        cA, coeffsD2, coeffsD1 = pywt.wavedecn(x, wavelet, level=2)
        assert_(cA.dtype == dt_out, "wavedecn: " + errmsg)
        for key, c in coeffsD1.items():
            assert_(c.dtype == dt_out, "wavedecn: " + errmsg)
        for key, c in coeffsD2.items():
            assert_(c.dtype == dt_out, "wavedecn: " + errmsg)
        x_roundtrip = pywt.waverecn([cA, coeffsD2, coeffsD1], wavelet)
        assert_(x_roundtrip.dtype == dt_out, "waverecn: " + errmsg)
def wavelet_denoising(NomDeLImage, Nlevel):
    image = su.PullFromSlicer(NomDeLImage)
    NumpyImage = sitk.GetArrayFromImage(image)

    max_lev = 6  # how many levels of decomposition to draw
    coeffs = pywt.wavedecn(NumpyImage, 'db2', mode='zero', level=max_lev)
    # see https://pywavelets.readthedocs.io/en/latest/ref/nd-dwt-and-idwt.html#pywt.wavedecn
    for i in range(Nlevel - max_lev):
        # remove highest frequency
        coeffs[(max_lev - i)] = {k: np.zeros_like(v)
                                 for k, v in coeffs[(max_lev - i)].items()}
        # remove highest frequency
        coeffs[-(max_lev - i)] = {k: np.zeros_like(v)
                                  for k, v in coeffs[-(max_lev - i)].items()}

    matrice_ondelette = pywt.waverecn(coeffs, 'db2')  # periodic or zero mode
    image_ondelette = sitk.GetImageFromArray(matrice_ondelette)
    image_ondelette.SetSpacing(image.GetSpacing())
    image_ondelette.SetDirection(image.GetDirection())
    image_ondelette.SetOrigin(image.GetOrigin())
    su.PushToSlicer(image_ondelette, 'image_DenoisWave_level0-' + str(Nlevel))
def adj_op(self, x):
    if (self.wav == 'dirac'):
        return np.reshape(x, self.shape)
    if (self.wav == 'fourier'):
        return np.fft.ifftn(np.reshape(x, self.shape))
    if (self.wav == "dct"):
        return scipy.fft.idctn(np.reshape(x, self.shape), norm='ortho')
    coeffs_from_arr = pywt.unravel_coeffs(x, self.coeff_slices,
                                          self.coeff_shapes,
                                          output_format='wavedecn')
    return pywt.waverecn(coeffs_from_arr, wavelet=self.wav, mode='periodic',
                         axes=self.axes)
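For the wavelet branch above, `pywt.ravel_coeffs` is the forward-side companion of `pywt.unravel_coeffs`: it returns the flat coefficient vector plus the slices and shapes bookkeeping consumed by the adjoint. A sketch of an assumed forward operator; the class and attribute names mirror the method above but are hypothetical:

# Hedged sketch of a forward op matching the wavelet branch of adj_op.
import pywt

class WaveletOp:
    def __init__(self, shape, wav='db4', axes=None):
        self.shape, self.wav, self.axes = shape, wav, axes

    def op(self, image):
        coeffs = pywt.wavedecn(image, wavelet=self.wav, mode='periodic',
                               axes=self.axes)
        x, self.coeff_slices, self.coeff_shapes = pywt.ravel_coeffs(coeffs)
        return x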
def PrintReconstructions(coeffs, n):
    arr, coeff_slices = pywt.coeffs_to_array(coeffs)

    # Removing details
    for i in range(n, len(coeff_slices)):
        arr[coeff_slices[i]['ad']] = 0
        arr[coeff_slices[i]['dd']] = 0
        arr[coeff_slices[i]['da']] = 0

    D1 = pywt.array_to_coeffs(arr, coeff_slices)
    dCat = pywt.waverecn(D1, wavelet)
    plt.figure()
    plt.title('Reconstructed with level %i of details' % (n - 1))
    plt.imshow(dCat, cmap=colormap)
    return
def test_waverecn_coeff_reshape_odd():
    # verify round trip is correct:
    # wavedecn -> coeffs_to_array -> array_to_coeffs -> waverecn
    rng = np.random.RandomState(1234)
    x1 = rng.randn(35, 33)
    for mode in pywt.Modes.modes:
        for wave in ['haar', ]:
            w = pywt.Wavelet(wave)
            maxlevel = pywt.dwt_max_level(np.min(x1.shape), w.dec_len)
            if maxlevel == 0:
                continue
            coeffs = pywt.wavedecn(x1, w, mode=mode)
            coeff_arr, coeff_slices = pywt.coeffs_to_array(coeffs)
            coeffs2 = pywt.array_to_coeffs(coeff_arr, coeff_slices)
            x1r = pywt.waverecn(coeffs2, w, mode=mode)
            # truncate reconstructed values to original shape
            x1r = x1r[tuple(slice(s) for s in x1.shape)]
            assert_allclose(x1, x1r, rtol=1e-4, atol=1e-4)
def time_waverecn(self, D, n, wavelet, dtype):
    pywt.waverecn(self.data, wavelet)
def test_wavedecn_complex():
    data = np.ones((4, 4, 4)) + 1j
    coeffs = pywt.wavedecn(data, 'db1')
    assert_allclose(pywt.waverecn(coeffs, 'db1'), data, rtol=1e-12)
def test_waverecn_lists():
    # support coefficient arrays specified as lists instead of arrays
    coeffs = [[[1.0]], {'ad': [[0.0]], 'da': [[0.0]], 'dd': [[0.0]]}]
    assert_equal(pywt.waverecn(coeffs, 'db1').shape, (2, 2))
def test_waverecn_dtypes():
    x = np.ones((4, 4, 4))
    for dt, tol in dtypes_and_tolerances:
        coeffs = pywt.wavedecn(x.astype(dt), 'db1')
        assert_allclose(pywt.waverecn(coeffs, 'db1'), x, atol=tol, rtol=tol)
def _wavelet_threshold(img, wavelet, threshold=None, sigma=None, mode='soft',
                       wavelet_levels=None):
    """Perform wavelet denoising.

    Parameters
    ----------
    img : ndarray (2d or 3d) of ints, uints or floats
        Input data to be denoised. `img` can be of any numeric type,
        but it is cast into an ndarray of floats for the computation
        of the denoised image.
    wavelet : string
        The type of wavelet to perform. Can be any of the options
        pywt.wavelist outputs. For example, this may be any of ``{db1, db2,
        db3, db4, haar}``.
    sigma : float, optional
        The standard deviation of the noise. The noise is estimated when sigma
        is None (the default) by the method in [2]_.
    threshold : float, optional
        The thresholding value. All wavelet coefficients less than this value
        are set to 0. The default value (None) uses the BayesShrink method
        found in [1]_ to remove noise.
    mode : {'soft', 'hard'}, optional
        An optional argument to choose the type of denoising performed. It is
        noted that choosing soft thresholding given additive noise finds the
        best approximation of the original image.
    wavelet_levels : int or None, optional
        The number of wavelet decomposition levels to use. The default is
        three less than the maximum number of possible decomposition levels
        (see Notes below).

    Returns
    -------
    out : ndarray
        Denoised image.

    Notes
    -----
    Reference [1]_ used four levels of wavelet decomposition. To be more
    flexible for a range of input sizes, the implementation here stops 3
    levels prior to the maximum level of decomposition for `img` (the exact
    # of levels thus depends on `img.shape` and the chosen wavelet).
    BayesShrink variance estimation doesn't work well on levels with
    extremely small coefficient arrays. This is the rationale for skipping a
    few of the coarsest levels. The user can override the automated setting
    by explicitly specifying `wavelet_levels`.

    References
    ----------
    .. [1] Chang, S. Grace, Bin Yu, and Martin Vetterli. "Adaptive wavelet
           thresholding for image denoising and compression." Image Processing,
           IEEE Transactions on 9.9 (2000): 1532-1546.
           DOI: 10.1109/83.862633
    .. [2] D. L. Donoho and I. M. Johnstone. "Ideal spatial adaptation by
           wavelet shrinkage." Biometrika 81.3 (1994): 425-455.
           DOI: 10.1093/biomet/81.3.425
    """
    wavelet = pywt.Wavelet(wavelet)

    # Determine the number of wavelet decomposition levels
    if wavelet_levels is None:
        # Determine the maximum number of possible levels for img
        dlen = wavelet.dec_len
        wavelet_levels = np.min(
            [pywt.dwt_max_level(s, dlen) for s in img.shape])

        # Skip coarsest wavelet scales (see Notes in docstring).
        wavelet_levels = max(wavelet_levels - 3, 1)

    coeffs = pywt.wavedecn(img, wavelet=wavelet, level=wavelet_levels)
    # Detail coefficients at each decomposition level
    dcoeffs = coeffs[1:]

    if sigma is None:
        # Estimate the noise via the method in [2]_
        detail_coeffs = dcoeffs[-1]['d' * img.ndim]
        sigma = _sigma_est_dwt(detail_coeffs, distribution='Gaussian')

    if threshold is None:
        # The BayesShrink thresholds from [1]_ in docstring
        var = sigma**2
        threshold = [{key: _bayes_thresh(level[key], var) for key in level}
                     for level in dcoeffs]

    if np.isscalar(threshold):
        # A single threshold for all coefficient arrays
        denoised_detail = [{key: pywt.threshold(level[key], value=threshold,
                                                mode=mode) for key in level}
                           for level in dcoeffs]
    else:
        # Dict of unique threshold coefficients for each detail coeff. array
        denoised_detail = [{key: pywt.threshold(level[key], value=thresh[key],
                                                mode=mode) for key in level}
                           for thresh, level in zip(threshold, dcoeffs)]
    denoised_coeffs = [coeffs[0]] + denoised_detail
    return pywt.waverecn(denoised_coeffs, wavelet)
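The helpers `_sigma_est_dwt` and `_bayes_thresh` are referenced above but not shown in this listing. Plausible sketches, assuming the MAD-based noise estimate and the BayesShrink rule T = sigma_noise^2 / sigma_signal used in the earlier variant of this function; the exact guards in the real implementation may differ.

# Hypothetical sketches of the helpers used by the BayesShrink variant above.
import numpy as np
import scipy.stats

def _sigma_est_dwt(detail_coeffs, distribution='Gaussian'):
    # Robust noise estimate from the finest detail coefficients:
    # median(|d|) / Phi^{-1}(0.75) for Gaussian noise.
    if distribution.lower() != 'gaussian':
        raise ValueError("Only Gaussian noise estimation is sketched here.")
    return np.median(np.abs(detail_coeffs)) / scipy.stats.norm.ppf(0.75)

def _bayes_thresh(details, var):
    # BayesShrink threshold: noise variance over estimated signal std;
    # the eps guard against division by zero is an assumption.
    dvar = np.mean(details * details)
    eps = np.finfo(details.dtype).eps
    return var / np.sqrt(max(dvar - var, eps))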
def _wavelet_threshold(image, wavelet, method=None, threshold=None,
                       sigma=None, mode='soft', wavelet_levels=None):
    """Perform wavelet thresholding.

    Parameters
    ----------
    image : ndarray (2d or 3d) of ints, uints or floats
        Input data to be denoised. `image` can be of any numeric type,
        but it is cast into an ndarray of floats for the computation
        of the denoised image.
    wavelet : string
        The type of wavelet to perform. Can be any of the options
        pywt.wavelist outputs. For example, this may be any of ``{db1, db2,
        db3, db4, haar}``.
    method : {'BayesShrink', 'VisuShrink'}, optional
        Thresholding method to be used. The currently supported methods are
        "BayesShrink" [1]_ and "VisuShrink" [2]_. If it is set to None, a
        user-specified ``threshold`` must be supplied instead.
    threshold : float, optional
        The thresholding value to apply during wavelet coefficient
        thresholding. The default value (None) uses the selected ``method``
        to estimate appropriate threshold(s) for noise removal.
    sigma : float, optional
        The standard deviation of the noise. The noise is estimated when sigma
        is None (the default) by the method in [2]_.
    mode : {'soft', 'hard'}, optional
        An optional argument to choose the type of denoising performed. It is
        noted that choosing soft thresholding given additive noise finds the
        best approximation of the original image.
    wavelet_levels : int or None, optional
        The number of wavelet decomposition levels to use. The default is
        three less than the maximum number of possible decomposition levels
        (see Notes below).

    Returns
    -------
    out : ndarray
        Denoised image.

    References
    ----------
    .. [1] Chang, S. Grace, Bin Yu, and Martin Vetterli. "Adaptive wavelet
           thresholding for image denoising and compression." Image Processing,
           IEEE Transactions on 9.9 (2000): 1532-1546.
           :DOI:`10.1109/83.862633`
    .. [2] D. L. Donoho and I. M. Johnstone. "Ideal spatial adaptation by
           wavelet shrinkage." Biometrika 81.3 (1994): 425-455.
           :DOI:`10.1093/biomet/81.3.425`
    """
    wavelet = pywt.Wavelet(wavelet)
    if not wavelet.orthogonal:
        warn(("Wavelet thresholding was designed for use with orthogonal "
              "wavelets. For nonorthogonal wavelets such as {}, results are "
              "likely to be suboptimal.").format(wavelet.name))

    # original_extent is used to workaround PyWavelets issue #80
    # odd-sized input results in an image with 1 extra sample after waverecn
    original_extent = tuple(slice(s) for s in image.shape)

    # Determine the number of wavelet decomposition levels
    if wavelet_levels is None:
        # Determine the maximum number of possible levels for image
        dlen = wavelet.dec_len
        wavelet_levels = np.min(
            [pywt.dwt_max_level(s, dlen) for s in image.shape])

        # Skip coarsest wavelet scales (see Notes in docstring).
        wavelet_levels = max(wavelet_levels - 3, 1)

    coeffs = pywt.wavedecn(image, wavelet=wavelet, level=wavelet_levels)
    # Detail coefficients at each decomposition level
    dcoeffs = coeffs[1:]

    if sigma is None:
        # Estimate the noise via the method in [2]_
        detail_coeffs = dcoeffs[-1]['d' * image.ndim]
        sigma = _sigma_est_dwt(detail_coeffs, distribution='Gaussian')

    if method is not None and threshold is not None:
        warn(("Thresholding method {} selected. The user-specified threshold "
              "will be ignored.").format(method))

    if threshold is None:
        var = sigma**2
        if method is None:
            raise ValueError(
                "If method is None, a threshold must be provided.")
        elif method == "BayesShrink":
            # The BayesShrink thresholds from [1]_ in docstring
            threshold = [{key: _bayes_thresh(level[key], var)
                          for key in level} for level in dcoeffs]
        elif method == "VisuShrink":
            # The VisuShrink thresholds from [2]_ in docstring
            threshold = _universal_thresh(image, sigma)
        else:
            raise ValueError("Unrecognized method: {}".format(method))

    if np.isscalar(threshold):
        # A single threshold for all coefficient arrays
        denoised_detail = [{key: pywt.threshold(level[key], value=threshold,
                                                mode=mode) for key in level}
                           for level in dcoeffs]
    else:
        # Dict of unique threshold coefficients for each detail coeff. array
        denoised_detail = [{key: pywt.threshold(level[key], value=thresh[key],
                                                mode=mode) for key in level}
                           for thresh, level in zip(threshold, dcoeffs)]
    denoised_coeffs = [coeffs[0]] + denoised_detail
    return pywt.waverecn(denoised_coeffs, wavelet)[original_extent]