def _wavelet_threshold(img, wavelet, threshold=None, sigma=None, mode='soft'):
    """Perform wavelet denoising.

    Parameters
    ----------
    img : ndarray (2d or 3d) of ints, uints or floats
        Input data to be denoised. `img` can be of any numeric type,
        but it is cast into an ndarray of floats for the computation
        of the denoised image.
    wavelet : string
        The type of wavelet to perform. Can be any of the options
        pywt.wavelist outputs. For example, this may be any of ``{db1, db2,
        db3, db4, haar}``.
    sigma : float, optional
        The standard deviation of the noise. The noise is estimated when sigma
        is None (the default) by the method in [2]_.
    threshold : float, optional
        The thresholding value. All wavelet coefficients less than this value
        are set to 0. The default value (None) uses the BayesShrink method
        found in [1]_ to remove noise.
    mode : {'soft', 'hard'}, optional
        An optional argument to choose the type of denoising performed. It is
        noted that choosing soft thresholding given additive noise finds the
        best approximation of the original image.

    Returns
    -------
    out : ndarray
        Denoised image.

    References
    ----------
    .. [1] Chang, S. Grace, Bin Yu, and Martin Vetterli. "Adaptive wavelet
           thresholding for image denoising and compression." Image
           Processing, IEEE Transactions on 9.9 (2000): 1532-1546.
           DOI: 10.1109/83.862633
    .. [2] D. L. Donoho and I. M. Johnstone. "Ideal spatial adaptation by
           wavelet shrinkage." Biometrika 81.3 (1994): 425-455.
           DOI: 10.1093/biomet/81.3.425
    """
    coeffs = pywt.wavedecn(img, wavelet=wavelet)
    detail_coeffs = coeffs[-1]['d' * img.ndim]

    if sigma is None:
        # Estimate the noise via the method in [2]_: median absolute deviation
        # of the finest detail coefficients, scaled by the 75th percentile of
        # the standard normal distribution.
        sigma = np.median(np.abs(detail_coeffs)) / 0.67448975019608171

    if threshold is None:
        # The BayesShrink threshold from [1]_ in docstring. Guard against
        # division by zero when the image variance does not exceed sigma**2.
        threshold = sigma**2 / np.sqrt(max(img.var() - sigma**2,
                                           np.finfo(float).eps))

    denoised_detail = [{key: pywt.threshold(level[key], value=threshold,
                                            mode=mode)
                        for key in level} for level in coeffs[1:]]
    denoised_root = pywt.threshold(coeffs[0], value=threshold, mode=mode)
    denoised_coeffs = [denoised_root] + denoised_detail
    return pywt.waverecn(denoised_coeffs, wavelet)
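A minimal usage sketch for the helper above, assuming `numpy` and `pywt` are imported and `_wavelet_threshold` is in scope; the synthetic image and noise level are illustrative only.

import numpy as np
import pywt

rng = np.random.default_rng(0)
# Synthetic piecewise-constant image plus Gaussian noise (sigma = 0.1).
clean = np.zeros((64, 64))
clean[16:48, 16:48] = 1.0
noisy = clean + 0.1 * rng.standard_normal(clean.shape)

# Let the helper estimate sigma and the BayesShrink threshold itself.
denoised = _wavelet_threshold(noisy, wavelet='db2', mode='soft')
print((noisy - clean).std(), (denoised - clean).std())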
def setup(self, D, n, wavelet, dtype): try: from pywt import waverecn except ImportError: raise NotImplementedError("waverecn not available") super(WaverecnTimeSuite, self).setup(D, n, wavelet, dtype) self.data = pywt.wavedecn(self.data, wavelet)
def test_waverecn_axes_errors(): data = np.ones((8, 8, 8)) c = pywt.wavedecn(data, 'haar') # repeated axes not allowed assert_raises(ValueError, pywt.waverecn, c, 'haar', axes=(1, 1)) # out of range axis not allowed assert_raises(ValueError, pywt.waverecn, c, 'haar', axes=(0, 1, 3))
def test_wavedecn_shapes_and_size(): wav = pywt.Wavelet('db2') for data_shape in [(33, ), (64, 32), (1, 15, 30)]: for axes in [None, 0, -1]: for mode in pywt.Modes.modes: coeffs = pywt.wavedecn(np.ones(data_shape), wav, mode=mode, axes=axes) # verify that the shapes match the coefficient shapes shapes = pywt.wavedecn_shapes(data_shape, wav, mode=mode, axes=axes) assert_equal(coeffs[0].shape, shapes[0]) expected_size = coeffs[0].size for level in range(1, len(coeffs)): for k, v in coeffs[level].items(): expected_size += v.size assert_equal(shapes[level][k], v.shape) # size can be determined from either the shapes or coeffs size = pywt.wavedecn_size(shapes) assert_equal(size, expected_size) size = pywt.wavedecn_size(coeffs) assert_equal(size, expected_size)
def test_wavedecn_coeff_reshape_axes_subset(): # verify round trip is correct when only a subset of axes are transformed: # wavedecn - >coeffs_to_array-> array_to_coeffs -> waverecn # This is done for wavedec{1, 2, n} rng = np.random.RandomState(1234) mode = 'symmetric' w = pywt.Wavelet('db2') N = 16 ndim = 3 for axes in [(-1, ), (0, ), (1, ), (0, 1), (1, 2), (0, 2), None]: x1 = rng.randn(*([N] * ndim)) coeffs = pywt.wavedecn(x1, w, mode=mode, axes=axes) coeff_arr, coeff_slices = pywt.coeffs_to_array(coeffs, axes=axes) if axes is not None: # if axes is not None, it must be provided to coeffs_to_array assert_raises(ValueError, pywt.coeffs_to_array, coeffs) # mismatched axes size assert_raises(ValueError, pywt.coeffs_to_array, coeffs, axes=(0, 1, 2, 3)) assert_raises(ValueError, pywt.coeffs_to_array, coeffs, axes=()) coeffs2 = pywt.array_to_coeffs(coeff_arr, coeff_slices) x1r = pywt.waverecn(coeffs2, w, mode=mode, axes=axes) assert_allclose(x1, x1r, rtol=1e-4, atol=1e-4)
def test_waverecn_axes_subsets(): rstate = np.random.RandomState(0) data = rstate.standard_normal((8, 8, 8, 8)) # test all combinations of 3 out of 4 axes transformed for axes in combinations((0, 1, 2, 3), 3): coefs = pywt.wavedecn(data, 'haar', axes=axes) rec = pywt.waverecn(coefs, 'haar', axes=axes) assert_allclose(rec, data, atol=1e-14)
def test_waverecn_int_axis(): # waverecn should also work for axes as an integer rstate = np.random.RandomState(0) data = rstate.standard_normal((8, 8)) for axis in [0, 1]: coefs = pywt.wavedecn(data, 'haar', axes=axis) rec = pywt.waverecn(coefs, 'haar', axes=axis) assert_allclose(rec, data, atol=1e-14)
def test_waverecn(): rstate = np.random.RandomState(1234) # test 1D through 4D cases for nd in range(1, 5): x = rstate.randn(*(4, )*nd) coeffs = pywt.wavedecn(x, 'db1') assert_(len(coeffs) == 3) assert_allclose(pywt.waverecn(coeffs, 'db1'), x, rtol=tol_double)
def test_waverecn_all_wavelets_modes(): # test 2D case using all wavelets and modes rstate = np.random.RandomState(1234) r = rstate.randn(80, 96) for wavelet in wavelist: for mode in pywt.Modes.modes: coeffs = pywt.wavedecn(r, wavelet, mode=mode) assert_allclose(pywt.waverecn(coeffs, wavelet, mode=mode), r, rtol=tol_single, atol=tol_single)
def test_array_to_coeffs_invalid_inputs(): coeffs = pywt.wavedecn(np.ones(2), 'haar') arr, arr_slices = pywt.coeffs_to_array(coeffs) # empty list of array slices assert_raises(ValueError, pywt.array_to_coeffs, arr, []) # invalid format name assert_raises(ValueError, pywt.array_to_coeffs, arr, arr_slices, 'foo')
def test_waverecn_accuracies(): # testing 3D only here rstate = np.random.RandomState(1234) x0 = rstate.randn(4, 4, 4) for dt, tol in dtypes_and_tolerances: x = x0.astype(dt) if np.iscomplexobj(x): x += 1j*rstate.randn(4, 4, 4).astype(x.real.dtype) coeffs = pywt.wavedecn(x.astype(dt), 'db1') assert_allclose(pywt.waverecn(coeffs, 'db1'), x, atol=tol, rtol=tol)
def test_dwtn_max_level(): # predicted and empirical dwtn_max_level match for wav in [pywt.Wavelet('db2'), 'sym8']: for data_shape in [(33, ), (64, 32), (1, 15, 30)]: for axes in [None, 0, -1]: for mode in pywt.Modes.modes: coeffs = pywt.wavedecn(np.ones(data_shape), wav, mode=mode, axes=axes) max_lev = pywt.dwtn_max_level(data_shape, wav, axes) assert_equal(len(coeffs[1:]), max_lev)
def _matvec(self, x): x = self.pad.matvec(x) if self.reshape: x = np.reshape(x, self.dimsd) y = pywt.coeffs_to_array(pywt.wavedecn(x, wavelet=self.wavelet, level=self.level, mode='periodization', axes=(self.dir, )), axes=(self.dir, ))[0] return y.ravel()
def waveletDenoise(data): # data is num_neurons x time_frames wavelet = pywt.Wavelet('db4') # Determine the maximum number of possible levels for image dlen = wavelet.dec_len wavelet_levels = pywt.dwt_max_level(data.shape[1], wavelet) # Skip coarsest wavelet scales (see Notes in docstring). wavelet_levels = max(wavelet_levels - 3, 1) data_denoise = np.zeros(np.shape(data)) shift = 4 for c in np.arange(-shift, shift + 1): data_shift = np.roll(data, c, 1) for i in range(np.shape(data)[0]): coeffs = pywt.wavedecn(data_shift[i, :], wavelet=wavelet, level=wavelet_levels) # Detail coefficients at each decomposition level dcoeffs = coeffs[1:] detail_coeffs = dcoeffs[-1]['d'] # rescaling using a single estimation of level noise based on first level coefficients. # Consider regions with detail coefficients exactly zero to be masked out # detail_coeffs = detail_coeffs[np.nonzero(detail_coeffs)] # 75th quantile of the underlying, symmetric noise distribution denom = scipy.stats.norm.ppf(0.75) sigma = np.median(np.abs(detail_coeffs)) / denom np.shape(sigma) sigma_mat = np.tile(sigma, (wavelet_levels, 1)) np.shape(sigma_mat) tot_num_coeffs = pywt.wavedecn_size(coeffs) # universal threshold threshold = np.sqrt(2 * np.log(tot_num_coeffs)) threshold = sigma * threshold denoised_detail = [{ key: pywt.threshold(level[key], value=threshold, mode='hard') for key in level } for level in dcoeffs] # Dict of unique threshold coefficients for each detail coeff. array denoised_coeffs = [coeffs[0]] + denoised_detail data_denoise[i, :] = data_denoise[i, :] + np.roll( pywt.waverecn(denoised_coeffs, wavelet), -c)[:data_denoise.shape[1]] data_denoise = data_denoise / (2 * shift + 1) return data_denoise
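The comments in `waveletDenoise` combine a MAD-based noise estimate with the universal threshold. A small self-contained sketch of those two formulas on pure Gaussian noise (illustrative values, assuming a recent `pywt` with `wavedecn_size`):

import numpy as np
import scipy.stats
import pywt

rng = np.random.default_rng(0)
noise = 0.5 * rng.standard_normal(4096)

coeffs = pywt.wavedecn(noise, wavelet='db4', level=3)
detail_coeffs = coeffs[-1]['d']

# MAD-based sigma estimate: median(|d|) / Phi^-1(0.75); should be close to 0.5.
sigma = np.median(np.abs(detail_coeffs)) / scipy.stats.norm.ppf(0.75)

# Universal threshold: sigma * sqrt(2 * log(total number of coefficients)).
threshold = sigma * np.sqrt(2 * np.log(pywt.wavedecn_size(coeffs)))
print(sigma, threshold)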
def deriv(self, m, v=None, wv='db1'):
    if v is not None:
        coeff_wv = pywt.wavedecn(v.reshape(self.mesh.nCx, self.mesh.nCy,
                                           order='F'),
                                 wv, mode='per')
        array_wv = pywt.coeffs_to_array(coeff_wv)
        return Utils.mkvc(array_wv[0])
    else:
        print("not implemented")
def get_wavelet_shape(shape, wave_name, axes, level): zshape = [((i + 1) // 2) * 2 for i in shape] tmp = pywt.wavedecn(np.zeros(zshape), wave_name, mode='zero', axes=axes, level=level) tmp, coeff_slices = pywt.coeffs_to_array(tmp, axes=axes) oshape = tmp.shape return oshape, coeff_slices
def calc_wavelet_smooth(line, wavelet_name, level=None): rep_line = line[::-1] + line[::] + line[::-1] + line[::] N = len(rep_line) coeffs = pywt.wavedecn(rep_line, wavelet_name, level=level) list_, sep_ = pywt.coeffs_to_array(coeffs) level = level if level is not None else int(np.log2(N)) list_ = [x if j < N / 2**level else 0 for j, x in enumerate(list_)] coeffs = pywt.array_to_coeffs(list_, sep_) smooth = pywt.waverecn(coeffs, wavelet_name) # plt.plot(np.arange(N),smooth,np.arange(N),rep_line);plt.title('name:{}, level:{}'.format(wavelet_name,level));plt.show(); len_line = len(line) return list(smooth[len_line:len_line * 2])
def wavelet_transform(x): w_coeffs_rgb = [] for i in range(x.shape[4]): w_coeffs_list = pywt.wavedecn(x[0, :, :, :, i], 'db4', level=None, mode='periodization') w_coeffs, coeff_slices = pywt.coeffs_to_array(w_coeffs_list) w_coeffs_rgb.append(w_coeffs) w_coeffs_rgb = np.array(w_coeffs_rgb) return w_coeffs_rgb, coeff_slices
def inverse(self, m, wv='db4'):
    msyn = np.zeros(self.mesh.nC)
    coeff_wv = pywt.wavedecn(msyn.reshape(self.mesh.nCx, self.mesh.nCy,
                                          order='F'),
                             wv, mode='per')
    array_wv = pywt.coeffs_to_array(coeff_wv)
    coeff_back = pywt.array_to_coeffs(
        m.reshape(array_wv[0].shape, order='F'), array_wv[1])
    coeff_m = pywt.waverecn(coeff_back, wv, mode='per')
    return Utils.mkvc(coeff_m)
def _transform(self, m, wv='db4'):
    msyn = np.zeros(self.mesh.nC)
    coeff_map = pywt.wavedecn(msyn.reshape(self.mesh.nCx, self.mesh.nCy,
                                           order='F'),
                              wv, mode='per')
    array_map = pywt.coeffs_to_array(coeff_map)
    coeff_map = pywt.array_to_coeffs(
        m.reshape(array_map[0].shape, order='F'), array_map[1])
    coeff_back_map = pywt.waverecn(coeff_map, wv, mode='per')
    return Utils.mkvc(coeff_back_map)
def Wavelet_Features(dset, states, mode='db1', level=6, img_type='ad', path_folder="F://ashhab/system2"): new_dset = [] for img in dset: try: # applying wavelet transformation with spicfic paramters Coefficients = pywt.wavedecn(img, mode, level=level) Result = Coefficients[1][img_type] # converting the feature from 2D matrix to 1D array feature = Result.ravel() new_dset.append(feature) except: continue try: # applying Feature selection sel = VarianceThreshold(threshold=(.8 * (1 - .8))) vector = sel.fit_transform(new_dset) new_dset = vector.tolist() except: with open('system2_failur.txt', 'a') as f: f.write('%s\n' % 'family__{}__level__{}__image_type__{}'.format( mode, level, img_type)) return label = ['a{}'.format(i) for i in range(1, len(new_dset[0]) + 1)] label = str(label) label = label.replace("'", '') label = label[1:-1] + ',class' # Writing the feature to the specified path (parameter path_folder). with open( '{}/family__{}__level__{}__image_type__{}.txt'.format( path_folder, mode, level, img_type), 'a') as f: f.write('%s\n' % str(label)) for i, state in zip(new_dset, states): i.append(state) i = str(i) i = i[1:-1] with open( '{}/family__{}__level__{}__image_type__{}.txt'.format( path_folder, mode, level, img_type), 'a') as f: f.write('%s\n' % str(i))
def test_per_axis_wavelets_and_modes():
    # tests separate wavelet and edge mode for each axis.
    rstate = np.random.RandomState(1234)
    data = rstate.randn(24, 24, 16)

    # wavelet can be a string or wavelet object
    wavelets = (pywt.Wavelet('haar'), 'sym2', 'db2')
    # The default number of levels should be the minimum over this list
    max_levels = [pywt._dwt.dwt_max_level(nd, nf)
                  for nd, nf in zip(data.shape, wavelets)]

    # mode can be a string or a Modes enum
    modes = ('symmetric', 'periodization',
             pywt._extensions._pywt.Modes.reflect)

    coefs = pywt.wavedecn(data, wavelets, modes)
    assert_allclose(pywt.waverecn(coefs, wavelets, modes), data, atol=1e-14)
    assert_equal(min(max_levels), len(coefs[1:]))

    coefs = pywt.wavedecn(data, wavelets[:1], modes)
    assert_allclose(pywt.waverecn(coefs, wavelets[:1], modes), data,
                    atol=1e-14)

    coefs = pywt.wavedecn(data, wavelets, modes[:1])
    assert_allclose(pywt.waverecn(coefs, wavelets, modes[:1]), data,
                    atol=1e-14)

    # length of wavelets or modes doesn't match the length of axes
    assert_raises(ValueError, pywt.wavedecn, data, wavelets[:2])
    assert_raises(ValueError, pywt.wavedecn, data, wavelets, mode=modes[:2])
    assert_raises(ValueError, pywt.waverecn, coefs, wavelets[:2])
    assert_raises(ValueError, pywt.waverecn, coefs, wavelets, mode=modes[:2])

    # dwt2/idwt2 also support per-axis wavelets/modes
    data2 = data[..., 0]
    coefs2 = pywt.wavedec2(data2, wavelets[:2], modes[:2])
    assert_allclose(pywt.waverec2(coefs2, wavelets[:2], modes[:2]), data2,
                    atol=1e-14)
    assert_equal(min(max_levels[:2]), len(coefs2[1:]))
def deriv(self, m, v=None, wv='db4'):
    if v is not None:
        coeff_wv = pywt.wavedecn(v.reshape(self.mesh.nCx, self.mesh.nCy,
                                           order='F'),
                                 wv, mode='per')
        array_wv = pywt.coeffs_to_array(coeff_wv)
        coeff_back = pywt.array_to_coeffs(v, array_wv[1])
        coeff_m = pywt.waverecn(coeff_back, wv, mode='per')
        return Utils.mkvc(coeff_m)
    else:
        print("not implemented")
def test_unravel_invalid_inputs(): coeffs = pywt.wavedecn(np.ones(2), 'haar') arr, slices, shapes = pywt.ravel_coeffs(coeffs) # empty list for slices or shapes assert_raises(ValueError, pywt.unravel_coeffs, arr, slices, []) assert_raises(ValueError, pywt.unravel_coeffs, arr, [], shapes) # unequal length for slices/shapes assert_raises(ValueError, pywt.unravel_coeffs, arr, slices[:-1], shapes) # invalid format name assert_raises(ValueError, pywt.unravel_coeffs, arr, slices, shapes, 'foo')
def initialize_wl_operators(self): if self.use_decimated: H = lambda x: pywt.wavedecn(x, wavelet=self.wl_type, axes=self.axes, level=self.decomp_lvl) Ht = lambda x: pywt.waverecn(x, wavelet=self.wl_type, axes=self.axes) else: if use_swtn: H = lambda x: pywt.swtn(x, wavelet=self.wl_type, axes=self.axes, level=self.decomp_lvl) Ht = lambda x: pywt.iswtn(x, wavelet=self.wl_type, axes=self.axes) else: H = lambda x: pywt.swt2(np.squeeze(x), wavelet=self.wl_type, axes=self.axes, level=self.decomp_lvl) # Ht = lambda x : pywt.iswt2(x, wavelet=self.wl_type) Ht = lambda x: pywt.iswt2(x, wavelet=self.wl_type)[np.newaxis, ...] return (H, Ht)
def test_multilevel_dtypes_nd(): wavelet = pywt.Wavelet('haar') for dt_in, dt_out in zip(dtypes_in, dtypes_out): # wavedecn, waverecn x = np.ones((8, 8), dtype=dt_in) errmsg = "wrong dtype returned for {0} input".format(dt_in) cA, coeffsD2, coeffsD1 = pywt.wavedecn(x, wavelet, level=2) assert_(cA.dtype == dt_out, "wavedecn: " + errmsg) for key, c in coeffsD1.items(): assert_(c.dtype == dt_out, "wavedecn: " + errmsg) for key, c in coeffsD2.items(): assert_(c.dtype == dt_out, "wavedecn: " + errmsg) x_roundtrip = pywt.waverecn([cA, coeffsD2, coeffsD1], wavelet) assert_(x_roundtrip.dtype == dt_out, "waverecn: " + errmsg)
def direct_dwt(self, x): """Perform the direct wavelet transform. :param x: Data to transform. :type x: `numpy.array_like` :return: Transformed data. :rtype: list """ return pywt.wavedecn(x, wavelet=self.wavelet, axes=self.axes, mode=self.pad_on_demand, level=self.level)
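Only the forward transform is shown in this snippet. A minimal sketch of what a matching inverse could look like, assuming the same `self.wavelet`, `self.axes` and `self.pad_on_demand` attributes; the method name `inverse_dwt` is hypothetical and not part of the original class.

def inverse_dwt(self, c):
    """Perform the inverse wavelet transform (hypothetical companion).

    :param c: Coefficients as returned by `direct_dwt` (wavedecn format).
    :type c: list
    :return: Reconstructed data.
    :rtype: `numpy.array_like`
    """
    # Use the same wavelet, axes and boundary mode as the forward transform.
    return pywt.waverecn(c, wavelet=self.wavelet, axes=self.axes,
                         mode=self.pad_on_demand)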
def wavelet_denoising(NomDeLImage, Nlevel):
    image = su.PullFromSlicer(NomDeLImage)
    NumpyImage = sitk.GetArrayFromImage(image)
    max_lev = 6  # how many levels of decomposition to draw
    # see https://pywavelets.readthedocs.io/en/latest/ref/nd-dwt-and-idwt.html#pywt.wavedecn
    coeffs = pywt.wavedecn(NumpyImage, 'db2', mode='zero', level=max_lev)
    for i in range(Nlevel - max_lev):
        # remove highest frequency
        coeffs[(max_lev - i)] = {k: np.zeros_like(v)
                                 for k, v in coeffs[(max_lev - i)].items()}
        # remove highest frequency
        coeffs[-(max_lev - i)] = {k: np.zeros_like(v)
                                  for k, v in coeffs[-(max_lev - i)].items()}
    matrice_ondelette = pywt.waverecn(coeffs, 'db2')  # 'periodization' or 'zero' mode
    image_ondelette = sitk.GetImageFromArray(matrice_ondelette)
    image_ondelette.SetSpacing(image.GetSpacing())
    image_ondelette.SetDirection(image.GetDirection())
    image_ondelette.SetOrigin(image.GetOrigin())
    su.PushToSlicer(image_ondelette, 'image_DenoisWave_level0-' + str(Nlevel))
def wavelet_energy(x, mother, nivel): coeffs = pywt.wavedecn(x, wavelet=mother, level=nivel) arr, _ = pywt.coeffs_to_array(coeffs) Et = np.sum(arr**2) cA = coeffs[0] Ea = 100 * np.sum(cA**2) / Et Ed = [] for k in range(1, len(coeffs)): cD = list(coeffs[k].values()) cD = np.asarray(cD) Ed.append(100 * np.sum(cD**2) / Et) return Ea, Ed
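A small usage sketch of `wavelet_energy` on a toy signal, assuming `numpy` and `pywt` are imported and the function above is in scope; the relative energies should sum to roughly 100.

import numpy as np

rng = np.random.default_rng(0)
t = np.linspace(0, 1, 1024)
# Low-frequency sine plus broadband noise: energy is split across scales.
x = np.sin(2 * np.pi * 5 * t) + 0.2 * rng.standard_normal(t.size)

Ea, Ed = wavelet_energy(x, mother='db4', nivel=4)
print(Ea, Ed, Ea + sum(Ed))  # approximately 100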
def test_default_level(): # default level is the maximum permissible for the transformed axes data = np.ones((128, 32, 4)) wavelet = ('db8', 'db1') for dec_func in [pywt.wavedec2, pywt.wavedecn]: for axes in [(0, 1), (2, 1), (0, 2)]: c = dec_func(data, wavelet, axes=axes) max_lev = np.min([pywt.dwt_max_level(data.shape[ax], wav) for ax, wav in zip(axes, wavelet)]) assert_equal(len(c[1:]), max_lev) for ax in [0, 1]: c = pywt.wavedecn(data, wavelet[ax], axes=(ax, )) assert_equal(len(c[1:]), pywt.dwt_max_level(data.shape[ax], wavelet[ax]))
def test_coeffs_to_array_padding(): rng = np.random.RandomState(1234) x1 = rng.randn(32, 32) mode = 'symmetric' coeffs = pywt.wavedecn(x1, 'db2', mode=mode) # padding=None raises a ValueError when tight packing is not possible assert_raises(ValueError, pywt.coeffs_to_array, coeffs, padding=None) # set padded values to nan coeff_arr, coeff_slices = pywt.coeffs_to_array(coeffs, padding=np.nan) npad = np.sum(np.isnan(coeff_arr)) assert_(npad > 0) # pad with zeros coeff_arr, coeff_slices = pywt.coeffs_to_array(coeffs, padding=0) assert_(np.sum(np.isnan(coeff_arr)) == 0) assert_(np.sum(coeff_arr == 0) == npad) # Haar case with N as a power of 2 can be tightly packed coeffs_haar = pywt.wavedecn(x1, 'haar', mode=mode) coeff_arr, coeff_slices = pywt.coeffs_to_array(coeffs_haar, padding=None) # shape of coeff_arr will match in this case, but not in general assert_equal(coeff_arr.shape, x1.shape)
def wavelet3d(I, level, wavelet="sym3", threshold=64.0, hard=True):
    mode = "symmetric"
    arr = np.float32(I)
    coeffs = wavedecn(arr, wavelet=wavelet, mode=mode, level=level)
    coeffs_H = list(coeffs)
    if hard:
        # hard thresholding of the approximation coefficients
        coeffs_H[0][coeffs_H[0] < threshold] = 0
    else:
        # soft thresholding: shrink magnitudes towards zero and clip at zero
        coeffs_H[0] = np.sign(coeffs_H[0]) * np.maximum(
            np.abs(coeffs_H[0]) - threshold, 0)
    arr_rec = waverecn(coeffs_H, wavelet=wavelet, mode=mode)
    return arr_rec
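A brief usage sketch, assuming `numpy` is imported, `wavedecn`/`waverecn` are imported from `pywt` as in the snippet above, and `wavelet3d` is in scope; the volume and threshold values are illustrative.

import numpy as np
from pywt import wavedecn, waverecn

rng = np.random.default_rng(0)
volume = rng.normal(loc=128.0, scale=16.0, size=(32, 32, 32)).astype(np.float32)

hard_rec = wavelet3d(volume, level=2, wavelet="sym3", threshold=64.0, hard=True)
soft_rec = wavelet3d(volume, level=2, wavelet="sym3", threshold=64.0, hard=False)
print(hard_rec.shape, soft_rec.shape)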
def wavelet_denoising2(NomDeLImage, Nlevel):
    image = su.PullFromSlicer(NomDeLImage)
    NumpyImage = sitk.GetArrayFromImage(image)
    max_lev = 6  # how many levels of decomposition to draw
    # see https://pywavelets.readthedocs.io/en/latest/ref/nd-dwt-and-idwt.html#pywt.wavedecn
    coeffs = pywt.wavedecn(NumpyImage, 'db2', mode='zero', level=max_lev)
    for levelR in range(max_lev - Nlevel):
        sigma = (1 / 0.6745) * maddest(coeffs[max_lev - levelR])
        uthresh = sigma * np.sqrt(2 * np.log(len(NumpyImage)))
        # threshold each detail coefficient array at this level
        coeffs[(max_lev - levelR)] = {
            k: pywt.threshold(v, value=uthresh, mode='hard')
            for k, v in coeffs[(max_lev - levelR)].items()}
    # reconstruct with the same mode as the decomposition ('zero')
    matrice_ondelette = pywt.waverecn(coeffs, 'db2', mode='zero')
    image_ondelette = sitk.GetImageFromArray(matrice_ondelette)
    image_ondelette.SetSpacing(image.GetSpacing())
    image_ondelette.SetDirection(image.GetDirection())
    image_ondelette.SetOrigin(image.GetOrigin())
    su.PushToSlicer(image_ondelette, 'image_DenoisWave_level0-' + str(Nlevel))
def _hdot_internal(x, bases, ntot, nmax, nlevels, sqrtP, nx, ny, real_type): nband = x.shape[0] nbasis = len(bases) alpha = np.zeros((nbasis, nband, nmax), dtype=real_type) for b in range(nbasis): base = bases[b] for l in range(nband): # decompose alphal = pywt.wavedecn(x[l], base, mode='zero', level=nlevels) # ravel and pad tmp, _, _ = pywt.ravel_coeffs(alphal) alpha[b, l] = np.pad(tmp / sqrtP, (0, nmax - ntot[b]), mode='constant') return alpha
def waveDriftFilter(self, signal, n=9):
    '''
    :param signal: signal input
    :param n: level of decomposition
    :return: signal - baseline

    The signal is decomposed into `n` levels and only the coarsest
    coefficients (up to index [1]) are kept to reconstruct the baseline.
    To remove baseline wandering, the baseline is subtracted from the
    input signal.
    '''
    waveletName = "bior1.5"
    coeffs = pywt.wavedecn(signal, waveletName, level=n)
    for i in range(2, len(coeffs), 1):
        coeffs[i] = self.ignoreCoefficient(coeffs[i])
    baseline = pywt.waverecn(coeffs, waveletName)
    filtered = signal - baseline[:len(signal)]
    return filtered
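A standalone sketch of the same baseline-removal idea, zeroing the finer detail coefficients directly instead of calling the class's `ignoreCoefficient` helper; the synthetic signal and settings are illustrative only.

import numpy as np
import pywt

rng = np.random.default_rng(0)
t = np.linspace(0, 10, 5000)
drift = 0.5 * np.sin(2 * np.pi * 0.05 * t)  # slow baseline wander
signal = np.sin(2 * np.pi * 5 * t) + drift + 0.05 * rng.standard_normal(t.size)

coeffs = pywt.wavedecn(signal, 'bior1.5', level=9)
# Keep only the coarsest content; zero the finer detail coefficients.
for i in range(2, len(coeffs)):
    coeffs[i] = {k: np.zeros_like(v) for k, v in coeffs[i].items()}
baseline = pywt.waverecn(coeffs, 'bior1.5')
filtered = signal - baseline[:len(signal)]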
def reconstruct(frames, dictionary, wavelet='haar'): """Filter and reconstruct video using wavelets Parameters ---------- frames : ndarray input array of gray frames, shape: (nframes, height, width) dictionary : dict levels and coeffs to keep coeff : str Selects one of the 8 subsets of the 3D wavelets coefficient "cube", corresponding to the chosen wavelet level. 3 characters long string, ex: 'aaa', 'aad', 'ada', 'add', ... a : approximation d : details order : (time, y, x) Returns ------- rec : ndarray reconstructed frames from the chosen wavelet level and coeff """ levels = list(dictionary.keys()) print(levels) level_max = max(levels) if level_max > 0: coeffs = pywt.wavedecn(frames, wavelet, level=level_max) print(len(coeffs)) print(coeffs[1].keys()) for i in range(len(coeffs)): level = level_max - i + 1 if i == 0: if 0 not in levels: coeffs[0] = np.zeros_like(coeffs[0]) continue for key in list(coeffs[i].keys()): if level in levels: if key in dictionary[level]: continue coeffs[i][key] = np.zeros_like(coeffs[i][key]) print('Reconstructing frames ..') rec = pywt.waverecn(coeffs, wavelet) else: rec = frames return rec
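A hedged usage sketch for `reconstruct`, based on how the dictionary is read in the code above: keys are decomposition levels (0 keeps the approximation) and values list the 3-character coefficient keys to retain. The frame stack and the chosen subband are illustrative only.

import numpy as np

rng = np.random.default_rng(0)
frames = rng.standard_normal((16, 64, 64))  # (nframes, height, width)

# Keep the approximation plus the temporal-detail subband at level 2.
dictionary = {0: [], 2: ['daa']}
rec = reconstruct(frames, dictionary, wavelet='haar')
print(rec.shape)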
def f_g(x):
    """Compute the cost function and its gradient for given input x.

    :param x: input point, ndarray of float64.
    :return: f, g
        - f the function value, scalar;
        - g the gradient, ndarray of same size and type as x.

    Notes:
    - We cast from (at input) and to (at output) float64, as l-bfgs wants
      8-byte floats.
    - There are lots of shape manipulations, some of them could possibly be
      avoided. This is due to l-bfgs using 1d arrays whereas pywt relies on
      lists of (tuples of) arrays. Here it was chosen to be as explicit and
      clear as possible, possibly sacrificing some speed along the way.

    Written by P. DERIAN 2018-01-09.
    Updated by P. DERIAN 2018-01-11: generic n-d version.
    """
    ### rebuild motion field
    # reshape 1d vector to 3d array
    x = x.reshape(shape).astype(self.core.dtype)
    # extract coefficients, complement with finer scales and reshape for pywt
    # Note: ideally we would not complement, as finer scales are zeros.
    # This is made necessary by pywt.
    C_list = (pywt.array_to_coeffs(xi, self.slices[i][:step + 1],
                                   output_format='wavedecn')
              + self.C_list[i][step + 1:]
              for i, xi in enumerate(x))
    # rebuild motion field
    U = (pywt.waverecn(Ci, self.wav, mode=self.wav_boundary_condition)
         for Ci in C_list)
    ### evaluate DFD and gradient
    func_value, G = self.core.DFD_gradient(self.im0, self.im1, U)
    # decompose gradient over wavelet basis, keep coefficients only up to
    # current step
    G_list = (pywt.wavedecn(Gi, self.wav, level=self.levels_decomp,
                            mode=self.wav_boundary_condition)[:step + 1]
              for Gi in G)
    # reshape as array, flatten and concatenate for l-bfgs
    # (hstack needs a sequence rather than a generator with recent numpy)
    G_array = numpy.hstack(
        [pywt.coeffs_to_array(Gi)[0].ravel() for Gi in G_list])
    ### evaluate regularizer and its gradient
    # [TODO]
    ### return DFD (+ regul) value, and its gradient w.r.t. x
    return func_value, G_array.astype(numpy.float64)
def Ondelette_raconte(NomDeLImage):
    timeRMR1 = time.time()
    image = su.PullFromSlicer(NomDeLImage)
    NumpyImage = sitk.GetArrayFromImage(image)
    max_lev = 2  # how many levels of decomposition to draw
    # see https://pywavelets.readthedocs.io/en/latest/ref/nd-dwt-and-idwt.html#pywt.wavedecn
    c = pywt.wavedecn(NumpyImage, 'db2', mode='zero', level=max_lev)
    # coeffs[-2] = {k: np.zeros_like(v) for k, v in coeffs[-2].items()}
    # matrice_ondelette = pywt.waverecn(c, 'db2')  # 'periodization' or 'zero' mode
    # image_ondelette = sitk.GetImageFromArray(matrice_ondelette)
    # su.PushToSlicer(image_ondelette, 'image_ondelette')
    c_arr, c_slices = pywt.coeffs_to_array(c, padding=0, axes=None)
    ddd = c_arr[c_slices[2]['ddd']]
    # ddd = sitk.GetImageFromArray(c_arr[c_slices[2]['ddd']])  # details
    aaa = c_arr[c_slices[0]]
    # aaa = sitk.GetImageFromArray(c_arr[c_slices[0]])  # average
    IndiceQualite = SpatialFrequencyOptim2(ddd) / SpatialFrequencyOptim2(aaa)
    print(IndiceQualite)
    timeRMR2 = time.time()
    TimeForrunFunctionRMR2 = timeRMR2 - timeRMR1
    print("The processing function ran in " + str(TimeForrunFunctionRMR2) + " seconds")
def test_waverecn_coeff_reshape_odd():
    # verify round trip is correct:
    #   wavedecn -> coeffs_to_array -> array_to_coeffs -> waverecn
    rng = np.random.RandomState(1234)
    x1 = rng.randn(35, 33)
    for mode in pywt.Modes.modes:
        for wave in ['haar', ]:
            w = pywt.Wavelet(wave)
            maxlevel = pywt.dwt_max_level(np.min(x1.shape), w.dec_len)
            if maxlevel == 0:
                continue
            coeffs = pywt.wavedecn(x1, w, mode=mode)
            coeff_arr, coeff_slices = pywt.coeffs_to_array(coeffs)
            coeffs2 = pywt.array_to_coeffs(coeff_arr, coeff_slices)
            x1r = pywt.waverecn(coeffs2, w, mode=mode)
            # truncate reconstructed values to original shape
            # (index with a tuple of slices; lists of slices are deprecated)
            x1r = x1r[tuple(slice(s) for s in x1.shape)]
            assert_allclose(x1, x1r, rtol=1e-4, atol=1e-4)
def wavelet_recover(image, num_coeffs=None, mode='soft', wavelet_levels=None):
    # create the wavelet object (db1)
    wavelet = pywt.Wavelet('db1')
    a = image.shape[0]
    b = image.shape[1]
    # determine the decomposition level
    if wavelet_levels is None:
        dlen = wavelet.dec_len
        l1 = pywt.dwt_max_level(a, dlen)
        l2 = pywt.dwt_max_level(b, dlen)
        wavelet_levels = np.min([l1, l2])
        wavelet_levels = max(wavelet_levels - 3, 1)
    all_coeffs = pywt.wavedecn(image, wavelet=wavelet, level=wavelet_levels)
    # Return value: [cA_n, cD_n, cD_n-1, ..., cD_2, cD_1], a list whose
    # length depends on the decomposition level (e.g. level=5 gives 6 entries);
    # cA_n holds the approximation coefficients and
    # cD_n ... cD_1 hold the detail coefficients.
    detail_coeffs = all_coeffs[1:]
    alldata = []
    for level in detail_coeffs:
        for key in level:
            level[key] = level[key].tolist()
            alldata = np.append(alldata, level[key])
    alldata = np.abs(alldata)
    alldata = np.sort(alldata)
    sh = all_coeffs[0].shape
    basecoeffs = sh[0] * sh[1]
    threshold = alldata[-(num_coeffs - basecoeffs)]
    # recompute the coefficients from the threshold (according to the chosen mode)
    denoised_detail = [{
        key: pywt.threshold(level[key], value=threshold, mode=mode)
        for key in level
    } for level in detail_coeffs]
    denoised_coeffs = [all_coeffs[0]] + denoised_detail
    parameter = np.array(pywt.waverecn(denoised_coeffs, wavelet))[:a, :b]
    return parameter
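A hedged usage sketch for `wavelet_recover`, assuming `numpy` and `pywt` are imported and the function above is in scope; the image size and coefficient budget are illustrative only.

import numpy as np

rng = np.random.default_rng(0)
image = rng.standard_normal((128, 128))

# Keep roughly the 2000 largest wavelet coefficients (approximation included),
# soft-thresholding the rest away before reconstruction.
recovered = wavelet_recover(image, num_coeffs=2000, mode='soft')
print(recovered.shape)  # (128, 128)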
def test_ravel_invalid_input(): # wavedec ravel does not support any coefficient arrays being set to None coeffs = pywt.wavedec(np.ones(8), 'haar') coeffs[1] = None assert_raises(ValueError, pywt.ravel_coeffs, coeffs) # wavedec2 ravel cannot have None or a tuple/list of None coeffs = pywt.wavedec2(np.ones((8, 8)), 'haar') coeffs[1] = (None, None, None) assert_raises(ValueError, pywt.ravel_coeffs, coeffs) coeffs[1] = [None, None, None] assert_raises(ValueError, pywt.ravel_coeffs, coeffs) coeffs[1] = None assert_raises(ValueError, pywt.ravel_coeffs, coeffs) # wavedecn ravel cannot have any dictionary elements as None coeffs = pywt.wavedecn(np.ones((8, 8, 8)), 'haar') coeffs[1]['ddd'] = None assert_raises(ValueError, pywt.ravel_coeffs, coeffs)
def time_wavedecn(self, D, n, wavelet, dtype): pywt.wavedecn(self.data, wavelet)
plt.figure() plt.title('Reconstructed with level %i of details' %(n-1)) plt.imshow(dCat,cmap=colormap) return colormap=plt.get_cmap('gray') cat = imageio.imread('/home/az/Desktop/Wavelets/Imagem/Im.jpg') cat = cat[:,:,0] plt.figure() plt.title('Original Image') plt.imshow(cat,cmap=colormap) wavelet = 'db2' lv = 7 coeffs = pywt.wavedecn(cat, wavelet, level=lv) arr, coeff_slices = pywt.coeffs_to_array(coeffs) for n in range(1,len(coeff_slices)): PrintReconstructions(coeffs,n) plt.figure() vec = [np.linalg.norm(arr[coeff_slices[0]])] for i in range(1,7): vec.append(np.linalg.norm(arr[coeff_slices[i]['dd']])) vec = vec/np.linalg.norm(vec) plt.plot([0,1,2,3,4,5,6], vec, 'o') plt.grid()
def apply_dwt_filter(y, dwt_type, dwt_level, dwt_thresh_func, dwt_thresh_type): coeffs = pywt.wavedecn(y, dwt_type, level=dwt_level) for i in range(1,dwt_level+1): coeffs[i]["d"] = pywt.threshold(coeffs[i]["d"], thselect(coeffs[i]["d"], dwt_thresh_type), dwt_thresh_func) return(pywt.waverecn(coeffs, dwt_type))
    yp = np.concatenate(((0, ), yp, (shape[1], )))
    min_dy = np.min(np.diff(yp))
    img = np.zeros(shape)
    for ix, x in enumerate(xp[:-1]):
        for iy, y in enumerate(yp[:-1]):
            # index with a tuple of slices (lists of slices are deprecated)
            slices = (slice(x, xp[ix + 1]), slice(y, yp[iy + 1]))
            val = rstate.rand(1)[0]
            img[slices] = val
    return img


# create an anisotropic piecewise constant image
img = mondrian((128, 128))

# perform DWT
coeffs_dwt = pywt.wavedecn(img, wavelet='db1', level=None)

# convert coefficient dictionary to a single array
coeff_array_dwt, _ = pywt.coeffs_to_array(coeffs_dwt)

# perform fully separable DWT
fswavedecn_result = pywt.fswavedecn(img, wavelet='db1')

nnz_dwt = np.sum(coeff_array_dwt != 0)
nnz_fswavedecn = np.sum(fswavedecn_result.coeffs != 0)

print("Number of nonzero wavedecn coefficients = {}".format(nnz_dwt))
print("Number of nonzero fswavedecn coefficients = {}".format(nnz_fswavedecn))

img = mondrian()
fig, axes = plt.subplots(1, 3)
def test_wavedecn_complex(): data = np.ones((4, 4, 4)) + 1j coeffs = pywt.wavedecn(data, 'db1') assert_allclose(pywt.waverecn(coeffs, 'db1'), data, rtol=1e-12)
def test_waverecn_dtypes(): x = np.ones((4, 4, 4)) for dt, tol in dtypes_and_tolerances: coeffs = pywt.wavedecn(x.astype(dt), 'db1') assert_allclose(pywt.waverecn(coeffs, 'db1'), x, atol=tol, rtol=tol)
def _wavelet_threshold(img, wavelet, threshold=None, sigma=None, mode='soft',
                       wavelet_levels=None):
    """Perform wavelet denoising.

    Parameters
    ----------
    img : ndarray (2d or 3d) of ints, uints or floats
        Input data to be denoised. `img` can be of any numeric type,
        but it is cast into an ndarray of floats for the computation
        of the denoised image.
    wavelet : string
        The type of wavelet to perform. Can be any of the options
        pywt.wavelist outputs. For example, this may be any of ``{db1, db2,
        db3, db4, haar}``.
    sigma : float, optional
        The standard deviation of the noise. The noise is estimated when sigma
        is None (the default) by the method in [2]_.
    threshold : float, optional
        The thresholding value. All wavelet coefficients less than this value
        are set to 0. The default value (None) uses the BayesShrink method
        found in [1]_ to remove noise.
    mode : {'soft', 'hard'}, optional
        An optional argument to choose the type of denoising performed. It is
        noted that choosing soft thresholding given additive noise finds the
        best approximation of the original image.
    wavelet_levels : int or None, optional
        The number of wavelet decomposition levels to use. The default is
        three less than the maximum number of possible decomposition levels
        (see Notes below).

    Returns
    -------
    out : ndarray
        Denoised image.

    Notes
    -----
    Reference [1]_ used four levels of wavelet decomposition. To be more
    flexible for a range of input sizes, the implementation here stops 3
    levels prior to the maximum level of decomposition for `img` (the exact
    # of levels thus depends on `img.shape` and the chosen wavelet).
    BayesShrink variance estimation doesn't work well on levels with
    extremely small coefficient arrays. This is the rationale for skipping a
    few of the coarsest levels. The user can override the automated setting
    by explicitly specifying `wavelet_levels`.

    References
    ----------
    .. [1] Chang, S. Grace, Bin Yu, and Martin Vetterli. "Adaptive wavelet
           thresholding for image denoising and compression." Image
           Processing, IEEE Transactions on 9.9 (2000): 1532-1546.
           DOI: 10.1109/83.862633
    .. [2] D. L. Donoho and I. M. Johnstone. "Ideal spatial adaptation by
           wavelet shrinkage." Biometrika 81.3 (1994): 425-455.
           DOI: 10.1093/biomet/81.3.425
    """
    wavelet = pywt.Wavelet(wavelet)

    # Determine the number of wavelet decomposition levels
    if wavelet_levels is None:
        # Determine the maximum number of possible levels for img
        dlen = wavelet.dec_len
        wavelet_levels = np.min(
            [pywt.dwt_max_level(s, dlen) for s in img.shape])

        # Skip coarsest wavelet scales (see Notes in docstring).
        wavelet_levels = max(wavelet_levels - 3, 1)

    coeffs = pywt.wavedecn(img, wavelet=wavelet, level=wavelet_levels)
    # Detail coefficients at each decomposition level
    dcoeffs = coeffs[1:]

    if sigma is None:
        # Estimate the noise via the method in [2]_
        detail_coeffs = dcoeffs[-1]['d' * img.ndim]
        sigma = _sigma_est_dwt(detail_coeffs, distribution='Gaussian')

    if threshold is None:
        # The BayesShrink thresholds from [1]_ in docstring
        var = sigma**2
        threshold = [{key: _bayes_thresh(level[key], var) for key in level}
                     for level in dcoeffs]

    if np.isscalar(threshold):
        # A single threshold for all coefficient arrays
        denoised_detail = [{key: pywt.threshold(level[key], value=threshold,
                                                mode=mode)
                            for key in level} for level in dcoeffs]
    else:
        # Dict of unique threshold coefficients for each detail coeff. array
        denoised_detail = [{key: pywt.threshold(level[key],
                                                value=thresh[key], mode=mode)
                            for key in level}
                           for thresh, level in zip(threshold, dcoeffs)]
    denoised_coeffs = [coeffs[0]] + denoised_detail
    return pywt.waverecn(denoised_coeffs, wavelet)
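The helpers `_bayes_thresh` and `_sigma_est_dwt` are not shown in this snippet. Minimal sketches consistent with references [1]_ and [2]_ follow; the exact signatures and the `eps` guard are assumptions, not the original implementation.

import numpy as np
import scipy.stats


def _bayes_thresh(details, var):
    """BayesShrink threshold for a zero-mean detail coefficient array,
    given the noise variance ``var`` (sketch based on [1]_)."""
    dvar = np.mean(details * details)   # estimate of E[Y^2]
    eps = np.finfo(details.dtype).eps   # guard against division by zero
    return var / np.sqrt(max(dvar - var, eps))


def _sigma_est_dwt(detail_coeffs, distribution='Gaussian'):
    """Robust noise standard deviation estimate from the finest detail
    coefficients via the median absolute deviation (sketch based on [2]_)."""
    if distribution.lower() != 'gaussian':
        raise ValueError("Only Gaussian noise estimation is sketched here.")
    # Consider regions with detail coefficients exactly zero to be masked out.
    detail_coeffs = detail_coeffs[np.nonzero(detail_coeffs)]
    # 75th quantile of the underlying, symmetric noise distribution.
    denom = scipy.stats.norm.ppf(0.75)
    return np.median(np.abs(detail_coeffs)) / denom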
def wtn(data_to_wt, wavelet='db4', mode='per', dims=None, dimOpt=None, dimLenOpt=None): xfmData = pywt.wavedecn(data_to_wt,wavelet,mode) return wvlt2mat(xfmData, dims, dimOpt, dimLenOpt)
def _wavelet_threshold(image, wavelet, method=None, threshold=None,
                       sigma=None, mode='soft', wavelet_levels=None):
    """Perform wavelet thresholding.

    Parameters
    ----------
    image : ndarray (2d or 3d) of ints, uints or floats
        Input data to be denoised. `image` can be of any numeric type,
        but it is cast into an ndarray of floats for the computation
        of the denoised image.
    wavelet : string
        The type of wavelet to perform. Can be any of the options
        pywt.wavelist outputs. For example, this may be any of ``{db1, db2,
        db3, db4, haar}``.
    method : {'BayesShrink', 'VisuShrink'}, optional
        Thresholding method to be used. The currently supported methods are
        "BayesShrink" [1]_ and "VisuShrink" [2]_. If it is set to None, a
        user-specified ``threshold`` must be supplied instead.
    threshold : float, optional
        The thresholding value to apply during wavelet coefficient
        thresholding. The default value (None) uses the selected ``method``
        to estimate appropriate threshold(s) for noise removal.
    sigma : float, optional
        The standard deviation of the noise. The noise is estimated when sigma
        is None (the default) by the method in [2]_.
    mode : {'soft', 'hard'}, optional
        An optional argument to choose the type of denoising performed. It is
        noted that choosing soft thresholding given additive noise finds the
        best approximation of the original image.
    wavelet_levels : int or None, optional
        The number of wavelet decomposition levels to use. The default is
        three less than the maximum number of possible decomposition levels
        (see Notes below).

    Returns
    -------
    out : ndarray
        Denoised image.

    References
    ----------
    .. [1] Chang, S. Grace, Bin Yu, and Martin Vetterli. "Adaptive wavelet
           thresholding for image denoising and compression." Image
           Processing, IEEE Transactions on 9.9 (2000): 1532-1546.
           :DOI:`10.1109/83.862633`
    .. [2] D. L. Donoho and I. M. Johnstone. "Ideal spatial adaptation by
           wavelet shrinkage." Biometrika 81.3 (1994): 425-455.
           :DOI:`10.1093/biomet/81.3.425`
    """
    wavelet = pywt.Wavelet(wavelet)
    if not wavelet.orthogonal:
        warn(("Wavelet thresholding was designed for use with orthogonal "
              "wavelets. For nonorthogonal wavelets such as {}, results are "
              "likely to be suboptimal.").format(wavelet.name))

    # original_extent is used to workaround PyWavelets issue #80
    # odd-sized input results in an image with 1 extra sample after waverecn
    original_extent = tuple(slice(s) for s in image.shape)

    # Determine the number of wavelet decomposition levels
    if wavelet_levels is None:
        # Determine the maximum number of possible levels for image
        dlen = wavelet.dec_len
        wavelet_levels = np.min(
            [pywt.dwt_max_level(s, dlen) for s in image.shape])

        # Skip coarsest wavelet scales (see Notes in docstring).
        wavelet_levels = max(wavelet_levels - 3, 1)

    coeffs = pywt.wavedecn(image, wavelet=wavelet, level=wavelet_levels)
    # Detail coefficients at each decomposition level
    dcoeffs = coeffs[1:]

    if sigma is None:
        # Estimate the noise via the method in [2]_
        detail_coeffs = dcoeffs[-1]['d' * image.ndim]
        sigma = _sigma_est_dwt(detail_coeffs, distribution='Gaussian')

    if method is not None and threshold is not None:
        warn(("Thresholding method {} selected. The user-specified threshold "
              "will be ignored.").format(method))

    if threshold is None:
        var = sigma**2
        if method is None:
            raise ValueError(
                "If method is None, a threshold must be provided.")
        elif method == "BayesShrink":
            # The BayesShrink thresholds from [1]_ in docstring
            threshold = [{key: _bayes_thresh(level[key], var)
                          for key in level} for level in dcoeffs]
        elif method == "VisuShrink":
            # The VisuShrink thresholds from [2]_ in docstring
            threshold = _universal_thresh(image, sigma)
        else:
            raise ValueError("Unrecognized method: {}".format(method))

    if np.isscalar(threshold):
        # A single threshold for all coefficient arrays
        denoised_detail = [{key: pywt.threshold(level[key], value=threshold,
                                                mode=mode)
                            for key in level} for level in dcoeffs]
    else:
        # Dict of unique threshold coefficients for each detail coeff. array
        denoised_detail = [{key: pywt.threshold(level[key],
                                                value=thresh[key], mode=mode)
                            for key in level}
                           for thresh, level in zip(threshold, dcoeffs)]
    denoised_coeffs = [coeffs[0]] + denoised_detail
    return pywt.waverecn(denoised_coeffs, wavelet)[original_extent]
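`_universal_thresh` (the VisuShrink threshold) is also not shown in this snippet; a short sketch consistent with reference [2]_, with the signature assumed from the call above.

import numpy as np


def _universal_thresh(img, sigma):
    """Universal (VisuShrink) threshold: sigma * sqrt(2 * ln(n)), where n is
    the number of samples in ``img`` (sketch based on [2]_)."""
    return sigma * np.sqrt(2 * np.log(img.size))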