def denoise(nblck, filename, mode='sym', wv='sym5'):
    # NOTE: recent PyWavelets spells this extension mode 'symmetric';
    # 'sym' is the legacy short name.
    from statsmodels.robust import mad

    # pywt.dwt is a single-level transform; wavedec gives a multilevel decomposition.
    # noisy_coefs = pywt.wavedec(nblck, 'sym5', level=5, mode='per')
    noisy_coefs = pywt.wavedec(nblck, wavelet=wv, mode=mode)

    # Noise estimate from the finest-scale detail coefficients,
    # then the Donoho-Johnstone universal threshold.
    # uthresh = np.std(ca) / 2
    sigma = mad(noisy_coefs[-1])
    uthresh = sigma * np.sqrt(2 * np.log(len(nblck)))

    denoised = noisy_coefs[:]
    denoised[1:] = [pywt.threshold(i, value=uthresh, mode='soft')
                    for i in denoised[1:]]
    signal = pywt.waverec(denoised, wavelet=wv, mode=mode)

    from matplotlib import pyplot as plt
    fig, axes = plt.subplots(1, 2, sharey=True, sharex=True, figsize=(8, 4))
    ax1, ax2 = axes
    ax1.plot(signal)
    # ax1.set_xlim(0, 2**10)
    ax1.set_title("Recovered Signal")
    ax1.margins(.1)
    ax2.plot(nblck)
    ax2.set_title("Noisy Signal")
    for ax in fig.axes:
        ax.tick_params(labelbottom=False, top=False, bottom=False,
                       left=False, right=False)
    fig.tight_layout()
    fig.savefig(filename + '_' + wv + '.pdf')
    plt.clf()
    return signal
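# A minimal usage sketch for the denoise() helper above. The data, filename and
# parameter choices here are made up for illustration; it assumes pywt, numpy (as np),
# statsmodels and matplotlib are installed and imported as in the snippet.
import numpy as np

rng = np.random.default_rng(0)
t = np.linspace(0, 1, 1024)
noisy_block = np.sin(2 * np.pi * 8 * t) + 0.4 * rng.standard_normal(t.size)

# Writes 'example_sym5.pdf' and returns the reconstructed signal.
recovered = denoise(noisy_block, 'example', mode='symmetric', wv='sym5')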
def _wavelet_threshold(img, wavelet, threshold=None, sigma=None, mode='soft'):
    """Perform wavelet denoising.

    Parameters
    ----------
    img : ndarray (2d or 3d) of ints, uints or floats
        Input data to be denoised. `img` can be of any numeric type,
        but it is cast into an ndarray of floats for the computation
        of the denoised image.
    wavelet : string
        The type of wavelet to perform. Can be any of the options
        pywt.wavelist outputs. For example, this may be any of ``{db1, db2,
        db3, db4, haar}``.
    sigma : float, optional
        The standard deviation of the noise. The noise is estimated when sigma
        is None (the default) by the method in [2]_.
    threshold : float, optional
        The thresholding value. All wavelet coefficients less than this value
        are set to 0. The default value (None) uses the BayesShrink method
        found in [1]_ to remove noise.
    mode : {'soft', 'hard'}, optional
        An optional argument to choose the type of denoising performed. It is
        noted that choosing soft thresholding given additive noise finds the
        best approximation of the original image.

    Returns
    -------
    out : ndarray
        Denoised image.

    References
    ----------
    .. [1] Chang, S. Grace, Bin Yu, and Martin Vetterli. "Adaptive wavelet
           thresholding for image denoising and compression." Image Processing,
           IEEE Transactions on 9.9 (2000): 1532-1546.
           DOI: 10.1109/83.862633
    .. [2] D. L. Donoho and I. M. Johnstone. "Ideal spatial adaptation by
           wavelet shrinkage." Biometrika 81.3 (1994): 425-455.
           DOI: 10.1093/biomet/81.3.425
    """
    coeffs = pywt.wavedecn(img, wavelet=wavelet)
    detail_coeffs = coeffs[-1]['d' * img.ndim]

    if sigma is None:
        # Estimate the noise via the method in [2]_
        sigma = np.median(np.abs(detail_coeffs)) / 0.67448975019608171

    if threshold is None:
        # The BayesShrink threshold from [1]_ in docstring
        threshold = sigma**2 / np.sqrt(max(img.var() - sigma**2, 0))

    denoised_detail = [{key: pywt.threshold(level[key], value=threshold,
                                            mode=mode) for key in level}
                       for level in coeffs[1:]]
    denoised_root = pywt.threshold(coeffs[0], value=threshold, mode=mode)
    denoised_coeffs = [denoised_root] + denoised_detail
    return pywt.waverecn(denoised_coeffs, wavelet)
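# A small usage sketch for _wavelet_threshold() above on synthetic data (hypothetical
# image; assumes pywt and numpy as np are imported). With both `threshold` and `sigma`
# left as None, the BayesShrink-style threshold is picked automatically.
import numpy as np

rng = np.random.default_rng(1)
clean = np.zeros((64, 64))
clean[16:48, 16:48] = 1.0
noisy = clean + 0.1 * rng.standard_normal(clean.shape)

denoised = _wavelet_threshold(noisy, wavelet='db2', mode='soft')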
def denoise(self, series, configs):
    # Perform normalization
    normalized = (series - series.mean()) / (series.max() - series.min())

    # WaveShrink: single-level DWT
    cA, cD = pywt.dwt(normalized,
                      configs['preprocessing']['denoise']['wavelet'])

    # Threshold selection
    thr = np.std(normalized) / 23
    cA_shrinked = pywt.threshold(
        cA, thr, mode=configs['preprocessing']['denoise']['thr_mode'])
    cD_shrinked = pywt.threshold(
        cD, thr, mode=configs['preprocessing']['denoise']['thr_mode'])

    # Reconstruct the data from the shrunken coefficients
    denoised = pywt.idwt(cA_shrinked, cD_shrinked,
                         configs['preprocessing']['denoise']['wavelet'])

    # idwt returns one extra sample when the input length is odd; trim it
    if len(denoised) > len(normalized):
        denoised = denoised[:-1]
    if len(denoised.shape) > 1:
        denoised = [x[0] for x in denoised]

    self.normalized = normalized
    self.denoised = denoised
def apply_threshold(output, scaler=1., input=None):
    """
    output: approximation and detail coefficients, arranged per level exactly
            as returned by swt, e.g. [(cA1, cD1), (cA2, cD2), ..., (cAn, cDn)]
    scaler: float that allows runtime tuning of the thresholding
    input:  sequence with length len(output) of (approx, detail) threshold
            pairs. If not None, these values are used for thresholding;
            if None, a threshold is estimated from each waveform itself.

    The list `output` is modified in place.
    """
    for j in range(len(output)):
        cA, cD = output[j]
        if input is None:
            # MAD-based sigma estimate and universal threshold;
            # the same value is used for both bands.
            dev = np.median(np.abs(cD - np.median(cD))) / 0.6745
            thresh = math.sqrt(2 * math.log(len(cD))) * dev * scaler
            threshA = threshD = thresh
        else:
            threshA = scaler * input[j][0]
            threshD = scaler * input[j][1]
        cA = pywt.threshold(cA, threshA, 'soft')
        cD = pywt.threshold(cD, threshD, 'soft')
        output[j] = (cA, cD)
def wave(arr):
    # Work on first differences of the input series
    return_ = np.diff(arr)
    # return_ = return_[1:]  # close2 / close
    # print(pywt.wavelist())

    # Wavelet transform of the returns, threshold, then inverse transform.
    method = 'haar'
    mode_ = "soft"
    (ca, cd) = pywt.dwt(return_, method)
    cat = pywt.threshold(ca, 0.3 * np.std(ca), mode=mode_)
    cdt = pywt.threshold(cd, 0.3 * np.std(cd), mode=mode_)
    tx = pywt.idwt(cat, cdt, method, "smooth")
    # tx = pd.DataFrame(tx, index=return_.index)

    # Get back to the stock price from the denoised returns:
    # prepend the starting price, then cumulate.
    start_price = arr[0]
    # txx = tx.iloc[:, 0]
    # txx = np.exp(tx)
    temp = np.array([start_price])
    tx = np.hstack((temp, tx))
    txx = np.cumsum(tx)
    # txx = pd.Series(txx, index=arr.index)
    # txx = pd.DataFrame(txx, index=return_.index)
    return txx
def wavelet_thresholding(array_of_wavelet_coeff, image, mode_thresholding):
    """
    :param array_of_wavelet_coeff: wavelet coefficients: the approximation
        coefficients followed, per level, by the (horizontal, vertical,
        diagonal) detail coefficients
    :param image: input image, passed through to threshold_value()
    :param mode_thresholding: {'soft', 'hard', 'greater', 'less'}
    :return: thresholded wavelet coefficients.
    """
    cA = array_of_wavelet_coeff[0]
    denoise_array = [cA]
    for i in range(len(array_of_wavelet_coeff) - 1):
        # Threshold each detail band with a value derived from the diagonal band
        cH = pywt.threshold(array_of_wavelet_coeff[i + 1][0],
                            threshold_value(array_of_wavelet_coeff[i + 1][2],
                                            len(array_of_wavelet_coeff[i + 1][0]),
                                            image),
                            mode=mode_thresholding)
        cV = pywt.threshold(array_of_wavelet_coeff[i + 1][1],
                            threshold_value(array_of_wavelet_coeff[i + 1][2],
                                            len(array_of_wavelet_coeff[i + 1][1]),
                                            image),
                            mode=mode_thresholding)
        cD = pywt.threshold(array_of_wavelet_coeff[i + 1][2],
                            threshold_value(array_of_wavelet_coeff[i + 1][2],
                                            len(array_of_wavelet_coeff[i + 1][2]),
                                            image),
                            mode=mode_thresholding)
        denoise_array.append((cH, cV, cD))
    return denoise_array
def __apply_wavelet_transform(self, x):
    (ca, cd) = pywt.dwt(x, "haar")
    cat = pywt.threshold(ca, np.std(ca), mode="soft")
    cdt = pywt.threshold(cd, np.std(cd), mode="soft")
    tx = pywt.idwt(cat, cdt, "haar")
    return tx
def test_nonnegative_garotte():
    thresh = 0.3
    data_real = np.linspace(-1, 1, 100)
    for dtype in float_dtypes:
        if dtype in real_dtypes:
            data = np.asarray(data_real, dtype=dtype)
        else:
            data = np.asarray(data_real + 0.1j, dtype=dtype)
        d_hard = pywt.threshold(data, thresh, 'hard')
        d_soft = pywt.threshold(data, thresh, 'soft')
        d_garotte = pywt.threshold(data, thresh, 'garotte')

        # check dtypes
        assert_equal(d_hard.dtype, data.dtype)
        assert_equal(d_soft.dtype, data.dtype)
        assert_equal(d_garotte.dtype, data.dtype)

        # values < threshold are zero
        lt = np.where(np.abs(data) < thresh)
        assert_(np.all(d_garotte[lt] == 0))

        # values > than the threshold are intermediate between soft and hard
        gt = np.where(np.abs(data) > thresh)
        gt_abs_garotte = np.abs(d_garotte[gt])
        assert_(np.all(gt_abs_garotte < np.abs(d_hard[gt])))
        assert_(np.all(gt_abs_garotte > np.abs(d_soft[gt])))
def wavelet_denoise(self, wavelet='db1', plot='Y', level=1):
    # Only built for a level-1 decomposition
    self.Xtrans = self.XData.transpose()
    self.Xnormtrans = []
    self.wavecoef = []
    self.thresh_wavecoef = []
    self.denoisedXtrans = []
    for i in range(len(self.Xtrans)):
        self.maxwave = pwt.dwt_max_level(len(self.Xtrans[i]), wavelet)
        if level != 'Auto':
            self.level = level
        else:
            self.level = self.maxwave
        self.wavecoef.append(
            pwt.wavedec(self.Xtrans[i], wavelet, level=self.level))
        # Threshold each band at mean(|c|) - std(|c|)
        self.thresh_A = (np.average(np.abs(self.wavecoef[i][0]))
                         - np.std(np.abs(self.wavecoef[i][0])))
        self.thresh_D = (np.average(np.abs(self.wavecoef[i][1]))
                         - np.std(np.abs(self.wavecoef[i][1])))
        self.thresh_wavecoef.append(
            (pwt.threshold(self.wavecoef[i][0], self.thresh_A),
             pwt.threshold(self.wavecoef[i][1], self.thresh_D)))
        self.denoisedXtrans.append(
            pwt.idwt(self.thresh_wavecoef[-1][0],
                     self.thresh_wavecoef[-1][1], wavelet))
    self.XDataNT = np.asarray(self.denoisedXtrans)
    self.XData = self.XDataNT.transpose()
    if plot != 'N':
        # `plot` is used here as a column index into the transposed data
        plt.plot(self.Xtrans[plot])
        plt.plot(self.denoisedXtrans[plot])
        plt.show()
def encodes(self, o: TSTensor):
    """
    1. Adapted from the waveletSmooth function found here:
       http://connor-johnson.com/2016/01/24/using-pywavelets-to-remove-high-frequency-noise/
    2. Threshold equation and use of hard mode in threshold as mentioned in
       section '3.2 denoising based on optimized singular values' from the
       paper by Tomas Vantuch:
       http://dspace.vsb.cz/bitstream/handle/10084/133114/VAN431_FEI_P1807_1801V001_2018.pdf
    """
    if self.magnitude <= 0:
        return o
    seq_len = o.shape[-1]
    # Decompose to get the wavelet coefficients
    coeff = pywt.wavedec(o.cpu(), self.wavelet, mode=self.pad_mode)
    if self.thr is None:
        # Calculate sigma for the threshold as defined in the paper above.
        # As noted by @harshit92, MAD in the paper is the Mean Absolute
        # Deviation, not the Median Absolute Deviation.
        sigma = (1 / 0.6745) * maddest(coeff[-self.level])
        # Calculate the universal threshold
        uthr = sigma * np.sqrt(2 * np.log(seq_len))
        coeff[1:] = (pywt.threshold(c, value=uthr, mode=self.thr_mode)
                     for c in coeff[1:])
    elif self.thr == 'random':
        coeff[1:] = (pywt.threshold(c, value=np.random.rand(),
                                    mode=self.thr_mode) for c in coeff[1:])
    else:
        coeff[1:] = (pywt.threshold(c, value=self.thr, mode=self.thr_mode)
                     for c in coeff[1:])
    # Reconstruct the signal using the thresholded coefficients
    output = o.new(
        pywt.waverec(coeff, self.wavelet, mode=self.pad_mode)[..., :seq_len])
    if self.ex is not None:
        output[..., self.ex, :] = o[..., self.ex, :]
    return output
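# `maddest` is not shown in the snippet above. Based on its comment (MAD here means
# Mean Absolute Deviation, not Median Absolute Deviation), a plausible sketch is the
# following; treat it as an assumption rather than the original helper.
import numpy as np

def maddest(d, axis=None):
    # Mean absolute deviation from the mean.
    return np.mean(np.absolute(d - np.mean(d, axis)), axis)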
def w2d(imArray, imagePath):
    # 'db1' is the wavelet passed to dwt/idwt (the original named this variable
    # `mode`, but it is the second positional argument, i.e. the wavelet).
    wavelet = 'db1'
    (ca, cd) = pywt.dwt(imArray, wavelet)
    cat = pywt.threshold(ca, np.std(ca) / 250)
    cdt = pywt.threshold(cd, np.std(cd) / 250)
    ts_rec = pywt.idwt(cat, cdt, wavelet)  # reconstruction, currently unused
    return np.array(cat)
def wavelet_transform(raw):
    (ca, cd) = pywt.dwt(raw, "haar")
    cat = pywt.threshold(ca, np.std(ca), mode="soft")
    cdt = pywt.threshold(cd, np.std(cd), mode="soft")
    trans_raw = pywt.idwt(cat, cdt, "haar")
    # Fall back to the raw input if the transform produced NaNs
    if np.isnan(trans_raw).any():
        return raw
    return trans_raw
def haar(img):
    coefs = pywt.wavedec2(img, 'haar', level=2)
    # Soft-threshold the detail coefficients of both levels
    coefs[1] = pywt.threshold(coefs[1], 5, 'soft', 0)
    coefs[2] = pywt.threshold(coefs[2], 5, 'soft', 0)
    print(coefs[1], coefs[2])
    con_img = pywt.waverec2(coefs, 'haar')
    con_img = con_img.astype(np.uint8)  # cast back to an 8-bit image
    return con_img
def FISTA_Thick(P, W, K, phi, lambda2, lambda2_ref, m, n, lambda_threshold,
                delta_noise, tol):
    db4 = pywt.Wavelet('db4')
    dirty_F = form_F_dirty(K, W, P, phi, lambda2, lambda2_ref, n)
    X_temp = dirty_F
    X = X_temp
    t_new = 1
    niter = int(np.floor(lambda_threshold / delta_noise))
    for i in range(0, niter):
        X_old = X_temp
        t_old = t_new

        # Gradient step
        comb = X
        F_comb = form_P_meas(W, comb, phi, lambda2, lambda2_ref, m)
        D = F_comb - P
        if i % 1000 == 0:
            objf = 0.5 * np.sqrt(np.sum(np.abs(D)**2))
            print("Iteration - ", i, ": ", objf)
        comb = comb - form_F_li(K, D, phi, lambda2, lambda2_ref, n)

        # Soft-threshold the wavelet coefficients of the real and imaginary
        # parts ('zero' is the current spelling of the legacy 'zpd' mode).
        aux_Xreal = comb.real
        aux_Ximag = comb.imag
        re_coeffs = pywt.wavedec(aux_Xreal, db4, level=3, mode='zero')
        im_coeffs = pywt.wavedec(aux_Ximag, db4, level=3, mode='zero')
        thres_re_coeffs = [pywt.threshold(j, lambda_threshold, 'soft')
                           for j in re_coeffs]
        thres_im_coeffs = [pywt.threshold(k, lambda_threshold, 'soft')
                           for k in im_coeffs]
        aux_Xreal = pywt.waverec(thres_re_coeffs, 'db4', mode='zero')
        aux_Ximag = pywt.waverec(thres_im_coeffs, 'db4', mode='zero')
        X_temp = aux_Xreal + 1j * aux_Ximag

        norm = np.sum(np.abs(X_temp - X_old))
        if norm <= tol:
            print("Iterations: ", i)
            print("Exit due to tolerance: ", norm, "<= ", tol)
            break

        # Momentum step (FISTA acceleration)
        t_new = (1 + np.sqrt(1 + 4 * t_old**2)) / 2
        aux_Xreal = X_temp.real + (t_old - 1) / t_new * (X_temp.real - X_old.real)
        aux_Ximag = X_temp.imag + (t_old - 1) / t_new * (X_temp.imag - X_old.imag)
        X = aux_Xreal + 1j * aux_Ximag

        # Shrink the threshold for the next iteration
        lambda_threshold = lambda_threshold - delta_noise
    print("Max iterations reached")
    return X_temp
def denoise(X):
    print('denoising')
    X_denoised = []
    for x in X:
        thd = np.empty(x.shape)
        thd[0] = pywt.threshold(x[0], np.median(x[0]), 'hard')
        thd[1] = pywt.threshold(x[1], np.median(x[1]), 'hard')
        X_denoised.append(thd)
    return X_denoised
def wavelet_transformation(audio_data):
    (ca, cd) = pywt.dwt(audio_data, 'haar')
    cat = pywt.threshold(ca, np.std(ca) / 2, 'soft')
    cdt = pywt.threshold(cd, np.std(cd) / 2, 'soft')
    audio_data_transformed = pywt.idwt(cat, cdt, 'haar')
    return audio_data_transformed
def DWT(series):
    ca, cb = pywt.dwt(series, 'haar')
    cat = pywt.threshold(ca, np.std(ca) / 2, mode='soft')
    cbt = pywt.threshold(cb, np.std(cb) / 2, mode='soft')
    signal = pywt.idwt(cat, cbt, 'haar')  # reconstruction, currently unused
    # Drop the last 8 approximation columns; this assumes `series` (and hence
    # `ca`) is 2-D.
    DWT8 = ca[:, :-8]  # sorted in ascending order
    return DWT8
def dwt_smooth(x, wavelet):
    cA, cD = pywt.dwt(x, wavelet)

    def make_threshold(x):
        return np.std(x) * np.sqrt(1 * np.log(x.size))

    cAt = pywt.threshold(cA, make_threshold(cA), mode="soft")
    cDt = pywt.threshold(cD, make_threshold(cD), mode="soft")
    tx = pywt.idwt(cAt, cDt, wavelet)
    return tx
def dwt_smoother(x, wavelet, smooth_factor: float = 1.) -> np.ndarray:
    cA, cD = pywt.dwt(x, wavelet)
    cAt = pywt.threshold(cA, smoothing_threshold(cA, smooth_factor), mode='soft')
    cDt = pywt.threshold(cD, smoothing_threshold(cD, smooth_factor), mode='soft')
    tx = pywt.idwt(cAt, cDt, wavelet)
    return tx
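# `smoothing_threshold` is not defined in the snippet above. A plausible sketch,
# mirroring make_threshold() in dwt_smooth() but with a tunable factor; this is an
# assumption, not the original helper.
import numpy as np

def smoothing_threshold(x, smooth_factor: float = 1.) -> float:
    # Universal-style threshold scaled by smooth_factor.
    return np.std(x) * np.sqrt(smooth_factor * np.log(x.size))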
def softthreshold_bandas(listpatches, umbrales, t):
    listsearch = listpatches.copy()
    n = 4 * t**2                       # length of the patches in the wavelet domain (n = 4*t*t)
    auxHV = range(n // 4, 3 * n // 4)  # positions of the WH and WV bands
    auxD = range(3 * n // 4, n)        # positions of the WD band
    HVbands = listsearch[:, :, auxHV, :]
    listsearch[:, :, auxHV, :] = pywt.threshold(HVbands, umbrales[0], 'soft')
    Dbands = listsearch[:, :, auxD, :]
    listsearch[:, :, auxD, :] = pywt.threshold(Dbands, umbrales[1], 'soft')
    return listsearch
def main(file_name):
    d = 0  # take d also as input

    # load the data into y
    y = []
    """
    with open(file_name, 'r+') as f:
        reader = csv.reader(f, delimiter=' ', quotechar='|')
        for number in reader:
            y.append(number[d])
    ts = y
    (ca, cd) = pywt.dwt(ts, 'haar')
    """
    series = read_csv(file_name, header=0, index_col=0, squeeze=True)
    series.columns = ['a', 'b', 'c', 'd']
    series['e'] = pow((series['a'] * series['a'] + series['b'] * series['b']
                       + series['c'] * series['c']), 0.5)
    df1 = pd.DataFrame({'$a': series['e']})
    df = df1.iloc[:, 0]
    # print(df)

    (ca, cd) = pywt.dwt(df, 'haar')
    cat = pywt.threshold(ca, np.std(ca) / 2, 'soft')
    cdt = pywt.threshold(cd, np.std(cd) / 2, 'soft')
    ts_rec = pywt.idwt(cat, cdt, 'haar')

    plt.close('all')
    plt.subplot(211)
    # Original coefficients
    plt.plot(ca, '--*b')
    plt.plot(cd, '--*r')
    # Thresholded coefficients
    plt.plot(cat, '--*c')
    plt.plot(cdt, '--*m')
    plt.legend(['ca', 'cd', 'ca_thresh', 'cd_thresh'], loc=0)
    plt.grid(True)

    plt.subplot(212)
    # plt.plot(ts)
    plt.plot(df)
    # plt.hold('on')
    plt.plot(ts_rec, 'r')
    plt.legend(['original signal', 'reconstructed signal'])
    plt.grid(True)
    plt.show()
def waveletSmooth(x, wavelet="db4", level=None, sigma_type='donoho',
                  plot=False, title=None, mode='per'):
    if level:
        if len(x) < 2**level:
            # Not enough data to decompose this deep; return unsmoothed.
            return x
    try:
        # coeff = pywt.wavedec(x, wavelet=wavelet, mode=mode)
        coeff = pywt.wavedec(x, wavelet=wavelet, level=level, mode=mode)
    except Exception:
        # Too little data for the requested level: fall back to the
        # maximum level and the Donoho threshold.
        coeff = pywt.wavedec(x, wavelet=wavelet, mode=mode)
        sigma_type = 'donoho'

    if sigma_type and (sigma_type == 'donoho'):
        coeff[1:] = (pywt.threshold(i, value=_sigma_est_dwt(i), mode="soft")
                     for i in coeff[1:])
    elif sigma_type and (sigma_type == 'mad'):
        sigma = mad(coeff[1])
        uthresh = sigma * np.sqrt(2 * np.log(len(x)))
        coeff[1:] = (pywt.threshold(i, value=uthresh, mode="soft")
                     for i in coeff[1:])
    elif sigma_type and (sigma_type == 'SURE'):
        # Stein's Unbiased Risk Estimate (SURE)-style threshold
        n = len(x)
        uthresh = np.sqrt(2 * np.log(n * np.log2(n)))
        coeff[1:] = (pywt.threshold(i, value=uthresh, mode="soft")
                     for i in coeff[1:])
    else:
        coeff[1:] = (pywt.threshold(i, value=np.std(i) / 2, mode='soft')
                     for i in coeff[1:])

    y = pywt.waverec(coeff, wavelet=wavelet, mode=mode)

    if plot:
        f, ax = plt.subplots()
        plt.plot(x, color="b", alpha=0.5)
        plt.plot(y, color="b")
        if title:
            ax.set_title(title)
        ax.set_xlim((0, len(y)))
    return y
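# waveletSmooth() relies on two helpers that are not shown. The sketches below are
# consistent with how they are called (the value returned by _sigma_est_dwt is used
# directly as the per-level threshold, and mad() is a rescaled median absolute
# deviation as in statsmodels); both are assumptions, not the original code.
import numpy as np

def mad(arr):
    # Median absolute deviation, rescaled to estimate a Gaussian sigma.
    arr = np.asarray(arr)
    return np.median(np.abs(arr - np.median(arr))) / 0.6745

def _sigma_est_dwt(detail_coeffs):
    # Universal threshold built from the level's own MAD-based sigma estimate.
    sigma = np.median(np.abs(detail_coeffs)) / 0.6745
    return sigma * np.sqrt(2 * np.log(len(detail_coeffs)))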
def wavelet_denoising_6_levels(self, data):
    # Six-level decomposition: first argument is the data, second the wavelet
    # basis (coif5 here), third the extension mode (default 'symmetric'),
    # fourth the number of decomposition levels.
    coeffs = pywt.wavedec(data, 'coif5', mode='symmetric', level=6)
    cA6, cD6, cD5, cD4, cD3, cD2, cD1 = coeffs
    # Soft-threshold the mid-scale detail bands, zero out the two finest ones
    sD6 = pywt.threshold(cD6, 0.014, 'soft')
    sD5 = pywt.threshold(cD5, 0.014, 'soft')
    sD4 = pywt.threshold(cD4, 0.014, 'soft')
    sD3 = pywt.threshold(cD3, 0.014, 'soft')
    sD2 = np.zeros(len(cD2))
    sD1 = np.zeros(len(cD1))
    coeffs2 = [cA6, sD6, sD5, sD4, sD3, sD2, sD1]
    meta = pywt.waverec(coeffs2, 'coif5')
    # waverec can return one extra sample for odd-length input; trim it
    if len(meta) > len(data):
        meta = meta[:-1]
    return meta
def _wavelet(diff, c):
    import pywt
    coeffs = pywt.wavedec2(diff, 'db1')
    for index, coef in enumerate(coeffs):
        if type(coef) == tuple:
            # Detail coefficients: threshold each band with the same value c
            # (the original referenced the undefined names lambda1 * theta1 here).
            coeffs[index] = tuple(pywt.threshold(array, c) for array in coef)
        else:
            # Approximation coefficients
            coeffs[index] = pywt.threshold(coef, c)
    return pywt.waverec2(coeffs, 'db1')
def VISUshrink_T(filterdim, LL, HH, tscale=1 / 3):
    """
    VisuShrink thresholding

    :param filterdim: shape of the image
    :param LL: 2x low pass
    :param HH: 2x high pass
    :param tscale: scaling factor for the threshold
    :return: updated LL (LL1) and HH (HH1)
    """
    number = filterdim[0] * filterdim[1]
    TLL = np.sqrt((np.std(LL)**2) * (np.log(number)))
    THH = np.sqrt((np.std(HH)**2) * (np.log(number)))
    LL1 = pywt.threshold(LL, TLL * tscale, 'soft')
    HH1 = pywt.threshold(HH, THH * tscale, 'soft')
    return LL1, HH1
def wavelet(data):
    # Fixed universal-style threshold (used by the commented-out WLSTM2 variant)
    thresh = math.sqrt(2 * math.log10(len(data)))
    wavelet_result = []
    for i in range(data.shape[1]):
        if i >= 1:
            data_test = data[:, i]
            coeffs = pywt.wavedec(data_test, "haar", level=2)  # two-level DWT
            thresh1 = threshhold(coeffs[-1])  # SURE (unbiased risk) threshold
            thresh2 = threshhold(coeffs[-2])  # SURE (unbiased risk) threshold
            # coeffs[-2] = pywt.threshold(coeffs[-2], thresh, 'soft')
            # coeffs[-1] = pywt.threshold(coeffs[-1], thresh, 'soft')
            coeffs[-2] = pywt.threshold(coeffs[-2], thresh2, 'soft')
            coeffs[-1] = pywt.threshold(coeffs[-1], thresh1, 'soft')  # SURE thresholding, WLSTM1
            # coeffs[-2] = np.zeros_like(coeffs[-2])
            # coeffs[-1] = np.zeros_like(coeffs[-1])
            result = pywt.waverec(coeffs, "haar")  # wavelet reconstruction
            wavelet_result.append(result[:len(data)])
        else:
            result = np.zeros((len(data),))
            wavelet_result.append(result)
    wavelet_result = np.array(wavelet_result)
    data = np.transpose(wavelet_result)

    # Second pass of the same wavelet denoising
    wavelet_result2 = []
    for j in range(data.shape[1]):
        if j >= 1:
            data_test = data[:, j]
            coeffs = pywt.wavedec(data_test, "haar", level=2)
            thresh1 = threshhold(coeffs[-1])
            thresh2 = threshhold(coeffs[-2])
            # coeffs[-2] = pywt.threshold(coeffs[-2], thresh, 'soft')
            # coeffs[-1] = pywt.threshold(coeffs[-1], thresh, 'soft')
            coeffs[-2] = pywt.threshold(coeffs[-2], thresh2, 'soft')
            coeffs[-1] = pywt.threshold(coeffs[-1], thresh1, 'soft')
            # coeffs[-2] = np.zeros_like(coeffs[-2])
            # coeffs[-1] = np.zeros_like(coeffs[-1])
            result = pywt.waverec(coeffs, "haar")
            wavelet_result2.append(result[:len(result)])
        else:
            result = np.zeros((len(data),))
            wavelet_result2.append(result)
    wavelet_result2 = np.transpose(np.array(wavelet_result2))
    return wavelet_result2
def lowpassfilter(signal, thresh=0.50, wavelet="db4"):
    thresh = thresh * np.nanmax(signal)
    coeff = pywt.wavedec(signal, wavelet, mode="per")
    coeff[1:] = (pywt.threshold(i, value=thresh, mode="soft")
                 for i in coeff[1:])
    reconstructed_signal = pywt.waverec(coeff, wavelet, mode="per")
    return reconstructed_signal
def _global_threshold(self, coeffs, mode):
    threshold = self._threshold(self._sigma_image, self._image)
    coeffs[1:] = [[pywt.threshold(detail, value=threshold, mode=mode)
                   for detail in coeff]
                  for coeff in coeffs[1:]]
    return coeffs
def _levels_threshold(self, coeffs, mode):
    thresholds = self._threshold(self._sigma_image, self._image, coeffs)
    coeffs[1:] = [tuple(pywt.threshold(detail, value=thresholds[i][j], mode=mode)
                        for j, detail in enumerate(coeff))
                  for i, coeff in enumerate(coeffs[1:])]
    return coeffs
def wavelet(image, scale=1):
    # Single-level 2-D DWT with the db1 (Haar) wavelet.
    coeffs2 = pywt.dwt2(image[:, :, 0], 'db1')
    # Approximation and directional detail sub-bands.
    LL, (LH, HL, HH) = coeffs2
    # Zeroing LL helps eliminate the cloud but...
    coeffs2 = LL * 0, (LH * 1, HL * 1, HH * 1)
    # Reconstruct the image with the LL (low frequency) component removed.
    new_img = pywt.idwt2(coeffs2, 'db1')

    # Print statements for evaluation purposes.
    print("Reconstructed image parameters")
    print("Standard Deviations: ", np.std(new_img))
    print("Means: ", np.mean(new_img))
    print("Maxima: ", np.amax(new_img))

    # Thresholding based on the mean and std of the image.
    thresholded_image = pywt.threshold(new_img,
                                       np.mean(new_img) + scale * np.std(new_img),
                                       substitute=0, mode='greater')

    # For image viewing purposes.
    # plt.subplot(131)
    # plt.imshow(image[:, :, 0], cmap=plt.cm.gray)
    # plt.title("Original")
    # plt.subplot(132)
    # plt.imshow(new_img, cmap=plt.cm.gray)
    # plt.title("After")
    # plt.subplot(133)
    # plt.imshow(thresholded_image, cmap=plt.cm.gray)
    # plt.title("Thresholded")
    # plt.show()
    return thresholded_image
def dwt_denoise_line_in_time(signal_in: np.ndarray,
                             im_index: int,
                             threshold_function: bool,
                             padding: str,
                             wavelet_conf) -> np.ndarray:
    """
    Temporal denoising of a single line with the DWT.

    Parameters
    ----------
    signal_in : np.ndarray
        1D input signal.
    im_index : int
        Index of the line being processed (not used in the computation).
    threshold_function : bool
        If True, soft thresholding is used; otherwise hard thresholding.
    padding : str
        Signal extension mode passed to pywt.
    wavelet_conf :
        Configuration object providing the wavelet name as ``m_wavelet``.

    Returns
    -------
    np.ndarray
        Multilevel 1D inverse discrete wavelet transform.
    """
    if threshold_function:
        threshold_mode = 'soft'
    else:
        threshold_mode = 'hard'

    coef = pywt.wavedec(signal_in,
                        wavelet=wavelet_conf.m_wavelet,
                        level=None,
                        mode=padding)

    # MAD noise estimate from the finest detail level and universal threshold
    sigma = mad(coef[-1])
    threshold = sigma * np.sqrt(2 * np.log(len(signal_in)))

    denoised = coef[:]
    denoised[1:] = (pywt.threshold(i, value=threshold, mode=threshold_mode)
                    for i in denoised[1:])

    return pywt.waverec(denoised, wavelet=wavelet_conf.m_wavelet, mode=padding)
def apply_threshold(output, scaler=1., input=None):
    """
    `output` is a list of (cA, cD) pairs (approximation and detail
    coefficients) exactly as returned by an swt decomposition, e.g.
    [(cA1, cD1), (cA2, cD2), ..., (cAn, cDn)].

    If `input` is None, this function calculates the thresholds automatically
    for each waveform. Otherwise it uses the thresholds passed in, assuming
    that `input` has the same length as the output list:
    [threshold1, threshold2, ..., thresholdn].

    `scaler` is a tuning parameter multiplied onto all thresholds.
    Default = 1 (0.8?). The list is modified in place.
    """
    for j in range(len(output)):
        cA, cD = output[j]
        if input is None:
            # MAD-based sigma estimate and universal threshold
            dev = np.median(np.abs(cD - np.median(cD))) / 0.6745
            thresh = math.sqrt(2 * math.log(len(cD))) * dev * scaler
        else:
            thresh = scaler * input[j]
        cD = pywt.threshold(cD, thresh, mode='hard')
        output[j] = (cA, cD)
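# A usage sketch for the apply_threshold() variant directly above: denoise a noisy
# sine with the stationary wavelet transform. The data and parameter choices are made
# up for illustration; it assumes numpy, math and pywt are installed.
import math
import numpy as np
import pywt

rng = np.random.default_rng(0)
t = np.linspace(0, 1, 256)
noisy = np.sin(2 * np.pi * 5 * t) + 0.3 * rng.standard_normal(256)

coeffs = pywt.swt(noisy, 'db4', level=3)  # list of (cA, cD) pairs, one per level
apply_threshold(coeffs, scaler=0.8)       # modifies the list in place
denoised = pywt.iswt(coeffs, 'db4')       # reconstruct from thresholded coefficients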
def denoise(noisy_img, mode, level, noiseSigma):
    # Note: `mode` here is actually the wavelet name handed to wavedec2.
    coeffs = pywt.wavedec2(noisy_img, mode, level=level)

    # Threshold the detail (i.e. high frequency) coefficients
    # using a Donoho-Johnstone universal threshold.
    threshold = noiseSigma * np.sqrt(2 * np.log2(noisy_img.size))
    rec_coeffs = coeffs
    rec_coeffs[1:] = (pywt.threshold(i, value=threshold, mode="soft")
                      for i in rec_coeffs[1:])
    # Returns the thresholded coefficients (not the reconstructed image).
    return rec_coeffs
def thresh_median(triangle, trind):
    newtri = np.zeros(triangle.shape, dtype=np.int16)
    for b in range(trind.shape[0] - 1):
        band = get_band(triangle, trind, b)
        # Soft-threshold each band at the median of its absolute values
        med = np.median(np.abs(band))
        thresh_band = pywt.threshold(band, med)
        print(med)
        triangle = set_band(triangle, trind, thresh_band, b)
    return triangle
def wavelet(input_signal, wavelet_shape, threshold):
    import pywt
    # Pad the signal up to the next power of two before the transform
    next_power_of_two = int(numpy.floor(numpy.log2(len(input_signal))) + 1)
    padded_input_signal = _pad(input_signal, 2**next_power_of_two)

    wt_coeffs = pywt.wavedec(padded_input_signal, wavelet_shape,
                             level=None, mode='per')
    denoised_coeffs = wt_coeffs[:]
    denoised_coeffs[1:] = (pywt.threshold(i, value=threshold)
                           for i in denoised_coeffs[1:])
    recon = pywt.waverec(denoised_coeffs, wavelet_shape, mode='per')

    # Strip the padding; the offset must be an integer for slicing
    start_offset = (len(padded_input_signal) - len(input_signal)) // 2
    return recon[start_offset:(start_offset + len(input_signal))]
def test_threshold_firm():
    thresh = 0.2
    thresh2 = 3 * thresh
    data_real = np.linspace(-1, 1, 100)
    for dtype in float_dtypes:
        if dtype in real_dtypes:
            data = np.asarray(data_real, dtype=dtype)
        else:
            data = np.asarray(data_real + 0.1j, dtype=dtype)
        if data.real.dtype == np.float32:
            rtol = atol = 1e-6
        else:
            rtol = atol = 1e-14
        d_hard = pywt.threshold(data, thresh, 'hard')
        d_soft = pywt.threshold(data, thresh, 'soft')
        d_firm = pywt.threshold_firm(data, thresh, thresh2)

        # check dtypes
        assert_equal(d_hard.dtype, data.dtype)
        assert_equal(d_soft.dtype, data.dtype)
        assert_equal(d_firm.dtype, data.dtype)

        # values < threshold are zero
        lt = np.where(np.abs(data) < thresh)
        assert_(np.all(d_firm[lt] == 0))

        # values > than the threshold are equal to hard-thresholding
        gt = np.where(np.abs(data) >= thresh2)
        assert_allclose(np.abs(d_hard[gt]), np.abs(d_firm[gt]),
                        rtol=rtol, atol=atol)

        # other values are intermediate between soft and hard thresholding
        mt = np.where(np.logical_and(np.abs(data) > thresh,
                                     np.abs(data) < thresh2))
        mt_abs_firm = np.abs(d_firm[mt])
        assert_(np.all(mt_abs_firm < np.abs(d_hard[mt])))
        assert_(np.all(mt_abs_firm > np.abs(d_soft[mt])))
def coeffs_process(self, coeffs):
    # coeffs_out = coeffs
    for level in range(len(coeffs)):
        if level == 0:
            continue
        if level == 1:
            std = np.std(coeffs[level])
            # coeffs[level] = self.retain_nmax_coeffs(3, coeffs[level])
            coeffs[level] = pywt.threshold(coeffs[level], std, 'hard')
            continue
        if level == 2:
            # coeffs[level] = self.retain_nmax_coeffs(3, coeffs[level])
            std = np.std(coeffs[level])
            coeffs[level] = pywt.threshold(coeffs[level], std * 2, 'hard')
            continue
        # if level == 3:
        #     # coeffs[level] = self.retain_nmax_coeffs(3, coeffs[level])
        #     std = np.std(coeffs[level])
        #     coeffs[level] = pywt.threshold(coeffs[level], std * 2, 'hard')
        #     continue
        else:
            coeffs[level] = self.retain_nmax_coeffs(0, coeffs[level])
    return coeffs
def _wavelet_threshold(img, wavelet, threshold=None, sigma=None, mode='soft',
                       wavelet_levels=None):
    """Perform wavelet denoising.

    Parameters
    ----------
    img : ndarray (2d or 3d) of ints, uints or floats
        Input data to be denoised. `img` can be of any numeric type,
        but it is cast into an ndarray of floats for the computation
        of the denoised image.
    wavelet : string
        The type of wavelet to perform. Can be any of the options
        pywt.wavelist outputs. For example, this may be any of ``{db1, db2,
        db3, db4, haar}``.
    sigma : float, optional
        The standard deviation of the noise. The noise is estimated when sigma
        is None (the default) by the method in [2]_.
    threshold : float, optional
        The thresholding value. All wavelet coefficients less than this value
        are set to 0. The default value (None) uses the BayesShrink method
        found in [1]_ to remove noise.
    mode : {'soft', 'hard'}, optional
        An optional argument to choose the type of denoising performed. It is
        noted that choosing soft thresholding given additive noise finds the
        best approximation of the original image.
    wavelet_levels : int or None, optional
        The number of wavelet decomposition levels to use. The default is
        three less than the maximum number of possible decomposition levels
        (see Notes below).

    Returns
    -------
    out : ndarray
        Denoised image.

    Notes
    -----
    Reference [1]_ used four levels of wavelet decomposition. To be more
    flexible for a range of input sizes, the implementation here stops 3
    levels prior to the maximum level of decomposition for `img` (the exact
    # of levels thus depends on `img.shape` and the chosen wavelet).
    BayesShrink variance estimation doesn't work well on levels with
    extremely small coefficient arrays. This is the rationale for skipping a
    few of the coarsest levels. The user can override the automated setting
    by explicitly specifying `wavelet_levels`.

    References
    ----------
    .. [1] Chang, S. Grace, Bin Yu, and Martin Vetterli. "Adaptive wavelet
           thresholding for image denoising and compression." Image Processing,
           IEEE Transactions on 9.9 (2000): 1532-1546.
           DOI: 10.1109/83.862633
    .. [2] D. L. Donoho and I. M. Johnstone. "Ideal spatial adaptation by
           wavelet shrinkage." Biometrika 81.3 (1994): 425-455.
           DOI: 10.1093/biomet/81.3.425
    """
    wavelet = pywt.Wavelet(wavelet)

    # Determine the number of wavelet decomposition levels
    if wavelet_levels is None:
        # Determine the maximum number of possible levels for img
        dlen = wavelet.dec_len
        wavelet_levels = np.min(
            [pywt.dwt_max_level(s, dlen) for s in img.shape])

        # Skip coarsest wavelet scales (see Notes in docstring).
        wavelet_levels = max(wavelet_levels - 3, 1)

    coeffs = pywt.wavedecn(img, wavelet=wavelet, level=wavelet_levels)
    # Detail coefficients at each decomposition level
    dcoeffs = coeffs[1:]

    if sigma is None:
        # Estimate the noise via the method in [2]_
        detail_coeffs = dcoeffs[-1]['d' * img.ndim]
        sigma = _sigma_est_dwt(detail_coeffs, distribution='Gaussian')

    if threshold is None:
        # The BayesShrink thresholds from [1]_ in docstring
        var = sigma**2
        threshold = [{key: _bayes_thresh(level[key], var) for key in level}
                     for level in dcoeffs]

    if np.isscalar(threshold):
        # A single threshold for all coefficient arrays
        denoised_detail = [{key: pywt.threshold(level[key], value=threshold,
                                                mode=mode) for key in level}
                           for level in dcoeffs]
    else:
        # Dict of unique threshold coefficients for each detail coeff. array
        denoised_detail = [{key: pywt.threshold(level[key], value=thresh[key],
                                                mode=mode) for key in level}
                           for thresh, level in zip(threshold, dcoeffs)]
    denoised_coeffs = [coeffs[0]] + denoised_detail
    return pywt.waverecn(denoised_coeffs, wavelet)
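# _sigma_est_dwt() and _bayes_thresh() are internal helpers that are not reproduced
# in the snippet above. The sketches below are plausible implementations consistent
# with how they are called and with the BayesShrink formula from reference [1];
# treat them as assumptions, not the original code.
import numpy as np

def _sigma_est_dwt(detail_coeffs, distribution='Gaussian'):
    # Robust noise estimate from the finest detail coefficients (Donoho & Johnstone):
    # sigma = MAD / Phi^{-1}(3/4), ignoring exact zeros introduced by padding.
    if distribution.lower() != 'gaussian':
        raise ValueError("Only Gaussian noise estimation is sketched here.")
    detail_coeffs = detail_coeffs[np.nonzero(detail_coeffs)]
    return np.median(np.abs(detail_coeffs)) / 0.67448975019608171

def _bayes_thresh(details, var):
    # BayesShrink threshold: sigma^2 / sigma_X, with the signal variance estimated
    # as max(E[d^2] - sigma^2, eps); eps guards against division by zero.
    dvar = np.mean(details * details)
    eps = np.finfo(details.dtype).eps
    return var / np.sqrt(max(dvar - var, eps))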
def test_threshold():
    # soft
    data = np.linspace(1, 4, 7)
    soft_result = [0., 0., 0., 0.5, 1., 1.5, 2.]
    assert_allclose(pywt.threshold(data, 2, 'soft'),
                    np.array(soft_result), rtol=1e-12)
    assert_allclose(pywt.threshold(-data, 2, 'soft'),
                    -np.array(soft_result), rtol=1e-12)
    assert_allclose(pywt.threshold([[1, 2]] * 2, 1, 'soft'),
                    [[0, 1]] * 2, rtol=1e-12)
    assert_allclose(pywt.threshold([[1, 2]] * 2, 2, 'soft'),
                    [[0, 0]] * 2, rtol=1e-12)

    # hard
    data = np.linspace(1, 4, 7)
    hard_result = [0., 0., 2., 2.5, 3., 3.5, 4.]
    assert_allclose(pywt.threshold(data, 2, 'hard'),
                    np.array(hard_result), rtol=1e-12)
    assert_allclose(pywt.threshold(-data, 2, 'hard'),
                    -np.array(hard_result), rtol=1e-12)
    assert_allclose(pywt.threshold([[1, 2]] * 2, 1, 'hard'),
                    [[1, 2]] * 2, rtol=1e-12)
    assert_allclose(pywt.threshold([[1, 2]] * 2, 2, 'hard'),
                    [[0, 2]] * 2, rtol=1e-12)

    # greater
    data = np.linspace(1, 4, 7)
    assert_allclose(pywt.threshold(data, 2, 'greater'),
                    np.array([0., 0., 2., 2.5, 3., 3.5, 4.]), rtol=1e-12)
    assert_allclose(pywt.threshold([[1, 2]] * 2, 1, 'greater'),
                    [[1, 2]] * 2, rtol=1e-12)
    assert_allclose(pywt.threshold([[1, 2]] * 2, 2, 'greater'),
                    [[0, 2]] * 2, rtol=1e-12)

    # less
    data = np.linspace(1, 4, 7)
    assert_allclose(pywt.threshold(data, 2, 'less'),
                    np.array([1., 1.5, 2., 0., 0., 0., 0.]), rtol=1e-12)
    assert_allclose(pywt.threshold([[1, 2]] * 2, 1, 'less'),
                    [[1, 0]] * 2, rtol=1e-12)
    assert_allclose(pywt.threshold([[1, 2]] * 2, 2, 'less'),
                    [[1, 2]] * 2, rtol=1e-12)
def test_threshold():
    data = np.linspace(1, 4, 7)

    # soft
    soft_result = [0.0, 0.0, 0.0, 0.5, 1.0, 1.5, 2.0]
    assert_allclose(pywt.threshold(data, 2, "soft"),
                    np.array(soft_result), rtol=1e-12)
    assert_allclose(pywt.threshold(-data, 2, "soft"),
                    -np.array(soft_result), rtol=1e-12)
    assert_allclose(pywt.threshold([[1, 2]] * 2, 1, "soft"),
                    [[0, 1]] * 2, rtol=1e-12)
    assert_allclose(pywt.threshold([[1, 2]] * 2, 2, "soft"),
                    [[0, 0]] * 2, rtol=1e-12)

    # hard
    hard_result = [0.0, 0.0, 2.0, 2.5, 3.0, 3.5, 4.0]
    assert_allclose(pywt.threshold(data, 2, "hard"),
                    np.array(hard_result), rtol=1e-12)
    assert_allclose(pywt.threshold(-data, 2, "hard"),
                    -np.array(hard_result), rtol=1e-12)
    assert_allclose(pywt.threshold([[1, 2]] * 2, 1, "hard"),
                    [[1, 2]] * 2, rtol=1e-12)
    assert_allclose(pywt.threshold([[1, 2]] * 2, 2, "hard"),
                    [[0, 2]] * 2, rtol=1e-12)

    # greater
    greater_result = [0.0, 0.0, 2.0, 2.5, 3.0, 3.5, 4.0]
    assert_allclose(pywt.threshold(data, 2, "greater"),
                    np.array(greater_result), rtol=1e-12)
    assert_allclose(pywt.threshold([[1, 2]] * 2, 1, "greater"),
                    [[1, 2]] * 2, rtol=1e-12)
    assert_allclose(pywt.threshold([[1, 2]] * 2, 2, "greater"),
                    [[0, 2]] * 2, rtol=1e-12)

    # less
    assert_allclose(pywt.threshold(data, 2, "less"),
                    np.array([1.0, 1.5, 2.0, 0.0, 0.0, 0.0, 0.0]), rtol=1e-12)
    assert_allclose(pywt.threshold([[1, 2]] * 2, 1, "less"),
                    [[1, 0]] * 2, rtol=1e-12)
    assert_allclose(pywt.threshold([[1, 2]] * 2, 2, "less"),
                    [[1, 2]] * 2, rtol=1e-12)

    # invalid
    assert_raises(ValueError, pywt.threshold, data, 2, "foo")
def apply_dwt_filter(y, dwt_type, dwt_level, dwt_thresh_func, dwt_thresh_type):
    coeffs = pywt.wavedecn(y, dwt_type, level=dwt_level)
    for i in range(1, dwt_level + 1):
        # thselect picks the threshold value; dwt_thresh_func is the
        # thresholding mode ('soft', 'hard', ...) passed to pywt.threshold.
        coeffs[i]["d"] = pywt.threshold(coeffs[i]["d"],
                                        thselect(coeffs[i]["d"], dwt_thresh_type),
                                        dwt_thresh_func)
    return pywt.waverecn(coeffs, dwt_type)
def _wavelet_threshold(image, wavelet, method=None, threshold=None,
                       sigma=None, mode='soft', wavelet_levels=None):
    """Perform wavelet thresholding.

    Parameters
    ----------
    image : ndarray (2d or 3d) of ints, uints or floats
        Input data to be denoised. `image` can be of any numeric type,
        but it is cast into an ndarray of floats for the computation
        of the denoised image.
    wavelet : string
        The type of wavelet to perform. Can be any of the options
        pywt.wavelist outputs. For example, this may be any of ``{db1, db2,
        db3, db4, haar}``.
    method : {'BayesShrink', 'VisuShrink'}, optional
        Thresholding method to be used. The currently supported methods are
        "BayesShrink" [1]_ and "VisuShrink" [2]_. If it is set to None, a
        user-specified ``threshold`` must be supplied instead.
    threshold : float, optional
        The thresholding value to apply during wavelet coefficient
        thresholding. The default value (None) uses the selected ``method`` to
        estimate appropriate threshold(s) for noise removal.
    sigma : float, optional
        The standard deviation of the noise. The noise is estimated when sigma
        is None (the default) by the method in [2]_.
    mode : {'soft', 'hard'}, optional
        An optional argument to choose the type of denoising performed. It is
        noted that choosing soft thresholding given additive noise finds the
        best approximation of the original image.
    wavelet_levels : int or None, optional
        The number of wavelet decomposition levels to use. The default is
        three less than the maximum number of possible decomposition levels
        (see Notes below).

    Returns
    -------
    out : ndarray
        Denoised image.

    References
    ----------
    .. [1] Chang, S. Grace, Bin Yu, and Martin Vetterli. "Adaptive wavelet
           thresholding for image denoising and compression." Image Processing,
           IEEE Transactions on 9.9 (2000): 1532-1546.
           :DOI:`10.1109/83.862633`
    .. [2] D. L. Donoho and I. M. Johnstone. "Ideal spatial adaptation by
           wavelet shrinkage." Biometrika 81.3 (1994): 425-455.
           :DOI:`10.1093/biomet/81.3.425`
    """
    wavelet = pywt.Wavelet(wavelet)
    if not wavelet.orthogonal:
        warn(("Wavelet thresholding was designed for use with orthogonal "
              "wavelets. For nonorthogonal wavelets such as {}, results are "
              "likely to be suboptimal.").format(wavelet.name))

    # original_extent is used to workaround PyWavelets issue #80
    # odd-sized input results in an image with 1 extra sample after waverecn
    original_extent = tuple(slice(s) for s in image.shape)

    # Determine the number of wavelet decomposition levels
    if wavelet_levels is None:
        # Determine the maximum number of possible levels for image
        dlen = wavelet.dec_len
        wavelet_levels = np.min(
            [pywt.dwt_max_level(s, dlen) for s in image.shape])

        # Skip coarsest wavelet scales (see Notes in docstring).
        wavelet_levels = max(wavelet_levels - 3, 1)

    coeffs = pywt.wavedecn(image, wavelet=wavelet, level=wavelet_levels)
    # Detail coefficients at each decomposition level
    dcoeffs = coeffs[1:]

    if sigma is None:
        # Estimate the noise via the method in [2]_
        detail_coeffs = dcoeffs[-1]['d' * image.ndim]
        sigma = _sigma_est_dwt(detail_coeffs, distribution='Gaussian')

    if method is not None and threshold is not None:
        warn(("Thresholding method {} selected. The user-specified threshold "
              "will be ignored.").format(method))

    if threshold is None:
        var = sigma**2
        if method is None:
            raise ValueError(
                "If method is None, a threshold must be provided.")
        elif method == "BayesShrink":
            # The BayesShrink thresholds from [1]_ in docstring
            threshold = [{key: _bayes_thresh(level[key], var) for key in level}
                         for level in dcoeffs]
        elif method == "VisuShrink":
            # The VisuShrink thresholds from [2]_ in docstring
            threshold = _universal_thresh(image, sigma)
        else:
            raise ValueError("Unrecognized method: {}".format(method))

    if np.isscalar(threshold):
        # A single threshold for all coefficient arrays
        denoised_detail = [{key: pywt.threshold(level[key], value=threshold,
                                                mode=mode) for key in level}
                           for level in dcoeffs]
    else:
        # Dict of unique threshold coefficients for each detail coeff. array
        denoised_detail = [{key: pywt.threshold(level[key], value=thresh[key],
                                                mode=mode) for key in level}
                           for thresh, level in zip(threshold, dcoeffs)]
    denoised_coeffs = [coeffs[0]] + denoised_detail
    return pywt.waverecn(denoised_coeffs, wavelet)[original_extent]
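# _universal_thresh() (the VisuShrink threshold) is not shown above. A sketch of the
# standard Donoho-Johnstone universal threshold, consistent with reference [2]; this
# is an assumption about the helper, not the original code.
import numpy as np

def _universal_thresh(img, sigma):
    # sigma * sqrt(2 * ln(n)), with n the number of pixels in the image.
    return sigma * np.sqrt(2 * np.log(img.size))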
def test_threshold():
    data = np.linspace(1, 4, 7)

    # soft
    soft_result = [0., 0., 0., 0.5, 1., 1.5, 2.]
    assert_allclose(pywt.threshold(data, 2, 'soft'),
                    np.array(soft_result), rtol=1e-12)
    assert_allclose(pywt.threshold(-data, 2, 'soft'),
                    -np.array(soft_result), rtol=1e-12)
    assert_allclose(pywt.threshold([[1, 2]] * 2, 1, 'soft'),
                    [[0, 1]] * 2, rtol=1e-12)
    assert_allclose(pywt.threshold([[1, 2]] * 2, 2, 'soft'),
                    [[0, 0]] * 2, rtol=1e-12)

    # soft thresholding complex values
    assert_allclose(pywt.threshold([[1j, 2j]] * 2, 1, 'soft'),
                    [[0j, 1j]] * 2, rtol=1e-12)
    assert_allclose(pywt.threshold([[1+1j, 2+2j]] * 2, 6, 'soft'),
                    [[0, 0]] * 2, rtol=1e-12)
    complex_data = [[1+2j, 2+2j]] * 2
    for thresh in [1, 2]:
        assert_allclose(pywt.threshold(complex_data, thresh, 'soft'),
                        _soft(complex_data, thresh), rtol=1e-12)

    # test soft thresholding with non-default substitute argument
    s = 5
    assert_allclose(pywt.threshold([[1j, 2]] * 2, 1.5, 'soft', substitute=s),
                    [[s, 0.5]] * 2, rtol=1e-12)

    # soft: no divide by zero warnings when input contains zeros
    assert_allclose(pywt.threshold(np.zeros(16), 2, 'soft'),
                    np.zeros(16), rtol=1e-12)

    # hard
    hard_result = [0., 0., 2., 2.5, 3., 3.5, 4.]
    assert_allclose(pywt.threshold(data, 2, 'hard'),
                    np.array(hard_result), rtol=1e-12)
    assert_allclose(pywt.threshold(-data, 2, 'hard'),
                    -np.array(hard_result), rtol=1e-12)
    assert_allclose(pywt.threshold([[1, 2]] * 2, 1, 'hard'),
                    [[1, 2]] * 2, rtol=1e-12)
    assert_allclose(pywt.threshold([[1, 2]] * 2, 2, 'hard'),
                    [[0, 2]] * 2, rtol=1e-12)
    assert_allclose(pywt.threshold([[1, 2]] * 2, 2, 'hard', substitute=s),
                    [[s, 2]] * 2, rtol=1e-12)
    assert_allclose(pywt.threshold([[1+1j, 2+2j]] * 2, 2, 'hard'),
                    [[0, 2+2j]] * 2, rtol=1e-12)

    # greater
    greater_result = [0., 0., 2., 2.5, 3., 3.5, 4.]
    assert_allclose(pywt.threshold(data, 2, 'greater'),
                    np.array(greater_result), rtol=1e-12)
    assert_allclose(pywt.threshold([[1, 2]] * 2, 1, 'greater'),
                    [[1, 2]] * 2, rtol=1e-12)
    assert_allclose(pywt.threshold([[1, 2]] * 2, 2, 'greater'),
                    [[0, 2]] * 2, rtol=1e-12)
    assert_allclose(pywt.threshold([[1, 2]] * 2, 2, 'greater', substitute=s),
                    [[s, 2]] * 2, rtol=1e-12)
    # greater doesn't allow complex-valued inputs
    assert_raises(ValueError, pywt.threshold, [1j, 2j], 2, 'greater')

    # less
    assert_allclose(pywt.threshold(data, 2, 'less'),
                    np.array([1., 1.5, 2., 0., 0., 0., 0.]), rtol=1e-12)
    assert_allclose(pywt.threshold([[1, 2]] * 2, 1, 'less'),
                    [[1, 0]] * 2, rtol=1e-12)
    assert_allclose(pywt.threshold([[1, 2]] * 2, 1, 'less', substitute=s),
                    [[1, s]] * 2, rtol=1e-12)
    assert_allclose(pywt.threshold([[1, 2]] * 2, 2, 'less'),
                    [[1, 2]] * 2, rtol=1e-12)
    # less doesn't allow complex-valued inputs
    assert_raises(ValueError, pywt.threshold, [1j, 2j], 2, 'less')

    # invalid
    assert_raises(ValueError, pywt.threshold, data, 2, 'foo')
import numpy as np
import math
import pywt
import matplotlib.pyplot as plt
from PIL import Image

# Read a 1-D profile exported by the FFT step
x = []
with open('../fft/image1.png_hor.txt', 'r') as f:
    for line in f:
        x.append(float(line))

wavelet = pywt.Wavelet('bior2.8')
# levels = int(math.floor(math.log(image.shape[0], 2)))
WaveletCoeffs = pywt.wavedec(x, wavelet, level=None)

# threshold = noiseSigma * math.sqrt(2 * math.log(image.size, 2))
threshold = 100 * math.sqrt(2 * math.log(len(x), 2))
NewWaveletCoeffs = [pywt.threshold(c, threshold) for c in WaveletCoeffs]
result = pywt.waverec(NewWaveletCoeffs, wavelet)

plt.figure(1)
plt.subplot(211)
plt.plot(x)
plt.subplot(212)
plt.plot(result)
plt.show()

# Alternative wavelet that was being tried:
# wavelet = pywt.Wavelet('haar')