def denoise():
    wave = 'db4'
    sig = 20
    tau1 = 3*sig
    tau2 = 3*sig/2

    noisyLena = lena + np.random.normal(scale=sig, size=lena.shape)
    lw = pywt.wavedec2(noisyLena, wave, level=4)
    lwt1 = hardThresh(lw, tau1)
    lwt2 = softThresh(lw, tau2)

    rlena1 = pywt.waverec2(lwt1, wave)
    rlena2 = pywt.waverec2(lwt2, wave)

    plt.subplot(131)
    plt.imshow(noisyLena, cmap=plt.cm.Greys_r)
    plt.axis('off')
    plt.subplot(132)
    plt.imshow(rlena1, cmap=plt.cm.Greys_r)
    plt.axis('off')
    plt.subplot(133)
    plt.imshow(rlena2, cmap=plt.cm.Greys_r)
    plt.axis('off')
    plt.savefig('denoise.pdf')
    plt.clf()
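# The denoise() above relies on hardThresh/softThresh helpers that are not
# shown here. A minimal sketch of what they might look like (an assumption,
# not the original helpers), thresholding every detail band of a wavedec2
# coefficient list with pywt.threshold:
import pywt

def hardThresh(coeffs, tau):
    # keep the approximation band, hard-threshold each detail band at tau
    return [coeffs[0]] + [tuple(pywt.threshold(d, tau, mode='hard') for d in level)
                          for level in coeffs[1:]]

def softThresh(coeffs, tau):
    # keep the approximation band, soft-threshold (shrink) each detail band at tau
    return [coeffs[0]] + [tuple(pywt.threshold(d, tau, mode='soft') for d in level)
                          for level in coeffs[1:]]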
def generate_basis():
    """generate the basis"""
    x = np.zeros((56, 56))
    coefs = pywt.wavedec2(x, 'db1')
    n_levels = len(coefs)
    basis = []
    for i in range(n_levels):
        coefs[i] = list(coefs[i])
        n_filters = len(coefs[i])
        for j in range(n_filters):
            for m in range(coefs[i][j].shape[0]):
                try:
                    for n in range(coefs[i][j].shape[1]):
                        coefs[i][j][m][n] = 1
                        temp_basis = pywt.waverec2(coefs, 'db1')
                        basis.append(temp_basis)
                        coefs[i][j][m][n] = 0
                except IndexError:
                    coefs[i][j][m] = 1
                    temp_basis = pywt.waverec2(coefs, 'db1')
                    basis.append(temp_basis)
                    coefs[i][j][m] = 0
    basis = np.array(basis)
    return basis
def create_haar_dictionary(p=8):
    import pywt
    c = pywt.wavedec2(np.zeros((p, p)), 'haar')
    D = []
    for k in range(1, len(c)):
        for i in range(3):
            ck = c[k][i]
            l = ck.shape[0]
            for j in range(l):
                for m in range(l):
                    ck[j, m] = 1
                    D += [pywt.waverec2(c, 'haar')]
                    ck[j, m] = 0
    ck = c[0]
    l = ck.shape[0]
    for j in range(l):
        for m in range(l):
            ck[j, m] = 1
            D += [pywt.waverec2(c, 'haar')]
            ck[j, m] = 0
    D = np.array(D).reshape(-1, p*p)
    Dn = []
    for i in range(15):
        Dn += _translate(D[i].reshape((p, p)))
    Dn = np.array(Dn).reshape((-1, p*p))
    i0 = np.sum(abs(Dn), axis=1) != 0
    return Dn[i0]
def generate_basis(im_dim=64, db_num=1):
    """generate the basis"""
    x = np.zeros((im_dim, im_dim))
    coefs = pywt.wavedec2(x, 'db{}'.format(db_num))
    n_levels = len(coefs)
    basis = []
    for i in range(n_levels):
        coefs[i] = list(coefs[i])
        n_filters = len(coefs[i])
        for j in range(n_filters):
            for m in range(coefs[i][j].shape[0]):
                try:
                    for n in range(coefs[i][j].shape[1]):
                        coefs[i][j][m][n] = 1  # i-th unit vector
                        # apply wavelet decoder to e_i to get i-th column
                        temp_basis = pywt.waverec2(coefs, 'db{}'.format(db_num))
                        basis.append(temp_basis)
                        coefs[i][j][m][n] = 0
                except IndexError:
                    coefs[i][j][m] = 1
                    temp_basis = pywt.waverec2(coefs, 'db{}'.format(db_num))
                    basis.append(temp_basis)
                    coefs[i][j][m] = 0
    basis = np.array(basis)
    return basis
def construct_Wminv(d=8, wave_name='db1'):
    """generate the basis"""
    x = np.zeros((d, d))
    coefs = pywt.wavedec2(x, wave_name)
    n_levels = len(coefs)
    basis = []
    for i in range(n_levels):
        coefs[i] = list(coefs[i])
        n_filters = len(coefs[i])
        for j in range(n_filters):
            for m in range(coefs[i][j].shape[0]):
                try:
                    for n in range(coefs[i][j].shape[1]):
                        coefs[i][j][m][n] = 1
                        temp_basis = pywt.waverec2(coefs, wave_name)
                        basis.append(temp_basis)
                        coefs[i][j][m][n] = 0
                except IndexError:
                    coefs[i][j][m] = 1
                    temp_basis = pywt.waverec2(coefs, wave_name)
                    basis.append(temp_basis)
                    coefs[i][j][m] = 0
    W_ = np.array(basis)
    dnew = W_.shape[0]
    W_ = W_.reshape((d*d, dnew))
    return W_
def create_dictionary_haar(p=8):
    import pywt
    c = pywt.wavedec2(np.zeros((p, p)), 'haar')
    D = []
    for k in range(1, len(c)):
        for i in range(3):
            ck = c[k][i]
            l = ck.shape[0]
            for j in range(l):
                for m in range(l):
                    ck[j, m] = 1
                    D += [pywt.waverec2(c, 'haar')]
                    ck[j, m] = 0
    ck = c[0]
    l = ck.shape[0]
    for j in range(l):
        for m in range(l):
            ck[j, m] = 1
            D += [pywt.waverec2(c, 'haar')]
            ck[j, m] = 0
    D = np.array(D).reshape(-1, p * p)
    Dn = []
    for i in range(15):
        Dn += translate(D[i].reshape((p, p)))
    return np.array(Dn).reshape((-1, p * p))
def extractNoise(image, wavelet, level, mode='sym'):
    """Extracts the noise signal, which is locally Gaussian N(0, sigma^2)."""
    imageData = image2array(image)
    outputData = []
    # process the colour channels
    for n, imageBand in enumerate(imageData):
        # compute the 8-tap Daubechies QMF wavelet decomposition
        imageCoeffs = pywt.wavedec2(imageBand, wavelet, mode, level)
        outputBandCoeffs = [imageCoeffs[0]]  # cA
        # drop the coefficient that carries all of the low-frequency (L)
        # information, keeping only the details of imageCoeffs[0]
        # for each wavelet band
        for n, imageDetails in enumerate(imageCoeffs):
            resDetails = []
            # for each sub-band V, H, D
            for n, imageDetail in enumerate(imageDetails):
                # estimate the local variance
                resDetail = lawmlN(imageDetail)
                resDetails.append(resDetail)
            outputBandCoeffs.append(resDetails)
        # rebuild the image with the new wavelet coefficients
        newBand = pywt.waverec2(outputBandCoeffs, wavelet, mode)
        outputData.append(newBand)
    outputData = numpy.array(outputData)
    return outputData
def wavelet_dec_rec(img_blr, resize_factor=0.25):
    '''
    wavelet_dec_rec
    Take a picture, do a wavelet decomposition, remove the low-frequency
    (approximation) and highest-detail (noise) coefficients, and return
    the recomposed picture.
    '''
    img_shape = img_blr.shape
    need_resize = abs(resize_factor - 1) > 0.001
    level = int(6 - log(1 / resize_factor, 2))
    if need_resize:
        img_blr_resize = cv2.resize(img_blr, None, fx=resize_factor, fy=resize_factor)
    else:
        img_blr_resize = img_blr
    coeffs = pywt.wavedec2(img_blr_resize, "db8", level=level)
    # remove the low frequencies (approximation)
    coeffs[0].fill(0)
    # remove the highest details (noise)
    coeffs[-1][0].fill(0)
    coeffs[-1][1].fill(0)
    coeffs[-1][2].fill(0)
    img_rec_resize = pywt.waverec2(coeffs, "db8")
    if need_resize:
        img_rec = cv2.resize(img_rec_resize, (img_shape[1], img_shape[0]))
    else:
        img_rec = img_rec_resize
    return img_rec
def wavelet_inverse(coeffs, locations, wavelet, mode='symmetric', axes=(-2, -1)):
    '''Wrapper for the multilevel 2D inverse DWT.

    Parameters
    ----------
    coeffs : array_like
        Combined coefficients.
    locations : list
        Indices where the coefficients for each block are located.
    wavelet : str
        Wavelet to use.
    mode : str, optional
        Signal extension mode.
    axes : tuple, optional
        Axes over which to compute the IDWT.

    Returns
    -------
    array_like
        Inverse transform of wavelet transform, the original image.

    Notes
    -----
    coeffs, locations are the output of forward().
    '''
    # Split coefficients out into coefficient list
    coeff_list = split_chunks(coeffs, locations)
    return pywt.waverec2(coeff_list, wavelet, mode, axes)
def Wavelet(pan, hs):
    M, N, c = pan.shape
    m, n, C = hs.shape

    ratio = int(np.round(M / m))
    print('get sharpening ratio: ', ratio)
    assert int(np.round(M / m)) == int(np.round(N / n))

    # upsample
    u_hs = upsample_interp23(hs, ratio)

    pan = np.squeeze(pan)
    pc = pywt.wavedec2(pan, 'haar', level=2)

    rec = []
    for i in range(C):
        temp_dec = pywt.wavedec2(u_hs[:, :, i], 'haar', level=2)
        pc[0] = temp_dec[0]
        temp_rec = pywt.waverec2(pc, 'haar')
        temp_rec = np.expand_dims(temp_rec, -1)
        rec.append(temp_rec)

    I_Wavelet = np.concatenate(rec, axis=-1)

    # adjustment
    I_Wavelet[I_Wavelet < 0] = 0
    I_Wavelet[I_Wavelet > 1] = 1

    return np.uint8(I_Wavelet * 255)
def cdf97_2d_inverse(coeffs, locations, axes=(-2, -1)):
    '''Inverse 2D Cohen–Daubechies–Feauveau 9/7 wavelet.

    Parameters
    ----------
    coeffs : array_like
        Stitched together wavelet transform.
    locations : list
        Output of cdf97_2d_forward().
    axes : tuple, optional
        Axes to perform wavelet transform across.

    Returns
    -------
    array_like
        Inverse CDF97 transform.
    '''
    # Split coefficients out into coefficient list
    coeff_list = split_chunks(coeffs, locations)
    return pywt.waverec2(coeff_list, wavelet='bior4.4', mode='periodization', axes=axes)
def haar_recomp(params, rest_coeff, final_arr):
    reverse_flatten(rest_coeff, final_arr, 0)
    final_tuple = [params]
    for i in rest_coeff:
        final_tuple.append(i)
    final_tuple = tuple(final_tuple)
    return pywt.waverec2(final_tuple, 'haar')
def w2d(img, mode='haar', level=1):
    imArray = cv2.imread(img)
    # Datatype conversions
    # convert to grayscale
    imArray = cv2.cvtColor(imArray, cv2.COLOR_RGB2GRAY)
    # convert to float
    imArray = np.float32(imArray)
    imArray /= 255.
    # compute coefficients
    coeffs = pywt.wavedec2(imArray, mode, level=level)
    # Process Coefficients: zero out the level-1 horizontal detail band
    cH, cV, cD = coeffs[1]
    coeffs[1] = (cH * 0, cV, cD)
    # reconstruction
    imArray_H = pywt.waverec2(coeffs, mode)
    imArray_H *= 255.
    imArray_H = np.uint8(imArray_H)
    # Display result
    cv2.imshow('image', imArray_H)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
def idwt2(self):
    """
    Test pypwt for DWT reconstruction (waverec2).
    """
    W = self.W
    levels = self.levels

    # inverse DWT with pypwt
    W.forward()
    logging.info("computing Wavelets.inverse from pypwt")
    t0 = time()
    W.inverse()
    logging.info("Wavelets.inverse took %.3f ms" % elapsed_ms(t0))

    if self.do_pywt:
        # inverse DWT with pywt
        Wpy = pywt.wavedec2(self.data, self.wname, mode=per_kw, level=levels)
        logging.info("computing waverec2 from pywt")
        _ = pywt.waverec2(Wpy, self.wname, mode=per_kw)
        logging.info("pywt took %.3f ms" % elapsed_ms(t0))

    # Check reconstruction
    W_image = W.image
    maxerr = _calc_errors(self.data, W_image, "[rec]")
    self.assertTrue(maxerr < self.tol,
                    msg="[%s] something wrong with the reconstruction (errmax = %e)" % (self.wname, maxerr))
def munchetal_filter(im, wlevel, sigma, wname='db15'):
    # Wavelet decomposition:
    coeffs = pywt.wavedec2(im.astype(np.float32), wname, level=wlevel)
    coeffsFlt = [coeffs[0]]
    # FFT transform of horizontal frequency bands:
    for i in range(1, wlevel + 1):
        # FFT:
        fcV = np.fft.fftshift(np.fft.fft(coeffs[i][1], axis=0))
        my, mx = fcV.shape
        # Damping of vertical stripes:
        damp = 1 - np.exp(-(np.arange(-np.floor(my / 2.), -np.floor(my / 2.) + my) ** 2) / (2 * (sigma ** 2)))
        dampprime = np.kron(np.ones((1, mx)), damp.reshape((damp.shape[0], 1)))
        fcV = fcV * dampprime
        # Inverse FFT:
        fcVflt = np.real(np.fft.ifft(np.fft.ifftshift(fcV), axis=0))
        cVHDtup = (coeffs[i][0], fcVflt, coeffs[i][2])
        coeffsFlt.append(cVHDtup)
    # Get wavelet reconstruction:
    im_f = np.real(pywt.waverec2(coeffsFlt, wname))
    # Return image according to input type:
    if im.dtype == 'uint16':
        # Check extrema for uint16 images:
        im_f[im_f < np.iinfo(np.uint16).min] = np.iinfo(np.uint16).min
        im_f[im_f > np.iinfo(np.uint16).max] = np.iinfo(np.uint16).max
        # Return filtered image (an additional row and/or column might be present):
        return im_f[0:im.shape[0], 0:im.shape[1]].astype(np.uint16)
    else:
        return im_f[0:im.shape[0], 0:im.shape[1]]
def whash(image, hash_size=8, image_scale=None, mode='haar', remove_max_haar_ll=True):
    import pywt
    if image_scale is not None:
        assert image_scale & (image_scale - 1) == 0, "image_scale is not power of 2"
    else:
        image_natural_scale = 2**int(numpy.log2(min(image.size)))
        image_scale = max(image_natural_scale, hash_size)

    ll_max_level = int(numpy.log2(image_scale))
    level = int(numpy.log2(hash_size))
    assert hash_size & (hash_size - 1) == 0, "hash_size is not power of 2"
    assert level <= ll_max_level, "hash_size in a wrong range"

    dwt_level = ll_max_level - level
    image = image.convert("L").resize((image_scale, image_scale), Image.ANTIALIAS)
    pixels = numpy.asarray(image) / 255

    if remove_max_haar_ll:
        coeffs = pywt.wavedec2(pixels, 'haar', level=ll_max_level)
        coeffs = list(coeffs)
        coeffs[0] *= 0
        pixels = pywt.waverec2(coeffs, 'haar')

    coeffs = pywt.wavedec2(pixels, mode, level=dwt_level)
    dwt_low = coeffs[0]
    med = numpy.median(dwt_low)
    diff = dwt_low > med
    return diff
def inv(self, wavelet_vector):
    '''Inverse WT.

    cVec_list: vector containing all wavelet coefficients as vectorized in __call__
    '''
    # check if shapes of the coefficient matrices are known
    if self.cMat_shapes == []:
        print("Call WT first to obtain shapes of coefficient matrices")
        return None

    cVec_shapes = list(map(np.prod, self.cMat_shapes))
    split_indices = list(accumulate(cVec_shapes))
    cVec_list = np.split(wavelet_vector, split_indices)

    # reverse amplification
    cVec_list = [cVec_list[j] / self.amplify[j] for j in range(3 * self.level + 1)]

    # back to level format
    coeffs = [np.reshape(cVec_list[0], self.cMat_shapes[0])]
    for j in range(self.level):
        triple = cVec_list[3 * j + 1:3 * (j + 1) + 1]
        triple = [np.reshape(triple[i], self.cMat_shapes[1 + 3 * j + i]) for i in range(3)]
        coeffs = coeffs + [tuple(triple)]
    return pywt.waverec2(coeffs, wavelet=self.wavelet)
def testWave(img1, img2):
    transf1 = pywt.wavedec2(img1, 'haar', level=4)
    transf2 = pywt.wavedec2(img2, 'haar', level=4)
    assert len(transf1) == len(transf2)
    recWave = []
    for k in range(len(transf1)):
        # fuse the low-frequency (approximation) component
        if k == 0:
            loWeight1, loWeight2 = varianceWeight(transf1[0], transf2[0])
            lowFreq = np.zeros(transf2[0].shape)
            row, col = transf1[0].shape
            for i in range(row):
                for j in range(col):
                    lowFreq[i, j] = loWeight1 * transf1[0][i, j] + loWeight2 * transf2[0][i, j]
            recWave.append(lowFreq)
            continue
        # fuse the high-frequency (detail) components
        cvtArray = []
        for array1, array2 in zip(transf1[k], transf2[k]):
            tmp_row, tmp_col = array1.shape
            highFreq = np.zeros((tmp_row, tmp_col))
            var1 = getVarianceImg(array1)
            var2 = getVarianceImg(array2)
            for i in range(tmp_row):
                for j in range(tmp_col):
                    highFreq[i, j] = array1[i, j] if var1[i, j] > var2[i, j] else array2[i, j]
            cvtArray.append(highFreq)
        recWave.append(tuple(cvtArray))
    return pywt.waverec2(recWave, 'haar')
def hfilter(diff_image, var_image, threshold=1, ndamp=10):
    """
    This code was inspired by:
    https://github.com/spacetelescope/sprint_notebooks/blob/master/lucy_damped_haar.ipynb
    I believe it was initially written by Justin Ely: https://github.com/justincely
    It was buggy and did not work properly with every image size. I have
    therefore replaced it, using PyWavelets (pywt) and a custom htrans
    function to calculate the matrix for the var_image.
    """
    him, coeff_slices = pywt.coeffs_to_array(
        pywt.wavedec2(diff_image.astype(np.float), 'haar'), padding=0)
    dvarim = htrans(var_image.astype(np.float))

    sqhim = ((him / threshold)**2) / dvarim
    index = np.where(sqhim < 1)
    if len(index[0]) == 0:
        return diff_image

    # Eq. 8 of White is derived leading to N*x^(N-1)-(N-1)*x^N  :DOI: 10.1117/12.176819
    sqhim = sqhim[index] * (ndamp * sqhim[index]**(ndamp - 1) - (ndamp - 1) * sqhim[index]**ndamp)
    him[index] = sign(threshold * np.sqrt(dvarim[index] * sqhim), him[index])

    return pywt.waverec2(
        pywt.array_to_coeffs(him, coeff_slices, output_format='wavedec2'),
        'haar')[:diff_image.shape[0], :diff_image.shape[1]]
def w2d(img):
    model = 'haar'
    level = 1
    image_array = convert_image(image, 512)
    watermark_array = convert_image(watermark, 128)

    coeffs_image = process_coefficients(image_array, model, level=level)
    print_image_from_array(coeffs_image[0], 'LL_after_DWT.jpg')

    dct_array = apply_dct(coeffs_image[0])
    print_image_from_array(dct_array, 'LL_after_DCT.jpg')

    dct_array = embed_watermark(watermark_array, dct_array)
    print_image_from_array(dct_array, 'LL_after_embeding.jpg')

    coeffs_image[0] = inverse_dct(dct_array)
    print_image_from_array(coeffs_image[0], 'LL_after_IDCT.jpg')

    # reconstruction
    image_array_H = pywt.waverec2(coeffs_image, model)
    print_image_from_array(image_array_H, 'image_with_watermark.jpg')

    # recover images
    recover_watermark(image_array=image_array_H, model=model, level=level)
def log_wavelet_denoise(img: LIPImage, wavelet, treshold):
    coeffs = wavedec2(img.gray_levels, wavelet, level=4)
    for i in range(1, 5):
        coeffs[i] = wavelet_threshold(treshold, coeffs[i])
    img = waverec2(coeffs, wavelet)
    return img
def wavedec2(x, wavelet='haar', inverse=0):
    if not inverse:
        # get the coefficients
        coeffs = pywt.wavedec2(x, wavelet)
        # arrange the coefficients to form an image
        imgg = coeffs[0]
        for i in range(1, len(coeffs)):
            cH, cV, cD = coeffs[i]
            try:
                imgg = np.concatenate((imgg, cH), axis=1)
                imggBelow = np.concatenate((cV, cD), axis=1)
                imgg = np.concatenate((imgg, imggBelow), axis=0)
            except:
                raise ValueError("Input Error: the input's shape must be able to be divided by 2^n")
        return imgg
    else:
        # collect the images to form the coefficients
        size = 1
        coeffs = [x[:size, :size]]
        while size < x.shape[0]:
            cH = x[:size, size:2 * size]
            cV = x[size:2 * size, :size]
            cD = x[size:2 * size, size:2 * size]
            coeffs.append((cH, cV, cD))
            size *= 2
        # perform inverse dwt
        img = pywt.waverec2(coeffs, wavelet)
        return img
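# A hedged round-trip sketch for the wavedec2() wrapper above (an illustration,
# not part of the original source), assuming a square input whose side is a
# power of two so the tiled coefficient layout lines up:
import numpy as np

x = np.random.rand(64, 64)
tiled = wavedec2(x, 'haar')                 # pack all coefficients into one image
x_rec = wavedec2(tiled, 'haar', inverse=1)  # unpack the tiles and invert
print(np.allclose(x, x_rec))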
def visualizeWT(imageIn, show=False):
    # split the image into its rgb channels
    r = imageIn[:, :, 2]
    g = imageIn[:, :, 1]
    b = imageIn[:, :, 0]

    imgs = []
    for imArray in [r, g, b]:
        # convert to float
        imArray = np.float32(imArray)
        imArray /= 255
        # compute coefficients
        coeffs = pywt.wavedec2(imArray, 'haar', level=1)
        # Process Coefficients
        coeffs_H = list(coeffs)
        coeffs_H[0] *= 0
        # reconstruction
        imArray_H = pywt.waverec2(coeffs_H, 'haar')
        imArray_H *= 255
        imArray_H = np.uint8(imArray_H)

    cv2.namedWindow('image', cv2.WINDOW_NORMAL)
    cv2.imshow('image', imArray_H)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
def main():
    img1 = cv2.imread("/home/ajit/Desktop/images.jpg")
    img2 = copy.copy(img1)
    b1, g1, r1 = cv2.split(img2)

    hsv = cv2.cvtColor(img1, cv2.COLOR_BGR2HSV)
    h, s, v = cv2.split(hsv)
    a = h.max()
    b = h.min()
    h -= h.min()
    h = h / (a - b)
    h *= 179
    s -= s.min()
    s = s / (s.max() - s.min())
    s *= 255
    v -= v.min()
    v = v / (v.max() - v.min())
    v *= 255
    hsv = cv2.merge([h, s, v])
    hsv1 = hsv.astype(np.float32)

    rgb = cv2.cvtColor(hsv1, cv2.COLOR_HSV2RGB)
    b, g, r = cv2.split(rgb)
    r -= r.min()
    r *= 255 / (r.max() - r.min())
    g -= g.min()
    g *= 255 / (g.max() - g.min())
    b -= b.min()
    b *= 255 / (b.max() - b.min())
    cl1 = cv2.merge([b, g, r])

    clahe = cv2.createCLAHE()
    b2 = clahe.apply(b1)
    g2 = clahe.apply(g1)
    r2 = clahe.apply(r1)
    cl2 = cv2.merge([b2, g2, r2])

    cooef1 = pywt.wavedec2(cl1[:, :], 'db1')
    cooef2 = pywt.wavedec2(cl2[:, :], 'db1')
    FUSION_METHOD = 'max'
    fusedCooef = []
    for i in range(len(cooef1) - 1):
        if i == 0:
            fusedCooef.append(fuseCoeff(cooef1[0], cooef2[0], FUSION_METHOD))
        else:
            c1 = fuseCoeff(cooef1[i][0], cooef2[i][0], FUSION_METHOD)
            c2 = fuseCoeff(cooef1[i][1], cooef2[i][1], FUSION_METHOD)
            c3 = fuseCoeff(cooef1[i][2], cooef2[i][2], FUSION_METHOD)
            fusedCooef.append((c1, c2, c3))
    fusedImage = pywt.waverec2(fusedCooef, 'db1')
    fusedImage = np.multiply(
        np.divide(fusedImage - np.min(fusedImage), (np.max(fusedImage) - np.min(fusedImage))), 255)
    fusedImage = fusedImage.astype(np.uint8)
    cv2.imshow("win", fusedImage)
def wavelet_denoise(
    im,
    mother_wavelet: str = "db1",  # Daubechies wavelet 1
    levels: int = 4,
    keep: float = 1 / 1e2,  # percent
):
    """
    :param im:
    :type im:"""
    coef = pywt.wavedec2(im, wavelet=mother_wavelet, level=levels)
    coef_array, coef_slices = pywt.coeffs_to_array(coef)
    Csort = numpy.sort(numpy.abs(coef_array.reshape(-1)))
    coef_filt = pywt.array_to_coeffs(
        coef_array * (numpy.abs(coef_array) > Csort[int(numpy.floor((1 - keep) * len(Csort)))]),
        coef_slices,
        output_format=CoeffFormatEnum.wavedec2.value,
    )
    recon = pywt.waverec2(coef_filt, wavelet=mother_wavelet)
    return recon
def w2d(img, mode='haar', level=1):
    imArray = cv2.imread(img)
    # Datatype conversions
    # convert to grayscale
    imArray = cv2.cvtColor(imArray, cv2.COLOR_BGR2GRAY)
    # convert to float
    imArray = np.float32(imArray)
    # normalize
    imArray /= 255
    # compute coefficients
    coeffs = pywt.wavedec2(imArray, mode, level=level)
    # Process Coefficients
    coeffs_H = list(coeffs)
    coeffs_H[0] *= 0
    # reconstruction
    imArray_H = pywt.waverec2(coeffs_H, mode)
    imArray_H *= 255
    imArray_H = np.uint8(imArray_H)
    # Display result
    cv2.imshow('image', imArray_H)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
def fromWavelet(wavImg, wavelet='haar', mode='symmetric'):
    """
    :param wavImg: a wavelet image to transform back into image space
    :param wavelet: any common, named wavelet, including
        'Haar' (default)
        'Daubechies'
        'Symlet'
        'Coiflet'
        'Biorthogonal'
        'ReverseBiorthogonal'
        'DiscreteMeyer'
        'Gaussian'
        'MexicanHat'
        'Morlet'
        'ComplexGaussian'
        'Shannon'
        'FrequencyBSpline'
        'ComplexMorlet'
        or a custom
        [[lowpass_decomposition],
         [highpass_decomposition],
         [lowpass_reconstruction],
         [highpass_reconstruction]]
        where each is a pair of floating point values
    :param mode: str or 2-tuple of str, optional
        Signal extension mode, see Modes (default: 'symmetric'). This can also
        be a tuple containing a mode to apply along each axis in axes.

    See also:
        https://pywavelets.readthedocs.io/en/latest/ref/index.html
    """
    return pywt.waverec2(wavImg, _wavelet(wavelet), mode)
def give_high_freq(img):
    coeffs = pywt.wavedec2(img, 'haar', level=2)
    coeffs[0] = np.zeros_like(coeffs[0])
    new_img = pywt.waverec2(coeffs, 'haar')
    new_img = new_img.astype('uint8')
    return new_img[:, :, :-1]
def w2d(file, mode='db1', level=10):
    print(file)
    imArray = cv2.imread(file + ".jpg")
    # Datatype conversions
    # convert to grayscale
    imArray = cv2.cvtColor(imArray, cv2.COLOR_BGR2GRAY)
    # convert to float
    imArray = np.float32(imArray)
    imArray /= 255
    # compute coefficients
    coeffs = pywt.wavedec2(imArray, mode, level=level)
    # Process Coefficients
    coeffs_H = list(coeffs)
    coeffs_H[0] *= 0
    # reconstruction
    imArray_H = pywt.waverec2(coeffs_H, mode)
    imArray_H *= 255
    imArray_H = np.uint8(imArray_H)
    # Display result
    if file[:2] == "./":
        file = file[5:]
    np.savetxt("./textureAndShape/db1/" + file + "Db1.csv", imArray_H)

# w2d("fruit", 'db1', 10)
def w2dReg(img, mode=mode, level=level, noiseSigma=noiseSigma):
    # compute coefficients
    noisy_img = add_noise(img, noiseSigma=noiseSigma)
    rec_coeffs = denoise(noisy_img, mode, level, noiseSigma)
    # reconstruction
    rec_img = pywt.waverec2(rec_coeffs, mode)
    return noisy_img, rec_img, len(rec_coeffs) - 1
def incorporateTexture(image):
    # Transforming image to double type
    image = np.float32(image)

    # Changing domain to YCrCb
    image = cst.BGR2YCrCb(image)
    Y, Cr, Cb = cv2.split(image)

    # Wavelet Transformation in 2 levels
    (Sl, (Sh1, Sv1, Sd1), (Sh2, Sv2, Sd2)) = pywt.wavedec2(Y, 'db1', level=2)

    # Resizing layers
    reducedCb = cv2.resize(Cb, (Sd2.shape[1], Sd2.shape[0]), interpolation=cv2.INTER_LANCZOS4)
    reducedCr = cv2.resize(Cr, (Sv2.shape[1], Sv2.shape[0]), interpolation=cv2.INTER_LANCZOS4)

    # Acquiring Cb/Cr-plus and Cb/Cr-minus
    CbPlus, CbMinus = cst.dividePlusMinus(reducedCb)
    CrPlus, CrMinus = cst.dividePlusMinus(reducedCr)

    # Resizing Cb- to 1/4 of the original size
    reducedCbMinus = cv2.resize(CbMinus, (Sd1.shape[1], Sd1.shape[0]), interpolation=cv2.INTER_LANCZOS4)

    # Inverse Wavelet Transformation
    newYSecondTry = pywt.waverec2((Sl, (Sh1, Sv1, reducedCbMinus), (CrPlus, CbPlus, CrMinus)), 'db1')

    return newYSecondTry
def backprojection_2X(Ir, Orig):
    myfilter = 'db2'  # 'bior4.4'
    upscale_lvl = 1
    mode = 'smooth'
    wd1 = pywt.wavedec2(Orig, myfilter, level=upscale_lvl, mode=mode)
    # corg = np.append(wd1[0].ravel(), [wd1[1][0].ravel(), wd1[1][1].ravel(), wd1[1][2].ravel()])
    ilowc = wd1[0] / 2
    rangeImg = [np.amin(ilowc), np.amax(ilowc)]
    Ir = range0toN(Ir, rangeImg)
    wd2 = pywt.wavedec2(Ir, myfilter, level=upscale_lvl, mode=mode)
    crec = wd2
    crec[0] = 2 * ilowc
    if wd2[0].shape != wd2[1][0].shape:
        lst = list()
        for i in range(0, len(wd2[1])):
            # lst.append(wd2[1][i].T)
            lst.append(np.reshape(wd2[1][i].ravel(), wd2[0].shape))
        crec[1] = lst
    irec = pywt.waverec2(crec, myfilter, mode=mode)
    ibp = range0toN(irec, rangeImg)
    return ibp
def apply_wavelet_reconstruction(data, wavelet_name, ignore_level=None):
    """
    Apply 2D wavelet reconstruction.

    Parameters
    ----------
    data : list or tuple
        The first element is a 2D-array, the next elements are tuples of three
        2D-arrays, i.e. [mat_n, (cH_level_n, cV_level_n, cD_level_n), ...,
        (cH_level_1, cV_level_1, cD_level_1)].
    wavelet_name : str
        Name of a wavelet. E.g. "db5"
    ignore_level : int, optional
        Decomposition level to be ignored for reconstruction.

    Returns
    -------
    array_like
        2D array. Note that the sizes of the array are always even numbers.
    """
    if ignore_level is not None:
        level = len(data[1:])
        if level >= ignore_level > 0:
            data[-ignore_level] = tuple(
                [np.zeros_like(v) for v in data[-ignore_level]])
    return pywt.waverec2(data, wavelet_name)
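# A hedged usage sketch for apply_wavelet_reconstruction() above (not part of
# the original source): the coefficient list is exactly what pywt.wavedec2
# returns, and ignore_level=1 zeroes the finest detail level before
# reconstruction; the image here is synthetic.
import numpy as np
import pywt

image = np.random.rand(64, 64)
coeffs = pywt.wavedec2(image, "db5", level=3)
smoothed = apply_wavelet_reconstruction(list(coeffs), "db5", ignore_level=1)
print(smoothed.shape)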
def blend_images(base, texture, level=4, mode='sp1', base_gain=None, texture_gain=None):
    base_data = image2array(base)
    texture_data = image2array(texture)
    output_data = []
    for base_band, texture_band in zip(base_data, texture_data):
        base_band_coeffs = pywt.wavedec2(base_band, 'db2', mode, level)
        texture_band_coeffs = pywt.wavedec2(texture_band, 'db2', mode, level)
        output_band_coeffs = [base_band_coeffs[0]]
        del base_band_coeffs[0], texture_band_coeffs[0]
        for n, (base_band_details, texture_band_details) in enumerate(
                zip(base_band_coeffs, texture_band_coeffs)):
            blended_details = []
            for (base_detail, texture_detail) in zip(base_band_details, texture_band_details):
                if base_gain is not None:
                    base_detail *= base_gain
                if texture_gain is not None:
                    texture_detail *= texture_gain
                blended = numpy.where(abs(base_detail) > abs(texture_detail),
                                      base_detail, texture_detail)
                blended_details.append(blended)
            base_band_coeffs[n] = texture_band_coeffs[n] = None
            output_band_coeffs.append(blended_details)
        new_band = pywt.waverec2(output_band_coeffs, 'db2', mode)
        output_data.append(new_band)
        del new_band, base_band_coeffs, texture_band_coeffs
    del base_data, texture_data
    output_data = numpy.array(output_data)
    return array2image(output_data, base.mode)
def test_equal_oddshape(size):
    wave = 'db3'
    J = 3
    mode = 'symmetric'
    x = torch.randn(5, 4, *size).to(dev)
    dwt1 = DWTForward(J=J, wave=wave, mode=mode).to(dev)
    iwt1 = DWTInverse(wave=wave, mode=mode).to(dev)
    dwt2 = DWTForward(J=J, wave=wave, mode=mode).to(dev)
    iwt2 = DWTInverse(wave=wave, mode=mode).to(dev)

    yl1, yh1 = dwt1(x)
    x1 = iwt1((yl1, yh1))
    yl2, yh2 = dwt2(x)
    x2 = iwt2((yl2, yh2))

    # Test it is the same as doing the PyWavelets wavedec
    coeffs = pywt.wavedec2(x.cpu().numpy(), wave, level=J, axes=(-2, -1), mode=mode)
    X2 = pywt.waverec2(coeffs, wave, mode=mode)
    np.testing.assert_array_almost_equal(X2, x1.detach(), decimal=PREC_FLT)
    np.testing.assert_array_almost_equal(X2, x2.detach(), decimal=PREC_FLT)
    np.testing.assert_array_almost_equal(yl1.cpu(), coeffs[0], decimal=PREC_FLT)
    np.testing.assert_array_almost_equal(yl2.cpu(), coeffs[0], decimal=PREC_FLT)
    for j in range(J):
        for b in range(3):
            np.testing.assert_array_almost_equal(
                coeffs[J-j][b], yh1[j][:, :, b].cpu(), decimal=PREC_FLT)
            np.testing.assert_array_almost_equal(
                coeffs[J-j][b], yh2[j][:, :, b].cpu(), decimal=PREC_FLT)
def w2d(img, mode='haar', level=1):
    kernel_size = 3
    scale = 1
    delta = 0
    ddepth = cv2.CV_16UC3
    # Datatype conversions
    # convert to grayscale
    imArray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # convert to float
    imArray = np.float32(imArray)
    imArray /= 255
    # compute coefficients
    coeffs = pywt.wavedec2(imArray, mode, level=level)
    # Process Coefficients
    coeffs_H = list(coeffs)
    coeffs_H[0] *= 0
    # reconstruction
    imArray_H = pywt.waverec2(coeffs_H, mode)
    imArray_H *= 255
    imArray_H = np.uint8(imArray_H)
    # print(imArray_H)
    blur = cv2.GaussianBlur(imArray_H, (3, 3), 0, 0, cv2.BORDER_DEFAULT)
    laplacian = cv2.Laplacian(blur, ddepth=ddepth, ksize=kernel_size,
                              scale=scale, delta=delta, borderType=cv2.BORDER_DEFAULT)
    (mean, stddev) = cv2.meanStdDev(laplacian)
    laplacian_operator_std_dev = stddev[0]
    laplacian_operator_variance = stddev[0] * stddev[0]
    return laplacian_operator_variance
def munchetal_filter(im, wlevel, sigma, wname='db15'):
    # Wavelet decomposition:
    coeffs = pywt.wavedec2(im.astype(np.float32), wname, level=wlevel)
    coeffsFlt = [coeffs[0]]
    # FFT transform of horizontal frequency bands:
    for i in range(1, wlevel + 1):
        # FFT:
        fcV = np.fft.fftshift(np.fft.fft(coeffs[i][1], axis=0))
        my, mx = fcV.shape
        # Damping of vertical stripes:
        damp = 1 - np.exp(-(np.arange(-np.floor(my / 2.), -np.floor(my / 2.) + my)**2) / (2 * (sigma**2)))
        dampprime = np.kron(np.ones((1, mx)), damp.reshape((damp.shape[0], 1)))  # np.tile(damp[:, np.newaxis], (1, mx))
        fcV = fcV * dampprime
        # Inverse FFT:
        fcVflt = np.real(np.fft.ifft(np.fft.ifftshift(fcV), axis=0))
        cVHDtup = (coeffs[i][0], fcVflt, coeffs[i][2])
        coeffsFlt.append(cVHDtup)
    # Get wavelet reconstruction:
    im_f = np.real(pywt.waverec2(coeffsFlt, wname))
    # Return image according to input type:
    if im.dtype == 'uint16':
        # Check extrema for uint16 images:
        im_f[im_f < np.iinfo(np.uint16).min] = np.iinfo(np.uint16).min
        im_f[im_f > np.iinfo(np.uint16).max] = np.iinfo(np.uint16).max
        # Return filtered image (an additional row and/or column might be present):
        return im_f[0:im.shape[0], 0:im.shape[1]].astype(np.uint16)
    else:
        return im_f[0:im.shape[0], 0:im.shape[1]]
def _call(self, coeff):
    """Compute the discrete 1D, 2D or 3D inverse wavelet transform.

    Parameters
    ----------
    coeff : `DiscreteLpVector`

    Returns
    -------
    arr : `DiscreteLpVector`
    """
    if len(self.range.shape) == 1:
        coeff_list = array_to_pywt_coeff(coeff, self.size_list)
        x = pywt.waverec(coeff_list, self.wbasis, self.mode)
        return self.range.element(x)
    elif len(self.range.shape) == 2:
        coeff_list = array_to_pywt_coeff(coeff, self.size_list)
        x = pywt.waverec2(coeff_list, self.wbasis, self.mode)
        return self.range.element(x)
    elif len(self.range.shape) == 3:
        coeff_dict = array_to_pywt_coeff(coeff, self.size_list)
        x = wavelet_reconstruction3d(coeff_dict, self.wbasis, self.mode, self.nscales)
        return self.range.element(x)
def test_waverec2_axes_subsets():
    rstate = np.random.RandomState(0)
    data = rstate.standard_normal((8, 8, 8))
    # test all combinations of 2 out of 3 axes transformed
    for axes in combinations((0, 1, 2), 2):
        coefs = pywt.wavedec2(data, 'haar', axes=axes)
        rec = pywt.waverec2(coefs, 'haar', axes=axes)
        assert_allclose(rec, data, atol=1e-14)
def waveletDenoise(u, noiseSigma):
    wavelet = pywt.Wavelet('bior6.8')
    levels = int(np.log2(u.shape[0]))
    waveletCoeffs = pywt.wavedec2(u, wavelet, level=levels)
    threshold = noiseSigma * np.sqrt(2 * np.log2(u.size))
    NWC = [pywt.thresholding.soft(x, threshold) for x in waveletCoeffs]
    u = pywt.waverec2(NWC, wavelet)[:u.shape[0], :u.shape[1]]
    return u
def trans(imArray, mode='haar', level=1):
    coeffs = pywt.wavedec2(imArray, mode, level=level)
    coeffs_H = list(coeffs)
    coeffs_H[0] = np.zeros(coeffs_H[0].shape)
    imArray_H = pywt.waverec2(coeffs_H, mode)
    # return imArray
    # print "img1", imArray[0]
    # print "img2", imArray_H[0]
    return imArray_H
def test_waverec2_all_wavelets_modes():
    # test 2D case using all wavelets and modes
    rstate = np.random.RandomState(1234)
    r = rstate.randn(80, 96)
    for wavelet in wavelist:
        for mode in pywt.Modes.modes:
            coeffs = pywt.wavedec2(r, wavelet, mode=mode)
            assert_allclose(pywt.waverec2(coeffs, wavelet, mode=mode),
                            r, rtol=tol_single, atol=tol_single)
def test_waverec2_accuracies():
    rstate = np.random.RandomState(1234)
    x0 = rstate.randn(4, 4)
    for dt, tol in dtypes_and_tolerances:
        x = x0.astype(dt)
        if np.iscomplexobj(x):
            x += 1j * rstate.randn(4, 4).astype(x.real.dtype)
        coeffs = pywt.wavedec2(x, 'db1')
        assert_(len(coeffs) == 3)
        assert_allclose(pywt.waverec2(coeffs, 'db1'), x, atol=tol, rtol=tol)
def rmatvec(x):
    iinf = 0
    isup = b[0].size
    yl = [x[iinf:isup].reshape(b[0].shape), ]
    for i in xrange(1, len(b)):
        tmp = list()
        for j in xrange(3):
            iinf = copy(isup)
            isup = iinf + b[i][j].size
            tmp.append(x[iinf:isup].reshape(b[i][j].shape))
        yl.append(tmp)
    return pywt.waverec2(yl, wavelet, mode=mode)[:a.shape[0], :a.shape[1]].flatten()
def w2d(img, mode='haar', level=1):
    # imArray = cv2.imread(img)
    imArray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    imArray = np.float32(imArray)
    imArray /= 255
    coeffs = pywt.wavedec2(imArray, mode, level=level)
    coeffs_H = list(coeffs)
    coeffs_H[0] *= 0
    imArray_H = pywt.waverec2(coeffs_H, mode)
    imArray_H *= 255
    imArray_H = np.uint8(imArray_H)
    cv2.imshow("image", imArray_H)
def inverse(self):
    tmp = []
    tmp.append(self.a)
    for k in range(0, len(self.h)):
        detcoefs = []
        detcoefs.append(self.h[k])
        detcoefs.append(self.v[k])
        detcoefs.append(self.d[k])
        tmp.append(detcoefs)
    res = pywt.waverec2(tmp, self.wname, mode='per')
    if self.do_random_shifts is True:
        return _circular_shift(res, -self.shifts[0], -self.shifts[1])
    else:
        return res
def prob4(image="swanlake_polluted.jpg"):
    img = imread(image, True)
    wavelet = pywt.Wavelet('haar')
    coeffs = pywt.wavedec2(img, wavelet)
    print len(coeffs)
    cleaned = pywt.waverec2(coeffs[:-1], wavelet)
    plt.subplot(121)
    plt.imshow(img, cmap='gray')
    plt.title("Original")
    plt.subplot(122)
    plt.imshow(cleaned, cmap='gray')
    plt.title("Cleaned")
    plt.show()
def test_pywt_coeff_to_array_and_array_to_pywt_coeff():
    # Verify that the helper function does indeed work as expected
    wbasis = pywt.Wavelet('db1')
    mode = 'zpd'
    nscales = 2
    n = 16

    # 1D test
    size_list = coeff_size_list((n,), nscales, wbasis, mode)
    x = np.random.rand(n)
    coeff_list = pywt.wavedec(x, wbasis, mode, nscales)
    coeff_arr = pywt_coeff_to_array(coeff_list, size_list)
    assert isinstance(coeff_arr, (np.ndarray))
    length_of_array = np.prod(size_list[0])
    length_of_array += sum(np.prod(shape) for shape in size_list[1:-1])
    assert all_equal(len(coeff_arr), length_of_array)

    coeff_list2 = array_to_pywt_coeff(coeff_arr, size_list)
    assert all_equal(coeff_list, coeff_list2)
    reconstruction = pywt.waverec(coeff_list2, wbasis, mode)
    assert all_almost_equal(reconstruction, x)

    # 2D test
    size_list = coeff_size_list((n, n), nscales, wbasis, mode)
    x = np.random.rand(n, n)
    coeff_list = pywt.wavedec2(x, wbasis, mode, nscales)
    coeff_arr = pywt_coeff_to_array(coeff_list, size_list)
    assert isinstance(coeff_arr, (np.ndarray))
    length_of_array = np.prod(size_list[0])
    length_of_array += sum(3 * np.prod(shape) for shape in size_list[1:-1])
    assert all_equal(len(coeff_arr), length_of_array)

    coeff_list2 = array_to_pywt_coeff(coeff_arr, size_list)
    assert all_equal(coeff_list, coeff_list2)
    reconstruction = pywt.waverec2(coeff_list2, wbasis, mode)
    assert all_almost_equal(reconstruction, x)

    # 3D test
    size_list = coeff_size_list((n, n, n), nscales, wbasis, mode)
    x = np.random.rand(n, n, n)
    coeff_dict = wavelet_decomposition3d(x, wbasis, mode, nscales)
    coeff_arr = pywt_coeff_to_array(coeff_dict, size_list)
    assert isinstance(coeff_arr, (np.ndarray))
    length_of_array = np.prod(size_list[0])
    length_of_array += sum(7 * np.prod(shape) for shape in size_list[1:-1])
    assert len(coeff_arr) == length_of_array

    coeff_dict2 = array_to_pywt_coeff(coeff_arr, size_list)
    reconstruction = wavelet_reconstruction3d(coeff_dict2, wbasis, mode, nscales)
    assert all_equal(coeff_dict, coeff_dict)
    assert all_almost_equal(reconstruction, x)
def wavelets_denoise(in_file, in_mask=None, out_file=None):
    import numpy as np
    import nibabel as nb
    import os.path as op
    import pywt as wt

    if out_file is None:
        fname, fext = op.splitext(op.basename(in_file))
        if fext == '.gz':
            fname, _ = op.splitext(fname)
        out_file = op.abspath('./%s_wavelets.nii.gz' % fname)

    im = nb.load(in_file)
    aff = im.get_affine()
    imdata = im.get_data()
    datamax = imdata.max()
    datamin = imdata.min()
    imdata = 255. * (imdata - datamin) / (datamax - datamin)

    if in_mask is not None:
        mask = nb.load(in_mask).get_data()
        mask[mask > 0] = 1.0
        mask[mask <= 0] = 0.0
        imdata *= mask
    else:
        mask = np.zeros_like(imdata)
        mask[imdata != 0] = 1

    result = np.zeros_like(imdata)
    wavelet = wt.Wavelet('db10')
    thres = 100
    offset = (0 if imdata.shape[0] % 2 == 0 else 1,
              0 if imdata.shape[1] % 2 == 0 else 1)

    for z in np.arange(imdata.shape[2]):
        zslice = imdata[offset[0]:, offset[1]:, z]
        wcoeff = wt.wavedec2(zslice, wavelet)
        nwcoeff = map(lambda x: wt.thresholding.soft(x, thres), wcoeff)
        result[offset[0]:, offset[1]:, z] = wt.waverec2(nwcoeff, wavelet)

    m = np.median(result[offset[0]:, offset[1]:, :])
    result[offset[0]:, offset[1]:, :] = 2.0 * \
        (result[offset[0]:, offset[1]:, :] - m) / 255.

    nb.Nifti1Image(result, im.get_affine(), im.get_header()).to_filename(out_file)
    return out_file
def fromwav(stack, coeffs, cA, cH, cV, cD, levels, wavelet):
    i1 = len(cA.flatten())
    cA = stack[:i1].reshape(cA.shape)
    coeffs = [cA]
    for l in range(levels):
        i2 = i1 + len(cH[l].flatten())
        i3 = i2 + len(cV[l].flatten())
        i4 = i3 + len(cD[l].flatten())
        cHn = stack[i1:i2].reshape(cH[l].shape)
        cVn = stack[i2:i3].reshape(cV[l].shape)
        cDn = stack[i3:i4].reshape(cD[l].shape)
        coeffs.append((cHn, cVn, cDn))
        i1 = i4
    return pywt.waverec2(coeffs, wavelet)
def func(dframe):
    frame1, frame2 = dframe[0], dframe[1]
    frame1 = np.array(frame1)
    frame2 = np.array(frame2)
    C = pywt.wavedec2(frame1, 'db4', level=level)
    S = pywt.wavedec2(frame2, 'db4', level=level)
    tA2 = (C[0] + S[0]) / 2
    coeffs = fuse(tA2, C[1:], S[1:])
    fuse_img = pywt.waverec2(coeffs, 'db4')
    if frame1.dtype == np.uint16:
        fuse_img = fuse_img.clip(0, 65535).astype(np.uint16)
    elif frame1.dtype == np.uint8:
        fuse_img = fuse_img.clip(0, 255).astype(np.uint8)
    return np.squeeze(fuse_img)
def test_multilevel_dtypes_2d():
    wavelet = pywt.Wavelet('haar')
    for dt_in, dt_out in zip(dtypes_in, dtypes_out):
        # wavedec2, waverec2
        x = np.ones((8, 8), dtype=dt_in)
        errmsg = "wrong dtype returned for {0} input".format(dt_in)
        cA, coeffsD2, coeffsD1 = pywt.wavedec2(x, wavelet, level=2)
        assert_(cA.dtype == dt_out, "wavedec2: " + errmsg)
        for c in coeffsD1:
            assert_(c.dtype == dt_out, "wavedec2: " + errmsg)
        for c in coeffsD2:
            assert_(c.dtype == dt_out, "wavedec2: " + errmsg)
        x_roundtrip = pywt.waverec2([cA, coeffsD2, coeffsD1], wavelet)
        assert_(x_roundtrip.dtype == dt_out, "waverec2: " + errmsg)
def test_ravel_wavedec2_with_lists():
    x1 = np.ones((8, 8))
    wav = pywt.Wavelet('haar')
    coeffs = pywt.wavedec2(x1, wav)

    # list [cHn, cVn, cDn] instead of tuple is okay
    coeffs[1:] = [list(c) for c in coeffs[1:]]
    coeff_arr, slices, shapes = pywt.ravel_coeffs(coeffs)
    coeffs2 = pywt.unravel_coeffs(coeff_arr, slices, shapes, output_format='wavedec2')
    x1r = pywt.waverec2(coeffs2, wav)
    assert_allclose(x1, x1r, rtol=1e-4, atol=1e-4)

    # wrong length list will cause a ValueError
    coeffs[1:] = [list(c[:-1]) for c in coeffs[1:]]  # truncate diag coeffs
    assert_raises(ValueError, pywt.ravel_coeffs, coeffs)
def wavelet_transform(data, threshold):
    wavelet_type = 'haar'
    clean_coef = list()
    compose = list()
    cA2, cD2, cD1 = pywt.wavedec2(data, wavelet_type, level=2)
    clean_coef.append(cA2)
    clean_coef.append(cD2)
    for c in cD1:
        compose.append(numpy.where(((c < (-threshold)) | (c > threshold)), c, 0))
    clean_coef.append(tuple(compose))
    t = pywt.waverec2(clean_coef, wavelet_type)
    values = t.astype(int)
    return values
def blend_images(base, texture, wavelet, level, mode='smooth', base_gain=None, texture_gain=None):
    """Blend loaded images at `level` of granularity using `wavelet`"""
    base_data = image2array(base)
    texture_data = image2array(texture)
    output_data = []
    # process color bands
    for base_band, texture_band in zip(base_data, texture_data):
        # multilevel dwt
        base_band_coeffs = pywt.wavedec2(base_band, wavelet, mode, level)
        texture_band_coeffs = pywt.wavedec2(texture_band, wavelet, mode, level)
        # average coefficients of base image
        output_band_coeffs = [base_band_coeffs[0]]  # cA
        del base_band_coeffs[0], texture_band_coeffs[0]
        # blend details coefficients
        for n, (base_band_details, texture_band_details) in enumerate(
                zip(base_band_coeffs, texture_band_coeffs)):
            blended_details = []
            for (base_detail, texture_detail) in zip(base_band_details, texture_band_details):
                if base_gain is not None:
                    base_detail *= base_gain
                if texture_gain is not None:
                    texture_detail *= texture_gain
                # select coeffs with greater energy
                blended = numpy.where(abs(base_detail) > abs(texture_detail),
                                      base_detail, texture_detail)
                blended_details.append(blended)
            base_band_coeffs[n] = texture_band_coeffs[n] = None
            output_band_coeffs.append(blended_details)
        # multilevel idwt
        new_band = pywt.waverec2(output_band_coeffs, wavelet, mode)
        output_data.append(new_band)
        del new_band, base_band_coeffs, texture_band_coeffs
    del base_data, texture_data
    output_data = numpy.array(output_data)
    return array2image(output_data, base.mode)
def wavelet_denoise(array, wavelet, threshold, levels, thrmode="hard"):
    """
    Wavelet filtering of a 2d array using the PyWavelets library. First a 2d
    discrete wavelet transform is performed, followed by hard or soft
    thresholding of the coefficients.

    Parameters
    ----------
    array : array_like
        Input 2d array or image.
    wavelet : Pywt wavelet object
        Pywt wavelet object. Example: pywt.Wavelet('bior2.2')
    threshold : int
        Threshold on the wavelet coefficients.
    levels : int
        Wavelet levels to be used.
    thrmode : {'hard', 'soft'}, optional
        Mode of thresholding of the wavelet coefficients.

    Returns
    -------
    array_filtered : array_like
        Filtered array with the same dimensions and size as the input one.

    Notes
    -----
    Full documentation of the PyWavelets package here:
    http://www.pybytes.com/pywavelets/

    For information on the builtin wavelets and how to use them:
    http://www.pybytes.com/pywavelets/regression/wavelet.html
    http://wavelets.pybytes.com
    """
    if not array.ndim == 2:
        raise TypeError("Input array is not a frame or 2d array")

    WC = pywt.wavedec2(array, wavelet, level=levels)
    if thrmode == "hard":
        NWC = map(lambda x: pywt.thresholding.hard(x, threshold), WC)
    elif thrmode == "soft":
        NWC = map(lambda x: pywt.thresholding.soft(x, threshold), WC)
    else:
        raise ValueError("Threshold mode not recognized")
    array_filtered = pywt.waverec2(NWC, wavelet)
    return array_filtered
def whash(image, hash_size=8, image_scale=None, mode='haar', remove_max_haar_ll=True):
    """
    Wavelet Hash computation.

    Based on https://www.kaggle.com/c/avito-duplicate-ads-detection/

    @image must be a PIL instance.
    @hash_size must be a power of 2 and less than @image_scale.
    @image_scale must be a power of 2 and less than the image size. By default
        it is equal to the maximum power of 2 for the input image.
    @mode (see modes in the pywt library):
        'haar' - Haar wavelets, by default
        'db4'  - Daubechies wavelets
    @remove_max_haar_ll - remove the lowest low level (LL) frequency using the Haar wavelet.
    """
    import pywt
    if image_scale is not None:
        assert image_scale & (image_scale - 1) == 0, "image_scale is not power of 2"
    else:
        image_scale = 2**int(numpy.log2(min(image.size)))
    ll_max_level = int(numpy.log2(image_scale))

    level = int(numpy.log2(hash_size))
    assert hash_size & (hash_size - 1) == 0, "hash_size is not power of 2"
    assert level <= ll_max_level, "hash_size in a wrong range"

    dwt_level = ll_max_level - level
    image = image.convert("L").resize((image_scale, image_scale), Image.ANTIALIAS)
    pixels = numpy.array(image.getdata(), dtype=numpy.float).reshape((image_scale, image_scale))
    pixels /= 255

    # Remove the low-level frequency LL(max_ll) using the Haar filter if @remove_max_haar_ll
    if remove_max_haar_ll:
        coeffs = pywt.wavedec2(pixels, 'haar', level=ll_max_level)
        coeffs = list(coeffs)
        coeffs[0] *= 0
        pixels = pywt.waverec2(coeffs, 'haar')

    # Use LL(K) as freq, where K is log2(@hash_size)
    coeffs = pywt.wavedec2(pixels, mode, level=dwt_level)
    dwt_low = coeffs[0]

    # Subtract the median and compute the hash
    med = numpy.median(dwt_low)
    diff = dwt_low > med
    return ImageHash(diff)
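# A hedged usage sketch for whash() above (not part of the original source);
# "photo.jpg" is a placeholder file name, and the call assumes the module-level
# numpy / Image / ImageHash imports that whash() relies on are available.
from PIL import Image

img = Image.open("photo.jpg")  # hypothetical input image
print(whash(img, hash_size=8, mode='haar'))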
def get_wavelet_H(wave_image):
    '''
    inverse wavelet transform of the wavelets image
    '''
    if len(numpy.shape(wave_image)) == 2:
        wave_image = numpy.reshape(wave_image, numpy.shape(wave_image) + (1,), order='F')
    else:
        pass
    L = numpy.shape(wave_image)[-1]
    new_image = numpy.empty_like(wave_image, dtype=dtype)
    for jj in xrange(0, L):
        N = numpy.shape(wave_image[:, :, jj])[0]
        tmp_image = numpy.abs(wave_image[:, :, jj])
        # new_image = numpy.empty_like(wave_image)
        p0 = tmp_image[N/2:, 0:N/2]
        p1 = tmp_image[0:N/2, N/2:]
        p2 = tmp_image[N/2:, N/2:]
        cA = tmp_image[0:N/2, 0:N/2]
        cH3 = (p0, p1, p2)

        N = numpy.shape(cA)[0]
        p0 = cA[N/2:, 0:N/2]
        p1 = cA[0:N/2, N/2:]
        p2 = cA[N/2:, N/2:]
        cH2 = (p0, p1, p2)
        cA = cA[0:N/2, 0:N/2]

        N = numpy.shape(cA)[0]
        p0 = cA[N/2:, 0:N/2]
        p1 = cA[0:N/2, N/2:]
        p2 = cA[N/2:, N/2:]
        cH1 = (p0, p1, p2)
        cA = cA[0:N/2, 0:N/2]

        new_image[:, :, jj] = pywt.waverec2((cA, cH1, cH2, cH3), 'haar')
        # new_image[:,:,jj] = gasp.idwt2([cA, [cH1, cH2, cH3]], 'haar')
    return new_image
def get_cascade_with_wavelets(rainfield, nrLevels=6, wavelet='db4', doplot=0):
    rainfieldSize = rainfield.shape

    # Decompose rainfall field
    coeffsRain = pywt.wavedec2(rainfield, wavelet, level=nrLevels)

    if doplot == 1:
        vmaxorig = rainfield.max()
        ncols = 3
        nrows = np.ceil(nrLevels/ncols) + 1

        plt.subplot(nrows, ncols, 1)
        plt.title('Original image')
        plt.imshow(rainfield, interpolation='none', vmin=0, vmax=vmaxorig)
        cbar = plt.colorbar()
        # cbar.set_label('dBZ')
        plt.axis('off')

        recomposedCascade = pywt.waverec2(coeffsRain, wavelet)
        plt.subplot(nrows, ncols, 2)
        plt.title('Reconstructed field')
        plt.imshow(recomposedCascade, interpolation='none', vmin=0, vmax=vmaxorig)
        cbar = plt.colorbar()
        plt.axis('off')

        for nl in xrange(nrLevels):
            plt.subplot(nrows, ncols, ncols + 1 + nl)
            # plt.title('Level %i (%i km)' % (nl, CentreWaveLengths[nl]))
            nlevel = coeffsRain[nl][0].copy()
            print(nlevel.shape)
            vmax = np.percentile(nlevel, 99.0)
            vmin = np.percentile(nlevel, 1.0)
            plt.imshow(nlevel, vmin=vmin, vmax=vmax, interpolation='none')
            cbar = plt.colorbar()
            plt.axis('off')
        plt.show()

    return Cascade