def cryo_conv_vol(x, kernel_f):
    """Convolve a cubic volume `x` with a centered Fourier-domain kernel
    `kernel_f` by pointwise multiplication in frequency space."""
    n = x.shape[0]
    n_ker = kernel_f.shape[0]

    if np.any(np.array(x.shape) != n):
        raise ValueError('Volume in `x` must be cubic')
    if np.any(np.array(kernel_f.shape) != n_ker):
        raise ValueError('Convolution kernel in `kernel_f` must be cubic')

    is_singleton = len(x.shape) == 3

    shifted_kernel_f = np.fft.ifftshift(np.fft.ifftshift(np.fft.ifftshift(kernel_f, 0), 1), 2)

    if is_singleton:
        x = numpy_fft.fftn(x, [n_ker] * 3)
    else:
        x = numpy_fft.fft(x, n=n_ker, axis=0)
        x = numpy_fft.fft(x, n=n_ker, axis=1)
        x = numpy_fft.fft(x, n=n_ker, axis=2)

    x *= shifted_kernel_f

    if is_singleton:
        x = numpy_fft.ifftn(x)
        x = x[:n, :n, :n]
    else:
        x = numpy_fft.ifft(x, axis=0)
        x = numpy_fft.ifft(x, axis=1)
        x = numpy_fft.ifft(x, axis=2)

    x = x.real

    return x
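# Hedged usage sketch (not from the original source): exercising cryo_conv_vol
# with an all-ones Fourier kernel, which should act as an identity operation.
# Assumes the module-level aliases `np` and `numpy_fft` shown below.
import numpy as np
from numpy import fft as numpy_fft

n = 8
rng = np.random.default_rng(0)
vol = rng.standard_normal((n, n, n))
identity_kernel_f = np.ones((n, n, n))  # flat transfer function = delta kernel
out = cryo_conv_vol(vol, identity_kernel_f)
assert np.allclose(out, vol)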
def polarization_to_signal(self, P_of_t_in, *, return_polarization=False,
                           local_oscillator_number=-1):
    """This function generates a frequency-resolved signal from a polarization field.

    local_oscillator_number - usually the local oscillator will be the last
    pulse in the list self.efields
    """
    pulse_time = self.pulse_times[local_oscillator_number]
    if self.gamma != 0:
        exp_factor = np.exp(-self.gamma * (self.t - pulse_time))
        P_of_t_in *= exp_factor
    P_of_t = P_of_t_in
    if return_polarization:
        return P_of_t

    if local_oscillator_number == 'impulsive':
        efield = np.exp(1j * self.w * pulse_time)
    else:
        pulse_time_ind = np.argmin(np.abs(self.t - pulse_time))
        pulse_start_ind = pulse_time_ind - self.size // 2
        pulse_end_ind = pulse_time_ind + self.size // 2 + self.size % 2

        t_slice = slice(pulse_start_ind, pulse_end_ind, None)

        efield = np.zeros(self.t.size, dtype='complex')
        efield[t_slice] = self.efields[local_oscillator_number]
        efield = fftshift(ifft(ifftshift(efield))) * len(P_of_t) * (self.t[1] - self.t[0]) / np.sqrt(2 * np.pi)

    if P_of_t.size % 2:
        P_of_t = P_of_t[:-1]
        efield = efield[:len(P_of_t)]

    P_of_w = fftshift(ifft(ifftshift(P_of_t))) * len(P_of_t) * (self.t[1] - self.t[0]) / np.sqrt(2 * np.pi)

    signal = np.imag(P_of_w * np.conjugate(efield))
    return signal
def _decoder(self, x_encoded, result='full', axis=-1,
             planner_effort='FFTW_ESTIMATE'):
    """
    Transform the encoded data back to time series.

    result = 'full': return to original modulating frequencies for each sub-band.
    result = 'auto': return to the lowest modulating frequencies for each sub-band.
    """
    Nch, Nf, Nsamp = x_encoded.shape
    if result == 'full':
        out = np.zeros((Nch, self.n_freqs, self.n_samples),
                       dtype=np.complex64)
        out[:, self.decoder_rule, self.encoder_rule] = x_encoded
        out = fft.fftshift(out, axes=axis)
        return fft.ifft(out, axis=axis, planner_effort=planner_effort)
    elif result == 'auto':
        out = x_encoded
        out = fft.fftshift(out, axes=axis)
        out = np.roll(
            out,
            int(self.bandwidth // 2 * (self.n_samples // self.sample_rate)))
        return fft.ifft(out, n=self.n_samples, axis=axis,
                        planner_effort=planner_effort)
def ift1D(k, f, *, axis=0, zero_DC=False):
    """Takes in k and f = f(k), and returns x and the discrete inverse Fourier
    transform of f(k) -> y(x). Handles all of the annoyances of fftshift and
    ifftshift, and gets the normalization right.

    Args:
        k (np.ndarray): independent variable
        f (np.ndarray): dependent variable

    Kwargs:
        axis (int): which axis to perform the FFT along
        zero_DC (bool): if True, zero out the DC component of the result
    """
    dk = k[1] - k[0]
    x = fftshift(fftfreq(k.size, d=dk)) * 2 * np.pi
    ifft_norm = dk * k.size / (2 * np.pi)

    shifted_k = ifftshift(k)
    if np.isclose(shifted_k[0], 0):
        y = ifft(ifftshift(f, axes=(axis)), axis=axis) * ifft_norm
    else:
        y = ifft(f, axis=axis) * ifft_norm

    if zero_DC:
        nd_slice = [slice(None) for i in range(len(y.shape))]
        nd_slice[axis] = slice(0, 1, 1)
        nd_slice = tuple(nd_slice)
        y[nd_slice] = 0

    y = fftshift(y, axes=(axis))
    return x, y
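# Hedged usage sketch (not from the original source): a Gaussian transform
# pair, (1/2pi) * integral of exp(-k^2/2) exp(ikx) dk = exp(-x^2/2)/sqrt(2pi).
# Assumes the module binds fftshift, ifftshift, fftfreq and ifft from
# numpy.fft (pyfftw's numpy_fft interface would behave the same way).
import numpy as np
from numpy.fft import fftshift, ifftshift, fftfreq, ifft

k = np.linspace(-50, 50, 1024, endpoint=False)
f = np.exp(-k**2 / 2)
x, y = ift1D(k, f)
assert np.allclose(y.real, np.exp(-x**2 / 2) / np.sqrt(2 * np.pi), atol=1e-6)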
def polarization_to_signal(self, P_of_t_in, *, return_polarization=False,
                           local_oscillator_number=-1, undersample_factor=1):
    """This function generates a frequency-resolved signal from a polarization field.

    local_oscillator_number - usually the local oscillator will be the last
    pulse in the list self.efields
    """
    undersample_slice = slice(None, None, undersample_factor)
    P_of_t = P_of_t_in[undersample_slice]
    t = self.t[undersample_slice]
    dt = t[1] - t[0]
    pulse_time = self.pulse_times[local_oscillator_number]
    if self.gamma != 0:
        exp_factor = np.exp(-self.gamma * np.abs(t - pulse_time))
        P_of_t *= exp_factor
    if self.sigma_I != 0:
        inhomogeneous = np.exp(-(t - pulse_time)**2 * self.sigma_I**2 / 2)
        P_of_t *= inhomogeneous
    if return_polarization:
        return P_of_t

    pulse_time_ind = np.argmin(np.abs(self.t - pulse_time))
    efield = np.zeros(self.t.size, dtype='complex')

    if self.efield_t.size == 1:
        # Impulsive limit
        efield[pulse_time_ind] = self.efields[local_oscillator_number]
        efield = fftshift(ifft(ifftshift(efield))) * efield.size / np.sqrt(2 * np.pi)
    else:
        pulse_start_ind = pulse_time_ind - self.size // 2
        pulse_end_ind = pulse_time_ind + self.size // 2 + self.size % 2

        t_slice = slice(pulse_start_ind, pulse_end_ind, None)
        efield[t_slice] = self.efields[local_oscillator_number]
        efield = fftshift(ifft(ifftshift(efield))) * self.t.size * (self.t[1] - self.t[0]) / np.sqrt(2 * np.pi)

    # if P_of_t.size % 2:
    #     P_of_t = P_of_t[:-1]
    #     t = t[:-1]

    halfway = self.w.size // 2
    pm = self.w.size // (2 * undersample_factor)
    efield_min_ind = halfway - pm
    efield_max_ind = halfway + pm + self.w.size % 2
    efield = efield[efield_min_ind:efield_max_ind]

    P_of_w = fftshift(ifft(ifftshift(P_of_t))) * len(P_of_t) * dt / np.sqrt(2 * np.pi)

    signal = np.imag(P_of_w * np.conjugate(efield))
    return signal
def multiple_toepltiz_inverse(c_mat, lambda_n, a):
    """
    Efficiently solve several Toeplitz systems sharing the same Toeplitz matrix,
        T xj = cj,
    which in matrix form reads T x_mat = c_mat, where T is an n_data x n_data
    Toeplitz matrix and c_mat is an n_data x n_knots matrix.
    """
    n = c_mat.shape[0]

    # PRECOMPUTATIONS
    # Cf. Step 2 of Ref. [1]
    ae_2n_fft = fft(np.concatenate(([1], a)), 2 * n)
    # Using the Hermitian and real properties of covariance matrices:
    # be_2n_fft = fft(be_2n)
    be_2n_fft = ae_2n_fft.conj()
    signs = (-1)**(np.arange(2 * n) + 1)

    x_mat = np.empty(np.shape(c_mat))
    print("shape of c_mat is " + str(c_mat.shape))

    for j in range(c_mat.shape[1]):
        ce_2n_fft = fft(c_mat[:, j], 2 * n)
        u_2n = ifft(ae_2n_fft * ce_2n_fft)
        v_2n = ifft(be_2n_fft * ce_2n_fft)

        pe_2n_fft = fft(v_2n[0:n], 2 * n)
        qe_2n_fft = fft(u_2n[n:], 2 * n)

        we_2n = ifft(ae_2n_fft * pe_2n_fft + signs * be_2n_fft * qe_2n_fft)

        x_mat[:, j] = np.real(we_2n[0:n] / lambda_n)

    return x_mat
def toepltiz_mat_vect_prod(y, s_2n):
    """
    Linear operator that calculates T y, assuming that we can write
    T = F* Lambda F, where Lambda is a P x P diagonal matrix and F is the
    P x n_data discrete Fourier transform matrix.

    Parameters
    ----------
    y : numpy array
        input data vector of size n_data
    s_2n : numpy array (size P >= 2 * n_data)
        PSD vector

    Returns
    -------
    y_out : numpy array
        y_out = T * y, transformed output vector of size n_data
    """
    return np.real(ifft(s_2n * fft(y, len(s_2n)))[0:len(y)])
def toeplitz_multiplication(v, first_row, first_column):
    """
    Perform the matrix-vector product T * v, where T is a Toeplitz matrix,
    using the FFT.

    Parameters
    ----------
    v : array_like
        input vector of size n_data
    first_row : array_like
        first row of the Toeplitz matrix (size n_data)
    first_column : array_like
        first column of the Toeplitz matrix (size n_data)

    Returns
    -------
    y : numpy array
        vector such that y = T * v
    """
    n = first_row.shape[0]
    a_2n_fft = fft(np.concatenate((first_row, [0], first_column[1:][::-1])))
    return np.real(ifft(a_2n_fft * fft(v, 2 * n))[0:n])
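# Hedged check (not from the original source): for a symmetric Toeplitz matrix
# (the usual autocovariance case) the FFT-based product should match the dense
# one. Assumes fft/ifft come from numpy.fft; the package may bind pyfftw instead.
import numpy as np
from numpy.fft import fft, ifft
from scipy import linalg

n = 64
lags = np.arange(n)
autocov = np.exp(-lags / 10.0)            # first row == first column
v = np.random.default_rng(1).standard_normal(n)
y_fft = toeplitz_multiplication(v, autocov, autocov)
y_dense = linalg.toeplitz(autocov) @ v
assert np.allclose(y_fft, y_dense)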
def delay(x, time, fs, axis=-1, keeplength=False, pad=1):
    extra_pad = 200  # add 200 samples to prevent wrapping
    samps = int(np.floor(time * fs))
    s = list(x.shape)
    sz_pre = np.copy(s)
    sz_post = np.copy(s)
    sz_fft = np.copy(s)
    sz_pre[axis] = samps
    sz_post[axis] = pad + extra_pad
    x = np.concatenate((np.zeros(sz_pre), x, np.zeros(sz_post)), axis)
    sz_fft[axis] = int(np.round(2 ** np.ceil(np.log2(x.shape[axis])) -
                                x.shape[axis]))
    x = np.concatenate((x, np.zeros(sz_fft)), axis)
    new_len = sz_pre[axis] + s[axis] + sz_post[axis] + sz_fft[axis]

    # x[n-k] <--> X(jw)e^(-jwk) where w in [0, 2pi)
    if type(time) is not int:
        theta = (-np.arange(new_len).astype(float) * fs * 2 * np.pi /
                 new_len * (time - float(samps) / fs))
        theta[-(new_len // 2) + 1:] = -theta[(new_len // 2):1:-1]
        st = [1 for _ in range(x.ndim)]
        st[axis] = new_len
        x = np.real(fft.ifft(fft.fft(x, axis=axis) *
                             np.exp(1j * theta.reshape(st)), axis=axis))

    if keeplength:
        x = np.take(x, range(s[axis]), axis)
    else:
        x = np.take(x, range(s[axis] + samps + pad), axis)

    # zero out the pre-delay region
    inds = tuple([slice(si) for si in sz_pre])
    x[inds] = 0
    return x
def generate_noise_from_psd(DSP, fe, myseed=None):
    """
    Function generating a colored noise from a vector containing the DSP (PSD).
    The DSP contains Np points such that Np > 2N, and the output noise should
    only contain N points in order to avoid boundary effects. However, the
    output is a 2N vector containing all the generated data; the truncation
    should be done afterwards.

    References: Timmer & König, "On generating power law noise", 1995

    Parameters
    ----------
    DSP : array_like
        vector of size N_DSP containing the noise DSP calculated at
        frequencies between -fe/N_DSP and fe/N_DSP, where fe is the sampling
        frequency and N is the size of the time series (it will be the size
        of the returned temporal noise vector b)
    fe : scalar float
        sampling frequency
    myseed : scalar integer or None
        seed of the random number generator

    Returns
    -------
    b : numpy array
        time sample of the colored noise (size N)
    """
    return ifft(generate_freq_noise_from_psd(DSP, fe, myseed=myseed))
def stCoefs_1d(signals, filters, block_size=100, second_order=False):
    """
    Compute 1D signal scattering coefficients.

    input
        signals: 2D numpy array (# samples, signal_length)
        filters: 2D numpy array (# filters, signal_length)
    output
        filtered_signal: 3D numpy array (# samples, # filters, signal_length)
    """
    signal_length = signals.shape[-1]
    signal_shape = signals.shape[:-1]
    signal_size = reduce(lambda x, y: x * y, signal_shape)
    signals.shape = (signal_size, signal_length)

    filter_length = filters.shape[-1]
    filter_shape = filters.shape[:-1]
    filter_size = reduce(lambda x, y: x * y, filter_shape)
    filters.shape = (filter_size, filter_length)

    f_signals = np.fft.fft(signals, axis=1)
    f_filters = np.fft.fft(fft.ifftshift(filters, axes=(1,)), axis=1)

    if not second_order:
        f_conv = f_signals[:, np.newaxis, :] * f_filters[np.newaxis, :, :]
    else:
        # allocate the second-order output (shape passed as a tuple)
        f_conv = np.zeros((signal_size, filter_size, signal_length),
                          dtype=complex)

    filtered = fft.ifft(f_conv, axis=2)
    return filtered
def calculate_autocorr(self, N):
    """
    Compute the autocovariance function from the PSD.
    """
    return np.real(ifft(self.calculate(2 * N))[0:N])
def phase_correlation(signal, ref):
    fft_s = fft(signal)
    fft_r = fft(ref)
    prod_of_ffts = fft_s * np.conjugate(fft_r)
    # normalize to unit magnitude so only phase information remains
    pc = prod_of_ffts / np.abs(prod_of_ffts)
    return abs(ifft(pc))
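# Hedged usage sketch (not from the original source): phase correlation peaks
# at the circular shift between two signals. Assumes fft/ifft from numpy.fft.
import numpy as np
from numpy.fft import fft, ifft

rng = np.random.default_rng(2)
ref = rng.standard_normal(256)
shift = 17
sig = np.roll(ref, shift)
peak = int(np.argmax(phase_correlation(sig, ref)))
assert peak == shift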
def covariance_matrix_time(psd_func, fs, n_points):
    """
    Parameters
    ----------
    psd_func : callable
        function giving the 2-sided noise PSD (in A/Hz) as a function of
        frequency (in Hz)
    fs : scalar float
        sampling frequency
    n_points : scalar integer
        size of covariance matrix to display

    Returns
    -------
    cov : 2d numpy array
        covariance matrix in the time domain
    """
    freq = np.fft.fftfreq(2 * n_points) * fs
    freq_pos = np.abs(freq)
    freq_pos[0] = freq_pos[1]
    autocorr = np.real(ifft(psd_func(freq_pos))) * fs
    cov = LA.toeplitz(autocorr[0:n_points])
    return cov
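# Hedged sanity check (not from the original source): a flat two-sided PSD S0
# should give a diagonal covariance with variance S0 * fs. Assumes `LA` is
# scipy.linalg and ifft comes from numpy.fft, as the function body suggests.
import numpy as np
from numpy.fft import ifft
from scipy import linalg as LA

S0, fs, n_points = 2.0, 4.0, 16
cov = covariance_matrix_time(lambda f: S0 * np.ones_like(f), fs, n_points)
assert np.allclose(cov, S0 * fs * np.eye(n_points))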
def track(self, im, pos, base_target_sz, current_scale_factor):
    """
    Track the scale using the scale filter.
    """
    # get scale filter features
    scales = current_scale_factor * self.scale_size_factors
    xs = self._extract_scale_sample(im, pos, base_target_sz, scales,
                                    self.scale_model_sz)

    # project
    xs = self.basis.dot(xs) * self.window

    # get scores
    xsf = fft(xs, axis=1)
    scale_responsef = np.sum(self.sf_num * xsf, 0) / (self.sf_den + config.lamBda)
    interp_scale_response = np.real(ifft(resize_dft(scale_responsef,
                                                    config.number_of_interp_scales)))
    recovered_scale_index = np.argmax(interp_scale_response)

    if config.do_poly_interp:
        # fit a quadratic polynomial to get a refined scale estimate
        id1 = (recovered_scale_index - 1) % config.number_of_interp_scales
        id2 = (recovered_scale_index + 1) % config.number_of_interp_scales
        poly_x = np.array([self.interp_scale_factors[id1],
                           self.interp_scale_factors[recovered_scale_index],
                           self.interp_scale_factors[id2]])
        poly_y = np.array([interp_scale_response[id1],
                           interp_scale_response[recovered_scale_index],
                           interp_scale_response[id2]])
        poly_A = np.array([[poly_x[0]**2, poly_x[0], 1],
                           [poly_x[1]**2, poly_x[1], 1],
                           [poly_x[2]**2, poly_x[2], 1]], dtype=np.float32)
        poly = np.linalg.inv(poly_A).dot(poly_y.T)
        scale_change_factor = -poly[1] / (2 * poly[0])
    else:
        scale_change_factor = self.interp_scale_factors[recovered_scale_index]

    return scale_change_factor
def polarization_to_signal(self, P_of_t_in, *, local_oscillator_number=-1,
                           undersample_factor=1):
    """This function generates a frequency-resolved signal from a polarization field.

    local_oscillator_number - usually the local oscillator will be the last
    pulse in the list self.efields
    """
    undersample_slice = slice(None, None, undersample_factor)
    P_of_t = P_of_t_in[undersample_slice].copy()
    t = self.t[undersample_slice]
    dt = t[1] - t[0]
    pulse_time = self.pulse_times[local_oscillator_number]
    efield_t = self.efield_times[local_oscillator_number]

    center = -self.centers[local_oscillator_number]
    P_of_t = P_of_t  # * np.exp(-1j*center*t)

    pulse_time_ind = np.argmin(np.abs(self.t))
    efield = np.zeros(self.t.size, dtype='complex')

    if efield_t.size == 1:
        # Impulsive limit: delta in time is flat in frequency
        efield = np.ones(self.w.size) * self.efields[local_oscillator_number]
    else:
        pulse_start_ind = pulse_time_ind - efield_t.size // 2
        pulse_end_ind = pulse_time_ind + efield_t.size // 2 + efield_t.size % 2

        t_slice = slice(pulse_start_ind, pulse_end_ind, None)
        efield[t_slice] = self.efields[local_oscillator_number]
        efield = fftshift(ifft(ifftshift(efield))) * efield.size * dt

    halfway = self.w.size // 2
    pm = self.w.size // (2 * undersample_factor)
    efield_min_ind = halfway - pm
    efield_max_ind = halfway + pm + self.w.size % 2
    efield = efield[efield_min_ind:efield_max_ind]

    P_of_w = fftshift(ifft(ifftshift(P_of_t))) * P_of_t.size * dt

    signal = P_of_w * np.conjugate(efield)
    if not self.return_complex_signal:
        return np.imag(signal)
    else:
        return 1j * signal
def plotTA_units(self, *, frequency_range=[-1000, 1000], subtract_DC=True,
                 create_figure=True, color_range='auto', draw_colorbar=True,
                 save_fig=True, omega_0=1):
    """Plots the transient absorption spectra with detection frequency on the
    y-axis and delay time on the x-axis.

    Args:
        frequency_range (list): sets the min (list[0]) and max (list[1])
            detection frequency for the y-axis
        subtract_DC (bool): if True, subtract the DC component of the TA
        color_range (list): sets the min (list[0]) and max (list[1]) value
            for the colorbar
        draw_colorbar (bool): if True, add a colorbar to the plot
        save_fig (bool): if True, save the figure that is produced
        omega_0 (float): convert from unitless variables; omega_0 should be
            provided in wavenumbers
    """
    self.load_eigen_params()
    f0_thz = omega_0 * 3E10 / 1.0E12  # omega_0 in wavenumbers
    T_ps = self.delay_times / f0_thz / (2 * np.pi)

    # Cut out unwanted detection frequency points
    self.w += self.ground_to_excited_transition + self.center
    self.w *= omega_0
    w_ind = np.where((self.w > frequency_range[0]) &
                     (self.w < frequency_range[1]))[0]
    w = self.w[w_ind]
    sig = self.signal[w_ind, :]

    if omega_0 == 1:
        xlab = r'Delay time ($\omega_0^{-1}$)'
        ylab = r'Detection Frequency ($\omega_0$)'
    else:
        xlab = 'Delay time (ps)'
        ylab = r'Detection Frequency (cm$^{-1}$)'

    if subtract_DC:
        sig_fft = fft(sig, axis=1)
        sig_fft[:, 0] = 0
        sig = np.real(ifft(sig_fft))

    ww, tt = np.meshgrid(T_ps, w)

    if create_figure:
        plt.figure()

    if color_range == 'auto':
        plt.pcolormesh(ww, tt, sig)
    else:
        plt.pcolormesh(ww, tt, sig, vmin=color_range[0], vmax=color_range[1])

    if draw_colorbar:
        plt.colorbar()
    plt.xlabel(xlab, fontsize=16)
    plt.ylabel(ylab, fontsize=16)
    if save_fig:
        plt.savefig(self.base_path + 'TA_spectra_iso_ave')
def toeplitz_matvec(col0, vec, n):
    """
    col0: first column of the Toeplitz matrix with shape (n,)
    vec: vector with shape (n,)
    """
    p = (ifft(fft(np.r_[col0, col0[-2:0:-1]]) *
              fft(np.r_[vec, np.zeros(n - 2)])).real)[:n]
    return p
def fourier_transform(array, axis, new_axis, inverse=False):
    import numpy as np
    from numpy.fft import fftshift, ifftshift
    from expresso.pycas import pi
    from ..coordinate_ndarray import CoordinateNDArray

    try:
        from pyfftw.interfaces.numpy_fft import fft, ifft
    except ImportError:
        from numpy.fft import fft, ifft

    axi = array.axis.index(axis)

    if len(array.axis) == 1:
        if not inverse:
            new_data = fftshift(fft(array.data, axis=axi), axes=[axi])
            new_data *= 1 / np.sqrt(2 * np.pi)
        else:
            new_data = ifft(ifftshift(array.data, axes=[axi]), axis=axi)
            new_data *= np.sqrt(2 * np.pi)
    else:
        from pypropagate.progressbar import ProgressBar

        axt = (axi + 1) % len(array.axis)
        axi = axi if axt > axi else axi - 1

        transposed_data = np.rollaxis(array.data, axt, start=0)
        new_data = np.zeros(array.data.shape, dtype=complex)
        transposed_new_data = np.rollaxis(new_data, axt, start=0)

        for i in ProgressBar(range(transposed_data.shape[0])):
            if not inverse:
                transposed_new_data[i] = fftshift(fft(transposed_data[i], axis=axi), axes=[axi])
                transposed_new_data[i] *= 1 / np.sqrt(2 * np.pi)
            else:
                transposed_new_data[i] = ifft(ifftshift(transposed_data[i], axes=[axi]), axis=axi)
                transposed_new_data[i] *= np.sqrt(2 * np.pi)

    sw = array.bounds[axi][1] - array.bounds[axi][0]
    tmin, tmax = array.evaluate((-(pi * array.shape[axi]) / sw,
                                 (pi * array.shape[axi]) / sw))

    new_bounds = [(b[0], b[1]) if i != axi else (tmin, tmax)
                  for i, b in enumerate(array.bounds)]
    new_axis = [a if i != axi else new_axis for i, a in enumerate(array.axis)]

    return CoordinateNDArray(new_data, new_bounds, new_axis, array.evaluate)
def toeplitz_matvec_block(col0, vec, n):
    """
    col0: first column of the Toeplitz matrix with shape (n,)
    vec: vector with shape (m, n)
    """
    m, n = vec.shape
    padded_vec = np.zeros((m, n * 2 - 2))
    padded_vec[:, :n] = vec
    p = ifft(fft(np.r_[col0, col0[-2:0:-1]]) * fft(padded_vec)).real[:, :n]
    return p
def csr_convolution(a, b):
    P = len(a)
    Q = len(b)
    L = P + Q - 1
    K = 2**nextpow2(L)
    a_pad = np.pad(a, (0, K - P), 'constant', constant_values=(0))
    b_pad = np.pad(b, (0, K - Q), 'constant', constant_values=(0))
    c = ifft(fft(a_pad) * fft(b_pad))
    c = c[0:L - 1].real
    return c
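# Hedged check (not from the original source). `nextpow2` is not shown above;
# a stand-in returning ceil(log2(L)) is assumed here. Note that the slice
# c[0:L - 1] keeps one sample fewer than the full linear convolution of
# length L = P + Q - 1, so the comparison uses the same truncation.
import numpy as np
from numpy.fft import fft, ifft

def nextpow2(n):
    return int(np.ceil(np.log2(n)))

a = np.array([1.0, 2.0, 3.0])
b = np.array([0.5, -1.0, 0.25, 4.0])
assert np.allclose(csr_convolution(a, b),
                   np.convolve(a, b)[:len(a) + len(b) - 2])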
def frequency_to_time(self, y_gw_fft_pos):
    """
    Compute the waveform in the time domain from the waveform values in the
    frequency domain evaluated at positive Fourier frequencies.
    """
    y_gw_fft = np.zeros(self.n_data, dtype=np.complex128)
    y_gw_fft[self.inds] = y_gw_fft_pos
    y_gw_fft[self.n_data - self.inds] = np.conj(y_gw_fft_pos)
    return np.real(ifft(y_gw_fft)) / self.del_t
def add_gaussian_linewidth(self, sigma):
    self.old_signal = self.signal.copy()

    sig_tau_t = fftshift(fft(ifftshift(self.old_signal, axes=(-1)), axis=-1),
                         axes=(-1))
    sig_tau_t = sig_tau_t * (np.exp(-self.t**2 / (2 * sigma**2))[np.newaxis, np.newaxis, :]
                             * np.exp(-self.t21_array**2 / (2 * sigma**2))[:, np.newaxis, np.newaxis])
    sig_tau_w = fftshift(ifft(ifftshift(sig_tau_t, axes=(-1)), axis=-1),
                         axes=(-1))

    self.signal = sig_tau_w
def apply_fourier(self, y_tilde):
    """Apply the transformation y_o = W_o F^* y_tilde

    Parameters
    ----------
    y_tilde : ndarray
        DFT data vector or matrix to be mapped, must be of size n_data x k

    Returns
    -------
    y_o : ndarray
        mapped data vector, size n_o = len(inds)
    """
    if len(y_tilde.shape) == 1:
        return self.apply(ifft(y_tilde) / np.sqrt(self.n_data))
    elif len(y_tilde.shape) == 2:
        return np.array([self.apply(ifft(y_tilde[:, i]) / np.sqrt(self.n_data))
                         for i in range(y_tilde.shape[1])]).T
def plot_pump_probe_spectra(self, *, frequency_range=[-1000, 1000],
                            subtract_DC=True, create_figure=True,
                            color_range='auto', draw_colorbar=True,
                            save_fig=True, return_signal=False):
    """Plots the transient absorption spectra with detection frequency on the
    y-axis and delay time on the x-axis.

    Args:
        frequency_range (list): sets the min (list[0]) and max (list[1])
            detection frequency for the y-axis
        subtract_DC (bool): if True, subtract the DC component of the TA
        color_range (list): sets the min (list[0]) and max (list[1]) value
            for the colorbar
        draw_colorbar (bool): if True, add a colorbar to the plot
        save_fig (bool): if True, save the figure that is produced
    """
    # Cut out unwanted detection frequency points
    w_ind = np.where((self.w > frequency_range[0]) &
                     (self.w < frequency_range[1]))[0]
    w = self.w[w_ind]
    sig = self.signal_vs_delay_times[w_ind, :]

    if subtract_DC:
        sig_fft = fft(sig, axis=1)
        sig_fft[:, 0] = 0
        sig = np.real(ifft(sig_fft))

    ww, tt = np.meshgrid(self.delay_times, w)

    if create_figure:
        plt.figure()

    if color_range == 'auto':
        plt.pcolormesh(ww, tt, sig)
    else:
        plt.pcolormesh(ww, tt, sig, vmin=color_range[0], vmax=color_range[1])

    if draw_colorbar:
        plt.colorbar()
    plt.xlabel(r'Delay time ($\omega_0^{-1}$)', fontsize=16)
    plt.ylabel(r'Detection Frequency ($\omega_0$)', fontsize=16)
    if save_fig:
        plt.savefig(os.path.join(self.base_path, 'TA_spectra'))
    if return_signal:
        return ww, tt, sig
def phaseran(signal):
    """
    Performs phase randomization for coherence matrices.

    NOTE: Signal input has to be nTimepoints x nElectrodes (there is a check)
    """
    # check that it is in the right orientation
    if signal.shape[1] > signal.shape[0]:
        signal = signal.T

    # Get parameters
    nTimepoints = signal.shape[0]
    nElectrodes = signal.shape[1]

    # Check to make sure that it is an odd number of samples
    if nTimepoints % 2 == 0:
        signal = signal[:-1, :]
        nTimepoints = nTimepoints - 1
    nTimepoints = signal.shape[0]

    # half-length of the spectrum (integer, used for indexing)
    len_ser = (nTimepoints - 1) // 2
    interv1 = np.arange(1, len_ser + 1)
    interv2 = np.arange(len_ser + 1, nTimepoints)

    # FFT of original data
    try:
        fft_A = pfft.fft(signal, axis=0, threads=15)
    except Exception:
        fft_A = pfft.fft(signal, axis=0)

    # Create the random phases for all the time series
    ph_rnd = np.random.rand(len_ser, nElectrodes)
    ph_interv1 = np.exp(2 * np.pi * 1j * ph_rnd)
    ph_interv2 = np.conj(np.flipud(ph_interv1))

    # Randomize all time series simultaneously
    fft_recblk_surr = fft_A
    fft_recblk_surr[interv1, :] = fft_A[interv1, :] * ph_interv1
    fft_recblk_surr[interv2, :] = fft_A[interv2, :] * ph_interv2

    surrblk = np.float32(np.real(pfft.ifft(fft_recblk_surr, axis=0))).T
    return surrblk
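# Hedged check (not from the original source): phase-randomized surrogates
# should preserve each channel's amplitude spectrum. Assumes `pfft` is a
# numpy-compatible FFT module (the try/except above suggests pyfftw).
import numpy as np
from numpy import fft as pfft

rng = np.random.default_rng(3)
data = rng.standard_normal((257, 4))           # odd # of timepoints x channels
surr = phaseran(data).T                        # back to time x channels
assert surr.shape == data.shape
assert np.allclose(np.abs(np.fft.fft(surr, axis=0)),
                   np.abs(np.fft.fft(data, axis=0)), atol=1e-4)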
def generateNoiseFromDSP(DSP, fe, myseed=None):
    """
    Function generating a colored noise from a vector containing the DSP (PSD).
    The DSP contains Np points such that Np > 2N, and the output noise should
    only contain N points in order to avoid boundary effects. However, the
    output is a 2N vector containing all the generated data; the truncation
    should be done afterwards.

    References: Timmer & König, "On generating power law noise", 1995

    Parameters
    ----------
    DSP : array_like
        vector of size N_DSP containing the noise DSP calculated at
        frequencies between -fe/N_DSP and fe/N_DSP, where fe is the sampling
        frequency and N is the size of the time series (it will be the size
        of the returned temporal noise vector b)
    fe : scalar float
        sampling frequency
    myseed : scalar integer or None
        seed of the random number generator

    Returns
    -------
    b : numpy array
        time sample of the colored noise (size N)
    """
    # Inverse Fourier transform of the frequency-domain noise to get the time
    # series. One must multiply by fe (to get the right dimension) and divide
    # by 2 because of the symmetrization: the positive- and negative-frequency
    # values are equal, so they do not carry independent uncertainties.
    return ifft(generateFreqNoiseFromDSP(DSP, fe, myseed=myseed))
def _gaussian_sample(nsamples, sampling_frequency, psd, twosided=False,
                     out=None, fftw_flag='FFTW_MEASURE'):
    """
    Generate a gaussian N-sample sampled at fs from a one- or two-sided
    Power Spectrum Density sampled at fs/N.

    Parameters
    ----------
    nsamples : int
        Number of time samples.
    sampling_frequency : float
        Sampling frequency [Hz].
    psd : array-like
        One- or two-sided Power Spectrum Density [signal unit**2/Hz].
    twosided : boolean, optional
        Whether or not the input psd is one-sided (only positive frequencies)
        or two-sided (positive and negative frequencies).
    out : ndarray
        Placeholder for the output buffer.
    """
    psd = np.asarray(psd)
    if out is None:
        out = empty(psd.shape[:-1] + (nsamples,))
    if not twosided:
        psd = _unfold_psd(psd)
    shape = psd.shape[:-1] + (nsamples,)
    gauss = np.random.randn(*shape)
    nthreads = multiprocessing.cpu_count()
    ftgauss = fft.fft(gauss, planner_effort=fftw_flag, threads=nthreads)
    ftgauss[..., 0] = 0
    spec = ftgauss * np.sqrt(psd)
    out[...] = fft.ifft(spec, planner_effort=fftw_flag,
                        threads=nthreads).real * np.sqrt(sampling_frequency)
    return out
def subtract_DC(signal, return_ft=False, axis=1):
    """Use the discrete Fourier transform to remove the DC component of a signal.

    Args:
        signal (np.ndarray): real signal to be processed
        return_ft (bool): if True, return the Fourier transform of the input signal
        axis (int): axis along which the Fourier transform is to be taken
    """
    sig_fft = fft(ifftshift(signal, axes=(axis)), axis=axis)
    nd_slice = [slice(None) for i in range(len(sig_fft.shape))]
    nd_slice[axis] = slice(0, 1, 1)
    nd_slice = tuple(nd_slice)
    sig_fft[nd_slice] = 0
    if not return_ft:
        sig = fftshift(ifft(sig_fft, axis=axis), axes=(axis))
    else:
        sig = sig_fft
    return sig
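# Hedged usage sketch (not from the original source): removing the DC offset
# of a sine with a constant baseline. Assumes fft, ifft, fftshift, ifftshift
# are bound from numpy.fft at module level.
import numpy as np
from numpy.fft import fft, ifft, fftshift, ifftshift

t = np.linspace(0, 1, 128, endpoint=False)
signal = (3.0 + np.sin(2 * np.pi * 5 * t))[np.newaxis, :]   # shape (1, N), axis=1
cleaned = subtract_DC(signal)
assert np.allclose(np.real(cleaned), np.sin(2 * np.pi * 5 * t), atol=1e-9)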
def padsynth(samplerate, frequencies, band_width=10, random_phase=True):
    """
    PadSynth from ZynAddSubFX
    http://zynaddsubfx.sourceforge.net/doc/PADsynth/PADsynth.htm

    frequencies = [(freq, gain, phase), ...]

    For the constant 6 in profile_size_half, see the link below; larger values
    are slower but more accurate.
    https://en.wikipedia.org/wiki/Normal_distribution#Standard_deviation_and_coverage
    """
    table = numpy.zeros(2**16, dtype=numpy.complex128)
    for freq, gain, phase in frequencies:
        band_width_hz = (math.pow(2, band_width / 1200) - 1.0) * freq
        band_width_i = band_width_hz / (2.0 * samplerate)
        sigma = math.sqrt(math.pow(band_width_i, 2.0) / (2.0 * math.pi))
        profile_size_half = max(int(6 * len(table) * sigma), 1)
        freq_i = freq / samplerate
        center = int(freq_i * len(table))
        start = max(center - profile_size_half, 0)
        end = min(center + profile_size_half, len(table))
        for index in range(start, end):
            # Accumulate into the complex spectrum table.
            table[index] += cmath.rect(
                gain * profile(index / len(table) - freq_i, band_width_i),
                phase)

    table[0] = 0 * 1j  # Remove the DC component.

    if random_phase:
        angles = numpy.random.uniform(0, 2 * numpy.pi, len(table))
        table = table * numpy.exp(1j * angles)

    sound_ifft = ifft(table, planner_effort='FFTW_ESTIMATE', threads=1)
    sound_flat = normalize(sound_ifft.real)
    return sound_flat
def mat_vect_prod(y_in, ind_in, ind_out, mask, s_2n):
    """
    Linear operator that calculates Com y_in, assuming that we can write
    Com = M_o F* Lambda F M_m^T.

    Parameters
    ----------
    y_in : numpy array
        input data vector
    ind_in : array_like
        array or list containing the chronological indices of the values
        contained in the input vector in the complete data vector
    ind_out : array_like
        array or list containing the chronological indices of the values
        contained in the output vector in the complete data vector
    mask : numpy array (size N)
        mask vector (with entries equal to 0 or 1); its size N is the size
        of the complete data vector
    s_2n : numpy array (size P >= 2N)
        PSD vector

    Returns
    -------
    y_out : numpy array
        y_out = Com * y_in, transformed output vector of size N_out
    """
    # calculation of the matrix product Coo y, where y is a vector
    y = np.zeros(len(mask))  # + 1j*np.zeros(N)
    y[ind_in] = y_in
    n_fft = len(s_2n)

    return np.real(ifft(s_2n * fft(y, n_fft))[ind_out])
def fold(fh, comm, samplerate, fedge, fedge_at_top, nchan,
         nt, ntint, ngate, ntbin, ntw, dm, fref, phasepol,
         dedisperse='incoherent', do_waterfall=True, do_foldspec=True,
         verbose=True, progress_interval=100,
         rfi_filter_raw=None, rfi_filter_power=None, return_fits=False):
    """
    FFT data, fold by phase/time and make a waterfall series

    Folding is done from the position the file is currently in

    Parameters
    ----------
    fh : file handle
        handle to file holding voltage timeseries
    comm : MPI communicator or None
        will use size, rank attributes
    samplerate : Quantity
        rate at which samples were originally taken and thus double the
        band width (frequency units)
    fedge : float
        edge of the frequency band (frequency units)
    fedge_at_top : bool
        whether edge is at top (True) or bottom (False)
    nchan : int
        number of frequency channels for FFT
    nt, ntint : int
        total number nt of sets, each containing ntint samples in each file;
        hence, total # of samples is nt*ntint, with each sample containing
        a single polarisation
    ngate, ntbin : int
        number of phase and time bins to use for folded spectrum;
        ntbin should be an integer fraction of nt
    ntw : int
        number of time samples to combine for waterfall (does not have to be
        integer fraction of nt)
    dm : float
        dispersion measure of pulsar, used to correct for ism delay
        (column number density)
    fref : float
        reference frequency for dispersion measure
    phasepol : callable
        function that returns the pulsar phase for time in seconds relative
        to start of the file that is read.
    dedisperse : None or string (default: incoherent)
        None, 'incoherent', 'coherent', 'by-channel'.
        Note: None really does nothing
    do_waterfall, do_foldspec : bool
        whether to construct waterfall, folded spectrum (default: True)
    verbose : bool or int
        whether to give some progress information (default: True)
    progress_interval : int
        Ping every progress_interval sets
    return_fits : bool (default: False)
        return a subint fits table for rank == 0 (None otherwise)
    """
    assert dedisperse in (None, 'incoherent', 'by-channel', 'coherent')
    need_fine_channels = dedisperse in ['by-channel', 'coherent']

    assert nchan % fh.nchan == 0
    if dedisperse in ['incoherent', 'by-channel'] and fh.nchan > 1:
        oversample = nchan // fh.nchan
        assert ntint % oversample == 0
    else:
        oversample = 1

    if dedisperse == 'coherent' and fh.nchan > 1:
        raise ValueError("Cannot coherently dedisperse channelized data.")

    if comm is None:
        mpi_rank = 0
        mpi_size = 1
    else:
        mpi_rank = comm.rank
        mpi_size = comm.size

    npol = getattr(fh, 'npol', 1)
    assert npol == 1 or npol == 2
    if verbose > 1 and mpi_rank == 0:
        print("Number of polarisations={}".format(npol))

    # initialize folded spectrum and waterfall
    # TODO: use estimated number of points to set dtype
    if do_foldspec:
        foldspec = np.zeros((ntbin, nchan, ngate, npol**2), dtype=np.float32)
        icount = np.zeros((ntbin, nchan, ngate), dtype=np.int32)
    else:
        foldspec = None
        icount = None

    if do_waterfall:
        nwsize = nt*ntint//ntw//oversample
        waterfall = np.zeros((nwsize, nchan, npol**2), dtype=np.float64)
    else:
        waterfall = None

    if verbose and mpi_rank == 0:
        print('Reading from {}'.format(fh))

    nskip = fh.tell()/fh.blocksize
    if nskip > 0:
        if verbose and mpi_rank == 0:
            print('Starting {0} blocks = {1} bytes out from start.'
                  .format(nskip, nskip*fh.blocksize))

    dt1 = (1./samplerate).to(u.s)
    # need 2*nchan real-valued samples for each FFT
    if fh.telescope == 'lofar':
        dtsample = fh.dtsample
    else:
        dtsample = nchan // oversample * 2 * dt1
    tstart = dtsample * ntint * nskip

    # pre-calculate time delay due to dispersion in coarse channels
    # for channelized data, frequencies are known
    tb = -1. if fedge_at_top else +1.
    if fh.nchan == 1:
        if getattr(fh, 'data_is_complex', False):
            # for complex data, really each complex sample consists of
            # 2 real ones, so multiply dt1 by 2.
            freq = fedge + tb * fftfreq(nchan, 2.*dt1)
            if dedisperse == 'coherent':
                fcoh = fedge + tb * fftfreq(nchan*ntint, 2.*dt1)
                fcoh.shape = (-1, 1)
            elif dedisperse == 'by-channel':
                fcoh = freq + tb * fftfreq(ntint, dtsample)[:, np.newaxis]
        else:  # real data
            freq = fedge + tb * rfftfreq(nchan*2, dt1)
            if dedisperse == 'coherent':
                fcoh = fedge + tb * rfftfreq(ntint*nchan*2, dt1)
                fcoh.shape = (-1, 1)
            elif dedisperse == 'by-channel':
                fcoh = freq + tb * fftfreq(ntint, dtsample)[:, np.newaxis]
        freq_in = freq
    else:
        # Input frequencies may not be the ones going out.
        freq_in = fh.frequencies
        if oversample == 1:
            freq = freq_in
        else:
            freq = freq_in[:, np.newaxis] + tb * fftfreq(oversample, dtsample)
        fcoh = freq_in + tb * fftfreq(ntint, dtsample)[:, np.newaxis]

    # print('fedge_at_top={0}, tb={1}'.format(fedge_at_top, tb))
    # By taking only up to nchan, we remove the top channel at the Nyquist
    # frequency for real, unchannelized data.
    ifreq = freq[:nchan].ravel().argsort()

    # pre-calculate time offsets in (input) channelized streams
    dt = dispersion_delay_constant * dm * (1./freq_in**2 - 1./fref**2)

    if need_fine_channels:
        # pre-calculate required turns due to dispersion.
        #
        # set frequency relative to which dispersion is coherently corrected
        if dedisperse == 'coherent':
            _fref = fref
        else:
            _fref = freq_in[np.newaxis, :]
        # (check via eq. 5.21 and following in
        # Lorimer & Kramer, Handbook of Pulsar Astronomy
        dang = (dispersion_delay_constant * dm * fcoh *
                (1./_fref-1./fcoh)**2) * u.cycle
        with u.set_enabled_equivalencies(u.dimensionless_angles()):
            dd_coh = np.exp(dang * 1j).conj().astype(np.complex64)

        # add dimension for polarisation
        dd_coh = dd_coh[..., np.newaxis]

    # Calculate the part of the whole file this node should handle.
    size_per_node = (nt-1)//mpi_size + 1
    start_block = mpi_rank*size_per_node
    end_block = min((mpi_rank+1)*size_per_node, nt)
    for j in range(start_block, end_block):
        if verbose and j % progress_interval == 0:
            print('#{:4d}/{:4d} is doing {:6d}/{:6d} [={:6d}/{:6d}]; '
                  'time={:18.12f}'
                  .format(mpi_rank, mpi_size, j+1, nt,
                          j-start_block+1, end_block-start_block,
                          (tstart+dtsample*j*ntint).value))  # time since start

        # Just in case numbers were set wrong -- break if file ends;
        # better keep at least the work done.
        try:
            raw = fh.seek_record_read(int((nskip+j)*fh.blocksize),
                                      fh.blocksize)
        except (EOFError, IOError) as exc:
            print("Hit {0!r}; writing data collected.".format(exc))
            break
        if verbose >= 2:
            print("#{:4d}/{:4d} read {} items"
                  .format(mpi_rank, mpi_size, raw.size), end="")

        if npol == 2 and raw.dtype.fields is not None:
            raw = raw.view(list(raw.dtype.fields.values())[0][0])

        if fh.nchan == 1:  # raw.shape=(ntint*npol)
            raw = raw.reshape(-1, npol)
        else:              # raw.shape=(ntint, nchan*npol)
            raw = raw.reshape(-1, fh.nchan, npol)

        if dedisperse == 'incoherent' and oversample > 1:
            raw = ifft(raw, axis=1, **_fftargs).reshape(-1, nchan, npol)
            raw = fft(raw, axis=1, **_fftargs)

        if rfi_filter_raw is not None:
            raw, ok = rfi_filter_raw(raw)
            if verbose >= 2:
                print("... raw RFI (zap {0}/{1})"
                      .format(np.count_nonzero(~ok), ok.size), end="")

        if np.can_cast(raw.dtype, np.float32):
            vals = raw.astype(np.float32)
        else:
            assert raw.dtype.kind == 'c'
            vals = raw

        # For pre-channelized data, data are always complex,
        # and should have shape (ntint, nchan, npol).
        # For baseband data, we wish to get to the same shape for
        # incoherent or by_channel, or just to fully channelized for coherent.
        if fh.nchan == 1:
            # If we need coherent dedispersion, do FT of whole thing,
            # otherwise to output channels, mimicking pre-channelized data.
            if raw.dtype.kind == 'c':  # complex data
                nsamp = len(vals) if dedisperse == 'coherent' else nchan
                vals = fft(vals.reshape(-1, nsamp, npol), axis=1, **_fftargs)
            else:  # real data
                nsamp = len(vals) if dedisperse == 'coherent' else nchan * 2
                vals = rfft(vals.reshape(-1, nsamp, npol), axis=1, **_rfftargs)
                # Sadly, the way data are stored depends on what FFT routine
                # one is using.  We cannot deal with scipy's.
                if vals.dtype.kind == 'f':
                    raise TypeError("Can no longer deal with scipy's format "
                                    "for storing FTs of real data.")

        if fedge_at_top:
            # take complex conjugate to ensure by-channel de-dispersion is
            # applied correctly.
            # This needs to be done for ARO data, since we are in 2nd Nyquist
            # zone; not clear it is needed for other telescopes.
            np.conj(vals, out=vals)

        # Now we coherently dedisperse, either all of it or by channel.
        if need_fine_channels:
            # for by_channel, we have vals.shape=(ntint, nchan, npol),
            # and want to FT over ntint to get fine channels;
            if vals.shape[0] > 1:
                fine = fft(vals, axis=0, **_fftargs)
            else:
                # for coherent, we just reshape:
                # (1, ntint*nchan, npol) -> (ntint*nchan, 1, npol)
                fine = vals.reshape(-1, 1, npol)

            # Dedisperse.
            fine *= dd_coh

            # Still have fine.shape=(ntint, nchan, npol),
            # w/ nchan=1 for coherent.
            if fine.shape[1] > 1 or raw.dtype.kind == 'c':
                vals = ifft(fine, axis=0, **_fftargs)
            else:
                vals = irfft(fine, axis=0, **_rfftargs)

            if fine.shape[1] == 1 and nchan > 1:
                # final FT to get requested channels
                if vals.dtype.kind == 'f':
                    vals = vals.reshape(-1, nchan*2, npol)
                    vals = rfft(vals, axis=1, **_rfftargs)
                else:
                    vals = vals.reshape(-1, nchan, npol)
                    vals = fft(vals, axis=1, **_fftargs)

        elif dedisperse == 'by-channel' and oversample > 1:
            vals = vals.reshape(-1, oversample, fh.nchan, npol)
            vals = fft(vals, axis=1, **_fftargs)
            vals = vals.transpose(0, 2, 1, 3).reshape(-1, nchan, npol)

        # vals[time, chan, pol]
        if verbose >= 2:
            print("... dedispersed", end="")

        if npol == 1:
            power = vals.real**2 + vals.imag**2
        else:
            p0 = vals[..., 0]
            p1 = vals[..., 1]
            power = np.empty(vals.shape[:-1] + (4,), np.float32)
            power[..., 0] = p0.real**2 + p0.imag**2
            power[..., 1] = p0.real*p1.real + p0.imag*p1.imag
            power[..., 2] = p0.imag*p1.real - p0.real*p1.imag
            power[..., 3] = p1.real**2 + p1.imag**2

        if verbose >= 2:
            print("... power", end="")

        # current sample positions and corresponding time in stream
        isr = j*(ntint // oversample) + np.arange(ntint // oversample)
        tsr = (isr*dtsample*oversample)[:, np.newaxis]

        if rfi_filter_power is not None:
            power = rfi_filter_power(power, tsr.squeeze())
            print("... power RFI", end="")

        # correct for delay if needed
        if dedisperse in ['incoherent', 'by-channel']:
            # tsample.shape=(ntint/oversample, nchan_in)
            tsr = tsr - dt

        if do_waterfall:
            # # loop over corresponding positions in waterfall
            # for iw in xrange(isr[0]//ntw, isr[-1]//ntw + 1):
            #     if iw < nwsize:  # add sum of corresponding samples
            #         waterfall[iw, :] += np.sum(power[isr//ntw == iw],
            #                                    axis=0)[ifreq]
            iw = np.round((tsr / dtsample / oversample).to(1)
                          .value / ntw).astype(int)
            for k, kfreq in enumerate(ifreq):  # sort in frequency while at it
                iwk = iw[:, (0 if iw.shape[1] == 1 else kfreq // oversample)]
                iwk = np.clip(iwk, 0, nwsize-1, out=iwk)
                iwkmin = iwk.min()
                iwkmax = iwk.max()+1
                for ipow in range(npol**2):
                    waterfall[iwkmin:iwkmax, k, ipow] += np.bincount(
                        iwk-iwkmin, power[:, kfreq, ipow], iwkmax-iwkmin)
            if verbose >= 2:
                print("... waterfall", end="")

        if do_foldspec:
            ibin = (j*ntbin) // nt  # bin in the time series: 0..ntbin-1
            # times and cycles since start time of observation.
            tsample = tstart + tsr
            phase = (phasepol(tsample.to(u.s).value.ravel())
                     .reshape(tsample.shape))
            # corresponding PSR phases
            iphase = np.remainder(phase*ngate, ngate).astype(int)

            for k, kfreq in enumerate(ifreq):  # sort in frequency while at it
                iph = iphase[:, (0 if iphase.shape[1] == 1
                                 else kfreq // oversample)]
                # sum and count samples by phase bin
                for ipow in range(npol**2):
                    foldspec[ibin, k, :, ipow] += np.bincount(
                        iph, power[:, kfreq, ipow], ngate)
                icount[ibin, k, :] += np.bincount(
                    iph, power[:, kfreq, 0] != 0., ngate).astype(np.int32)

            if verbose >= 2:
                print("... folded", end="")

        if verbose >= 2:
            print("... done")

    # Commented out as workaround, this was causing "Referenced before
    # assignment" errors with JB data
    # if verbose >= 2 or verbose and mpi_rank == 0:
    #     print('#{:4d}/{:4d} read {:6d} out of {:6d}'
    #           .format(mpi_rank, mpi_size, j+1, nt))

    if npol == 1:
        if do_foldspec:
            foldspec = foldspec.reshape(foldspec.shape[:-1])
        if do_waterfall:
            waterfall = waterfall.reshape(waterfall.shape[:-1])

    return foldspec, icount, waterfall
def muenchetal(im, args):
    """Process a sinogram image with the Munch et al. de-striping algorithm.

    Parameters
    ----------
    im : array_like
        Image data as numpy array.
    wlevel : int
        Levels of the wavelet decomposition.
    sigma : float
        Smoothing effect.

    (Parameters wlevel and sigma have to be passed as a string separated by ";")

    Example (using tifffile.py)
    ---------------------------
    >>> im = imread('sino_orig.tif')
    >>> im = muenchetal(im, '4;1.0')
    >>> imsave('sino_flt.tif', im)

    References
    ----------
    B. Munch, P. Trtik, F. Marone, M. Stampanoni, Stripe and ring artifact
    removal with combined wavelet-Fourier filtering, Optics Express
    17(10):8567-8591, 2009.
    """
    # Disable a warning:
    simplefilter("ignore", ComplexWarning)

    # Get args:
    wlevel, sigma = args.split(";")
    wlevel = int(wlevel)
    sigma = float(sigma)

    # The wavelet transform to use:
    # {'haar', 'db1'-'db20', 'sym2'-'sym20', 'coif1'-'coif5', 'dmey'}
    wname = "db5"

    # Wavelet decomposition:
    coeffs = wavedec2(im.astype(float32), wname, level=wlevel)
    coeffsFlt = [coeffs[0]]

    # FFT transform of horizontal frequency bands:
    for i in range(1, wlevel + 1):

        # Padding and windowing of input signal:
        n_byte_align(coeffs[i][1], simd_alignment)
        siz = coeffs[i][1].shape
        tmp = pad(coeffs[i][1],
                  pad_width=((coeffs[i][1].shape[0] // 2,
                              coeffs[i][1].shape[0] // 2), (0, 0)),
                  mode='constant')  # or 'constant' for zero padding
        tmp = pad(tmp,
                  pad_width=((0, 0), (coeffs[i][1].shape[1] // 2,
                                      coeffs[i][1].shape[1] // 2)),
                  mode='constant')  # or 'constant' for zero padding
        tmp = _windowing_lr(tmp, siz[1])
        tmp = _windowing_lr(tmp.T, siz[0]).T

        # FFT:
        fcV = fftshift(fft(tmp, axis=0, threads=2))
        my, mx = fcV.shape

        # Damping of vertical stripes:
        damp = 1 - npexp(-(arange(-floor(my / 2.), -floor(my / 2.) + my) ** 2) /
                         (2 * (sigma ** 2)))
        dampprime = kron(ones((1, mx)), damp.reshape((damp.shape[0], 1)))
        fcV = fcV * dampprime

        # Inverse FFT:
        fcV = ifftshift(fcV)
        n_byte_align(fcV, simd_alignment)
        fcVflt = ifft(fcV, axis=0, threads=2)

        # Crop image:
        tmp = fcVflt[fcVflt.shape[0] // 4:(fcVflt.shape[0] // 4 + siz[0]),
                     fcVflt.shape[1] // 4:(fcVflt.shape[1] // 4 + siz[1])]

        # Dump back coefficients:
        cVHDtup = (coeffs[i][0], tmp, coeffs[i][2])
        coeffsFlt.append(cVHDtup)

    # Get wavelet reconstruction:
    im_f = real(waverec2(coeffsFlt, wname))

    # Return filtered image (an additional row and/or column might be present):
    return im_f[0:im.shape[0], 0:im.shape[1]].astype(float32)