def _fourier_cross(self, lc1, lc2): """ Fourier transform the two light curves, then compute the cross spectrum. Computed as CS = lc1 x lc2* (where lc2 is the one that gets complex-conjugated) Parameters ---------- lc1: :class:`stingray.Lightcurve` object One light curve to be Fourier transformed. Ths is the band of interest or channel of interest. lc2: :class:`stingray.Lightcurve` object Another light curve to be Fourier transformed. This is the reference band. Returns ------- fr: numpy.ndarray The squared absolute value of the Fourier amplitudes """ fourier_1 = fft(lc1.counts) # do Fourier transform 1 fourier_2 = fft(lc2.counts) # do Fourier transform 2 freqs = fftfreq(lc1.n, lc1.dt) cross = np.multiply(fourier_1[freqs > 0], np.conj(fourier_2[freqs > 0])) return freqs[freqs > 0], cross
def time_step_fftpack(self, dt, Nsteps=1):
    """
    Perform a series of time-steps via the time-dependent Schrodinger
    Equation, using the split-operator method (alternating kinetic steps
    in k-space and potential steps in x-space).

    Parameters
    ----------
    dt : float
        The small time interval over which to integrate

    Nsteps : int, optional
        The number of intervals to compute.  The total change in time at
        the end of this method will be dt * Nsteps (default = 1)
    """
    assert Nsteps >= 0
    self.dt = dt

    if Nsteps > 0:
        self.psi_mod_x *= self.x_evolve_half

        for _ in range(Nsteps - 1):
            self.psi_mod_k = fftpack.fft(self.psi_mod_x)
            self.psi_mod_k *= self.k_evolve
            self.psi_mod_x = fftpack.ifft(self.psi_mod_k)
            self.psi_mod_x *= self.x_evolve

        # Final step uses a half potential step to complete the
        # symmetric splitting.
        self.psi_mod_k = fftpack.fft(self.psi_mod_x)
        self.psi_mod_k *= self.k_evolve
        self.psi_mod_x = fftpack.ifft(self.psi_mod_k)
        self.psi_mod_x *= self.x_evolve_half

        self.psi_mod_x /= self.norm
        # Recompute psi_mod_k once, AFTER normalization.  The original
        # code also computed it before dividing by the norm, a dead store
        # that was immediately overwritten.
        self.psi_mod_k = fftpack.fft(self.psi_mod_x)

    self.t += dt * Nsteps
    return None
def tada_spec():
    """Plot the magnitude spectrum of 'tada.wav' and save it as a PNG."""
    rate, sig = wavfile.read('tada.wav')
    samples = np.float32(sig)
    spectrum = pyfft.fft(samples.T).T
    magnitude = np.absolute(spectrum)
    plt.plot(magnitude)
    plt.savefig('tadaspec.png')
    plt.clf()
def prob3(filename='pianoclip.wav'): """Plots the spectrum of a given .wav file, then calculates the location and value of the largest spike. For the default value, the exact value is 742.281519994 Hz (f#5 + 5 cents) Parameters ---------- filename: string, optional The name of the .wav sound file to be examined. Defaults to 'pianoclip.wav'. Returns ------- None """ plot_signal(filename) rate, signal = wavfile.read(filename) signal = sp.float32(signal) fsignal = sp.absolute(fftw.fft(signal.T).T) # Use if scipy_fftpack is unavailable #fsignal = sp.absolute(sp.fft(signal, axis=0)) plt.plot(fsignal[0:fsignal.shape[0] / 2]) plt.title("Spectrum of " + filename) plt.show() loc = fsignal[1:].argmax() val = fsignal[1:].max() print "\nSpike location:\t" + str(loc) print "Spike value:\t" + str(val) print "Hz:\t\t" + str(float(loc * rate) / signal.shape[0])
def prob4(filename='saw.wav', new_rate = 11025, outfile='prob4.wav'): """Down-samples a given .wav file to a new rate and saves the resulting signal as another .wav file. Parameters ---------- filename : string, optional The name of the .wav sound file to be down-sampled. Defaults to 'saw.wav'. new_rate : integer, optional The down-sampled rate. Defaults to 11025. outfile : string, optional The name of the new file. Defaults to prob4.wav. Returns ------- None """ old_rate, in_sig = wavfile.read(filename) fin = fftw.fft(sp.float32(in_sig)) # Use if scipy_fftpack is unavailable # fin = sp.fft(sp.float32(in_sig)) nsiz = sp.floor(in_sig.size * new_rate / old_rate) nsizh = sp.floor(nsiz / 2) fout = sp.zeros(nsiz) + 0j fout[0:nsizh] = fin[0:nsizh] fout[nsiz-nsizh+1:] = sp.conj(sp.flipud(fout[1:nsizh])) out = sp.real(sp.ifft(fout)) out = sp.int16(out/sp.absolute(out).max() * 32767) plot_signal(filename) wavfile.write('prob4.wav',new_rate,out) print ""; plot_signal('prob4.wav')
def prob3(filename='pianoclip.wav'): """Plots the spectrum of a given .wav file, then calculates the location and value of the largest spike. For the default value, the exact value is 742.281519994 Hz (f#5 + 5 cents) Parameters ---------- filename: string, optional The name of the .wav sound file to be examined. Defaults to 'pianoclip.wav'. Returns ------- None """ plot_signal(filename) rate, signal = wavfile.read(filename) signal = sp.float32(signal) fsignal = sp.absolute(fftw.fft(signal.T).T) # Use if scipy_fftpack is unavailable #fsignal = sp.absolute(sp.fft(signal, axis=0)) plt.plot(fsignal[0:fsignal.shape[0]/2]) plt.title("Spectrum of " + filename) plt.show() loc = fsignal[1:].argmax() val = fsignal[1:].max() print "\nSpike location:\t" + str(loc) print "Spike value:\t" + str(val) print "Hz:\t\t" + str(float(loc*rate)/signal.shape[0])
def _remove_stripe_based_filtering_sinogram(sinogram, sigma, size):
    """
    Algorithm 2 in the paper. Remove stripes using the filtering technique.
    Angular direction is along the axis 0.
    ---------
    Parameters: - sinogram: 2D array.
                - sigma: sigma of the Gaussian window which is used to
                    separate the low-pass and high-pass components of the
                    intensity profiles of each column.
                - size: window size of the median filter.
    ---------
    Return:     - stripe-removed sinogram.
    """
    pad = 150  # To reduce artifacts caused by FFT
    sino_t = np.transpose(sinogram)
    sino_padded = np.pad(sino_t, ((0, 0), (pad, pad)), mode='reflect')
    ncol = sino_padded.shape[1]
    gauss_win = signal.gaussian(ncol, std=sigma)
    signs = np.power(-1.0, np.arange(ncol))
    smoothed = np.zeros_like(sino_t)
    # Low-pass each row in Fourier space; the (-1)^n modulation centers
    # the spectrum so the Gaussian acts as the low-pass window.
    for row, padded_row in enumerate(sino_padded):
        filtered = fft_vo.ifft(fft_vo.fft(padded_row * signs) * gauss_win)
        smoothed[row] = np.real(filtered * signs)[pad:ncol - pad]
    sharp = sino_t - smoothed
    smoothed_cor = median_filter(smoothed, (size, 1))
    return np.transpose(smoothed_cor + sharp)
def prob4(filename='saw.wav', new_rate=11025, outfile='prob4.wav'): """Down-samples a given .wav file to a new rate and saves the resulting signal as another .wav file. Parameters ---------- filename : string, optional The name of the .wav sound file to be down-sampled. Defaults to 'saw.wav'. new_rate : integer, optional The down-sampled rate. Defaults to 11025. outfile : string, optional The name of the new file. Defaults to prob4.wav. Returns ------- None """ old_rate, in_sig = wavfile.read(filename) fin = fftw.fft(sp.float32(in_sig)) # Use if scipy_fftpack is unavailable # fin = sp.fft(sp.float32(in_sig)) nsiz = sp.floor(in_sig.size * new_rate / old_rate) nsizh = sp.floor(nsiz / 2) fout = sp.zeros(nsiz) + 0j fout[0:nsizh] = fin[0:nsizh] fout[nsiz - nsizh + 1:] = sp.conj(sp.flipud(fout[1:nsizh])) out = sp.real(sp.ifft(fout)) out = sp.int16(out / sp.absolute(out).max() * 32767) plot_signal(filename) wavfile.write('prob4.wav', new_rate, out) print "" plot_signal('prob4.wav')
def tada_spec_left():
    """Plot the first half of the magnitude spectrum of 'tada.wav'."""
    rate, sig = wavfile.read('tada.wav')
    data = np.float32(sig)
    magnitude = np.absolute(pyfft.fft(data.T).T)
    half = magnitude.shape[0] / 2
    plt.plot(magnitude[0:half, :])
    plt.savefig('tadaspec2.png')
    plt.clf()
def tada_spec_left():
    """Save a plot of the lower half of the spectrum of 'tada.wav'."""
    rate, raw = wavfile.read('tada.wav')
    raw = np.float32(raw)
    spec = np.absolute(pyfft.fft(raw.T).T)
    # Only the first half of the spectrum is informative (real input).
    plt.plot(spec[0:spec.shape[0] / 2, :])
    plt.savefig('tadaspec2.png')
    plt.clf()
def problem3(filename):
    """Plot the magnitude spectrum of a .wav file (positive frequencies,
    DC component excluded)."""
    rate, raw = wavfile.read(filename)
    raw = sp.float32(raw)
    spectrum = fftw.fft(raw, axis=0)
    # Drop DC and the mirrored upper half.
    spectrum = spectrum[1:len(spectrum) / 2]
    #return sp.argmax(spectrum)/2
    plt.plot(sp.absolute(spectrum))
    plt.show()
def fir_window_bp(delta, fl, fh): """ Finite impulse response, bandpass. This filter doesn't work exactly like the matlab version due to some fourier transform imprecisions. Consider replacing the transform calls to the FFTW versions. """ b = firwin(delta.shape[0]+1, (fl*2, fh*2), pass_zero=False)[:-1] m = delta.shape[1] batches = 20 batch_size = int(m / batches) + 1 temp = fft(ifftshift(b)) out = zeros(delta.shape, dtype=delta.dtype) for i in range(batches): indexes = (batch_size*i, min((batch_size*(i+1), m))) freq = fft(delta[:,indexes[0]:indexes[1]], axis=0)*tile(temp, (delta.shape[2],indexes[1]-indexes[0], 1)).swapaxes(0,2) out[:, indexes[0]:indexes[1]] = real(ifft(freq, axis=0)) return out
def FT_1D(ls, P, axis=-1): """ Fourier transform the complex linear polarization spectrum P(lambda^2) to obtain the Faraday dispersion function F(phi). The function uses the FFT to approximate the continuous Fourier transform of a discretely sampled function. FT: F(phi) = integral[ P(ls) exp(-2*i*phi*ls) dls] IFT: P(ls) = integral[ F(phi) exp(2*i*phi*ls) dphi] Function returns phi and F, which approximate F(phi). Parameters ---------- ls : array_like regularly sampled array of lambda_squared. ls is assumed to be regularly spaced, i.e. ls = ls0 + Dls * np.arange(N) P : array_like Complex linear polarization spectrum. axis : int axis along which to perform fourier transform. Returns ------- phi : ndarray Faraday depth of the calculated Faraday dispersion function. F : ndarray Complex Faraday dispersion function. """ assert ls.ndim == 1 assert P.shape[axis] == ls.shape[0] N = int(len(ls)) if N % 2 != 0: raise ValueError("number of samples must be even") ls = ls / np.pi Dls = ls[1] - ls[0] Dphi = 1. / (N * Dls) ls0 = ls[int(N / 2)] phi = Dphi * (np.arange(N) - N / 2) shape = np.ones(P.ndim, dtype=int) shape[axis] = N phase = np.ones(N) phase[1::2] = -1 phase = phase.reshape(shape) # F = Dls * fft.fft(P * phase, axis=axis) F = Dls * fft.fftshift(fft.fft(P, axis=axis), axes=axis) #*np.pi F *= phase F *= np.exp(-2j * np.pi * ls0 * phi.reshape(shape)) F *= np.exp(-1j * np.pi * N / 2) return phi, F
def sine_spec():
    """Plot the spectrum of a 2-second, 1760 Hz sine wave sampled at
    44100 Hz and save it as 'sinespec.png'."""
    samplerate = 44100  # 44100 samples per second
    freq = 1760         # a 1760 Hz sine wave ...
    length = 2          # ... which will last for 2 seconds
    stepsize = freq * 2 * np.pi / samplerate
    phases = np.arange(0, stepsize * length * samplerate, stepsize)
    wave = np.float32(np.sin(phases))
    plt.plot(np.absolute(pyfft.fft(wave)))
    plt.savefig('sinespec.png')
    plt.clf()
def sine_spec():
    """Generate a 1760 Hz tone, 2 s long at 44100 Hz, and save its
    magnitude spectrum to 'sinespec.png'."""
    samplerate = 44100  # 44100 samples per second
    freq = 1760         # frequency of the tone in Hz
    length = 2          # duration in seconds
    stepsize = freq * 2 * np.pi / samplerate
    tone = np.sin(np.arange(0, stepsize * length * samplerate, stepsize))
    tone = np.float32(tone)
    spectrum = pyfft.fft(tone)
    plt.plot(np.absolute(spectrum))
    plt.savefig('sinespec.png')
    plt.clf()
def process_frames(self, data): sinogram = np.copy(data[0]) ratio = self.parameters['ratio'] pad = 100 ncolpad = self.width1 + 2 * pad centerc = np.int16(np.ceil((ncolpad - 1) * 0.5)) ulist = 1.0 * (np.arange(0, ncolpad) - centerc) / ncolpad listfactor = 1.0 + ratio * ulist**2 sinopad = np.pad(sinogram, ((0, 0), (pad, pad)), mode='edge') sinophase = np.zeros((self.height1, ncolpad), dtype=np.float32) for i in range(0, self.height1): sinophase[i] = np.real(fft.ifft(np.fft.ifftshift( np.fft.fftshift(fft.fft(sinopad[i])) / listfactor))) return sinophase[:, pad:ncolpad - pad]
def process_frames(self, data):
    """Remove stripe artifacts: split each column profile into smooth and
    sharp parts with a Gaussian low-pass, correct the smooth part with the
    sorting technique, then recombine.  Window/sign vectors are precomputed
    attributes."""
    sino = np.transpose(np.copy(data[0]))
    padded = np.pad(sino, ((0, 0), (self.pad, self.pad)), mode='reflect')
    size = np.clip(np.int16(self.parameters['size']), 1, self.width1 - 1)
    smoothed = np.zeros_like(sino)
    for row, padded_row in enumerate(padded):
        filtered = fft.ifft(
            fft.fft(padded_row * self.listsign) * self.window)
        smoothed[row] = np.real(
            filtered * self.listsign)[self.pad:self.height1 - self.pad]
    sharp = sino - smoothed
    smoothed_cor = np.transpose(
        self.remove_stripe_based_sorting(
            self.matindex, np.transpose(smoothed), size))
    return np.transpose(smoothed_cor + sharp)
def down_saw_spec_correct():
    """Down-sample 'saw.wav' from 44100 Hz to 22050 Hz in the frequency
    domain and save a plot of the resulting spectrum."""
    rate, in_sig = wavfile.read('saw.wav')
    old_rate = 44100
    new_rate = 22050
    in_sig = np.float32(in_sig)
    fin = pyfft.fft(in_sig)
    # Cast to int: float array sizes and float slice indices raise on
    # modern NumPy.
    nsiz = int(np.floor(in_sig.size * new_rate / old_rate))
    nsizh = int(np.floor(nsiz / 2))
    fout = np.zeros(nsiz) + 0j
    # Keep the retained low half; mirror its conjugate into the upper
    # half so the implied time-domain signal stays real.
    fout[0:nsizh] = fin[0:nsizh]
    fout[nsiz - nsizh + 1:] = np.conj(np.flipud(fout[1:nsizh]))
    f = np.absolute(fout)
    plt.plot(f[0:f.shape[0] // 2])
    plt.savefig('sawdownspec.png')
    plt.clf()
def down_saw_spec_correct():
    """Spectrum of 'saw.wav' correctly down-sampled (44100 -> 22050 Hz)
    via frequency-domain truncation; saves 'sawdownspec.png'."""
    rate, in_sig = wavfile.read('saw.wav')
    old_rate = 44100
    new_rate = 22050
    in_sig = np.float32(in_sig)
    fin = pyfft.fft(in_sig)
    # int() casts fix float array sizes / slice indices, which modern
    # NumPy rejects.
    nsiz = int(np.floor(in_sig.size * new_rate / old_rate))
    nsizh = int(np.floor(nsiz / 2))
    fout = np.zeros(nsiz) + 0j
    fout[0:nsizh] = fin[0:nsizh]
    # Conjugate-mirror the kept half into the upper half of the spectrum.
    fout[nsiz - nsizh + 1:] = np.conj(np.flipud(fout[1:nsizh]))
    f = np.absolute(fout)
    plt.plot(f[0:f.shape[0] // 2])
    plt.savefig('sawdownspec.png')
    plt.clf()
def process_frames(self, data): sinogram = np.copy(data[0]) ratio = self.parameters['ratio'] pad = 100 ncolpad = self.width1 + 2 * pad centerc = np.int16(np.ceil((ncolpad - 1) * 0.5)) ulist = 1.0 * (np.arange(0, ncolpad) - centerc) / ncolpad listfactor = 1.0 + ratio * ulist**2 sinopad = np.pad(sinogram, ((0, 0), (pad, pad)), mode='edge') sinophase = np.zeros((self.height1, ncolpad), dtype=np.float32) for i in range(0, self.height1): sinophase[i] = np.real( fft.ifft( np.fft.ifftshift( np.fft.fftshift(fft.fft(sinopad[i])) / listfactor))) return sinophase[:, pad:ncolpad - pad]
def periodogram(y, wd='blackman', del_t=1.0, module=True, sqroot=True,
                onesided=True):
    """
    Compute periodogram with time-domain windowing, using conventions of
    one-sided power spectral density.

    Parameters
    ----------
    y : ndarray
        input time series
    wd : ndarray or str
        time window ('hanning', 'blackman' or 'rect'), or an explicit
        window array
    del_t : float, optional
        sampling time, by default 1.0
    module : bool
        if True, the module of the FFT is taken, so that the output is real
    sqroot : bool
        if True, the output is in sqrt{PSD}
    onesided : bool
        if True, the output is equivalent to the one-sided PSD
    """
    if type(wd) == str:
        # BUG FIX: the name was misspelled 'hannning', so requesting a
        # Hann window silently fell through and left wd as a string
        # (crashing at y * wd below).  The misspelling is still accepted
        # for backward compatibility.
        if wd in ('hanning', 'hannning'):
            wd = np.hanning(y.shape[0])
        elif wd == 'blackman':
            wd = np.blackman(y.shape[0])
        elif wd == 'rect':
            wd = np.ones(y.shape[0])

    if onesided:
        fact = 2.0
    else:
        fact = 1.0

    y_fft = fft(y * wd) * np.sqrt(fact * del_t / np.sum(wd**2))

    if module:
        per = np.abs(y_fft)
    else:
        per = y_fft[:]

    if not sqroot:
        # NOTE(review): taking sqrt when sqroot is False looks inverted
        # relative to the docstring ("if True, output is in sqrt{PSD}") --
        # confirm the intended semantics before relying on this branch.
        per = np.sqrt(per)

    return per
def remove_stripe_based_filtering(sinogram, sigma, size, dim=1): """ Remove stripe artifacts in a sinogram using the filtering technique, algorithm 2 in Ref. [1]. Angular direction is along the axis 0. Parameters ---------- sinogram : array_like 2D array. Sinogram image sigma : int Sigma of the Gaussian window used to separate the low-pass and high-pass components of the intensity profile of each column. size : int Window size of the median filter. dim : {1, 2}, optional Dimension of the window. Returns ------- array_like 2D array. Stripe-removed sinogram. References ---------- .. [1] https://doi.org/10.1364/OE.26.028396 """ pad = min(150, int(0.1 * sinogram.shape[0])) sinogram = np.transpose(sinogram) sino_pad = np.pad(sinogram, ((0, 0), (pad, pad)), mode='reflect') (_, ncol) = sino_pad.shape window = gaussian(ncol, std=sigma) list_sign = np.power(-1.0, np.arange(ncol)) sino_smooth = np.copy(sinogram) for i, sino_1d in enumerate(sino_pad): sino_smooth[i] = np.real( fft.ifft(fft.fft(sino_1d * list_sign) * window) * list_sign)[pad:ncol - pad] sino_sharp = sinogram - sino_smooth if dim == 2: sino_smooth_cor = median_filter(sino_smooth, (size, size)) else: sino_smooth_cor = median_filter(sino_smooth, (size, 1)) return np.transpose(sino_smooth_cor + sino_sharp)
def apply_IRS(self, data, srate, nbits):
    """ Apply telephone handset BW [300, 3200] Hz """
    # NOTE(review): this method raises unconditionally -- everything
    # below the raise is DEAD CODE kept as a work-in-progress sketch of
    # the intended IRS filtering.
    raise NotImplementedError('Under construction!')
    from pyfftw.interfaces import scipy_fftpack as fftw
    n = data.shape[0]
    # find next pow of 2 which is greater or eq to n
    pow_of_2 = 2**(np.ceil(np.log2(n)))
    # IRS alignment filter: (frequency [Hz], gain [dB]) breakpoints.
    align_filter_dB = np.array([[0, -200], [50, -40], [100, -20],
                                [125, -12], [160, -6], [200, 0],
                                [250, 4], [300, 6], [350, 8],
                                [400, 10], [500, 11], [600, 12],
                                [700, 12], [800, 12], [1000, 12],
                                [1300, 12], [1600, 12], [2000, 12],
                                [2500, 12], [3000, 12], [3250, 12],
                                [3500, 4], [4000, -200], [5000, -200],
                                [6300, -200], [8000, -200]])
    print('align filter dB shape: ', align_filter_dB.shape)
    num_of_points, trivial = align_filter_dB.shape
    # NOTE(review): `align_filter` is undefined (presumably meant
    # `align_filter_dB`), and scipy's interp1d does not take evaluation
    # points as a third positional argument -- both must be fixed before
    # this method can be enabled.
    overallGainFilter = interp1d(align_filter_dB[:, 0],
                                 align_filter[:, 1], 1000)
    # Zero-pad the signal up to the next power of two.
    x = np.zeros((pow_of_2))
    x[:data.shape[0]] = data
    x_fft = fftw.fft(x, pow_of_2)
    freq_resolution = srate / pow_of_2
    factorDb = interp1d(align_filter_dB[:, 0],
                        align_filter_dB[:, 1],
                        list(range(0, (pow_of_2 / 2) + 1) *
                             freq_resolution)) - overallGainFilter
    # Convert dB gains to linear scale and mirror for the negative
    # frequencies.
    factor = 10**(factorDb / 20)
    factor = [factor, np.fliplr(factor[1:(pow_of_2 / 2 + 1)])]
    x_fft = x_fft * factor
    y = fftw.ifft(x_fft, pow_of_2)
    # Trim the padding back off.
    data_filtered = y[:n]
    return data_filtered
def spectral_whitening(tr, smooth=None, filter=None, waterlevel=1e-8,
                       mask_again=True):
    """
    Apply spectral whitening to data

    Data is divided by its smoothed (Default: None) amplitude spectrum.

    :param tr: trace to manipulate
    :param smooth: length of smoothing window in Hz
        (default None -> no smoothing)
    :param filter: filter spectrum with bandpass after whitening
        (tuple with min and max frequency)
    :param waterlevel: waterlevel relative to mean of spectrum
    :param mask_again: weather to mask array after this operation again and
        set the corresponding data to 0
    :return: whitened data
    """
    sampling_rate = tr.stats.sampling_rate
    signal_arr = _fill_array(tr.data, fill_value=0)
    orig_mask = np.ma.getmask(signal_arr)
    nfft = next_fast_len(len(signal_arr))
    spectrum = fft(signal_arr, nfft)
    amplitude = np.abs(spectrum)
    amplitude /= np.max(amplitude)
    if smooth:
        width = int(smooth * nfft / sampling_rate)
        amplitude = ifftshift(smooth_func(fftshift(amplitude), width))
    # safe guard against division by 0
    amplitude[amplitude < waterlevel] = waterlevel
    spectrum /= amplitude
    if filter is not None:
        spectrum *= _filter_resp(*filter, sr=sampling_rate,
                                 N=len(spectrum), whole=True)[1]
    whitened = np.real(ifft(spectrum, nfft)[:len(signal_arr)])
    if mask_again:
        whitened = _fill_array(whitened, mask=orig_mask, fill_value=0)
    tr.data = whitened
    return tr
def fourier_operator(f, mode, N, idx=None):
    """Apply the (optionally sub-sampled) normalized Fourier operator
    (mode == 1) or its inverse (any other mode).

    idx, when given, selects the retained samples on the forward pass and
    the positions to scatter into on the inverse pass."""
    if mode == 1:
        # Forward: normalized FFT followed by the linear reordering.
        coeffs = f1_linear(fftw.fft(f) / np.sqrt(N))
        return coeffs[idx] if idx is not None else coeffs
    # Inverse: scatter the sub-sampled values back (if needed), undo the
    # reordering, then apply the normalized inverse FFT.
    if idx is not None:
        full = np.zeros(N, dtype=np.complex128)
        full[idx] = f
    else:
        full = f
    return fftw.ifft(f1_linear_inv(full)) * np.sqrt(N)
def periodogram_mean(func, fe, n_data, f_zero=None): """ Function calculating the theoretical mean of the periodogram (defined as the squared modulus of the fft devided by fe*n_data) given the theoretical PSD (func) , the sampling frequency fe and the number of points n_data. @param func: function of one parameter giving the PSD as a function of frequency @type func : function @param fe: sampling frequency @type fe : scalar (float) @param n_data: number of points of the periodogram @type n_data : scalar (integer) @return: P_mean : Periodogram expectation (n_data-vector) """ # 1. Calculation of the autocovariance function Rn power = np.int(np.log(n_data) / np.log(2.)) + 4 # Number of points for the integration N_points = 2**power # N_points = 3*n_data k_points = np.arange(0, N_points) frequencies = fe * (k_points / np.float(N_points) - 0.5) if f_zero is None: f_zero = fe / (N_points * 10.) i = np.where(frequencies == 0) frequencies[i] = f_zero Z = func(frequencies) n = np.arange(0, n_data) Z_ifft = ifft(Z) R = fe / np.float(N_points) * ( Z[0] * 0.5 * (np.exp(1j * np.pi * n) - np.exp(-1j * np.pi * n)) + N_points * Z_ifft[0:n_data] * np.exp(-1j * np.pi * n)) # 2. Calculation of the of the periodogram mean vector X = R[0:n_data] * (1. - np.abs(n) / np.float(n_data)) return 1. / fe * (fft(X) + n_data * ifft(X) - R[0]), R[0:n_data]
def remove_stripe_based_filtering_sorting(sinogram, sigma, size, dim=1):
    """
    Combination of algorithm 2 and algorithm 3 in [1]: remove stripes with
    the filtering technique, then correct the smooth component with the
    sorting technique. Angular direction is along the axis 0.

    Parameters
    ----------
    sinogram : float
        2D array.
    sigma : int
        Sigma of the Gaussian window used to separate the low-pass and
        high-pass components of the intensity profile of each column.
    size : int
        Window size of the median filter.
    dim : {1, 2}, optional
        Dimension of the window.

    Returns
    -------
    float
        2D array. Stripe-removed sinogram.
    """
    pad = 150  # To reduce artifacts caused by FFT
    sino_t = np.transpose(sinogram)
    padded = np.pad(sino_t, ((0, 0), (pad, pad)), mode='reflect')
    ncol = padded.shape[1]
    win = gaussian(ncol, std=sigma)
    signs = np.power(-1.0, np.arange(ncol))
    smooth = np.copy(sino_t)
    # Per-row Gaussian low-pass in Fourier space ((-1)^n centers the
    # spectrum).
    for row, padded_row in enumerate(padded):
        filtered = fft.ifft(fft.fft(padded_row * signs) * win)
        smooth[row] = np.real(filtered * signs)[pad:ncol - pad]
    sharp = sino_t - smooth
    smooth_cor = np.transpose(
        remove_stripe_based_sorting(np.transpose(smooth), size, dim))
    return np.transpose(smooth_cor + sharp)
def apply_filter(self, mat, window, pattern, pad_width): (nrow, ncol) = mat.shape if pattern == "PROJECTION": top_drop = 10 # To remove the time stamp at some data mat_pad = np.pad(mat[top_drop:], ((pad_width + top_drop, pad_width), (pad_width, pad_width)), mode="edge") win_pad = np.pad(window, pad_width, mode="edge") mat_dec = fft.ifft2( fft.fft2(-np.log(mat_pad)) / fft.ifftshift(win_pad)) mat_dec = np.abs(mat_dec[pad_width:pad_width + nrow, pad_width:pad_width + ncol]) else: mat_pad = np.pad(-np.log(mat), ((0, 0), (pad_width, pad_width)), mode='edge') win_pad = np.pad(window, ((0, 0), (pad_width, pad_width)), mode="edge") mat_fft = np.fft.fftshift(fft.fft(mat_pad), axes=1) / win_pad mat_dec = fft.ifft(np.fft.ifftshift(mat_fft, axes=1)) mat_dec = np.abs(mat_dec[:, pad_width:pad_width + ncol]) return np.float32(np.exp(-mat_dec))
def _set_psi_x(self, psi_x): assert psi_x.shape == self.x.shape self.psi_mod_x = (psi_x * np.exp(-1j * self.k[0] * self.x) * self.dx / np.sqrt(2 * np.pi)) self.psi_mod_x /= self.norm self.psi_mod_k = fftpack.fft(self.psi_mod_x)
def stransform(h, Fs):
    '''
    Compute S-Transform without for loops

    Converted from MATLAB code written by Kalyan S. Dash
    Converted by Geoffrey Barrett, CUMC

    h - an 1xN vector representing timeseries data, units will most likely
    by uV

    returns the stockwell transform, representing the values of all
    frequencies from 0-> Fs/2 (nyquist) for each time
    '''
    h = np.asarray(h, dtype=float)
    # scipy.io.savemat('stransform_numpy.mat', {'h': h})
    h = h.reshape((1, len(h)))  # uV
    n = h.shape[1]
    num_voices = int(Fs / 2)
    '''
    if n is None:
        n = h.shape[1]
        print(n)
    '''
    # n_half = num_voices
    n_half = np.fix(n / 2)
    n_half = int(n_half)
    odd_n = 1
    if n_half * 2 == n:
        odd_n = 0

    # array that goes 0-> 0.5 and then -0.5 -> 0 [2*n_half,]
    f = np.concatenate((np.arange(n_half + 1),
                        np.arange(-n_half + 1 - odd_n, 0))) / n

    Hft = fftw.fft(h, axis=1)  # uV, [1xn]
    Hft = conj_nonzeros(Hft)

    # compute all frequency domain Guassians as one matrix
    # matrix of inverse frequencies in Hz, [n_half]
    invfk = np.divide(1, f[1:n_half + 1])
    invfk = invfk.reshape((len(invfk), 1))

    W = np.multiply(
        2 * np.pi * np.tile(f, (n_half, 1)),  # [n_half, f]
        np.tile(invfk.reshape((len(invfk), 1)), (1, n)),  # [n_half(invfk) x n]
    )  # n_half x len(f)

    G = np.exp((-W**2) / 2)  # Gaussian in freq domain
    # BUG FIX: np.complex was removed from NumPy 1.24; the builtin
    # `complex` is the documented drop-in replacement.
    G = np.asarray(G, dtype=complex)  # n_half x len(f)

    # Compute Toeplitz matrix with the shifted fft(h)
    HW = scipy.linalg.toeplitz(Hft[0, :n_half + 1].T, np.conj(Hft))  # n_half + 1 x len(h)
    # HW = scipy.linalg.toeplitz(Hft[0,:n_half+1].T, Hft)

    # exclude the first row, corresponding to zero frequency
    HW = HW[1:n_half + 1, :]  # n_half x len(h)

    # compute the stockwell transform
    cwt = np.multiply(HW, G)
    ST = fftw.ifft(cwt, axis=-1)  # compute voices

    # add the zero freq row
    # print(np.mean(h, axis=1))
    st0 = np.multiply(np.mean(h, axis=1), np.ones((1, n)))
    ST = np.vstack((st0, ST))

    return ST
def __init__(self, x, psi_x0, V_x, k0 = None, hbar = 1, m = 1, t0 = 0.0):
    """
    Parameters
    ----------
    x : array_like, float
        Length-N array of evenly spaced spatial coordinates
    psi_x0 : array_like, complex
        Length-N array of the initial wave function at time t0
    V_x : array_like, float
        Length-N array giving the potential at each x
    k0 : float
        The minimum value of k.  Note that, because of the workings of the
        Fast Fourier Transform, the momentum wave-number will be defined
        in the range k0 < k < 2*pi / dx, where dx = x[1]-x[0].  If you
        expect nonzero momentum outside this range, you must modify the
        inputs accordingly.  If not specified, k0 will be calculated such
        that the range is [-k0,k0]
    hbar : float
        Value of Planck's constant (default = 1)
    m : float
        Particle mass (default = 1)
    t0 : float
        Initial time (default = 0)
    """
    # Validation of array inputs
    self.x, psi_x0, self.V_x = map(np.asarray, (x, psi_x0, V_x))
    N = self.x.size
    assert self.x.shape == (N,)
    assert psi_x0.shape == (N,)
    assert self.V_x.shape == (N,)

    # Validate and set internal parameters
    assert hbar > 0
    assert m > 0
    self.hbar = hbar
    self.m = m
    self.t = t0
    self.dt_ = None
    self.N = len(x)
    self.dx = self.x[1] - self.x[0]
    self.dk = 2 * np.pi / (self.N * self.dx)

    # Set momentum scale
    # NOTE(review): `k0 == None` would idiomatically be `k0 is None`.
    if k0 == None:
        self.k0 = -0.5 * self.N * self.dk
    else:
        assert k0 < 0
        self.k0 = k0
    self.k = self.k0 + self.dk * np.arange(self.N)

    # NOTE(review): assigning self.psi_x presumably goes through a
    # property setter (defined elsewhere in the class) that sets
    # self.psi_mod_x -- confirm; otherwise the next line would raise
    # AttributeError.
    self.psi_x = psi_x0
    self.psi_mod_k = fftpack.fft(self.psi_mod_x)

    # Variables which hold steps in evolution
    self.x_evolve_half = None
    self.x_evolve = None
    self.k_evolve = None

    if found_pyfftw:
        # Align arrays for pyFFTW so SIMD-optimized plans can be used.
        self.psi_mod_k = pyfftw.n_byte_align(
            self.psi_mod_k, pyfftw.simd_alignment)
        self.psi_mod_x = pyfftw.n_byte_align(
            self.psi_mod_x, pyfftw.simd_alignment)

        # Try to read any wisdom from file (cached FFTW planning data).
        if (os.path.isfile('fftw_wisdom.pickle.gz')):
            pyfftw.import_wisdom(
                pickle.load(gzip.open('fftw_wisdom.pickle.gz', 'rb')))

        print('about to initialize the fftw plans, which can take a while')
        # FFTW_MEASURE benchmarks candidate plans, hence the warning
        # above about initialization time.
        self.k_from_x_plan = pyfftw.FFTW(
            self.psi_mod_x, self.psi_mod_k,
            direction = 'FFTW_FORWARD',
            flags = ('FFTW_MEASURE',), threads = 4)
        self.x_from_k_plan = pyfftw.FFTW(
            self.psi_mod_k, self.psi_mod_x,
            direction = 'FFTW_BACKWARD',
            flags = ('FFTW_MEASURE',), threads = 4)
        print('finalized fftw initialization')

        # Save wisdom to file so future runs can skip the planning step.
        bla = pyfftw.export_wisdom()
        pickle.dump(bla, gzip.open('fftw_wisdom.pickle.gz', 'wb'))
def compute_k_from_x(self):
    """Refresh the k-space field as the FFT of the current x-space field."""
    transformed = fftpack.fft(self.psi_mod_x)
    self.psi_mod_k = transformed
''' If problem 3 stays as it is, this is a CORRECT solution. The current solution is totally jacked. ''' # read in files rate1,piano = wavfile.read('chopinw.wav') rate2,balloon = wavfile.read('balloon.wav') # pad signal with zeros sig = np.zeros((piano.shape[0]+balloon.shape[0],2)) sig[:len(piano)] = piano imp = np.zeros_like(sig) imp[:len(balloon)] = balloon # fourier transforms f1 = fftw.fft(sig,axis=0) f2 = fftw.fft(imp,axis=0) out = sp.ifft((f1*f2),axis=0) # prepping output and writing file out = sp.real(out) scaled = sp.int16(out/sp.absolute(out).max() * 32767) wavfile.write('test.wav',rate1,scaled) #Problem 4. Let's make the "tada" circular convolution example a problem instead. def problem4(): # read in tada.wav rate, tada = wavfile.read('tada.wav') # upon inspection, we find that tada.wav is a stereo audio file. # we create stereo white noise that lasts 10 seconds
def _set_psi_k(self, psi_k): assert psi_k.shape == self.x.shape self.psi_mod_k = psi_k * np.exp(1j * self.x[0] * self.dk * np.arange(self.N)) self.psi_mod_x = fftpack.ifft(self.psi_mod_k) self.psi_mod_k = fftpack.fft(self.psi_mod_x)
def periodogram_mean_masked(func, fe, n_data, n_freq, mask, n_points=None, n_conv=None, normal=True): """ Function calculating the theoretical mean of the periodogram of a masked signal (defined as the squared modulus of the fft devided by fe*n_data) given the theoretical PSD (func), the sampling frequency fe and the number of points n_data. @param func: function of one parameter giving the PSD as a function of frequency @type func : function @param fe: sampling frequency @type fe : scalar (float) @param n_data: number of points of the periodogram @type n_data : scalar (integer) @param mask: mask vetor M[i] = 1 if data is available, 0 otherwise @type mask : (n_data x 1) array @param n_freq: number of frequency point where to compute the periodogram @type n_freq : scalar (integer) @return: P_mean : Periodogram expectation (n_data-vector) """ if n_points == None: # 1. Calculation of the autocovariance function Rn power = np.int(np.log(2 * n_data) / np.log(2.)) # + 1 # Number of points for the integration n_points = 2**power k_points = np.arange(0, n_points) frequencies = fe * (k_points / np.float(n_points) - 0.5) i = np.where(frequencies == 0) frequencies[i] = fe / (n_points) Z = func(frequencies) n = np.arange(0, n_data) Z_ifft = ifft(Z) R = fe / np.float(n_points) * (Z[0] * 0.5 * (np.exp(1j * np.pi * n) \ - np.exp(-1j * np.pi * n)) + n_points * Z_ifft[0:n_data] * np.exp( -1j * np.pi * n)) if n_conv == None: n_conv = 2 * n_data - 1 # 2. Calculation of the sample autocovariance of the mask fx = fft(mask, n_conv) # print("FFT of M is done with N_points") # fx = fft(M, N_points) if normal: K2 = np.sum(mask**2) else: K2 = n_data lambda_N = np.real(ifft(fx * np.conj(fx))) / K2 # 3. Calculation of the of the periodogram mean vector X = R[0:n_data] * lambda_N[0:n_data] Pm = 1. / fe * (fft(X, n_freq) + n_freq * ifft(X, n_freq) - R[0] * lambda_N[0]) return Pm
def ffty(ar):
    """Row-wise FFT: transform the input along axis 1."""
    return fft(ar, axis=1)
def fftx(ar):
    """Column-wise FFT: transform the input along axis 0."""
    result = fft(ar, axis=0)
    return result
def fftx(ar):
    """FFT along the first axis (columns) of the input array."""
    return fft(ar, axis=0)
def fold(fh, comm, samplerate, fedge, fedge_at_top, nchan,
         nt, ntint, ngate, ntbin, ntw, dm, fref, phasepol,
         dedisperse='incoherent',
         do_waterfall=True, do_foldspec=True, verbose=True,
         progress_interval=100, rfi_filter_raw=None, rfi_filter_power=None,
         return_fits=False):
    """
    FFT data, fold by phase/time and make a waterfall series

    Folding is done from the position the file is currently in

    Parameters
    ----------
    fh : file handle
        handle to file holding voltage timeseries
    comm: MPI communicator or None
        will use size, rank attributes
    samplerate : Quantity
        rate at which samples were originally taken and thus double the
        band width (frequency units)
    fedge : float
        edge of the frequency band (frequency units)
    fedge_at_top: bool
        whether edge is at top (True) or bottom (False)
    nchan : int
        number of frequency channels for FFT
    nt, ntint : int
        total number nt of sets, each containing ntint samples in each file
        hence, total # of samples is nt*ntint, with each sample containing
        a single polarisation
    ngate, ntbin : int
        number of phase and time bins to use for folded spectrum
        ntbin should be an integer fraction of nt
    ntw : int
        number of time samples to combine for waterfall (does not have to be
        integer fraction of nt)
    dm : float
        dispersion measure of pulsar, used to correct for ism delay
        (column number density)
    fref: float
        reference frequency for dispersion measure
    phasepol : callable
        function that returns the pulsar phase for time in seconds relative to
        start of the file that is read.
    dedisperse : None or string (default: incoherent).
        None, 'incoherent', 'coherent', 'by-channel'.
        Note: None really does nothing
    do_waterfall, do_foldspec : bool
        whether to construct waterfall, folded spectrum (default: True)
    verbose : bool or int
        whether to give some progress information (default: True)
    progress_interval : int
        Ping every progress_interval sets
    return_fits : bool (default: False)
        return a subint fits table for rank == 0 (None otherwise)
    """
    assert dedisperse in (None, 'incoherent', 'by-channel', 'coherent')
    assert nchan % fh.nchan == 0
    if dedisperse == 'by-channel':
        oversample = nchan // fh.nchan
        assert ntint % oversample == 0
    else:
        oversample = 1

    if dedisperse == 'coherent' and fh.nchan > 1:
        raise ValueError("For coherent dedispersion, data must be "
                         "unchannelized before folding.")

    if comm is None:
        mpi_rank = 0
        mpi_size = 1
    else:
        mpi_rank = comm.rank
        mpi_size = comm.size

    npol = getattr(fh, 'npol', 1)
    assert npol == 1 or npol == 2
    if verbose > 1 and mpi_rank == 0:
        print("Number of polarisations={}".format(npol))

    # initialize folded spectrum and waterfall
    # TODO: use estimated number of points to set dtype
    if do_foldspec:
        foldspec = np.zeros((ntbin, nchan, ngate, npol**2), dtype=np.float32)
        icount = np.zeros((ntbin, nchan, ngate), dtype=np.int32)
    else:
        foldspec = None
        icount = None

    if do_waterfall:
        nwsize = nt * ntint // ntw
        waterfall = np.zeros((nwsize, nchan, npol**2), dtype=np.float64)
    else:
        waterfall = None

    if verbose and mpi_rank == 0:
        print('Reading from {}'.format(fh))

    # NOTE(review): `/` here is true division under Python 3, making nskip a
    # float; this code uses xrange so it presumably targets Python 2 —
    # confirm before porting.
    nskip = fh.tell() / fh.blocksize
    if nskip > 0:
        if verbose and mpi_rank == 0:
            print('Starting {0} blocks = {1} bytes out from start.'.format(
                nskip, nskip * fh.blocksize))

    dt1 = (1. / samplerate).to(u.s)
    # need 2*nchan real-valued samples for each FFT
    if fh.telescope == 'lofar':
        dtsample = fh.dtsample
    else:
        dtsample = nchan // oversample * 2 * dt1
    tstart = dtsample * ntint * nskip

    # pre-calculate time delay due to dispersion in coarse channels
    # for channelized data, frequencies are known
    if fh.nchan == 1:
        if getattr(fh, 'data_is_complex', False):
            # for complex data, really each complex sample consists of
            # 2 real ones, so multiply dt1 by 2.
            if fedge_at_top:
                freq = fedge - fftfreq(nchan, 2. * dt1.value) * u.Hz
            else:
                freq = fedge + fftfreq(nchan, 2. * dt1.value) * u.Hz
        else:
            if fedge_at_top:
                freq = fedge - rfftfreq(nchan * 2, dt1.value)[::2] * u.Hz
            else:
                freq = fedge + rfftfreq(nchan * 2, dt1.value)[::2] * u.Hz
        freq_in = freq
    else:
        # input frequencies may not be the ones going out
        freq_in = fh.frequencies
        if oversample == 1:
            freq = freq_in
        else:
            if fedge_at_top:
                freq = (freq_in[:, np.newaxis] -
                        u.Hz * fftfreq(oversample, dtsample.value))
            else:
                freq = (freq_in[:, np.newaxis] +
                        u.Hz * fftfreq(oversample, dtsample.value))
    # sort channels so output is in monotonically increasing frequency
    ifreq = freq.ravel().argsort()

    # pre-calculate time offsets in (input) channelized streams
    dt = dispersion_delay_constant * dm * (1. / freq_in**2 - 1. / fref**2)

    if dedisperse in ['coherent', 'by-channel']:
        # pre-calculate required turns due to dispersion
        if fedge_at_top:
            fcoh = (freq_in[np.newaxis, :] -
                    u.Hz * fftfreq(ntint, dtsample.value)[:, np.newaxis])
        else:
            fcoh = (freq_in[np.newaxis, :] +
                    u.Hz * fftfreq(ntint, dtsample.value)[:, np.newaxis])
        # set frequency relative to which dispersion is coherently corrected
        if dedisperse == 'coherent':
            _fref = fref
        else:
            _fref = freq_in[np.newaxis, :]
        # (check via eq. 5.21 and following in
        # Lorimer & Kramer, Handbook of Pulsar Astronomy
        dang = (dispersion_delay_constant * dm * fcoh *
                (1. / _fref - 1. / fcoh)**2) * u.cycle
        with u.set_enabled_equivalencies(u.dimensionless_angles()):
            dd_coh = np.exp(dang * 1j).conj().astype(np.complex64)
        # add dimension for polarisation
        dd_coh = dd_coh[..., np.newaxis]

    # Calculate the part of the whole file this node should handle.
    size_per_node = (nt - 1) // mpi_size + 1
    start_block = mpi_rank * size_per_node
    end_block = min((mpi_rank + 1) * size_per_node, nt)
    for j in range(start_block, end_block):
        if verbose and j % progress_interval == 0:
            print('#{:4d}/{:4d} is doing {:6d}/{:6d} [={:6d}/{:6d}]; '
                  'time={:18.12f}'.format(
                      mpi_rank, mpi_size, j + 1, nt,
                      j - start_block + 1, end_block - start_block,
                      (tstart + dtsample * j * ntint).value))  # time since start
        # Just in case numbers were set wrong -- break if file ends;
        # better keep at least the work done.
        try:
            raw = fh.seek_record_read(int((nskip + j) * fh.blocksize),
                                      fh.blocksize)
        except (EOFError, IOError) as exc:
            print("Hit {0!r}; writing data collected.".format(exc))
            break
        if verbose >= 2:
            print("#{:4d}/{:4d} read {} items".format(mpi_rank, mpi_size,
                                                      raw.size), end="")

        if npol == 2:  # multiple polarisations
            # NOTE(review): indexing dict-view values()[0] only works on
            # Python 2 — confirm before porting to Python 3.
            raw = raw.view(raw.dtype.fields.values()[0][0])

        if fh.nchan == 1:  # raw.shape=(ntint*npol)
            raw = raw.reshape(-1, npol)
        else:              # raw.shape=(ntint, nchan*npol)
            raw = raw.reshape(-1, fh.nchan, npol)

        if rfi_filter_raw is not None:
            raw, ok = rfi_filter_raw(raw)
            if verbose >= 2:
                print("... raw RFI (zap {0}/{1})".format(
                    np.count_nonzero(~ok), ok.size), end="")

        if np.can_cast(raw.dtype, np.float32):
            vals = raw.astype(np.float32)
        else:
            assert raw.dtype.kind == 'c'
            vals = raw

        if fh.nchan == 1:
            # have real-valued time stream of complex baseband
            # if we need some coherentdedispersion, do FT of whole thing,
            # otherwise to output channels
            if raw.dtype.kind == 'c':
                ftchan = nchan if dedisperse == 'incoherent' else len(vals)
                vals = fft(vals.reshape(-1, ftchan, npol), axis=1,
                           overwrite_x=True, **_fftargs)
            else:  # real data
                ftchan = nchan if dedisperse == 'incoherent' else len(
                    vals) // 2
                vals = rfft(vals.reshape(-1, ftchan * 2, npol), axis=1,
                            overwrite_x=True, **_fftargs)
                # rfft: Re[0], Re[1], Im[1], ..., Re[n/2-1], Im[n/2-1], Re[n/2]
                # re-order to normal fft format (like Numerical Recipes):
                # Re[0], Re[n], Re[1], Im[1], .... (channel 0 is junk anyway)
                vals = np.hstack(
                    (vals[:, 0], vals[:, -1], vals[:, 1:-1])).view(np.complex64)
            # for incoherent, vals.shape=(ntint, nchan, npol) -> OK
            # for others, have (1, ntint*nchan, npol)
            # reshape(nchan, ntint) gives rough as slowly varying -> .T
            if dedisperse != 'incoherent':
                fine = vals.reshape(nchan, -1, npol).transpose(1, 0, 2)
                # now have fine.shape=(ntint, nchan, npol)
        else:  # data already channelized
            if dedisperse == 'by-channel':
                fine = fft(vals, axis=0, overwrite_x=True, **_fftargs)
                # have fine.shape=(ntint, fh.nchan, npol)

        if dedisperse in ['coherent', 'by-channel']:
            fine *= dd_coh
            # rechannelize to output channels
            if oversample > 1 and dedisperse == 'by-channel':
                # fine.shape=(ntint*oversample, chan_in, npol)
                #           =(coarse,fine,fh.chan, npol)
                # -> reshape(oversample, ntint, fh.nchan, npol)
                # want (ntint=fine, fh.nchan, oversample, npol) -> .transpose
                fine = (fine.reshape(oversample, -1, fh.nchan, npol).transpose(
                    1, 2, 0, 3).reshape(-1, nchan, npol))
            # now, for both, fine.shape=(ntint, nchan, npol)
            vals = ifft(fine, axis=0, overwrite_x=True, **_fftargs)
        # vals[time, chan, pol]
        if verbose >= 2:
            print("... dedispersed", end="")

        if npol == 1:
            power = vals.real**2 + vals.imag**2
        else:
            # full polarisation: form the four Stokes-style cross-products
            p0 = vals[..., 0]
            p1 = vals[..., 1]
            power = np.empty(vals.shape[:-1] + (4, ), np.float32)
            power[..., 0] = p0.real**2 + p0.imag**2
            power[..., 1] = p0.real * p1.real + p0.imag * p1.imag
            power[..., 2] = p0.imag * p1.real - p0.real * p1.imag
            power[..., 3] = p1.real**2 + p1.imag**2
        if verbose >= 2:
            print("... power", end="")

        if rfi_filter_power is not None:
            power = rfi_filter_power(power)
            print("... power RFI", end="")

        # current sample positions in stream
        isr = j * (ntint // oversample) + np.arange(ntint // oversample)

        if do_waterfall:
            # loop over corresponding positions in waterfall
            for iw in xrange(isr[0] // ntw, isr[-1] // ntw + 1):
                if iw < nwsize:  # add sum of corresponding samples
                    waterfall[iw, :] += np.sum(power[isr // ntw == iw],
                                               axis=0)[ifreq]
            if verbose >= 2:
                print("... waterfall", end="")

        if do_foldspec:
            ibin = (j * ntbin) // nt  # bin in the time series: 0..ntbin-1
            # times since start
            tsample = (tstart + isr * dtsample * oversample)[:, np.newaxis]
            # correct for delay if needed
            if dedisperse in ['incoherent', 'by-channel']:
                # tsample.shape=(ntint/oversample, nchan_in)
                tsample = tsample - dt

            phase = (phasepol(tsample.to(u.s).value.ravel()).reshape(
                tsample.shape))
            # corresponding PSR phases
            # NOTE(review): np.int was removed from recent NumPy; would need
            # plain int when upgrading.
            iphase = np.remainder(phase * ngate, ngate).astype(np.int)

            for k, kfreq in enumerate(ifreq):  # sort in frequency while at it
                iph = iphase[:, (0 if iphase.shape[1] == 1
                                 else kfreq // oversample)]
                # sum and count samples by phase bin
                for ipow in xrange(npol**2):
                    foldspec[ibin, k, :, ipow] += np.bincount(iph,
                                                              power[:, kfreq,
                                                                    ipow],
                                                              ngate)
                icount[ibin, k, :] += np.bincount(iph,
                                                  power[:, kfreq, 0] != 0.,
                                                  ngate)
            if verbose >= 2:
                print("... folded", end="")

        if verbose >= 2:
            print("... done")

    #Commented out as workaround, this was causing "Referenced before assignment" errors with JB data
    #if verbose >= 2 or verbose and mpi_rank == 0:
    #    print('#{:4d}/{:4d} read {:6d} out of {:6d}'
    #          .format(mpi_rank, mpi_size, j+1, nt))

    if npol == 1:
        # drop the trailing length-1 polarisation axis for single-pol data
        if do_foldspec:
            foldspec = foldspec.reshape(foldspec.shape[:-1])
        if do_waterfall:
            waterfall = waterfall.reshape(waterfall.shape[:-1])

    return foldspec, icount, waterfall
def fold(file1, samplerate, fmid, nchan,
         nt, ntint, nhead, ngate, ntbin, ntw, dm, fref, phasepol,
         coherent=False, do_waterfall=True, do_foldspec=True, verbose=True,
         progress_interval=100):
    """FFT Effelsberg data, fold by phase/time and make a waterfall series

    Parameters
    ----------
    file1 : string
        name of the file holding voltage timeseries
    samplerate : float
        rate at which samples were originally taken and thus band width
        (frequency units))
    fmid : float
        mid point of the frequency band (frequency units)
    nchan : int
        number of frequency channels for FFT
    nt, ntint : int
        total number nt of sets, each containing ntint samples in each file
        hence, total # of samples is nt*(2*ntint), with each sample containing
        real,imag for two polarisations
    nhead : int
        number of bytes to skip before reading (usually 4096 for Effelsberg)
    ngate, ntbin : int
        number of phase and time bins to use for folded spectrum
        ntbin should be an integer fraction of nt
    ntw : int
        number of time samples to combine for waterfall (does not have to be
        integer fraction of nt)
    dm : float
        dispersion measure of pulsar, used to correct for ism delay
        (column number density)
    fref: float
        reference frequency for dispersion measure
    phasepol : callable
        function that returns the pulsar phase for time in seconds relative to
        start of part of the file that is read (i.e., ignoring nhead)
    do_waterfall, do_foldspec : bool
        whether to construct waterfall, folded spectrum (default: True)
    verbose : bool
        whether to give some progress information (default: True)
    progress_interval : int
        Ping every progress_interval sets
    """
    # initialize folded spectrum and waterfall
    foldspec2 = np.zeros((nchan, ngate, ntbin))
    nwsize = nt*ntint//ntw
    waterfall = np.zeros((nchan, nwsize))

    # size in bytes of records read from file (each nchan contains 4 bytes:
    # real,imag for 2 polarisations).
    recsize = 4*nchan*ntint
    if verbose:
        print('Reading from {}'.format(file1))

    myopen = gzip.open if '.gz' in file1 else open
    with myopen(file1, 'rb', recsize) as fh1:

        if nhead > 0:
            if verbose:
                print('Skipping {0} bytes'.format(nhead))
            fh1.seek(nhead)

        foldspec = np.zeros((nchan, ngate))
        icount = np.zeros((nchan, ngate))

        # gosh, fftpack has everything; used to calculate with:
        # fband / nchan * (np.mod(np.arange(nchan)+nchan/2, nchan)-nchan/2)
        if coherent:
            # pre-calculate required turns due to dispersion
            fcoh = (fmid +
                    fftfreq(nchan*ntint, (1./samplerate).to(u.s).value) * u.Hz)
            # (check via eq. 5.21 and following in
            # Lorimer & Kramer, Handbook of Pulsar Astrono
            dang = (dispersion_delay_constant * dm * fcoh *
                    (1./fref-1./fcoh)**2) * 360. * u.deg
            dedisperse = np.exp(dang.to(u.rad).value * 1j
                                ).conj().astype(np.complex64)
        else:
            # pre-calculate time delay due to dispersion
            freq = fmid + fftfreq(nchan, (1./samplerate).to(u.s).value) * u.Hz
            dt = (dispersion_delay_constant * dm *
                  (1./freq**2 - 1./fref**2)).to(u.s).value

        dtsample = (nchan/samplerate).to(u.s).value

        for j in xrange(nt):
            if verbose and (j+1) % progress_interval == 0:
                print('Doing {:6d}/{:6d}; time={:18.12f}'.format(
                    j+1, nt, dtsample*j*ntint))  # equivalent time since start

            # just in case numbers were set wrong -- break if file ends
            # better keep at least the work done
            # NOTE(review): bare except silently swallows any error here, not
            # just short reads; np.fromstring is also deprecated (frombuffer).
            try:
                # data stored as series of two two-byte complex numbers,
                # one for each polarization
                raw = np.fromstring(fh1.read(recsize),
                                    dtype=np.int8).reshape(-1,2,2)
            except:
                break

            # use view for fast conversion from float to complex
            vals = raw.astype(np.float32).view(np.complex64).squeeze()
            # vals[i_int * i_block, i_pol]
            if coherent:
                fine = fft(vals, axis=0, overwrite_x=True, **_fftargs)
                fine *= dedisperse[:,np.newaxis]
                vals = ifft(fine, axis=0, overwrite_x=True, **_fftargs)

            chan = fft(vals.reshape(-1, nchan, 2), axis=1,
                       overwrite_x=True, **_fftargs)
            # chan[i_int, i_block, i_pol]
            power = np.sum(chan.real**2+chan.imag**2, axis=-1)

            # current sample positions in stream
            isr = j*ntint + np.arange(ntint)

            if do_waterfall:
                # loop over corresponding positions in waterfall
                for iw in xrange(isr[0]//ntw, isr[-1]//ntw + 1):
                    if iw < nwsize:  # add sum of corresponding samples
                        waterfall[:,iw] += np.sum(power[isr//ntw == iw],
                                                  axis=0)

            if do_foldspec:
                tsample = dtsample*isr  # times since start

                for k in xrange(nchan):
                    if coherent:
                        t = tsample  # already dedispersed
                    else:
                        t = tsample - dt[k]  # dedispersed times

                    phase = phasepol(t)  # corresponding PSR phases
                    iphase = np.remainder(phase*ngate,
                                          ngate).astype(np.int)
                    # sum and count samples by phase bin
                    foldspec[k] += np.bincount(iphase, power[:,k], ngate)
                    icount[k] += np.bincount(iphase, None, ngate)

                ibin = j*ntbin//nt  # bin in the time series: 0..ntbin-1
                if (j+1)*ntbin//nt > ibin:  # last addition to bin?
                    # get normalised flux in each bin (where any were added)
                    nonzero = icount > 0
                    nfoldspec = np.where(nonzero, foldspec/icount, 0.)
                    # subtract phase average and store
                    nfoldspec -= np.where(nonzero,
                                          np.sum(nfoldspec, 1, keepdims=True) /
                                          np.sum(nonzero, 1, keepdims=True), 0)
                    foldspec2[:,:,ibin] = nfoldspec
                    # reset for next iteration
                    foldspec *= 0
                    icount *= 0

            if verbose:
                print('read {0:6d} out of {1:6d}'.format(j+1, nt))

    if do_foldspec:
        # swap two halfs in frequency, so that freq increases monotonically
        foldspec2 = fftshift(foldspec2, axes=0)

    if do_waterfall:
        # NOTE(review): `nonzero` here actually selects the ZERO entries
        # (waterfall == 0.), i.e. the mean is subtracted only where the
        # waterfall is empty — looks inverted; confirm against intent.
        nonzero = waterfall == 0.
        waterfall -= np.where(nonzero,
                              np.sum(waterfall, 1, keepdims=True) /
                              np.sum(nonzero, 1, keepdims=True), 0.)
        # swap two halfs in frequency, so that freq increases monotonically
        waterfall = fftshift(waterfall, axes=0)

    return foldspec2, waterfall
def _applyEulerianVideoMagnification(self, image):
    """Amplify subtle temporal changes and overlay them onto ``image``.

    Maintains a rolling history of downsampled frames, bandpass-filters the
    history in time via FFT between self._minHz and self._maxHz, amplifies
    the filtered signal, and writes the result back into ``image`` in place
    (via cv2.convertScaleAbs). Returns early (None) while the history is
    still filling up.
    """
    timestamp = timeit.default_timer()

    if self._useGrayOverlay:
        smallImage = cv2.cvtColor(
            image, cv2.COLOR_BGR2GRAY).astype(
                numpy.float32)
    else:
        smallImage = image.astype(numpy.float32)

    # Downsample the image using a pyramid technique.
    i = 0
    while i < self._numPyramidLevels:
        smallImage = cv2.pyrDown(smallImage)
        i += 1
    if self._useLaplacianPyramid:
        # keep only the detail (high-frequency) band of the pyramid level
        smallImage[:] -= \
            cv2.pyrUp(cv2.pyrDown(smallImage))

    historyLength = len(self._historyTimestamps)

    if historyLength < self._maxHistoryLength - 1:

        # Append the new image and timestamp to the
        # history.
        self._history[historyLength] = smallImage
        self._historyTimestamps.append(timestamp)

        # The history is still not full, so wait.
        return

    if historyLength == self._maxHistoryLength - 1:
        # Append the new image and timestamp to the
        # history.
        self._history[historyLength] = smallImage
        self._historyTimestamps.append(timestamp)
    else:
        # Drop the oldest image and timestamp from the
        # history and append the new ones.
        self._history[:-1] = self._history[1:]
        self._historyTimestamps.popleft()
        self._history[-1] = smallImage
        self._historyTimestamps.append(timestamp)

    # The history is full, so process it.

    # Find the average length of time per frame.
    startTime = self._historyTimestamps[0]
    endTime = self._historyTimestamps[-1]
    timeElapsed = endTime - startTime
    timePerFrame = \
        timeElapsed / self._maxHistoryLength
    #print 'FPS:', 1.0 / timePerFrame

    # Apply the temporal bandpass filter.
    # NOTE(review): the `threads=` keyword implies a pyfftw-style fft/ifft
    # is in scope here, not numpy's — confirm against the module imports.
    fftResult = fft(self._history, axis=0,
                    threads=self._numFFTThreads)
    frequencies = fftfreq(
        self._maxHistoryLength, d=timePerFrame)
    lowBound = (numpy.abs(
        frequencies - self._minHz)).argmin()
    highBound = (numpy.abs(
        frequencies - self._maxHz)).argmin()
    # zero everything outside [minHz, maxHz] (and its negative-frequency
    # mirror); assumes lowBound/highBound land in the positive-frequency
    # half — TODO confirm for very low _minHz
    fftResult[:lowBound] = 0j
    fftResult[highBound:-highBound] = 0j
    fftResult[-lowBound:] = 0j
    ifftResult = ifft(fftResult, axis=0,
                      threads=self._numIFFTThreads)

    # Amplify the result and overlay it on the
    # original image.
    overlay = numpy.real(ifftResult[-1]) * \
        self._amplification
    i = 0
    while i < self._numPyramidLevels:
        overlay = cv2.pyrUp(overlay)
        i += 1
    if self._useGrayOverlay:
        overlay = cv2.cvtColor(overlay,
                               cv2.COLOR_GRAY2BGR)
    cv2.convertScaleAbs(image + overlay, image)
def fold(fh, comm, samplerate, fedge, fedge_at_top, nchan,
         nt, ntint, ngate, ntbin, ntw, dm, fref, phasepol,
         dedisperse='incoherent',
         do_waterfall=True, do_foldspec=True, verbose=True,
         progress_interval=100, rfi_filter_raw=None, rfi_filter_power=None,
         return_fits=False):
    """
    FFT data, fold by phase/time and make a waterfall series

    Folding is done from the position the file is currently in

    Parameters
    ----------
    fh : file handle
        handle to file holding voltage timeseries
    comm: MPI communicator or None
        will use size, rank attributes
    samplerate : Quantity
        rate at which samples were originally taken and thus double the
        band width (frequency units)
    fedge : float
        edge of the frequency band (frequency units)
    fedge_at_top: bool
        whether edge is at top (True) or bottom (False)
    nchan : int
        number of frequency channels for FFT
    nt, ntint : int
        total number nt of sets, each containing ntint samples in each file
        hence, total # of samples is nt*ntint, with each sample containing
        a single polarisation
    ngate, ntbin : int
        number of phase and time bins to use for folded spectrum
        ntbin should be an integer fraction of nt
    ntw : int
        number of time samples to combine for waterfall (does not have to be
        integer fraction of nt)
    dm : float
        dispersion measure of pulsar, used to correct for ism delay
        (column number density)
    fref: float
        reference frequency for dispersion measure
    phasepol : callable
        function that returns the pulsar phase for time in seconds relative to
        start of the file that is read.
    dedisperse : None or string (default: incoherent).
        None, 'incoherent', 'coherent', 'by-channel'.
        Note: None really does nothing
    do_waterfall, do_foldspec : bool
        whether to construct waterfall, folded spectrum (default: True)
    verbose : bool or int
        whether to give some progress information (default: True)
    progress_interval : int
        Ping every progress_interval sets
    return_fits : bool (default: False)
        return a subint fits table for rank == 0 (None otherwise)
    """
    assert dedisperse in (None, 'incoherent', 'by-channel', 'coherent')
    need_fine_channels = dedisperse in ['by-channel', 'coherent']
    assert nchan % fh.nchan == 0
    if dedisperse == 'by-channel' and fh.nchan > 1:
        oversample = nchan // fh.nchan
        assert ntint % oversample == 0
    else:
        oversample = 1

    if dedisperse == 'coherent' and fh.nchan > 1:
        warnings.warn("Doing coherent dedispersion on channelized data. "
                      "May get artefacts!")

    if comm is None:
        mpi_rank = 0
        mpi_size = 1
    else:
        mpi_rank = comm.rank
        mpi_size = comm.size

    npol = getattr(fh, 'npol', 1)
    assert npol == 1 or npol == 2
    if verbose > 1 and mpi_rank == 0:
        print("Number of polarisations={}".format(npol))

    # initialize folded spectrum and waterfall
    # TODO: use estimated number of points to set dtype
    if do_foldspec:
        foldspec = np.zeros((ntbin, nchan, ngate, npol**2), dtype=np.float32)
        icount = np.zeros((ntbin, nchan, ngate), dtype=np.int32)
    else:
        foldspec = None
        icount = None

    if do_waterfall:
        nwsize = nt*ntint//ntw//oversample
        waterfall = np.zeros((nwsize, nchan, npol**2), dtype=np.float64)
    else:
        waterfall = None

    if verbose and mpi_rank == 0:
        print('Reading from {}'.format(fh))

    # NOTE(review): true division under Python 3 would make nskip a float
    # here — confirm intended Python version.
    nskip = fh.tell()/fh.blocksize
    if nskip > 0:
        if verbose and mpi_rank == 0:
            print('Starting {0} blocks = {1} bytes out from start.'
                  .format(nskip, nskip*fh.blocksize))

    dt1 = (1./samplerate).to(u.s)
    # need 2*nchan real-valued samples for each FFT
    if fh.telescope == 'lofar':
        dtsample = fh.dtsample
    else:
        dtsample = nchan // oversample * 2 * dt1
    tstart = dtsample * ntint * nskip

    # pre-calculate time delay due to dispersion in coarse channels
    # for channelized data, frequencies are known
    # sign with which FFT frequencies are added to the band edge
    tb = -1. if fedge_at_top else +1.
    if fh.nchan == 1:
        if getattr(fh, 'data_is_complex', False):
            # for complex data, really each complex sample consists of
            # 2 real ones, so multiply dt1 by 2.
            freq = fedge + tb * fftfreq(nchan, 2.*dt1.value) * u.Hz
            if dedisperse == 'coherent':
                fcoh = fedge + tb * fftfreq(nchan*ntint,
                                            2.*dt1.value) * u.Hz
                fcoh.shape = (-1, 1)
            elif dedisperse == 'by-channel':
                fcoh = freq + (tb * fftfreq(
                    ntint, 2.*dtsample.value) * u.Hz)[:, np.newaxis]
        else:
            freq = fedge + tb * rfftfreq(nchan*2, dt1.value)[::2] * u.Hz
            if dedisperse == 'coherent':
                fcoh = fedge + tb * rfftfreq(nchan*ntint*2,
                                             dt1.value)[::2] * u.Hz
                fcoh.shape = (-1, 1)
            elif dedisperse == 'by-channel':
                fcoh = freq + tb * fftfreq(
                    ntint, dtsample.value)[:, np.newaxis] * u.Hz
        freq_in = freq
    else:
        # input frequencies may not be the ones going out
        freq_in = fh.frequencies
        if oversample == 1:
            freq = freq_in
        else:
            freq = (freq_in[:, np.newaxis] + tb * u.Hz *
                    rfftfreq(oversample*2, dtsample.value/2.)[::2])
        # same as fine = rfftfreq(2*ntint, dtsample.value/2.)[::2]
        fcoh = freq_in[np.newaxis, :] + tb * u.Hz * rfftfreq(
            ntint*2, dtsample.value/2.)[::2, np.newaxis]

    # print('fedge_at_top={0}, tb={1}'.format(fedge_at_top, tb))
    # sort channels so output is in monotonically increasing frequency
    ifreq = freq.ravel().argsort()

    # pre-calculate time offsets in (input) channelized streams
    dt = dispersion_delay_constant * dm * (1./freq_in**2 - 1./fref**2)

    if need_fine_channels:
        # pre-calculate required turns due to dispersion.
        #
        # set frequency relative to which dispersion is coherently corrected
        if dedisperse == 'coherent':
            _fref = fref
        else:
            _fref = freq_in[np.newaxis, :]
        # (check via eq. 5.21 and following in
        # Lorimer & Kramer, Handbook of Pulsar Astronomy
        dang = (dispersion_delay_constant * dm * fcoh *
                (1./_fref-1./fcoh)**2) * u.cycle
        with u.set_enabled_equivalencies(u.dimensionless_angles()):
            dd_coh = np.exp(dang * 1j).conj().astype(np.complex64)
        # add dimension for polarisation
        dd_coh = dd_coh[..., np.newaxis]

    # Calculate the part of the whole file this node should handle.
    size_per_node = (nt-1)//mpi_size + 1
    start_block = mpi_rank*size_per_node
    end_block = min((mpi_rank+1)*size_per_node, nt)
    for j in range(start_block, end_block):
        if verbose and j % progress_interval == 0:
            print('#{:4d}/{:4d} is doing {:6d}/{:6d} [={:6d}/{:6d}]; '
                  'time={:18.12f}'
                  .format(mpi_rank, mpi_size, j+1, nt,
                          j-start_block+1, end_block-start_block,
                          (tstart+dtsample*j*ntint).value))  # time since start
        # Just in case numbers were set wrong -- break if file ends;
        # better keep at least the work done.
        try:
            raw = fh.seek_record_read(int((nskip+j)*fh.blocksize),
                                      fh.blocksize)
        except(EOFError, IOError) as exc:
            print("Hit {0!r}; writing data collected.".format(exc))
            break
        if verbose >= 2:
            print("#{:4d}/{:4d} read {} items"
                  .format(mpi_rank, mpi_size, raw.size), end="")

        if npol == 2:  # multiple polarisations
            # NOTE(review): dict-view values()[0] indexing is Python-2-only.
            raw = raw.view(raw.dtype.fields.values()[0][0])

        if fh.nchan == 1:  # raw.shape=(ntint*npol)
            raw = raw.reshape(-1, npol)
        else:              # raw.shape=(ntint, nchan*npol)
            raw = raw.reshape(-1, fh.nchan, npol)

        if rfi_filter_raw is not None:
            raw, ok = rfi_filter_raw(raw)
            if verbose >= 2:
                print("... raw RFI (zap {0}/{1})"
                      .format(np.count_nonzero(~ok), ok.size), end="")

        if np.can_cast(raw.dtype, np.float32):
            vals = raw.astype(np.float32)
        else:
            assert raw.dtype.kind == 'c'
            vals = raw

        if fh.nchan == 1:
            # have real-valued time stream of complex baseband
            # if we need some coherentdedispersion, do FT of whole thing,
            # otherwise to output channels
            if raw.dtype.kind == 'c':
                ftchan = len(vals) if dedisperse == 'coherent' else nchan
                vals = fft(vals.reshape(-1, ftchan, npol), axis=1,
                           overwrite_x=True, **_fftargs)
            else:  # real data
                ftchan = len(vals) // 2 if dedisperse == 'coherent' else nchan
                vals = rfft(vals.reshape(-1, ftchan*2, npol), axis=1,
                            overwrite_x=True, **_fftargs)
                if vals.dtype.kind == 'f':
                    # this depends on version, sigh.
                    # rfft: Re[0], Re[1], Im[1],.,Re[n/2-1], Im[n/2-1], Re[n/2]
                    # re-order to normal fft format (like Numerical Recipes):
                    # Re[0], Re[n], Re[1], Im[1], .... (channel 0 junk anyway)
                    vals = (np.hstack((vals[:, :1], vals[:, -1:],
                                       vals[:, 1:-1]))
                            .reshape(-1, ftchan, 2 * npol))
                    if npol == 2:  # reorder pol & real/imag
                        vals1 = vals[:, :, 1]
                        vals[:, :, 1] = vals[:, :, 2]
                        vals[:, :, 2] = vals1
                        vals = vals.reshape(-1, ftchan, npol, 2)
                else:
                    vals[:, 0] = vals[:, 0].real + 1j * vals[:, -1].real
                    vals = vals[:, :-1]
                vals = vals.view(np.complex64).reshape(-1, ftchan, npol)

            # for incoherent, vals.shape=(ntint, nchan, npol)
            # for others, (1, ntint*nchan, npol) -> (ntint*nchan, 1, npol)
            if need_fine_channels:
                if dedisperse == 'by-channel':
                    fine = fft(vals, axis=0, overwrite_x=True, **_fftargs)
                else:
                    fine = vals.reshape(-1, 1, npol)

        else:  # data already channelized
            if need_fine_channels:
                fine = fft(vals, axis=0, overwrite_x=True, **_fftargs)
                # have fine.shape=(ntint, fh.nchan, npol)

        if need_fine_channels:
            # Dedisperse.
            fine *= dd_coh

            # if dedisperse == 'by-channel' and oversample > 1:
            # fine.shape=(ntint*oversample, chan_in, npol)
            #           =(coarse,fine,fh.chan, npol)
            # -> reshape(oversample, ntint, fh.nchan, npol)
            # want (ntint=fine, fh.nchan, oversample, npol) -> .transpose
            #     fine = (fine.reshape(nchan / fh.nchan, -1, fh.nchan, npol)
            #             .transpose(1, 2, 0, 3)
            #             .reshape(-1, nchan, npol))

            # now fine.shape=(ntint, nchan, npol)  w/ nchan=1 for coherent
            vals = ifft(fine, axis=0, overwrite_x=True, **_fftargs)

            if dedisperse == 'coherent' and nchan > 1 and fh.nchan == 1:
                # final FT to get requested channels
                vals = vals.reshape(-1, nchan, npol)
                vals = fft(vals, axis=1, overwrite_x=True, **_fftargs)
            elif dedisperse == 'by-channel' and oversample > 1:
                vals = vals.reshape(-1, oversample, fh.nchan, npol)
                vals = fft(vals, axis=1, overwrite_x=True, **_fftargs)
                vals = vals.transpose(0, 2, 1, 3).reshape(-1, nchan, npol)

        # vals[time, chan, pol]
        if verbose >= 2:
            print("... dedispersed", end="")

        if npol == 1:
            power = vals.real**2 + vals.imag**2
        else:
            # full polarisation: form the four cross-products
            p0 = vals[..., 0]
            p1 = vals[..., 1]
            power = np.empty(vals.shape[:-1] + (4,), np.float32)
            power[..., 0] = p0.real**2 + p0.imag**2
            power[..., 1] = p0.real*p1.real + p0.imag*p1.imag
            power[..., 2] = p0.imag*p1.real - p0.real*p1.imag
            power[..., 3] = p1.real**2 + p1.imag**2
        if verbose >= 2:
            print("... power", end="")

        # current sample positions and corresponding time in stream
        isr = j*(ntint // oversample) + np.arange(ntint // oversample)
        tsr = (isr*dtsample*oversample)[:, np.newaxis]

        if rfi_filter_power is not None:
            power = rfi_filter_power(power, tsr.squeeze())
            print("... power RFI", end="")

        # correct for delay if needed
        if dedisperse in ['incoherent', 'by-channel']:
            # tsample.shape=(ntint/oversample, nchan_in)
            tsr = tsr - dt

        if do_waterfall:
            # # loop over corresponding positions in waterfall
            # for iw in xrange(isr[0]//ntw, isr[-1]//ntw + 1):
            #     if iw < nwsize:  # add sum of corresponding samples
            #         waterfall[iw, :] += np.sum(power[isr//ntw == iw],
            #                                    axis=0)[ifreq]
            iw = np.round((tsr / dtsample / oversample).to(1)
                          .value / ntw).astype(int)
            for k, kfreq in enumerate(ifreq):  # sort in frequency while at it
                iwk = iw[:, (0 if iw.shape[1] == 1 else kfreq // oversample)]
                iwk = np.clip(iwk, 0, nwsize-1, out=iwk)
                iwkmin = iwk.min()
                iwkmax = iwk.max()+1
                for ipow in range(npol**2):
                    waterfall[iwkmin:iwkmax, k, ipow] += np.bincount(
                        iwk-iwkmin, power[:, kfreq, ipow], iwkmax-iwkmin)
            if verbose >= 2:
                print("... waterfall", end="")

        if do_foldspec:
            ibin = (j*ntbin) // nt  # bin in the time series: 0..ntbin-1
            # times and cycles since start time of observation.
            tsample = tstart + tsr
            phase = (phasepol(tsample.to(u.s).value.ravel())
                     .reshape(tsample.shape))
            # corresponding PSR phases
            # NOTE(review): np.int was removed from recent NumPy; would need
            # plain int when upgrading.
            iphase = np.remainder(phase*ngate, ngate).astype(np.int)

            for k, kfreq in enumerate(ifreq):  # sort in frequency while at it
                iph = iphase[:, (0 if iphase.shape[1] == 1
                                 else kfreq // oversample)]
                # sum and count samples by phase bin
                for ipow in range(npol**2):
                    foldspec[ibin, k, :, ipow] += np.bincount(
                        iph, power[:, kfreq, ipow], ngate)
                icount[ibin, k, :] += np.bincount(
                    iph, power[:, kfreq, 0] != 0., ngate)
            if verbose >= 2:
                print("... folded", end="")

        if verbose >= 2:
            print("... done")

    #Commented out as workaround, this was causing "Referenced before assignment" errors with JB data
    #if verbose >= 2 or verbose and mpi_rank == 0:
    #    print('#{:4d}/{:4d} read {:6d} out of {:6d}'
    #          .format(mpi_rank, mpi_size, j+1, nt))

    if npol == 1:
        # drop the trailing length-1 polarisation axis for single-pol data
        if do_foldspec:
            foldspec = foldspec.reshape(foldspec.shape[:-1])
        if do_waterfall:
            waterfall = waterfall.reshape(waterfall.shape[:-1])

    return foldspec, icount, waterfall