def correlation_spectrum(x1, x2, Fs=2 * np.pi, norm=False):
    """
    Calculate the spectral decomposition of the correlation.

    Parameters
    ----------
    x1, x2 : ndarray
        Two arrays to be correlated. Same dimensions.

    Fs : float, optional
        Sampling rate in Hz. If provided, an array of frequencies will be
        returned. Defaults to 2*pi.

    norm : bool, optional
        When this is true, the spectrum is normalized to sum to 1.

    Returns
    -------
    f : ndarray
        ndarray with the frequencies

    ccn : ndarray
        The spectral decomposition of the correlation

    Notes
    -----
    This method is described in full in:

    D Cordes, V M Haughton, K Arfanakis, G J Wendt, P A Turski, C H Moritz, M
    A Quigley, M E Meyerand (2000). Mapping functionally related regions of
    brain with functional connectivity MR imaging. AJNR American journal of
    neuroradiology 21:1636-44
    """
    x1 = x1 - np.mean(x1)
    x2 = x2 - np.mean(x2)
    x1_f = fftpack.fft(x1)
    x2_f = fftpack.fft(x2)
    D = np.sqrt(np.sum(x1 ** 2) * np.sum(x2 ** 2))
    n = x1.shape[0]

    ccn = ((np.real(x1_f) * np.real(x2_f) +
            np.imag(x1_f) * np.imag(x2_f)) /
           (D * n))

    if norm:
        ccn = ccn / np.sum(ccn) * 2  # Only half of the sum is sent back
                                     # because of the freq domain symmetry.
                                     # XXX Does normalization make this
                                     # strictly positive?

    f = utils.get_freqs(Fs, n)
    # BUG FIX: under Python 3, n / 2 + 1 is a float and cannot be used as a
    # slice bound; floor division keeps the index integral.
    return f, ccn[0:(n // 2 + 1)]
def correlation_spectrum(x1, x2, Fs=2 * np.pi, norm=False):
    """
    Compute the frequency-domain decomposition of the correlation between
    two signals.

    Parameters
    ----------
    x1, x2 : ndarray
        The two arrays to correlate; must have the same dimensions.

    Fs : float, optional
        Sampling rate in Hz. If provided, an array of frequencies will be
        returned. Defaults to 2*pi.

    norm : bool, optional
        If True, normalize the spectrum so that it sums to 1.

    Returns
    -------
    f : ndarray
        ndarray with the frequencies

    ccn : ndarray
        The spectral decomposition of the correlation

    Notes
    -----
    This method is described in full in:

    D Cordes, V M Haughton, K Arfanakis, G J Wendt, P A Turski, C H Moritz, M
    A Quigley, M E Meyerand (2000). Mapping functionally related regions of
    brain with functional connectivity MR imaging. AJNR American journal of
    neuroradiology 21:1636-44
    """
    # Remove the mean from each signal before transforming:
    x1 = x1 - np.mean(x1)
    x2 = x2 - np.mean(x2)
    spec1 = fftpack.fft(x1)
    spec2 = fftpack.fft(x2)
    # Normalization constant: product of the signals' energies.
    scale = np.sqrt(np.sum(x1 ** 2) * np.sum(x2 ** 2))
    n_pts = x1.shape[0]

    cross = spec1.real * spec2.real + spec1.imag * spec2.imag
    ccn = cross / (scale * n_pts)

    if norm:
        # Factor of 2, since only half of the (symmetric) spectrum is
        # returned below.
        # XXX Does normalization make this strictly positive?
        ccn = ccn / np.sum(ccn) * 2

    freqs = utils.get_freqs(Fs, n_pts)
    return freqs, ccn[:n_pts // 2 + 1]
def filtered_fourier(self):
    """
    Filter the time-series by passing it to the Fourier domain and null
    out the frequency bands outside of the range [lb,ub]

    Returns
    -------
    ts.TimeSeries
        A new time-series holding the band-limited, real-valued data, with
        the same sampling rate and time unit as the input.
    """
    # Positive-frequency grid matching the last (time) axis of the data.
    freqs = tsu.get_freqs(self.sampling_rate, self.data.shape[-1])

    if self.ub is None:
        # No upper bound given: keep everything up to the highest frequency.
        # NOTE: this mutates self.ub as a side effect.
        self.ub = freqs[-1]

    power = fftpack.fft(self.data)
    # Indices of out-of-band frequencies (below lb or above ub):
    idx_0 = np.hstack([np.where(freqs < self.lb)[0],
                       np.where(freqs > self.ub)[0]])

    # Make sure that you keep the DC component (idx_0 may include 0):
    keep_dc = np.copy(power[..., 0])
    power[..., idx_0] = 0
    # Negating the indices hits the mirrored negative frequencies:
    power[..., -1 * idx_0] = 0  # Take care of the negative frequencies
    power[..., 0] = keep_dc  # And put the DC back in when you're done:

    data_out = fftpack.ifft(power)
    data_out = np.real(data_out)  # In order to make sure that you are not
                                  # left with float-precision residual
                                  # complex parts

    return ts.TimeSeries(data=data_out,
                         sampling_rate=self.sampling_rate,
                         time_unit=self.time_unit)
def filtered_fourier(self):
    """
    Filter the time-series by transforming it to the Fourier domain and
    zeroing out the frequency bands outside the range [lb,ub]
    """
    freqs = tsu.get_freqs(self.sampling_rate, self.data.shape[-1])

    if self.ub is None:
        # Default the upper bound to the highest available frequency.
        self.ub = freqs[-1]

    spectrum = fftpack.fft(self.data)
    # Positive-frequency indices lying outside the pass-band:
    out_of_band = np.hstack([np.where(freqs < self.lb)[0],
                             np.where(freqs > self.ub)[0]])

    # Save the DC component so it survives the zeroing below:
    dc = np.copy(spectrum[..., 0])
    spectrum[..., out_of_band] = 0
    # Mirror the zeroing onto the negative frequencies:
    spectrum[..., -1 * out_of_band] = 0
    # Restore the DC component:
    spectrum[..., 0] = dc

    # Discard float-precision residual imaginary parts:
    filtered = np.real(fftpack.ifft(spectrum))

    return ts.TimeSeries(data=filtered,
                         sampling_rate=self.sampling_rate,
                         time_unit=self.time_unit)
def spectra(self):
    """Return the FFT of the input data multiplied by each taper.

    Broadcasts tapers of shape (K, N) against data of shape (M, N),
    producing an array of tapered spectra with shape (M, K, N).
    """
    tapered = self.input.data[:, None, :] * self.tapers[None, :, :]
    return fftpack.fft(tapered)
def cache_fft(time_series, ij, lb=0, ub=None,
              method=None, prefer_speed_over_memory=False,
              scale_by_freq=True):
    """compute and cache the windowed FFTs of the time_series, in such a way
    that computing the psd and csd of any combination of them can be done
    quickly.

    Parameters
    ----------

    time_series : float array
       An ndarray with time-series, where time is the last dimension

    ij: list of tuples
      Each tuple in this variable should contain a pair of
      indices of the form (i,j). The resulting cache will contain the fft of
      time-series in the rows indexed by the unique elements of the union of i
      and j

    lb,ub: float
       Define a frequency band of interest, for which the fft will be cached

    method: dict, optional
        See :func:`get_spectra` for details on how this is used. For this set
        of functions, 'this_method' has to be 'welch'

    Returns
    -------
    freqs, cache

        where: cache =
             {'FFT_slices':FFT_slices,'FFT_conj_slices':FFT_conj_slices,
             'norm_val':norm_val}

    Notes
    -----

    - For these functions, only the Welch windowed periodogram ('welch') is
      available.

    - Detrending the input is not an option here, in order to save time on an
      empty function call.
    """
    if method is None:
        method = {'this_method': 'welch'}  # The default

    this_method = method.get('this_method', 'welch')

    if this_method == 'welch':
        NFFT = method.get('NFFT', 64)
        Fs = method.get('Fs', 2 * np.pi)
        window = method.get('window', mlab.window_hanning)
        n_overlap = method.get('n_overlap', int(np.ceil(NFFT / 2.0)))
    else:
        e_s = "For cache_fft, spectral estimation method must be welch"
        raise ValueError(e_s)

    time_series = utils.zero_pad(time_series, NFFT)

    # The shape of the zero-padded version:
    n_channels, n_time_points = time_series.shape

    # get all the unique channels in time_series that we are interested in by
    # checking the ij tuples
    all_channels = set()
    for i, j in ij:
        all_channels.add(i)
        all_channels.add(j)

    # for real time_series, ignore the negative frequencies
    if np.iscomplexobj(time_series):
        n_freqs = NFFT
    else:
        n_freqs = NFFT // 2 + 1

    # Which frequencies
    freqs = utils.get_freqs(Fs, NFFT)

    # If there are bounds, limit the calculation to within that band,
    # potentially include the DC component:
    lb_idx, ub_idx = utils.get_bounds(freqs, lb, ub)
    n_freqs = ub_idx - lb_idx

    # Make the window:
    # BUG FIX: mlab.cbook.iterable was removed from matplotlib;
    # np.iterable is the drop-in replacement.
    if np.iterable(window):
        assert len(window) == NFFT
        window_vals = window
    else:
        window_vals = window(np.ones(NFFT, time_series.dtype))

    # Each fft needs to be normalized by the square of the norm of the window
    # and, for consistency with newer versions of mlab.csd (which, in turn,
    # are consistent with Matlab), normalize also by the sampling rate:
    if scale_by_freq:
        # This is the normalization factor for one-sided estimation, taking
        # into account the sampling rate. This makes the PSD a density
        # function, with units of dB/Hz, so that integrating over frequencies
        # gives you the RMS (XXX this should be in the tests!).
        norm_val = (np.abs(window_vals) ** 2).sum() * (Fs / 2)
    else:
        norm_val = (np.abs(window_vals) ** 2).sum() / 2

    # cache the FFT of every windowed, detrended NFFT length segment
    # of every channel.  If prefer_speed_over_memory, cache the conjugate
    # as well
    i_times = list(range(0, n_time_points - NFFT + 1, NFFT - n_overlap))
    n_slices = len(i_times)
    FFT_slices = {}
    FFT_conj_slices = {}

    for i_channel in all_channels:
        # BUG FIX: the np.complex alias was removed in numpy 1.24; the
        # builtin complex is the documented replacement.
        Slices = np.zeros((n_slices, n_freqs), dtype=complex)
        for iSlice in range(n_slices):
            thisSlice = time_series[i_channel,
                                    i_times[iSlice]:i_times[iSlice] + NFFT]

            # Windowing:
            thisSlice = window_vals * thisSlice  # No detrending

            # Derive the fft for that slice, restricted to the band:
            Slices[iSlice, :] = (fftpack.fft(thisSlice)[lb_idx:ub_idx])

        FFT_slices[i_channel] = Slices

        if prefer_speed_over_memory:
            FFT_conj_slices[i_channel] = np.conjugate(Slices)

    cache = {'FFT_slices': FFT_slices, 'FFT_conj_slices': FFT_conj_slices,
             'norm_val': norm_val, 'Fs': Fs, 'scale_by_freq': scale_by_freq}

    return freqs, cache
def cache_fft(time_series, ij, lb=0, ub=None,
              method=None, prefer_speed_over_memory=False,
              scale_by_freq=True):
    """compute and cache the windowed FFTs of the time_series, in such a way
    that computing the psd and csd of any combination of them can be done
    quickly.

    Parameters
    ----------

    time_series : float array
       An ndarray with time-series, where time is the last dimension

    ij: list of tuples
      Each tuple in this variable should contain a pair of
      indices of the form (i,j). The resulting cache will contain the fft of
      time-series in the rows indexed by the unique elements of the union of i
      and j

    lb,ub: float
       Define a frequency band of interest, for which the fft will be cached

    method: dict, optional
        See :func:`get_spectra` for details on how this is used. For this set
        of functions, 'this_method' has to be 'welch'

    Returns
    -------
    freqs, cache

        where: cache =
             {'FFT_slices':FFT_slices,'FFT_conj_slices':FFT_conj_slices,
             'norm_val':norm_val}

    Notes
    -----

    - For these functions, only the Welch windowed periodogram ('welch') is
      available.

    - Detrending the input is not an option here, in order to save time on an
      empty function call.
    """
    if method is None:
        method = {'this_method': 'welch'}  # The default

    this_method = method.get('this_method', 'welch')

    if this_method == 'welch':
        NFFT = method.get('NFFT', 64)
        Fs = method.get('Fs', 2 * np.pi)
        window = method.get('window', mlab.window_hanning)
        n_overlap = method.get('n_overlap', int(np.ceil(NFFT / 2.0)))
    else:
        e_s = "For cache_fft, spectral estimation method must be welch"
        raise ValueError(e_s)

    time_series = utils.zero_pad(time_series, NFFT)

    # The shape of the zero-padded version:
    n_channels, n_time_points = time_series.shape

    # get all the unique channels in time_series that we are interested in by
    # checking the ij tuples
    all_channels = set()
    for i, j in ij:
        all_channels.add(i)
        all_channels.add(j)

    # for real time_series, ignore the negative frequencies
    if np.iscomplexobj(time_series):
        n_freqs = NFFT
    else:
        n_freqs = NFFT // 2 + 1

    # Which frequencies
    freqs = utils.get_freqs(Fs, NFFT)

    # If there are bounds, limit the calculation to within that band,
    # potentially include the DC component:
    lb_idx, ub_idx = utils.get_bounds(freqs, lb, ub)
    n_freqs = ub_idx - lb_idx

    # Make the window:
    # FIX: matplotlib removed cbook.iterable; use np.iterable instead.
    if np.iterable(window):
        assert len(window) == NFFT
        window_vals = window
    else:
        window_vals = window(np.ones(NFFT, time_series.dtype))

    # Each fft needs to be normalized by the square of the norm of the window
    # and, for consistency with newer versions of mlab.csd (which, in turn,
    # are consistent with Matlab), normalize also by the sampling rate:
    if scale_by_freq:
        # This is the normalization factor for one-sided estimation, taking
        # into account the sampling rate. This makes the PSD a density
        # function, with units of dB/Hz, so that integrating over
        # frequencies gives you the RMS. (XXX this should be in the tests!).
        norm_val = (np.abs(window_vals) ** 2).sum() * (Fs / 2)
    else:
        norm_val = (np.abs(window_vals) ** 2).sum() / 2

    # cache the FFT of every windowed, detrended NFFT length segment
    # of every channel.  If prefer_speed_over_memory, cache the conjugate
    # as well
    i_times = list(range(0, n_time_points - NFFT + 1, NFFT - n_overlap))
    n_slices = len(i_times)
    FFT_slices = {}
    FFT_conj_slices = {}

    for i_channel in all_channels:
        # FIX: np.complex was removed in numpy 1.24 -- use the builtin
        # complex type as the dtype.
        Slices = np.zeros((n_slices, n_freqs), dtype=complex)
        for iSlice in range(n_slices):
            thisSlice = time_series[i_channel,
                                    i_times[iSlice]:i_times[iSlice] + NFFT]

            # Windowing:
            thisSlice = window_vals * thisSlice  # No detrending

            # Derive the fft for that slice, keeping only the band of
            # interest:
            Slices[iSlice, :] = (fftpack.fft(thisSlice)[lb_idx:ub_idx])

        FFT_slices[i_channel] = Slices

        if prefer_speed_over_memory:
            FFT_conj_slices[i_channel] = np.conjugate(Slices)

    cache = {
        'FFT_slices': FFT_slices,
        'FFT_conj_slices': FFT_conj_slices,
        'norm_val': norm_val,
        'Fs': Fs,
        'scale_by_freq': scale_by_freq
    }
    return freqs, cache
def multi_taper_csd(s, Fs=2 * np.pi, BW=None, low_bias=True,
                    adaptive=False, sides='default'):
    """Returns an estimate of the Cross Spectral Density (CSD) function
    between all (N choose 2) pairs of timeseries in s, using the multitaper
    method. If the NW product, or the BW and Fs in Hz are not specified by
    the user, a bandwidth of 4 times the fundamental frequency, corresponding
    to NW = 4 will be used.

    Parameters
    ----------
    s : ndarray
        An array of sampled random processes, where the time axis is
        assumed to be on the last axis. If ndim > 2, the number of time
        series to compare will still be taken as prod(s.shape[:-1])

    Fs: float, Sampling rate of the signal

    BW: float,
       The bandwidth of the windowing function will determine the number of
       tapers to use. This parameter represents a trade-off between frequency
       resolution (lower main lobe BW for the taper) and variance reduction
       (higher BW and number of averaged estimates).

    adaptive : {True, False}
       Use adaptive weighting to combine spectra

    low_bias : {True, False}
       Rather than use 2NW tapers, only use the tapers that have better than
       90% spectral concentration within the bandwidth (still using a
       maximum of 2NW tapers)

    sides : str (optional)   [ 'default' | 'onesided' | 'twosided' ]
         This determines which sides of the spectrum to return.  For
         complex-valued inputs, the default is two-sided, for real-valued
         inputs, default is one-sided Indicates whether to return a one-sided
         or two-sided

    Returns
    -------
    (freqs, csd_est) : ndarrays
        The estimatated CSD and the frequency points vector.
        The CSD{i,j}(f) are returned in a square "matrix" of vectors
        holding Sij(f). For an input array of (M,N), the output is (M,M,N)
    """
    # have last axis be time series for now
    N = s.shape[-1]
    rest_of = s.shape[:-1]
    # FIX: np.product was removed in numpy 2.0; np.prod is the replacement.
    M = int(np.prod(rest_of))

    s = s.reshape(M, N)
    # de-mean this sucker
    s = utils.remove_bias(s, axis=-1)

    # Get the number of tapers from the sampling rate and the bandwidth:
    if BW is not None:
        NW = BW / (2 * Fs) * N
    else:
        NW = 4

    Kmax = int(2 * NW)

    dpss, eigvals = dpss_windows(N, NW, Kmax)
    if low_bias:
        keepers = (eigvals > 0.9)
        dpss = dpss[keepers]
        eigvals = eigvals[keepers]
        Kmax = len(dpss)

    # if the time series is a complex vector, a one sided PSD is invalid:
    if (sides == 'default' and np.iscomplexobj(s)) or sides == 'twosided':
        sides = 'twosided'
    elif sides in ('default', 'onesided'):
        sides = 'onesided'

    sig_sl = [slice(None)] * len(s.shape)
    sig_sl.insert(len(s.shape) - 1, np.newaxis)

    # tapered.shape is (M, Kmax, N)
    # FIX: index with a tuple -- indexing with a list is an error on
    # modern numpy.
    tapered = s[tuple(sig_sl)] * dpss

    # compute the y_{i,k}(f)
    tapered_spectra = fftpack.fft(tapered)

    # compute the cross-spectral density functions
    # FIX: floor division keeps last_freq an int under Python 3 (it is used
    # as an array dimension below).
    last_freq = N // 2 + 1 if sides == 'onesided' else N

    if adaptive:
        w = np.empty(tapered_spectra.shape[:-1] + (last_freq,))
        nu = np.empty((M, last_freq))
        # FIX: xrange does not exist in Python 3; use range.
        for i in range(M):
            w[i], nu[i] = utils.adaptive_weights(
                tapered_spectra[i], eigvals, sides=sides
                )
    else:
        # let the weights simply be the square-root of the eigenvalues:
        weights = np.sqrt(eigvals).reshape(Kmax, 1)

    csdfs = np.empty((M, M, last_freq), 'D')
    for i in range(M):
        if adaptive:
            wi = w[i]
        else:
            wi = weights
        for j in range(i + 1):
            if adaptive:
                wj = w[j]
            else:
                wj = weights
            ti = tapered_spectra[i]
            tj = tapered_spectra[j]
            csdfs[i, j] = mtm_cross_spectrum(ti, tj, (wi, wj), sides=sides)

    # Fill in the upper triangle by conjugate symmetry:
    upper_idc = triu_indices(M, k=1)
    lower_idc = tril_indices(M, k=-1)
    csdfs[upper_idc] = csdfs[lower_idc].conj()

    if sides == 'onesided':
        freqs = np.linspace(0, Fs / 2, N // 2 + 1)
    else:
        freqs = np.linspace(0, Fs, N, endpoint=False)

    return freqs, csdfs
def multi_taper_psd(s, Fs=2 * np.pi, BW=None, adaptive=False,
                    jackknife=True, low_bias=True, sides='default',
                    NFFT=None):
    """Returns an estimate of the PSD function of s using the multitaper
    method. If the NW product, or the BW and Fs in Hz are not specified by
    the user, a bandwidth of 4 times the fundamental frequency, corresponding
    to NW = 4 will be used.

    Parameters
    ----------
    s : ndarray
       An array of sampled random processes, where the time axis is assumed
       to be on the last axis

    Fs: float
        Sampling rate of the signal

    BW: float
       The bandwidth of the windowing function will determine the number of
       tapers to use. This parameter represents a trade-off between frequency
       resolution (lower main lobe BW for the taper) and variance reduction
       (higher BW and number of averaged estimates).

    adaptive : {True/False}
       Use an adaptive weighting routine to combine the PSD estimates of
       different tapers.

    jackknife : {True/False}
       Use the jackknife method to make an estimate of the PSD variance
       at each point.

    low_bias : {True/False}
       Rather than use 2NW tapers, only use the tapers that have better than
       90% spectral concentration within the bandwidth (still using a
       maximum of 2NW tapers)

    sides : str (optional)   [ 'default' | 'onesided' | 'twosided' ]
         This determines which sides of the spectrum to return.  For
         complex-valued inputs, the default is two-sided, for real-valued
         inputs, default is one-sided Indicates whether to return a one-sided
         or two-sided

    Returns
    -------
    (freqs, psd_est, var_or_nu) : ndarrays
        The first two arrays are the frequency points vector and the
        estimatated PSD. The last returned array differs depending on whether
        the jackknife was used. It is either

        * The jackknife estimated variance of the log-psd, OR
        * The degrees of freedom in a chi2 model of how the estimated
          PSD is distributed about the true log-PSD (this is either
          2*floor(2*NW), or calculated from adaptive weights)
    """
    # have last axis be time series for now
    N = s.shape[-1] if not NFFT else NFFT
    rest_of_dims = s.shape[:-1]
    # FIX: np.product was removed in numpy 2.0; np.prod is the replacement.
    s = s.reshape(int(np.prod(rest_of_dims)), N)
    # de-mean this sucker
    s = utils.remove_bias(s, axis=-1)

    # Get the number of tapers from the sampling rate and the bandwidth:
    if BW is not None:
        NW = BW / (2 * Fs) * N
    else:
        NW = 4

    Kmax = int(2 * NW)

    dpss, eigs = dpss_windows(N, NW, Kmax)
    if low_bias:
        keepers = (eigs > 0.9)
        dpss = dpss[keepers]
        eigs = eigs[keepers]
        Kmax = len(dpss)

    # if the time series is a complex vector, a one sided PSD is invalid:
    if (sides == 'default' and np.iscomplexobj(s)) or sides == 'twosided':
        sides = 'twosided'
    elif sides in ('default', 'onesided'):
        sides = 'onesided'

    sig_sl = [slice(None)] * len(s.shape)
    sig_sl.insert(-1, np.newaxis)

    # tapered.shape is (..., Kmax, N)
    # FIX: index with a tuple -- indexing with a list is an error on
    # modern numpy.
    tapered = s[tuple(sig_sl)] * dpss

    # Find the direct spectral estimators S_k(f) for k tapered signals..
    # don't normalize the periodograms by 1/N as normal.. since the taper
    # windows are orthonormal, they effectively scale the signal by 1/N
    # XXX: scipy fft is faster
    tapered_spectra = fftpack.fft(tapered)

    # FIX: floor division keeps last_freq an int under Python 3 (it is used
    # as an array dimension below).
    last_freq = N // 2 + 1 if sides == 'onesided' else N

    # degrees of freedom at each timeseries, at each freq
    nu = np.empty((s.shape[0], last_freq))
    if adaptive:
        weights = np.empty(tapered_spectra.shape[:-1] + (last_freq,))
        # FIX: xrange does not exist in Python 3; use range.
        for i in range(s.shape[0]):
            weights[i], nu[i] = utils.adaptive_weights(
                tapered_spectra[i], eigs, sides=sides
                )
    else:
        # let the weights simply be the square-root of the eigenvalues.
        # repeat these values across all n_chan channels of data
        n_chan = tapered.shape[0]
        weights = np.tile(np.sqrt(eigs), n_chan).reshape(n_chan, Kmax, 1)
        nu.fill(2 * Kmax)

    if jackknife:
        jk_var = np.empty_like(nu)
        for i in range(s.shape[0]):
            jk_var[i] = utils.jackknifed_sdf_variance(
                tapered_spectra[i], eigs, sides=sides, adaptive=adaptive
                )

    # Compute the unbiased spectral estimator for S(f) as the sum of
    # the S_k(f) weighted by the function w_k(f)**2, all divided by the
    # sum of the w_k(f)**2 over k

    # 1st, roll the tapers axis forward
    tapered_spectra = np.rollaxis(tapered_spectra, 1, start=0)
    weights = np.rollaxis(weights, 1, start=0)
    sdf_est = mtm_cross_spectrum(
        tapered_spectra, tapered_spectra, weights, sides=sides
        )

    if sides == 'onesided':
        freqs = np.linspace(0, Fs / 2, N // 2 + 1)
    else:
        freqs = np.linspace(0, Fs, N, endpoint=False)

    out_shape = rest_of_dims + (len(freqs),)
    sdf_est.shape = out_shape

    # XXX: always return nu and jk_var
    if jackknife:
        jk_var.shape = out_shape
        return freqs, sdf_est, jk_var
    else:
        nu.shape = out_shape
        return freqs, sdf_est, nu
def periodogram_csd(s, Fs=2 * np.pi, Sk=None, NFFT=None,
                    sides='default', normalize=True):
    """Takes an N-point periodogram estimate of all the cross spectral
    density functions between rows of s.

    The number of points N, or a precomputed FFT Sk may be provided. By
    default, the CSD function returned is normalized so that the integral of
    the PSD is equal to the mean squared amplitude (mean energy) of s (see
    Notes).

    Parameters
    ----------
    s : ndarray
        Signals for which to estimate the CSD, time dimension in the last
        axis

    Fs: float (optional)
       The sampling rate. Defaults to 2*pi

    Sk : ndarray (optional)
        Precomputed FFT of rows of s

    NFFT : int (optional)
        Indicates an N-point FFT where N != s.shape[-1]

    sides : str (optional)   [ 'default' | 'onesided' | 'twosided' ]
        This determines which sides of the spectrum to return. For
        complex-valued inputs, the default is two-sided, for real-valued
        inputs, default is one-sided Indicates whether to return a one-sided
        or two-sided

    normalize : boolean (optional)
        Normalizes the PSD

    Returns
    -------
    freqs, csd_est : ndarrays
        The estimatated CSD and the frequency points vector.
        The CSD{i,j}(f) are returned in a square "matrix" of vectors
        holding Sij(f). For an input array that is reshaped to (M,N),
        the output is (M,M,N)

    Notes
    -----
    setting dw = 2*PI/N, then the integral from -PI, PI (or 0,PI) of PSD/(2PI)
    will be nearly equal to sxy(0), where sxx is the crosscovariance function
    of s1(n), s2(n). By definition, sxy(0) = E{s1(n)s2*(n)} ~
    (s1*s2.conj()).mean()
    """
    s_shape = s.shape
    s.shape = (np.prod(s_shape[:-1]), s_shape[-1])
    # defining an Sk_loc is a little opaque, but it avoids having to
    # reset the shape of any user-given Sk later on
    if Sk is not None:
        Sk_shape = Sk.shape
        N = Sk.shape[-1]
        Sk_loc = Sk.reshape(np.prod(Sk_shape[:-1]), N)
    else:
        if NFFT is not None:
            N = NFFT
        else:
            N = s.shape[-1]
        Sk_loc = fftpack.fft(s, n=N)
    # reset s.shape
    s.shape = s_shape

    M = Sk_loc.shape[0]
    norm = float(s.shape[-1])

    # if the time series is a complex vector, a one sided PSD is invalid:
    if (sides == 'default' and np.iscomplexobj(s)) or sides == 'twosided':
        sides = 'twosided'
    elif sides in ('default', 'onesided'):
        sides = 'onesided'

    if sides == 'onesided':
        # putative Nyquist freq
        # FIX: floor division -- these are array dimensions / slice bounds
        # and must be ints under Python 3.
        Fn = N // 2 + 1
        # last duplicate freq
        Fl = (N + 1) // 2
        csd_mat = np.empty((M, M, Fn), 'D')
        freqs = np.linspace(0, Fs / 2, Fn)
        # FIX: xrange does not exist in Python 3; use range.
        for i in range(M):
            for j in range(i + 1):
                csd_mat[i, j, 0] = Sk_loc[i, 0] * Sk_loc[j, 0].conj()
                # Double the duplicated frequencies to conserve power:
                csd_mat[i, j, 1:Fl] = 2 * (Sk_loc[i, 1:Fl] *
                                           Sk_loc[j, 1:Fl].conj())
                if Fn > Fl:
                    csd_mat[i, j, Fn - 1] = (Sk_loc[i, Fn - 1] *
                                             Sk_loc[j, Fn - 1].conj())
    else:
        csd_mat = np.empty((M, M, N), 'D')
        # FIX: the two-sided frequency grid spans the full band [0, Fs),
        # consistent with periodogram and multi_taper_csd (was Fs / 2).
        freqs = np.linspace(0, Fs, N, endpoint=False)
        for i in range(M):
            for j in range(i + 1):
                csd_mat[i, j] = Sk_loc[i] * Sk_loc[j].conj()
    if normalize:
        csd_mat /= norm

    # Fill in the upper triangle by conjugate symmetry:
    upper_idc = triu_indices(M, k=1)
    lower_idc = tril_indices(M, k=-1)
    csd_mat[upper_idc] = csd_mat[lower_idc].conj()
    return freqs, csd_mat
def periodogram(s, Fs=2 * np.pi, Sk=None, N=None,
                sides='default', normalize=True):
    """Takes an N-point periodogram estimate of the PSD function. The
    number of points N, or a precomputed FFT Sk may be provided. By default,
    the PSD function returned is normalized so that the integral of the PSD
    is equal to the mean squared amplitude (mean energy) of s (see Notes).

    Parameters
    ----------
    s : ndarray
        Signal(s) for which to estimate the PSD, time dimension in the last
        axis

    Fs: float (optional)
       The sampling rate. Defaults to 2*pi

    Sk : ndarray (optional)
        Precomputed FFT of s

    N : int (optional)
        Indicates an N-point FFT where N != s.shape[-1]

    sides : str (optional)  [ 'default' | 'onesided' | 'twosided' ]
        This determines which sides of the spectrum to return. For
        complex-valued inputs, the default is two-sided, for real-valued
        inputs, default is one-sided Indicates whether to return a one-sided
        or two-sided PSD

    normalize : boolean (optional, default=True)
        Normalizes the PSD

    Returns
    -------
    (f, psd): tuple
       f: The central frequencies for the frequency bands
       PSD estimate for each row of s

    Notes
    -----
    setting dw = 2*PI/N, then the integral from -PI, PI (or 0,PI) of PSD/(2PI)
    will be nearly equal to sxx(0), where sxx is the autocovariance function
    of s(n). By definition, sxx(0) = E{s(n)s*(n)} ~ (s*s.conj()).mean()
    """
    if Sk is not None:
        N = Sk.shape[-1]
    else:
        N = s.shape[-1] if not N else N
        Sk = fftpack.fft(s, n=N)
    pshape = list(Sk.shape)
    norm = float(s.shape[-1])

    # if the time series is a complex vector, a one sided PSD is invalid:
    if (sides == 'default' and np.iscomplexobj(s)) or sides == 'twosided':
        sides = 'twosided'
    elif sides in ('default', 'onesided'):
        sides = 'onesided'

    if sides == 'onesided':
        # putative Nyquist freq
        # BUG FIX: use floor division -- N / 2 + 1 is a float under Python 3
        # and breaks np.zeros / np.linspace / slicing below.
        Fn = N // 2 + 1
        # last duplicate freq
        Fl = (N + 1) // 2
        pshape[-1] = Fn
        P = np.zeros(pshape, 'd')
        freqs = np.linspace(0, Fs / 2, Fn)
        P[..., 0] = (Sk[..., 0] * Sk[..., 0].conj()).real
        # Double the duplicated frequencies to conserve power:
        P[..., 1:Fl] = 2 * (Sk[..., 1:Fl] * Sk[..., 1:Fl].conj()).real
        if Fn > Fl:
            # Even N: the Nyquist bin is not duplicated, so no doubling.
            P[..., Fn - 1] = (Sk[..., Fn - 1] * Sk[..., Fn - 1].conj()).real
    else:
        P = (Sk * Sk.conj()).real
        freqs = np.linspace(0, Fs, N, endpoint=False)
    if normalize:
        P /= norm
    return freqs, P
def tapered_spectra(s, tapers, NFFT=None, low_bias=True):
    """
    Compute the tapered spectra of the rows of s.

    Parameters
    ----------
    s : ndarray, (n_arr, n_pts)
        An array whose rows are timeseries.

    tapers : ndarray or container
        Either the precomputed DPSS tapers, or the pair of parameters
        (NW, K) needed to compute K tapers of length n_pts.

    NFFT : int
        Number of FFT bins to compute

    low_bias : Boolean
        If compute DPSS, automatically select tapers corresponding to
        > 90% energy concentration.

    Returns
    -------
    t_spectra : ndarray, shaped (n_arr, K, NFFT)
        The FFT of the tapered sequences in s. First dimension is squeezed
        out if n_arr is 1.
    eigvals : ndarray
        The eigenvalues are also returned if DPSS are calculated here.
    """
    N = s.shape[-1]
    # XXX: don't allow NFFT < N -- not every implementation is so restrictive!
    if NFFT is None or NFFT < N:
        NFFT = N
    rest_of_dims = s.shape[:-1]
    # FIX: np.product was removed in numpy 2.0; np.prod is the replacement
    # (an unused local M was also dropped).
    s = s.reshape(int(np.prod(rest_of_dims)), N)
    # de-mean this sucker
    s = utils.remove_bias(s, axis=-1)

    if not isinstance(tapers, np.ndarray):
        # then tapers is (NW, K)
        args = (N,) + tuple(tapers)
        dpss, eigvals = dpss_windows(*args)
        if low_bias:
            keepers = (eigvals > 0.9)
            dpss = dpss[keepers]
            eigvals = eigvals[keepers]
        tapers = dpss
    else:
        eigvals = None
    K = tapers.shape[0]

    sig_sl = [slice(None)] * len(s.shape)
    sig_sl.insert(len(s.shape) - 1, np.newaxis)

    # tapered.shape is (M, K, N)
    # FIX: index with a tuple -- indexing with a list is an error on
    # modern numpy.
    tapered = s[tuple(sig_sl)] * tapers

    # compute the y_{i,k}(f) -- full FFT takes ~1.5x longer, but unpacking
    # results of real-valued FFT eats up memory
    t_spectra = fftpack.fft(tapered, n=NFFT, axis=-1)
    t_spectra.shape = rest_of_dims + (K, NFFT)
    if eigvals is None:
        return t_spectra
    return t_spectra, eigvals
def tapered_spectra(s, tapers, NFFT=None, low_bias=True):
    """
    Compute the tapered spectra of the rows of s.

    Parameters
    ----------
    s : ndarray, (n_arr, n_pts)
        An array whose rows are timeseries.

    tapers : ndarray or container
        Either the precomputed DPSS tapers, or the pair of parameters
        (NW, K) needed to compute K tapers of length n_pts.

    NFFT : int
        Number of FFT bins to compute

    low_bias : Boolean
        If compute DPSS, automatically select tapers corresponding to
        > 90% energy concentration.

    Returns
    -------
    t_spectra : ndarray, shaped (n_arr, K, NFFT)
        The FFT of the tapered sequences in s. First dimension is squeezed
        out if n_arr is 1.
    eigvals : ndarray
        The eigenvalues are also returned if DPSS are calculated here.
    """
    N = s.shape[-1]
    # XXX: don't allow NFFT < N -- not every implementation is so restrictive!
    if NFFT is None or NFFT < N:
        NFFT = N
    rest_of_dims = s.shape[:-1]
    # BUG FIX: np.product is gone from numpy 2.0 -- use np.prod. The unused
    # local M from the original is also removed.
    s = s.reshape(int(np.prod(rest_of_dims)), N)
    # de-mean this sucker
    s = utils.remove_bias(s, axis=-1)

    if not isinstance(tapers, np.ndarray):
        # then tapers is (NW, K)
        args = (N, ) + tuple(tapers)
        dpss, eigvals = dpss_windows(*args)
        if low_bias:
            keepers = (eigvals > 0.9)
            dpss = dpss[keepers]
            eigvals = eigvals[keepers]
        tapers = dpss
    else:
        eigvals = None
    K = tapers.shape[0]

    sig_sl = [slice(None)] * len(s.shape)
    sig_sl.insert(len(s.shape) - 1, np.newaxis)

    # tapered.shape is (M, K, N)
    # BUG FIX: modern numpy rejects list indices; a tuple is required.
    tapered = s[tuple(sig_sl)] * tapers

    # compute the y_{i,k}(f) -- full FFT takes ~1.5x longer, but unpacking
    # results of real-valued FFT eats up memory
    t_spectra = fftpack.fft(tapered, n=NFFT, axis=-1)
    t_spectra.shape = rest_of_dims + (K, NFFT)
    if eigvals is None:
        return t_spectra
    return t_spectra, eigvals