Example #1
import numpy as np
from numpy.fft import fft
from scipy.signal import detrend  # stand-in for the detrend helper this snippet assumes


def cpsd(a, b, nfft, fs, window='hann'):
    """
    Compute the cross power spectral density (CPSD) of the signals *a* and *b*.

    This performs:
    fft(a)*conj(fft(b))
    Note that this is consistent with *np.correlate*'s definition of correlation.
    (The conjugate of D.B. Chelton's definition of correlation.)

    The two signals should be the same length, and should both be real.

    See also:
    psd, cohere

    The units of the spectrum are the product of the units of *a* and *b*,
    divided by the units of *fs*.
    """
    if np.iscomplexobj(a) or np.iscomplexobj(b):
        raise ValueError('cpsd requires real-valued input signals.')
    auto_psd = False
    if a is b:
        auto_psd = True
    max_ind = len(a)
    nfft = int(min(nfft, max_ind))
    repeats = int(np.fix(2. * max_ind / nfft))
    fs = np.float64(fs)
    if max_ind == nfft:
        repeats = 1
    if window == 'hann':
        wind = np.hanning(nfft)
    elif window is None or window == 1:
        wind = np.ones(nfft)
    else:
        raise ValueError("window must be 'hann', 1, or None")
    fft_inds = slice(1, int(nfft / 2. + 1))
    wght = 2. / (wind ** 2).sum()
    s1 = fft(detrend(a[0:nfft]) * wind)[fft_inds]
    if auto_psd:
        pwr = np.abs(s1) ** 2
    else:
        pwr = s1 * np.conj(fft(detrend(b[0:nfft]) * wind)[fft_inds])
    if repeats > 1:
        step = int(np.fix((max_ind - nfft) / (repeats - 1)))
        for i in range(step, max_ind - nfft + 1, step):
            s1 = fft(detrend(a[i:(i + nfft)]) * wind)[fft_inds]
            if auto_psd:
                pwr += np.abs(s1) ** 2
            else:
                pwr += s1 * \
                    np.conj(fft(detrend(b[i:(i + nfft)]) * wind)[fft_inds])
    pwr *= wght / repeats / fs
    if auto_psd:  # No need to take the abs again.
        return pwr
    return np.abs(pwr)
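
A minimal usage sketch (not from the original source), assuming the cpsd above and its detrend helper are available:

import numpy as np

fs = 100.0                                    # sampling rate [Hz]
t = np.arange(0, 60, 1 / fs)                  # 60 s of data
a = np.sin(2 * np.pi * 5 * t) + 0.1 * np.random.randn(t.size)
b = np.sin(2 * np.pi * 5 * t + 0.3) + 0.1 * np.random.randn(t.size)

nfft = 1024
pxy = cpsd(a, b, nfft, fs)                    # cross spectral density of a and b
pxx = cpsd(a, a, nfft, fs)                    # auto-spectrum (takes the `a is b` branch)
freqs = np.arange(1, nfft // 2 + 1) * fs / nfft   # frequency bins kept by fft_inds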
Example #2
 def test_detrend_none(self):
     assert mlab.detrend_none(0.) == 0.
     assert mlab.detrend_none(0., axis=1) == 0.
     assert mlab.detrend(0., key="none") == 0.
     assert mlab.detrend(0., key=mlab.detrend_none) == 0.
     for sig in [
             5.5, self.sig_off, self.sig_slope, self.sig_base,
             (self.sig_base + self.sig_slope + self.sig_off).tolist(),
             np.vstack([self.sig_base,  # 2D case.
                        self.sig_base + self.sig_off,
                        self.sig_base + self.sig_slope,
                        self.sig_base + self.sig_off + self.sig_slope]),
             np.vstack([self.sig_base,  # 2D transposed case.
                        self.sig_base + self.sig_off,
                        self.sig_base + self.sig_slope,
                        self.sig_base + self.sig_off + self.sig_slope]).T,
     ]:
         if isinstance(sig, np.ndarray):
             assert_array_equal(mlab.detrend_none(sig), sig)
         else:
             assert mlab.detrend_none(sig) == sig
Example #3
    def test_detrend_mean_2d(self):
        input = np.vstack([self.sig_off, self.sig_base + self.sig_off])
        target = np.vstack([self.sig_zeros, self.sig_base])
        self.allclose(mlab.detrend_mean(input), target)
        self.allclose(mlab.detrend_mean(input, axis=None), target)
        self.allclose(mlab.detrend_mean(input.T, axis=None).T, target)
        self.allclose(mlab.detrend(input), target)
        self.allclose(mlab.detrend(input, axis=None), target)
        self.allclose(mlab.detrend(input.T, key="constant", axis=None),
                      target.T)

        input = np.vstack([
            self.sig_base, self.sig_base + self.sig_off,
            self.sig_base + self.sig_slope,
            self.sig_base + self.sig_off + self.sig_slope
        ])
        target = np.vstack([
            self.sig_base, self.sig_base, self.sig_base + self.sig_slope_mean,
            self.sig_base + self.sig_slope_mean
        ])
        self.allclose(mlab.detrend_mean(input.T, axis=0), target.T)
        self.allclose(mlab.detrend_mean(input, axis=1), target)
        self.allclose(mlab.detrend_mean(input, axis=-1), target)
        self.allclose(mlab.detrend(input, key="default", axis=1), target)
        self.allclose(mlab.detrend(input.T, key="mean", axis=0), target.T)
        self.allclose(mlab.detrend(input.T, key=mlab.detrend_mean, axis=0),
                      target.T)
Example #4
def psd(x, NFFT, Fs, noverlap=None):

    # If noverlap is None, there is no overlap between the windows
    if noverlap is None:
        noverlap = 0

    # Make the window the size of the NFFT
    window = np.hanning(NFFT)

    # Make sure the data is a np array
    x = np.asarray(x)

    # Zero-pad x if the data is shorter than one NFFT segment
    if len(x) < NFFT:
        n = len(x)
        x = np.resize(x, NFFT)
        x[n:] = 0

    # Split x into overlapping segments (`rolling_windows` is a stride-based
    # helper assumed to be defined elsewhere)
    Pxx = rolling_windows(x, NFFT, noverlap, axis=0)

    # Detrend the data
    Pxx = mlab.detrend(Pxx, key='none')

    # Apply the window to each segment
    Pxx = Pxx * window.reshape((-1, 1))

    # Compute the fft and only look at the positive frequencies
    Pxx = np.fft.rfft(Pxx, n=NFFT, axis=0)

    # Calculate the magnitude squared
    Pxx = Pxx * np.conj(Pxx)

    # Take the mean of the Pxx
    Pxx = Pxx.mean(axis=1)
    Pxx = Pxx.real

    # Scale Pxx to account for power loss from windowing.
    # Scaling factors follow the matplotlib documentation so the results match.
    # For even NFFT don't double DC or Nyquist; for odd NFFT just don't double DC.
    if not NFFT % 2:
        slc = slice(1, -1, None)
    else:
        slc = slice(1, None, None)
    Pxx[slc] = Pxx[slc] * 2.
    Pxx = Pxx / Fs
    Pxx = Pxx / (np.abs(window) ** 2).sum()

    # Determine the positive frequencies
    freqs = np.fft.rfftfreq(NFFT, 1 / Fs)

    return Pxx, freqs
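
A hypothetical, self-contained check of the function above (not from the original source). Since rolling_windows is not defined in this snippet, a minimal stand-in is sketched from how it is used, and the result is compared against matplotlib.mlab.psd, which the scaling comments above suggest this function is meant to reproduce.

import numpy as np
import matplotlib.mlab as mlab


def rolling_windows(x, n, noverlap, axis=0):
    # Stand-in: stack overlapping length-n segments as columns.
    step = n - noverlap
    nseg = (len(x) - noverlap) // step
    return np.stack([x[i * step:i * step + n] for i in range(nseg)], axis=1)


Fs = 200.0
t = np.arange(0, 10, 1 / Fs)
x = np.cos(2 * np.pi * 20 * t) + np.random.randn(t.size)

Pxx, freqs = psd(x, NFFT=256, Fs=Fs, noverlap=128)
Pxx_ref, freqs_ref = mlab.psd(x, NFFT=256, Fs=Fs, noverlap=128)
print(np.allclose(Pxx, Pxx_ref), np.allclose(freqs, freqs_ref))  # should print: True True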
Example #5
def window(x, n, samprate, overlap, normalize=True, KOsmooth=False,
           window_correction=None, KOnormalize=False,
           bandwidth=40, detrend=True, subtractmean=False,
           winlen=201, polyorder=3):
    """
    Windowing borrowed from matplotlib.mlab for specgram (_spectral_helper)
    Applies a Hann window (if 0.5 overlap is used, amplitudes are ~preserved)
    https://github.com/matplotlib/matplotlib/blob/f92bd013ea8f0f99d2e177fd572b86f3b42bb652/lib/matplotlib/mlab.py#L434

    DIVIDES BY NFFT TO CORRECT AMPLITUDES

    Args:
        x (array): 1xn array data to window
        n (int): length each window should be, in samples (before padding)
        overlap (float): proportion of overlap of windows, should be between 0 and 1
        normalize (bool): if True, will normalize by signal length by dividing by 1/NFFT (1/NFFT = deltat/time length),
            if False, will scale by deltat (1/samprate) to approximate continuous transform
        samprate (float): sampling rate of x, in samples per second
        KOsmooth (bool): If True, will return Konno Ohmachi smoothed spectra with only positive frequencies
        window_correction (str): Apply correction for fourier spectrum to account for windowing. If 'amp', will
            multiply spectrum by 2 to preserve amplitude, if 'energy' will multiply by 1.63
        KOnormalize (bool): If True, KO smoothing is applied linearly, otherwise logarithmically
        bandwidth (float): bandwidth for KO smoothing
        detrend (bool): if True, will detrend each window
        subtractmean (bool): if True, will subtract time-averaged mean from
            entire time series before windowing using savgol filter
        winlen (int): window length (in samples) of the savgol_filter used if subtractmean is True
        polyorder (int): polynomial order of the savgol_filter used if subtractmean is True

    Returns:
        tmid: time vector taken at midpoints of each window (in sec from 0)
        tstart: time vector taken at beginning of each window (in sec from 0)
        freqs: vector of frequencies (Hz)
        resultF: fourier transform, or smoothed fourier transform, of each time window
        resultT: time series of each time window (before windowing)
        resultTwin: time series of each time window with the Hann window applied

    """
    if overlap < 0. or overlap > 1.:
        raise ValueError('overlap must be between 0 and 1')

    if subtractmean:
        mean1 = savgol_filter(np.copy(x), winlen, polyorder)
        x = np.copy(x) - mean1

    noverlap = int(overlap * n)
    resultT1 = mlab.stride_windows(x, n, noverlap)
    row, col = np.shape(resultT1)
    newsampint = (n-noverlap)/samprate
    tstart = np.linspace(0, (col-1)*newsampint, num=col)
    tmid = tstart + 0.5*(n/samprate)  # shift by half of window length
    NFFT = nextpow2(n)
    if detrend:
        resultT = mlab.detrend(resultT1, key='mean', axis=0)
    else:
        resultT = resultT1
    resultTwin, windowVals = mlab.apply_window(resultT, mlab.window_hanning, axis=0, return_window=True)
    if KOsmooth:
        if normalize:
            resultF = np.fft.rfft(resultTwin, n=NFFT, axis=0)/NFFT
        else:
            resultF = np.fft.rfft(resultTwin, n=NFFT, axis=0)/samprate
        if window_correction == 'amp':  # For hanning window
            resultF *= 2.0
        elif window_correction == 'energy':
            resultF *= 1.63
        freqs = np.fft.rfftfreq(NFFT, 1/samprate)
        resultF = ksmooth(np.abs(resultF.T), freqs, normalize=KOnormalize, bandwidth=bandwidth)
        resultF = resultF.T
    else:
        if normalize:
            resultF = np.fft.fft(resultTwin, n=NFFT, axis=0)/NFFT
        else:
            resultF = np.fft.fft(resultTwin, n=NFFT, axis=0)/samprate
        freqs = np.fft.fftfreq(NFFT, 1/samprate)
        if window_correction == 'amp':
            resultF *= 2.0
        elif window_correction == 'energy':
            resultF *= 1.63
    return tmid, tstart, freqs, resultF, resultT, resultTwin
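
A hypothetical call of the function above (not from the original source). It assumes a nextpow2 helper is importable and a matplotlib version that still provides mlab.stride_windows and mlab.apply_window (both have been removed from recent releases); the KOsmooth branch would additionally need the ksmooth helper.

import numpy as np

samprate = 100.0                       # Hz, illustrative value
t = np.arange(0, 120, 1 / samprate)    # two minutes of data
x = np.sin(2 * np.pi * 1.5 * t) + 0.05 * np.random.randn(t.size)

tmid, tstart, freqs, resultF, resultT, resultTwin = window(
    x, n=1024, samprate=samprate, overlap=0.5,
    normalize=True, window_correction='amp')
print(resultF.shape, len(freqs), len(tmid))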
Example #6
 def test_detrend_str_linear_1d(self):
     input = self.sig_slope + self.sig_off
     target = self.sig_zeros
     self.allclose(mlab.detrend(input, key="linear"), target)
     self.allclose(mlab.detrend(input, key=mlab.detrend_linear), target)
     self.allclose(mlab.detrend_linear(input.tolist()), target)
Example #7
                                 tr.stats.starttime)
 except ValueError:
     print("Cannot process station %s, no RESP file given" % tr.stats.station)
     continue
 # Cannot process a whole day file, split it into smaller chunks
 overlap = s2p(30.0, tr)
 olap = overlap
 samp = 0
 df = tr.stats.sampling_rate
 if trId(tr.stats)[1] != last_id or tr.stats.starttime - last_endtime > 1.0 / df:
     data_buf = np.array([], dtype='float64')
     olap = 0
 while samp < tr.stats.npts:
     data = tr.data[samp:samp + nfft - olap].astype('float64')
     data = np.concatenate((data_buf, data))
     data = detrend(data)
     # Correct for frequency response of instrument
     data = seisSim(data, tr.stats.sampling_rate, paz, inst_sim=inst)
     data /= (paz['sensitivity'] / 1e9)  #V/nm/s correct for overall sensitivity
     data = recStalta(data, s2p(2.5, tr), s2p(10.0, tr))
     picked_values = triggerOnset(data, 3.0, 0.5, max_len=overlap)
     #
     for i, j in picked_values:
         begin = tr.stats.starttime + float(i + samp - olap) / df
         end = tr.stats.starttime + float(j + samp - olap) / df
         f.write("%s,%s,%s\n" % (str(begin), str(end), tr.stats.station))
     olap = overlap # only needed for first time in loop
     samp += nfft - overlap
     data_buf = data[-overlap:]
     print('.', end='')  # Progress Bar
 last_endtime, last_id = trId(tr.stats)
Example #8
# load data
tr = read("china.mseed")[0]
df, npts = (tr.stats.sampling_rate, tr.stats.npts)
tr.data = tr.data.astype('float64') #convert to double

# lowpass at 30s and downsample to 10s
f0 = 1.0/50
tr.data = lowpass(tr.data, f0, df=df, corners=2)
tr.data = tr.data[0::10]  # take every 10th sample
df, npts = (.1, len(tr.data)) #redefine df and npts

# do the fourier transformation
#data = np.loadtxt("china8b.asc",usecols=[0], dtype='float64')
#tr.data -= tr.data.mean()
tr.data = detrend(tr.data, 'linear')
tr.data *= np.hanning(npts)
df = 0.1
fdat = np.fft.rfft(tr.data, n=4*npts)  # smooth by padding with zeros
fdat /= abs(fdat).max() #normalize to 1

# get the eigenmodes
eigen = np.loadtxt("eiglst", usecols=[0,1,2,3], converters={1:stringToBool})
# only the S part
ind1 = eigen[:,1].astype(bool)
ind2 = eigen[:,0]
ind = ((ind2 == 0) & ind1) #bitwise comparing for bool arrays
modes = eigen[ind,3]/1000  #normalize, freq given in mHz

# plot the first N points only
N = 4000
Example #9
     print("Cannot process station %s, no RESP file given" % tr.stats.station)
     continue
 # Cannot process a whole day file, split it into smaller chunks
 overlap = s2p(30.0, tr)
 olap = overlap
 samp = 0
 df = tr.stats.sampling_rate
 if trId(
         tr.stats
 )[1] != last_id or tr.stats.starttime - last_endtime > 1.0 / df:
     data_buf = np.array([], dtype='float64')
     olap = 0
 while samp < tr.stats.npts:
     data = tr.data[samp:samp + nfft - olap].astype('float64')
     data = np.concatenate((data_buf, data))
     data = detrend(data)
     # Correct for frequency response of instrument
     data = seisSim(data, tr.stats.sampling_rate, paz, inst_sim=inst)
     data /= (paz['sensitivity'] / 1e9)  # V/nm/s, correct for overall sensitivity
     data = recStalta(data, s2p(2.5, tr), s2p(10.0, tr))
     picked_values = triggerOnset(data, 3.0, 0.5, max_len=overlap)
     #
     for i, j in picked_values:
         begin = tr.stats.starttime + float(i + samp - olap) / df
         end = tr.stats.starttime + float(j + samp - olap) / df
         f.write("%s,%s,%s\n" %
                 (str(begin), str(end), tr.stats.station))
     olap = overlap  # only needed for first time in loop
     samp += nfft - overlap
     data_buf = data[-overlap:]
Example #10
# load data
tr = read("china.mseed")[0]
df, npts = (tr.stats.sampling_rate, tr.stats.npts)
tr.data = tr.data.astype('float64')  #convert to double

# lowpass at 30s and downsample to 10s
f0 = 1.0 / 50
tr.data = lowpass(tr.data, f0, df=df, corners=2)
tr.data = tr.data[0::10]  # take every 10th sample
df, npts = (.1, len(tr.data))  #redefine df and npts

# do the fourier transformation
#data = np.loadtxt("china8b.asc",usecols=[0], dtype='float64')
#tr.data -= tr.data.mean()
tr.data = detrend(tr.data, 'linear')
tr.data *= np.hanning(npts)
df = 0.1
fdat = np.fft.rfft(tr.data, n=4 * npts)  # smooth by padding with zeros
fdat /= abs(fdat).max()  #normalize to 1

# get the eigenmodes
eigen = np.loadtxt("eiglst",
                   usecols=[0, 1, 2, 3],
                   converters={1: stringToBool})
# only the S part
ind1 = eigen[:, 1].astype(bool)
ind2 = eigen[:, 0]
ind = ((ind2 == 0) & ind1)  #bitwise comparing for bool arrays
modes = eigen[ind, 3] / 1000  #normalize, freq given in mHz
Example #11
    def getFFTs(self, x, detrend=mlab.detrend_none,
                window=mlab.window_hanning):
        '''Get array of FFTs corresponding to each realization of `x`.

        Parameters:
        -----------
        x - array_like, (`N`,)
            Signal to be analyzed. Signal is split into several
            realizations, and the FFT of each realization is computed.
            [x] = arbitrary units

        detrend - string
            The function applied to each realization before taking FFT.
            May be [ 'default' | 'constant' | 'mean' | 'linear' | 'none']
            or callable, as specified in :py:func: `csd <matplotlib.mlab.csd>`.

            *Warning*: Naively detrending (even with something as simple as
            `mean` or `linear` detrending) can introduce detrimental artifacts
            into the computed spectrum, so *no* detrending is the default.

        window - callable or ndarray
            The window applied to each realization before taking FFT,
            as specified in :py:func: `csd <matplotlib.mlab.csd>`.

        Returns:
        --------
        Xk - array_like, (L, M, N) where
                L = `len(self.f)` = `(self.Npts_per_real // 2) + 1`,
                M = number of whole ensembles in data record `x`, and
                N = `self.Nreal_per_ens`

            The FFTs of each realization in each ensemble.
            The FFTs are indexed by frequency, ensemble, and realization.

            [Xk] = [x]

        '''
        # Only real-valued signals are expected/supported at the moment
        if np.iscomplexobj(x):
            raise ValueError('`x` must be a real-valued signal!')

        # Determine the number of *whole* ensembles in the data record
        # (Disregard fractional ensemble at the end of the data, if present)
        Nens = len(x) // self.Npts_per_ens

        # Determine number of frequencies in 1-sided FFT, noting that
        # `self.Npts_per_real` is constrained to be a power of 2
        Nf = (self.Npts_per_real // 2) + 1

        # Initialize.
        Xk = np.zeros(
            (Nf, Nens, self.Nreal_per_ens),
            dtype='complex')

        # Loop through each ensemble, computing the FFT of each realization
        # via strides for efficient use of memory. (Note that the below
        # procedure closely parallels that of Matplotlib's internal function
        #
        #     :py:func:`_spectral_helper <matplotlib.mlab._spectral_helper>`
        #
        # Here, we use our own implementation so as not to rely on
        # an internal function)
        stride_axis = 0
        for ens in np.arange(Nens):
            # Split the ensemble into realizations
            sl = slice(
                ens * self.Npts_per_ens,
                (ens + 1) * self.Npts_per_ens)

            result = mlab.stride_windows(
                x[sl],
                self.Npts_per_real,
                self.Npts_overlap,
                axis=stride_axis)

            # Detrend each realization
            result = mlab.detrend(
                result,
                detrend,
                axis=stride_axis)

            # Window each realization (power loss compensated outside loop)
            result, windowVals = mlab.apply_window(
                result,
                window,
                axis=stride_axis,
                return_window=True)

            # Finally compute and return the FFT of each realization
            Xk[:, ens, :] = np.fft.rfft(result, axis=stride_axis)

        # Compensate for windowing power loss
        norm = np.sqrt(np.mean((np.abs(windowVals)) ** 2))
        Xk /= norm

        return Xk
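
As an illustrative aside (not part of the original class): for a Hann window, the power-loss factor used in the final normalization above is approximately sqrt(3/8).

import numpy as np

w = np.hanning(1024)
norm = np.sqrt(np.mean(np.abs(w) ** 2))
print(norm, np.sqrt(3.0 / 8.0))   # ~0.612 for both, so dividing by it scales the FFTs up by ~1.63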
Example #12
        continue
    
    # merging
    try:
        st.merge(0)
    except Exception as e:
        summary.append("Error while merging:")
        summary.append(str(e))
        summary = "\n".join(summary)
        summary += "\n" + "\n".join(("%s=%s" % (k, v) for k, v in PAR.items()))
        open(SUMMARY, "at").write(summary + "\n")
        continue

    # preprocessing, keep original data for plotting at end
    for tr in st:
        tr.data = detrend(tr.data)
    st.simulate(paz_remove="self", paz_simulate=cornFreq2Paz(1.0), remove_sensitivity=False)
    st.sort()
    st_trigger = st.copy()
    st_trigger.filter("bandpass", freqmin=PAR.LOW, freqmax=PAR.HIGH, corners=1, zerophase=True)
    st.trim(T1, T2)
    st_trigger.trim(T1, T2)
    st_trigger.trigger("recstalta", sta=PAR.STA, lta=PAR.LTA)
    summary.append(str(st))

    # do the triggering
    trigger_list = []
    for tr in st_trigger:
        tr.stats.channel = "recstalta"
        max_len = PAR.MAXLEN * tr.stats.sampling_rate
        trigger_sample_list = triggerOnset(tr.data, PAR.ON, PAR.OFF, max_len=max_len)
Example #13
        continue

    # merging
    try:
        st.merge(0)
    except Exception as e:
        summary.append("Error while merging:")
        summary.append(str(e))
        summary = "\n".join(summary)
        summary += "\n" + "\n".join(("%s=%s" % (k, v) for k, v in PAR.items()))
        open(SUMMARY, "at").write(summary + "\n")
        continue

    # preprocessing, keep original data for plotting at end
    for tr in st:
        tr.data = detrend(tr.data)
    st.simulate(paz_remove="self",
                paz_simulate=cornFreq2Paz(1.0),
                remove_sensitivity=False)
    st.sort()
    st_trigger = st.copy()
    st_trigger.filter("bandpass",
                      freqmin=PAR.LOW,
                      freqmax=PAR.HIGH,
                      corners=1,
                      zerophase=True)
    st.trim(T1, T2)
    st_trigger.trim(T1, T2)
    st_trigger.trigger("recstalta", sta=PAR.STA, lta=PAR.LTA)
    summary.append(str(st))
Example #14
plt.plot(time, tr.data)
#print (starttime)                    #identify the start time of the seismogram
plt.title('CAN[LHZ], starting time:2011-03-11T05:46:24.000000Z')
plt.xlabel('Time [s]')
plt.ylabel('Counts')
plt.ylim(-3E4,3E4)
plt.show()

# + {"code_folding": [0]}
# Take a copy of the stream to avoid overwriting the original data
y = st.copy()[0].data                         

# Taper and Detrend Signal
taper_percentage = 0.1                                              # Percentage of tapering applied to signal
y_td = detrend(y * cosine_taper(npts, taper_percentage), 'linear')  # Taper and detrend signal

# Frequency Domain
y_fft = np.fft.rfft(y_td) 

# Zero padding
y_pad = np.lib.pad(y_td, (92289, 92289), 'constant', constant_values=(0, 0))
y_fftpad = np.fft.rfft(y_pad)

# + {"code_folding": []}
# Plotting parameter
plt.rcParams['figure.figsize'] = 10, 5
plt.rcParams['lines.linewidth'] = 0.5

# Plot Frequency spectrum in [0.2, 1] mHz window
freq = np.linspace(0, fNy, len(y_fft))  
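
One illustrative way to finish the plot described in the comment above (not from the original notebook; fNy is the Nyquist frequency defined earlier in it):

freq_pad = np.linspace(0, fNy, len(y_fftpad))       # frequency axis of the padded FFT
plt.plot(freq * 1e3, np.abs(y_fft), label='no zero padding')
plt.plot(freq_pad * 1e3, np.abs(y_fftpad), label='zero padded')
plt.xlim(0.2, 1.0)                                  # [0.2, 1] mHz window
plt.xlabel('Frequency [mHz]')
plt.ylabel('Amplitude')
plt.legend()
plt.show()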
Example #15
    def getFFTs(self,
                x,
                detrend=mlab.detrend_none,
                window=mlab.window_hanning):
        '''Get array of FFTs corresponding to each realization of `x`.

        Parameters:
        -----------
        x - array_like, (`N`,)
            Signal to be analyzed. Signal is split into several
            realizations, and the FFT of each realization is computed.
            [x] = arbitrary units

        detrend - string
            The function applied to each realization before taking FFT.
            May be [ 'default' | 'constant' | 'mean' | 'linear' | 'none']
            or callable, as specified in :py:func: `csd <matplotlib.mlab.csd>`.

            *Warning*: Naively detrending (even with something as simple as
            `mean` or `linear` detrending) can introduce detrimental artifacts
            into the computed spectrum, so *no* detrending is the default.

        window - callable or ndarray
            The window applied to each realization before taking FFT,
            as specified in :py:func: `csd <matplotlib.mlab.csd>`.

        Returns:
        --------
        Xk - array_like, (L, M, N) where
                L = `len(self.f)` = `(self.Npts_per_real // 2) + 1`,
                M = number of whole ensembles in data record `x`, and
                N = `self.Nreal_per_ens`

            The FFTs of each realization in each ensemble.
            The FFTs are indexed by frequency, ensemble, and realization.

            [Xk] = [x]

        '''
        # Only real-valued signals are expected/supported at the moment
        if np.iscomplexobj(x):
            raise ValueError('`x` must be a real-valued signal!')

        # Determine the number of *whole* ensembles in the data record
        # (Disregard fractional ensemble at the end of the data, if present)
        Nens = len(x) // self.Npts_per_ens

        # Determine number of frequencies in 1-sided FFT, noting that
        # `self.Npts_per_real` is constrained to be a power of 2
        Nf = (self.Npts_per_real // 2) + 1

        # Initialize.
        Xk = np.zeros((Nf, Nens, self.Nreal_per_ens), dtype='complex')

        # Loop through each ensemble, computing the FFT of each realization
        # via strides for efficient use of memory. (Note that the below
        # procedure closely parallels that of Matplotlib's internal function
        #
        #     :py:func:`_spectral_helper <matplotlib.mlab._spectral_helper>`
        #
        # Here, we use our own implementation so as not to rely on
        # an internal function)
        stride_axis = 0
        for ens in np.arange(Nens):
            # Split the ensemble into realizations
            sl = slice(ens * self.Npts_per_ens, (ens + 1) * self.Npts_per_ens)

            result = mlab.stride_windows(x[sl],
                                         self.Npts_per_real,
                                         self.Npts_overlap,
                                         axis=stride_axis)

            # Detrend each realization
            result = mlab.detrend(result, detrend, axis=stride_axis)

            # Window each realization (power loss compensated outside loop)
            result, windowVals = mlab.apply_window(result,
                                                   window,
                                                   axis=stride_axis,
                                                   return_window=True)

            # Finally compute and return the FFT of each realization
            Xk[:, ens, :] = np.fft.rfft(result, axis=stride_axis)

        # Compensate for windowing power loss
        norm = np.sqrt(np.mean((np.abs(windowVals))**2))
        Xk /= norm

        return Xk