Example #1
    def estimate(self, signal, sample_rate, start_time, end_time, debug=False):

        slen = len(signal)

        #compute DPSS tapers for signals
        NW = max(1, int((slen / sample_rate) * self.bandwidth))
        K = 2 * NW - 1

        tapers, eigs = ntalg.dpss_windows(slen, NW, K)
        ntapers = len(tapers)
        if debug:
            print(
                '[MultiTaperSpectrumEstimator.estimate] slen=%d, NW=%d, K=%d, bandwidth=%0.1f, ntapers: %d'
                % (slen, NW, K, self.bandwidth, ntapers))

        #compute a set of tapered signals
        s_tap = tapers * signal

        #compute the FFT of each tapered signal
        s_fft = fft(s_tap, axis=1)

        #throw away negative frequencies of the spectrum
        cspec_freq = fftfreq(slen, d=1.0 / sample_rate)
        nz = cspec_freq >= 0.0
        s_fft = s_fft[:, nz]
        flen = nz.sum()
        cspec_freq = cspec_freq[nz]
        #print '(1)cspec_freq.shape=',cspec_freq.shape
        #print '(1)s_fft.shape=',s_fft.shape

        #determine the weights used to combine the tapered signals
        if self.adaptive and ntapers > 1:
            #compute the adaptive weights
            weights, weights_dof = ntutils.adaptive_weights(
                s_fft, eigs, sides='twosided', max_iter=self.max_adaptive_iter)
        else:
            weights = np.ones([ntapers, flen]) / float(ntapers)

        #print '(1)weights.shape=',weights.shape

        def make_spectrum(signal, signal_weights):
            denom = (signal_weights**2).sum(axis=0)
            return (np.abs(signal * signal_weights)**2).sum(axis=0) / denom

        if self.jackknife:
            #do leave-one-out jackknife resampling to estimate the mean of the spectrum
            cspec_mean = np.zeros([flen], dtype='complex')
            for k in range(ntapers):
                index = list(range(ntapers))
                del index[k]
                #compute an estimate of the spectrum using all but the kth weight
                cspec_est = make_spectrum(s_fft[index, :], weights[index, :])
                cspec_diff = cspec_est - cspec_mean
                #do an online update of the mean spectrum
                cspec_mean += cspec_diff / (k + 1)
        else:
            #compute the average complex spectrum weighted across tapers
            cspec_mean = make_spectrum(s_fft, weights)

        return cspec_freq, cspec_mean.squeeze()
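
A minimal, self-contained sketch of the same pipeline, for readers who want to run it outside the class. The synthetic signal and the bandwidth value are illustrative assumptions; the nitime imports mirror the ntalg/ntutils names used above:

import numpy as np
import nitime.algorithms as ntalg
import nitime.utils as ntutils
from numpy.fft import fft, fftfreq

# synthetic 1 s test signal sampled at 1 kHz (illustrative)
sample_rate = 1000.0
t = np.arange(1000) / sample_rate
signal = np.sin(2 * np.pi * 50 * t) + 0.5 * np.random.randn(t.size)

slen = len(signal)
bandwidth = 5.0  # plays the role of self.bandwidth above (assumed value)
NW = max(1, int((slen / sample_rate) * bandwidth))
K = 2 * NW - 1

tapers, eigs = ntalg.dpss_windows(slen, NW, K)   # (K, slen) DPSS tapers
s_fft = fft(tapers * signal, axis=1)             # one FFT per tapered copy

# adaptive weights, then the weighted average across tapers
weights, _ = ntutils.adaptive_weights(s_fft, eigs, sides='twosided')
spec = (np.abs(s_fft * weights) ** 2).sum(axis=0) / (weights ** 2).sum(axis=0)
freqs = fftfreq(slen, d=1.0 / sample_rate)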
Example #2
    def estimate(self, signal, sample_rate, start_time, end_time, debug=False):

        slen = len(signal)

        #compute DPSS tapers for signals
        NW = max(1, int((slen / sample_rate)*self.bandwidth))
        K = 2*NW - 1

        tapers, eigs = ntalg.dpss_windows(slen, NW, K)
        ntapers = len(tapers)
        if debug:
            print('[MultiTaperSpectrumEstimator.estimate] slen=%d, NW=%d, K=%d, bandwidth=%0.1f, ntapers: %d' % (slen, NW, K, self.bandwidth, ntapers))

        #compute a set of tapered signals
        s_tap = tapers * signal

        #compute the FFT of each tapered signal
        s_fft = fft(s_tap, axis=1)

        #throw away negative frequencies of the spectrum
        cspec_freq = fftfreq(slen, d=1.0/sample_rate)
        nz = cspec_freq >= 0.0
        s_fft = s_fft[:, nz]
        flen = nz.sum()
        cspec_freq = cspec_freq[nz]
        #print '(1)cspec_freq.shape=',cspec_freq.shape
        #print '(1)s_fft.shape=',s_fft.shape

        #determine the weights used to combine the tapered signals
        if self.adaptive and ntapers > 1:
            #compute the adaptive weights
            weights, weights_dof = ntutils.adaptive_weights(s_fft, eigs, sides='twosided', max_iter=self.max_adaptive_iter)
        else:
            weights = np.ones([ntapers, flen]) / float(ntapers)

        #print '(1)weights.shape=',weights.shape

        def make_spectrum(signal, signal_weights):
            denom = (signal_weights**2).sum(axis=0)
            return (np.abs(signal * signal_weights)**2).sum(axis=0) / denom

        if self.jackknife:
            #do leave-one-out jackknife resampling to estimate the mean of the spectrum
            cspec_mean = np.zeros([flen], dtype='complex')
            for k in range(ntapers):
                index = list(range(ntapers))
                del index[k]
                #compute an estimate of the spectrum using all but the kth weight
                cspec_est = make_spectrum(s_fft[index, :], weights[index, :])
                cspec_diff = cspec_est - cspec_mean
                #do an online update of the mean spectrum
                cspec_mean += cspec_diff / (k+1)
        else:
            #compute the average complex spectrum weighted across tapers
            cspec_mean = make_spectrum(s_fft, weights)

        return cspec_freq, cspec_mean.squeeze()
Example #3
def test_mtm_cross_spectrum():
    """
    
    Test the multi-taper cross-spectral estimation. Based on the example in
    doc/examples/multi_taper_coh.py

    """ 
    NW = 4
    K = 2 * NW - 1

    N = 2 ** 10
    n_reps = 10
    n_freqs = N

    tapers, eigs = tsa.dpss_windows(N, NW, 2 * NW - 1)

    est_psd = []
    for k in range(n_reps):
        data, nz, alpha = utils.ar_generator(N=N)
        fgrid, hz = tsa.freq_response(1.0, a=np.r_[1, -alpha], n_freqs=n_freqs)
        # 'one-sided', so multiply by 2:
        psd = 2 * (hz * hz.conj()).real

        tdata = tapers * data

        tspectra = np.fft.fft(tdata)

        L = N // 2 + 1
        sides = 'onesided'
        w, _ = utils.adaptive_weights(tspectra, eigs, sides=sides)

        sxx = tsa.mtm_cross_spectrum(tspectra, tspectra, w, sides=sides)
        est_psd.append(sxx)

    fxx = np.mean(est_psd, 0)

    psd_ratio = np.mean(fxx / psd)

    # This is a rather lenient test, making sure that the average ratio is 1 to
    # within an order of magnitude. That is, that they are equal on average:
    npt.assert_array_almost_equal(psd_ratio, 1, decimal=1)

    # Test raising of error in case the inputs don't make sense:
    npt.assert_raises(ValueError,
                      tsa.mtm_cross_spectrum,
                      tspectra, np.r_[tspectra, tspectra],
                      (w, w))
Example #4
def test_mtm_cross_spectrum():
    """

    Test the multi-taper cross-spectral estimation. Based on the example in
    doc/examples/multi_taper_coh.py

    """
    NW = 4
    K = 2 * NW - 1

    N = 2**10
    n_reps = 10
    n_freqs = N

    tapers, eigs = tsa.dpss_windows(N, NW, 2 * NW - 1)

    est_psd = []
    for k in range(n_reps):
        data, nz, alpha = utils.ar_generator(N=N)
        fgrid, hz = tsa.freq_response(1.0, a=np.r_[1, -alpha], n_freqs=n_freqs)
        # 'one-sided', so multiply by 2:
        psd = 2 * (hz * hz.conj()).real

        tdata = tapers * data

        tspectra = fftpack.fft(tdata)

        L = N // 2 + 1
        sides = 'onesided'
        w, _ = utils.adaptive_weights(tspectra, eigs, sides=sides)

        sxx = tsa.mtm_cross_spectrum(tspectra, tspectra, w, sides=sides)
        est_psd.append(sxx)

    fxx = np.mean(est_psd, 0)

    psd_ratio = np.mean(fxx / psd)

    # This is a rather lenient test, making sure that the average ratio is 1 to
    # within an order of magnitude. That is, that they are equal on average:
    npt.assert_array_almost_equal(psd_ratio, 1, decimal=1)

    # Test raising of error in case the inputs don't make sense:
    npt.assert_raises(ValueError, tsa.mtm_cross_spectrum, tspectra,
                      np.r_[tspectra, tspectra], (w, w))
Example #5
    def weights(self):
        channel_n = self.input.data.shape[0]
        w = np.empty((channel_n, self.df, self._L))

        if self._adaptive:
            for i in range(channel_n):
                # this is always a one-sided spectrum?
                w[i] = tsu.adaptive_weights(self.spectra[i], self.eigs, sides="onesided")[0]

        # Set the weights to be the square root of the eigen-values:
        else:
            wshape = [1] * len(self.spectra.shape)
            wshape[0] = channel_n
            wshape[-2] = int(self.df)
            pre_w = np.sqrt(self.eigs) + np.zeros((wshape[0], self.eigs.shape[0]))

            w = pre_w.reshape(*wshape)

        return w
Example #6
    def weights(self):
        channel_n = self.input.data.shape[0]
        w = np.empty((channel_n, self.df, self._L))

        if self._adaptive:
            for i in range(channel_n):
                # this is always a one-sided spectrum?
                w[i] = tsu.adaptive_weights(self.spectra[i],
                                            self.eigs,
                                            sides='onesided')[0]

        # Set the weights to be the square root of the eigen-values:
        else:
            wshape = [1] * len(self.spectra.shape)
            wshape[0] = channel_n
            wshape[-2] = int(self.df)
            pre_w = np.sqrt(self.eigs) + np.zeros(
                (wshape[0], self.eigs.shape[0]))

            w = pre_w.reshape(*wshape)

        return w
Example #7
def multi_taper_psd(s, Fs=2 * np.pi, BW=None,  adaptive=False,
                    jackknife=True, low_bias=True, sides='default', NFFT=None):
    """Returns an estimate of the PSD function of s using the multitaper
    method. If the NW product, or the BW and Fs in Hz are not specified
    by the user, a bandwidth of 4 times the fundamental frequency,
    corresponding to NW = 4 will be used.

    Parameters
    ----------
    s : ndarray
       An array of sampled random processes, where the time axis is assumed to
       be on the last axis

    Fs: float
        Sampling rate of the signal

    BW: float
        The bandwidth of the windowing function will determine the number of
        tapers to use. This parameter represents a trade-off between frequency
        resolution (lower main lobe BW for the taper) and variance reduction
        (higher BW and number of averaged estimates).

    adaptive : {True/False}
       Use an adaptive weighting routine to combine the PSD estimates of
       different tapers.
    jackknife : {True/False}
       Use the jackknife method to make an estimate of the PSD variance
       at each point.
    low_bias : {True/False}
       Rather than use 2NW tapers, only use the tapers that have better than
       90% spectral concentration within the bandwidth (still using
       a maximum of 2NW tapers)
    sides : str (optional)   [ 'default' | 'onesided' | 'twosided' ]
         This determines which sides of the spectrum to return.
         For complex-valued inputs, the default is two-sided; for
         real-valued inputs, the default is one-sided.

    Returns
    -------
    (freqs, psd_est, var_or_nu) : ndarrays
        The first two arrays are the frequency points vector and the
        estimated PSD. The last returned array differs depending on whether
        the jackknife was used. It is either

        * The jackknife estimated variance of the log-psd, OR
        * The degrees of freedom in a chi2 model of how the estimated
          PSD is distributed about the true log-PSD (this is either
          2*floor(2*NW), or calculated from adaptive weights)
    """
    # have last axis be time series for now
    N = s.shape[-1] if not NFFT else NFFT
    rest_of_dims = s.shape[:-1]

    s = s.reshape(int(np.prod(rest_of_dims)), N)
    # de-mean this sucker
    s = utils.remove_bias(s, axis=-1)

    # Get the number of tapers from the sampling rate and the bandwidth:
    if BW is not None:
        NW = BW / (2 * Fs) * N
    else:
        NW = 4

    Kmax = int(2 * NW)

    dpss, eigs = dpss_windows(N, NW, Kmax)
    if low_bias:
        keepers = (eigs > 0.9)
        dpss = dpss[keepers]
        eigs = eigs[keepers]
        Kmax = len(dpss)

    # if the time series is a complex vector, a one sided PSD is invalid:
    if (sides == 'default' and np.iscomplexobj(s)) or sides == 'twosided':
        sides = 'twosided'
    elif sides in ('default', 'onesided'):
        sides = 'onesided'

    sig_sl = [slice(None)] * len(s.shape)
    sig_sl.insert(-1, np.newaxis)

    # tapered.shape is (..., Kmax, N)
    tapered = s[tuple(sig_sl)] * dpss
    # Find the direct spectral estimators S_k(f) for k tapered signals..
    # don't normalize the periodograms by 1/N as normal.. since the taper
    # windows are orthonormal, they effectively scale the signal by 1/N

    # XXX: scipy fft is faster
    tapered_spectra = fftpack.fft(tapered)

    last_freq = N // 2 + 1 if sides == 'onesided' else N

    # degrees of freedom at each timeseries, at each freq
    nu = np.empty((s.shape[0], last_freq))
    if adaptive:
        weights = np.empty(tapered_spectra.shape[:-1] + (last_freq,))
        for i in range(s.shape[0]):
            weights[i], nu[i] = utils.adaptive_weights(
                tapered_spectra[i], eigs, sides=sides
                )
    else:
        # let the weights simply be the square-root of the eigenvalues.
        # repeat these values across all n_chan channels of data
        n_chan = tapered.shape[0]
        weights = np.tile(np.sqrt(eigs), n_chan).reshape(n_chan, Kmax, 1)
        nu.fill(2 * Kmax)

    if jackknife:
        jk_var = np.empty_like(nu)
        for i in range(s.shape[0]):
            jk_var[i] = utils.jackknifed_sdf_variance(
                tapered_spectra[i], eigs, sides=sides, adaptive=adaptive
                )

    # Compute the unbiased spectral estimator for S(f) as the sum of
    # the S_k(f) weighted by the function w_k(f)**2, all divided by the
    # sum of the w_k(f)**2 over k

    # 1st, roll the tapers axis forward
    tapered_spectra = np.rollaxis(tapered_spectra, 1, start=0)
    weights = np.rollaxis(weights, 1, start=0)
    sdf_est = mtm_cross_spectrum(
        tapered_spectra, tapered_spectra, weights, sides=sides
        )

    if sides == 'onesided':
        freqs = np.linspace(0, Fs / 2, N // 2 + 1)
    else:
        freqs = np.linspace(0, Fs, N, endpoint=False)

    out_shape = rest_of_dims + (len(freqs),)
    sdf_est.shape = out_shape
    # XXX: always return nu and jk_var
    if jackknife:
        jk_var.shape = out_shape
        return freqs, sdf_est, jk_var
    else:
        nu.shape = out_shape
        return freqs, sdf_est, nu
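
A short usage sketch for the function above, assuming it is importable from nitime.algorithms; the input array and Fs below are illustrative:

import numpy as np
from nitime.algorithms import multi_taper_psd

Fs = 200.0
x = np.random.randn(8, 1024)   # 8 channels, time on the last axis

# adaptive weighting; with jackknife=False the third output is the
# chi2 degrees of freedom rather than the jackknife variance
freqs, psd, nu = multi_taper_psd(x, Fs=Fs, adaptive=True, jackknife=False)
print(freqs.shape, psd.shape)  # (513,) and (8, 513) for the one-sided case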
Example #8
def multi_taper_csd(s, Fs=2 * np.pi, NW=None, BW=None, low_bias=True,
                    adaptive=False, sides='default', NFFT=None):
    """Returns an estimate of the Cross Spectral Density (CSD) function
    between all (N choose 2) pairs of timeseries in s, using the multitaper
    method. If the NW product, or the BW and Fs in Hz are not specified by
    the user, a bandwidth of 4 times the fundamental frequency, corresponding
    to NW = 4 will be used.

    Parameters
    ----------
    s : ndarray
        An array of sampled random processes, where the time axis is
        assumed to be on the last axis. If ndim > 2, the number of time
        series to compare will still be taken as prod(s.shape[:-1])

    Fs : float, Sampling rate of the signal

    NW : float
        The normalized half-bandwidth of the data tapers, indicating a
        multiple of the fundamental frequency of the DFT (Fs/N).
        Common choices are n/2, for n >= 4. This parameter is unitless
        and more MATLAB compatible. As an alternative, set the BW
        parameter in Hz. See Notes on bandwidth.

    BW : float
        The sampling-relative bandwidth of the data tapers, in Hz.

    adaptive : {True, False}
       Use adaptive weighting to combine spectra

    low_bias : {True, False}
       Rather than use 2NW tapers, only use the tapers that have better than
       90% spectral concentration within the bandwidth (still using
       a maximum of 2NW tapers)

    sides : str (optional)   [ 'default' | 'onesided' | 'twosided' ]
         This determines which sides of the spectrum to return. For
         complex-valued inputs, the default is two-sided; for real-valued
         inputs, the default is one-sided.

    Returns
    -------
    (freqs, csd_est) : ndarrays
        The estimated CSD and the frequency points vector.
        The CSD{i,j}(f) are returned in a square "matrix" of vectors
        holding Sij(f). For an input array of (M,N), the output is (M,M,N)

    Notes
    -----

    The bandwidth of the windowing function will determine the number of
    tapers to use. This parameter represents a trade-off between frequency
    resolution (lower main lobe BW for the taper) and variance reduction
    (higher BW and number of averaged estimates). Typically, the number of
    tapers is calculated as 2x the bandwidth-to-fundamental-frequency
    ratio, as these eigenfunctions have the best energy concentration.

    """
    # have last axis be time series for now
    N = s.shape[-1]
    M = int(np.prod(s.shape[:-1]))

    if BW is not None:
        # BW wins in a contest (since it was the original implementation)
        norm_BW = np.round(BW * N / Fs)
        NW = norm_BW / 2.0
    elif NW is None:
        # default NW
        NW = 4
    # (else BW is None and NW is not None) ... all set
    Kmax = int(2 * NW)

    # if the time series is a complex vector, a one sided PSD is invalid:
    if (sides == 'default' and np.iscomplexobj(s)) or sides == 'twosided':
        sides = 'twosided'
    elif sides in ('default', 'onesided'):
        sides = 'onesided'

    # Find the direct spectral estimators S_k(f) for k tapered signals..
    # don't normalize the periodograms by 1/N as normal.. since the taper
    # windows are orthonormal, they effectively scale the signal by 1/N
    spectra, eigvals = tapered_spectra(
        s, (NW, Kmax), NFFT=NFFT, low_bias=low_bias
        )
    NFFT = spectra.shape[-1]
    K = len(eigvals)
    # collapse spectra's shape back down to 3 dimensions
    spectra.shape = (M, K, NFFT)

    # compute the cross-spectral density functions
    last_freq = NFFT // 2 + 1 if sides == 'onesided' else NFFT

    if adaptive:
        w = np.empty((M, K, last_freq))
        nu = np.empty((M, last_freq))
        for i in range(M):
            w[i], nu[i] = utils.adaptive_weights(
                spectra[i], eigvals, sides=sides
                )
    else:
        weights = np.sqrt(eigvals).reshape(K, 1)

    csd_pairs = np.zeros((M, M, last_freq), 'D')
    for i in range(M):
        if adaptive:
            wi = w[i]
        else:
            wi = weights
        for j in range(i + 1):
            if adaptive:
                wj = w[j]
            else:
                wj = weights
            ti = spectra[i]
            tj = spectra[j]
            csd_pairs[i, j] = mtm_cross_spectrum(ti, tj, (wi, wj), sides=sides)

    csdfs = csd_pairs.transpose(1, 0, 2).conj()
    csdfs += csd_pairs
    diag_idc = (np.arange(M), np.arange(M))
    csdfs[diag_idc] /= 2
    csdfs /= Fs

    if sides == 'onesided':
        freqs = np.linspace(0, Fs / 2, NFFT // 2 + 1)
    else:
        freqs = np.linspace(0, Fs, NFFT, endpoint=False)

    return freqs, csdfs
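
A usage sketch for multi_taper_csd, assuming the nitime import below; the inputs are illustrative. Note the (M, M, n_freqs) layout of the output:

import numpy as np
from nitime.algorithms import multi_taper_csd

Fs = 100.0
s = np.random.randn(3, 512)    # M = 3 time series, time on the last axis

freqs, csd = multi_taper_csd(s, Fs=Fs, adaptive=False)
print(csd.shape)               # (3, 3, 257): Sij(f) for every pair i, j
psd_0 = csd[0, 0].real         # the diagonal holds each series' own PSD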
Example #9
def multi_taper_psd(
        s, Fs=2 * np.pi, NW=None, BW=None, adaptive=False,
        jackknife=True, low_bias=True, sides='default', NFFT=None
        ):
    """Returns an estimate of the PSD function of s using the multitaper
    method. If the NW product, or the BW and Fs in Hz are not specified
    by the user, a bandwidth of 4 times the fundamental frequency,
    corresponding to NW = 4 will be used.

    Parameters
    ----------
    s : ndarray
       An array of sampled random processes, where the time axis is assumed to
       be on the last axis

    Fs : float
        Sampling rate of the signal

    NW : float
        The normalized half-bandwidth of the data tapers, indicating a
        multiple of the fundamental frequency of the DFT (Fs/N).
        Common choices are n/2, for n >= 4. This parameter is unitless
        and more MATLAB compatible. As an alternative, set the BW
        parameter in Hz. See Notes on bandwidth.

    BW : float
        The sampling-relative bandwidth of the data tapers, in Hz.

    adaptive : {True/False}
       Use an adaptive weighting routine to combine the PSD estimates of
       different tapers.

    jackknife : {True/False}
       Use the jackknife method to make an estimate of the PSD variance
       at each point.

    low_bias : {True/False}
       Rather than use 2NW tapers, only use the tapers that have better than
       90% spectral concentration within the bandwidth (still using
       a maximum of 2NW tapers)

    sides : str (optional)   [ 'default' | 'onesided' | 'twosided' ]
         This determines which sides of the spectrum to return.
         For complex-valued inputs, the default is two-sided; for
         real-valued inputs, the default is one-sided.

    Returns
    -------
    (freqs, psd_est, var_or_nu) : ndarrays
        The first two arrays are the frequency points vector and the
        estimated PSD. The last returned array differs depending on whether
        the jackknife was used. It is either

        * The jackknife estimated variance of the log-psd, OR
        * The degrees of freedom in a chi2 model of how the estimated
          PSD is distributed about the true log-PSD (this is either
          2*floor(2*NW), or calculated from adaptive weights)

    Notes
    -----

    The bandwidth of the windowing function will determine the number of
    tapers to use. This parameter represents a trade-off between frequency
    resolution (lower main lobe BW for the taper) and variance reduction
    (higher BW and number of averaged estimates). Typically, the number of
    tapers is calculated as 2x the bandwidth-to-fundamental-frequency
    ratio, as these eigenfunctions have the best energy concentration.

    """
    # have last axis be time series for now
    N = s.shape[-1]
    M = int(np.prod(s.shape[:-1]))

    if BW is not None:
        # BW wins in a contest (since it was the original implementation)
        norm_BW = np.round(BW * N / Fs)
        NW = norm_BW / 2.0
    elif NW is None:
        # default NW
        NW = 4
    # (else BW is None and NW is not None) ... all set
    Kmax = int(2 * NW)

    # if the time series is a complex vector, a one sided PSD is invalid:
    if (sides == 'default' and np.iscomplexobj(s)) or sides == 'twosided':
        sides = 'twosided'
    elif sides in ('default', 'onesided'):
        sides = 'onesided'

    # Find the direct spectral estimators S_k(f) for k tapered signals..
    # don't normalize the periodograms by 1/N as normal.. since the taper
    # windows are orthonormal, they effectively scale the signal by 1/N
    spectra, eigvals = tapered_spectra(
        s, (NW, Kmax), NFFT=NFFT, low_bias=low_bias
        )
    NFFT = spectra.shape[-1]
    K = len(eigvals)
    # collapse spectra's shape back down to 3 dimensions
    spectra.shape = (M, K, NFFT)

    last_freq = NFFT // 2 + 1 if sides == 'onesided' else NFFT

    # degrees of freedom at each timeseries, at each freq
    nu = np.empty((M, last_freq))
    if adaptive:
        weights = np.empty((M, K, last_freq))
        for i in range(M):
            weights[i], nu[i] = utils.adaptive_weights(
                spectra[i], eigvals, sides=sides
                )
    else:
        # let the weights simply be the square-root of the eigenvalues.
        # repeat these values across all n_chan channels of data
        weights = np.tile(np.sqrt(eigvals), M).reshape(M, K, 1)
        nu.fill(2 * K)

    if jackknife:
        jk_var = np.empty_like(nu)
        for i in range(M):
            jk_var[i] = utils.jackknifed_sdf_variance(
                spectra[i], eigvals, sides=sides, adaptive=adaptive
                )

    # Compute the unbiased spectral estimator for S(f) as the sum of
    # the S_k(f) weighted by the function w_k(f)**2, all divided by the
    # sum of the w_k(f)**2 over k

    # 1st, roll the tapers axis forward
    spectra = np.rollaxis(spectra, 1, start=0)
    weights = np.rollaxis(weights, 1, start=0)
    sdf_est = mtm_cross_spectrum(
        spectra, spectra, weights, sides=sides
        )
    sdf_est /= Fs

    if sides == 'onesided':
        freqs = np.linspace(0, Fs / 2, NFFT // 2 + 1)
    else:
        freqs = np.linspace(0, Fs, NFFT, endpoint=False)

    out_shape = s.shape[:-1] + (len(freqs),)
    sdf_est.shape = out_shape
    if jackknife:
        jk_var.shape = out_shape
        return freqs, sdf_est, jk_var
    else:
        nu.shape = out_shape
        return freqs, sdf_est, nu
Example #10
def compute_coherence_original(s1,
                               s2,
                               sample_rate,
                               bandwidth,
                               jackknife=False,
                               tanh_transform=False):
    """
        An implementation of computing the coherence. Don't use this.
    """

    minlen = min(len(s1), len(s2))
    if s1.shape != s2.shape:
        s1 = s1[:minlen]
        s2 = s2[:minlen]

    window_length = len(s1) / sample_rate
    window_length_bins = int(window_length * sample_rate)

    #compute DPSS tapers for signals
    NW = int(window_length * bandwidth)
    K = 2 * NW - 1
    print('compute_coherence: NW=%d, K=%d' % (NW, K))
    tapers, eigs = ntalg.dpss_windows(window_length_bins, NW, K)

    njn = len(eigs)
    jn_indices = [list(range(njn))]
    #compute jackknife indices
    if jackknife:
        jn_indices = list()
        for i in range(len(eigs)):
            jn = list(range(len(eigs)))
            jn.remove(i)
            jn_indices.append(jn)

    #taper the signals
    s1_tap = tapers * s1
    s2_tap = tapers * s2

    #compute fft of tapered signals
    s1_fft = fftpack.fft(s1_tap, axis=1)
    s2_fft = fftpack.fft(s2_tap, axis=1)

    #compute adaptive weights for each taper
    w1, nu1 = ntutils.adaptive_weights(s1_fft, eigs, sides='onesided')
    w2, nu2 = ntutils.adaptive_weights(s2_fft, eigs, sides='onesided')

    coherence_estimates = list()
    for jn in jn_indices:

        #compute cross spectral density
        sxy = ntalg.mtm_cross_spectrum(s1_fft[jn, :],
                                       s2_fft[jn, :], (w1[jn], w2[jn]),
                                       sides='onesided')

        #compute individual power spectra
        sxx = ntalg.mtm_cross_spectrum(s1_fft[jn, :],
                                       s1_fft[jn, :],
                                       w1[jn],
                                       sides='onesided')
        syy = ntalg.mtm_cross_spectrum(s2_fft[jn, :],
                                       s2_fft[jn, :],
                                       w2[jn],
                                       sides='onesided')

        #compute coherence
        coherence = np.abs(sxy)**2 / (sxx * syy)
        coherence_estimates.append(coherence)

    #compute variance
    coherence_estimates = np.array(coherence_estimates)
    coherence_variance = np.zeros([coherence_estimates.shape[1]])
    coherence_mean = coherence_estimates[0]
    if jackknife:
        coherence_mean = coherence_estimates.mean(axis=0)
        #mean subtract and square
        cv = np.sum((coherence_estimates - coherence_mean)**2, axis=0)
        coherence_variance[:] = (1.0 - 1.0 / njn) * cv

    #compute frequencies
    sampint = 1.0 / sample_rate
    L = minlen // 2 + 1
    freq = np.linspace(0, 1 / (2 * sampint), L)

    #compute upper and lower bounds
    cmean = coherence_mean
    coherence_lower = cmean - 2 * np.sqrt(coherence_variance)
    coherence_upper = cmean + 2 * np.sqrt(coherence_variance)

    cdata = CoherenceData()
    cdata.coherence = coherence_mean
    cdata.coherence_lower = coherence_lower
    cdata.coherence_upper = coherence_upper
    cdata.frequency = freq
    cdata.sample_rate = sample_rate

    return cdata
Example #11
def compare_weight_methods(spectra, eigvals):
    L = spectra.shape[-1]
    fxf_weights, _ = utils.adaptive_weights_cython(spectra, eigvals, L)
    vec_weights, _ = utils.adaptive_weights(spectra, eigvals, L)
    err = np.abs(fxf_weights - vec_weights)
    return err
Example #12
def compute_coherence_original(s1, s2, sample_rate, bandwidth, jackknife=False, tanh_transform=False):
    """
        An implementation of computing the coherence. Don't use this.
    """

    minlen = min(len(s1), len(s2))
    if s1.shape != s2.shape:
        s1 = s1[:minlen]
        s2 = s2[:minlen]

    window_length = len(s1) / sample_rate
    window_length_bins = int(window_length * sample_rate)

    #compute DPSS tapers for signals
    NW = int(window_length*bandwidth)
    K = 2*NW - 1
    print('compute_coherence: NW=%d, K=%d' % (NW, K))
    tapers, eigs = ntalg.dpss_windows(window_length_bins, NW, K)

    njn = len(eigs)
    jn_indices = [list(range(njn))]
    #compute jackknife indices
    if jackknife:
        jn_indices = list()
        for i in range(len(eigs)):
            jn = list(range(len(eigs)))
            jn.remove(i)
            jn_indices.append(jn)

    #taper the signals
    s1_tap = tapers * s1
    s2_tap = tapers * s2

    #compute fft of tapered signals
    s1_fft = fftpack.fft(s1_tap, axis=1)
    s2_fft = fftpack.fft(s2_tap, axis=1)

    #compute adaptive weights for each taper
    w1, nu1 = ntutils.adaptive_weights(s1_fft, eigs, sides='onesided')
    w2, nu2 = ntutils.adaptive_weights(s2_fft, eigs, sides='onesided')

    coherence_estimates = list()
    for jn in jn_indices:

        #compute cross spectral density
        sxy = ntalg.mtm_cross_spectrum(s1_fft[jn, :], s2_fft[jn, :], (w1[jn], w2[jn]), sides='onesided')

    #compute individual power spectra
        sxx = ntalg.mtm_cross_spectrum(s1_fft[jn, :], s1_fft[jn, :], w1[jn], sides='onesided')
        syy = ntalg.mtm_cross_spectrum(s2_fft[jn, :], s2_fft[jn, :], w2[jn], sides='onesided')

        #compute coherence
        coherence = np.abs(sxy)**2 / (sxx * syy)
        coherence_estimates.append(coherence)

    #compute variance
    coherence_estimates = np.array(coherence_estimates)
    coherence_variance = np.zeros([coherence_estimates.shape[1]])
    coherence_mean = coherence_estimates[0]
    if jackknife:
        coherence_mean = coherence_estimates.mean(axis=0)
        #mean subtract and square
        cv = np.sum((coherence_estimates - coherence_mean)**2, axis=0)
        coherence_variance[:] = (1.0 - 1.0/njn) * cv

    #compute frequencies
    sampint = 1.0 / sample_rate
    L = minlen // 2 + 1
    freq = np.linspace(0, 1 / (2 * sampint), L)

    #compute upper and lower bounds
    cmean = coherence_mean
    coherence_lower = cmean - 2*np.sqrt(coherence_variance)
    coherence_upper = cmean + 2*np.sqrt(coherence_variance)

    cdata = CoherenceData()
    cdata.coherence = coherence_mean
    cdata.coherence_lower = coherence_lower
    cdata.coherence_upper = coherence_upper
    cdata.frequency = freq
    cdata.sample_rate = sample_rate

    return cdata
Example #13
"""
"""

Test 2
------

Now we'll compare multitaper baseband power estimation with regular
Hilbert transform method under more realistic non-narrowband
conditions.

"""

# MT method
xk = nt_alg.tapered_spectra(s_mod, dpss, NFFT=nfft)
w, n = nt_ut.adaptive_weights(xk, eigs, sides='onesided')
mtm_bband = np.sum(2 * (xk[:, fm] * np.sqrt(eigs))[:, None] * dpss, axis=0)

# Hilbert transform method
hb_bband = signal.hilbert(s_mod, N=nfft)[:N]

pp.figure()
pp.subplot(211)
pp.plot(s_mod, 'g')
pp.plot(np.abs(mtm_bband), color='b', linewidth=3)
pp.title('Multitaper Baseband Power')

pp.subplot(212)
pp.plot(s_mod, 'g')
pp.plot(np.abs(hb_bband), color='b', linewidth=3)
pp.title('Hilbert Baseband Power')
Example #14
def multitaper_cross_spectral_estimates(traces,
                                        delta,
                                        NW,
                                        compute_confidence_intervals=True,
                                        confidence_interval=0.95):

    # Define the number of tapers, their values and associated eigenvalues:
    npts = len(traces[0])
    K = 2 * NW - 1
    tapers, eigs = alg.dpss_windows(npts, NW, K)

    # Multiply the data by the tapers, calculate the Fourier transform
    # We multiply the data by the tapers and derive the fourier transform and the
    # magnitude of the squared spectra (the power) for each tapered time-series:
    tdata = tapers[None, :, :] * traces[:, None, :]
    tspectra = fftpack.fft(tdata)

    # The coherency for real sequences is symmetric, so only half
    # the spectrum is required
    L = npts // 2 + 1

    if L < npts:
        freqs = np.linspace(0, 1. / (2. * delta), L)
    else:
        freqs = np.linspace(0, 1. / delta, L, endpoint=False)

    # Estimate adaptive weighting of the tapers, based on the data
    # (see Thomson, 2007; 10.1109/MSP.2007.4286561)
    w = np.empty((2, K, L))
    for i in range(2):
        w[i], _ = utils.adaptive_weights(tspectra[i], eigs, sides='onesided')

    # Calculate the multi-tapered cross spectrum
    # and the PSDs for the two time-series:
    sxy = alg.mtm_cross_spectrum(tspectra[0],
                                 tspectra[1], (w[0], w[1]),
                                 sides='onesided')
    sxx = alg.mtm_cross_spectrum(tspectra[0],
                                 tspectra[0],
                                 w[0],
                                 sides='onesided')
    syy = alg.mtm_cross_spectrum(tspectra[1],
                                 tspectra[1],
                                 w[1],
                                 sides='onesided')

    Z = sxy / syy

    spectral_estimates = {}
    spectral_estimates['frequencies'] = freqs
    spectral_estimates['magnitude_squared_coherence'] = np.abs(sxy)**2 / (sxx *
                                                                          syy)
    spectral_estimates['transfer_function'] = Z  # Transfer function
    spectral_estimates['admittance'] = np.real(Z)
    spectral_estimates['gain'] = np.absolute(Z)
    spectral_estimates['phase'] = np.angle(Z, deg=True)

    # Estimate confidence intervals
    if compute_confidence_intervals:
        spectral_estimates['confidence_bounds'] = {}
        c_bnds = [
            0.5 - confidence_interval / 2., 0.5 + confidence_interval / 2.
        ]
        variances = jackknifed_variances(tspectra[0],
                                         tspectra[1],
                                         eigs,
                                         adaptive=True)
        spectral_estimates['confidence_bounds']['admittance'] = [
            spectral_estimates['admittance'] +
            dist.t.ppf(c_bnds[0], K - 1) * np.sqrt(variances['admittance']),
            spectral_estimates['admittance'] +
            dist.t.ppf(c_bnds[1], K - 1) * np.sqrt(variances['admittance'])
        ]
        spectral_estimates['confidence_bounds']['gain'] = [
            spectral_estimates['gain'] +
            dist.t.ppf(c_bnds[0], K - 1) * np.sqrt(variances['gain']),
            spectral_estimates['gain'] +
            dist.t.ppf(c_bnds[1], K - 1) * np.sqrt(variances['gain'])
        ]
        spectral_estimates['confidence_bounds']['phase'] = [
            spectral_estimates['phase'] +
            dist.t.ppf(c_bnds[0], K - 1) * np.sqrt(variances['phase']),
            spectral_estimates['phase'] +
            dist.t.ppf(c_bnds[1], K - 1) * np.sqrt(variances['phase'])
        ]
        spectral_estimates['confidence_bounds'][
            'magnitude_squared_coherence'] = [
                spectral_estimates['magnitude_squared_coherence'] +
                dist.t.ppf(c_bnds[0], K - 1) *
                np.sqrt(variances['magnitude_squared_coherence']),
                spectral_estimates['magnitude_squared_coherence'] +
                dist.t.ppf(c_bnds[1], K - 1) *
                np.sqrt(variances['magnitude_squared_coherence'])
            ]

    return spectral_estimates
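
A hypothetical driver for the function above. The traces, sampling interval, and NW below are illustrative; confidence intervals are disabled so that jackknifed_variances is not needed, and the function's own module-level imports (alg, fftpack, utils) are assumed to be in scope:

import numpy as np

delta = 0.01                   # 100 Hz sampling (assumed)
n = 1024
t = np.arange(n) * delta
traces = np.vstack([np.sin(2 * np.pi * 5 * t) + 0.1 * np.random.randn(n),
                    np.cos(2 * np.pi * 5 * t) + 0.1 * np.random.randn(n)])

est = multitaper_cross_spectral_estimates(traces, delta, NW=4,
                                          compute_confidence_intervals=False)
print(est['frequencies'].shape, est['gain'].shape)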
Example #15
def jackknifed_variances(tx, ty, eigvals, adaptive=True, deg=True):
    """
    Returns the variance of the admittance (real-part), 
    gain (modulus) and phase of the transfer function and 
    gamma^2 (modulus-squared coherence) between x and y, 
    estimated through jack-knifing the tapered samples in {tx, ty}.

    Parameters
    ----------

    tx : ndarray, (K, L)
       The K complex spectra of tapered timeseries x
    ty : ndarray, (K, L)
       The K complex spectra of tapered timeseries y
    eigvals : ndarray (K,)
       The eigenvalues associated with the K DPSS tapers

    Returns
    -------

    jk_var : dictionary of ndarrays 
       (entries are 'admittance', 'gain', 'phase', 
       'magnitude_squared_coherence')
       The variance computed in the transformed domain
    """

    K = tx.shape[0]

    # calculate leave-one-out estimates of the admittance
    jk_admittance = []
    jk_gain = []
    jk_phase = []
    jk_magnitude_squared_coherence = []
    sides = 'onesided'
    all_orders = set(range(K))

    import nitime.algorithms as alg

    # get the leave-one-out estimates
    for i in range(K):
        items = list(all_orders.difference([i]))
        tx_i = np.take(tx, items, axis=0)
        ty_i = np.take(ty, items, axis=0)
        eigs_i = np.take(eigvals, items)
        if adaptive:
            wx, _ = utils.adaptive_weights(tx_i, eigs_i, sides=sides)
            wy, _ = utils.adaptive_weights(ty_i, eigs_i, sides=sides)
        else:
            wx = wy = eigs_i[:, None]
        # The CSD
        sxy_i = alg.mtm_cross_spectrum(tx_i, ty_i, (wx, wy), sides=sides)
        # The PSDs
        sxx_i = alg.mtm_cross_spectrum(tx_i, tx_i, wx, sides=sides)
        syy_i = alg.mtm_cross_spectrum(ty_i, ty_i, wy, sides=sides)

        # these are the Zr_i samples
        Z = sxy_i / syy_i
        jk_admittance.append(np.real(Z))
        jk_gain.append(np.absolute(Z))
        jk_phase.append(np.angle(Z, deg=deg))
        jk_magnitude_squared_coherence.append(
            np.abs(sxy_i)**2 / (sxx_i * syy_i))

    # The jackknifed variance is equal to
    # (K-1)/K * sum_i ( (x_i - mean(x_i))^2 )
    jk_var = {}
    for (name, jk_estimates) in [('admittance', np.array(jk_admittance)),
                                 ('gain', np.array(jk_gain)),
                                 ('phase', np.array(jk_phase)),
                                 ('magnitude_squared_coherence',
                                  np.array(jk_magnitude_squared_coherence))]:
        jk_avg = np.mean(jk_estimates, axis=0)
        jk_var[name] = (float(K - 1.) / K) * (np.power(
            (jk_estimates - jk_avg), 2.)).sum(axis=0)

    return jk_var
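
A usage sketch for jackknifed_variances, assuming nitime and scipy are available and that the utils module referenced inside the function is nitime.utils; the synthetic series below are illustrative:

import numpy as np
import nitime.algorithms as alg
from scipy import fftpack

# build the K tapered spectra for two series
N, NW = 1024, 4
K = 2 * NW - 1
tapers, eigvals = alg.dpss_windows(N, NW, K)
tx = fftpack.fft(tapers * np.random.randn(N))
ty = fftpack.fft(tapers * np.random.randn(N))

jk_var = jackknifed_variances(tx, ty, eigvals, adaptive=True)
print(sorted(jk_var))  # ['admittance', 'gain', 'magnitude_squared_coherence', 'phase']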
Example #16
NW = 4
K = 2*NW-1
tapers, eigs = alg.dpss_windows(n_samples, NW, 2*NW-1)

tdata = tapers[None, :, :] * pdata[:, None, :]

tspectra = np.fft.fft(tdata)
mag_sqr_spectra = np.abs(tspectra)
np.power(mag_sqr_spectra, 2, mag_sqr_spectra)
# Only compute half the spectrum.. coherence for real sequences is symmetric
L = n_samples // 2 + 1
#L = n_samples
w = np.empty((nseq, K, L))
for i in range(nseq):
   w[i], _ = utils.adaptive_weights(mag_sqr_spectra[i], eigs, L)

# calculate the coherence
csd_mat = np.zeros((nseq, nseq, L), 'D')
psd_mat = np.zeros((2, nseq, nseq, L), 'd')
coh_mat = np.zeros((nseq, nseq, L), 'd')
coh_var = np.zeros_like(coh_mat)
for i in range(nseq):
   for j in range(i):
      sxy = alg.mtm_cross_spectrum(
         tspectra[i], tspectra[j], (w[i], w[j]), sides='onesided'
         )
      sxx = alg.mtm_cross_spectrum(
         tspectra[i], tspectra[i], (w[i], w[i]), sides='onesided'
         ).real
      syy = alg.mtm_cross_spectrum(
         tspectra[j], tspectra[j], (w[j], w[j]), sides='onesided'
         ).real
Example #17
def multi_taper_csd(s,
                    Fs=2 * np.pi,
                    NW=None,
                    BW=None,
                    low_bias=True,
                    adaptive=False,
                    sides='default',
                    NFFT=None):
    """Returns an estimate of the Cross Spectral Density (CSD) function
    between all (N choose 2) pairs of timeseries in s, using the multitaper
    method. If the NW product, or the BW and Fs in Hz are not specified by
    the user, a bandwidth of 4 times the fundamental frequency, corresponding
    to NW = 4 will be used.

    Parameters
    ----------
    s : ndarray
        An array of sampled random processes, where the time axis is
        assumed to be on the last axis. If ndim > 2, the number of time
        series to compare will still be taken as prod(s.shape[:-1])

    Fs : float, Sampling rate of the signal

    NW : float
        The normalized half-bandwidth of the data tapers, indicating a
        multiple of the fundamental frequency of the DFT (Fs/N).
        Common choices are n/2, for n >= 4. This parameter is unitless
        and more MATLAB compatible. As an alternative, set the BW
        parameter in Hz. See Notes on bandwidth.

    BW : float
        The sampling-relative bandwidth of the data tapers, in Hz.

    adaptive : {True, False}
       Use adaptive weighting to combine spectra

    low_bias : {True, False}
       Rather than use 2NW tapers, only use the tapers that have better than
       90% spectral concentration within the bandwidth (still using
       a maximum of 2NW tapers)

    sides : str (optional)   [ 'default' | 'onesided' | 'twosided' ]
         This determines which sides of the spectrum to return. For
         complex-valued inputs, the default is two-sided; for real-valued
         inputs, the default is one-sided.

    Returns
    -------
    (freqs, csd_est) : ndarrays
        The estimated CSD and the frequency points vector.
        The CSD{i,j}(f) are returned in a square "matrix" of vectors
        holding Sij(f). For an input array of (M,N), the output is (M,M,N)

    Notes
    -----

    The bandwidth of the windowing function will determine the number of
    tapers to use. This parameter represents a trade-off between frequency
    resolution (lower main lobe BW for the taper) and variance reduction
    (higher BW and number of averaged estimates). Typically, the number of
    tapers is calculated as 2x the bandwidth-to-fundamental-frequency
    ratio, as these eigenfunctions have the best energy concentration.

    """
    # have last axis be time series for now
    N = s.shape[-1]
    M = int(np.prod(s.shape[:-1]))

    if BW is not None:
        # BW wins in a contest (since it was the original implementation)
        norm_BW = np.round(BW * N / Fs)
        NW = norm_BW / 2.0
    elif NW is None:
        # default NW
        NW = 4
    # (else BW is None and NW is not None) ... all set
    Kmax = int(2 * NW)

    # if the time series is a complex vector, a one sided PSD is invalid:
    if (sides == 'default' and np.iscomplexobj(s)) or sides == 'twosided':
        sides = 'twosided'
    elif sides in ('default', 'onesided'):
        sides = 'onesided'

    # Find the direct spectral estimators S_k(f) for k tapered signals..
    # don't normalize the periodograms by 1/N as normal.. since the taper
    # windows are orthonormal, they effectively scale the signal by 1/N
    spectra, eigvals = tapered_spectra(s, (NW, Kmax),
                                       NFFT=NFFT,
                                       low_bias=low_bias)
    NFFT = spectra.shape[-1]
    K = len(eigvals)
    # collapse spectra's shape back down to 3 dimensions
    spectra.shape = (M, K, NFFT)

    # compute the cross-spectral density functions
    last_freq = NFFT // 2 + 1 if sides == 'onesided' else NFFT

    if adaptive:
        w = np.empty((M, K, last_freq))
        nu = np.empty((M, last_freq))
        for i in range(M):
            w[i], nu[i] = utils.adaptive_weights(spectra[i],
                                                 eigvals,
                                                 sides=sides)
    else:
        weights = np.sqrt(eigvals).reshape(K, 1)

    csd_pairs = np.zeros((M, M, last_freq), 'D')
    for i in range(M):
        if adaptive:
            wi = w[i]
        else:
            wi = weights
        for j in range(i + 1):
            if adaptive:
                wj = w[j]
            else:
                wj = weights
            ti = spectra[i]
            tj = spectra[j]
            csd_pairs[i, j] = mtm_cross_spectrum(ti, tj, (wi, wj), sides=sides)

    csdfs = csd_pairs.transpose(1, 0, 2).conj()
    csdfs += csd_pairs
    diag_idc = (np.arange(M), np.arange(M))
    csdfs[diag_idc] /= 2
    csdfs /= Fs

    if sides == 'onesided':
        freqs = np.linspace(0, Fs / 2, NFFT // 2 + 1)
    else:
        freqs = np.linspace(0, Fs, NFFT, endpoint=False)

    return freqs, csdfs
Example #18
def multi_taper_psd(s,
                    Fs=2 * np.pi,
                    NW=None,
                    BW=None,
                    adaptive=False,
                    jackknife=True,
                    low_bias=True,
                    sides='default',
                    NFFT=None):
    """Returns an estimate of the PSD function of s using the multitaper
    method. If the NW product, or the BW and Fs in Hz are not specified
    by the user, a bandwidth of 4 times the fundamental frequency,
    corresponding to NW = 4 will be used.

    Parameters
    ----------
    s : ndarray
       An array of sampled random processes, where the time axis is assumed to
       be on the last axis

    Fs : float
        Sampling rate of the signal

    NW : float
        The normalized half-bandwidth of the data tapers, indicating a
        multiple of the fundamental frequency of the DFT (Fs/N).
        Common choices are n/2, for n >= 4. This parameter is unitless
        and more MATLAB compatible. As an alternative, set the BW
        parameter in Hz. See Notes on bandwidth.

    BW : float
        The sampling-relative bandwidth of the data tapers, in Hz.

    adaptive : {True/False}
       Use an adaptive weighting routine to combine the PSD estimates of
       different tapers.

    jackknife : {True/False}
       Use the jackknife method to make an estimate of the PSD variance
       at each point.

    low_bias : {True/False}
       Rather than use 2NW tapers, only use the tapers that have better than
       90% spectral concentration within the bandwidth (still using
       a maximum of 2NW tapers)

    sides : str (optional)   [ 'default' | 'onesided' | 'twosided' ]
         This determines which sides of the spectrum to return.
         For complex-valued inputs, the default is two-sided; for
         real-valued inputs, the default is one-sided.

    Returns
    -------
    (freqs, psd_est, var_or_nu) : ndarrays
        The first two arrays are the frequency points vector and the
        estimated PSD. The last returned array differs depending on whether
        the jackknife was used. It is either

        * The jackknife estimated variance of the log-psd, OR
        * The degrees of freedom in a chi2 model of how the estimated
          PSD is distributed about the true log-PSD (this is either
          2*floor(2*NW), or calculated from adaptive weights)

    Notes
    -----

    The bandwidth of the windowing function will determine the number of
    tapers to use. This parameter represents a trade-off between frequency
    resolution (lower main lobe BW for the taper) and variance reduction
    (higher BW and number of averaged estimates). Typically, the number of
    tapers is calculated as 2x the bandwidth-to-fundamental-frequency
    ratio, as these eigenfunctions have the best energy concentration.

    """
    # have last axis be time series for now
    N = s.shape[-1]
    M = int(np.prod(s.shape[:-1]))

    if BW is not None:
        # BW wins in a contest (since it was the original implementation)
        norm_BW = np.round(BW * N / Fs)
        NW = norm_BW / 2.0
    elif NW is None:
        # default NW
        NW = 4
    # (else BW is None and NW is not None) ... all set
    Kmax = int(2 * NW)

    # if the time series is a complex vector, a one sided PSD is invalid:
    if (sides == 'default' and np.iscomplexobj(s)) or sides == 'twosided':
        sides = 'twosided'
    elif sides in ('default', 'onesided'):
        sides = 'onesided'

    # Find the direct spectral estimators S_k(f) for k tapered signals..
    # don't normalize the periodograms by 1/N as normal.. since the taper
    # windows are orthonormal, they effectively scale the signal by 1/N
    spectra, eigvals = tapered_spectra(s, (NW, Kmax),
                                       NFFT=NFFT,
                                       low_bias=low_bias)
    NFFT = spectra.shape[-1]
    K = len(eigvals)
    # collapse spectra's shape back down to 3 dimensions
    spectra.shape = (M, K, NFFT)

    last_freq = NFFT // 2 + 1 if sides == 'onesided' else NFFT

    # degrees of freedom at each timeseries, at each freq
    nu = np.empty((M, last_freq))
    if adaptive:
        weights = np.empty((M, K, last_freq))
        for i in range(M):
            weights[i], nu[i] = utils.adaptive_weights(spectra[i],
                                                       eigvals,
                                                       sides=sides)
    else:
        # let the weights simply be the square-root of the eigenvalues.
        # repeat these values across all n_chan channels of data
        weights = np.tile(np.sqrt(eigvals), M).reshape(M, K, 1)
        nu.fill(2 * K)

    if jackknife:
        jk_var = np.empty_like(nu)
        for i in range(M):
            jk_var[i] = utils.jackknifed_sdf_variance(spectra[i],
                                                      eigvals,
                                                      sides=sides,
                                                      adaptive=adaptive)

    # Compute the unbiased spectral estimator for S(f) as the sum of
    # the S_k(f) weighted by the function w_k(f)**2, all divided by the
    # sum of the w_k(f)**2 over k

    # 1st, roll the tapers axis forward
    spectra = np.rollaxis(spectra, 1, start=0)
    weights = np.rollaxis(weights, 1, start=0)
    sdf_est = mtm_cross_spectrum(spectra, spectra, weights, sides=sides)
    sdf_est /= Fs

    if sides == 'onesided':
        freqs = np.linspace(0, Fs / 2, NFFT // 2 + 1)
    else:
        freqs = np.linspace(0, Fs, NFFT, endpoint=False)

    out_shape = s.shape[:-1] + (len(freqs), )
    sdf_est.shape = out_shape
    if jackknife:
        jk_var.shape = out_shape
        return freqs, sdf_est, jk_var
    else:
        nu.shape = out_shape
        return freqs, sdf_est, nu
Example #19
def multi_taper_csd(s, Fs=2 * np.pi, BW=None, low_bias=True,
                    adaptive=False, sides='default'):
    """Returns an estimate of the Cross Spectral Density (CSD) function
    between all (N choose 2) pairs of timeseries in s, using the multitaper
    method. If the NW product, or the BW and Fs in Hz are not specified by
    the user, a bandwidth of 4 times the fundamental frequency, corresponding
    to NW = 4 will be used.

    Parameters
    ----------
    s : ndarray
        An array of sampled random processes, where the time axis is
        assumed to be on the last axis. If ndim > 2, the number of time
        series to compare will still be taken as prod(s.shape[:-1])

    Fs: float, Sampling rate of the signal

    BW: float,
       The bandwidth of the windowing function will determine the number of
       tapers to use. This parameter represents a trade-off between frequency
       resolution (lower main lobe BW for the taper) and variance reduction
       (higher BW and number of averaged estimates).

    adaptive : {True, False}
       Use adaptive weighting to combine spectra
    low_bias : {True, False}
       Rather than use 2NW tapers, only use the tapers that have better than
       90% spectral concentration within the bandwidth (still using
       a maximum of 2NW tapers)
    sides : str (optional)   [ 'default' | 'onesided' | 'twosided' ]
         This determines which sides of the spectrum to return. For
         complex-valued inputs, the default is two-sided; for real-valued
         inputs, the default is one-sided.

    Returns
    -------
    (freqs, csd_est) : ndarrays
        The estimated CSD and the frequency points vector.
        The CSD{i,j}(f) are returned in a square "matrix" of vectors
        holding Sij(f). For an input array of (M,N), the output is (M,M,N)
    """
    # have last axis be time series for now
    N = s.shape[-1]
    rest_of = s.shape[:-1]
    M = int(np.prod(rest_of))

    s = s.reshape(M, N)
    # de-mean this sucker
    s = utils.remove_bias(s, axis=-1)

    #Get the number of tapers from the sampling rate and the bandwidth:
    if BW is not None:
        NW = BW / (2 * Fs) * N
    else:
        NW = 4

    Kmax = int(2 * NW)

    dpss, eigvals = dpss_windows(N, NW, Kmax)
    if low_bias:
        keepers = (eigvals > 0.9)
        dpss = dpss[keepers]
        eigvals = eigvals[keepers]
        Kmax = len(dpss)

    # if the time series is a complex vector, a one sided PSD is invalid:
    if (sides == 'default' and np.iscomplexobj(s)) or sides == 'twosided':
        sides = 'twosided'
    elif sides in ('default', 'onesided'):
        sides = 'onesided'

    sig_sl = [slice(None)] * len(s.shape)
    sig_sl.insert(len(s.shape) - 1, np.newaxis)

    # tapered.shape is (M, Kmax, N)
    tapered = s[tuple(sig_sl)] * dpss

    # compute the y_{i,k}(f)
    tapered_spectra = fftpack.fft(tapered)

    # compute the cross-spectral density functions
    last_freq = N // 2 + 1 if sides == 'onesided' else N

    if adaptive:
        w = np.empty(tapered_spectra.shape[:-1] + (last_freq,))
        nu = np.empty((M, last_freq))
        for i in range(M):
            w[i], nu[i] = utils.adaptive_weights(
                tapered_spectra[i], eigvals, sides=sides
                )
    else:
        weights = np.sqrt(eigvals).reshape(Kmax, 1)

    csdfs = np.empty((M, M, last_freq), 'D')
    for i in range(M):
        if adaptive:
            wi = w[i]
        else:
            wi = weights
        for j in range(i + 1):
            if adaptive:
                wj = w[j]
            else:
                wj = weights
            ti = tapered_spectra[i]
            tj = tapered_spectra[j]
            csdfs[i, j] = mtm_cross_spectrum(ti, tj, (wi, wj), sides=sides)

    upper_idc = triu_indices(M, k=1)
    lower_idc = tril_indices(M, k=-1)
    csdfs[upper_idc] = csdfs[lower_idc].conj()

    if sides == 'onesided':
        freqs = np.linspace(0, Fs / 2, N // 2 + 1)
    else:
        freqs = np.linspace(0, Fs, N, endpoint=False)

    return freqs, csdfs
Example #20
    def FKCoherence(self, st, inv, DT, linf, lsup, slim, win_len, sinc,
                    method):
        def find_nearest(array, value):

            idx, val = min(enumerate(array), key=lambda x: abs(x[1] - value))
            return idx, val

        sides = 'onesided'
        pi = math.pi

        smax = slim
        smin = -1 * smax
        Sx = np.arange(smin, smax, sinc)[np.newaxis]
        Sy = np.arange(smin, smax, sinc)[np.newaxis]
        nx = ny = len(Sx[0])
        Sy = np.fliplr(Sy)

        #####Convert start from Gregorian to actual date###############
        Time = DT
        Time = Time - int(Time)
        d = date.fromordinal(int(DT))
        date1 = d.isoformat()
        H = (Time * 24)
        H1 = int(H)  # hours
        minutes = (H - int(H)) * 60
        minutes1 = int(minutes)
        seconds = (minutes - int(minutes)) * 60
        H1 = str(H1).zfill(2)
        minutes1 = str(minutes1).zfill(2)
        seconds = "%.2f" % seconds
        seconds = str(seconds).zfill(2)
        DATE = date1 + "T" + str(H1) + minutes1 + seconds
        t1 = UTCDateTime(DATE)
        ########End conversion###############################

        st.trim(starttime=t1, endtime=t1 + win_len)
        st.sort()
        n = len(st)
        for i in range(n):
            coords = inv.get_coordinates(st[i].id)
            st[i].stats.coordinates = AttribDict({
                'latitude':
                coords['latitude'],
                'elevation':
                coords['elevation'],
                'longitude':
                coords['longitude']
            })

        coord = get_geometry(st, coordsys='lonlat', return_center=True)

        tr = st[0]
        win = len(tr.data)
        if (win % 2) == 0:
            nfft = win // 2 + 1
        else:
            nfft = (win + 1) // 2

        nr = st.count()  # number of stations
        delta = st[0].stats.delta
        fs = 1 / delta
        fn = fs / 2
        freq = np.arange(0, fn, fn / nfft)
        value1, freq1 = find_nearest(freq, linf)
        value2, freq2 = find_nearest(freq, lsup)
        df = value2 - value1
        m = np.zeros((win, nr))

        WW = np.hamming(int(win))
        WW = np.transpose(WW)
        for i in range(nr):
            tr = st[i]
            if method == "FK":
                m[:, i] = (tr.data - np.mean(tr.data)) * WW
            else:
                m[:, i] = (tr.data - np.mean(tr.data))
        pdata = np.transpose(m)

        #####Coherence######
        NW = 2  # the time-bandwidth product; a good choice is 2-3
        K = 2 * NW - 1
        tapers, eigs = alg.dpss_windows(win, NW, K)
        tdata = tapers[None, :, :] * pdata[:, None, :]
        tspectra = fftpack.fft(tdata)

        w = np.empty((nr, int(K), int(nfft)))
        for i in range(nr):
            w[i], _ = utils.adaptive_weights(tspectra[i], eigs, sides=sides)

        nseq = nr
        L = int(nfft)
        #csd_mat = np.zeros((nseq, nseq, L), 'D')
        #psd_mat = np.zeros((2, nseq, nseq, L), 'd')
        coh_mat = np.zeros((nseq, nseq, L), 'd')
        #coh_var = np.zeros_like(coh_mat)
        Cx = np.ones((nr, nr, df), dtype=np.complex128)

        if method == "MTP.COHERENCE":
            for i in range(nr):
                for j in range(nr):
                    sxy = alg.mtm_cross_spectrum(tspectra[i], tspectra[j],
                                                 (w[i], w[j]),
                                                 sides='onesided')
                    sxx = alg.mtm_cross_spectrum(tspectra[i],
                                                 tspectra[i],
                                                 w[i],
                                                 sides='onesided')
                    syy = alg.mtm_cross_spectrum(tspectra[j],
                                                 tspectra[j],
                                                 w[j],
                                                 sides='onesided')
                    s = sxy / np.sqrt((sxx * syy))
                    cxcohe = s[value1:value2]
                    Cx[i, j, :] = cxcohe

        # Calculates Conventional FK-power
        if method == "FK":
            for i in range(nr):
                for j in range(nr):
                    A = np.fft.rfft(m[:, i])
                    B = np.fft.rfft(m[:, j])
                    #Relative Power
                    den = np.absolute(A) * np.absolute(np.conjugate(B))
                    out = (A * np.conjugate(B)) / den
                    cxcohe = out[value1:value2]
                    Cx[i, j, :] = cxcohe

        r = np.zeros((nr, 2), dtype=np.complex128)
        S = np.zeros((1, 2), dtype=np.complex128)
        Pow = np.zeros((len(Sx[0]), len(Sy[0]), df))
        for n in range(nr):
            r[n, :] = coord[n][0:2]

        freq = freq[value1:value2]

        for i in range(ny):
            for j in range(nx):
                S[0, 0] = Sx[0][j]
                S[0, 1] = Sy[0][i]
                k = (S * r)
                # slowness-dot-position delay per station (note: shadows the taper count K)
                K = np.sum(k, axis=1)
                n = 0
                for f in freq:
                    # steering vector for this slowness vector and frequency
                    A = np.exp(-1j * 2 * pi * f * K)
                    B = np.conjugate(np.transpose(A))
                    # beam power: A^H Cx A / nr^2
                    D = np.matmul(B, Cx[:, :, n]) / nr
                    P = np.matmul(D, A) / nr
                    Pow[i, j, n] = np.abs(P)
                    n = n + 1
        Pow = np.mean(Pow, axis=2)
        #Pow = Pow / len(freq)
        Pow = np.fliplr(Pow)
        x = y = np.linspace(smin, smax, nx)

        nn = len(x)
        maximum_power = np.where(Pow == np.amax(Pow))
        Sxpow = (maximum_power[1] - nn / 2) * sinc
        Sypow = (maximum_power[0] - nn / 2) * sinc

        return Pow, Sxpow, Sypow, coord
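
The slowness pair at the power maximum can be converted to an apparent slowness magnitude and an azimuth. A short sketch under one common array-processing convention (x east, y north, azimuth clockwise from north); this post-processing is an addition, not part of the original method:

import numpy as np

# Sxpow, Sypow as returned by FKCoherence (arrays with one entry per power maximum)
smag = np.sqrt(Sxpow ** 2 + Sypow ** 2)             # apparent slowness magnitude
az = np.degrees(np.arctan2(Sxpow, Sypow)) % 360.0   # azimuth of the slowness vector
print("slowness:", smag, "azimuth (deg):", az)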
Example #21
the spectrum (the other half is equal):

"""

L = n_samples // 2 + 1
sides = 'onesided'
"""

We estimate adaptive weighting of the tapers, based on the data (see
:ref:`multi-taper-psd` for an explanation and references):

"""

w = np.empty((nseq, K, L))
for i in range(nseq):
    w[i], _ = utils.adaptive_weights(tspectra[i], eigs, sides=sides)
"""

We proceed to calculate the coherence. We initialize empty data containers:

"""

csd_mat = np.zeros((nseq, nseq, L), 'D')
psd_mat = np.zeros((2, nseq, nseq, L), 'd')
coh_mat = np.zeros((nseq, nseq, L), 'd')
coh_var = np.zeros_like(coh_mat)
"""

Looping over the ROIs:

"""
Example #22
"""

"""

Test 2
------

Now we'll compare multitaper baseband power estimation with regular
Hilbert transform method under more realistic non-narrowband
conditions.

"""

# MT method: project the eigenspectra at the carrier bin fm back onto the tapers
# (multitaper complex demodulation; the factor of 2 accounts for the one-sided spectrum)
xk = nt_alg.tapered_spectra(s_mod, dpss, NFFT=nfft)
w, n = nt_ut.adaptive_weights(xk, eigs, sides='onesided')
mtm_bband = np.sum(2 * (xk[:, fm] * np.sqrt(eigs))[:, None] * dpss, axis=0)

# Hilbert transform method
hb_bband = signal.hilbert(s_mod, N=nfft)[:N]

pp.figure()
pp.subplot(211)
pp.plot(s_mod, 'g')
pp.plot(np.abs(mtm_bband), color='b', linewidth=3)
pp.title('Multitaper Baseband Power')

pp.subplot(212)
pp.plot(s_mod, 'g')
pp.plot(np.abs(hb_bband), color='b', linewidth=3)
pp.title('Hilbert Baseband Power')
Example #23
    def __vespa_az(self, st):
        def find_nearest(array, value):
            idx, val = min(enumerate(array), key=lambda x: abs(x[1] - value))
            return idx, val

        sides = 'onesided'
        pi = math.pi
        st.sort()
        n = len(st)
        for i in range(n):
            coords = self.inv.get_coordinates(st[i].id)
            st[i].stats.coordinates = AttribDict({
                'latitude':
                coords['latitude'],
                'elevation':
                coords['elevation'],
                'longitude':
                coords['longitude']
            })

        coord = get_geometry(st, coordsys='lonlat', return_center=True)

        tr = st[0]
        win = len(tr.data)
        if (win % 2) == 0:
            nfft = win // 2 + 1
        else:
            nfft = (win + 1) // 2

        nr = st.count()  # number of stations
        delta = st[0].stats.delta
        fs = 1 / delta
        fn = fs / 2
        freq = np.arange(0, fn, fn / nfft)

        value1, freq1 = find_nearest(freq, self.linf)
        value2, freq2 = find_nearest(freq, self.lsup)
        df = value2 - value1
        m = np.zeros((win, nr))

        WW = np.hamming(int(win))
        WW = np.transpose(WW)
        for i in range(nr):
            tr = st[i]
            if self.method == "FK":
                m[:, i] = (tr.data - np.mean(tr.data)) * WW
            else:
                m[:, i] = (tr.data - np.mean(tr.data))
        pdata = np.transpose(m)

        #####Coherence######
        NW = 2  # the time-bandwidth product; a good choice is 2-3
        K = 2 * NW - 1
        tapers, eigs = alg.dpss_windows(win, NW, K)
        tdata = tapers[None, :, :] * pdata[:, None, :]
        tspectra = fftpack.fft(tdata)

        w = np.empty((nr, int(K), int(nfft)))
        for i in range(nr):
            w[i], _ = utils.adaptive_weights(tspectra[i], eigs, sides=sides)

        Cx = np.ones((nr, nr, df), dtype=np.complex128)

        if self.method == "MTP.COHERENCE":
            for i in range(nr):
                for j in range(nr):
                    sxy = alg.mtm_cross_spectrum(tspectra[i], tspectra[j],
                                                 (w[i], w[j]),
                                                 sides='onesided')
                    sxx = alg.mtm_cross_spectrum(tspectra[i],
                                                 tspectra[i],
                                                 w[i],
                                                 sides='onesided')
                    syy = alg.mtm_cross_spectrum(tspectra[j],
                                                 tspectra[j],
                                                 w[j],
                                                 sides='onesided')
                    s = sxy / np.sqrt((sxx * syy))
                    cxcohe = s[value1:value2]
                    Cx[i, j, :] = cxcohe

        #### Calculates conventional FK power (relative power: cross-spectra normalized by amplitude) ####
        if self.method == "FK":
            for i in range(nr):
                for j in range(nr):
                    A = np.fft.rfft(m[:, i])
                    B = np.fft.rfft(m[:, j])
                    #Power
                    #out = A * np.conjugate(B)

                    #Relative Power
                    den = np.absolute(A) * np.absolute(np.conjugate(B))
                    out = (A * np.conjugate(B)) / den

                    cxcohe = out[value1:value2]
                    Cx[i, j, :] = cxcohe

        r = np.zeros((nr, 2))
        S = np.zeros((1, 2))
        Pow = np.zeros((360, df))
        for n in range(nr):
            r[n, :] = coord[n][0:2]

        freq = freq[value1:value2]

        rad = np.pi / 180

        slow_range = np.linspace(0, self.slow, 360)

        for j in range(360):

            # the back-azimuth is fixed; only the slowness magnitude is scanned
            ang = self.azimuth2mathangle(self.baz)
            S[0, 0] = slow_range[j] * np.cos(rad * ang)
            S[0, 1] = slow_range[j] * np.sin(rad * ang)

            k = (S * r)
            K = np.sum(k, axis=1)
            n = 0
            for f in freq:
                A = np.exp(-1j * 2 * pi * f * K)
                B = np.conjugate(np.transpose(A))
                D = np.matmul(B, Cx[:, :, n]) / nr
                P = np.matmul(D, A) / nr
                Pow[j, n] = np.abs(P)
                n = n + 1

        Pow = np.mean(Pow, axis=1)

        return Pow
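
A hedged plotting sketch for the vespa curve returned above; matplotlib and the variable slow_max (standing in for self.slow, the upper slowness bound used inside __vespa_az) are assumptions made for illustration:

import numpy as np
import matplotlib.pyplot as plt

slow_range = np.linspace(0, slow_max, 360)  # the same grid used inside __vespa_az
plt.plot(slow_range, Pow)
plt.xlabel('slowness')
plt.ylabel('mean relative power along the fixed back-azimuth')
plt.show()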
Example #25
def compute_mtcoherence(s1, s2, sample_rate, window_size, bandwidth=15.0, chunk_len_percentage_tolerance=0.30,
                      frequency_cutoff=None, tanh_transform=False, debug=False):
    """
        Computing the multi-taper coherence between signals s1 and s2. To do so, the signals are broken up into segments of length
        specified by window_size. Then the multi-taper coherence is computed between each segment. The mean coherence
        is computed across segments, and an estimate of the coherence variance is computed across segments.

        sample_rate: the sample rate in Hz of s1 and s2

        window_size: size of the segments in seconds

        bandwidth: related to the # of tapers used to compute the spectral density. The higher the bandwidth, the more tapers.

        chunk_len_percentage_tolerance: If there are leftover segments whose lengths are less than window_size, use them
            if they comprise at least the fraction of window_size specified by chunk_len_percentage_tolerance

        frequency_cutoff: the frequency at which to cut off the coherence when computing the normal mutual information

        tanh_transform: whether to transform the coherences when computing the upper and lower bounds, supposedly
            improves the estimate of variance.
    """

    minlen = min(len(s1), len(s2))
    if s1.shape != s2.shape:
        s1 = s1[:minlen]
        s2 = s2[:minlen]

    sample_length_bins = min(len(s1), int(window_size * sample_rate))

    #compute DPSS tapers for signals
    NW = int(window_size * bandwidth)
    K = 2 * NW - 1
    #print('compute_coherence: NW=%d, K=%d' % (NW, K))
    tapers, eigs = ntalg.dpss_windows(sample_length_bins, NW, K)
    if debug:
        print('[compute_coherence] bandwidth=%0.1f, # of tapers: %d' % (bandwidth, len(eigs)))

    #break signal into chunks and estimate coherence for each chunk
    nchunks = int(np.floor(len(s1) / float(sample_length_bins)))
    nleft = len(s1) % sample_length_bins
    if nleft > 0:
        nchunks += 1
    #print 'sample_length_bins=%d, # of chunks:%d, # samples in last chunk: %d' % (sample_length_bins, nchunks, nleft)
    coherence_estimates = list()
    for k in range(nchunks):
        s = k * sample_length_bins
        e = min(len(s1), s + sample_length_bins)
        chunk_len = e - s
        chunk_percentage = chunk_len / float(sample_length_bins)
        if chunk_percentage < chunk_len_percentage_tolerance:
            #don't compute coherence for a chunk whose length is less than a certain percentage of sample_length_bins
            continue
        s1_chunk = np.zeros([sample_length_bins])
        s2_chunk = np.zeros([sample_length_bins])
        s1_chunk[:chunk_len] = s1[s:e]
        s2_chunk[:chunk_len] = s2[s:e]

        #taper the signals
        s1_tap = tapers * s1_chunk
        s2_tap = tapers * s2_chunk

        #compute fft of tapered signals
        s1_fft = fftpack.fft(s1_tap, axis=1)
        s2_fft = fftpack.fft(s2_tap, axis=1)

        #compute adaptive weights for each taper
        w1, nu1 = ntutils.adaptive_weights(s1_fft, eigs, sides='onesided')
        w2, nu2 = ntutils.adaptive_weights(s2_fft, eigs, sides='onesided')

        #compute cross spectral density
        sxy = ntalg.mtm_cross_spectrum(s1_fft, s2_fft, (w1, w2), sides='onesided')

        #compute individual power spectrums
        sxx = ntalg.mtm_cross_spectrum(s1_fft, s1_fft, w1, sides='onesided')
        syy = ntalg.mtm_cross_spectrum(s2_fft, s2_fft, w2, sides='onesided')

        #compute coherence
        coherence = np.abs(sxy)**2 / (sxx * syy)
        coherence_estimates.append(coherence)

    #compute variance
    coherence_estimates = np.array(coherence_estimates)

    if tanh_transform:
        coherence_estimates = np.arctanh(coherence_estimates)

    coherence_variance = np.zeros([coherence_estimates.shape[1]])
    coherence_mean = coherence_estimates.mean(axis=0)
    #mean subtract and square
    cv = np.sum((coherence_estimates - coherence_mean)**2, axis=0)
    coherence_variance[:] = (1.0 - 1.0/nchunks) * cv

    if tanh_transform:
        coherence_variance = np.tanh(coherence_variance)
        coherence_mean = np.tanh(coherence_mean)

    #compute frequencies
    sampint = 1.0 / sample_rate
    L = sample_length_bins // 2 + 1
    freq = np.linspace(0, 1 / (2 * sampint), L)

    #compute upper and lower bounds
    coherence_lower = coherence_mean - 2 * np.sqrt(coherence_variance)
    coherence_upper = coherence_mean + 2 * np.sqrt(coherence_variance)

    cdata = CoherenceData(frequency_cutoff=frequency_cutoff)
    cdata.coherence = coherence_mean
    cdata.coherence_lower = coherence_lower
    cdata.coherence_upper = coherence_upper
    cdata.frequency = freq
    cdata.sample_rate = sample_rate

    return cdata
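
A hedged usage sketch for compute_mtcoherence above; the synthetic signals and parameter values are made up for illustration, and the module is assumed to provide the ntalg/ntutils/CoherenceData names it already uses internally:

import numpy as np

# two noisy signals sharing a common component
sample_rate = 1000.0
t = np.arange(0.0, 2.0, 1.0 / sample_rate)
common = np.random.randn(len(t))
s1 = common + 0.5 * np.random.randn(len(t))
s2 = common + 0.5 * np.random.randn(len(t))

cdata = compute_mtcoherence(s1, s2, sample_rate, window_size=0.5, bandwidth=10.0)

# cdata.frequency vs. cdata.coherence is the mean coherence spectrum;
# cdata.coherence_lower / cdata.coherence_upper give +/- 2*std bounds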