Code example #1
    def coherence(self):
        nrows = self.input.data.shape[0]
        psd_mat = np.zeros((2, nrows, nrows, self._L), 'd')
        coh_mat = np.zeros((nrows, nrows, self._L), 'd')

        for i in range(self.input.data.shape[0]):
            for j in range(i):
                sxy = tsa.mtm_cross_spectrum(
                    self.spectra[i],
                    self.spectra[j], (self.weights[i], self.weights[j]),
                    sides='onesided')
                sxx = tsa.mtm_cross_spectrum(self.spectra[i],
                                             self.spectra[i],
                                             self.weights[i],
                                             sides='onesided')
                syy = tsa.mtm_cross_spectrum(self.spectra[j],
                                             self.spectra[j],
                                             self.weights[j],
                                             sides='onesided')
                psd_mat[0, i, j] = sxx
                psd_mat[1, i, j] = syy
                coh_mat[i, j] = np.abs(sxy)**2
                coh_mat[i, j] /= (sxx * syy)

        idx = triu_indices(self.input.data.shape[0], 1)
        coh_mat[idx[0], idx[1], ...] = coh_mat[idx[1], idx[0], ...].conj()

        return coh_mat
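
Example #1 carries no file attribution, but the method matches nitime's multi-taper coherence analyzer. A hedged usage sketch follows (the class and module names are assumptions based on nitime's public API, where MTCoherenceAnalyzer lives in nitime.analysis):

import numpy as np
import nitime.timeseries as ts
import nitime.analysis as nta

# two synthetic channels, 1000 samples each at 1 kHz
data = np.random.randn(2, 1000)
T = ts.TimeSeries(data, sampling_rate=1000.0)

# accessing the coherence property runs a computation like the one above;
# the result has shape (n_channels, n_channels, n_freqs)
C = nta.MTCoherenceAnalyzer(T)
coh = C.coherence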
Code example #2
File: test_spectral.py  Project: zyq11223/nitime
def test_mtm_lin_combo():
    "Test the functionality of cross and autospectrum MTM combinations"
    spec1 = np.random.randn(5, 100) + 1j * np.random.randn(5, 100)
    spec2 = np.random.randn(5, 100) + 1j * np.random.randn(5, 100)
    # test on both broadcasted weights and per-point weights
    for wshape in ((2, 5, 1), (2, 5, 100)):
        weights = np.random.randn(*wshape)
        sides = 'onesided'
        mtm_cross = tsa.mtm_cross_spectrum(spec1,
                                           spec2, (weights[0], weights[1]),
                                           sides=sides)
        npt.assert_(mtm_cross.dtype in np.sctypes['complex'],
                    'Wrong dtype for crossspectrum')
        npt.assert_(len(mtm_cross) == 51, 'Wrong length for halfband spectrum')
        sides = 'twosided'
        mtm_cross = tsa.mtm_cross_spectrum(spec1,
                                           spec2, (weights[0], weights[1]),
                                           sides=sides)
        npt.assert_(
            len(mtm_cross) == 100, 'Wrong length for fullband spectrum')
        sides = 'onesided'
        mtm_auto = tsa.mtm_cross_spectrum(spec1,
                                          spec1,
                                          weights[0],
                                          sides=sides)
        npt.assert_(mtm_auto.dtype in np.sctypes['float'],
                    'Wrong dtype for autospectrum')
        npt.assert_(len(mtm_auto) == 51, 'Wrong length for halfband spectrum')
        sides = 'twosided'
        mtm_auto = tsa.mtm_cross_spectrum(spec1,
                                          spec1,
                                          weights[0],
                                          sides=sides)
        npt.assert_(len(mtm_auto) == 100, 'Wrong length for fullband spectrum')
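
The lengths asserted above follow from the FFT size: the one-sided ('onesided') spectrum of an N-point transform keeps N // 2 + 1 bins, while the two-sided ('twosided') spectrum keeps all N. A trivial check for the N = 100 used in the test:

N = 100
halfband = N // 2 + 1   # 51, the 'onesided' length asserted above
fullband = N            # 100, the 'twosided' length
assert (halfband, fullband) == (51, 100)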
Code example #3
File: coherence.py  Project: JohnGriffiths/nitime
    def coherence(self):
        nrows = self.input.data.shape[0]
        psd_mat = np.zeros((2, nrows, nrows, self._L), 'd')
        coh_mat = np.zeros((nrows, nrows, self._L), 'd')

        for i in range(self.input.data.shape[0]):
            for j in range(i):
                sxy = tsa.mtm_cross_spectrum(self.spectra[i], self.spectra[j],
                                           (self.weights[i], self.weights[j]),
                                           sides='onesided')
                sxx = tsa.mtm_cross_spectrum(self.spectra[i], self.spectra[i],
                                             self.weights[i],
                                             sides='onesided')
                syy = tsa.mtm_cross_spectrum(self.spectra[j], self.spectra[j],
                                             self.weights[j],
                                             sides='onesided')
                psd_mat[0, i, j] = sxx
                psd_mat[1, i, j] = syy
                coh_mat[i, j] = np.abs(sxy) ** 2
                coh_mat[i, j] /= (sxx * syy)

        idx = triu_indices(self.input.data.shape[0], 1)
        coh_mat[idx[0], idx[1], ...] = coh_mat[idx[1], idx[0], ...].conj()

        return coh_mat
Code example #4
File: test_spectral.py  Project: ilustreous/nitime
def test_mtm_lin_combo():
    "Test the functionality of cross and autospectrum MTM combinations"
    spec1 = np.random.randn(5, 100) + 1j*np.random.randn(5, 100)
    spec2 = np.random.randn(5, 100) + 1j*np.random.randn(5, 100)
    # test on both broadcasted weights and per-point weights
    for wshape in ( (2,5,1), (2,5,100) ):
        weights = np.random.randn(*wshape)
        sides = 'onesided'
        mtm_cross = tsa.mtm_cross_spectrum(
            spec1, spec2, (weights[0], weights[1]), sides=sides
            )
        nt.assert_true(mtm_cross.dtype in np.sctypes['complex'],
               'Wrong dtype for crossspectrum')
        nt.assert_true(len(mtm_cross) == 51,
               'Wrong length for halfband spectrum')
        sides = 'twosided'
        mtm_cross = tsa.mtm_cross_spectrum(
            spec1, spec2, (weights[0], weights[1]), sides=sides
            )
        nt.assert_true(len(mtm_cross) == 100,
               'Wrong length for fullband spectrum')
        sides = 'onesided'
        mtm_auto = tsa.mtm_cross_spectrum(
            spec1, spec1, weights[0], sides=sides
            )
        nt.assert_true(mtm_auto.dtype in np.sctypes['float'],
               'Wrong dtype for autospectrum')
        nt.assert_true(len(mtm_auto) == 51,
               'Wrong length for halfband spectrum')
        sides = 'twosided'
        mtm_auto = tsa.mtm_cross_spectrum(
            spec1, spec1, weights[0], sides=sides
            )
        nt.assert_true(len(mtm_auto) == 100,
               'Wrong length for fullband spectrum')
Code example #5
File: test_spectral.py  Project: arokem/nitime
def test_mtm_cross_spectrum():
    """

    Test the multi-taper cross-spectral estimation. Based on the example in
    doc/examples/multi_taper_coh.py

    """
    NW = 4
    K = 2 * NW - 1

    N = 2 ** 10
    n_reps = 10
    n_freqs = N

    tapers, eigs = tsa.dpss_windows(N, NW, K)

    est_psd = []
    for k in range(n_reps):
        data, nz, alpha = utils.ar_generator(N=N)
        fgrid, hz = tsa.freq_response(1.0, a=np.r_[1, -alpha], n_freqs=n_freqs)
        # 'one-sided', so multiply by 2:
        psd = 2 * (hz * hz.conj()).real

        tdata = tapers * data

        tspectra = fftpack.fft(tdata)

        L = N // 2 + 1
        sides = 'onesided'
        w, _ = utils.adaptive_weights(tspectra, eigs, sides=sides)

        sxx = tsa.mtm_cross_spectrum(tspectra, tspectra, w, sides=sides)
        est_psd.append(sxx)

    fxx = np.mean(est_psd, 0)

    psd_ratio = np.mean(fxx / psd)

    # This is a rather lenient test, making sure that the average ratio is 1
    # to one decimal place. That is, that the estimates agree on average:
    npt.assert_array_almost_equal(psd_ratio, 1, decimal=1)

    # Test raising of error in case the inputs don't make sense:
    with pytest.raises(ValueError) as e_info:
        tsa.mtm_cross_spectrum(tspectra, np.r_[tspectra, tspectra], (w, w))
Code example #6
File: test_spectral.py  Project: zyq11223/nitime
def test_mtm_cross_spectrum():
    """

    Test the multi-taper cross-spectral estimation. Based on the example in
    doc/examples/multi_taper_coh.py

    """
    NW = 4
    K = 2 * NW - 1

    N = 2**10
    n_reps = 10
    n_freqs = N

    tapers, eigs = tsa.dpss_windows(N, NW, K)

    est_psd = []
    for k in range(n_reps):
        data, nz, alpha = utils.ar_generator(N=N)
        fgrid, hz = tsa.freq_response(1.0, a=np.r_[1, -alpha], n_freqs=n_freqs)
        # 'one-sided', so multiply by 2:
        psd = 2 * (hz * hz.conj()).real

        tdata = tapers * data

        tspectra = fftpack.fft(tdata)

        L = N // 2 + 1
        sides = 'onesided'
        w, _ = utils.adaptive_weights(tspectra, eigs, sides=sides)

        sxx = tsa.mtm_cross_spectrum(tspectra, tspectra, w, sides=sides)
        est_psd.append(sxx)

    fxx = np.mean(est_psd, 0)

    psd_ratio = np.mean(fxx / psd)

    # This is a rather lenient test, making sure that the average ratio is 1
    # to one decimal place. That is, that the estimates agree on average:
    npt.assert_array_almost_equal(psd_ratio, 1, decimal=1)

    # Test raising of error in case the inputs don't make sense:
    with pytest.raises(ValueError) as e_info:
        tsa.mtm_cross_spectrum(tspectra, np.r_[tspectra, tspectra], (w, w))
Code example #7
def jackknifed_variances(tx, ty, eigvals, adaptive=True, deg=True):
    """
    Returns the variance of the admittance (real-part), 
    gain (modulus) and phase of the transfer function and 
    gamma^2 (modulus-squared coherence) between x and y, 
    estimated through jack-knifing the tapered samples in {tx, ty}.

    Parameters
    ----------

    tx : ndarray, (K, L)
       The K complex spectra of tapered timeseries x
    ty : ndarray, (K, L)
       The K complex spectra of tapered timeseries y
    eigvals : ndarray (K,)
       The eigenvalues associated with the K DPSS tapers

    Returns
    -------

    jk_var : dictionary of ndarrays 
       (entries are 'admittance', 'gain', 'phase', 
       'magnitude_squared_coherence')
       The variance computed in the transformed domain
    """

    K = tx.shape[0]

    # calculate leave-one-out estimates of the admittance
    jk_admittance = []
    jk_gain = []
    jk_phase = []
    jk_magnitude_squared_coherence = []
    sides = 'onesided'
    all_orders = set(range(K))

    import nitime.algorithms as alg

    # get the leave-one-out estimates
    for i in range(K):
        items = list(all_orders.difference([i]))
        tx_i = np.take(tx, items, axis=0)
        ty_i = np.take(ty, items, axis=0)
        eigs_i = np.take(eigvals, items)
        if adaptive:
            wx, _ = utils.adaptive_weights(tx_i, eigs_i, sides=sides)
            wy, _ = utils.adaptive_weights(ty_i, eigs_i, sides=sides)
        else:
            wx = wy = eigs_i[:, None]
        # The CSD
        sxy_i = alg.mtm_cross_spectrum(tx_i, ty_i, (wx, wy), sides=sides)
        # The PSDs
        sxx_i = alg.mtm_cross_spectrum(tx_i, tx_i, wx, sides=sides)
        syy_i = alg.mtm_cross_spectrum(ty_i, ty_i, wy, sides=sides)

        # these are the Zr_i samples
        Z = sxy_i / syy_i
        jk_admittance.append(np.real(Z))
        jk_gain.append(np.absolute(Z))
        jk_phase.append(np.angle(Z, deg=deg))
        jk_magnitude_squared_coherence.append(
            np.abs(sxy_i)**2 / (sxx_i * syy_i))

    # The jackknifed variance is equal to
    # (K-1)/K * sum_i ( (x_i - mean(x_i))^2 )
    jk_var = {}
    for (name, jk_variance) in [('admittance', np.array(jk_admittance)),
                                ('gain', np.array(jk_gain)),
                                ('phase', np.array(jk_phase)),
                                ('magnitude_squared_coherence',
                                 np.array(jk_magnitude_squared_coherence))]:
        jk_avg = np.mean(jk_variance, axis=0)
        jk_var[name] = (float(K - 1.) / K) * (np.power(
            (jk_variance - jk_avg), 2.)).sum(axis=0)

    return jk_var
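
A hedged sketch of how the function above might be driven, building the K tapered spectra the same way the other examples in this listing do (the synthetic inputs are illustrative, and the function body already assumes a module-level utils alias for nitime.utils):

import numpy as np
from scipy import fftpack
import nitime.algorithms as alg

N, NW = 1024, 4
K = 2 * NW - 1
tapers, eigs = alg.dpss_windows(N, NW, K)

x = np.random.randn(N)
y = x + np.random.randn(N)      # a correlated pair
tx = fftpack.fft(tapers * x)    # (K, N) tapered spectra of x
ty = fftpack.fft(tapers * y)

jk_var = jackknifed_variances(tx, ty, eigs, adaptive=True)
print(jk_var['magnitude_squared_coherence'].shape)  # per-frequency variances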
Code example #8
File: multi_taper_coh.py  Project: saifrahmed/nitime
Looping over the ROIs:

"""

for i in range(nseq):
    for j in range(i):
        """

        We calculate the multi-tapered cross spectrum between each two
        time-series:

        """

        sxy = alg.mtm_cross_spectrum(tspectra[i],
                                     tspectra[j], (w[i], w[j]),
                                     sides='onesided')
        """

        And the individual PSD for each:

        """

        sxx = alg.mtm_cross_spectrum(tspectra[i],
                                     tspectra[i],
                                     w[i],
                                     sides='onesided')
        syy = alg.mtm_cross_spectrum(tspectra[j],
                                     tspectra[j],
                                     w[j],
                                     sides='onesided')
Code example #9
File: multi_taper_coh.py  Project: ilustreous/nitime
Looping over the ROIs:

"""

for i in range(nseq):
    for j in range(i):

        """

        We calculate the multi-tapered cross spectrum between each two
        time-series:

        """

        sxy = alg.mtm_cross_spectrum(
           tspectra[i], tspectra[j], (w[i], w[j]), sides='onesided'
         )

        """

        And the individual PSD for each:

        """

        sxx = alg.mtm_cross_spectrum(
           tspectra[i], tspectra[i], w[i], sides='onesided'
           )
        syy = alg.mtm_cross_spectrum(
           tspectra[j], tspectra[j], w[j], sides='onesided'
           )
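
In the complete multi_taper_coh.py example the loop then combines the three estimates into the magnitude-squared coherence, just as code examples #1 and #3 do; a sketch of that next step, assuming coh_mat was preallocated as an (nseq, nseq, L) float array:

        coh_mat[i, j] = np.abs(sxy) ** 2
        coh_mat[i, j] /= (sxx * syy)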
Code example #10
File: coherence.py  Project: choldgraf/LaSP
def compute_mtcoherence(s1, s2, sample_rate, window_size, bandwidth=15.0, chunk_len_percentage_tolerance=0.30,
                      frequency_cutoff=None, tanh_transform=False, debug=False):
    """
        Compute the multi-taper coherence between signals s1 and s2. The signals are broken into segments of length
        specified by window_size, and the multi-taper coherence is computed for each segment. The mean coherence
        and an estimate of the coherence variance are then computed across segments.

        sample_rate: the sample rate in Hz of s1 and s2

        window_size: size of the segments in seconds

        bandwidth: related to the # of tapers used to compute the spectral density. The higher the bandwidth, the more tapers.

        chunk_len_percentage_tolerance: If there are leftover segments whose lengths are less than window_size, use them
            if they comprise at least the fraction of window_size specified by chunk_len_percentage_tolerance

        frequency_cutoff: the frequency at which to cut off the coherence when computing the normal mutual information

        tanh_transform: whether to transform the coherences when computing the upper and lower bounds, supposedly
            improves the estimate of variance.
    """

    minlen = min(len(s1), len(s2))
    if s1.shape != s2.shape:
        s1 = s1[:minlen]
        s2 = s2[:minlen]

    sample_length_bins = min(len(s1), int(window_size * sample_rate))

    #compute DPSS tapers for signals
    NW = int(window_size*bandwidth)
    K = 2*NW - 1
    #print 'compute_coherence: NW=%d, K=%d' % (NW, K)
    tapers,eigs = ntalg.dpss_windows(sample_length_bins, NW, K)
    if debug:
        print('[compute_coherence] bandwidth=%0.1f, # of tapers: %d' % (bandwidth, len(eigs)))

    #break signal into chunks and estimate coherence for each chunk
    nchunks = int(np.floor(len(s1) / float(sample_length_bins)))
    nleft = len(s1) % sample_length_bins
    if nleft > 0:
        nchunks += 1
    #print 'sample_length_bins=%d, # of chunks:%d, # samples in last chunk: %d' % (sample_length_bins, nchunks, nleft)
    coherence_estimates = list()
    for k in range(nchunks):
        s = k*sample_length_bins
        e = min(len(s1), s + sample_length_bins)
        chunk_len = e - s
        chunk_percentage = chunk_len / float(sample_length_bins)
        if chunk_percentage < chunk_len_percentage_tolerance:
            #don't compute coherence for a chunk whose length is less than a certain percentage of sample_length_bins
            continue
        s1_chunk = np.zeros([sample_length_bins])
        s2_chunk = np.zeros([sample_length_bins])
        s1_chunk[:chunk_len] = s1[s:e]
        s2_chunk[:chunk_len] = s2[s:e]

        #taper the signals
        s1_tap = tapers * s1_chunk
        s2_tap = tapers * s2_chunk

        #compute fft of tapered signals
        s1_fft = fftpack.fft(s1_tap, axis=1)
        s2_fft = fftpack.fft(s2_tap, axis=1)

        #compute adaptive weights for each taper
        w1,nu1 = ntutils.adaptive_weights(s1_fft, eigs, sides='onesided')
        w2,nu2 = ntutils.adaptive_weights(s2_fft, eigs, sides='onesided')

        #compute cross spectral density
        sxy = ntalg.mtm_cross_spectrum(s1_fft, s2_fft, (w1, w2), sides='onesided')

        #compute the individual power spectra
        sxx = ntalg.mtm_cross_spectrum(s1_fft, s1_fft, w1, sides='onesided')
        syy = ntalg.mtm_cross_spectrum(s2_fft, s2_fft, w2, sides='onesided')

        #compute coherence
        coherence = np.abs(sxy)**2 / (sxx * syy)
        coherence_estimates.append(coherence)

    #compute variance
    coherence_estimates = np.array(coherence_estimates)

    if tanh_transform:
        coherence_estimates = np.arctanh(coherence_estimates)

    coherence_variance = np.zeros([coherence_estimates.shape[1]])
    coherence_mean = coherence_estimates.mean(axis=0)
    #mean subtract and square
    cv = np.sum((coherence_estimates - coherence_mean)**2, axis=0)
    coherence_variance[:] = (1.0 - 1.0/nchunks) * cv

    if tanh_transform:
        coherence_variance = np.tanh(coherence_variance)
        coherence_mean = np.tanh(coherence_mean)

    #compute frequencies
    sampint = 1.0 / sample_rate
    L = sample_length_bins // 2 + 1
    freq = np.linspace(0, 1 / (2 * sampint), L)

    #compute upper and lower bounds
    coherence_lower = coherence_mean - 2*np.sqrt(coherence_variance)
    coherence_upper = coherence_mean + 2*np.sqrt(coherence_variance)

    cdata = CoherenceData(frequency_cutoff=frequency_cutoff)
    cdata.coherence = coherence_mean
    cdata.coherence_lower = coherence_lower
    cdata.coherence_upper = coherence_upper
    cdata.frequency = freq
    cdata.sample_rate = sample_rate

    return cdata
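
A hypothetical call for the function above (CoherenceData is defined elsewhere in the same coherence.py, so this sketch assumes that module context):

import numpy as np

sr = 1000.0                         # 1 kHz sampling rate
s1 = np.random.randn(int(5 * sr))   # five seconds of signal
s2 = s1 + np.random.randn(len(s1))
cdata = compute_mtcoherence(s1, s2, sample_rate=sr, window_size=1.0)
print(cdata.frequency.shape, cdata.coherence.shape)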
Code example #11
File: coherence.py  Project: choldgraf/LaSP
def compute_coherence_original(s1, s2, sample_rate, bandwidth, jackknife=False, tanh_transform=False):
    """
        An implementation of computing the coherence. Don't use this.
    """

    minlen = min(len(s1), len(s2))
    if s1.shape != s2.shape:
        s1 = s1[:minlen]
        s2 = s2[:minlen]

    window_length = len(s1) / sample_rate
    window_length_bins = int(window_length * sample_rate)

    #compute DPSS tapers for signals
    NW = int(window_length*bandwidth)
    K = 2*NW - 1
    print('compute_coherence: NW=%d, K=%d' % (NW, K))
    tapers,eigs = ntalg.dpss_windows(window_length_bins, NW, K)

    njn = len(eigs)
    jn_indices = [list(range(njn))]
    #compute jackknife indices
    if jackknife:
        jn_indices = list()
        for i in range(len(eigs)):
            jn = list(range(len(eigs)))
            jn.remove(i)
            jn_indices.append(jn)

    #taper the signals
    s1_tap = tapers * s1
    s2_tap = tapers * s2

    #compute fft of tapered signals
    s1_fft = fftpack.fft(s1_tap, axis=1)
    s2_fft = fftpack.fft(s2_tap, axis=1)

    #compute adaptive weights for each taper
    w1,nu1 = ntutils.adaptive_weights(s1_fft, eigs, sides='onesided')
    w2,nu2 = ntutils.adaptive_weights(s2_fft, eigs, sides='onesided')

    coherence_estimates = list()
    for jn in jn_indices:

        #compute cross spectral density
        sxy = ntalg.mtm_cross_spectrum(s1_fft[jn, :], s2_fft[jn, :], (w1[jn], w2[jn]), sides='onesided')

        #compute the individual power spectra
        sxx = ntalg.mtm_cross_spectrum(s1_fft[jn, :], s1_fft[jn, :], w1[jn], sides='onesided')
        syy = ntalg.mtm_cross_spectrum(s2_fft[jn, :], s2_fft[jn, :], w2[jn], sides='onesided')

        #compute coherence
        coherence = np.abs(sxy)**2 / (sxx * syy)
        coherence_estimates.append(coherence)

    #compute variance
    coherence_estimates = np.array(coherence_estimates)
    coherence_variance = np.zeros([coherence_estimates.shape[1]])
    coherence_mean = coherence_estimates[0]
    if jackknife:
        coherence_mean = coherence_estimates.mean(axis=0)
        #mean subtract and square
        cv = np.sum((coherence_estimates - coherence_mean)**2, axis=0)
        coherence_variance[:] = (1.0 - 1.0/njn) * cv

    #compute frequencies
    sampint = 1.0 / sample_rate
    L = minlen // 2 + 1
    freq = np.linspace(0, 1 / (2 * sampint), L)

    #compute upper and lower bounds
    cmean = coherence_mean
    coherence_lower = cmean - 2*np.sqrt(coherence_variance)
    coherence_upper = cmean + 2*np.sqrt(coherence_variance)

    cdata = CoherenceData()
    cdata.coherence = coherence_mean
    cdata.coherence_lower = coherence_lower
    cdata.coherence_upper = coherence_upper
    cdata.frequency = freq
    cdata.sample_rate = sample_rate

    return cdata
Code example #12
def multitaper_cross_spectral_estimates(traces,
                                        delta,
                                        NW,
                                        compute_confidence_intervals=True,
                                        confidence_interval=0.95):

    # Define the number of tapers, their values and associated eigenvalues:
    npts = len(traces[0])
    K = 2 * NW - 1
    tapers, eigs = alg.dpss_windows(npts, NW, K)

    # Multiply the data by the tapers, calculate the Fourier transform
    # We multiply the data by the tapers and derive the fourier transform and the
    # magnitude of the squared spectra (the power) for each tapered time-series:
    tdata = tapers[None, :, :] * traces[:, None, :]
    tspectra = fftpack.fft(tdata)

    # The coherency for real sequences is symmetric, so only half
    # the spectrum is required
    L = npts // 2 + 1

    if L < npts:
        freqs = np.linspace(0, 1. / (2. * delta), L)
    else:
        freqs = np.linspace(0, 1. / delta, L, endpoint=False)

    # Estimate adaptive weighting of the tapers, based on the data
    # (see Thomson, 2007; 10.1109/MSP.2007.4286561)
    w = np.empty((2, K, L))
    for i in range(2):
        w[i], _ = utils.adaptive_weights(tspectra[i], eigs, sides='onesided')

    # Calculate the multi-tapered cross spectrum
    # and the PSDs for the two time-series:
    sxy = alg.mtm_cross_spectrum(tspectra[0],
                                 tspectra[1], (w[0], w[1]),
                                 sides='onesided')
    sxx = alg.mtm_cross_spectrum(tspectra[0],
                                 tspectra[0],
                                 w[0],
                                 sides='onesided')
    syy = alg.mtm_cross_spectrum(tspectra[1],
                                 tspectra[1],
                                 w[1],
                                 sides='onesided')

    Z = sxy / syy

    spectral_estimates = {}
    spectral_estimates['frequencies'] = freqs
    spectral_estimates['magnitude_squared_coherence'] = np.abs(sxy)**2 / (sxx *
                                                                          syy)
    spectral_estimates['transfer_function'] = Z  # Transfer function
    spectral_estimates['admittance'] = np.real(Z)
    spectral_estimates['gain'] = np.absolute(Z)
    spectral_estimates['phase'] = np.angle(Z, deg=True)

    # Estimate confidence intervals
    if compute_confidence_intervals:
        spectral_estimates['confidence_bounds'] = {}
        c_bnds = [
            0.5 - confidence_interval / 2., 0.5 + confidence_interval / 2.
        ]
        variances = jackknifed_variances(tspectra[0],
                                         tspectra[1],
                                         eigs,
                                         adaptive=True)
        spectral_estimates['confidence_bounds']['admittance'] = [
            spectral_estimates['admittance'] +
            dist.t.ppf(c_bnds[0], K - 1) * np.sqrt(variances['admittance']),
            spectral_estimates['admittance'] +
            dist.t.ppf(c_bnds[1], K - 1) * np.sqrt(variances['admittance'])
        ]
        spectral_estimates['confidence_bounds']['gain'] = [
            spectral_estimates['gain'] +
            dist.t.ppf(c_bnds[0], K - 1) * np.sqrt(variances['gain']),
            spectral_estimates['gain'] +
            dist.t.ppf(c_bnds[1], K - 1) * np.sqrt(variances['gain'])
        ]
        spectral_estimates['confidence_bounds']['phase'] = [
            spectral_estimates['phase'] +
            dist.t.ppf(c_bnds[0], K - 1) * np.sqrt(variances['phase']),
            spectral_estimates['phase'] +
            dist.t.ppf(c_bnds[1], K - 1) * np.sqrt(variances['phase'])
        ]
        spectral_estimates['confidence_bounds'][
            'magnitude_squared_coherence'] = [
                spectral_estimates['magnitude_squared_coherence'] +
                dist.t.ppf(c_bnds[0], K - 1) *
                np.sqrt(variances['magnitude_squared_coherence']),
                spectral_estimates['magnitude_squared_coherence'] +
                dist.t.ppf(c_bnds[1], K - 1) *
                np.sqrt(variances['magnitude_squared_coherence'])
            ]

    return spectral_estimates
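
A hedged usage sketch for multitaper_cross_spectral_estimates (the alg, utils and dist aliases follow what the function body references; passing compute_confidence_intervals=False sidesteps the jackknife and Student-t machinery):

import numpy as np

# two synthetic traces sampled at 100 Hz (delta = 0.01 s)
traces = np.random.randn(2, 1024)
est = multitaper_cross_spectral_estimates(traces, delta=0.01, NW=4,
                                          compute_confidence_intervals=False)
print(est['frequencies'].shape)
print(est['magnitude_squared_coherence'].shape)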
Code example #13
File: utils.py  Project: fperez/nitime
def jackknifed_coh_variance(tx, ty, weights=None, last_freq=None):
    """
    Returns the variance of the coherency between x and y, estimated
    through jack-knifing the tapered samples in {tx, ty}.

    Parameters
    ----------

    tx: ndarray, (K, L)
       The K complex spectra of tapered timeseries x
    ty: ndarray, (K, L)
       The K complex spectra of tapered timeseries y
    weights: ndarray, or sequence-of-ndarrays 2 x (K, [N]), optional
       The weights to use for combining the K spectra in tx and ty
    last_freq: int, optional
       The last frequency for which to compute variance (e.g., if only
       computing half of the coherence spectrum)

    Returns
    -------

    jk_var: ndarray
       The variance computed in the transformed domain (see normalize_coherence)
    """

    K = tx.shape[0]
    L = tx.shape[1] if last_freq is None else last_freq
    tx = tx[:,:L]
    ty = ty[:,:L]
    # prepare weights
    if weights is None:
        weights = ( np.ones(K), np.ones(K) )
    if len(weights) != 2:
        raise ValueError('Must provide 2 sets of weights')
    weights_x, weights_y = weights
    if len(weights_x.shape) < 2:
        weights_x = weights_x.reshape(K, 1)
        weights_y = weights_y.reshape(K, 1)
    if weights_x.shape[1] > L:
        weights_x = weights_x[:,:L]
        weights_y = weights_y[:,:L]
    
    # calculate leave-one-out estimates of MSC (magnitude squared coherence)
    jk_coh = np.empty((K, L), 'd')
    
    all_orders = set(range(K))

    import nitime.algorithms as alg

    # get the leave-one-out estimates
    for i in range(K):
        items = list(all_orders.difference([i]))
        tx_i = np.take(tx, items, axis=0)
        ty_i = np.take(ty, items, axis=0)
        wx = np.take(weights_x, items, axis=0)
        wy = np.take(weights_y, items, axis=0)
        weights = (wx, wy)
        # The CSD
        sxy_i = alg.mtm_cross_spectrum(tx_i, ty_i, weights)
        # The PSDs
        sxx_i = alg.mtm_cross_spectrum(tx_i, tx_i, weights).real
        syy_i = alg.mtm_cross_spectrum(ty_i, ty_i, weights).real
        # these are the | c_i | samples
        jk_coh[i] = np.abs(sxy_i)
        jk_coh[i] /= np.sqrt(sxx_i * syy_i)

    jk_avg = np.mean(jk_coh, axis=0)
    # now normalize the coherence estimates and the avg
    normalize_coherence(jk_coh, 2*K-2, jk_coh)
    normalize_coherence(jk_avg, 2*K-2, jk_avg)

    jk_var = (jk_coh - jk_avg)
    np.power(jk_var, 2, jk_var)
    jk_var = jk_var.sum(axis=0)

    # Do/Don't use the alternative scaling here??
    f = float(K-1)/K

    jk_var *= f

    return jk_var
Code example #14
File: coherence.py  Project: fedeadolfi/soundsig
def compute_mtcoherence(s1,
                        s2,
                        sample_rate,
                        window_size,
                        bandwidth=15.0,
                        chunk_len_percentage_tolerance=0.30,
                        frequency_cutoff=None,
                        tanh_transform=False,
                        debug=False):
    """
        Compute the multi-taper coherence between signals s1 and s2. The signals are broken into segments of length
        specified by window_size, and the multi-taper coherence is computed for each segment. The mean coherence
        and an estimate of the coherence variance are then computed across segments.

        sample_rate: the sample rate in Hz of s1 and s2

        window_size: size of the segments in seconds

        bandwidth: related to the # of tapers used to compute the spectral density. The higher the bandwidth, the more tapers.

        chunk_len_percentage_tolerance: If there are leftover segments whose lengths are less than window_size, use them
            if they comprise at least the fraction of window_size specified by chunk_len_percentage_tolerance

        frequency_cutoff: the frequency at which to cut off the coherence when computing the normal mutual information

        tanh_transform: whether to transform the coherences when computing the upper and lower bounds, supposedly
            improves the estimate of variance.
    """

    minlen = min(len(s1), len(s2))
    if s1.shape != s2.shape:
        s1 = s1[:minlen]
        s2 = s2[:minlen]

    sample_length_bins = min(len(s1), int(window_size * sample_rate))

    #compute DPSS tapers for signals
    NW = int(window_size * bandwidth)
    K = 2 * NW - 1
    #print 'compute_coherence: NW=%d, K=%d' % (NW, K)
    tapers, eigs = ntalg.dpss_windows(sample_length_bins, NW, K)
    if debug:
        print('[compute_coherence] bandwidth=%0.1f, # of tapers: %d' % (
            bandwidth, len(eigs)))

    #break signal into chunks and estimate coherence for each chunk
    nchunks = int(np.floor(len(s1) / float(sample_length_bins)))
    nleft = len(s1) % sample_length_bins
    if nleft > 0:
        nchunks += 1
    #print 'sample_length_bins=%d, # of chunks:%d, # samples in last chunk: %d' % (sample_length_bins, nchunks, nleft)
    coherence_estimates = list()
    for k in range(nchunks):
        s = k * sample_length_bins
        e = min(len(s1), s + sample_length_bins)
        chunk_len = e - s
        chunk_percentage = chunk_len / float(sample_length_bins)
        if chunk_percentage < chunk_len_percentage_tolerance:
            #don't compute coherence for a chunk whose length is less than a certain percentage of sample_length_bins
            continue
        s1_chunk = np.zeros([sample_length_bins])
        s2_chunk = np.zeros([sample_length_bins])
        s1_chunk[:chunk_len] = s1[s:e]
        s2_chunk[:chunk_len] = s2[s:e]

        #taper the signals
        s1_tap = tapers * s1_chunk
        s2_tap = tapers * s2_chunk

        #compute fft of tapered signals
        s1_fft = fftpack.fft(s1_tap, axis=1)
        s2_fft = fftpack.fft(s2_tap, axis=1)

        #compute adaptive weights for each taper
        w1, nu1 = ntutils.adaptive_weights(s1_fft, eigs, sides='onesided')
        w2, nu2 = ntutils.adaptive_weights(s2_fft, eigs, sides='onesided')

        #compute cross spectral density
        sxy = ntalg.mtm_cross_spectrum(s1_fft,
                                       s2_fft, (w1, w2),
                                       sides='onesided')

        #compute the individual power spectra
        sxx = ntalg.mtm_cross_spectrum(s1_fft, s1_fft, w1, sides='onesided')
        syy = ntalg.mtm_cross_spectrum(s2_fft, s2_fft, w2, sides='onesided')

        #compute coherence
        coherence = np.abs(sxy)**2 / (sxx * syy)
        coherence_estimates.append(coherence)

    #compute variance
    coherence_estimates = np.array(coherence_estimates)

    if tanh_transform:
        coherence_estimates = np.arctanh(coherence_estimates)

    coherence_variance = np.zeros([coherence_estimates.shape[1]])
    coherence_mean = coherence_estimates.mean(axis=0)
    #mean subtract and square
    cv = np.sum((coherence_estimates - coherence_mean)**2, axis=0)
    coherence_variance[:] = (1.0 - 1.0 / nchunks) * cv

    if tanh_transform:
        coherence_variance = np.tanh(coherence_variance)
        coherence_mean = np.tanh(coherence_mean)

    #compute frequencies
    sampint = 1.0 / sample_rate
    L = sample_length_bins // 2 + 1
    freq = np.linspace(0, 1 / (2 * sampint), L)

    #compute upper and lower bounds
    coherence_lower = coherence_mean - 2 * np.sqrt(coherence_variance)
    coherence_upper = coherence_mean + 2 * np.sqrt(coherence_variance)

    cdata = CoherenceData(frequency_cutoff=frequency_cutoff)
    cdata.coherence = coherence_mean
    cdata.coherence_lower = coherence_lower
    cdata.coherence_upper = coherence_upper
    cdata.frequency = freq
    cdata.sample_rate = sample_rate

    return cdata
Code example #15
File: coherence.py  Project: fedeadolfi/soundsig
def compute_coherence_original(s1,
                               s2,
                               sample_rate,
                               bandwidth,
                               jackknife=False,
                               tanh_transform=False):
    """
        An implementation of computing the coherence. Don't use this.
    """

    minlen = min(len(s1), len(s2))
    if s1.shape != s2.shape:
        s1 = s1[:minlen]
        s2 = s2[:minlen]

    window_length = len(s1) / sample_rate
    window_length_bins = int(window_length * sample_rate)

    #compute DPSS tapers for signals
    NW = int(window_length * bandwidth)
    K = 2 * NW - 1
    print('compute_coherence: NW=%d, K=%d' % (NW, K))
    tapers, eigs = ntalg.dpss_windows(window_length_bins, NW, K)

    njn = len(eigs)
    jn_indices = [list(range(njn))]
    #compute jackknife indices
    if jackknife:
        jn_indices = list()
        for i in range(len(eigs)):
            jn = list(range(len(eigs)))
            jn.remove(i)
            jn_indices.append(jn)

    #taper the signals
    s1_tap = tapers * s1
    s2_tap = tapers * s2

    #compute fft of tapered signals
    s1_fft = fftpack.fft(s1_tap, axis=1)
    s2_fft = fftpack.fft(s2_tap, axis=1)

    #compute adaptive weights for each taper
    w1, nu1 = ntutils.adaptive_weights(s1_fft, eigs, sides='onesided')
    w2, nu2 = ntutils.adaptive_weights(s2_fft, eigs, sides='onesided')

    coherence_estimates = list()
    for jn in jn_indices:

        #compute cross spectral density
        sxy = ntalg.mtm_cross_spectrum(s1_fft[jn, :],
                                       s2_fft[jn, :], (w1[jn], w2[jn]),
                                       sides='onesided')

        #compute the individual power spectra
        sxx = ntalg.mtm_cross_spectrum(s1_fft[jn, :],
                                       s1_fft[jn, :],
                                       w1[jn],
                                       sides='onesided')
        syy = ntalg.mtm_cross_spectrum(s2_fft[jn, :],
                                       s2_fft[jn, :],
                                       w2[jn],
                                       sides='onesided')

        #compute coherence
        coherence = np.abs(sxy)**2 / (sxx * syy)
        coherence_estimates.append(coherence)

    #compute variance
    coherence_estimates = np.array(coherence_estimates)
    coherence_variance = np.zeros([coherence_estimates.shape[1]])
    coherence_mean = coherence_estimates[0]
    if jackknife:
        coherence_mean = coherence_estimates.mean(axis=0)
        #mean subtract and square
        cv = np.sum((coherence_estimates - coherence_mean)**2, axis=0)
        coherence_variance[:] = (1.0 - 1.0 / njn) * cv

    #compute frequencies
    sampint = 1.0 / sample_rate
    L = minlen // 2 + 1
    freq = np.linspace(0, 1 / (2 * sampint), L)

    #compute upper and lower bounds
    cmean = coherence_mean
    coherence_lower = cmean - 2 * np.sqrt(coherence_variance)
    coherence_upper = cmean + 2 * np.sqrt(coherence_variance)

    cdata = CoherenceData()
    cdata.coherence = coherence_mean
    cdata.coherence_lower = coherence_lower
    cdata.coherence_upper = coherence_upper
    cdata.frequency = freq
    cdata.sample_rate = sample_rate

    return cdata
Code example #16
File: array_analysis.py  Project: ProjectISP/ISP
    def __vespa_az(self, st):
        def find_nearest(array, value):

            idx, val = min(enumerate(array), key=lambda x: abs(x[1] - value))
            return idx, val

        sides = 'onesided'
        pi = math.pi
        st.sort()
        n = len(st)
        for i in range(n):
            coords = self.inv.get_coordinates(st[i].id)
            st[i].stats.coordinates = AttribDict({
                'latitude':
                coords['latitude'],
                'elevation':
                coords['elevation'],
                'longitude':
                coords['longitude']
            })

        coord = get_geometry(st, coordsys='lonlat', return_center=True)

        tr = st[0]
        win = len(tr.data)
        if (win % 2) == 0:
            nfft = win // 2 + 1
        else:
            nfft = (win + 1) // 2

        nr = st.count()  # number of stations
        delta = st[0].stats.delta
        fs = 1 / delta
        fn = fs / 2
        freq = np.arange(0, fn, fn / nfft)

        value1, freq1 = find_nearest(freq, self.linf)
        value2, freq2 = find_nearest(freq, self.lsup)
        df = value2 - value1
        m = np.zeros((win, nr))

        WW = np.hamming(int(win))
        WW = np.transpose(WW)
        for i in range(nr):
            tr = st[i]
            if self.method == "FK":
                m[:, i] = (tr.data - np.mean(tr.data)) * WW
            else:
                m[:, i] = (tr.data - np.mean(tr.data))
        pdata = np.transpose(m)

        #####Coherence######
        NW = 2  # the time-bandwidth product; a good choice is 2-3
        K = 2 * NW - 1
        tapers, eigs = alg.dpss_windows(win, NW, K)
        tdata = tapers[None, :, :] * pdata[:, None, :]
        tspectra = fftpack.fft(tdata)

        w = np.empty((nr, int(K), int(nfft)))
        for i in range(nr):
            w[i], _ = utils.adaptive_weights(tspectra[i], eigs, sides=sides)

        Cx = np.ones((nr, nr, df), dtype=np.complex128)

        if self.method == "MTP.COHERENCE":
            for i in range(nr):
                for j in range(nr):
                    sxy = alg.mtm_cross_spectrum(tspectra[i], tspectra[j],
                                                 (w[i], w[j]),
                                                 sides='onesided')
                    sxx = alg.mtm_cross_spectrum(tspectra[i],
                                                 tspectra[i],
                                                 w[i],
                                                 sides='onesided')
                    syy = alg.mtm_cross_spectrum(tspectra[j],
                                                 tspectra[j],
                                                 w[j],
                                                 sides='onesided')
                    s = sxy / np.sqrt((sxx * syy))
                    cxcohe = s[value1:value2]
                    Cx[i, j, :] = cxcohe

        ####Calculates Conventional FK-power  ##without normalization
        if self.method == "FK":
            for i in range(nr):
                for j in range(nr):
                    A = np.fft.rfft(m[:, i])
                    B = np.fft.rfft(m[:, j])
                    #Power
                    #out = A * np.conjugate(B)

                    #Relative Power
                    den = np.absolute(A) * np.absolute(np.conjugate(B))
                    out = (A * np.conjugate(B)) / den

                    cxcohe = out[value1:value2]
                    Cx[i, j, :] = cxcohe

        r = np.zeros((nr, 2))
        S = np.zeros((1, 2))
        Pow = np.zeros((360, df))
        for n in range(nr):
            r[n, :] = coord[n][0:2]

        freq = freq[value1:value2]

        rad = np.pi / 180

        slow_range = np.linspace(0, self.slow, 360)

        for j in range(360):

            ang = self.azimuth2mathangle(self.baz)
            S[0, 0] = slow_range[j] * np.cos(rad * ang)
            S[0, 1] = slow_range[j] * np.sin(rad * ang)

            k = (S * r)
            K = np.sum(k, axis=1)
            n = 0
            for f in freq:
                A = np.exp(-1j * 2 * pi * f * K)
                B = np.conjugate(np.transpose(A))
                D = np.matmul(B, Cx[:, :, n]) / nr
                P = np.matmul(D, A) / nr
                Pow[j, n] = np.abs(P)
                n = n + 1

        Pow = np.mean(Pow, axis=1)

        return Pow
Code example #17
File: array_analysis.py  Project: ProjectISP/ISP
    def FKCoherence(self, st, inv, DT, linf, lsup, slim, win_len, sinc,
                    method):
        def find_nearest(array, value):

            idx, val = min(enumerate(array), key=lambda x: abs(x[1] - value))
            return idx, val

        sides = 'onesided'
        pi = math.pi

        smax = slim
        smin = -1 * smax
        Sx = np.arange(smin, smax, sinc)[np.newaxis]
        Sy = np.arange(smin, smax, sinc)[np.newaxis]
        nx = ny = len(Sx[0])
        Sy = np.fliplr(Sy)

        #####Convert start from Gregorian ordinal to actual date###############
        Time = DT
        Time = Time - int(Time)
        d = date.fromordinal(int(DT))
        date1 = d.isoformat()
        H = (Time * 24)
        H1 = int(H)  # hours
        minutes = (H - int(H)) * 60
        minutes1 = int(minutes)
        seconds = (minutes - int(minutes)) * 60
        H1 = str(H1).zfill(2)
        minutes1 = str(minutes1).zfill(2)
        seconds = "%.2f" % seconds
        seconds = str(seconds).zfill(2)
        DATE = date1 + "T" + str(H1) + minutes1 + seconds
        t1 = UTCDateTime(DATE)
        ########End conversion###############################

        st.trim(starttime=t1, endtime=t1 + win_len)
        st.sort()
        n = len(st)
        for i in range(n):
            coords = inv.get_coordinates(st[i].id)
            st[i].stats.coordinates = AttribDict({
                'latitude':
                coords['latitude'],
                'elevation':
                coords['elevation'],
                'longitude':
                coords['longitude']
            })

        coord = get_geometry(st, coordsys='lonlat', return_center=True)

        tr = st[0]
        win = len(tr.data)
        if (win % 2) == 0:
            nfft = win // 2 + 1
        else:
            nfft = (win + 1) // 2

        nr = st.count()  # number of stations
        delta = st[0].stats.delta
        fs = 1 / delta
        fn = fs / 2
        freq = np.arange(0, fn, fn / nfft)
        value1, freq1 = find_nearest(freq, linf)
        value2, freq2 = find_nearest(freq, lsup)
        df = value2 - value1
        m = np.zeros((win, nr))

        WW = np.hamming(int(win))
        WW = np.transpose(WW)
        for i in range(nr):
            tr = st[i]
            if method == "FK":
                m[:, i] = (tr.data - np.mean(tr.data)) * WW
            else:
                m[:, i] = (tr.data - np.mean(tr.data))
        pdata = np.transpose(m)

        #####Coherence######
        NW = 2  # the time-bandwidth product; a good choice is 2-3
        K = 2 * NW - 1
        tapers, eigs = alg.dpss_windows(win, NW, K)
        tdata = tapers[None, :, :] * pdata[:, None, :]
        tspectra = fftpack.fft(tdata)

        w = np.empty((nr, int(K), int(nfft)))
        for i in range(nr):
            w[i], _ = utils.adaptive_weights(tspectra[i], eigs, sides=sides)

        nseq = nr
        L = int(nfft)
        #csd_mat = np.zeros((nseq, nseq, L), 'D')
        #psd_mat = np.zeros((2, nseq, nseq, L), 'd')
        coh_mat = np.zeros((nseq, nseq, L), 'd')
        #coh_var = np.zeros_like(coh_mat)
        Cx = np.ones((nr, nr, df), dtype=np.complex128)

        if method == "MTP.COHERENCE":
            for i in range(nr):
                for j in range(nr):
                    sxy = alg.mtm_cross_spectrum(tspectra[i], tspectra[j],
                                                 (w[i], w[j]),
                                                 sides='onesided')
                    sxx = alg.mtm_cross_spectrum(tspectra[i],
                                                 tspectra[i],
                                                 w[i],
                                                 sides='onesided')
                    syy = alg.mtm_cross_spectrum(tspectra[j],
                                                 tspectra[j],
                                                 w[j],
                                                 sides='onesided')
                    s = sxy / np.sqrt((sxx * syy))
                    cxcohe = s[value1:value2]
                    Cx[i, j, :] = cxcohe

        # Calculates Conventional FK-power
        if method == "FK":
            for i in range(nr):
                for j in range(nr):
                    A = np.fft.rfft(m[:, i])
                    B = np.fft.rfft(m[:, j])
                    #Relative Power
                    den = np.absolute(A) * np.absolute(np.conjugate(B))
                    out = (A * np.conjugate(B)) / den
                    cxcohe = out[value1:value2]
                    Cx[i, j, :] = cxcohe

        r = np.zeros((nr, 2), dtype=np.complex128)
        S = np.zeros((1, 2), dtype=np.complex128)
        Pow = np.zeros((len(Sx[0]), len(Sy[0]), df))
        for n in range(nr):
            r[n, :] = coord[n][0:2]

        freq = freq[value1:value2]

        for i in range(ny):
            for j in range(nx):
                S[0, 0] = Sx[0][j]
                S[0, 1] = Sy[0][i]
                k = (S * r)
                K = np.sum(k, axis=1)
                n = 0
                for f in freq:
                    A = np.exp(-1j * 2 * pi * f * K)
                    B = np.conjugate(np.transpose(A))
                    D = np.matmul(B, Cx[:, :, n]) / nr
                    P = np.matmul(D, A) / nr
                    Pow[i, j, n] = np.abs(P)
                    n = n + 1
        Pow = np.mean(Pow, axis=2)
        #Pow = Pow / len(freq)
        Pow = np.fliplr(Pow)
        x = y = np.linspace(smin, smax, nx)

        nn = len(x)
        maximum_power = np.where(Pow == np.amax(Pow))
        Sxpow = (maximum_power[1] - nn / 2) * sinc
        Sypow = (maximum_power[0] - nn / 2) * sinc

        return Pow, Sxpow, Sypow, coord
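
Both ProjectISP/ISP methods evaluate conventional beam power in their slowness loops: for a slowness vector S and station offsets r, the steering vector is exp(-2j * pi * f * (S . r)) and the power is a normalized quadratic form of the cross-spectral matrix Cx. A standalone sketch of that inner step (a hypothetical helper, not part of the ISP code):

import numpy as np

def beam_power(Cx_f, r, S, f):
    # Cx_f: (nr, nr) cross-spectral matrix at frequency f
    # r:    (nr, 2) station offsets; S: (2,) slowness vector
    nr = r.shape[0]
    K = r @ S                            # S . r_n for each station
    A = np.exp(-1j * 2 * np.pi * f * K)  # plane-wave steering vector
    return np.abs(A.conj() @ Cx_f @ A) / nr ** 2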