Example #1
    def confidence_interval(self):
        """The size of the 1 - alpha confidence interval"""
        # Jackknife an estimate of the coherence variance for every channel
        # pair; only the lower triangle is computed (j < i, so the diagonal
        # never needs guarding), since the matrix is symmetric
        coh_var = np.zeros(
            (self.input.data.shape[0], self.input.data.shape[0], self._L), 'd')
        for i in range(self.input.data.shape[0]):
            for j in range(i):
                coh_var[i, j] = tsu.jackknifed_coh_variance(
                    self.spectra[i],
                    self.spectra[j],
                    self.eigs,
                    adaptive=self._adaptive)

        # Mirror the lower triangle into the upper triangle
        idx = triu_indices(self.input.data.shape[0], 1)
        coh_var[idx[0], idx[1], ...] = coh_var[idx[1], idx[0], ...].conj()

        # Variance-stabilizing transform of the coherence, with
        # 2 * df - 2 degrees of freedom
        coh_mat_xform = tsu.normalize_coherence(self.coherence,
                                                2 * self.df - 2)

        # Lower and upper bounds from the t-distribution quantiles
        lb = coh_mat_xform + dist.t.ppf(self.alpha / 2,
                                        self.df - 1) * np.sqrt(coh_var)
        ub = coh_mat_xform + dist.t.ppf(1 - self.alpha / 2,
                                        self.df - 1) * np.sqrt(coh_var)

        # convert back to the unit interval with the normalizing function
        tsu.normal_coherence_to_unit(lb, 2 * self.df - 2, lb)
        tsu.normal_coherence_to_unit(ub, 2 * self.df - 2, ub)

        return ub - lb
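
This method appears to come from nitime's MTCoherenceAnalyzer (nitime.analysis), where it is exposed as a lazily computed property. A minimal usage sketch, assuming that class and illustrative random data (4 channels, 1024 samples):

import numpy as np
import nitime.timeseries as ts
import nitime.analysis as nta

data = np.random.randn(4, 1024)            # illustrative white noise
T = ts.TimeSeries(data, sampling_rate=1.0)
C = nta.MTCoherenceAnalyzer(T)

# lazily computed properties, accessed without parentheses
print(C.coherence.shape)                   # (4, 4, n_freqs)
print(C.confidence_interval.shape)         # same shape: width of the 1 - alpha CI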
Example #2
    def coherence(self):
        nrows = self.input.data.shape[0]
        psd_mat = np.zeros((2, nrows, nrows, self._L), 'd')
        coh_mat = np.zeros((nrows, nrows, self._L), 'd')

        # Magnitude-squared coherence, |Sxy|^2 / (Sxx * Syy), computed for
        # the lower triangle of channel pairs (j < i)
        for i in range(self.input.data.shape[0]):
            for j in range(i):
                sxy = tsa.mtm_cross_spectrum(
                    self.spectra[i],
                    self.spectra[j], (self.weights[i], self.weights[j]),
                    sides='onesided')
                sxx = tsa.mtm_cross_spectrum(self.spectra[i],
                                             self.spectra[i],
                                             self.weights[i],
                                             sides='onesided')
                # each auto-spectrum uses its own channel's weights
                syy = tsa.mtm_cross_spectrum(self.spectra[j],
                                             self.spectra[j],
                                             self.weights[j],
                                             sides='onesided')
                psd_mat[0, i, j] = sxx
                psd_mat[1, i, j] = syy
                coh_mat[i, j] = np.abs(sxy) ** 2
                coh_mat[i, j] /= (sxx * syy)

        # Mirror the lower triangle into the upper triangle
        idx = triu_indices(self.input.data.shape[0], 1)
        coh_mat[idx[0], idx[1], ...] = coh_mat[idx[1], idx[0], ...].conj()

        return coh_mat
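
The inner loop computes the magnitude-squared coherence |Sxy|^2 / (Sxx * Syy). A self-contained numpy sketch of the same formula on raw (untapered) periodograms shows why the multitaper averaging above is essential: with a single realization and no averaging, the estimate is identically 1.

import numpy as np

rng = np.random.default_rng(0)
x = rng.standard_normal(256)
y = 0.7 * x + 0.3 * rng.standard_normal(256)

X, Y = np.fft.rfft(x), np.fft.rfft(y)
sxy = X * Y.conj()
sxx = (X * X.conj()).real
syy = (Y * Y.conj()).real

# |X Y*|^2 == |X|^2 |Y|^2 for a single realization, so this is all ones
coh = np.abs(sxy) ** 2 / (sxx * syy)
print(np.allclose(coh, 1.0))   # True -- averaging over tapers breaks this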
Example #3
# Module-level imports assumed here (following nitime.algorithms.spectral,
# from which this function appears to be taken):
#   import numpy as np
#   from scipy import fftpack
#   from nitime import utils
#   from nitime.utils import triu_indices, tril_indices
#   (dpss_windows and mtm_cross_spectrum are defined in the same module)
def multi_taper_csd(s, Fs=2 * np.pi, BW=None, low_bias=True,
                    adaptive=False, sides='default'):
    """Returns an estimate of the Cross Spectral Density (CSD) function
    between all (N choose 2) pairs of timeseries in s, using the multitaper
    method. If the NW product, or the BW and Fs in Hz, are not specified by
    the user, a bandwidth of 4 times the fundamental frequency,
    corresponding to NW = 4, will be used.

    Parameters
    ----------
    s : ndarray
        An array of sampled random processes, where the time axis is
        assumed to be on the last axis. If ndim > 2, the number of time
        series to compare will still be taken as prod(s.shape[:-1])

    Fs : float
        Sampling rate of the signal

    BW : float
        The bandwidth of the windowing function determines the number of
        tapers to use. This parameter represents a trade-off between
        frequency resolution (lower main-lobe bandwidth for the taper) and
        variance reduction (higher bandwidth and more averaged estimates).

    adaptive : {True, False}
       Use adaptive weighting to combine spectra
    low_bias : {True, False}
       Rather than use 2NW tapers, only use the tapers that have better than
       90% spectral concentration within the bandwidth (still using
       a maximum of 2NW tapers)
    sides : str (optional)   [ 'default' | 'onesided' | 'twosided' ]
         Indicates whether to return a one-sided or two-sided spectrum.
         For complex-valued inputs, the default is two-sided; for
         real-valued inputs, the default is one-sided.

    Returns
    -------
    (freqs, csd_est) : ndarrays
        The frequency points vector and the estimated CSD. The CSD{i,j}(f)
        are returned in a square "matrix" of vectors holding Sij(f). For an
        input array of shape (M, N), the output is (M, M, N).
    """
    # have last axis be time series for now
    N = s.shape[-1]
    rest_of = s.shape[:-1]
    M = int(np.prod(rest_of))

    s = s.reshape(M, N)
    # de-mean this sucker
    s = utils.remove_bias(s, axis=-1)

    # Get the number of tapers from the sampling rate and the bandwidth:
    if BW is not None:
        NW = BW / (2 * Fs) * N
    else:
        NW = 4

    Kmax = int(2 * NW)

    dpss, eigvals = dpss_windows(N, NW, Kmax)
    if low_bias:
        keepers = (eigvals > 0.9)
        dpss = dpss[keepers]
        eigvals = eigvals[keepers]
        Kmax = len(dpss)

    # if the time series is a complex vector, a one sided PSD is invalid:
    if (sides == 'default' and np.iscomplexobj(s)) or sides == 'twosided':
        sides = 'twosided'
    elif sides in ('default', 'onesided'):
        sides = 'onesided'

    sig_sl = [slice(None)] * len(s.shape)
    sig_sl.insert(len(s.shape) - 1, np.newaxis)

    # tapered.shape is (M, Kmax, N); the slice list must be a tuple for
    # indexing in modern numpy
    tapered = s[tuple(sig_sl)] * dpss

    # compute the y_{i,k}(f)
    tapered_spectra = fftpack.fft(tapered)

    # compute the cross-spectral density functions
    last_freq = N // 2 + 1 if sides == 'onesided' else N

    if adaptive:
        w = np.empty(tapered_spectra.shape[:-1] + (last_freq,))
        nu = np.empty((M, last_freq))
        for i in range(M):
            w[i], nu[i] = utils.adaptive_weights(
                tapered_spectra[i], eigvals, sides=sides
                )
    else:
        weights = np.sqrt(eigvals).reshape(Kmax, 1)

    csdfs = np.empty((M, M, last_freq), 'D')
    for i in range(M):
        if adaptive:
            wi = w[i]
        else:
            wi = weights
        for j in range(i + 1):
            if adaptive:
                wj = w[j]
            else:
                wj = weights
            ti = tapered_spectra[i]
            tj = tapered_spectra[j]
            csdfs[i, j] = mtm_cross_spectrum(ti, tj, (wi, wj), sides=sides)

    upper_idc = triu_indices(M, k=1)
    lower_idc = tril_indices(M, k=-1)
    csdfs[upper_idc] = csdfs[lower_idc].conj()

    if sides == 'onesided':
        freqs = np.linspace(0, Fs / 2, N // 2 + 1)
    else:
        freqs = np.linspace(0, Fs, N, endpoint=False)

    return freqs, csdfs
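
A minimal call sketch, assuming this function is nitime.algorithms.multi_taper_csd; the channel count and sampling rate below are arbitrary:

import numpy as np
from nitime.algorithms import multi_taper_csd

data = np.random.randn(3, 512)                # 3 channels, 512 samples
freqs, csd = multi_taper_csd(data, Fs=200.0)  # defaults: NW = 4, one-sided
print(freqs.shape, csd.shape)                 # (257,) (3, 3, 257)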
Example #4
# Same module-level imports as in Example #3 are assumed (numpy as np,
# scipy.fftpack, and triu_indices / tril_indices from nitime.utils)
def periodogram_csd(s, Fs=2 * np.pi, Sk=None, NFFT=None, sides='default',
                    normalize=True):
    """Takes an N-point periodogram estimate of all the cross spectral
    density functions between rows of s.

    The number of points N, or a precomputed FFT Sk may be provided. By
    default, the CSD function returned is normalized so that the integral of
    the PSD is equal to the mean squared amplitude (mean energy) of s (see
    Notes).

    Parameters
    ----------

    s : ndarray
        Signals for which to estimate the CSD, time dimension in the last axis

    Fs: float (optional)
       The sampling rate. Defaults to 2*pi

    Sk : ndarray (optional)
        Precomputed FFT of rows of s

    NFFT : int (optional)
        Indicates an N-point FFT where N != s.shape[-1]

    sides : str (optional)   [ 'default' | 'onesided' | 'twosided' ]
        Indicates whether to return a one-sided or two-sided spectrum.
        For complex-valued inputs, the default is two-sided; for
        real-valued inputs, the default is one-sided.

    normalize : boolean (optional)
        Normalizes the PSD

    Returns
    -------

    freqs, csd_est : ndarrays
        The frequency points vector and the estimated CSD. The CSD{i,j}(f)
        are returned in a square "matrix" of vectors holding Sij(f). For an
        input array that is reshaped to (M, N), the output is (M, M, N).

    Notes
    -----
    Setting dw = 2*PI/N, the integral over (-PI, PI) (or (0, PI)) of
    PSD/(2PI) will be nearly equal to sxy(0), where sxy is the
    cross-covariance function of s1(n), s2(n). By definition,
    sxy(0) = E{s1(n)s2*(n)} ~ (s1 * s2.conj()).mean()
    """
    s_shape = s.shape
    s.shape = (np.prod(s_shape[:-1]), s_shape[-1])
    # defining an Sk_loc is a little opaque, but it avoids having to
    # reset the shape of any user-given Sk later on
    if Sk is not None:
        Sk_shape = Sk.shape
        N = Sk.shape[-1]
        Sk_loc = Sk.reshape(np.prod(Sk_shape[:-1]), N)
    else:
        if NFFT is not None:
            N = NFFT
        else:
            N = s.shape[-1]
        Sk_loc = fftpack.fft(s, n=N)
    # reset s.shape
    s.shape = s_shape

    M = Sk_loc.shape[0]
    norm = float(s.shape[-1])

    # if the time series is a complex vector, a one sided PSD is invalid:
    if (sides == 'default' and np.iscomplexobj(s)) or sides == 'twosided':
        sides = 'twosided'
    elif sides in ('default', 'onesided'):
        sides = 'onesided'

    if sides == 'onesided':
        # putative Nyquist freq
        Fn = N // 2 + 1
        # last duplicate freq
        Fl = (N + 1) // 2
        csd_mat = np.empty((M, M, Fn), 'D')
        freqs = np.linspace(0, Fs / 2, Fn)
        for i in range(M):
            for j in range(i + 1):
                csd_mat[i, j, 0] = Sk_loc[i, 0] * Sk_loc[j, 0].conj()
                csd_mat[i, j, 1:Fl] = 2 * (Sk_loc[i, 1:Fl] *
                                           Sk_loc[j, 1:Fl].conj())
                if Fn > Fl:
                    csd_mat[i, j, Fn - 1] = (Sk_loc[i, Fn - 1] *
                                             Sk_loc[j, Fn - 1].conj())

    else:
        csd_mat = np.empty((M, M, N), 'D')
        # two-sided frequency grid spans [0, Fs)
        freqs = np.linspace(0, Fs, N, endpoint=False)
        for i in range(M):
            for j in range(i + 1):
                csd_mat[i, j] = Sk_loc[i] * Sk_loc[j].conj()
    if normalize:
        csd_mat /= norm

    upper_idc = triu_indices(M, k=1)
    lower_idc = tril_indices(M, k=-1)
    csd_mat[upper_idc] = csd_mat[lower_idc].conj()
    return freqs, csd_mat
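
The normalization described in the Notes is easy to check numerically. A sketch, assuming the function is importable as nitime.algorithms.periodogram_csd: with normalize=True and a two-sided spectrum, the mean over frequency of a diagonal CSD entry equals the mean squared amplitude of that signal (Parseval's theorem).

import numpy as np
from nitime.algorithms import periodogram_csd

s = np.random.randn(2, 1024)
freqs, csd = periodogram_csd(s, sides='twosided')

# Parseval: mean_f |S(f)|^2 / N == mean_n s(n)^2
print(np.allclose(csd[0, 0].real.mean(), (s[0] ** 2).mean()))   # True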