Example #1
# Imports assumed from the surrounding test module (nitime-style layout):
import numpy as np
import numpy.testing as npt
from nitime import utils


def test_zero_pad():
    """
    Test the zero_pad function
    """
    # Freely assume that time is the last dimension:
    ts1 = np.empty((64, 64, 35, 32))
    NFFT = 64 
    zp1 = utils.zero_pad(ts1, NFFT)
    npt.assert_equal(zp1.shape[-1], NFFT)

    # Try this with something with only 1 dimension:
    ts2 = np.empty(64)
    zp2 = utils.zero_pad(ts2, NFFT)
    npt.assert_equal(zp2.shape[-1], NFFT)
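
The assertions above only pin down the output shape. For reference, a minimal sketch of the padding behavior they imply, assuming utils.zero_pad pads the last (time) axis with zeros up to a multiple of NFFT (an illustration, not nitime's actual implementation):

import numpy as np

def zero_pad_sketch(time_series, NFFT):
    # Hypothetical helper for illustration only: pad the last axis with
    # zeros so that its length becomes a multiple of NFFT.
    n_time = time_series.shape[-1]
    n_pad = (-n_time) % NFFT
    pad_width = [(0, 0)] * (time_series.ndim - 1) + [(0, n_pad)]
    return np.pad(time_series, pad_width, mode='constant')

# Consistent with the assertions in test_zero_pad:
assert zero_pad_sketch(np.empty((64, 64, 35, 32)), 64).shape[-1] == 64
assert zero_pad_sketch(np.empty(64), 64).shape[-1] == 64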
Example #2
# Imports assumed from the surrounding module (nitime-style layout):
import numpy as np
from scipy import fftpack
from matplotlib import mlab
from nitime import utils


def cache_fft(time_series, ij, lb=0, ub=None,
                  method=None, prefer_speed_over_memory=False,
                  scale_by_freq=True):
    """compute and cache the windowed FFTs of the time_series, in such a way
    that computing the psd and csd of any combination of them can be done
    quickly.

    Parameters
    ----------

    time_series : float array
       An ndarray with time-series, where time is the last dimension

    ij : list of tuples
      Each tuple in this variable should contain a pair of indices of the
      form (i, j). The resulting cache will contain the FFTs of the
      time-series in the rows indexed by the unique elements of the union
      of i and j

    lb, ub : float
       Define a frequency band of interest, for which the FFT will be cached

    method : dict, optional
        See :func:`get_spectra` for details on how this is used. For this set
        of functions, 'this_method' has to be 'welch'

    prefer_speed_over_memory : bool, optional
        If True, the complex conjugate of each windowed FFT is cached as well

    scale_by_freq : bool, optional
        If True, the normalization factor includes the sampling rate, so that
        the resulting spectra are densities (per Hz)


    Returns
    -------
    freqs, cache

        where: cache =
             {'FFT_slices':FFT_slices,'FFT_conj_slices':FFT_conj_slices,
             'norm_val':norm_val}

    Notes
    -----

    - For these functions, only the Welch windowed periodogram ('welch') is
      available.

    - Detrending the input is not offered here, in order to avoid the
      overhead of a no-op function call.

    """
    if method is None:
        method = {'this_method': 'welch'}  # The default

    this_method = method.get('this_method', 'welch')

    if this_method == 'welch':
        NFFT = method.get('NFFT', 64)
        Fs = method.get('Fs', 2 * np.pi)
        window = method.get('window', mlab.window_hanning)
        n_overlap = method.get('n_overlap', int(np.ceil(NFFT / 2.0)))
    else:
        e_s = "For cache_fft, spectral estimation method must be welch"
        raise ValueError(e_s)
    time_series = utils.zero_pad(time_series, NFFT)

    # The shape of the zero-padded version:
    n_channels, n_time_points = time_series.shape

    # get all the unique channels in time_series that we are interested in by
    # checking the ij tuples
    all_channels = set()
    for i, j in ij:
        all_channels.add(i)
        all_channels.add(j)

    # for real time_series, ignore the negative frequencies
    if np.iscomplexobj(time_series):
        n_freqs = NFFT
    else:
        n_freqs = NFFT // 2 + 1

    # Which frequencies are represented:
    freqs = utils.get_freqs(Fs, NFFT)

    # If there are bounds, limit the calculation to within that band,
    # potentially including the DC component:
    lb_idx, ub_idx = utils.get_bounds(freqs, lb, ub)

    n_freqs = ub_idx - lb_idx
    # Make the window:
    if np.iterable(window):
        assert len(window) == NFFT
        window_vals = window
    else:
        window_vals = window(np.ones(NFFT, time_series.dtype))

    # Each FFT needs to be normalized by the square of the norm of the window
    # and, for consistency with newer versions of mlab.csd (which, in turn,
    # are consistent with Matlab), also by the sampling rate:

    if scale_by_freq:
        # This is the normalization factor for one-sided estimation, taking
        # into account the sampling rate. This makes the PSD a density
        # function, with units of power per Hz, so that integrating over
        # frequency recovers the mean-square of the signal.
        norm_val = (np.abs(window_vals) ** 2).sum() * (Fs / 2)

    else:
        norm_val = (np.abs(window_vals) ** 2).sum() / 2

    # Cache the FFT of every windowed NFFT-length segment of every channel
    # (no detrending is applied). If prefer_speed_over_memory, cache the
    # conjugate as well.

    i_times = list(range(0, n_time_points - NFFT + 1, NFFT - n_overlap))
    n_slices = len(i_times)
    FFT_slices = {}
    FFT_conj_slices = {}

    for i_channel in all_channels:
        Slices = np.zeros((n_slices, n_freqs), dtype=complex)
        for iSlice in range(n_slices):
            thisSlice = time_series[i_channel,
                                    i_times[iSlice]:i_times[iSlice] + NFFT]

            # Windowing (no detrending):
            thisSlice = window_vals * thisSlice
            # The FFT of the windowed slice, restricted to the band of interest:
            Slices[iSlice, :] = fftpack.fft(thisSlice)[lb_idx:ub_idx]

        FFT_slices[i_channel] = Slices

        if prefer_speed_over_memory:
            FFT_conj_slices[i_channel] = np.conjugate(Slices)

    cache = {'FFT_slices': FFT_slices, 'FFT_conj_slices': FFT_conj_slices,
             'norm_val': norm_val, 'Fs': Fs, 'scale_by_freq': scale_by_freq}

    return freqs, cache
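
A minimal usage sketch, assuming the imports above and that utils.zero_pad, utils.get_freqs and utils.get_bounds behave as the nitime utilities do (a hedged illustration, not library documentation):

# Two channels of synthetic data, time along the last axis:
data = np.random.randn(2, 1024)
ij = [(0, 1)]                            # cache channels 0 and 1
welch = {'this_method': 'welch', 'NFFT': 128, 'Fs': 250.0}
freqs, cache = cache_fft(data, ij, method=welch)
print(freqs.shape)                       # full frequency grid from utils.get_freqs
print(cache['FFT_slices'][0].shape)      # (n_slices, n_freqs), complex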
Example #3
# Imports assumed from the surrounding module (nitime-style layout):
import numpy as np
from scipy import fftpack
from matplotlib import mlab
from nitime import utils


def cache_fft(time_series,
              ij,
              lb=0,
              ub=None,
              method=None,
              prefer_speed_over_memory=False,
              scale_by_freq=True):
    """compute and cache the windowed FFTs of the time_series, in such a way
    that computing the psd and csd of any combination of them can be done
    quickly.

    Parameters
    ----------

    time_series : float array
       An ndarray with time-series, where time is the last dimension

    ij : list of tuples
      Each tuple in this variable should contain a pair of indices of the
      form (i, j). The resulting cache will contain the FFTs of the
      time-series in the rows indexed by the unique elements of the union
      of i and j

    lb, ub : float
       Define a frequency band of interest, for which the FFT will be cached

    method : dict, optional
        See :func:`get_spectra` for details on how this is used. For this set
        of functions, 'this_method' has to be 'welch'

    prefer_speed_over_memory : bool, optional
        If True, the complex conjugate of each windowed FFT is cached as well

    scale_by_freq : bool, optional
        If True, the normalization factor includes the sampling rate, so that
        the resulting spectra are densities (per Hz)


    Returns
    -------
    freqs, cache

        where: cache =
             {'FFT_slices':FFT_slices,'FFT_conj_slices':FFT_conj_slices,
             'norm_val':norm_val}

    Notes
    -----

    - For these functions, only the Welch windowed periodogram ('welch') is
      available.

    - Detrending the input is not offered here, in order to avoid the
      overhead of a no-op function call.

    """
    if method is None:
        method = {'this_method': 'welch'}  # The default

    this_method = method.get('this_method', 'welch')

    if this_method == 'welch':
        NFFT = method.get('NFFT', 64)
        Fs = method.get('Fs', 2 * np.pi)
        window = method.get('window', mlab.window_hanning)
        n_overlap = method.get('n_overlap', int(np.ceil(NFFT / 2.0)))
    else:
        e_s = "For cache_fft, spectral estimation method must be welch"
        raise ValueError(e_s)
    time_series = utils.zero_pad(time_series, NFFT)

    # The shape of the zero-padded version:
    n_channels, n_time_points = time_series.shape

    # get all the unique channels in time_series that we are interested in by
    # checking the ij tuples
    all_channels = set()
    for i, j in ij:
        all_channels.add(i)
        all_channels.add(j)

    # for real time_series, ignore the negative frequencies
    if np.iscomplexobj(time_series):
        n_freqs = NFFT
    else:
        n_freqs = NFFT // 2 + 1

    # Which frequencies
    freqs = utils.get_freqs(Fs, NFFT)

    # If there are bounds, limit the calculation to within that band,
    # potentially including the DC component:
    lb_idx, ub_idx = utils.get_bounds(freqs, lb, ub)

    n_freqs = ub_idx - lb_idx
    # Make the window:
    if np.iterable(window):
        assert len(window) == NFFT
        window_vals = window
    else:
        window_vals = window(np.ones(NFFT, time_series.dtype))

    # Each FFT needs to be normalized by the square of the norm of the window
    # and, for consistency with newer versions of mlab.csd (which, in turn,
    # are consistent with Matlab), also by the sampling rate:

    if scale_by_freq:
        # This is the normalization factor for one-sided estimation, taking
        # into account the sampling rate. This makes the PSD a density
        # function, with units of power per Hz, so that integrating over
        # frequency recovers the mean-square of the signal.
        norm_val = (np.abs(window_vals)**2).sum() * (Fs / 2)

    else:
        norm_val = (np.abs(window_vals)**2).sum() / 2

    # Cache the FFT of every windowed NFFT-length segment of every channel
    # (no detrending is applied). If prefer_speed_over_memory, cache the
    # conjugate as well.

    i_times = list(range(0, n_time_points - NFFT + 1, NFFT - n_overlap))
    n_slices = len(i_times)
    FFT_slices = {}
    FFT_conj_slices = {}

    for i_channel in all_channels:
        Slices = np.zeros((n_slices, n_freqs), dtype=complex)
        for iSlice in range(n_slices):
            thisSlice = time_series[i_channel,
                                    i_times[iSlice]:i_times[iSlice] + NFFT]

            # Windowing:
            thisSlice = window_vals * thisSlice  # No detrending
            # Derive the fft for that slice:
            Slices[iSlice, :] = (fftpack.fft(thisSlice)[lb_idx:ub_idx])

        FFT_slices[i_channel] = Slices

        if prefer_speed_over_memory:
            FFT_conj_slices[i_channel] = np.conjugate(Slices)

    cache = {
        'FFT_slices': FFT_slices,
        'FFT_conj_slices': FFT_conj_slices,
        'norm_val': norm_val,
        'Fs': Fs,
        'scale_by_freq': scale_by_freq
    }

    return freqs, cache
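
For completeness, a sketch of how the returned cache might be reduced to a Welch-style PSD for one channel, following the normalization described in the comments above (a hedged illustration; the library's own cache-consuming functions may differ in details such as the handling of the DC and Nyquist bins):

def psd_from_cache_sketch(cache, channel):
    # Hypothetical helper, not a library API: average the squared magnitude
    # of the cached, windowed FFT slices and divide by the cached
    # normalization factor.
    slices = cache['FFT_slices'][channel]          # (n_slices, n_freqs), complex
    return np.mean(np.abs(slices) ** 2, axis=0) / cache['norm_val']

freqs, cache = cache_fft(np.random.randn(2, 512), [(0, 1)],
                         method={'this_method': 'welch', 'NFFT': 128})
print(psd_from_cache_sketch(cache, 0).shape)       # one value per cached frequency bin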