Example #1
def compound_sound(freqs, duration, samples_per_sec=None):
    """
    Generate a sound made out of several frequencies

    Parameters
    ---------
    freqs: list
        A list of frequencies to be included in the 
    
    """
    if samples_per_sec is None:
        samples_per_sec = 44100

    time = np.arange(0, duration * samples_per_sec)
    snd = np.zeros_like(time)

    for f in freqs:
        snd = snd + np.sin(time * f * (2 * np.pi) / samples_per_sec)

    # window the sound vector with a 50 ms raised cosine
    numAtten = int(np.round(samples_per_sec * .05))
    # don't window if requested sound is too short
    if len(snd) >= numAtten:
        snd[:numAtten // 2] *= window_hanning(np.ones(numAtten))[:numAtten // 2]
        snd[-(numAtten // 2):] *= window_hanning(
            np.ones(numAtten))[-(numAtten // 2):]

    # normalize
    snd = snd / np.max(np.abs(snd))

    return snd
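A minimal usage sketch for the snippet above, assuming it is pasted into a module together with these imports (the chord frequencies are purely illustrative):

import numpy as np
from matplotlib.mlab import window_hanning

# A 0.5 s A-major triad; the result is normalized to [-1, 1]
chord = compound_sound([440.0, 554.37, 659.26], duration=0.5)
print(chord.shape, float(chord.max()))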
Example #2
def compound_sound(freqs, duration, samples_per_sec=None):
    """
    Generate a sound made out of several frequencies

    Parameters
    ---------
    freqs: list
        A list of frequencies to be included in the 
    
    """
    if samples_per_sec is None:
        samples_per_sec = 44100

    time = np.arange(0, duration * samples_per_sec)
    snd = np.zeros_like(time)

    for f in freqs:
        snd = snd + np.sin(time * f * (2 * np.pi) / samples_per_sec)

    # window the sound vector with a 50 ms raised cosine
    numAtten = int(np.round(samples_per_sec * .05))
    # don't window if requested sound is too short
    if len(snd) >= numAtten:
        snd[:numAtten // 2] *= window_hanning(np.ones(numAtten))[:numAtten // 2]
        snd[-(numAtten // 2):] *= window_hanning(
            np.ones(numAtten))[-(numAtten // 2):]

    # normalize
    snd = snd / np.max(np.abs(snd))

    return snd
Example #3
def test_window():
    np.random.seed(0)
    n = 1000
    rand = np.random.standard_normal(n) + 100
    ones = np.ones(n)
    assert_array_equal(mlab.window_none(ones), ones)
    assert_array_equal(mlab.window_none(rand), rand)
    assert_array_equal(np.hanning(len(rand)) * rand, mlab.window_hanning(rand))
    assert_array_equal(np.hanning(len(ones)), mlab.window_hanning(ones))
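As the assertions above show, mlab.window_hanning(x) simply multiplies x by np.hanning(len(x)); a quick check:

import numpy as np
from matplotlib import mlab

x = np.ones(8)
print(mlab.window_hanning(x))  # the Hann taper itself, since x is all ones
print(np.hanning(8))           # identical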
Example #4
    def test_psd_windowarray_scale_by_freq(self):
        win = mlab.window_hanning(np.ones(self.NFFT_density_real))

        spec, fsp = mlab.psd(x=self.y,
                             NFFT=self.NFFT_density,
                             Fs=self.Fs,
                             noverlap=self.nover_density,
                             pad_to=self.pad_to_density,
                             sides=self.sides,
                             window=mlab.window_hanning)
        spec_s, fsp_s = mlab.psd(x=self.y,
                                 NFFT=self.NFFT_density,
                                 Fs=self.Fs,
                                 noverlap=self.nover_density,
                                 pad_to=self.pad_to_density,
                                 sides=self.sides,
                                 window=mlab.window_hanning,
                                 scale_by_freq=True)
        spec_n, fsp_n = mlab.psd(x=self.y,
                                 NFFT=self.NFFT_density,
                                 Fs=self.Fs,
                                 noverlap=self.nover_density,
                                 pad_to=self.pad_to_density,
                                 sides=self.sides,
                                 window=mlab.window_hanning,
                                 scale_by_freq=False)
        assert_array_equal(fsp, fsp_s)
        assert_array_equal(fsp, fsp_n)
        assert_array_equal(spec, spec_s)
        assert_allclose(spec_s * (win**2).sum(),
                        spec_n / self.Fs * win.sum()**2,
                        atol=1e-08)
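The final assert_allclose encodes how the two normalizations relate: with scale_by_freq=True the raw periodogram is divided by Fs * (win**2).sum() (a density), and with scale_by_freq=False by win.sum()**2 (a spectrum), so both sides of the assertion reduce to the raw periodogram over Fs. A self-contained sketch of that identity (the signal and parameters are illustrative assumptions, not taken from the test class):

import numpy as np
from matplotlib import mlab

Fs = 100.0
NFFT = 256
x = np.random.randn(10 * NFFT)
win = mlab.window_hanning(np.ones(NFFT))

spec_s, _ = mlab.psd(x, NFFT=NFFT, Fs=Fs, window=mlab.window_hanning,
                     scale_by_freq=True)
spec_n, _ = mlab.psd(x, NFFT=NFFT, Fs=Fs, window=mlab.window_hanning,
                     scale_by_freq=False)

# Both sides reduce to the raw periodogram divided by Fs.
np.testing.assert_allclose(spec_s * (win**2).sum(),
                           spec_n / Fs * win.sum()**2, atol=1e-8)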
Example #5
def test_cached_coherence():
    """Testing the cached coherence functions """
    NFFT = 64  # This is the default behavior
    n_freqs = NFFT // 2 + 1
    ij = [(0, 1), (1, 0)]
    ts = np.loadtxt(os.path.join(test_dir_path, 'tseries12.txt'))
    freqs, cache = tsa.cache_fft(ts, ij)

    # Are the frequencies the right ones?
    yield npt.assert_equal, freqs, ut.get_freqs(2 * np.pi, NFFT)

    # Check that the fft of the first window is what we expect:
    hann = mlab.window_hanning(np.ones(NFFT))
    w_ts = ts[0][:NFFT] * hann
    w_ft = np.fft.fft(w_ts)[0:n_freqs]

    # This is the result of the function:
    first_window_fft = cache['FFT_slices'][0][0]

    yield npt.assert_equal, w_ft, first_window_fft

    coh_cached = tsa.cache_to_coherency(cache, ij)[0, 1]
    f, c = tsa.coherency(ts)
    coh_direct = c[0, 1]

    yield npt.assert_almost_equal, coh_direct, coh_cached
Example #6
def test_cached_coherence():
    """Testing the cached coherence functions """
    NFFT = 64  # This is the default behavior
    n_freqs = NFFT // 2 + 1
    ij = [(0, 1), (1, 0)]
    ts = np.loadtxt(os.path.join(test_dir_path, 'tseries12.txt'))
    freqs, cache = tsa.cache_fft(ts, ij)

    # Are the frequencies the right ones?
    npt.assert_equal(freqs, utils.get_freqs(2 * np.pi, NFFT))

    # Check that the fft of the first window is what we expect:
    hann = mlab.window_hanning(np.ones(NFFT))
    w_ts = ts[0][:NFFT] * hann
    w_ft = fftpack.fft(w_ts)[0:n_freqs]

    # This is the result of the function:
    first_window_fft = cache['FFT_slices'][0][0]

    npt.assert_equal(w_ft, first_window_fft)

    coh_cached = tsa.cache_to_coherency(cache, ij)[0, 1]
    f, c = tsa.coherency(ts)
    coh_direct = c[0, 1]

    npt.assert_almost_equal(coh_direct, coh_cached)

    # Only welch PSD works and an error is thrown otherwise. This tests that
    # the error is thrown:
    with pytest.raises(ValueError) as e_info:
        tsa.cache_fft(ts, ij, method=methods[2])

    # Take the method in which the window is defined on input:
    freqs, cache1 = tsa.cache_fft(ts, ij, method=methods[3])
    # And compare it to the method in which it isn't:
    freqs, cache2 = tsa.cache_fft(ts, ij, method=methods[4])
    npt.assert_equal(cache1, cache2)

    # Do the same, while setting scale_by_freq to False:
    freqs, cache1 = tsa.cache_fft(ts, ij, method=methods[3],
                                  scale_by_freq=False)
    freqs, cache2 = tsa.cache_fft(ts, ij, method=methods[4],
                                  scale_by_freq=False)
    npt.assert_equal(cache1, cache2)

    # Test cache_to_psd:
    psd1 = tsa.cache_to_psd(cache, ij)[0]
    # Against the standard get_spectra:
    f, c = tsa.get_spectra(ts)
    psd2 = c[0][0]

    npt.assert_almost_equal(psd1, psd2)

    # Test that prefer_speed_over_memory doesn't change anything:
    freqs, cache1 = tsa.cache_fft(ts, ij)
    freqs, cache2 = tsa.cache_fft(ts, ij, prefer_speed_over_memory=True)
    psd1 = tsa.cache_to_psd(cache1, ij)[0]
    psd2 = tsa.cache_to_psd(cache2, ij)[0]
    npt.assert_almost_equal(psd1, psd2)
Example #7
    def test_psd_window_hanning_detrend_linear(self):
        if self.NFFT_density is None:
            return
        ydata = np.arange(self.NFFT_density)
        ycontrol = np.zeros(self.NFFT_density)
        ydata1 = ydata + 5
        ydata2 = ydata + 3.3
        ycontrol1 = ycontrol
        ycontrol2 = ycontrol
        windowVals = mlab.window_hanning(np.ones_like(ycontrol1))
        ycontrol1 = ycontrol1 * windowVals
        ycontrol2 = mlab.window_hanning(ycontrol2)
        ydata = np.vstack([ydata1, ydata2])
        ycontrol = np.vstack([ycontrol1, ycontrol2])
        ydata = np.tile(ydata, (20, 1))
        ycontrol = np.tile(ycontrol, (20, 1))
        ydatab = ydata.T.flatten()
        ydataf = ydata.flatten()
        ycontrol = ycontrol.flatten()
        spec_g, fsp_g = mlab.psd(x=ydataf,
                                 NFFT=self.NFFT_density,
                                 Fs=self.Fs,
                                 noverlap=0,
                                 sides=self.sides,
                                 detrend=mlab.detrend_linear,
                                 window=mlab.window_hanning)
        spec_b, fsp_b = mlab.psd(x=ydatab,
                                 NFFT=self.NFFT_density,
                                 Fs=self.Fs,
                                 noverlap=0,
                                 sides=self.sides,
                                 detrend=mlab.detrend_linear,
                                 window=mlab.window_hanning)
        spec_c, fsp_c = mlab.psd(x=ycontrol,
                                 NFFT=self.NFFT_density,
                                 Fs=self.Fs,
                                 noverlap=0,
                                 sides=self.sides,
                                 window=mlab.window_none)
        spec_c *= len(ycontrol1) / (np.abs(windowVals)**2).sum()
        assert_array_equal(fsp_g, fsp_c)
        assert_array_equal(fsp_b, fsp_c)
        assert_allclose(spec_g, spec_c, atol=1e-08)
        # these should not be almost equal
        with pytest.raises(AssertionError):
            assert_allclose(spec_b, spec_c, atol=1e-08)
Example #8
def sound_freq_sweep(startFreq, endFreq, duration, samples_per_sec=None):
    """   
    Creates a normalized sound vector (duration seconds long) where the
    frequency sweeps from startFreq to endFreq (on a log2 scale).

    Parameters
    ----------

    startFreq: float, the starting frequency of the sweep in Hz
    
    endFreq: float, the ending frequency of the sweep in Hz

    duration: float, the duration of the sweep in seconds

    samples_per_sec: float, the sampling rate, defaults to 44100 


    """
    if samples_per_sec is None:
        samples_per_sec = 44100

    time = np.arange(0, duration * samples_per_sec)

    if startFreq != endFreq:
        startFreq = np.log2(startFreq)
        endFreq = np.log2(endFreq)
        freq = 2**np.arange(startFreq, endFreq,
                            (endFreq - startFreq) / (len(time)))
        freq = freq[:time.shape[0]]
    else:
        freq = startFreq

    snd = np.sin(time * freq * (2 * np.pi) / samples_per_sec)

    # window the sound vector with a 50 ms raised cosine
    numAtten = int(np.round(samples_per_sec * .05))
    # don't window if requested sound is too short
    if len(snd) >= numAtten:
        snd[:numAtten // 2] *= window_hanning(np.ones(numAtten))[:numAtten // 2]
        snd[-(numAtten // 2):] *= window_hanning(
            np.ones(numAtten))[-(numAtten // 2):]

    # normalize
    snd = snd / np.max(np.abs(snd))

    return snd
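A usage sketch, again assuming numpy and matplotlib.mlab's window_hanning are in scope as above; writing the result to a WAV file via scipy is an illustrative assumption, not part of the original snippet:

import numpy as np
from scipy.io import wavfile

# Two-octave upward sweep, 2 s long, written as 16-bit PCM
sweep = sound_freq_sweep(220.0, 880.0, duration=2.0)
wavfile.write('sweep.wav', 44100, (sweep * 32767).astype(np.int16))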
Example #9
def sound_freq_sweep(startFreq, endFreq, duration, samples_per_sec=None):
    """   
    Creates a normalized sound vector (duration seconds long) where the
    frequency sweeps from startFreq to endFreq (on a log2 scale).

    Parameters
    ----------

    startFreq: float, the starting frequency of the sweep in Hz
    
    endFreq: float, the ending frequency of the sweep in Hz

    duration: float, the duration of the sweep in seconds

    samples_per_sec: float, the sampling rate, defaults to 44100 


    """
    if samples_per_sec is None:
        samples_per_sec = 44100

    time = np.arange(0, duration * samples_per_sec)

    if startFreq != endFreq:
        startFreq = np.log2(startFreq)
        endFreq = np.log2(endFreq)
        freq = 2**np.arange(startFreq, endFreq,
                            (endFreq - startFreq) / (len(time)))
        freq = freq[:time.shape[0]]
    else:
        freq = startFreq

    snd = np.sin(time * freq * (2 * np.pi) / samples_per_sec)

    # window the sound vector with a 50 ms raised cosine
    numAtten = int(np.round(samples_per_sec * .05))
    # don't window if requested sound is too short
    if len(snd) >= numAtten:
        snd[:numAtten // 2] *= window_hanning(np.ones(numAtten))[:numAtten // 2]
        snd[-(numAtten // 2):] *= window_hanning(
            np.ones(numAtten))[-(numAtten // 2):]

    # normalize
    snd = snd / np.max(np.abs(snd))

    return snd
Example #10
    def test_psd_windowarray_equal(self):
        win = mlab.window_hanning(np.ones(self.NFFT_density_real))
        speca, fspa = mlab.psd(x=self.y,
                               NFFT=self.NFFT_density,
                               Fs=self.Fs,
                               noverlap=self.nover_density,
                               pad_to=self.pad_to_density,
                               sides=self.sides,
                               window=win)
        specb, fspb = mlab.psd(x=self.y,
                               NFFT=self.NFFT_density,
                               Fs=self.Fs,
                               noverlap=self.nover_density,
                               pad_to=self.pad_to_density,
                               sides=self.sides)
        assert_array_equal(fspa, fspb)
        assert_allclose(speca, specb, atol=1e-08)
Example #11
methods = [
    None, {
        "this_method": 'multi_taper_csd',
        "Fs": 2 * np.pi
    }, {
        "this_method": 'periodogram_csd',
        "Fs": 2 * np.pi,
        "NFFT": 256
    }
]

if has_mpl:
    methods.append({
        "this_method": 'welch',
        "NFFT": 256,
        "Fs": 2 * np.pi,
        "window": mlab.window_hanning(np.ones(256))
    })
    methods.append({"this_method": 'welch', "NFFT": 256, "Fs": 2 * np.pi})


def test_coherency():
    """
    Tests that the coherency algorithm runs smoothly using the different
    csd routines, that the resulting matrix is symmetric, and, for the
    welch method, that the frequency bands in the output make sense
    """

    for method in methods:
        f, c = tsa.coherency(tseries, csd_method=method)

        npt.assert_array_almost_equal(c[0, 1], c[1, 0].conjugate())
Example #12
def plot_many_psds(
        seglist, seg_index, time, strain, fs, n_chunk, plotting=False):
    """
    Calculate n_chunk PSDs for a given data segment.

    If n_chunk is too large for the segment, it will be reduced to the
    maximum allowable value.

    seglist -- a list of segments of strain data without errors

    seg_index -- an index indicating which strain segment is used

    time -- array of all time values

    strain -- array of all strain values, corresponding to the time values

    fs -- sampling frequency in Hz

    n_chunk -- number of requested chunks to divide the timeseries into and
    calculate a PSD for

    plotting -- Boolean; if true, plots are produced

    Returns:

    psds_dict -- a dictionary with a key of frequency and a value of a
    list of all associated PSD values
    """

    print('=== PSD statistics ===')
    print('* Use segment #', seg_index)
    time_seg = time[seglist[seg_index]]
    time_seg_max = max(time_seg)
    time_seg_min = min(time_seg)
    segment_length = int((time_seg_max - time_seg_min) * fs)  # num samples
    print('* Length of the segment: ', time_seg_max - time_seg_min,
          'sec =>', segment_length, ' samples')

    chunk_length = 2**12  # number of samples in a chunk
    chunk_length_sec = chunk_length / fs  # chunk length in seconds
    print('* Length of one chunk: ', chunk_length_sec, ' sec => ',
          chunk_length, ' samples')

    # User selects desired number of chunks
    print('* Requested number of chunks: ', n_chunk)

    # Check that segment is of sufficient length
    analyze_length = (chunk_length // 2) * (n_chunk + 1)
    if analyze_length > segment_length:
        print('  The requested data length exceeds the length of the segment.')
        n_chunk = int(segment_length / (chunk_length // 2) - 1)
        print('  Reduced the requested number of chunks: ', n_chunk)
    print()  # blank line

    # Calculate PSDs for each chunk
    if plotting:
        plt.figure(4)
    num_PSDs = 0
    psds = []
    strain_seg = strain[seglist[seg_index]]
    my_window = mlab.window_hanning(np.ones(chunk_length))
    for i_PSD in range(n_chunk):
        i_start = i_PSD * (chunk_length // 2)
        i_end = i_start + chunk_length - 1
        pxx, freqs = mlab.psd(
            strain_seg[i_start:i_end], Fs=fs, NFFT=chunk_length,
            noverlap=chunk_length // 2, window=my_window)
        if plotting:
            plt.loglog(freqs, np.sqrt(np.fabs(pxx)))
        num_PSDs += 1

        # Store list of tuples for PSD values
        psds.append((freqs, np.fabs(pxx)))
    print('* Finished processing all chunks')
    print('* Number of PSDs plotted: ', num_PSDs)
    print()  # blank line

    psds_dict = {}
    # Add frequencies to keys for the first PSD
    for index, freq in enumerate(psds[0][0]):
        psds_dict[freq] = [psds[0][1][index]]

    for psd in psds[1:]:
        # Loop over all frequencies for a given PSD array
        for index, freq in enumerate(psd[0]):
            # Append new PSD value if frequency already in dict
            psds_dict[freq].append(psd[1][index])

    if plotting:
        plt.grid('on')
        plt.xlim([10, 100])
        plt.xlabel('Frequency (Hz)', fontsize=18)
        plt.ylabel(r'PSD (strain /  $\sqrt{Hz}$)', fontsize=18)
        plt.tick_params(axis='x', labelsize=14)
        plt.tick_params(axis='y', labelsize=14)
        plt.title(
            str(num_PSDs) + ' PSDs for L1 data starting at GPS ' +
            str(time_seg[0]))
        plt.ylim([1e-26, 1e-16])
        plt.savefig('manyPSDs_zoomed.png')

    return psds_dict
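A call sketch for the function above under assumed inputs: synthetic white-noise strain, a single error-free segment spanning the whole record (the seglist entries are only used to index time and strain, so a plain slice suffices), and plotting disabled:

import numpy as np
from matplotlib import mlab
import matplotlib.pyplot as plt  # only touched when plotting=True

fs = 4096.0                      # assumed sampling rate, Hz
time = np.arange(0, 64, 1 / fs)  # 64 s of synthetic data
strain = 1e-21 * np.random.standard_normal(len(time))
seglist = [slice(0, len(time))]  # one segment covering everything

psds_dict = plot_many_psds(seglist, 0, time, strain, fs,
                           n_chunk=10, plotting=False)
print(len(psds_dict), 'frequency bins collected')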
Example #13
                
                if r == 0 and nr == 0:
                    # for the first ROI, set up the structure
                    data[cond] = np.zeros(
                        (len(rois), num_runs, len(ind[cond][nr]))) * np.nan

                data[cond][r, nr, :] = all_data[nr, ind[cond][nr]]

                #plt.subplot(3,3,nr+1)
                #plt.plot(data[cond][r,nr,:],'k')

                # apply a hanning window to block lengths to minimize edge
                # artifacts
                if do_windowing:
                    count = 0
                    while count < len(data[cond][r, nr, :]):
                        block = slice(count, count + block_length[cond])
                        data[cond][r, nr, block] = mlab.window_hanning(
                            data[cond][r, nr, block])
                        count += block_length[cond]
                        #plt.vlines(count,-3,3)


                #plt.plot(data[cond][r,nr,:],'r')
             
        del all_data

    #for debugging purposes, do some plotting
    #for cond in conds:
    #    plt.figure()
    #    plt.suptitle(cond)
    #    for nr in range(num_runs):
    #        plt.subplot(num_runs,1,nr+1)
    #        plt.plot(data[cond][:,nr,:].transpose())
Example #14
# Define globally
test_dir_path = os.path.join(nitime.__path__[0], 'tests')

# Define these once globally:
t = np.linspace(0, 16 * np.pi, 1024)
x = np.sin(t) + np.sin(2 * t) + np.sin(3 * t) + np.random.rand(t.shape[-1])
y = x + np.random.rand(t.shape[-1])

tseries = np.vstack([x, y])

methods = (None,
           {"this_method": 'welch', "NFFT": 256, "Fs": 2 * np.pi},
           {"this_method": 'multi_taper_csd', "Fs": 2 * np.pi},
           {"this_method": 'periodogram_csd', "Fs": 2 * np.pi, "NFFT": 256},
           {"this_method": 'welch', "NFFT": 256, "Fs": 2 * np.pi,
            "window": mlab.window_hanning(np.ones(256))})


def test_coherency():
    """
    Tests that the coherency algorithm runs smoothly using the different
    csd routines, that the resulting matrix is symmetric, and, for the
    welch method, that the frequency bands in the output make sense
    """

    for method in methods:
        f, c = tsa.coherency(tseries, csd_method=method)

        npt.assert_array_almost_equal(c[0, 1], c[1, 0].conjugate())
        npt.assert_array_almost_equal(c[0, 0], np.ones(f.shape))
Example #15
def test_BurgAutoSpectralDensity_windowing():
    # Check that raw vs. windowed signals give rise to roughly same PSD,
    # say within +/- 25%, due to reasons explained in Marple
    #
    # Could also have a similar test fail for Fourier spectra

    # Create random signal w/ lots of power at low frequencies such that
    # FFT-based spectral estimates will suffer from leakage if data blocks
    # are not smoothly tapered/windowed to zero at their edges:
    # --------------------------------------------------------------------
    # Sampling parameters
    Fs = 10.  # 1.
    t0 = 0
    T = 1e3

    # Random signal parameters
    f0_broad = 0.
    tau_broad = 4. / Fs
    G0 = 1.
    noise_floor = 1e-6
    seed = None

    # Coherent signal parameters
    f0 = 2.5  # 0.25
    A = 0.3

    # Generate random signal
    sig = RandomSignal(
        Fs=Fs, t0=t0, T=T,
        f0=f0_broad, tau=tau_broad, G0=G0,
        noise_floor=noise_floor, seed=seed)
    t = sig.t()
    x = sig.x.copy()

    # Add coherent signal
    x += (A * np.sin(2 * np.pi * f0 * t))
    x -= np.mean(x)

    # Windowing should "minimally" affect Burg autospectral-density estimate
    # (here, "minimally" means within a few tens of percent; when talking
    # about windowing FFT-based calculations, we're usually worried about
    # e.g. strong, low-frequency components of the signal contaminating
    # the higher-frequency spectral estimates, resulting in order-of-magnitude
    # errors in the spectral estimates):
    # -----------------------------------------------------------------------
    Nf = 128
    asd_burg = BurgAutoSpectralDensity(
        2, x, Fs=Fs, Nf=Nf, normalize=True)
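    # (The sqrt(8/3) below is the usual Hann power correction: the mean of
    # hanning(N)**2 approaches 3/8, so scaling the windowed signal by
    # sqrt(8/3) restores its average power.)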
    asd_burg_hanning = BurgAutoSpectralDensity(
        2, np.sqrt(8. / 3) * mlab.window_hanning(x),
        Fs=Fs, Nf=Nf, normalize=True)

    # A unity ratio implies that windowing has no effect on the spectral
    # estimate, with larger divergences from unity indicating that windowing
    # has a larger effect on the estimate. Variations about unity are
    # typically +/-15%. The below ensures that the raw and windowed estimates
    # differ by < 150% in all locations.
    burg_ratio = asd_burg_hanning.Sxx / asd_burg.Sxx
    np.testing.assert_almost_equal(
        burg_ratio,
        np.ones(len(burg_ratio)),
        decimal=0)

    # In contrast, windowing should significantly affect FFT-based
    # autospectral-density estimates:
    # ------------------------------------------------------------
    NFFT = Nf
    noverlap = NFFT // 2
    asd_welch, f = mlab.psd(
        x, NFFT=NFFT, Fs=Fs,
        window=mlab.window_none,
        noverlap=noverlap,
        sides='twosided')
    asd_welch_hanning, f = mlab.psd(
        x, NFFT=NFFT, Fs=Fs,
        window=mlab.window_hanning,
        noverlap=noverlap,
        sides='twosided')

    welch_ratio = asd_welch_hanning / asd_welch

    # Numpy does *not* have the logical converse of
    # `np.testing.assert_almost_equal`, so we have to
    # hack something together ourselves...
    #
    # If the below test passes, it shows that the raw and windowed
    # FFT spectral estimates differ significantly (by > 50% in one
    # or more locations, where the less-stringent constraint of 50%
    # results from application of triangle inequality to the logic in
    # `np.testing.assert_almost_equal`).
    np.testing.assert_raises(
        AssertionError,
        np.testing.assert_almost_equal,
        *[welch_ratio, np.ones(len(welch_ratio))],
        **{'decimal': 0})

    return
Example #16
def test_cached_coherence():
    """Testing the cached coherence functions """
    NFFT = 64  # This is the default behavior
    n_freqs = NFFT // 2 + 1
    ij = [(0, 1), (1, 0)]
    ts = np.loadtxt(os.path.join(test_dir_path, 'tseries12.txt'))
    freqs, cache = tsa.cache_fft(ts, ij)

    # Are the frequencies the right ones?
    npt.assert_equal(freqs, utils.get_freqs(2 * np.pi, NFFT))

    # Check that the fft of the first window is what we expect:
    hann = mlab.window_hanning(np.ones(NFFT))
    w_ts = ts[0][:NFFT] * hann
    w_ft = fftpack.fft(w_ts)[0:n_freqs]

    # This is the result of the function:
    first_window_fft = cache['FFT_slices'][0][0]

    npt.assert_equal(w_ft, first_window_fft)

    coh_cached = tsa.cache_to_coherency(cache, ij)[0, 1]
    f, c = tsa.coherency(ts)
    coh_direct = c[0, 1]

    npt.assert_almost_equal(coh_direct, coh_cached)

    # Only welch PSD works and an error is thrown otherwise. This tests that
    # the error is thrown:
    with pytest.raises(ValueError) as e_info:
        tsa.cache_fft(ts, ij, method=methods[2])

    # Take the method in which the window is defined on input:
    freqs, cache1 = tsa.cache_fft(ts, ij, method=methods[3])
    # And compare it to the method in which it isn't:
    freqs, cache2 = tsa.cache_fft(ts, ij, method=methods[4])
    npt.assert_equal(cache1, cache2)

    # Do the same, while setting scale_by_freq to False:
    freqs, cache1 = tsa.cache_fft(ts,
                                  ij,
                                  method=methods[3],
                                  scale_by_freq=False)
    freqs, cache2 = tsa.cache_fft(ts,
                                  ij,
                                  method=methods[4],
                                  scale_by_freq=False)
    npt.assert_equal(cache1, cache2)

    # Test cache_to_psd:
    psd1 = tsa.cache_to_psd(cache, ij)[0]
    # Against the standard get_spectra:
    f, c = tsa.get_spectra(ts)
    psd2 = c[0][0]

    npt.assert_almost_equal(psd1, psd2)

    # Test that prefer_speed_over_memory doesn't change anything:
    freqs, cache1 = tsa.cache_fft(ts, ij)
    freqs, cache2 = tsa.cache_fft(ts, ij, prefer_speed_over_memory=True)
    psd1 = tsa.cache_to_psd(cache1, ij)[0]
    psd2 = tsa.cache_to_psd(cache2, ij)[0]
    npt.assert_almost_equal(psd1, psd2)
Example #17
    strain_chunk = strain[seglist[seg_index]][
        chunk_length * chunk_index:chunk_length * (chunk_index + 1)]
    time_chunk = time[seglist[seg_index]][
        chunk_length * chunk_index:chunk_length * (chunk_index + 1)]

    print('* Extracted chunk length: ', len(time_chunk))
    print()  # blank line

    plotting = False
    # Plot a time series
    if chunk_index == 1:
        plot_timeseries(time_chunk, strain_chunk)
        #plotting = True

    # Calculate a PSD
    my_window = mlab.window_hanning(np.ones(chunk_length))
    #pxx, freqs = mlab.psd(strain_chunk, Fs=fs, NFFT=chunk_length,
    #                      noverlap=chunk_length/2, window=my_window)

    pxx, freqs = plot_psd(time_chunk, strain_chunk, fs, chunk_length,
                          my_window, plotting)

    f_resolution = freqs[1] - freqs[0]
    print('* Frequency resolution returned by mlab.psd: ', f_resolution, 'Hz')
    print('  1/(chunk_length_sec): ', 1 / chunk_length_sec, 'Hz')
    print('  This must agree')
    print()  # blank line

    # Check the PSD through Parseval's theorem
    rms_psd = np.sqrt(np.sum(pxx * f_resolution))  # Integrate PSD over all freqs
Example #18
                    data[cond] = np.zeros(
                        (len(rois), num_runs, len(ind[cond][nr]))) * np.nan

                data[cond][r, nr, :] = all_data[nr, ind[cond][nr]]

                #plt.subplot(3,3,nr+1)
                #plt.plot(data[cond][r,nr,:],'k')

                #apply a hanning window to block lengths to minimize edge
                #artifacts
                if do_windowing:
                    count = 0
                    while count < (len(data[cond][r, nr, :])):
                        data[cond][r, nr, count:count +
                                   block_length[cond]] = mlab.window_hanning(
                                       data[cond][r, nr, count:count +
                                                  block_length[cond]])
                        count += block_length[cond]
                        #plt.vlines(count,-3,3)

                #plt.plot(data[cond][r,nr,:],'r')

        del all_data

    #for debugging purposes, do some plotting
    #for cond in conds:
    #    plt.figure()
    #    plt.suptitle(cond)
    #    for nr in range(num_runs):
    #        plt.subplot(num_runs,1,nr+1)
    #        plt.plot(data[cond][:,nr,:].transpose())