Example #1
    def spectrum_multi_taper(self):
        """

        The spectrum, computed using
        :func:`multi_taper_psd`

        """
        #Initialize the output
        spectrum_multi_taper = np.empty(
            (self.input.shape[0], self.input.shape[-1] // 2 + 1))

        #If multi-channel data:
        if len(self.input.data.shape) > 1:
            for i in range(self.input.data.shape[0]):
                # 'f' are the center frequencies of the frequency bands
                # represented in the MT psd. These are identical in each
                # iteration of the loop, so they get reassigned into the same
                # variable in each iteration:
                f, spectrum_multi_taper[i], _ = tsa.multi_taper_psd(
                    self.input.data[i],
                    Fs=self.input.sampling_rate,
                    BW=self.BW,
                    adaptive=self.adaptive,
                    low_bias=self.low_bias)
        else:
            f, spectrum_multi_taper, _ = tsa.multi_taper_psd(
                self.input.data,
                Fs=self.input.sampling_rate,
                BW=self.BW,
                adaptive=self.adaptive,
                low_bias=self.low_bias)

        return f, spectrum_multi_taper
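The method above appears to be excerpted from nitime's SpectralAnalyzer. A minimal usage sketch, assuming a recent nitime where SpectralAnalyzer and TimeSeries are importable as shown (exact attribute access may differ between versions):

import numpy as np
import nitime.timeseries as nts
from nitime.analysis import SpectralAnalyzer

# Hypothetical 2-channel, 2048-sample recording at 1 kHz
data = np.random.randn(2, 2048)
T = nts.TimeSeries(data, sampling_rate=1000.0)

S = SpectralAnalyzer(T)
# spectrum_multi_taper is exposed as a lazily computed attribute in nitime
f, psd = S.spectrum_multi_taper
print(f.shape, psd.shape)   # (1025,), (2, 1025)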
Example #2
File: spectral.py Project: chrox/nitime
    def spectrum_multi_taper(self):
        """

        The spectrum, computed using
        :func:`multi_taper_psd`

        """
        #Initialize the output
        spectrum_multi_taper = np.empty((self.input.shape[0],
                                         self.input.shape[-1] // 2 + 1))

        #If multi-channel data:
        if len(self.input.data.shape) > 1:
            for i in range(self.input.data.shape[0]):
                # 'f' are the center frequencies of the frequency bands
                # represented in the MT psd. These are identical in each
                # iteration of the loop, so they get reassigned into the same
                # variable in each iteration:
                f, spectrum_multi_taper[i], _ = tsa.multi_taper_psd(
                    self.input.data[i],
                    Fs=self.input.sampling_rate,
                    BW=self.BW,
                    adaptive=self.adaptive,
                    low_bias=self.low_bias)
        else:
            f, spectrum_multi_taper, _ = tsa.multi_taper_psd(self.input.data,
                                                  Fs=self.input.sampling_rate,
                                                  BW=self.BW,
                                                  adaptive=self.adaptive,
                                                  low_bias=self.low_bias)

        return f, spectrum_multi_taper
Example #3
def test_multitaper_spectral_normalization():
    """
    Check that the spectral estimators are normalized in the
    correct Watts/Hz fashion
    """

    x = np.random.randn(1024)
    f1, Xp1, _ = tsa.multi_taper_psd(x)
    f2, Xp2, _ = tsa.multi_taper_psd(x, Fs=100)
    f3, Xp3, _ = tsa.multi_taper_psd(x, NFFT=2**12)

    p1 = np.sum(Xp1) * 2 * np.pi / 2**10
    p2 = np.sum(Xp2) * 100 / 2**10
    p3 = np.sum(Xp3) * 2 * np.pi / 2**12
    npt.assert_( np.abs(p1 - p2) < 1e-14,
                    'Inconsistent frequency normalization in MTM PSD (1)' )
    npt.assert_( np.abs(p3 - p2) < 1e-8,
                    'Inconsistent frequency normalization in MTM PSD (2)' )

    td_var = np.var(x)
    # assure that the estimators are at least in the same
    # order of magnitude as the time-domain variance
    npt.assert_( np.abs(np.log10(p1/td_var)) < 1,
                    'Incorrect frequency normalization in MTM PSD' )

    # check the freq vector while we're here
    npt.assert_( f2.max() == 50, 'MTM PSD returns wrong frequency bins' )
Example #4
def fakequakes_allpsd(home,project_name,run_name):
    '''
    Compute PSDs for either all the synthetics for a particular run or all the data
    '''
    
    from numpy import savez
    from obspy import read
    import nitime.algorithms as tsa
    from glob import glob
    from os import path,makedirs
    
    #Decide what I'm going to work on
    paths=glob(home+run_name+'/output/waveforms/'+run_name+'.*')
    for k in range(len(paths)):
        waveforms=glob(paths[k]+'/*.sac')
        print('Working on '+paths[k])
        outpath=home+project_name+'/analysis/frequency/'+paths[k].split('/')[-1]
        if not path.exists(outpath):
            makedirs(outpath)
        for ksta in range(len(waveforms)):
            sta=waveforms[ksta].split('/')[-1].split('.')[0]
            n=read(paths[k]+'/'+sta+'.LYN.sac')
            e=read(paths[k]+'/'+sta+'.LYE.sac')
            u=read(paths[k]+'/'+sta+'.LYZ.sac')
            outname=sta+'.psd'
            #Compute spectra
            fn, npsd, nu = tsa.multi_taper_psd(n[0].data,Fs=1./n[0].stats.delta,adaptive=True,jackknife=False,low_bias=True,NFFT=512)
            fe, epsd, nu = tsa.multi_taper_psd(e[0].data,Fs=1./e[0].stats.delta,adaptive=True,jackknife=False,low_bias=True,NFFT=512)
            fu, upsd, nu = tsa.multi_taper_psd(u[0].data,Fs=1./u[0].stats.delta,adaptive=True,jackknife=False,low_bias=True,NFFT=512)
            #Write to file
            
            savez(outpath+'/'+outname,fn=fn,fe=fe,fu=fu,npsd=npsd,epsd=epsd,upsd=upsd)
Example #5
def test_multitaper_spectral_normalization():
    """
    Check that the spectral estimators are normalized in the
    correct Watts/Hz fashion
    """

    x = np.random.randn(1024)
    f1, Xp1, _ = tsa.multi_taper_psd(x)
    f2, Xp2, _ = tsa.multi_taper_psd(x, Fs=100)
    f3, Xp3, _ = tsa.multi_taper_psd(x, NFFT=2**12)

    p1 = np.sum(Xp1) * 2 * np.pi / 2**10
    p2 = np.sum(Xp2) * 100 / 2**10
    p3 = np.sum(Xp3) * 2 * np.pi / 2**12
    nt.assert_true(
        np.abs(p1 - p2) < 1e-14,
        'Inconsistent frequency normalization in MTM PSD (1)')
    nt.assert_true(
        np.abs(p3 - p2) < 1e-8,
        'Inconsistent frequency normalization in MTM PSD (2)')

    td_var = np.var(x)
    # assure that the estimators are at least in the same
    # order of magnitude as the time-domain variance
    nt.assert_true(
        np.abs(np.log10(p1 / td_var)) < 1,
        'Incorrect frequency normalization in MTM PSD')

    # check the freq vector while we're here
    nt.assert_true(f2.max() == 50, 'MTM PSD returns wrong frequency bins')
Example #6
def fakequakes_allpsd(home, project_name, run_name):
    '''
    Compute PSDs for either all the synthetics for a particular run or all the data
    '''

    from numpy import savez
    from obspy import read
    import nitime.algorithms as tsa
    from glob import glob
    from os import path, makedirs

    #Decide what I'm going to work on
    paths = glob(home + run_name + '/output/waveforms/' + run_name + '.*')
    for k in range(len(paths)):
        waveforms = glob(paths[k] + '/*.sac')
        print('Working on ' + paths[k])
        outpath = home + project_name + '/analysis/frequency/' + paths[
            k].split('/')[-1]
        if not path.exists(outpath):
            makedirs(outpath)
        for ksta in range(len(waveforms)):
            sta = waveforms[ksta].split('/')[-1].split('.')[0]
            n = read(paths[k] + '/' + sta + '.LYN.sac')
            e = read(paths[k] + '/' + sta + '.LYE.sac')
            u = read(paths[k] + '/' + sta + '.LYZ.sac')
            outname = sta + '.psd'
            #Compute spectra
            fn, npsd, nu = tsa.multi_taper_psd(n[0].data,
                                               Fs=1. / n[0].stats.delta,
                                               adaptive=True,
                                               jackknife=False,
                                               low_bias=True,
                                               NFFT=512)
            fe, epsd, nu = tsa.multi_taper_psd(e[0].data,
                                               Fs=1. / e[0].stats.delta,
                                               adaptive=True,
                                               jackknife=False,
                                               low_bias=True,
                                               NFFT=512)
            fu, upsd, nu = tsa.multi_taper_psd(u[0].data,
                                               Fs=1. / u[0].stats.delta,
                                               adaptive=True,
                                               jackknife=False,
                                               low_bias=True,
                                               NFFT=512)
            #Write to file

            savez(outpath + '/' + outname,
                  fn=fn,
                  fe=fe,
                  fu=fu,
                  npsd=npsd,
                  epsd=epsd,
                  upsd=upsd)
Example #7
def multitaper_spectrogram(EEG, Fs, NW, window_length, window_step, EEG_segs=None, dpss=None, eigvals=None):
    """Compute spectrogram using multitaper estimation.

    Arguments:
    EEG -- EEG signal, size=(channel_num, sample_point_num)
    Fs -- sampling frequency in Hz
    NW -- the time-halfbandwidth product
    window_length -- length of windows in seconds
    window_step -- step of windows in seconds

    Outputs:
    psd estimation, size=(window_num, freq_point_num, channel_num)
    frequencies, size=(freq_point_num,)
    """

    #window_length = int(round(window_length*Fs))
    #window_step = int(round(window_step*Fs))

    nfft = max(1<<(window_length-1).bit_length(), window_length)

    #freqs = np.arange(0, Fs, Fs*1.0/nfft)[:nfft//2+1]
    if EEG_segs is None:
        window_starts = np.arange(0,EEG.shape[1]-window_length+1,window_step)
        #window_num = len(window_starts)
        EEG_segs = detrend(EEG[:,list(map(lambda x:np.arange(x,x+window_length), window_starts))], axis=2)
    _, pxx, _ = tsa.multi_taper_psd(EEG_segs, Fs=Fs, NW=NW, adaptive=False, jackknife=False, low_bias=True,
                                NFFT=nfft)#, dpss=dpss, eigvals=eigvals)

    return pxx.transpose(1,2,0)
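A minimal usage sketch for the function above, under the assumption that window_length and window_step are passed in samples (the seconds-to-samples conversion is commented out in the snippet) and that numpy, scipy.signal.detrend and nitime.algorithms are available as np, detrend and tsa, as the snippet implies:

import numpy as np
import nitime.algorithms as tsa
from scipy.signal import detrend

Fs = 100
# Hypothetical 4-channel recording, 30 s at 100 Hz
EEG = np.random.randn(4, 30 * Fs)

# 6 s windows, 1 s steps, expressed in samples
pxx = multitaper_spectrogram(EEG, Fs, NW=3,
                             window_length=6 * Fs, window_step=1 * Fs)
print(pxx.shape)   # (window_num, freq_point_num, channel_num)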
Example #8
def get_psd_multitaper(data,
                       fs,
                       NW=None,
                       BW=None,
                       adaptive=False,
                       jackknife=True,
                       sides='default'):
    '''
     Computes power spectral density using Multitaper functions

    Args:
        data (nt, nch): time series data where time axis is assumed to be on the last axis
        fs (float): sampling rate of the signal
        NW (float): Normalized half bandwidth of the data tapers in Hz
        BW (float): sampling bandwidth of the data tapers in Hz
        adaptive (bool): Use an adaptive weighting routine to combine the PSD estimates of different tapers.
        jackknife (bool): Use the jackknife method to make an estimate of the PSD variance at each point.
        sides (str): This determines which sides of the spectrum to return.

    Returns:
        tuple: Tuple containing:
            | **f (nfft):** Frequency points vector
            | **psd_est (nfft, nch):** estimated power spectral density (PSD)
            | **nu (nfft, nch):** if jackknife = True; estimated variance of the log-psd. If Jackknife = False; degrees of freedom in a chi square model of how the estimated psd is distributed wrt true log - PSD
    '''
    data = data.T  # move time to the last axis

    # pass by keyword so that `sides` is not silently consumed by `low_bias`
    f, psd_mt, nu = tsa.multi_taper_psd(data, Fs=fs, NW=NW, BW=BW,
                                        adaptive=adaptive, jackknife=jackknife,
                                        sides=sides)
    return f, psd_mt.T, nu.T
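A minimal usage sketch for get_psd_multitaper, assuming numpy and nitime.algorithms are imported as np and tsa in the module; the two-channel test signal is hypothetical:

import numpy as np

fs = 250.0
t = np.arange(0, 10, 1 / fs)
# Hypothetical two-channel signal (10 Hz and 40 Hz sinusoids plus noise), shape (nt, nch)
data = np.column_stack([np.sin(2 * np.pi * 10 * t),
                        np.sin(2 * np.pi * 40 * t)]) + 0.1 * np.random.randn(t.size, 2)

f, psd_est, nu = get_psd_multitaper(data, fs, NW=4, jackknife=False)
print(f.shape, psd_est.shape)   # (nfft,), (nfft, nch)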
Example #9
def source_spectra(home,project_name,run_name,run_number,rupt,nstrike,ndip):
    '''
    Compute PSDs of subfault source-time functions
    '''
    from numpy import genfromtxt,unique,zeros,where,arange,savez,mean
    from mudpy.forward import get_source_time_function,add2stf
    import nitime.algorithms as tsa
    
    outpath=home+project_name+'/analysis/frequency/'
    f=genfromtxt(rupt)
    num=f[:,0]
    nfault=nstrike*ndip
    #Get slips
    all_ss=f[:,8]
    all_ds=f[:,9]
    all=zeros(len(all_ss)*2)
    iss=2*arange(0,len(all)//2,1)
    ids=2*arange(0,len(all)//2,1)+1
    all[iss]=all_ss
    all[ids]=all_ds
    #Now parse for multiple rupture speeds
    unum=unique(num)
    #Count number of windows
    nwin=len(where(num==unum[0])[0])
    #Get rigidities
    mu=f[0:len(unum),13]
    #Get rise times
    rise_time=f[0:len(unum),7]
    #Get areas
    area=f[0:len(unum),10]*f[0:len(unum),11]
    for kfault in range(nfault):
        if kfault%10==0:
            print('... working on subfault '+str(kfault)+' of '+str(nfault))
        #Get rupture times for subfault windows
        i=where(num==unum[kfault])[0]
        trup=f[i,12]
        #Get slips on windows
        ss=all_ss[i]
        ds=all_ds[i]
        #Add it up
        slip=(ss**2+ds**2)**0.5
        #Get first source time function
        t1,M1=get_source_time_function(mu[kfault],area[kfault],rise_time[kfault],trup[0],slip[0])
        #Loop over windows
        for kwin in range(nwin-1):
            #Get next source time function
            t2,M2=get_source_time_function(mu[kfault],area[kfault],rise_time[kfault],trup[kwin+1],slip[kwin+1])
            #Add the source time functions
            t1,M1=add2stf(t1,M1,t2,M2)
        #Convert to slip rate
        s=M1/(mu[kfault]*area[kfault])
        #remove mean
        s=s-mean(s)
        #Done now compute spectra of STF
        fsample=1./(t1[1]-t1[0])
        freq, psd, nu = tsa.multi_taper_psd(s,Fs=fsample,adaptive=True,jackknife=False,low_bias=True)
        outname=run_name+'.'+run_number+'.sub'+str(kfault).rjust(4,'0')+'.stfpsd'
        savez(outpath+outname,freq=freq,psd=psd) 
        
Example #10
File: signal.py Project: mschachter/LaSP
def mt_power_spectrum(s, sample_rate, window_size, low_bias=False, bandwidth=5.0):
    """
        Computes a jackknifed multi-taper power spectrum of a given signal. The jackknife is over
        windowed segments of the signal, specified by window_size.
    """

    sample_length_bins = min(len(s), int(window_size * sample_rate))

    #break signal into chunks and estimate coherence for each chunk
    nchunks = int(np.floor(len(s) / float(sample_length_bins)))
    nleft = len(s) % sample_length_bins
    #ignore the last chunk if it's too short
    if nleft > (sample_length_bins / 2.0):
        nchunks += 1

    ps_freq = None
    ps_ests = list()
    for k in range(nchunks):
        si = k*sample_length_bins
        ei = min(len(s), si + sample_length_bins)
        print('si=%d, ei=%d, len(s)=%d' % (si, ei, len(s)))

        ps_freq,mt_ps,var = ntalg.multi_taper_psd(s[si:ei], Fs=sample_rate, adaptive=True, BW=bandwidth, jackknife=False,
                                                  low_bias=low_bias, sides='onesided')
        ps_ests.append(mt_ps)

    ps_ests = np.array(ps_ests)

    ps_mean = ps_ests.mean(axis=0)
    ps_std = ps_ests.std(axis=0, ddof=1)

    return ps_freq,ps_mean,ps_std
Example #11
File: snr.py Project: TomDLT/nitime
 def mt_signal_psd(self):
     _, p, _ = tsa.multi_taper_psd(self.signal.data,
                                 Fs=self.input.sampling_rate,
                                 BW=self.bandwidth,
                                 adaptive=self.adaptive,
                                 low_bias=self.low_bias)
     return p
Example #12
def mt_power_spectrum(s, sample_rate, window_size, low_bias=False, bandwidth=5.0):
    """
        Computes a jackknifed multi-taper power spectrum of a given signal. The jackknife is over
        windowed segments of the signal, specified by window_size.
    """

    sample_length_bins = min(len(s), int(window_size * sample_rate))

    #break signal into chunks and estimate coherence for each chunk
    nchunks = int(np.floor(len(s) / float(sample_length_bins)))
    nleft = len(s) % sample_length_bins
    #ignore the last chunk if it's too short
    if nleft > (sample_length_bins / 2.0):
        nchunks += 1

    ps_freq = None
    ps_ests = list()
    for k in range(nchunks):
        si = k*sample_length_bins
        ei = min(len(s), si + sample_length_bins)
        print('si=%d, ei=%d, len(s)=%d' % (si, ei, len(s)))

        ps_freq,mt_ps,var = ntalg.multi_taper_psd(s[si:ei], Fs=sample_rate, adaptive=True, BW=bandwidth, jackknife=False,
                                                  low_bias=low_bias, sides='onesided')
        ps_ests.append(mt_ps)

    ps_ests = np.array(ps_ests)

    ps_mean = ps_ests.mean(axis=0)
    ps_std = ps_ests.std(axis=0, ddof=1)

    return ps_freq,ps_mean,ps_std
Example #13
def mtspecgram(sig, Fs, BW, window, timestep, plot=False, ax=None):
    Nwin = int(round(window * Fs))
    Nstep = int(round(timestep * Fs))
    N = len(sig)
    winstart = np.arange(0, N - Nwin + 1, Nstep)
    nw = len(winstart)

    nfft = max(nextpower2(Nwin), Nwin)
    f = getgrid(Fs, nfft)
    Nf = len(f)
    S = np.zeros((Nf, nw))

    for n in range(nw):
        idx = np.arange(winstart[n], winstart[n] + Nwin - 1)
        data = sig[idx]
        f, s, nu = tsa.multi_taper_psd(data, Fs=Fs, BW=BW, NFFT=nfft)
        S[:, n] = s
    winmid = winstart + round(Nwin / 2.0)
    t = winmid / float(Fs)

    if plot == True:
        plot_specgram(S, t, f, ax)
    elif plot == 'dB':
        plot_specgram(dB(S), t, f, ax)

    return (S, t, f)
Example #14
 def mt_signal_psd(self):
     _, p, _ = tsa.multi_taper_psd(self.signal.data,
                                   Fs=self.input.sampling_rate,
                                   BW=self.bandwidth,
                                   adaptive=self.adaptive,
                                   low_bias=self.low_bias)
     return p
Example #15
def test_gh57():
    """
    https://github.com/nipy/nitime/issues/57
    """
    data = np.random.randn(10, 1000)
    for jk in [True,False]:
        for adaptive in [True,False]:
            f, psd, sigma = tsa.multi_taper_psd(data, adaptive=adaptive,
                                                jackknife=jk)
Example #16
def test_gh57():
    """
    https://github.com/nipy/nitime/issues/57
    """
    data = np.random.randn(10, 1000)
    for jk in [True, False]:
        for adaptive in [True, False]:
            f, psd, sigma = tsa.multi_taper_psd(data,
                                                adaptive=adaptive,
                                                jackknife=jk)
Example #17
File: snr.py Project: TomDLT/nitime
    def mt_noise_psd(self):
        p = np.empty((self.noise.data.shape[0],
                     self.noise.data.shape[-1] // 2 + 1))

        for i in range(p.shape[0]):
            _, p[i], _ = tsa.multi_taper_psd(self.noise.data[i],
                                    Fs=self.input.sampling_rate,
                                    BW=self.bandwidth,
                                    adaptive=self.adaptive,
                                    low_bias=self.low_bias)
        return np.mean(p, 0)
Example #18
    def mt_noise_psd(self):
        p = np.empty(
            (self.noise.data.shape[0], self.noise.data.shape[-1] // 2 + 1))

        for i in range(p.shape[0]):
            _, p[i], _ = tsa.multi_taper_psd(self.noise.data[i],
                                             Fs=self.input.sampling_rate,
                                             BW=self.bandwidth,
                                             adaptive=self.adaptive,
                                             low_bias=self.low_bias)
        return np.mean(p, 0)
Example #19
def get_freq_spectrum(timeseries_data, fsampling, fmin, fmax, plot=True):
    """Transform a clean, downsampled time series data into frequency domain
    using the multi-taper method

    Args:
        timeseries_data ([type]): [N x t time series data, with N brain regions for source localized data or
                                   N channels for sensor data. Duration = t time points, or t/fs seconds]
        fsampling ([type]): [sampling frequency of timeseries_data, no default given because this will vary]
        fmin (int, optional): Defaults to 2. [low cutoff frequency]
        fmax (int, optional): Defaults to 45. [high cutoff frequency]
        plot (boolean, optional): Defaults to True. [Plot the spectra?]

    Returns:
        freq_data (dict): [power spectrum for all input regions/channels]
        frange: frequency vector ranging from fmin to fmax
        Freq_range: frequency magnitudes within frange
    """

    freq_data = {}
    lpf = np.array([1, 2, 5, 2, 1])
    lpf = lpf / np.sum(lpf)

    for key in list(timeseries_data.keys()):
        timedata = np.asarray(timeseries_data[key].astype(float))
        f, psd, nu = tsa.multi_taper_psd(timedata,
                                         Fs=fsampling,
                                         NW=3,
                                         BW=1,
                                         adaptive=False,
                                         jackknife=False)
        Fdata = np.convolve(psd, lpf, mode="same")
        freq_data[key] = Fdata

    ind_fmin = np.abs(f - fmin).argmin()
    ind_fmax = np.abs(f - fmax).argmin()
    frange = f[ind_fmin:ind_fmax]
    # Stack the per-region spectra into an array so they can be sliced and plotted below
    Freq_data = np.asarray(list(freq_data.values()))
    Freq_range = Freq_data[:, ind_fmin:ind_fmax]
    if plot == True:
        # fig1, ax1 = mpl.subplots(1, 1)
        # Plotting source localized MEG data
        plt.figure(num=1, figsize=[3, 1.5], dpi=300)
        plt.xlabel("Frequency (Hz)")
        plt.ylabel("Magnitude (dB)")
        for g in range(len(Freq_data)):
            plt.plot(frange, mag2db(Freq_range[g, :]))
    else:
        print("No plot")

    return freq_data, frange, Freq_range
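A minimal usage sketch for get_freq_spectrum, assuming numpy and nitime.algorithms (as np and tsa) are imported in the module; the two-region dictionary is hypothetical, and plot=False avoids the plotting helpers:

import numpy as np

fs = 200.0
t = np.arange(0, 60, 1 / fs)
# Hypothetical dictionary of two regions, each a 1-D time series
series = {'region_A': np.sin(2 * np.pi * 8 * t) + 0.5 * np.random.randn(t.size),
          'region_B': np.sin(2 * np.pi * 20 * t) + 0.5 * np.random.randn(t.size)}

freq_data, frange, Freq_range = get_freq_spectrum(series, fs, fmin=2, fmax=45, plot=False)
print(len(freq_data), frange.shape, Freq_range.shape)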
Example #20
def test_multi_taper_psd_csd():
    """

    Test the multi taper psd and csd estimation functions.
    Based on the example in
    doc/examples/multi_taper_spectral_estimation.py

    """

    N = 2**10
    n_reps = 10

    psd = []
    est_psd = []
    est_csd = []
    for jk in [True, False]:
        for k in range(n_reps):
            for adaptive in [True, False]:
                ar_seq, nz, alpha = utils.ar_generator(N=N, drop_transients=10)
                ar_seq -= ar_seq.mean()
                fgrid, hz = tsa.freq_response(1.0,
                                              a=np.r_[1, -alpha],
                                              n_freqs=N)
                psd.append(2 * (hz * hz.conj()).real)
                f, psd_mt, nu = tsa.multi_taper_psd(ar_seq,
                                                    adaptive=adaptive,
                                                    jackknife=jk)
                est_psd.append(psd_mt)
                f, csd_mt = tsa.multi_taper_csd(np.vstack([ar_seq, ar_seq]),
                                                adaptive=adaptive)
                # Symmetrical in this case, so take one element out:
                est_csd.append(csd_mt[0][1])

        fxx = np.mean(psd, axis=0)
        fxx_est1 = np.mean(est_psd, axis=0)
        fxx_est2 = np.mean(est_csd, axis=0)

        # Tests the psd:
        psd_ratio1 = np.mean(fxx_est1 / fxx)
        npt.assert_array_almost_equal(psd_ratio1, 1, decimal=-1)
        # Tests the csd:
        psd_ratio2 = np.mean(fxx_est2 / fxx)
        npt.assert_array_almost_equal(psd_ratio2, 1, decimal=-1)
Example #21
def test_multi_taper_psd_csd():
    """

    Test the multi taper psd and csd estimation functions.
    Based on the example in
    doc/examples/multi_taper_spectral_estimation.py

    """

    N = 2 ** 10
    n_reps = 10

    psd = []
    est_psd = []
    est_csd = []
    for jk in [True, False]:
        for k in range(n_reps):
            for adaptive in [True, False]:
                ar_seq, nz, alpha = utils.ar_generator(N=N, drop_transients=10)
                ar_seq -= ar_seq.mean()
                fgrid, hz = tsa.freq_response(1.0, a=np.r_[1, -alpha],
                                              n_freqs=N)
                psd.append(2 * (hz * hz.conj()).real)
                f, psd_mt, nu = tsa.multi_taper_psd(ar_seq, adaptive=adaptive,
                                                    jackknife=jk)
                est_psd.append(psd_mt)
                f, csd_mt = tsa.multi_taper_csd(np.vstack([ar_seq, ar_seq]),
                                               adaptive=adaptive)
                # Symmetrical in this case, so take one element out:
                est_csd.append(csd_mt[0][1])

        fxx = np.mean(psd, axis=0)
        fxx_est1 = np.mean(est_psd, axis=0)
        fxx_est2 = np.mean(est_csd, axis=0)

        # Tests the psd:
        psd_ratio1 = np.mean(fxx_est1 / fxx)
        npt.assert_array_almost_equal(psd_ratio1, 1, decimal=-1)
        # Tests the csd:
        psd_ratio2 = np.mean(fxx_est2 / fxx)
        npt.assert_array_almost_equal(psd_ratio2, 1, decimal=-1)
Example #22
def test_hermitian_multitaper_csd():
    """
    Make sure CSD matrices returned by various methods have
    Hermitian symmetry.
    """

    sig = np.random.randn(4, 256)

    _, csd1 = tsa.multi_taper_csd(sig, adaptive=False)

    for i in range(4):
        for j in range(i + 1):
            xc1 = csd1[i, j]
            xc2 = csd1[j, i]
            npt.assert_equal(xc1, xc2.conj(), err_msg='MTM CSD not Hermitian')

    _, psd, _ = tsa.multi_taper_psd(sig, adaptive=False)
    for i in range(4):
        npt.assert_almost_equal(
            psd[i],
            csd1[i, i].real,
            err_msg='MTM CSD diagonal inconsistent with real PSD')
Example #23
def test_hermitian_multitaper_csd():
    """
    Make sure CSD matrices returned by various methods have
    Hermitian symmetry.
    """

    sig = np.random.randn(4,256)

    _, csd1 = tsa.multi_taper_csd(sig, adaptive=False)

    for i in range(4):
        for j in range(i+1):
            xc1 = csd1[i,j]
            xc2 = csd1[j,i]
            npt.assert_equal(
                xc1, xc2.conj(), err_msg='MTM CSD not Hermitian'
                )

    _, psd, _ = tsa.multi_taper_psd(sig, adaptive=False)
    for i in range(4):
        npt.assert_almost_equal(
            psd[i], csd1[i,i].real,
            err_msg='MTM CSD diagonal inconsistent with real PSD'
            )
Example #24
# This is the 'true' value, corrected for one-sided spectral density functions
Sxx_true = Sw_true[0, 0].real
Syy_true = Sw_true[1, 1].real

"""

The other is an estimate based on a multi-taper spectral estimate from the
empirical signals:

"""

c_x = np.empty((L, w.shape[0]))
c_y = np.empty((L, w.shape[0]))

for i in range(N):
    frex, c_x[i], nu = alg.multi_taper_psd(z[i][0])
    frex, c_y[i], nu = alg.multi_taper_psd(z[i][1])

"""

We plot these on the same axes, for a direct comparison:

"""

ax01.plot(w, Sxx_true, 'b', label='true Sxx(w)')
ax01.plot(w, Sxx_est, 'b--', label='estimated Sxx(w)')
ax01.plot(w, Syy_true, 'g', label='true Syy(w)')
ax01.plot(w, Syy_est, 'g--', label='estimated Syy(w)')
ax01.plot(w, np.mean(c_x, 0), 'r', label='Sxx(w) - MT PSD')
ax01.plot(w, np.mean(c_y, 0), 'r--', label='Syy(w) - MT PSD')
Example #25
# Load Realizations

Xp=np.load('Ice_Age_Realizations.npy')

# Remove Mean for all variables

IT=IT-np.mean(IT)
IP=IP-np.mean(IP)
Id18Op=Id18Op-np.mean(Id18Op)
d18Oi=d18Oi-np.mean(d18Oi)
Xpm=Xp-np.mean(Xp, axis=0)

#
# Compute Spectra For all variables

ITf, ITpsd_mt, ITnu = tsa.multi_taper_psd(IT, Fs=1.0,adaptive=False, jackknife=False)
IPf, IPpsd_mt, IPnu = tsa.multi_taper_psd(IP, Fs=1.0,adaptive=False, jackknife=False)
Id18Opf, Id18Oppsd_mt, Id18Opnu = tsa.multi_taper_psd(Id18Op, Fs=1.0,adaptive=False, jackknife=False)
d18Oif, d18Oipsd_mt, d18Oinu = tsa.multi_taper_psd(d18Oi, Fs=1.0,adaptive=False, jackknife=False)

Xpf=np.zeros((len(ITf),len(Xpm[1])))
Xpsd_mt=np.zeros((len(ITf),len(Xpm[1])))
Xpnu=np.zeros((len(ITf),len(Xpm[1])))

for i in range(len(Xpm[1])):
    Xpf[:,i],Xpsd_mt[:,i],Xpnu[:,i]=tsa.multi_taper_psd(Xpm[:,i], Fs=1.0,adaptive=False, jackknife=False)


# Compute quantiles for spectra

from scipy.stats.mstats import mquantiles
Example #26
import nitime.utils as tsu

signal = np.loadtxt(open("../build/signal.csv", "rb"),
                    delimiter=",",
                    skiprows=1)

restored = np.loadtxt(open("../build/restored.csv", "rb"),
                      delimiter=",",
                      skiprows=1)
inp_sampling_rate = 1000.
lb = 0  # Hz
ub = 1000  # Hz

_, signal_spectra, _ = tsa.multi_taper_psd(signal,
                                           Fs=inp_sampling_rate,
                                           BW=None,
                                           adaptive=True,
                                           low_bias=True)
_, noise_spectra, _ = tsa.multi_taper_psd(restored - signal,
                                          Fs=inp_sampling_rate,
                                          BW=None,
                                          adaptive=True,
                                          low_bias=True)

freqs = np.linspace(0, inp_sampling_rate / 2, signal.shape[-1] // 2 + 1)

f = plt.figure()
ax = f.add_subplot(1, 2, 1)
ax_snr_info = f.add_subplot(1, 2, 2)
lb_idx, ub_idx = tsu.get_bounds(freqs, lb, ub)
freqs = freqs[lb_idx:ub_idx]
Example #27
TT = TT - np.mean(TT)
TP = TP - np.mean(TP)
Td18Op = Td18Op - np.mean(Td18Op)
TV = TV - np.mean(TV)
TRH = TRH - np.mean(TRH)
TC = TC - np.mean(TC)
TS = Td18Os - np.mean(Td18Os)

Xpm = Xp - np.mean(Xp, axis=0)

#
# Compute Spectra For all variables

TTf, TTpsd_mt, TTnu = tsa.multi_taper_psd(TT,
                                          Fs=1.0,
                                          adaptive=False,
                                          jackknife=False)
TPf, TPpsd_mt, TPnu = tsa.multi_taper_psd(TP,
                                          Fs=1.0,
                                          adaptive=False,
                                          jackknife=False)
Td18Opf, Td18Oppsd_mt, Td18Opnu = tsa.multi_taper_psd(Td18Op,
                                                      Fs=1.0,
                                                      adaptive=False,
                                                      jackknife=False)
TVf, TVpsd_mt, TVnu = tsa.multi_taper_psd(TV,
                                          Fs=1.0,
                                          adaptive=False,
                                          jackknife=False)
TRHf, TRHpsd_mt, TRHnu = tsa.multi_taper_psd(TRH,
                                             Fs=1.0,
Example #28
def get_freq_spectrum_notdict(timeseries_data,
                              fsampling,
                              fmin=2,
                              fmax=45,
                              N=68,
                              downsample_factor=4,
                              plot=True):
    """[When input data is not in a dict format. Filters timeseries_data with a band pass filter designed
        with cutoff frequencies [fmin, fmax], the filtered time series will be downsampled by a factor of
        downsample_factor with a low-pass filter. The downsampled time series is then transformed into the
        frequency domain using the multi-taper method]

    Args:
        timeseries_data ([type]): [N x t time series data, with N brain regions for source localized data or
                                   N channels for sensor data. Duration = t time points, or t/fs seconds]
        fsampling ([type]): [sampling frequency of timeseries_data, no default given because this will vary]
        downsample_factor ([type]): Defaults to 4. [The ratio to downsample data, for example, 600 Hz MEG data
                                    will be downsampled to 150 Hz. This will be used in the decimate function,
                                    which has a low-pass filter built in to eliminate harmonic contamination]
        fmin (int, optional): Defaults to 2. [low cutoff frequency]
        fmax (int, optional): Defaults to 45. [high cutoff frequency]
        N (int, optional): Defaults to 68. [number of regions or number of channels]
        plot (boolean, optional): Defaults to True. [Plot the spectra?]

    Returns:
        Freq_data[type]: [power spectrum for all input regions/channels]
        f : frequency vector/bins for power spectrum
    """
    fs = fsampling
    fvec = np.linspace(fmin, fmax, 40)
    hbp = firls(
        101,
        np.array([0, 0.2 * fmin, 0.9 * fmin, fmax - 2, fmax + 5, 100]) * 2 /
        fs,
        desired=np.array([0, 0, 1, 1, 0, 0]),
    )  # for detrending, a bandpass
    lpf = np.array([1, 2, 5, 2, 1])
    lpf = lpf / np.sum(lpf)
    ind_del = (
        hbp.size
    )  # number of coefficients in hbp. Delete that number in beginning of signal due to filtering

    Freq_data = []
    for row in timeseries_data:
        q = lfilter(hbp, 1, row)
        q = q[ind_del:-1]
        ds_q = decimate(q, downsample_factor, axis=0)
        f, psd, nu = tsa.multi_taper_psd(ds_q,
                                         Fs=fs / downsample_factor,
                                         NW=3,
                                         BW=1,
                                         adaptive=False,
                                         jackknife=False)
        Fdata = np.convolve(psd, lpf, mode="same")
        Freq_data.append(Fdata)

    Freq_data = np.asarray(Freq_data)
    assert Freq_data.shape[
        0] == N  # make sure we have N regions/channels spectra

    ind_fmin = np.abs(f - fmin).argmin()
    ind_fmax = np.abs(f - fmax).argmin()
    frange = f[ind_fmin:ind_fmax]
    Freq_range = Freq_data[:, ind_fmin:ind_fmax]

    if plot == True:
        # fig1, ax1 = mpl.subplots(1, 1)
        # Plotting source localized MEG data
        plt.figure(num=1, figsize=[3, 1.5], dpi=300)
        plt.xlabel("Frequency (Hz)")
        plt.ylabel("Magnitude (dB)")
        for g in range(len(Freq_data)):
            plt.plot(frange, mag2db(Freq_range[g, :]))
    else:
        print("No plot")

    output = (Freq_data, fvec, Freq_range, frange)

    return output
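A minimal usage sketch for get_freq_spectrum_notdict, assuming the module imports used in the snippet (numpy as np, nitime.algorithms as tsa, and firls, lfilter, decimate from scipy.signal); the 10-channel array is hypothetical and plot=False skips the plotting branch:

import numpy as np

fs = 600
# Hypothetical sensor array: 10 channels, 60 s at 600 Hz
X = np.random.randn(10, 60 * fs)

Freq_data, fvec, Freq_range, frange = get_freq_spectrum_notdict(
    X, fs, fmin=2, fmax=45, N=10, downsample_factor=4, plot=False)
print(Freq_data.shape, Freq_range.shape)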
Example #29
Sd18Op=np.load('HIDDEN_d18OP.npy')
Sd18Os=np.load('HIDDEN_d18OS.npy')


H1=np.load('HIDDEN_PDRIP_T_025.npy')
H2=np.load('HIDDEN_PDRIP_T_05.npy')
H3=np.load('HIDDEN_PDRIP_T_1.npy')
H4=np.load('HIDDEN_PDRIP_T_5.npy')


H1=H1-np.mean(H1)
H2=H2-np.mean(H2)
H3=H3-np.mean(H3)
H4=H4-np.mean(H4)

H1f, H1psd_mt, H1nu = tsa.multi_taper_psd(H1, Fs=1.0,adaptive=False, jackknife=False)
H2f, H2psd_mt, H2nu = tsa.multi_taper_psd(H2, Fs=1.0,adaptive=False, jackknife=False)
H3f, H3psd_mt, H3nu = tsa.multi_taper_psd(H3, Fs=1.0,adaptive=False, jackknife=False)
H4f, H4psd_mt, H4nu = tsa.multi_taper_psd(H4, Fs=1.0,adaptive=False, jackknife=False)


ST=ST-np.mean(ST)
SP=SP-np.mean(SP)
Sd18Op=Sd18Op-np.mean(Sd18Op)
Sd18Os=Sd18Os-np.mean(Sd18Os)

STf, STpsd_mt, STnu = tsa.multi_taper_psd(ST, Fs=1.0,adaptive=False, jackknife=False)
SPf, SPpsd_mt, SPnu = tsa.multi_taper_psd(SP, Fs=1.0,adaptive=False, jackknife=False)
Sd18Opf, Sd18Oppsd_mt, Sd18Opnu = tsa.multi_taper_psd(Sd18Op, Fs=1.0,adaptive=False, jackknife=False)
Sd18Osf, Sd18Ospsd_mt, Sd18Osnu = tsa.multi_taper_psd(Sd18Os, Fs=1.0,adaptive=False, jackknife=False)
Example #30
# get channel names (attribute added during export)
print(raw_ts.ch_names[:3])

###############################################################################
# investigate spectral density

import matplotlib.pyplot as plt

import nitime.algorithms as tsa

ch_sel = raw_ts.ch_names.index('MEG 0122')

data_ch = raw_ts.data[ch_sel]

f, psd_mt, nu = tsa.multi_taper_psd(data_ch,
                                    Fs=raw_ts.sampling_rate,
                                    BW=1,
                                    adaptive=False,
                                    jackknife=False)

# Convert PSD to dB
psd_mt = 10 * np.log10(psd_mt)

plt.close('all')
plt.plot(f, psd_mt)
plt.xlabel('Frequency (Hz)')
plt.ylabel('Power Spectral Density (dB/Hz)')
plt.title('Multitaper Power Spectrum \n %s' % raw_ts.ch_names[ch_sel])
plt.show()
Example #31
p1 = Rectangle((0, 0), 0.5, 0.5, fc="Black")
legend([p1,p2],[r'Original Timeseries (1 Year = 1 Data Point)','1000 Age-Perturbed Realizations [5$\%$]'],loc=3,fontsize=14,frameon=False)
plt.show()
#======================================================================================

Xp=np.load('Palmyra_Age_Perturbed.npy')
# Remove Mean for all variables

CST=SST-np.mean(SST)
CSS=SSS-np.mean(SSS)
Cd18O=Cd18O-np.mean(Cd18O)
Xpm=Xp-np.mean(Xp, axis=0)

# Compute Spectra For all variables

CSTf, CSTpsd_mt, CSTnu = tsa.multi_taper_psd(CST, Fs=1.0,adaptive=False, jackknife=False)
CSSf, CSSpsd_mt, CSSnu = tsa.multi_taper_psd(CSS, Fs=1.0,adaptive=False, jackknife=False)
Cd18Of, Cd18Opsd_mt, Cd18Onu = tsa.multi_taper_psd(Cd18O, Fs=1.0,adaptive=False, jackknife=False)


Xpf=np.zeros((len(CSTf),len(Xpm[1])))
Xpsd_mt=np.zeros((len(CSTf),len(Xpm[1])))
Xpnu=np.zeros((len(CSTf),len(Xpm[1])))

for i in range(len(Xpm[1])):
    Xpf[:,i],Xpsd_mt[:,i],Xpnu[:,i]=tsa.multi_taper_psd(Xpm[:,i], Fs=1.0,adaptive=False, jackknife=False)


# Compute quantiles for spectra

from scipy.stats.mstats import mquantiles
Example #32
nast_e.trim(starttime=time_epi)
nast_u.trim(starttime=time_epi)
katnp_n.trim(starttime=time_epi)
katnp_e.trim(starttime=time_epi)
katnp_u.trim(starttime=time_epi)

kkn4_n[0].data = r_[0, diff(kkn4_n[0].data) / 0.2]
kkn4_e[0].data = r_[0, diff(kkn4_e[0].data) / 0.2]
kkn4_u[0].data = r_[0, diff(kkn4_u[0].data) / 0.2]
nast_n[0].data = r_[0, diff(nast_n[0].data) / 0.2]
nast_e[0].data = r_[0, diff(nast_e[0].data) / 0.2]
nast_u[0].data = r_[0, diff(nast_u[0].data) / 0.2]

fn_kkn4, npsd_kkn4, nu = tsa.multi_taper_psd(kkn4_n[0].data,
                                             Fs=1. / kkn4_n[0].stats.delta,
                                             adaptive=True,
                                             jackknife=False,
                                             low_bias=True)
fe_kkn4, epsd_kkn4, nu = tsa.multi_taper_psd(kkn4_e[0].data,
                                             Fs=1. / kkn4_e[0].stats.delta,
                                             adaptive=True,
                                             jackknife=False,
                                             low_bias=True)
fu_kkn4, upsd_kkn4, nu = tsa.multi_taper_psd(kkn4_u[0].data,
                                             Fs=1. / kkn4_u[0].stats.delta,
                                             adaptive=True,
                                             jackknife=False,
                                             low_bias=True)

fn_nast, npsd_nast, nu = tsa.multi_taper_psd(nast_n[0].data,
                                             Fs=1. / nast_n[0].stats.delta,
Example #33
# index at certain time
print(raw_ts.at(110.5))

# get channel names (attribute added during export)
print(raw_ts.ch_names[:3])

###############################################################################
# investigate spectral density

import matplotlib.pyplot as plt

import nitime.algorithms as tsa

ch_sel = raw_ts.ch_names.index('MEG 0122')

data_ch = raw_ts.data[ch_sel]

f, psd_mt, nu = tsa.multi_taper_psd(data_ch, Fs=raw_ts.sampling_rate,
                                    BW=1, adaptive=False, jackknife=False)

# Convert PSD to dB
psd_mt = 10 * np.log10(psd_mt)

plt.close('all')
plt.plot(f, psd_mt)
plt.xlabel('Frequency (Hz)')
plt.ylabel('Power Spectral Density (dB/Hz)')
plt.title('Multitaper Power Spectrum \n %s' % raw_ts.ch_names[ch_sel])
plt.show()
Example #34
# --- Direct Spectral Estimator
freqs, d_sdf = alg.periodogram(ar_seq)
dB(d_sdf, d_sdf)

# --- Welch's Overlapping Periodogram Method via mlab
mlab_sdf, mlab_freqs = pp.mlab.psd(ar_seq, NFFT=N)
mlab_freqs *= np.pi / mlab_freqs.max()
mlab_sdf = mlab_sdf.squeeze()
dB(mlab_sdf, mlab_sdf)


### Taper Bandwidth Adjustments
NW = 4

# --- Regular Multitaper Estimate
f, sdf_mt, nu = alg.multi_taper_psd(ar_seq, NW=NW, adaptive=False, jackknife=False)
dB(sdf_mt, sdf_mt)
# OK.. grab the number of tapers used from here
Kmax = nu[0] / 2

# --- Adaptively Weighted Multitaper Estimate
# -- Adaptive weighting from Thompson 1982, or Percival and Walden 1993
f, adaptive_sdf_mt, nu = alg.multi_taper_psd(ar_seq, NW=NW, adaptive=True, jackknife=False)
dB(adaptive_sdf_mt, adaptive_sdf_mt)

# --- Jack-knifed intervals for regular weighting-----------------------------
# currently returns log-variance
_, _, jk_var = alg.multi_taper_psd(ar_seq, NW=NW, adaptive=False, jackknife=True)

# the Jackknife mean is approximately distributed about the true log-sdf
# as a Student's t distribution with variance jk_var ... but in
Example #35
def MTM_specgram(data,movingwin=[0.2, 0.050],**kwargs):
	'''modeled from mtspecgramc.m 

	data: 			format: time x trials (i.e. data[:,0] is 1 trial)
	movingwin:		 is in the format of [window, window_step] e.g. [0.2, 0.05] sec
	kwargs: 
		tapers:		in the form of [TW, K] e.g. [5/2 5]
		pad: 		(padding factor for the FFT) - optional (can take values -1,0,1,2...). 
                     -1 corresponds to no padding, 0 corresponds to padding
                     to the next highest power of 2 etc.
 			      	 e.g. For N = 500, if PAD = -1, we do not pad; if PAD = 0, we pad the FFT
 			      	 to 512 points, if pad=1, we pad to 1024 points etc.
 			      	 Defaults to 0.
 		Fs:			(sampling frequency) 
 		fpass:		(frequency band to be used in the calculation in the form fmin fmax])- optional. 
                    Default all frequencies between 0 and Fs/2
		trialave    (average over trials/channels when 1


	 Output:
        S       (spectrum in form time x frequency x channels/trials if trialave=0; 
                in the form time x frequency if trialave=1)
        t       (times)
        f       (frequencies)

	'''

	if 'tapers' in kwargs.keys():
		tapers = kwargs['tapers']
	else:
		tapers = [5/2, 5]

	if 'pad' in kwargs.keys():
		pad = kwargs['pad']
	else:
		pad = 0

	if 'Fs' in kwargs.keys():
		Fs = kwargs['Fs']
	else:
		Fs = 1000

	if 'fpass' in kwargs.keys():
		fpass = kwargs['fpass']
	else:
		fpass = [0, 100]

	if 'trialave' in kwargs.keys():
		trialave = kwargs['trialave']
	else:
		trialave = 0

	num_trials = data.shape[1]

	N = data.shape[0] #ms of trials

	Nwin=round(Fs*movingwin[0])
	Nstep=round(Fs*movingwin[1])

	t_power = 1
	while 2**t_power < Nwin:
		t_power +=1
	nfft = 2**(t_power+pad)

	#f=getfgrid(Fs,nfft,fpass)
	#f = np.arange(0,501,5)
	winstart=np.arange(0,N-Nwin,Nstep)
	nw=len(winstart)
	
	#Dimensions of S: trials x num_win (t) x f

	for n in range(nw):

		datawin=data[int(winstart[int(n)]):int(winstart[int(n)])+int(Nwin),:].T
		if  datawin.shape[1] < nfft :
			dat = np.zeros(( datawin.shape[0], nfft ))
			pad = (nfft - datawin.shape[1])
			if pad%2: #Odd: slice indices must be ints in Python 3
				pad1 = pad2 = int(np.floor(pad/2.))

			elif not pad%2:
				pad1 = pad2 = pad//2

			dat[:, pad1:(datawin.shape[1]+pad2)] = datawin
		
		elif nfft == datawin.shape[1]:
			dat = datawin
		
		else:
			raise Exception("Not implemented yet...")

		if 'small_f_steps' in kwargs.keys():
			f, psd_est, nu = tsa.multi_taper_psd(dat,Fs=Fs, NFFT=nfft)
			
		else:
			f, psd_est, nu = tsa.multi_taper_psd(dat,Fs=Fs)

		if n==0:
			S = np.zeros(( num_trials, nw, len(f[f<fpass[1]]) ))

		#print len(f), psd_est.shape, len(nu), S.shape, len(f<fpass[1])
		S[:,n,:] = psd_est[:,f<fpass[1]]

	t=(winstart+round(Nwin/2))/float(Fs)

	if trialave:
		S = np.mean(S,axis=0)

	return S, f, t
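A minimal usage sketch for MTM_specgram, assuming numpy and nitime.algorithms are imported as np and tsa in the module; the trial data are hypothetical:

import numpy as np

# Hypothetical data: 1000 ms at 1 kHz, 5 trials, arranged time x trials
data = np.random.randn(1000, 5)

S, f, t = MTM_specgram(data, movingwin=[0.2, 0.05], Fs=1000, fpass=[0, 100], trialave=1)
print(S.shape)   # (num_windows, num_frequencies) because trialave=1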
Example #36
import matplotlib.pyplot as plt

import nitime.utils as utils
import nitime.timeseries as ts
import nitime.viz as viz
from nitime import algorithms as tsa
import nitime.utils as tsu

signal = np.loadtxt(open("../build/signal.csv", "rb"), delimiter=",", skiprows=1)

restored = np.loadtxt(open("../build/restored.csv", "rb"), delimiter=",", skiprows=1)
inp_sampling_rate = 1000.0
lb = 0  # Hz
ub = 1000  # Hz

_, signal_spectra, _ = tsa.multi_taper_psd(signal, Fs=inp_sampling_rate, BW=None, adaptive=True, low_bias=True)
_, noise_spectra, _ = tsa.multi_taper_psd(
    restored - signal, Fs=inp_sampling_rate, BW=None, adaptive=True, low_bias=True
)


freqs = np.linspace(0, inp_sampling_rate / 2, signal.shape[-1] // 2 + 1)

f = plt.figure()
ax = f.add_subplot(1, 2, 1)
ax_snr_info = f.add_subplot(1, 2, 2)
lb_idx, ub_idx = tsu.get_bounds(freqs, lb, ub)
freqs = freqs[lb_idx:ub_idx]
snr = signal_spectra / noise_spectra

ax.plot(freqs, np.log(signal_spectra[lb_idx:ub_idx]), label="Signal")
Example #37
def allpsd(home,project_name,run_name,run_number,GF_list,d_or_s,v_or_d,decimate,lowpass):
    '''
    Compute PSDs for either all the synthetics for a particular run or all the data
    '''
    
    from numpy import genfromtxt,where,log10,savez
    from obspy import read
    from mudpy.forward import lowpass as lfilter
    from mudpy.green import stdecimate 
    import nitime.algorithms as tsa
    
    #Decide what I'm going to work on
    sta=genfromtxt(home+project_name+'/data/station_info/'+GF_list,usecols=0,dtype='U')
    gf=genfromtxt(home+project_name+'/data/station_info/'+GF_list,usecols=[4,5],dtype='f')
    datapath=home+project_name+'/data/waveforms/'
    synthpath=home+project_name+'/output/inverse_models/waveforms/'
    outpath=home+project_name+'/analysis/frequency/'
    if v_or_d.lower()=='d':
        kgf=0 #disp
        datasuffix='kdisp'
        synthsuffix='disp'
    elif v_or_d.lower()=='v':
        kgf=1 #disp
        datasuffix='kvel'
        synthsuffix='vel'
    if d_or_s.lower()=='d': #We're working on observed data
        path=datapath
        suffix=datasuffix
    else: #We're looking at syntehtics from a certain run
        path=synthpath
        suffix=synthsuffix
    i=where(gf[:,kgf]==1)[0]
    for k in range(len(i)):
        print('Working on '+sta[i[k]])
        if d_or_s.lower()=='d': #Read data
            n=read(path+sta[i[k]]+'.'+suffix+'.n')
            e=read(path+sta[i[k]]+'.'+suffix+'.e')
            u=read(path+sta[i[k]]+'.'+suffix+'.u')
            outname=sta[i[k]]+'.'+suffix+'.psd'
            if lowpass!=None:
                fsample=1./e[0].stats.delta
                e[0].data=lfilter(e[0].data,lowpass,fsample,10)
                n[0].data=lfilter(n[0].data,lowpass,fsample,10)
                u[0].data=lfilter(u[0].data,lowpass,fsample,10)
            if decimate!=None:
                n[0]=stdecimate(n[0],decimate)
                e[0]=stdecimate(e[0],decimate)
                u[0]=stdecimate(u[0],decimate)
        else: #Read synthetics
            n=read(path+run_name+'.'+run_number+'.'+sta[i[k]]+'.'+suffix+'.n.sac')
            e=read(path+run_name+'.'+run_number+'.'+sta[i[k]]+'.'+suffix+'.e.sac')
            u=read(path+run_name+'.'+run_number+'.'+sta[i[k]]+'.'+suffix+'.u.sac')
            outname=run_name+'.'+run_number+'.'+sta[i[k]]+'.'+suffix+'.psd'
        #Compute spectra
        fn, npsd, nu = tsa.multi_taper_psd(n[0].data,Fs=1./n[0].stats.delta,adaptive=True,jackknife=False,low_bias=True)
        fe, epsd, nu = tsa.multi_taper_psd(e[0].data,Fs=1./e[0].stats.delta,adaptive=True,jackknife=False,low_bias=True)
        fu, upsd, nu = tsa.multi_taper_psd(u[0].data,Fs=1./u[0].stats.delta,adaptive=True,jackknife=False,low_bias=True)
        #Convert to dB
        npsd=10*log10(npsd)
        epsd=10*log10(epsd)
        upsd=10*log10(upsd)
        #Write to file
        savez(outpath+outname,fn=fn,fe=fe,fu=fu,npsd=npsd,epsd=epsd,upsd=upsd)
Example #38
def get_spectrum(v, N, dt=None, method="welch", detrend=False, **kwargs):
    """Compute a lagged correlation between two time series
    These time series are assumed to be regularly sampled in time
    and along the same time line.

    Parameters
    ----------

        v: ndarray, pd.Series
            Time series, the index must be time if dt is not provided

        N: int
            Length of the output

        dt: float, optional
            Time step

        method: string
            Method that will be employed for spectral calculations.
            Default is 'welch'

        detrend: str or function or False, optional
            Turns detrending on or off. Default is False.

    See:
        - https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.signal.periodogram.html
        - https://krischer.github.io/mtspec/
        - http://nipy.org/nitime/examples/multi_taper_spectral_estimation.html
    """
    if v is None:
        _v = np.random.randn(N)
    else:
        _v = v.iloc[:N]
    if dt is None:
        dt = _v.reset_index()["index"].diff().mean()

    if detrend and not method == "welch":
        print("!!! Not implemented yet except for welch")
    if method == "welch":
        from scipy import signal

        dkwargs = {
            "window": "hann",
            "return_onesided": False,
            "detrend": detrend,
            "scaling": "density",
        }
        dkwargs.update(kwargs)
        f, E = signal.periodogram(_v, fs=1 / dt, axis=0, **dkwargs)
    elif method == "mtspec":
        from mtspec import mtspec

        lE, f = mtspec(data=_v,
                       delta=dt,
                       time_bandwidth=4.0,
                       number_of_tapers=6,
                       quadratic=True)
    elif method == "mt":
        import nitime.algorithms as tsa

        dkwargs = {
            "NW": 2,
            "sides": "twosided",
            "adaptive": False,
            "jackknife": False
        }
        dkwargs.update(kwargs)
        lf, E, nu = tsa.multi_taper_psd(_v, Fs=1 / dt, **dkwargs)
        f = fftfreq(len(lf)) * 24.0
        # print('Number of tapers = %d' %(nu[0]/2))
    return pd.Series(E, index=f)
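A minimal usage sketch for get_spectrum, assuming pandas, numpy and an fftfreq function (e.g. numpy.fft.fftfreq) are available at module level as the snippet implies; with v=None the function generates white noise internally, and dt is given explicitly (here in days, for hourly sampling):

# Welch periodogram of internally generated white noise
E_welch = get_spectrum(None, N=1024, dt=1.0 / 24, method="welch")

# Multitaper estimate of the same kind of series, passing NW through **kwargs
E_mt = get_spectrum(None, N=1024, dt=1.0 / 24, method="mt", NW=3)
print(E_mt.head())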
Example #39
def mtm(ys,
        ts,
        NW=None,
        BW=None,
        detrend=None,
        sg_kwargs=None,
        gaussianize=False,
        standardize=False,
        adaptive=False,
        jackknife=True,
        low_bias=True,
        sides='default',
        nfft=None):
    ''' Returns spectral density using a multi-taper method.


    Based on the function in the time series analysis for neuroscience toolbox: http://nipy.org/nitime/api/generated/nitime.algorithms.spectral.html

    Parameters
    ----------

    ys : array
        a time series
    ts : array
        time axis of the time series
    NW : float
        The normalized half-bandwidth of the data tapers, indicating a
        multiple of the fundamental frequency of the DFT (Fs/N).
        Common choices are n/2, for n >= 4.
    BW : float
        The sampling-relative bandwidth of the data tapers
    detrend : str
          If None, no detrending is applied. Available detrending methods:
              - None - no detrending will be applied (default);
              - linear - a linear least-squares fit to `ys` is subtracted;
              - constant - the mean of `ys` is subtracted
              - savitzy-golay - ys is filtered using the Savitzky-Golay filters and the resulting filtered series is subtracted from y.
              - emd - Empirical mode decomposition
    sg_kwargs : dict
        The parameters for the Savitzky-Golay filters. See pyleoclim.utils.filter.savitzy_golay for details.
    gaussianize : bool
        If True, gaussianizes the timeseries
    standardize : bool
        If True, standardizes the timeseries
    adaptive : {True/False}
        Use an adaptive weighting routine to combine the PSD estimates of
        different tapers.
    jackknife : {True/False}
        Use the jackknife method to make an estimate of the PSD variance
        at each point.
    low_bias : {True/False}
        Rather than use 2NW tapers, only use the tapers that have better than
        90% spectral concentration within the bandwidth (still using
        a maximum of 2NW tapers)
    sides : str (optional)   [ 'default' | 'onesided' | 'twosided' ]
        This determines which sides of the spectrum to return.
        For complex-valued inputs, the default is two-sided; for real-valued
        inputs, the default is one-sided.

    Returns
    -------

    res_dict : dict
        the result dictionary, including
        - freq (array): the frequency vector
        - psd (array): the spectral density vector

    See Also
    --------
    pyleoclim.utils.spectral.periodogram : Estimate power spectral density using a periodogram
    pyleoclim.utils.spectral.welch : Returns spectral density using the welch method
    pyleoclim.utils.spectral.lomb_scargle : Return the computed periodogram using lomb-scargle algorithm
    pyleoclim.utils.spectral.wwz_psd : Return the psd of a timeseries using wwz method.
    pyleoclim.utils.filter.savitzy_golay : Filtering using Savitzy-Golay
    pyleoclim.utils.tsutils.detrend : Detrending method

    '''
    # preprocessing
    ts = np.array(ts)
    ys = np.array(ys)

    if len(ts) != len(ys):
        raise ValueError('Time and value axis should be the same length')

    # remove NaNs
    ys, ts = clean_ts(ys, ts)
    # check for evenly-spaced
    check = is_evenly_spaced(ts)
    if check == False:
        raise ValueError('For the MTM method, data should be evenly spaced')
    # preprocessing
    ys = preprocess(ys,
                    ts,
                    detrend=detrend,
                    sg_kwargs=sg_kwargs,
                    gaussianize=gaussianize,
                    standardize=standardize)

    # calculate sampling frequency fs
    dt = np.median(np.diff(ts))
    fs = 1 / dt

    # spectral analysis
    freq, psd, nu = nialg.multi_taper_psd(ys,
                                          Fs=fs,
                                          NW=NW,
                                          BW=BW,
                                          adaptive=adaptive,
                                          jackknife=jackknife,
                                          low_bias=low_bias,
                                          sides=sides,
                                          NFFT=nfft)  # call nitime func

    # fix the zero frequency point
    if freq[0] == 0:
        psd[0] = np.nan

    # output result
    res_dict = {
        'freq': np.asarray(freq),
        'psd': np.asarray(psd),
    }

    return res_dict
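A minimal usage sketch for mtm, assuming the pyleoclim helpers used above (clean_ts, is_evenly_spaced, preprocess) and nitime.algorithms as nialg are importable in the module; the annual series is hypothetical:

import numpy as np

# Hypothetical evenly spaced annual record with a 20-year cycle plus noise
ts = np.arange(0, 500, 1.0)
ys = np.cos(2 * np.pi * ts / 20) + 0.3 * np.random.randn(ts.size)

res = mtm(ys, ts, NW=3, jackknife=False)
print(res['freq'][:5])
print(res['psd'][:5])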
Example #40
dB(welch_psd, welch_psd)

fig04 = plot_spectral_estimate(freqs, psd, (welch_psd,), elabels=("Welch",))


""" 

.. image:: fig/multi_taper_spectral_estimation_04.png


Next, we use the multi-taper estimation method. We estimate the spectrum:

"""

f, psd_mt, nu = tsa.multi_taper_psd(
    ar_seq, adaptive=False, jackknife=False
    )
dB(psd_mt, psd_mt)


"""

And get the number of tapers from here: 

"""

Kmax = nu[0]/2


"""
Example #41
#p     = d18O p for SPEEDY

# Remove Mean for all variables

echam_p = ECHAM_p - np.mean(ECHAM_p)
echam_piso = ECHAM_piso - np.mean(ECHAM_piso)
echam_ice = ECHAM_ice - np.mean(ECHAM_ice)
quel_data = quel2 - np.mean(quel2)
quel_speedy = quel - np.mean(quel)
p_speedy = p - np.mean(p)

#
# Compute Spectra For all variables

ef, epsd_mt, enu = tsa.multi_taper_psd(echam_p,
                                       Fs=1.0,
                                       adaptive=False,
                                       jackknife=False)
epf, eppsd_mt, epnu = tsa.multi_taper_psd(echam_piso,
                                          Fs=1.0,
                                          adaptive=False,
                                          jackknife=False)
eif, eipsd_mt, einu = tsa.multi_taper_psd(echam_ice,
                                          Fs=1.0,
                                          adaptive=False,
                                          jackknife=False)

queldf, queldpsd_mt, queldnu = tsa.multi_taper_psd(quel_data,
                                                   Fs=1.0,
                                                   adaptive=False,
                                                   jackknife=False)
quelsf, quelspsd_mt, quelsnu = tsa.multi_taper_psd(quel_speedy,
Example #42
t=np.arange(1000,2005,1)
dt=1.0

# Load Age-perturbed data:
Bchron=np.load('Cave_Age_Realizations.npy')
# Remove Mean for all variables
Xpm=Bchron-np.mean(Bchron, axis=0)
#
ST=ST-np.mean(ST)
SP=SP-np.mean(SP)
Sd18Op=Sd18Op-np.mean(Sd18Op)
Sd18Os=Sd18Os-np.mean(Sd18Os)
Sd18Oc=Sd18Oc-np.mean(Sd18Oc)
# Compute Spectra For all variables

STf, STpsd_mt, STnu = tsa.multi_taper_psd(ST, Fs=1.0,adaptive=False, jackknife=False)
SPf, SPpsd_mt, SPnu = tsa.multi_taper_psd(SP, Fs=1.0,adaptive=False, jackknife=False)
Sd18Opf, Sd18Oppsd_mt, Sd18Opnu = tsa.multi_taper_psd(Sd18Op, Fs=1.0,adaptive=False, jackknife=False)
Sd18Osf, Sd18Ospsd_mt, Sd18Osnu = tsa.multi_taper_psd(Sd18Os, Fs=1.0,adaptive=False, jackknife=False)
Sd18Ocf, Sd18Ocpsd_mt, Sd18Osnu = tsa.multi_taper_psd(Sd18Oc, Fs=1.0,adaptive=False, jackknife=False)

Xpf=np.zeros((len(STf),len(Xpm[1])))
Xpsd_mt=np.zeros((len(STf),len(Xpm[1])))
Xpnu=np.zeros((len(STf),len(Xpm[1])))

for i in range(len(Xpm[1])):
    Xpf[:,i],Xpsd_mt[:,i],Xpnu[:,i]=tsa.multi_taper_psd(Xpm[:,i], Fs=1.0,adaptive=False, jackknife=False)


# Compute quantiles for spectra
Example #43
def source_spectra(home, project_name, run_name, run_number, rupt, nstrike,
                   ndip):
    '''
    Compute PSDs of subfault source-time functions
    '''
    from numpy import genfromtxt, unique, zeros, where, arange, savez, mean
    from mudpy.forward import get_source_time_function, add2stf
    import nitime.algorithms as tsa

    outpath = home + project_name + '/analysis/frequency/'
    f = genfromtxt(rupt)
    num = f[:, 0]
    nfault = nstrike * ndip
    #Get slips
    all_ss = f[:, 8]
    all_ds = f[:, 9]
    all = zeros(len(all_ss) * 2)
    iss = 2 * arange(0, len(all) // 2, 1)      # even slots: strike-slip
    ids = 2 * arange(0, len(all) // 2, 1) + 1  # odd slots: dip-slip
    all[iss] = all_ss
    all[ids] = all_ds
    #Now parse for multiple rupture speeds
    unum = unique(num)
    #Count number of windows
    nwin = len(where(num == unum[0])[0])
    #Get rigidities
    mu = f[0:len(unum), 13]
    #Get rise times
    rise_time = f[0:len(unum), 7]
    #Get areas
    area = f[0:len(unum), 10] * f[0:len(unum), 11]
    for kfault in range(nfault):
        if kfault % 10 == 0:
            print('... working on subfault ' + str(kfault) + ' of ' +
                  str(nfault))
        #Get rupture times for subfault windows
        i = where(num == unum[kfault])[0]
        trup = f[i, 12]
        #Get slips on windows
        ss = all_ss[i]
        ds = all_ds[i]
        #Add it up
        slip = (ss**2 + ds**2)**0.5
        #Get first source time function
        t1, M1 = get_source_time_function(mu[kfault], area[kfault],
                                          rise_time[kfault], trup[0], slip[0])
        #Loop over windows
        for kwin in range(nwin - 1):
            #Get next source time function
            t2, M2 = get_source_time_function(mu[kfault], area[kfault],
                                              rise_time[kfault],
                                              trup[kwin + 1], slip[kwin + 1])
            #Add the source time functions
            t1, M1 = add2stf(t1, M1, t2, M2)
        #Convert to slip rate
        s = M1 / (mu[kfault] * area[kfault])
        #remove mean
        s = s - mean(s)
        #Done, now compute the spectrum of the STF
        fsample = 1. / (t1[1] - t1[0])
        freq, psd, nu = tsa.multi_taper_psd(s,
                                            Fs=fsample,
                                            adaptive=True,
                                            jackknife=False,
                                            low_bias=True)
        outname = run_name + '.' + run_number + '.sub' + str(kfault).rjust(
            4, '0') + '.stfpsd'
        savez(outpath + outname, freq=freq, psd=psd)
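
The spectra are written with numpy.savez, which appends a .npz extension to the file name. A minimal sketch of reading one of these archives back; the path, run name and run number below are hypothetical:

# Reload one saved subfault PSD (all names below are placeholders).
import numpy as np

stf = np.load('/path/to/analysis/frequency/myrun.000000.sub0000.stfpsd.npz')
freq, psd = stf['freq'], stf['psd']  # keys match the savez() call above
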
Example #44
# Imports assumed by this example (not shown in the original snippet):
import numpy as np
from scipy.signal import decimate, firls, lfilter
import nitime.algorithms as tsa


def get_freq_spectrum(timeseries_data,
                      fsampling,
                      fmin=2,
                      fmax=45,
                      N=68,
                      downsample_factor=4):
    """[Filters timeseries_data with a band pass filter designed with cutoff frequencies [fmin, fmax],
        the filtered time series will be downsampled by a factor of downsample_factor with a low-pass
        filter. The downsampled time series is then transformed into the frequency domain using the
        multi-taper method]

    Args:
        timeseries_data ([type]): [N x t time series data, with N brain regions for source localized data or
                                   N channels for sensor data. Duration = t time points, or t/fs seconds]
        fsampling ([type]): [sampling frequency of timeseries_data, no default given because this will vary]
        downsample_factor ([type]): Defaults to 4. [The ratio to downsample data, for example, 600 Hz MEG data
                                    will be downsampled to 150 Hz. This will be used in the decimate function,
                                    which has a low-pass filter built in to eliminate harmonic contamination]
        fmin (int, optional): Defaults to 2. [low cutoff frequency]
        fmax (int, optional): Defaults to 45. [high cutoff frequency]
        N (int, optional): Defaults to 68. [number of regions or number of channels]

    Returns:
        Freq_data[type]: [power spectrum for all input regions/channels]
        f : frequency vector/bins for power spectrum
    """

    fs = fsampling
    fvec = np.linspace(fmin, fmax, 40)
    hbp = firls(
        101,
        np.array([0, 0.2 * fmin, 0.9 * fmin, fmax - 2, fmax + 5, 100]) * 2 /
        fs,
        desired=np.array([0, 0, 1, 1, 0, 0]),
    )  # for detrending, a bandpass
    lpf = np.array([1, 2, 5, 2, 1])
    lpf = lpf / np.sum(lpf)
    # Number of coefficients in hbp; drop that many samples from the start of
    # the filtered signal to remove the filter transient.
    ind_del = hbp.size

    Freq_data = {}

    for key in list(timeseries_data.keys()):
        data = timeseries_data[key]
        data = data.astype(float)
        row = np.asarray(data)
        q = lfilter(hbp, 1, row)
        q = q[ind_del:-1]  # delete transient portions
        ds_q = decimate(q, downsample_factor, axis=0)
        f, psd, nu = tsa.multi_taper_psd(ds_q,
                                         Fs=fs / downsample_factor,
                                         NW=3,
                                         BW=1,
                                         adaptive=False,
                                         jackknife=False)
        Fdata = np.convolve(psd, lpf, mode="same")
        Freq_data[key] = Fdata

    assert (
        len(Freq_data) == N
    )  # make sure we have correct number of regions/channels in the spectra

    # ind_fmin = np.abs(f-fmin).argmin()
    # ind_fmax = np.abs(f-fmax).argmin()
    # frange = f[ind_fmin:ind_fmax]
    # FMEGrange = Freq_data[:,ind_fmin:ind_fmax]
    return Freq_data, f
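
A minimal usage sketch for get_freq_spectrum as defined above, driven with synthetic data; the 600 Hz sampling rate, the region labels and the 60 s duration are illustrative assumptions:

# Toy input: 68 'regions', each a 60 s white-noise series sampled at 600 Hz.
import numpy as np

fs = 600.0
toy = {'region_%02d' % k: np.random.randn(int(60 * fs)) for k in range(68)}
Freq_data, f = get_freq_spectrum(toy, fsampling=fs, N=68, downsample_factor=4)
print(len(Freq_data), f.shape)  # 68 spectra, one common frequency vector
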
Example #45
def allpsd(home, project_name, run_name, run_number, GF_list, d_or_s, v_or_d,
           decimate, lowpass):
    '''
    Compute PSDs for either all the synthetics of a particular run or all the data
    '''

    from numpy import genfromtxt, where, log10, savez
    from obspy import read
    from mudpy.forward import lowpass as lfilter
    from mudpy.green import stdecimate
    import nitime.algorithms as tsa

    #Decide what I'm going to work on
    sta = genfromtxt(home + project_name + '/data/station_info/' + GF_list,
                     usecols=0,
                     dtype='S')
    gf = genfromtxt(home + project_name + '/data/station_info/' + GF_list,
                    usecols=[4, 5],
                    dtype='f')
    datapath = home + project_name + '/data/waveforms/'
    synthpath = home + project_name + '/output/inverse_models/waveforms/'
    outpath = home + project_name + '/analysis/frequency/'
    if v_or_d.lower() == 'd':
        kgf = 0  #disp
        datasuffix = 'kdisp'
        synthsuffix = 'disp'
    elif v_or_d.lower() == 'v':
        kgf = 1  #vel
        datasuffix = 'kvel'
        synthsuffix = 'vel'
    if d_or_s.lower() == 'd':  #We're working on observed data
        path = datapath
        suffix = datasuffix
    else:  #We're looking at synthetics from a certain run
        path = synthpath
        suffix = synthsuffix
    i = where(gf[:, kgf] == 1)[0]
    for k in range(len(i)):
        print('Working on ' + sta[i[k]])
        if d_or_s.lower() == 'd':  #Read data
            n = read(path + sta[i[k]] + '.' + suffix + '.n')
            e = read(path + sta[i[k]] + '.' + suffix + '.e')
            u = read(path + sta[i[k]] + '.' + suffix + '.u')
            outname = sta[i[k]] + '.' + suffix + '.psd'
            if lowpass is not None:
                fsample = 1. / e[0].stats.delta
                e[0].data = lfilter(e[0].data, lowpass, fsample, 10)
                n[0].data = lfilter(n[0].data, lowpass, fsample, 10)
                u[0].data = lfilter(u[0].data, lowpass, fsample, 10)
            if decimate is not None:
                n[0] = stdecimate(n[0], decimate)
                e[0] = stdecimate(e[0], decimate)
                u[0] = stdecimate(u[0], decimate)
        else:  #Read synthetics
            n = read(path + run_name + '.' + run_number + '.' + sta[i[k]] +
                     '.' + suffix + '.n.sac')
            e = read(path + run_name + '.' + run_number + '.' + sta[i[k]] +
                     '.' + suffix + '.e.sac')
            u = read(path + run_name + '.' + run_number + '.' + sta[i[k]] +
                     '.' + suffix + '.u.sac')
            outname = run_name + '.' + run_number + '.' + sta[
                i[k]] + '.' + suffix + '.psd'
        #Compute spectra
        fn, npsd, nu = tsa.multi_taper_psd(n[0].data,
                                           Fs=1. / n[0].stats.delta,
                                           adaptive=True,
                                           jackknife=False,
                                           low_bias=True)
        fe, epsd, nu = tsa.multi_taper_psd(e[0].data,
                                           Fs=1. / e[0].stats.delta,
                                           adaptive=True,
                                           jackknife=False,
                                           low_bias=True)
        fu, upsd, nu = tsa.multi_taper_psd(u[0].data,
                                           Fs=1. / u[0].stats.delta,
                                           adaptive=True,
                                           jackknife=False,
                                           low_bias=True)
        #Convert to dB
        npsd = 10 * log10(npsd)
        epsd = 10 * log10(epsd)
        upsd = 10 * log10(upsd)
        #Write to file
        savez(outpath + outname,
              fn=fn,
              fe=fe,
              fu=fu,
              npsd=npsd,
              epsd=epsd,
              upsd=upsd)
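
A sketch of how allpsd might be invoked for the observed velocity records of a project; every path and identifier below is a placeholder, shown only to illustrate the argument order:

# Hypothetical call to allpsd() as defined above (placeholder values).
allpsd(home='/home/user/Slip_inversion/', project_name='my_project',
       run_name='inv', run_number='0000', GF_list='stations.gflist',
       d_or_s='d', v_or_d='v', decimate=None, lowpass=None)
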
welch_freqs, welch_psd = tsa.get_spectra(ar_seq,
                                         method=dict(this_method='welch',
                                                     NFFT=N))
welch_freqs *= (np.pi / welch_freqs.max())
welch_psd = welch_psd.squeeze()
dB(welch_psd, welch_psd)

fig04 = plot_spectral_estimate(freqs, psd, (welch_psd, ), elabels=("Welch", ))
"""

.. image:: fig/multi_taper_spectral_estimation_04.png

Next, we use the multitaper estimation method. We estimate the spectrum:

"""

f, psd_mt, nu = tsa.multi_taper_psd(ar_seq, adaptive=False, jackknife=False)
dB(psd_mt, psd_mt)
"""

The degrees of freedom returned in nu give the number of tapers used (nu = 2K at each frequency), so:

"""

Kmax = nu[0] / 2
"""

We calculate a Chi-squared model 95% confidence interval with 2*Kmax degrees
of freedom (see [Percival1993]_ eq 258)

"""