Code Example #1
  def compute_spectrum(self):
    # One-sided PSDs of the two signals (Hann window, mean detrend)
    (Pxx_1, freq) = mlab.psd(self.signal_1, NFFT=self.NSAMPLES, Fs=self.RATE, detrend=mlab.detrend_mean,
                             window=mlab.window_hanning, noverlap=self.N_OVERLAP, sides='onesided')
    (Pxx_2, freq) = mlab.psd(self.signal_2, NFFT=self.NSAMPLES, Fs=self.RATE, detrend=mlab.detrend_mean,
                             window=mlab.window_hanning, noverlap=self.N_OVERLAP, sides='onesided')
    # Convert to dB via 10*log10()
    Pxx_1 = 10 * np.log10(Pxx_1)
    Pxx_2 = 10 * np.log10(Pxx_2)

    # Smooth in the frequency domain (equivalent to a moving average in the time domain)
    temp = np.reshape(Pxx_2 - Pxx_1, (self.NSAMPLES // 2 + 1,))
    sub_smoothed = cookb_signalsmooth.smooth(temp, window_len=61, window='flat')  # 61 or 51

    #compute the SNR
    self.snr_list.append(self.SNR(sub_smoothed))

    if self.PLOT == 1:
      self.plot_graph(freq, Pxx_1, Pxx_2, sub_smoothed)

    return sub_smoothed
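The pattern above (two one-sided PSDs, conversion to dB, subtraction, flat-window smoothing) can be reproduced standalone. A minimal sketch, with invented signal parameters and np.convolve standing in for the repo's cookb_signalsmooth helper:

import numpy as np
from matplotlib import mlab

RATE, NSAMPLES, N_OVERLAP = 44100, 1024, 512
t = np.arange(8 * NSAMPLES) / RATE
sig1 = np.sin(2 * np.pi * 1000 * t) + 0.1 * np.random.randn(t.size)  # quieter noise
sig2 = np.sin(2 * np.pi * 1000 * t) + 0.5 * np.random.randn(t.size)  # louder noise

Pxx1, freq = mlab.psd(sig1, NFFT=NSAMPLES, Fs=RATE, noverlap=N_OVERLAP, sides='onesided')
Pxx2, freq = mlab.psd(sig2, NFFT=NSAMPLES, Fs=RATE, noverlap=N_OVERLAP, sides='onesided')
sub_db = 10 * np.log10(Pxx2) - 10 * np.log10(Pxx1)    # PSD difference in dB
win = np.ones(61) / 61.0                              # flat (moving-average) window
sub_smoothed = np.convolve(sub_db, win, mode='same')  # smoothed dB difference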
Code Example #2
File: preprocessing.py Project: B-Rich/ICANN2011-MEG
def compute_psd(x, t_start=0, t_end=-1, NFFT=128, Fs=200, noverlap=None, f_min=0, f_max=-1):
    """Compute power spectral densities of a dataset 'x' which is
    organized as: trials X sensors X time.

    t_start, t_end = define a time window. Default: all the trial.
    NFFT = size of the FFT window.
    noverlap = amount of overlapping between windows.
    f_min, f_max = return a specific frequency window of PSD. Default: full range.

    Returns:

    x_psd = dataset organized as: trials X channels X PSD.
    freq = the actual frequencies of the PSD.
    """
    print "Computing PSD of each trial (%s) and for each channel (%s):" % (x.shape[0], x.shape[1])
    if noverlap is None:
        noverlap = NFFT - Fs * 0.100 # See van gerven and jensen 2009
    size = NFFT / 2 + 1
    f_idx = range(size)
    if f_min!=0 and f_max!=-1: # compute actual frequency interval size
        tmp, freq = mlab.psd(x[0, 0, t_start:t_end], NFFT=NFFT, Fs=Fs, noverlap=noverlap)
        f_idx = np.where(((freq>=f_min) & (freq<f_max)))[0]
        size = f_idx.size
    shape = (x.shape[0], x.shape[1], size)
    x_psd = np.zeros(shape)
    for trial in range(x.shape[0]):
        print "T",trial,
        sys.stdout.flush()
        for sensor in range(x.shape[1]):
            tmp, freq = mlab.psd(x[trial, sensor, t_start:t_end], NFFT=NFFT, Fs=Fs, noverlap=noverlap)
            x_psd[trial, sensor, :] = tmp.squeeze()[f_idx]
    print()
    return x_psd, freq
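A hypothetical call, assuming compute_psd is importable and given a synthetic trials X sensors X time array sampled at 200 Hz:

import numpy as np

x = np.random.randn(10, 4, 1000)   # 10 trials, 4 sensors, 5 s at 200 Hz
x_psd, freq = compute_psd(x, NFFT=128, Fs=200, f_min=1, f_max=40)
print(x_psd.shape)                 # (10, 4, number of PSD bins in [1, 40) Hz)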
Code Example #3
File: test_mlab.py Project: Honglongwu/matplotlib
    def test_psd(self):
        for y, fstims in zip(self.y, self.fstimsall):
            Pxx1, freqs1 = mlab.psd(y, NFFT=self.NFFT,
                                    Fs=self.Fs,
                                    noverlap=self.noverlap,
                                    pad_to=self.pad_to,
                                    sides='default')
            np.testing.assert_array_equal(freqs1, self.freqss)
            for fstim in fstims:
                i = np.abs(freqs1 - fstim).argmin()
                self.assertTrue(Pxx1[i] > Pxx1[i+1])
                self.assertTrue(Pxx1[i] > Pxx1[i-1])

            Pxx2, freqs2 = mlab.psd(y, NFFT=self.NFFT,
                                    Fs=self.Fs,
                                    noverlap=self.noverlap,
                                    pad_to=self.pad_to,
                                    sides='onesided')
            np.testing.assert_array_equal(freqs2, self.freqss)
            for fstim in fstims:
                i = np.abs(freqs2 - fstim).argmin()
                self.assertTrue(Pxx2[i] > Pxx2[i+1])
                self.assertTrue(Pxx2[i] > Pxx2[i-1])

            Pxx3, freqs3 = mlab.psd(y, NFFT=self.NFFT,
                                    Fs=self.Fs,
                                    noverlap=self.noverlap,
                                    pad_to=self.pad_to,
                                    sides='twosided')
            np.testing.assert_array_equal(freqs3, self.freqsd)
            for fstim in fstims:
                i = np.abs(freqs3 - fstim).argmin()
                self.assertTrue(Pxx3[i] > Pxx3[i+1])
                self.assertTrue(Pxx3[i] > Pxx3[i-1])
コード例 #4
0
        def tb_stimulus():
            # pulse the reset
            yield reset.pulse(100)
            for ii in range(2):
                yield clock.posedge

            # chirp 1 (time response, pictorial)
            print("   chirp 1 ...")
            samp_in = signal.chirp(np.arange(args.Nsamps // 2) / args.Fs,
                                   8, .64, 480,
                                   method='logarithmic') * .94
            samp_in = np.concatenate(
                (samp_in,
                 np.array([ss for ss in reversed(samp_in[:-1])])))
            samp_out = []
            fsamp_out = []
            # feed in the samples, save the output
            for ii in range(args.Nsamps - 1):
                sig_in.next = int(np.floor(samp_in[ii] * sMax))
                yield clock.posedge
                samp_out.append(sig_out / float(sMax))
                fsamp_out.append(int(np.floor(samp_in[ii] * sMax)))
            samp_out = np.array(samp_out)
            fh = open('fsamp_out.dat', 'wb')
            pickle.dump(fsamp_out, fh)  # Python 3: pickle replaces cPickle
            fh.close()
            c = signal.lfilter(coef, 1, samp_in)
            sdiff = np.abs(c[:-2] - samp_out[2:])
            plt.figure(3); plt.plot(sdiff)
            assert np.max(sdiff) < 1e-3, "error too large"
            ia = np.concatenate((np.ones(args.Nflt // 2) * .98, samp_in))
            fig,ax = plt.subplots(1)
            ax.plot(ia, 'b'); ax.plot(samp_out[1:], 'r'); ax.plot(c, 'y--')
            fig.savefig('__plot2.png')

            # chirp 2 (frequency response, more points)
            print("   chrip 2 ...")
            Nfft = 8*args.Nsamps
            samp_in = signal.chirp(np.arange(Nfft)*1/args.Fs,
                                   0.1, 1, 500)*.98
            samp_out = []
            for ii in xrange(Nfft):
                sig_in.next = int(np.floor(samp_in[ii]*(sMax)))
                yield clock.posedge
                samp_out.append(sig_out//float(sMax))
            samp_out = np.array(samp_out)
            Pi,fi = mlab.psd(samp_in)
            Po,fo = mlab.psd(samp_out)
            ax1.plot(pi*fi, 10*log10(abs(Po/Pi)), 'r')
            ax1.grid(True)
            fig1.savefig('__plot1.png')
            
            raise StopSimulation
Code Example #5
File: iqnoise.py Project: ColumbiaCMB/kid_readout
def auto_auto_cross(a, b, sample_rate, NFFT=None, detrend=mlab.detrend_none, window=mlab.window_none, noverlap=None,
                    binned=True, bins_per_decade=30, **kwds):
    """
    Return estimates of the auto-spectral density of both real time series a and b, of their cross-spectral density, and
    the frequencies corresponding to these estimates.

    Parameters
    ----------
    a : ndarray(real)
        A real time series.
    b : ndarray(real)
        A real time series.
    sample_rate : float
        The sample rate of both time series.
    NFFT : int
        The number of samples to use for each FFT chunk; should be a power of two for speed; if None, a reasonable
        default is used.
    window  : callable
        A function that takes a complex time series as argument and returns a windowed time series.
    noverlap : int
        The number of samples to overlap in each chunk; if None, a value equal to half the NFFT value is used.
    detrend : callable
        A function that takes a complex time series as argument and returns a detrended time series.
    binned : bool
        If True, the result is binned using bin sizes that increase with frequency, and the bins at zero frequency and
        the Nyquist frequency are dropped.
    bins_per_decade : int
        The number of bins per decade; used only if binned is True.
    kwds : dict
        Additional keywords to pass to mlab.psd and mlab.csd.

    Returns
    -------
    f : ndarray(float)
        The frequencies corresponding to the data.
    S_aa : ndarray(float)
        The spectral density of a.
    S_bb : ndarray(float)
        The spectral density of b.
    S_ab : ndarray(complex)
        The cross-spectral density of a and b.
    """
    if NFFT is None:
        NFFT = int(2 ** (np.floor(np.log2(a.size)) - 3))
    if noverlap is None:
        noverlap = NFFT // 2
    S_aa, f = mlab.psd(a, Fs=sample_rate, NFFT=NFFT, detrend=detrend, window=window, noverlap=noverlap, **kwds)
    S_bb, f = mlab.psd(b, Fs=sample_rate, NFFT=NFFT, detrend=detrend, window=window, noverlap=noverlap, **kwds)
    S_ab, f = mlab.csd(a, b, Fs=sample_rate, NFFT=NFFT, window=window, detrend=detrend, noverlap=noverlap, **kwds)
    if binned:
        f = f[1:-1]
        S_aa = S_aa[1:-1]
        S_bb = S_bb[1:-1]
        S_ab = S_ab[1:-1]
        edges, counts, f, (S_aa, S_bb, S_ab) = binning.log_bin(f, bins_per_decade, S_aa, S_bb, S_ab)
    return AutoAutoCross(f, S_aa, S_bb, S_ab)
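A hypothetical call on synthetic data (binned=False avoids the repo's binning.log_bin dependency; the return value is assumed to unpack as a 4-tuple):

import numpy as np

fs = 256.0
t = np.arange(2 ** 16) / fs
a = np.sin(2 * np.pi * 10 * t) + np.random.randn(t.size)
b = np.sin(2 * np.pi * 10 * t) + np.random.randn(t.size)
f, S_aa, S_bb, S_ab = auto_auto_cross(a, b, fs, binned=False)
coherence = np.abs(S_ab) ** 2 / (S_aa * S_bb)  # magnitude-squared coherence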
Code Example #6
def plot_example_psds(example,rate):
    """
    This function creates a figure with 4 lines to show the overall psd for 
    the four sleep examples. (Recall row 0 is REM, rows 1-3 are NREM stages 1,
    2 and 3/4)
    """
    plt.figure()

    ##YOUR CODE HERE   
   
    for i in range(0, len(example)):
        Pxx, freqs = m.psd(example[i], NFFT=512, Fs=rate)
        normalizedPxx = Pxx/sum(Pxx)
        plt.plot(freqs, normalizedPxx, label=rowname[i])
        plt.xlabel('Frequency (Hz)')
        plt.ylabel('Normalized Power Spectral Density')        
    
        ## Inserted into plotting loop
        ## Possible Classifier - calculate average power-weighted frequency    
        sumPower = 0
        for j in range(0, len(normalizedPxx)):
            sumPower = sumPower + (normalizedPxx[j] * freqs[j])
    
        avgFreq = sumPower/len(freqs)
        print(rowname[i] + " average frequency = " + str(avgFreq))
        ##
        ##    

    plt.xlim(0,20)
    plt.legend(loc=0)
    
    ## Better Classifier - Cumulative Power Distribution
    ## find where cumulative power exceeds threshold %
    threshold = 0.95
    print(' ')
    print('Threshold set to ' + str(threshold*100) + '%')
   
    # iterate over each example data set
    for i in range(0, len(example)):
        Pxx, freqs = m.psd(example[i], NFFT=512, Fs=rate)
        
        # normalize PSD
        normalizedPxx = Pxx/sum(Pxx)
  
        # determine cumulative power distribution
        sumPxx = 0
        j = 0
        while sumPxx <= threshold:
            sumPxx = sumPxx + normalizedPxx[j]
            j = j + 1

        # report where cumulative power first exceeds the threshold
        # (j has been incremented past the crossing bin, hence j - 1)
        print(rowname[i]+' - Threshold Met at Frequency = '+str(freqs[j - 1])+' Hz')
                   
    return
Code Example #7
def test_full_spectral_helper():
    x = np.random.randn(2 ** 20)
    y = np.random.randn(2 ** 20)
    mlabPxx, fr = mlab.psd(x, NFFT=2 ** 16, Fs=512e6 / 2 ** 14)
    mlabPyy, fr = mlab.psd(y, NFFT=2 ** 16, Fs=512e6 / 2 ** 14)
    mlabPxy, fr = mlab.csd(x, y, NFFT=2 ** 16, Fs=512e6 / 2 ** 14)
    with warnings.catch_warnings():
        warnings.simplefilter('ignore', np.ComplexWarning)
        fullPxx, fullPyy, fullPxy, freqs, t = full_spectral_helper(x, y, NFFT=2 ** 16, Fs=512e6 / 2 ** 14)
    assert (np.allclose(mlabPxx, fullPxx.mean(1)))
    assert (np.allclose(mlabPyy, fullPyy.mean(1)))
    assert (np.allclose(mlabPxy, fullPxy.mean(1)))
Code Example #8
File: sleepModule.py Project: troutbum/neuraldata
def plot_psds(data, rate, subject, condition, label_set, title):
    """
    Plots the frequency response for all 9 channels
    using the entire recording    
    """
    fig = plt.figure()
    # common title
    fig.suptitle('Frequency Response ('+title
            +')'+subject+' '+condition,
            fontsize=14, fontweight='bold')
    # common ylabel
    fig.text(0.06, 0.5, 'Normalized Power Spectral Density',
             ha='center', va='center', rotation='vertical',
             fontsize=14, fontweight='bold')
    # common xlabel
    fig.text(0.5, 0.05,'Frequency (Hz)',
             ha='center', va='center',fontsize=14, fontweight='bold')
    # use this to stack EEG, EOG, EMG on top of each other
    sub_order = [1,4,7,10,2,5,3,6,9]

    # determine max response to scale y-axis 
    maxY = 0
    for i in range(0, len(data)):
        Pxx, freqs = m.psd(data[i], NFFT=512, Fs=rate)
        normalizedPxx = Pxx/sum(Pxx)
        if normalizedPxx.max() > maxY:
            maxY = normalizedPxx.max()
                 
    # plot all subplots
    for i in range(0, len(data)):
        plt.subplot(4, 3, sub_order[i])  # 4 x 3 layout
        #plt.subplot(9, 1, i + 1)  # vertical 9 x 1 layout
        plt.subplots_adjust(hspace=.6)  # adds space between subplots
        
        Pxx, freqs = m.psd(data[i], NFFT=512, Fs=rate)
        normalizedPxx = Pxx/sum(Pxx)
        #plt.plot(freqs, normalizedPxx, label=label_set[ch])
        plt.bar(freqs, normalizedPxx, label=label_set[i],width=0.2)
        plt.axis([0,70,0,maxY])
        
        plt.title(label_set[i]) 
        #plt.xlabel('Frequency (Hz)')
        #plt.ylabel('Normalized Power Spectral Density')        
      
        ## Inserted into plotting loop
        ## Possible Classifier - calculate average power-weighted frequency    
#        sumPower = 0
#        for j in range(0, len(normalizedPxx)):
#            sumPower = sumPower + (normalizedPxx[j] * freqs[j])
#    
#        avgFreq = sumPower/len(freqs)
#        print(channel_name[i] + " average frequency = " + str(avgFreq))
    return     
Code Example #9
File: loclib.py Project: sao-eht/lmtscripts
def fitsearch(z, x0=0, y0=0, s10=20., s20=20., th0=0, channel='b'):
	Fs = z.fs
	tp = z.__dict__[channel]
	# 512 is balance between freq resolution and averaging, good for 50 Hz
	(p, f) = psd(tp, NFFT=512, pad_to=4096, Fs=Fs) # unit variance -> PSD = 1.
	p = p / z.fillfrac # account for zeros in stitched timeseries
	N = len(z.t) # original sequence length
	pad = 2**int(np.ceil(np.log2(N))) # pad length for efficient FFTs
	fac = np.zeros(pad)
	mpad = np.zeros(pad)
	bpad = np.zeros(pad)
	bpad[:N] = tp
	B = np.fft.rfft(bpad).conj() # N factor goes into fft, ifft = 1/N * ..
	fm = np.abs(np.fft.fftfreq(pad, d=1./Fs)[:1 + pad // 2])
	fac = 1. / interp1d(f, p)(fm) # 1/PSD for matched filter (double whiten)
	fac[fm < 0.1] = 0. # turn off low freqs below 0.1 Hz - just a guess
	def snr(args):
		(xtest, ytest, s1test, s2test, thtest) = args
		mpad[:N] = ezmodel(z.x, z.y, xtest, ytest, s1test, s2test, thtest) # model signal
		M = np.fft.rfft(mpad)
		norm = np.sqrt(np.sum(np.abs(M)**2 * fac))
		snr = np.sum((M * B * fac).real) / norm
		return -snr
	result = fmin(snr, (asec2rad(x0), asec2rad(y0), asec2rad(s10)/2.355, asec2rad(s20)/2.355, th0*np.pi/180.))
	print "x: %.1f" % rad2asec(result[0])
	print "y: %.1f" % rad2asec(result[1])
	print "s1: %.2f" % rad2asec(result[2]*2.355)
	print "s2: %.2f" % rad2asec(result[3]*2.355)
	print "th: %.2f" % (result[4] * 180./np.pi)
Code Example #10
File: loclib.py Project: sao-eht/lmtscripts
def fitgrid(z, channel='b'):
	Fs = z.fs
	tp = z.__dict__[channel]
	# 512 is balance between freq resolution and averaging, good for 50 Hz
	(p, f) = psd(tp, NFFT=512, pad_to=4096, Fs=Fs) # unit variance -> PSD = 1.
	p = p / z.fillfrac # account for zeros in stitched timeseries
	N = len(z.t) # original sequence length
	pad = 2**int(np.ceil(np.log2(N))) # pad length for efficient FFTs
	fac = np.zeros(pad)
	mpad = np.zeros(pad)
	bpad = np.zeros(pad)
	bpad[:N] = tp
	B = np.fft.rfft(bpad).conj() # N factor goes into fft, ifft = 1/N * ..
	fm = np.abs(np.fft.fftfreq(pad, d=1./Fs)[:1 + pad // 2])
	fac = 1. / interp1d(f, p)(fm) # 1/PSD for matched filter (double whiten)
	fac[fm < 0.1] = 0. # turn off low freqs below 0.1 Hz - just a guess
	def makesnr(*args):
		(xtest, ytest, s1test, s2test, thtest) = args
		mpad[:N] = ezmodel(z.x, z.y, xtest, ytest, s1test, s2test, thtest) # model signal
		# mpad[:N] = model(z.x, z.y, xtest, ytest, fwhm=rad2asec(s1test)*2.355)
		M = np.fft.rfft(mpad)
		norm = np.sqrt(np.sum(np.abs(M)**2 * fac))
		snr = np.sum((M * B * fac).real) / norm
		return snr
	(xx, yy, ss1, ss2, tt) = np.mgrid[-2:2, 12:16, 10:30, 10:20, 20:90:15]
	snrs = []
	pars = zip(xx.ravel(), yy.ravel(), ss1.ravel(), ss2.ravel(), tt.ravel())
	for (x, y, s1, s2, th) in pars:
		snrs.append(makesnr(asec2rad(x)/2, asec2rad(y)/2, asec2rad(s1/2.355), asec2rad(s2/2.355), th*np.pi/180.))
	snrs = np.array(snrs)
	ss = snrs.reshape(xx.shape)
	return ss
Code Example #11
File: test_algorithms.py Project: slnovak/nitime
def test_psd_matlab():

    """ Test the results of mlab csd/psd against saved results from Matlab"""

    from matplotlib import mlab

    test_dir_path = os.path.join(nitime.__path__[0],'tests')
    
    ts = np.loadtxt(os.path.join(test_dir_path,'tseries12.txt'))
    
    # Complex signal!
    ts0 = ts[1] + ts[0] * 1j  # np.complex was removed in NumPy 1.24

    NFFT = 256
    Fs = 1.0
    noverlap = NFFT // 2

    fxx, f = mlab.psd(ts0,NFFT=NFFT,Fs=Fs,noverlap=noverlap,
                      scale_by_freq=True)

    fxx_mlab = np.fft.fftshift(fxx).squeeze()

    fxx_matlab = np.loadtxt(os.path.join(test_dir_path,'fxx_matlab.txt'))

    npt.assert_almost_equal(fxx_mlab,fxx_matlab,decimal=5)
Code Example #12
    def __setup_bins(self):
        """
        Makes an initial dummy psd and thus sets up the bins and all the rest.
        Should be able to do it without a dummy psd..
        """
        dummy = np.ones(self.len)
        _spec, freq = mlab.psd(dummy, self.nfft, self.sampling_rate,
                               noverlap=self.nlap)

        # leave out first entry (offset)
        freq = freq[1:]

        per = 1.0 / freq[::-1]
        self.freq = freq
        self.per = per
        # calculate left/right edge of first period bin,
        # width of bin is one octave
        per_left = per[0] / 2
        per_right = 2 * per_left
        # calculate center period of first period bin
        per_center = math.sqrt(per_left * per_right)
        # calculate mean of all spectral values in the first bin
        per_octaves_left = [per_left]
        per_octaves_right = [per_right]
        per_octaves = [per_center]
        # we move through the period range at 1/8 octave steps
        factor_eighth_octave = 2 ** 0.125
        # do this for the whole period range and append the values to our lists
        while per_right < per[-1]:
            per_left *= factor_eighth_octave
            per_right = 2 * per_left
            per_center = math.sqrt(per_left * per_right)
            per_octaves_left.append(per_left)
            per_octaves_right.append(per_right)
            per_octaves.append(per_center)
        self.per_octaves_left = np.array(per_octaves_left)
        self.per_octaves_right = np.array(per_octaves_right)
        self.per_octaves = np.array(per_octaves)

        self.period_bins = per_octaves
        # mid-points of all the period bins
        self.period_bin_centers = np.mean((self.period_bins[:-1],
                                           self.period_bins[1:]), axis=0)

        self.store_freqi = []
        self.store_freqf = []
        for f in self.store_freqs:
            ind = np.abs(np.array(freq) - f).argmin()
            self.store_freqi.append(ind)
            self.store_freqf.append(freq[ind])

        # write the array of frequencies
        if self.write_psd:
            f = open('o-PSDtxt/freq.txt', 'w')
            freq_octaves = 1.0 / np.array(self.period_bins[::-1])
            for i in range(len(self.period_bins)):
                f.write("%7.5e\n" % freq_octaves[i])
            f.close()  # note the call: the original's bare "f.close" never closed the file
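The eighth-octave bin construction can be verified in isolation; a small sketch assuming only NumPy:

import math
import numpy as np

per_left, per_right = 0.5, 1.0             # first one-octave bin
centers = [math.sqrt(per_left * per_right)]
step = 2 ** 0.125                          # 1/8 octave
while per_right < 100.0:                   # arbitrary upper period for the sketch
    per_left *= step
    per_right = 2 * per_left
    centers.append(math.sqrt(per_left * per_right))
centers = np.array(centers)
# consecutive bin centers differ by a constant factor of 2**0.125
assert np.allclose(np.diff(np.log2(centers)), 0.125)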
Code Example #13
File: plotPSD.py Project: JustasB/ACnet2
def do_plot_files(filenames):
  if len(filenames) > 0:
      plotnum = 0
      for fname in filenames:
          print(fname, plotnum)
          print('Reading %s' % fname)
          fp = open(fname, 'r')
          count = len(fp.readlines())
          print(count, " lines")
          fp.close()
          if (plotnum == 0):
              print(plotnum)
              tn = np.zeros(count)
              yn = np.zeros(count)
              print(len(tn))
          fp = open(fname, 'r')
          i = 0
          for line in fp.readlines():
              data = line.split(" ")
              # Note that tn[i] is replaced, and yn[i] is added to
              tn[i] = float(data[0]); yn[i] = yn[i] + float(data[1])
              i += 1
          plotnum += 1
  else:
    print("No files were specified for plotting!")
    print("Please give one or more filenames as arguments, e.g.\n")
    print("    plotSpectra EPSC_sum_0004sj.txt EPSC_sum_M0004*.txt \n")
    sys.exit()

  # Now do the plotting of the averaged array data
  # Should check to be sure that the files are all in the same format

  dt = tn[1] - tn[0]
  # fourier sample rate
  fs = 1. / dt

  # Average the data over number of files
  yn /= plotnum

  npts = len(yn)
  startpt = int(0.2*fs)

  if (npts - startpt)%2!=0:
      startpt = startpt + 1

  yn = yn[startpt:]
  tn = tn[startpt:]
  nfft = len(tn)//4
  overlap = nfft//2

  print(npts, nfft)
  print(startpt, len(yn))

  print('Plotting average of', plotnum, 'runs from series', runid)

  pxx,freqs=mlab.psd(yn,NFFT=nfft,Fs=fs,noverlap=overlap,window=mlab.window_none)
  pxx[0] = 0.0
  ax2.plot(freqs,pxx)
  print(freqs[0], freqs[1], freqs[10], freqs[400])
  print(pxx[0], pxx[1], pxx[10], pxx[400])
Code Example #14
def binned2pxx(binned, Fs=1000., NFFT=256, noverlap=None,
    windw=mlab.window_hanning, detrend=mlab.detrend_mean, freq_high=100):
    """Given 2d array of binned times, return Pxx
    
    Helper function to ensure faking goes smoothly
    binned: 2d array, trials on rows, timepoints on cols
    rest : psd_kwargs. noverlap defaults to NFFT/2
    
    Will Pxx each trial separately with psd_kwargs, then slice out 
    frequencies below freq_high and return.
    """
    # Set up psd_kwargs
    if noverlap is None:
        noverlap = NFFT // 2
    psd_kwargs = {'Fs': Fs, 'NFFT': NFFT, 'noverlap': noverlap, 
        'detrend': detrend, 'window': windw}    
    
    # Pxx each trial
    ppxx_l = []
    for row in binned:
        ppxx, freqs = mlab.psd(row, **psd_kwargs)
        ppxx_l.append(ppxx)
    
    # Truncate unnecessary frequencies
    if freq_high:
        topbin = np.where(freqs > freq_high)[0][0]
        freqs = freqs[1:topbin]
        Pxx = np.asarray(ppxx_l)[:, 1:topbin]       
    return Pxx, freqs
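A hypothetical call, assuming binned2pxx is importable and given fake binned spike counts:

import numpy as np

binned = np.random.poisson(1.0, size=(20, 1000))  # 20 trials, 1 ms bins
Pxx, freqs = binned2pxx(binned, Fs=1000., NFFT=256, freq_high=100)
print(Pxx.shape)   # (20, n_bins), with freqs restricted to roughly (0, 100] Hz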
Code Example #15
File: eegws-api.py Project: al1na/EEGws-api
def calculate_psd(recording_id):
    electrode = request.args.get("electrode")
    recording = find_public_recording_by_id(recording_id)
    if electrode not in ELECTRODES or recording is None:
        abort(404)
    psd = mlab.psd(recording['electrodes'][electrode], Fs=recording['sampling_rate'], NFFT=2048)
    plt.figure(figsize=(6, 8))
    plt.plot(psd[1][4:], psd[0][4:], 'b-')  # skip the lowest four frequency bins
    print("psd freqs index 4 up")
    print(psd[1][4:])
    print("psd power index 4 up")
    print(psd[0][4:])
    freqs = psd[1]
    print("freqs len " + str(len(freqs)))
    print(freqs)
    freqsgtone = np.where(freqs >= 1)
    print("freqsgtone len " + str(len(freqsgtone)))
    print(freqsgtone)
    print("power len " + str(len(psd[0])))
    print(psd[0])
    plt.xlabel("FREQUENCY")
    plt.ylabel("POWER SPECTRAL DENSITY")
    plot_filename = "psd_" + recording_id + "_" + electrode + str(datetime.datetime.now().isoformat()) + ".png"
    plt.savefig(plot_filename, dpi=150)
    return send_from_directory(app.root_path, plot_filename)
Code Example #16
File: noise_temp.py Project: ColumbiaCMB/kid_readout
 def noise_data(self, chan=0, m=1, plot=False):
     NFFT = self.NFFT
     N = self.N
     lendata = self.lendata
     Fs = self.Fs
     pst = np.zeros(NFFT)
     cnt = 0
     while cnt < m:
         data, addr = self.r.get_data_udp(N*lendata, demod=True)
         if self.use_r2 and ((not np.all(np.diff(addr) == 2**21 // N)) or data.shape[0] != 256*lendata):
             print("bad")
         else:
             ps, f = mlab.psd(data[:,chan], NFFT=NFFT, Fs=Fs/self.r.nfft)
             pst += ps
             cnt += 1
     pst /= cnt
     pst = 10.*np.log10(pst)
     if plot:
         ind = f > 0
         pl.semilogx(f[ind], pst[ind])
         pl.xlabel('Hz')
         pl.ylabel('dB/Hz')
         pl.title('chan data psd')
         pl.grid()
         pl.show()
     return f, pst
Code Example #17
File: timeseries.py Project: pbranson/soda
    def psd(self, plot=True, nbandavg=1, scale=1.,**kwargs):
        """
        Power spectral density
        
        nbandavg = Number of fft bins to average
        scale = scale factor (for plotting only)
        """
        
        if self.isequal==False and self.VERBOSE:
            print('Warning - time series is unequally spaced; consider using a Lomb-Scargle method')
        

        NFFT = int(2**(np.floor(np.log2(self.ny/nbandavg)))) # Nearest power of 2 to length of data
        # Remove the mean and nan's
        y = self.y - self.y.mean()
        y[y.mask] = 0.

        Pyy,frq = mlab.psd(y,\
                Fs=2*np.pi/self.dt,\
                NFFT=NFFT,\
                window=mlab.window_hanning,\
                scale_by_freq=True)
        
        if plot:
            plt.loglog(frq,Pyy*scale,**kwargs)
            plt.xlabel('Freq. [$radians s^{-1}$]')
            plt.ylabel('PSD')
            plt.grid(True)  # the "b" keyword was removed in Matplotlib 3.7
        
        return Pyy, frq
Code Example #18
def spectrum(data, sampling, NFFT=256, overlap=0.5,\
             window='hanning', detrender=mlab.detrend_linear,\
             sides='onesided', scale='PSD'):

  numpoints  = len(data)
  numoverlap = int(sampling * (1.0 - overlap))

  if isinstance(window,str):
    window=window.lower()

  win = signal.get_window(window, NFFT)

  # calculate PSD with given parameters
  spec,freq = mlab.psd(data, NFFT=NFFT, Fs=sampling, noverlap=numoverlap,\
                       window=win, sides=sides, detrend=detrender)

  # rescale data to meet user's request
  scale = scale.lower()
  if scale == 'asd':
    spec = numpy.sqrt(spec) * numpy.sqrt(2 / (sampling*sum(win**2)))
  elif scale == 'psd':
    spec *= 2/(sampling*sum(win**2))
  elif scale == 'as':
    spec = numpy.sqrt(spec) * numpy.sqrt(2) / sum(win)
  elif scale == 'ps':
    spec = spec * 2 / (sum(win)**2)

  return freq, spec.flatten()
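A hypothetical call on white noise (modern SciPy spells the Hann window 'hann'; the numpy/scipy/mlab imports are assumed at module level):

import numpy as np

fs = 1024.0
data = np.random.randn(int(16 * fs))
freq, asd = spectrum(data, fs, NFFT=1024, overlap=0.5, window='hann', scale='ASD')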
Code Example #19
def classify_epoch(epoch,rate):
    """
    This function returns a sleep stage classification (integers: 1 for NREM
    stage 1, 2 for NREM stage 2, and 3 for NREM stage 3/4) given an epoch of 
    EEG and a sampling rate.
    """
    Pxx, freqs = m.psd(epoch, NFFT=256, Fs=rate)
    nPxx = Pxx/float(sum(Pxx))
    # plt.find was removed from matplotlib; np.where is the equivalent
    delta_f = np.where((0 < freqs) & (freqs <= 3))[0]
    ndelta_power = sum(nPxx[delta_f])
    spindles_f = np.where((11 <= freqs) & (freqs <= 15))[0]
    nspindle_power = sum(nPxx[spindles_f])
    
    if (ndelta_power > 0.8): #suggests stage 3
        stage = 3
    else:  
        if (nspindle_power > 0.03): #suggests stage 2
            stage = 2
        else: 
            stage = 1
    
    return stage
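A hypothetical call (the rate and the random "EEG" epoch are invented; m is matplotlib.mlab as in the original module):

import numpy as np

rate = 128
epoch = np.random.randn(30 * rate)   # 30 s of synthetic EEG
print(classify_epoch(epoch, rate))   # prints 1, 2, or 3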
Code Example #20
File: worker.py Project: tapczan666/GUI_Analyzer
 def work(self, data):
     nfft = self.nfft
     length = self.length
     slice_length = self.slice_length
     samp_rate = self.samp_rate
     offset = self.offset
     index = data[0]
     center_freq = data[1]
     samples = data[2]
     if len(samples)>2*length:
         samples = samples[:2*length]
     trash = length - slice_length
     samples = samples - offset
     power, freqs = psd(samples, NFFT=nfft, pad_to=length, noverlap=self.nfft // 2, Fs=samp_rate/1e6, detrend=mlab.detrend_none, window=mlab.window_hanning, sides='twosided')
     power = np.reshape(power, len(power))
     freqs = freqs + center_freq/1e6
     power = power[trash // 2:-(trash // 2)]
     freqs = freqs[trash // 2:-(trash // 2)]
     power = 10*np.log10(power)
     power = power - self.correction
     out = [index, power, freqs]

     return out
Code Example #21
File: helper.py Project: jiang0131/pr2_pretouch
def plot_from_rawdata(data1, data2, rate, Ns=NS, overlap_ratio=OVERLAP_RATIO):
    '''
    Given two time-series data and the sampling rates, plot the frequency spectrums
    '''
    nOverlap = int(Ns * overlap_ratio)
    (Pxx_in, freq) = mlab.psd(data1, NFFT=Ns, Fs=rate, 
                              detrend=mlab.detrend_mean, window=mlab.window_hanning,
                              noverlap=nOverlap, sides='onesided')
    (Pxx_out, freq) = mlab.psd(data2, NFFT=Ns, Fs=rate, 
                               detrend=mlab.detrend_mean, window=mlab.window_hanning,
                               noverlap=nOverlap, sides='onesided')
    Pxx_in = np.log(Pxx_in)
    Pxx_out = np.log(Pxx_out)
    Pxx_diff = Pxx_out - Pxx_in
    Pxx_diff_smoothed = cookb_signalsmooth.smooth(Pxx_diff.ravel(), window_len=51, window='flat')
    plot_graph(freq, Pxx_in, Pxx_out, Pxx_diff, Pxx_diff_smoothed)
Code Example #22
def nonna_blrms_fft(signal, bands, infs, Tout, Tfft):
        """
        Compute the band-limited RMS of the input signal using FFT.

        Input arguments:
        signal = the input signal
        bands = corner frequencies of the bands, can be multiple. Ex. [[10, 20]] or [[10,20], [30,40]]
        infs = sampling frequency of signal
        Tout = time for each averaged PSD (inverse of output sampling);
               the function returns a sample every Tout
        Tfft = duration (in seconds) of each FFT
        """

        # define number of samples for each FFT and for each time slice
        Nfft = int(infs * Tfft)
        Npt = int(infs * Tout)
        Nsamples = int(len(signal)/Npt)
        # determine the starting point of each data segment
        idx = numpy.arange(0, Nsamples) * Npt
        # initialize time vector and BLRMS vector
        t = numpy.arange(0, Nsamples) * Tout
        Nbands = len(bands)
        b = numpy.zeros((len(t), Nbands))

        # loop over each segment of data
        for i, j in enumerate(idx):
                # compute PSD
                sx, fr = psd(signal[j:j+Npt], Fs=infs, noverlap=Nfft // 2, NFFT=Nfft)
                # loop over all bands
                for k in range(Nbands):
                        # sum all bins in the correct frequency range, and multiply by frequency bin width
                        b[i, k] = fr[1]*numpy.sum(sx[(fr > bands[k][0]) & (fr < bands[k][1])])

        # done
        return t, b
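A hypothetical call on ten minutes of white noise (note the function returns band-limited power, i.e. mean square; take the square root for an RMS):

import numpy

fs = 256
x = numpy.random.randn(fs * 600)
t, b = nonna_blrms_fft(x, [[10, 20], [30, 40]], fs, Tout=10, Tfft=1)
blrms = numpy.sqrt(b)   # column 0: 10-20 Hz band, column 1: 30-40 Hz band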
Code Example #23
File: rayleigh.py Project: chase-ok/cmon
def compute_rayleigh(time, stride=0.1, num_strides=10):
    duration = stride*num_strides
    data = get_cache(STRAIN_FRAMETYPE)\
           .fetch(STRAIN_CHANNEL, time, time+duration)

    rate = 1.0/data.metadata.dt
    chunk_size = int(len(data)/num_strides)

    shared_freq = None
    powers = np.empty((num_strides, chunk_size//2 + 1), np.float64)
    for i in range(num_strides):
        subset = data[i*chunk_size:(i+1)*chunk_size]
        power, freq = mlab.psd(subset, Fs=rate, NFFT=int(rate*stride))
        
        if shared_freq is None:
            shared_freq = freq
        else:
            assert (shared_freq == freq).all()
        powers[i, :] = power.T

    rs = powers.std(axis=0)/powers.mean(axis=0)
    return Frame(time=time,
                 stride=stride,
                 num_strides=num_strides,
                 frequencies=shared_freq,
                 rs=rs)
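The Rayleigh statistic computed here (standard deviation over mean of the PSD across strides) is close to 1 for stationary Gaussian noise; a minimal check, assuming only NumPy and matplotlib:

import numpy as np
from matplotlib import mlab

rate, num_strides = 1024.0, 32
chunk = int(rate)                            # one second per stride
data = np.random.randn(num_strides * chunk)
powers = np.empty((num_strides, chunk // 2 + 1))
for i in range(num_strides):
    power, freq = mlab.psd(data[i * chunk:(i + 1) * chunk], Fs=rate, NFFT=chunk)
    powers[i, :] = power
rs = powers.std(axis=0) / powers.mean(axis=0)   # ~1 everywhere for Gaussian noise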
Code Example #24
def plot_example_psds(example,rate):
    """
    This function creates a figure with 4 lines to show the overall psd for 
    the four sleep examples. (Recall row 0 is REM, rows 1-3 are NREM stages 1,
    2 and 3/4)
    """
    
    plt.figure()
    for idx in range(len(example)): 
        plt.subplot(2,2,idx+1)
        x = np.linspace(0, len(example[idx])/rate,len(example[idx]) )
        plt.plot(x,example[idx])
        plt.title('Phase'+str(idx))
    

    plt.figure()
    for idx in range(len(example)):
        Pxx, freqs = plt.psd(example[idx], NFFT=512, Fs=rate)
        plt.xlim((0,70))
    plt.legend(('REM', 'NREM stage 1', 'NREM stage 2', 'NREM stage 3/4'), loc='upper right', prop={'size': 8})
      
      
    plt.figure()
    for idx in range(len(example)):
        Pxx, freqs = m.psd(example[idx], NFFT=512, Fs=rate)       
        index30=np.nonzero(freqs==30)[0][0]      
        pxx = 10*np.log10(Pxx[0:index30+1])
        normalized_pxx = pxx/sum(abs(pxx))
        plt.plot(freqs[0:index30+1], normalized_pxx)
    plt.legend(('REM', 'NREM stage 1', 'NREM stage 2', 'NREM stage 3/4'), loc='upper right', prop={'size': 8})
  
    
    ##YOUR CODE HERE    
    
    return
Code Example #25
File: traceRoutines.py Project: peltonen/touche
def psd(signal, sampling_frequency, frequency_resolution,
        high_frequency_cutoff=None,  **kwargs):
    """This function wraps matplotlib.mlab.psd to provide a more intuitive 
    interface.
    
    :param: signal - the input signal (a one dimensional array)
    :param: sampling_frequency - the sampling frequency of signal (i.e.: 10000)
    :param: frequency_resolution - the desired frequency resolution of the spectrum.
        this is the guaranteed worst-case frequency resolution.
    :param: high_frequency_cutoff - optional high freq. cutoff.  resamples data 
        to this value and then uses that for Fs parameter
    :param: **kwargs - Arguments passed on to matplotlib.mlab.psd
    :returns: - tuple of two numpy arrays, power and freqs
    """
    if (high_frequency_cutoff is not None 
        and high_frequency_cutoff < sampling_frequency):
        resampled_signal = resample_signal(signal, sampling_frequency, 
                                                    high_frequency_cutoff)
    else:
        high_frequency_cutoff = sampling_frequency
        resampled_signal = signal
    num_data_samples = len(resampled_signal)
    NFFT= find_NFFT(frequency_resolution, high_frequency_cutoff, 
                    force_power_of_two=True) 
    
    return mlab.psd(resampled_signal, NFFT=NFFT, 
                    Fs=high_frequency_cutoff, 
                    noverlap=0, **kwargs)
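find_NFFT and resample_signal are repo-internal helpers not shown here. The relation find_NFFT presumably encodes is delta_f = Fs/NFFT, so a power-of-two NFFT guaranteeing a worst-case resolution can be sketched as follows (an assumption about its behavior, not its actual implementation):

import numpy as np

def find_nfft_sketch(frequency_resolution, Fs):
    # smallest power of two with Fs/NFFT <= frequency_resolution (hypothetical)
    return int(2 ** np.ceil(np.log2(Fs / frequency_resolution)))

print(find_nfft_sketch(1.0, 10000))   # 16384, giving ~0.61 Hz resolution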
Code Example #26
File: noise_temp.py Project: ColumbiaCMB/kid_readout
 def take_psd(self, x, y, db=True, NFFT=None):
     if NFFT is None:
         NFFT=self.NFFT
     ps, f = mlab.psd(x+1j*y, NFFT=NFFT, Fs=self.Fs)
     if db:
         ps = 10.*np.log10(ps) 
     return f, ps
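Passing x + 1j*y makes mlab.psd treat the stream as complex IQ data and return a two-sided spectrum; a minimal illustration with a synthetic tone (all parameters invented):

import numpy as np
from matplotlib import mlab

Fs, NFFT = 1e6, 1024
t = np.arange(2 ** 16) / Fs
x = np.cos(2 * np.pi * 1e4 * t)   # I
y = np.sin(2 * np.pi * 1e4 * t)   # Q
ps, f = mlab.psd(x + 1j * y, NFFT=NFFT, Fs=Fs)
print(f[np.argmax(ps)])           # near +10 kHz, on the positive side only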
Code Example #27
    def update(self, *args):
        # save center freq. since we're gonna be changing it
        start_fc = self.sdr.fc

        # prepare space in buffer
        # TODO: use indexing to avoid recreating buffer each time
        self.image_buffer = np.roll(self.image_buffer, 1, axis=0)

        for scan_num, start_ind in enumerate(range(0, NUM_SCANS_PER_SWEEP*NFFT, NFFT)):
            self.sdr.fc += self.sdr.rs*scan_num

            # estimate PSD for one scan
            samples = self.sdr.read_samples(NUM_SAMPLES_PER_SCAN)
            samples = samples[::ZOOM]
            psd_scan, f = psd(samples, NFFT=NFFT)

            self.image_buffer[0, start_ind: start_ind+NFFT] = 10*np.log10(psd_scan)

        # plot entire sweep
        self.image.set_array(self.image_buffer)

        # restore original center freq.
        self.sdr.fc = start_fc

        return self.image,
Code Example #28
def plot_example_psds(example,rate):
    """
    This function creates a figure with 4 lines to show the overall psd for 
    the four sleep examples. (Recall row 0 is REM, rows 1-3 are NREM stages 1,
    2 and 3/4)
        
    """

    sleep_stages = ['REM sleep', 'Stage 1 NREM sleep', 'Stage 2 NREM sleep', 'Stage 3 and 4 NREM sleep'];    
    
    plt.figure()
    
    ##YOUR CODE HERE    
    for i in range( len( example[:,0]) ):    
        
        # Apply power spectral density using a Fast Fourier Transform 
        # to generate blocks of data
        psd, frequency = m.psd(example[i, :], NFFT = 512, Fs = rate )
        
        # normalize the PSD
        psd = psd / np.sum(psd)
        
        # plot sleep stages
        plt.plot(frequency, psd, label = sleep_stages[i])
        
        # add legend
        plt.ylabel('Normalized Power Spectral Density')
        plt.xlabel('Frequency (Hz)')
    
    plt.xlim(0,20)
    plt.legend(loc=0)
    plt.title('Overall Power Spectral Density for Sleep Stages')
    
    return
Code Example #29
File: physionoise.py Project: brainmap/physionoise
def spectral_analysis(filtered_signal, sample_rate, signal_name="unknown"):
	"""Runs a quick spectral analysis of a filtered signal, returns a bunch of
	information about it."""
	x           = int( (log(sample_rate) - log(0.01)) / log(2) )
	NFFT        = int(2**x)
	nooverlap   = NFFT // 2
	pxx, freqs  = psd( filtered_signal,
		NFFT=NFFT, Fs=sample_rate, noverlap=nooverlap,
		detrend=detrend_linear, window=window_hanning
	)
	i = pxx.argmax() # index of the maximum power frequency
	max_frequency = freqs[i]
	max_power     = pxx[i]
	period        = 1.0 / max_frequency
	
	if max_frequency == 0:
		logger.warning("%s power peak at zero. Threshold at %f Hz" % (signal_name, freqs[4]))
		i = pxx[5:].argmax()
		max_frequency = freqs[i+5]
		max_power     = pxx[i+5]
		period        = 1.0 / max_frequency
	
	print "Butterworth Filter: %s signal" % signal_name
	report_value("Spectral peak (Hz)", max_frequency)
	report_value("Peak power density", max_power)
	report_value("Spectral period (s)", period)
	
	return(pxx, freqs, max_frequency, max_power, period)
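For example, at sample_rate = 50 Hz the exponent is x = int((log 50 - log 0.01)/log 2) = int(12.3) = 12, so NFFT = 4096 and the bin width is 50/4096 ≈ 0.012 Hz; the window is sized so the resolution lands near the 0.01 Hz target.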
Code Example #30
 def __init__(self, frame, fft):
     self.lpc = scikits.talkbox.lpc(frame, 12)
     self.psd = pylab.psd(frame)[0]
     self.transforms = {
                         "energy":self.energy,
                         "zero.crossings":self.zero_crossings,
                         #"dominant.frequency":self.dominant_frequency,
                         #"full.energy":self.full_energy,
                         #"frame.mean":self.frame_mean,
                         #"frame.snr":self.snr,
                         #"2.4.khz.energy":self.two_four_khz_band_energy,
                         "3500.4300.peak":self.try_this_energy,
                         "5400.6800.peak":self.second_peak,
                         "1800,2700.peak":self.one_more_peak,
                         #"dom.0.300":self.dominant_frequency_in_0hz_300hz,
                         "dom.300.5000":self.dominant_frequency_in_300hz_5000hz,
                         #"dom.5000.24000":self.dominant_frequency_above_5000hz,
                         "low.freq.peak":self.another_peak,
                         "fft.kurtosis":self.fft_kurtosis,
                         "lpc.residual":self.lpc_residual,
                         "psd.spike":self.psd_peak,
                         "psd.other_spike":self.psd_hugepeak,
                         "psd.argmax.after.3":self.psd_argmax_after_3,
                         "psd.21.peak":self.psd_another_peak
                       }
     for i in range(0,12):
         self.transforms["lpc.2." + str(i)] = self.lpc2x(i)
         #self.transforms["lpc.0." + str(i)] = self.lpc0x(i)
     self.attributes = {}
     self.frame = frame
     self.fft = fft
     for transform in self.transforms:
         self.attributes[transform] = self.transforms[transform](frame, fft)
Code Example #31
pltl.set_ylim([np.min(l1power_density), np.max(l1power_density)])
pltl.set_xlabel('Frequency in Hz')
pltl.set_ylabel('Power Density in V**2/Hz')
pltl.set_title('Periodogram of L1 Strain')
pltref = fig.add_subplot(212)
pltref.semilogy(reffreq, refpower_density)
pltref.set_ylim([np.min(refpower_density), np.max(refpower_density)])
pltref.set_xlabel('Frequency in Hz')
pltref.set_ylabel('Power Density in V**2/Hz')
pltref.set_title('Periodogram of Reference Template')
fig.tight_layout()
plt.show()
plt.close(fig)

#Compute and plot the ASD for each detector strain
pxx_H1, freqs = mlab.psd(hstrain, Fs=samplefreq, NFFT=samplefreq)
pxx_L1, freqs = mlab.psd(lstrain, Fs=samplefreq, NFFT=samplefreq)
plt.figure()
plt.loglog(freqs, np.sqrt(pxx_H1), 'b', label='H1 Strain')
plt.loglog(freqs, np.sqrt(pxx_L1), 'r', label='L1 Strain')
plt.axis([10, 2000, 1e-24, 1e-18])
plt.legend(loc='upper center')
plt.xlabel('Frequency (Hz)')
plt.ylabel('ASD (strain/rtHz)')
plt.title('Strain ASDs')
plt.show()
plt.close()

#Store interpolations of the ASDs computed above to use later for whitening
psd_H1 = interp1d(freqs, pxx_H1)
psd_L1 = interp1d(freqs, pxx_L1)
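These interpolants are typically used to whiten the strain by dividing its Fourier transform by the ASD; a common recipe, shown here only as a sketch (normalization conventions vary across tutorials):

import numpy as np

def whiten_sketch(strain, psd_interp, dt):
    # divide the strain FFT by the interpolated ASD, then invert the transform
    freqs = np.fft.rfftfreq(len(strain), d=dt)
    hf = np.fft.rfft(strain)
    white_hf = hf / np.sqrt(psd_interp(freqs)) * np.sqrt(2.0 * dt)
    return np.fft.irfft(white_hf, n=len(strain))

# e.g. whitened_H1 = whiten_sketch(hstrain, psd_H1, 1.0 / samplefreq)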
Code Example #32
    print(posture)
    #input()

    for interval in training_samples[posture]:
        start_samp = interval[0]
        end_samp = interval[1]

        n_win = (end_samp - start_samp) // window_size
        for i in range(n_win):
            next_samp = start_samp + window_size
            emg_data = time[start_samp:next_samp]

            x = chann1[start_samp:next_samp]
            x2 = chann2[start_samp:next_samp]

            power, freq = psd(x, NFFT=window_size, Fs=samp_rate)
            # keep only the 4-60 Hz band
            start_index = np.where(freq >= 4.0)[0][0]
            end_index = np.where(freq >= 60.0)[0][0]
            if not posture in psd_data_ch1:
                psd_data_ch1[posture] = np.empty(
                    (0, end_index - start_index + 1))
            psd_data_ch1[posture] = np.append(
                psd_data_ch1[posture], [power[start_index:end_index + 1]],
                axis=0)

            power2, freq2 = psd(x2, NFFT=window_size, Fs=samp_rate)
            start_index = np.where(freq2 >= 4.0)[0][0]
Code Example #33
File: plots.py Project: bdeeg08/framework-core
 def _psd(self, data):
     return mlab.psd(data, NFFT=self._nfft)
Code Example #34
def main(loglevel="INFO"):
    logger = logbook.Logger(__name__)
    # Reconfigure logger to show the pid number in log records
    logger = get_logger('msnoise.compute_cc_norot_child',
                        loglevel,
                        with_pid=True)
    logger.info('*** Starting: Compute CC ***')

    # Connection to the DB
    db = connect()

    if len(get_filters(db, all=False)) == 0:
        logger.info("NO FILTERS DEFINED, exiting")
        sys.exit()

    # Get Configuration
    params = get_params(db)
    filters = get_filters(db, all=False)
    logger.info("Will compute [%s] for different stations" %
                " ".join(params.components_to_compute))
    logger.info("Will compute [%s] for single stations" %
                " ".join(params.components_to_compute_single_station))

    if "R" in ''.join(params.components_to_compute) or "T" in ''.join(
            params.components_to_compute):
        logger.info(
            "You seem to have configured R and/or T components, thus rotations ARE needed. You should therefore use the 'msnoise compute_cc_rot' instead."
        )
        return ()

    if params.whitening not in ["A", "N"]:
        logger.info(
            "The 'whitening' parameter is set to '%s', which is not supported by this process. Set it to 'A' or 'N', or use the 'msnoise compute_cc_rot' instead."
            % params.whitening)
        return ()

    if params.remove_response:
        logger.debug('Pre-loading all instrument response')
        responses = preload_instrument_responses(db)
    else:
        responses = None
    logger.info("Checking if there are jobs to do")
    while is_next_job(db, jobtype='CC'):
        logger.info("Getting the next job")
        jobs = get_next_job(db, jobtype='CC')

        stations = []
        pairs = []
        refs = []

        for job in jobs:
            refs.append(job.ref)
            pairs.append(job.pair)
            netsta1, netsta2 = job.pair.split(':')
            stations.append(netsta1)
            stations.append(netsta2)
            goal_day = job.day

        stations = np.unique(stations)

        logger.info("New CC Job: %s (%i pairs with %i stations)" %
                    (goal_day, len(pairs), len(stations)))
        jt = time.time()

        comps = []
        for comp in params.all_components:
            if comp[0] in ["Z", "E", "N", "1", "2"]:
                comps.append(comp[0])
            if comp[1] in ["Z", "E", "N", "1", "2"]:
                comps.append(comp[1])

        comps = np.unique(comps)
        stream = preprocess(db, stations, comps, goal_day, params, responses)
        if not len(stream):
            logger.debug("Not enough data for this day !")
            logger.debug("Marking job Done and continuing with next !")
            for job in jobs:
                update_job(db, job.day, job.pair, 'CC', 'D', ref=job.ref)
            continue
        # print '##### STREAMS ARE ALL PREPARED AT goal Hz #####'
        dt = 1. / params.goal_sampling_rate
        logger.debug("Starting slides")
        start_processing = time.time()
        allcorr = {}
        for tmp in stream.slide(params.corr_duration,
                                params.corr_duration * (1 - params.overlap)):
            logger.debug("Processing %s - %s" %
                         (tmp[0].stats.starttime, tmp[0].stats.endtime))
            tmp = tmp.copy().sort()

            channels_to_remove = []
            for gap in tmp.get_gaps(min_gap=0):
                if gap[-2] > 0:
                    channels_to_remove.append(".".join(
                        [gap[0], gap[1], gap[2], gap[3]]))

            for chan in np.unique(channels_to_remove):
                logger.debug("%s contains gap(s), removing it" % chan)
                net, sta, loc, chan = chan.split(".")
                for tr in tmp.select(network=net,
                                     station=sta,
                                     location=loc,
                                     channel=chan):
                    tmp.remove(tr)
            if len(tmp) == 0:
                logger.debug("No traces without gaps")
                continue

            base = np.amax([tr.stats.npts for tr in tmp])
            if base <= (params.maxlag * params.goal_sampling_rate * 2 + 1):
                logger.debug("All traces shorter are too short to export"
                             " +-maxlag")
                continue

            for tr in tmp:
                if tr.stats.npts != base:
                    tmp.remove(tr)
                    logger.debug("One trace is too short, removing it")

            if len(tmp) == 0:
                logger.debug("No traces left in slice")
                continue

            nfft = next_fast_len(tmp[0].stats.npts)
            tmp.detrend("demean")

            for tr in tmp:
                if params.windsorizing == -1:
                    np.sign(tr.data, tr.data)  # inplace
                elif params.windsorizing != 0:
                    imin, imax = scoreatpercentile(tr.data, [1, 99])
                    not_outliers = np.where((tr.data >= imin)
                                            & (tr.data <= imax))[0]
                    rms = tr.data[not_outliers].std() * params.windsorizing
                    np.clip(tr.data, -rms, rms, tr.data)  # inplace
            # TODO should not hardcode 4 percent!
            tmp.taper(0.04)

            # TODO should not hardcode 100 taper points in spectrum
            napod = 100

            data = np.asarray([tr.data for tr in tmp])
            names = [tr.id.split(".") for tr in tmp]

            # index net.sta comps for energy later
            channel_index = {}
            if params.whitening != "N" and params.whitening_type == "PSD":
                psds = []
                for i, name in enumerate(names):
                    n1, s1, l1, c1 = name
                    netsta = "%s.%s" % (n1, s1)
                    if netsta not in channel_index:
                        channel_index[netsta] = {}
                    channel_index[netsta][c1[-1]] = i

                    pxx, freqs = mlab.psd(tmp[i].data,
                                          Fs=tmp[i].stats.sampling_rate,
                                          NFFT=nfft,
                                          detrend='mean')
                    psds.append(np.sqrt(pxx))
                psds = np.asarray(psds)
            else:
                psds = np.zeros(1)

            for chan in channel_index:
                comps = channel_index[chan].keys()
                if "E" in comps and "N" in comps:
                    i_e = channel_index[chan]["E"]
                    i_n = channel_index[chan]["N"]
                    # iZ = channel_index[chan]["Z"]
                    mm = psds[[i_e, i_n]].mean(axis=0)
                    psds[i_e] = mm
                    psds[i_n] = mm
                    # psds[iZ] = mm

            # define pairwise CCs
            tmptime = tmp[0].stats.starttime.datetime
            thisdate = tmptime.strftime("%Y-%m-%d")
            thistime = tmptime.strftime("%Y-%m-%d %H:%M:%S")

            # Standard operator for CC
            cc_index = []
            if len(params.components_to_compute):
                for sta1, sta2 in itertools.combinations(names, 2):
                    n1, s1, l1, c1 = sta1
                    n2, s2, l2, c2 = sta2
                    comp = "%s%s" % (c1[-1], c2[-1])
                    if comp in params.components_to_compute:
                        cc_index.append([
                            "%s.%s_%s.%s_%s" % (n1, s1, n2, s2, comp),
                            names.index(sta1),
                            names.index(sta2)
                        ])

            # Different iterator func for single station AC or SC:
            single_station_pair_index_sc = []
            single_station_pair_index_ac = []

            if len(params.components_to_compute_single_station):
                for sta1, sta2 in itertools.combinations_with_replacement(
                        names, 2):
                    n1, s1, l1, c1 = sta1
                    n2, s2, l2, c2 = sta2
                    if n1 != n2 or s1 != s2:
                        continue

                    comp = "%s%s" % (c1[-1], c2[-1])
                    if comp in params.components_to_compute_single_station:
                        if c1[-1] == c2[-1]:
                            single_station_pair_index_ac.append([
                                "%s.%s_%s.%s_%s" % (n1, s1, n2, s2, comp),
                                names.index(sta1),
                                names.index(sta2)
                            ])
                        else:
                            # If the components are different, we can just
                            # process them using the default CC code (should warn)
                            single_station_pair_index_sc.append([
                                "%s.%s_%s.%s_%s" % (n1, s1, n2, s2, comp),
                                names.index(sta1),
                                names.index(sta2)
                            ])
                    if comp[::-1] in params.components_to_compute_single_station:
                        if c1[-1] != c2[-1]:
                            # If the components are different, we can just
                            # process them using the default CC code (should warn)
                            single_station_pair_index_sc.append([
                                "%s.%s_%s.%s_%s" %
                                (n1, s1, n2, s2, comp[::-1]),
                                names.index(sta2),
                                names.index(sta1)
                            ])

            for filterdb in filters:
                filterid = filterdb.ref
                filterlow = float(filterdb.low)
                filterhigh = float(filterdb.high)

                freq_vec = scipy.fftpack.fftfreq(nfft, d=dt)[:nfft // 2]
                freq_sel = np.where((freq_vec >= filterlow)
                                    & (freq_vec <= filterhigh))[0]
                low = freq_sel[0] - napod
                if low <= 0:
                    low = 0
                p1 = freq_sel[0]
                p2 = freq_sel[-1]
                high = freq_sel[-1] + napod
                if high > nfft / 2:
                    high = int(nfft // 2)

                # Make a copy of the original data to prevent modifying it
                _data = data.copy()
                if params.whitening == "N":
                    # if the data doesn't need to be whitened, we can simply
                    # band-pass filter the traces now:
                    for i, _ in enumerate(_data):
                        _data[i] = bandpass(_,
                                            freqmin=filterlow,
                                            freqmax=filterhigh,
                                            df=params.goal_sampling_rate,
                                            corners=8)

                # First let's compute the AC and SC
                if len(single_station_pair_index_ac):
                    tmp = _data.copy()
                    if params.whitening == "A":
                        # if the data isn't already filtered, we still need to
                        # do it for the AutoCorrelation:
                        for i, _ in enumerate(tmp):
                            tmp[i] = bandpass(_,
                                              freqmin=filterlow,
                                              freqmax=filterhigh,
                                              df=params.goal_sampling_rate,
                                              corners=8)
                    if params.cc_type_single_station_AC == "CC":
                        ffts = scipy.fftpack.fftn(tmp, shape=[nfft, ], axes=[1, ])
                        energy = np.real(
                            np.sqrt(
                                np.mean(scipy.fftpack.ifft(ffts,
                                                           n=nfft,
                                                           axis=1)**2,
                                        axis=1)))

                        # Computing standard CC
                        corr = myCorr2(ffts,
                                       np.ceil(params.maxlag / dt),
                                       energy,
                                       single_station_pair_index_ac,
                                       plot=False,
                                       nfft=nfft)

                    elif params.cc_type_single_station_AC == "PCC":
                        corr = pcc_xcorr(tmp, np.ceil(params.maxlag / dt),
                                         None, single_station_pair_index_ac)
                    else:
                        print("cc_type_single_station_AC = %s not implemented, "
                              "exiting" % params.cc_type_single_station_AC)
                        exit(1)

                    for key in corr:
                        ccfid = key + "_%02i" % filterid + "_" + thisdate
                        if ccfid not in allcorr:
                            allcorr[ccfid] = {}
                        allcorr[ccfid][thistime] = corr[key]
                    del corr, energy

                if len(cc_index):
                    if params.cc_type == "CC":
                        ffts = scipy.fftpack.fftn(_data, shape=[nfft, ], axes=[1, ])
                        if params.whitening != "N":
                            whiten2(ffts, nfft, low, high, p1, p2, psds,
                                    params.whitening_type)  # inplace
                        # energy = np.sqrt(np.sum(np.abs(ffts)**2, axis=1)/nfft)
                        energy = np.real(
                            np.sqrt(
                                np.mean(scipy.fftpack.ifft(ffts,
                                                           n=nfft,
                                                           axis=1)**2,
                                        axis=1)))

                        # logger.info("Pre-whitened %i traces"%(i+1))
                        # Computing standard CC
                        corr = myCorr2(ffts,
                                       np.ceil(params.maxlag / dt),
                                       energy,
                                       cc_index,
                                       plot=False,
                                       nfft=nfft)

                        for key in corr:
                            ccfid = key + "_%02i" % filterid + "_" + thisdate
                            if ccfid not in allcorr:
                                allcorr[ccfid] = {}
                            allcorr[ccfid][thistime] = corr[key]
                        del corr, energy, ffts
                    else:
                        print("cc_type = %s not implemented, " "exiting")
                        exit(1)

                if len(single_station_pair_index_sc):
                    if params.cc_type_single_station_SC == "CC":
                        # logger.debug("Compute SC using %s" % params.cc_type)
                        ffts = scipy.fftpack.fftn(_data,
                                                  shape=[
                                                      nfft,
                                                  ],
                                                  axes=[
                                                      1,
                                                  ])
                        if params.whitening != "N":
                            whiten2(ffts, nfft, low, high, p1, p2, psds,
                                    params.whitening_type)  # inplace
                        # energy = np.sqrt(np.sum(np.abs(ffts)**2, axis=1)/nfft)
                        energy = np.real(
                            np.sqrt(
                                np.mean(scipy.fftpack.ifft(ffts,
                                                           n=nfft,
                                                           axis=1)**2,
                                        axis=1)))

                        # logger.info("Pre-whitened %i traces"%(i+1))
                        # Computing standard CC
                        corr = myCorr2(ffts,
                                       np.ceil(params.maxlag / dt),
                                       energy,
                                       single_station_pair_index_sc,
                                       plot=False,
                                       nfft=nfft)

                        for key in corr:
                            ccfid = key + "_%02i" % filterid + "_" + thisdate
                            if ccfid not in allcorr:
                                allcorr[ccfid] = {}
                            allcorr[ccfid][thistime] = corr[key]
                        del corr, energy, ffts
                    else:
                        print("cc_type_single_station_SC = %s not implemented, "
                              "exiting" % params.cc_type_single_station_SC)
                        exit(1)
            del psds
        # Needed to clean the FFT memory caching of SciPy
        clean_scipy_cache()

        if params.keep_all:
            for ccfid in allcorr.keys():
                export_allcorr2(db, ccfid, allcorr[ccfid])

        if params.keep_days:
            for ccfid in allcorr.keys():
                # print("Exporting %s" % ccfid)
                station1, station2, components, filterid, date = \
                    ccfid.split('_')

                corrs = np.asarray(list(allcorr[ccfid].values()))
                if not len(corrs):
                    logger.debug("No data to stack.")
                    continue
                corr = stack(corrs, params.stack_method, params.pws_timegate,
                             params.pws_power, params.goal_sampling_rate)
                if not len(corr):
                    logger.debug("No data to save.")
                    continue
                thisdate = goal_day
                thistime = "0_0"
                add_corr(db,
                         station1.replace('.', '_'),
                         station2.replace('.', '_'),
                         int(filterid),
                         thisdate,
                         thistime,
                         params.min30 / params.goal_sampling_rate,
                         components,
                         corr,
                         params.goal_sampling_rate,
                         day=True,
                         ncorr=corrs.shape[0],
                         params=params)

        # THIS SHOULD BE IN THE API
        massive_update_job(db, jobs, "D")
        if not params.hpc:
            for job in jobs:
                update_job(db, job.day, job.pair, 'STACK', 'T')

        logger.info("Job Finished. It took %.2f seconds (preprocess: %.2f s & "
                    "process %.2f s)" % ((time.time() - jt), start_processing -
                                         jt, time.time() - start_processing))
        del stream, allcorr
    logger.info('*** Finished: Compute CC ***')
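All three branches above share one pipeline: FFT the windowed traces, optionally whiten them, estimate each trace's RMS energy from the inverse transform, then correlate in the frequency domain and keep lags up to ±maxlag. A minimal self-contained sketch of that pipeline in plain NumPy (this is not the actual `myCorr2`/`whiten2` code, just the technique under simple assumptions):

import numpy as np

def freq_domain_cc(a, b, maxlag_s, fs):
    """Energy-normalized cross-correlation of two equal-length traces,
    computed in the frequency domain and trimmed to +-maxlag_s seconds."""
    n = len(a)
    nfft = 2 * n  # zero-pad to avoid circular wrap-around
    fa = np.fft.rfft(a, nfft)
    fb = np.fft.rfft(b, nfft)
    cc = np.fft.irfft(np.conj(fa) * fb, nfft)
    cc = np.concatenate((cc[-n + 1:], cc[:n]))      # lags -(n-1) .. n-1
    energy = np.sqrt(np.mean(a ** 2)) * np.sqrt(np.mean(b ** 2))
    cc /= (energy * n)                              # RMS-energy normalization
    lags = np.arange(-n + 1, n) / fs
    keep = np.abs(lags) <= maxlag_s
    return lags[keep], cc[keep]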
コード例 #35
0
import numpy as np
from matplotlib import mlab
from scipy import signal
from scipy.interpolate import interp1d

h1 *= signal.tukey(Nt, alpha=0.05)

dl = 1./4096
low_f = 30.
high_f = 500.

freqs = np.fft.rfftfreq(2 * Nt, dl)
freqs = freqs[:Nt // 2 + 1]

hf = np.fft.rfft(h1, n=2 * Nt, norm='ortho')

hf = hf[:Nt // 2 + 1]

print(hf)

Pxx, frexx = mlab.psd(h1, Fs=4096, NFFT=2 * 4096, noverlap=4096 // 2,
                      window=np.blackman(2 * 4096), scale_by_freq=False)

hf_psd = interp1d(frexx, Pxx)
hf_psd_data = np.abs(hf * np.conj(hf))

mask = (freqs>low_f) & (freqs < high_f)
mask2 = (freqs>80.) & (freqs < 300.)

#plt.figure()
#plt.loglog(hf_psd_data[mask])
#plt.loglog(hf_psd(freqs)[mask])
#plt.savefig('compare.pdf')


norm = np.mean(hf_psd_data[mask])/np.mean(hf_psd(freqs)[mask])
norm2 = np.mean(hf_psd_data[mask2])/np.mean(hf_psd(freqs)[mask2])
コード例 #36
0
ファイル: lab9.py プロジェクト: gianna7wu/biol133compneuro
import numpy as np
import numpy.fft as fft
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab

#import matplotlib
#Nyquist frequency is 32

#f1, f2 values for example
# f1 = 4.0
# f2 = 50.0

f1 = 10.0
f2 = 45.0

t = np.arange(0, 100, 0.015625)

#xt=np.sin(2*np.pi*t)
yt = np.sin(2.0 * np.pi * f1 * t) + np.sin(2.0 * np.pi * f2 * t)
power, freqs = mlab.psd(yt, NFFT=len(yt), Fs=64, pad_to=10192)
#print len(xt)
plt.plot(freqs, power)
plt.show()
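A quick sanity check on the aliasing here: with a step of 0.015625 s the sampling rate is 64 Hz and the Nyquist frequency is 32 Hz, so the 45 Hz component folds back into the band and should appear at 64 - 45 = 19 Hz in the PSD plot:

Fs = 1 / 0.015625      # 64 Hz sampling rate
nyquist = Fs / 2       # 32 Hz
alias = Fs - f2        # the 45 Hz tone aliases to 19 Hz
print(nyquist, alias)  # 32.0 19.0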
コード例 #37
0
ファイル: spectral_estimation.py プロジェクト: miili/obspy
def psd(x, NFFT=256, Fs=2, detrend=detrend_none, window=window_hanning,
        noverlap=0):
    """
    Wrapper for :func:`matplotlib.mlab.psd`.

    Always returns a onesided psd (positive frequencies only), corrects for
    this fact by scaling with a factor of 2. Also, always normalizes to dB/Hz
    by dividing with sampling rate.

    This wrapper is intended to intercept changes in
    :func:`matplotlib.mlab.psd` default behavior which changes with
    matplotlib version 0.98.4:

    * http://matplotlib.sourceforge.net/users/whats_new.html\
#psd-amplitude-scaling
    * http://matplotlib.sourceforge.net/_static/CHANGELOG
      (entries on 2009-05-18 and 2008-11-11)
    * http://matplotlib.svn.sourceforge.net/viewvc/matplotlib\
?view=revision&revision=6518
    * http://matplotlib.sourceforge.net/api/api_changes.html#changes-for-0-98-x

    .. note::
        For details on all arguments see :func:`matplotlib.mlab.psd`.

    .. note::
        When using `window=welch_taper`
        (:func:`obspy.signal.spectral_estimation.welch_taper`)
        and `detrend=detrend_linear` (:func:`matplotlib.mlab.detrend_linear`)
        the psd function delivers practically the same results as PITSA.
        Only DC and the first 3-4 lowest non-DC frequencies deviate very
        slightly. In contrast to PITSA, this routine also returns the psd value
        at the Nyquist frequency and therefore is one frequency sample longer.
    """
    # check if matplotlib is available, no official dependency for obspy.signal
    if MATPLOTLIB_VERSION is None:
        raise ImportError(msg_matplotlib_ImportError)

    # check matplotlib version
    elif MATPLOTLIB_VERSION >= [0, 98, 4]:
        new_matplotlib = True
    else:
        new_matplotlib = False
    # build up kwargs that do not change with version 0.98.4
    kwargs = {}
    kwargs['NFFT'] = NFFT
    kwargs['Fs'] = Fs
    kwargs['detrend'] = detrend
    kwargs['window'] = window
    kwargs['noverlap'] = noverlap
    # add additional kwargs to control behavior for matplotlib versions higher
    # than 0.98.4. These settings make sure that the scaling is already done
    # during the following psd call for newer matplotlib versions.
    if new_matplotlib:
        kwargs['pad_to'] = None
        kwargs['sides'] = 'onesided'
        kwargs['scale_by_freq'] = True
    # do the actual call to mlab.psd
    Pxx, freqs = mlab.psd(x, **kwargs)
    # do scaling manually for old matplotlib versions
    if not new_matplotlib:
        Pxx = Pxx / Fs
        Pxx[1:-1] = Pxx[1:-1] * 2.0
    return Pxx, freqs
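For illustration, a hedged usage sketch on synthetic data; with a modern matplotlib, the kwargs below are the same ones the wrapper builds, so no manual rescaling is needed:

import numpy as np
from matplotlib import mlab

fs = 100.0
t = np.arange(0, 60, 1 / fs)
x = np.sin(2 * np.pi * 5 * t) + 0.1 * np.random.randn(t.size)

# same kwargs the wrapper passes through for matplotlib >= 0.98.4
Pxx, freqs = mlab.psd(x, NFFT=256, Fs=fs, detrend=mlab.detrend_none,
                      window=mlab.window_hanning, noverlap=0,
                      pad_to=None, sides='onesided', scale_by_freq=True)
Pxx_db = 10 * np.log10(Pxx)  # power in dB/Hz, as the docstring describes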
コード例 #38
0
for rep, (spk_fl, grp_stat_fl, con_fl) in enumerate(
        zip(args.spikelist, args.groupstatlist, args.connectivitylist)):

    connectivity = pd.read_json(con_fl)
    connecitivty_e = connectivity.loc[connectivity['pre'] < 800]
    connecitivty_e_e = connecitivty_e.loc[connecitivty_e['post'] < 800]

    connecitivty_e_e['bin_w'] = pd.cut(connecitivty_e_e['weight'],
                                       np.arange(0, 10.5, 0.5))

    times, senders = hf.read_spikefile(spk_fl)
    exc_times, exc_sender, inh_times, inh_sender = hf.split_in_ex(
        times, senders)
    exc_rate, exc_bins = hf.bin_pop_rate(exc_times, exc_sender, bin_ms)
    exc_Pxx, exc_freqs = mlab.psd(exc_rate - np.mean(exc_rate),
                                  NFFT=NFFT,
                                  Fs=1000. / (exc_bins[1] - exc_bins[0]),
                                  noverlap=noverlap)

    exc_Pxx_tab[:, rep] = exc_Pxx
    idx = np.argmax(exc_Pxx[exc_freqs > 20])
    cut_freqs = exc_freqs[exc_freqs > 20]
    max_freq = cut_freqs[idx]
    if max_freq < 50:
        if table_low is None:
            table_low = pd.pivot_table(connecitivty_e_e,
                                       columns='bin_w',
                                       index='delay',
                                       values='weight',
                                       aggfunc=len)
        else:
            table_low = table_low.add(
コード例 #39
0
    def sdr_async_callback(self, iq, ctx):
        power, _ = mlab.psd(iq,
                            NFFT=self.CHUNK,
                            Fs=self.sdr.sample_rate,
                            scale_by_freq=False)
        self.signal.emit(np.sqrt(power))
コード例 #40
0
    x = np.arange(0, 240)
    VALUE_SIN = A * np.sin(x * sin_omega)
    return VALUE_SIN

def gaussion_sin_function(Amplitude):
    a = sinfunction(Amplitude)
    z2 = gaussian(a, time, 0, sigma)
    return z2

time = np.linspace(-2, 2, 240)
template = gaussion_sin_function(2)  # input gaussion_sin_function(Amplitude)

# Calculating the power spectral density with a window

NFFT = sampling_rate // 2
template_psd, template_freqs = mlab.psd(template, Fs=sampling_rate, NFFT=NFFT,
                                        noverlap=NFFT // 8,
                                        window=signal.tukey(NFFT, alpha=0.1))

## calculating the time-domain sine-Gaussian signal

dwindow = signal.tukey(len(template), alpha=0.9)
signal_with_window_function_time_space = template * dwindow

### using the FFT to compute the sine-Gaussian spectrum
f_template_fft = np.fft.fftfreq(len(signal_with_window_function_time_space), dt)
template_fft = np.fft.fft(template) * dt
signal_fft = np.fft.fft(signal_with_window_function_time_space) * dt



plt.plot(time, template, "r")
plt.xlabel("time")
コード例 #41
0
ファイル: spectral_estimation.py プロジェクト: rpratt20/obspy
    def __process(self, tr):
        """
        Processes a segment of data and adds the information to the
        PPSD histogram. Whether the Trace is compatible (station, channel,
        ...) has to be checked beforehand.

        :type tr: :class:`~obspy.core.trace.Trace`
        :param tr: Compatible Trace with data of one PPSD segment
        :returns: True if segment was successfully added to histogram, False
                otherwise.
        """
        # XXX DIRTY HACK!!
        if len(tr) == self.len + 1:
            tr.data = tr.data[:-1]
        # one last check..
        if len(tr) != self.len:
            msg = "Got a piece of data with wrong length. Skipping"
            warnings.warn(msg)
            print(len(tr), self.len)
            return False
        # being paranoid, only necessary if in-place operations would follow
        tr.data = tr.data.astype(np.float64)
        # if trace has a masked array we fill in zeros
        try:
            tr.data[tr.data.mask] = 0.0
        # if it is no masked array, we get an AttributeError
        # and have nothing to do
        except AttributeError:
            pass

        # restitution:
        # McNamara applies the correction at the end in the freq-domain;
        # does it make a difference?
        # probably should be done earlier on bigger chunk of data?!
        # Yes, you should avoid removing the response until after you
        # have estimated the spectra to avoid elevated lp noise

        spec, _freq = mlab.psd(tr.data, self.nfft, self.sampling_rate,
                               detrend=mlab.detrend_linear, window=fft_taper,
                               noverlap=self.nlap, sides='onesided',
                               scale_by_freq=True)

        # leave out first entry (offset)
        spec = spec[1:]

        # working with the periods not frequencies later so reverse spectrum
        spec = spec[::-1]

        # Here we remove the response using the same conventions
        # since the power is squared we want to square the sensitivity
        # we can also convert to acceleration if we have non-rotational data
        if self.is_rotational_data:
            # in case of rotational data just remove sensitivity
            spec /= self.metadata['sensitivity'] ** 2
        else:
            # determine instrument response from metadata
            try:
                resp = self._get_response(tr)
            except Exception as e:
                msg = ("Error getting response from provided metadata:\n"
                       "%s: %s\n"
                       "Skipping time segment(s).")
                msg = msg % (e.__class__.__name__, str(e))
                warnings.warn(msg)
                return False

            resp = resp[1:]
            resp = resp[::-1]
            # Now get the amplitude response (squared)
            respamp = np.absolute(resp * np.conjugate(resp))
            # Make omega with the same conventions as spec
            w = 2.0 * math.pi * _freq[1:]
            w = w[::-1]
            # Here we do the response removal
            spec = (w ** 2) * spec / respamp
        # avoid calculating log of zero
        idx = spec < dtiny
        spec[idx] = dtiny

        # go to dB
        spec = np.log10(spec)
        spec *= 10

        spec_octaves = []
        # do this for the whole period range and append the values to our lists
        for per_left, per_right in zip(self.per_octaves_left,
                                       self.per_octaves_right):
            specs = spec[(per_left <= self.per) & (self.per <= per_right)]
            spec_center = specs.mean()
            spec_octaves.append(spec_center)
        spec_octaves = np.array(spec_octaves)

        hist, self.xedges, self.yedges = np.histogram2d(
            self.per_octaves,
            spec_octaves, bins=(self.period_bins, self.spec_bins))

        try:
            # we have to make sure manually that the bins are always the same!
            # this is done with the various assert() statements above.
            self.hist_stack += hist
        except TypeError:
            # only during first run initialize stack with first histogram
            self.hist_stack = hist
        return True
コード例 #42
0
def _spectral_density(x,
                      y,
                      Fs,
                      Nf,
                      Nens,
                      Npts_per_real,
                      Npts_overlap,
                      Npts_per_ens,
                      detrend,
                      window,
                      print_status=False,
                      status_label=''):
    'Get spectral density of provided signals.'
    same_data = x is y

    # Initialize spectral density array
    if not same_data:
        # Cross-spectral density is intrinsically complex-valued, so
        # we must initialize the spectral density as a complex-valued
        # array to avoid loss of information
        Gxy = np.zeros([Nf, Nens], dtype=np.complex128)
    else:
        # Autospectral density is intrinsically real-valued
        # (assuming `x` is real-valued), so we don't need the
        # overhead of a complex-valued array
        Gxy = np.zeros([Nf, Nens])

    if print_status:
        print('')

    # Loop over successive ensembles
    for ens in np.arange(Nens):
        # Create a slice corresponding to current ensemble
        ens_start = ens * Npts_per_ens
        ens_stop = (ens + 1) * Npts_per_ens
        sl = slice(ens_start, ens_stop)

        if same_data:
            Gxy[:, ens] = mlab.psd(x[sl],
                                   Fs=Fs,
                                   NFFT=Npts_per_real,
                                   noverlap=Npts_overlap,
                                   detrend=detrend,
                                   window=window)[0]
        else:
            Gxy[:, ens] = mlab.csd(x[sl],
                                   y[sl],
                                   Fs=Fs,
                                   NFFT=Npts_per_real,
                                   noverlap=Npts_overlap,
                                   detrend=detrend,
                                   window=window)[0]

        if print_status:
            print('%s percent complete: %.1f' %
                  (status_label, 100 * float(ens + 1) / Nens), end='\r')

    if print_status:
        print('')

    return Gxy
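A hedged invocation sketch (the sampling rate and segment sizes below are made-up values): the caller is expected to have split the record into `Nens` ensembles of `Npts_per_ens` points, with `Nf = Npts_per_real // 2 + 1` matching the one-sided bin count that `mlab.psd` returns:

import numpy as np
from matplotlib import mlab

Fs = 4e6                        # assumed digitizer rate
Npts_per_real = 1024            # FFT window per realization
Npts_overlap = Npts_per_real // 2
Npts_per_ens = 8192             # points per ensemble
Nens = 10
Nf = Npts_per_real // 2 + 1     # one-sided frequency-bin count

x = np.random.randn(Nens * Npts_per_ens)
Gxx = _spectral_density(x, x, Fs, Nf, Nens, Npts_per_real, Npts_overlap,
                        Npts_per_ens, detrend=mlab.detrend_none,
                        window=mlab.window_hanning)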
コード例 #43
0
# index into the strain time series for this time interval:
indxt = np.where((time_H1 >= tevent - deltat) & (time_H1 < tevent + deltat))

plt.figure()
plt.plot(time_H1[indxt] - tevent, strain_H1[indxt], 'r', label='H1 strain')
plt.xlabel('time (s) since ' + str(tevent))
plt.ylabel('strain')
plt.legend(loc='lower right')
plt.title('Advanced LIGO strain data near GW150914')
plt.show()

# Fourier transform the data:
NFFT = 1 * fs
fmin = 10
fmax = 2000
Pxx_H1, freqs = mlab.psd(strain_H1, Fs=fs, NFFT=NFFT)

# We will use interpolations of the ASDs computed above for whitening:
psd_H1 = interp1d(freqs, Pxx_H1)

# plot the ASDs:
plt.figure()
plt.loglog(freqs, np.sqrt(Pxx_H1), 'r', label='H1 strain')
plt.axis([fmin, fmax, 1e-24, 1e-19])
plt.grid('on')
plt.ylabel('ASD (strain/rtHz)')
plt.xlabel('Freq (Hz)')
plt.legend(loc='upper center')
plt.title('Advanced LIGO strain data near GW150914')
plt.show()
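The interpolated PSD is what gets used for whitening. A sketch of the standard LOSC-tutorial recipe (assuming dt = 1/fs): divide the strain's rFFT by the one-sided noise ASD and invert:

import numpy as np

def whiten(strain, interp_psd, dt):
    # divide by the noise amplitude spectral density in the frequency domain
    Nt = len(strain)
    freqs = np.fft.rfftfreq(Nt, dt)
    hf = np.fft.rfft(strain)
    white_hf = hf / np.sqrt(interp_psd(freqs) / dt / 2.0)
    return np.fft.irfft(white_hf, n=Nt)

# strain_H1_whiten = whiten(strain_H1, psd_H1, 1.0 / fs)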
コード例 #44
0
time = a.timeaxis(datatype='sci', asic=asic)
dd = a.timeline_array(asic=asic)
FREQ_SAMPLING = 1 / a.asic(asic).sample_period()
ndet, nsamples = np.shape(dd)
#### Selection of detectors for which the signal is obvious
good_dets = [37, 45, 49, 70, 77, 90, 106, 109, 110]
best_det = 37
theTES = best_det
###### TOD Example #####
TimeSigPlot(time, dd, theTES)

###### TOD Power Spectrum #####
frange = [0.3, 15]  # range of plot frequencies desired
filt = 5
spectrum, freq = mlab.psd(dd[theTES, :],
                          Fs=FREQ_SAMPLING,
                          NFFT=nsamples,
                          window=mlab.window_hanning)
filtered_spec = f.gaussian_filter1d(spectrum, filt)

FreqResp(freq, frange, filtered_spec, theTES, fff)

##### NEW PLOT WITH FILTERED OVERPLOT
##### Filtering out the signal from the PT
freqs_pt = [1.72383, 3.24323, 3.44727, 5.69583, 6.7533, 9.64412, 12.9874]
bw_0 = 0.005

notch = ft.notch_array(freqs_pt, bw_0)

# hack to add number of harmonics expected in fibtools.filter_data
notch_shape = np.array(notch.shape)
notch_shape[1] += 1
コード例 #45
0
ファイル: fig7.py プロジェクト: tt-nakamura/GWDA
freq = rfftfreq(N, 1 / fs)
time = np.arange(N) / fs - t0
t = time[(time >= t1) & (time < t2)]

window = np.hanning(N)

template = h5py.File('LOSC_Event_tutorial/GW150914_4_template.hdf5', 'r')
(hp, hc) = template['template']
hp = rfft(np.roll(hp[::-1] * window, N // 2))
hc = rfft(np.roll(hc[::-1] * window, N // 2))
template = hc + hp * 1j

plt.figure(figsize=(6.4, 5.4))

for (i, h) in enumerate(data):
    (S, f) = psd(h, Fs=fs, NFFT=N // 8)
    S = np.interp(freq, f, S * fs)
    h = rfft(h * window)

    C = 2 * ifft(h * template / S, N)
    C = C[(time >= t1) & (time < t2)]
    sigma = np.sqrt(np.sum(np.abs(template)**2 / S) / N)
    rho = np.abs(C) / sigma
    rho_max = np.max(rho)

    print(label[i])
    print('SNR max =', rho_max)
    print('event time =', t[np.argmax(rho)] + t0)
    print('2*phi =', np.degrees(np.angle(C[np.argmax(rho)])))
    print('distance =', sigma / rho_max)
コード例 #46
0
plt.figure()
plt.subplot(2, 3, 1)
if rescale:
    plt.plot(w, sd / sd[0], '-', wmax, sdmax / sd[0], 'o')
#    plt.plot(w, sd/sd[0], '-')
#    plt.hold()
#    plt.plot(wmax, sdmax/sd[0], 'o')
else:
    plt.plot(w, sd, '-', wmax, sdmax, 'o')
#    plt.hold()
#    plt.plot(wmax, sdmax, 'o')

plt.title('DGP')

sdm, wm = mlb.psd(x)
sdm = sdm.ravel()
pm = ndimage.filters.maximum_filter(sdm, footprint=np.ones(5))
maxind = np.nonzero(pm == sdm)

plt.subplot(2, 3, 2)
if rescale:
    plt.plot(wm, sdm / sdm[0], '-', wm[maxind], sdm[maxind] / sdm[0], 'o')
else:
    plt.plot(wm, sdm, '-', wm[maxind], sdm[maxind], 'o')
plt.title('matplotlib')

if hastalkbox:
    sdp, wp = stbs.periodogram(x)
    plt.subplot(2, 3, 3)
コード例 #47
0
ファイル: app.py プロジェクト: sysrun/websdr-plus
    def updateSamples(self):
        self.samples = self.sdr.read_samples(NUM_SAMPLES_PER_SCAN)
        self.psd_scan, self.f = psd(self.samples, NFFT=NFFT)
        threading.Timer(0.02, self.updateSamples).start()
コード例 #48
0
        values = np.frombuffer(data)
        ns = int(len(values) / n_channels)
        samp_count += ns
        ps += ns

        for i in range(ns):
            for j in range(n_channels):
                emg_data[j].append(values[n_channels*i + j])

        elapsed_time = time.time() - start_time
        if elapsed_time >= 0.1 and samp_count>=win_size:

            window_data = np.array([x[-win_size:] for x in emg_data])

            # Power Spectral Analisis
            power1, freq1 = psd(window_data[0], NFFT=win_size, Fs=samp_rate)
            power2, freq2 = psd(window_data[2], NFFT=win_size, Fs=samp_rate)
            axs[0,0].cla()
            axs[0,1].cla()
            axs[1,0].cla()
            axs[1,1].cla()
            start_time = time.time()
            axs[0,0].plot(window_data[4], window_data[0], color='blue', label='Channel 1')
            axs[0,1].plot(window_data[4], window_data[2], color='green', label='Channel 2')
            axs[0,0].set(xlabel="Time(ms)", ylabel='micro V')
            axs[0,0].set_ylim([-20, 20])
            axs[0,1].set(xlabel="Time(ms)", ylabel='micro V')
            axs[0,1].set_ylim([-20, 20])

            start_index = np.where(freq1 >= 4.0)[0][0]
            end_index = np.where(freq1 >= 60.0)[0][0]
コード例 #49
0
def plot_example_psds(example, rate):
    """
    This function creates a figure with 4 lines to show the overall psd for 
    the four sleep examples. (Recall row 0 is REM, rows 1-3 are NREM stages 1,
    2 and 3/4)
    """

    pxx_max_1 = []
    pxx_max_2 = []
    pxx_max_3 = []

    example_1_max = []
    example_2_max = []
    example_3_max = []

    example_1_min = []
    example_2_min = []
    example_3_min = []

    pxx0, freq0 = m.psd(example[0], 256, rate)
    pxx1, freq1 = m.psd(example[1], 256, rate)
    pxx2, freq2 = m.psd(example[2], 256, rate)
    pxx3, freq3 = m.psd(example[3], 256, rate)
    pxx0_normalized = pxx0 / sum(pxx0)
    pxx1_normalized = pxx1 / sum(pxx1)
    pxx2_normalized = pxx2 / sum(pxx2)
    pxx3_normalized = pxx3 / sum(pxx3)

    plt.figure()
    plt.plot(freq0, pxx0_normalized, color='k', label='REM sleep')
    plt.plot(freq1, pxx1_normalized, color='r', label='st1 NREM sleep')
    plt.plot(freq2, pxx2_normalized, color='b', label='st2 NREM sleep')
    plt.plot(freq3, pxx3_normalized, color='c', label='st3 NREM sleep')
    plt.xlim(0, 30)
    plt.xlabel('frequency (Hz)')
    plt.ylabel('Power Spectral Density (dB/Hz)')
    plt.title('PSDs for 4 stages')
    plt.legend(loc=0)
    plt.show()

    for i in range(10):
        start = i * 3840
        end = (i + 1) * 3840
        pxx, freq = m.psd(example[1][start:end], 256, rate)
        pxx_normalized = pxx / sum(pxx)
        pxx_max_1 = np.append(pxx_max_1, max(pxx_normalized))
        example_1_max = np.append(example_1_max, max(example[1][start:end]))
        example_1_min = np.append(example_1_min, min(example[1][start:end]))
    print([
        max(example_1_min),
        min(example_1_min),
        max(example_1_max),
        min(example_1_max),
        max(pxx_max_1),
        min(pxx_max_1)
    ])

    for i in range(10):
        start = i * 3840
        end = (i + 1) * 3840
        pxx, freq = m.psd(example[2][start:end], 256, rate)
        pxx_normalized = pxx / sum(pxx)
        pxx_max_2 = np.append(pxx_max_2, max(pxx_normalized))
        example_2_max = np.append(example_2_max, max(example[2][start:end]))
        example_2_min = np.append(example_2_min, min(example[2][start:end]))
    print([
        max(example_2_min),
        min(example_2_min),
        max(example_2_max),
        min(example_2_max),
        max(pxx_max_2),
        min(pxx_max_2)
    ])

    for i in range(10):
        start = i * 3840
        end = (i + 1) * 3840
        pxx, freq = m.psd(example[3][start:end], 256, rate)
        pxx_normalized = pxx / sum(pxx)
        pxx_max_3 = np.append(pxx_max_3, max(pxx_normalized))
        example_3_max = np.append(example_3_max, max(example[3][start:end]))
        example_3_min = np.append(example_3_min, min(example[3][start:end]))
    print([
        max(example_3_min),
        min(example_3_min),
        max(example_3_max),
        min(example_3_max),
        max(pxx_max_3),
        min(pxx_max_3)
    ])
コード例 #50
0
def pyASD(data_in, fs, nfft):
    pxx, freq = mlab.psd(data_in, NFFT=nfft, Fs=fs)
    return pxx, freq
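Despite its name, this helper returns the power spectral density; the amplitude spectral density is its square root, e.g.:

import numpy as np

pxx, freq = pyASD(data_in, fs, nfft)
asd = np.sqrt(pxx)  # amplitude spectral density, in units/sqrt(Hz)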
コード例 #51
0
# The frequency resolution of the spectral estimation is given by
# fs/nfft. The length of the Fourier transform nfft should be chosen
# so that this resolution is equal to the resolution in the
# simulated data
nfft = int(round(fs / df))  # round to the nearest integer

# Sanity check the transform length
print("Calculated length of Fourier transform:", nfft)
if nfft > S:
    print("\nThere are not enough sampled points to get the frequency"
          "\nresolution you need. Consider halving nfft.")
    exit()

(autospectrum, f) = mlab.psd(df_time['pressure'].to_numpy(),
                             window=mlab.window_none,
                             NFFT=nfft,
                             Fs=fs)
(crossspectrum, f) = mlab.csd(df_time['pressure'].to_numpy(),
                              df_time['flow'].to_numpy(),
                              window=mlab.window_none,
                              NFFT=nfft,
                              Fs=fs)

# Make a pandas dataframe containing the full spectra results
data = {"freq": f, "auto": autospectrum, "cross": crossspectrum}
spectra_full = pd.DataFrame(data)

# Filter the full spectra to keep only those frequencies that
# are contained in the simulated signals. If the values of the
# spectra are used at frequencies not contained in the simulation
# then the estimated impedance will be wrong at those frequencies
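A minimal sketch of that filtering step, assuming the frequencies of the simulated components are available in an array `sim_freqs` (a hypothetical name; it is not defined in this excerpt):

import numpy as np

# keep only rows whose frequency matches a simulated component,
# rounding to tolerate floating-point bin centers
keep = np.isin(spectra_full["freq"].round(6), np.round(sim_freqs, 6))
spectra = spectra_full[keep]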
コード例 #52
0
def plotWindow(figure, posture, window):

    start_samp = training_samples[posture][window][0]
    end_samp = training_samples[posture][window][1]
    plt.figure(figure)

    plt.subplot(2, 2, 1)
    plt.plot(time[start_samp:end_samp],
             chann1[start_samp:end_samp],
             label='Channel 1')
    plt.xlabel('Time (s)')
    plt.ylabel('micro V')
    plt.legend()

    plt.subplot(2, 2, 2)
    plt.plot(time[start_samp:end_samp],
             chann2[start_samp:end_samp],
             color='red',
             label='Channel 2')
    plt.xlabel('Time (s)')
    plt.ylabel('micro V')
    plt.legend()

    # Power Spectral Density (PSD) (1 second of training data)
    win_size = 256
    ini_samp = training_samples[posture][window][0]
    end_samp = ini_samp + win_size
    x = chann1[ini_samp:end_samp]
    x2 = chann2[ini_samp:end_samp]
    t = time[ini_samp:end_samp]

    power, freq = psd(x, NFFT=win_size, Fs=samp_rate)

    start_freq = next(x for x, val in enumerate(freq) if val >= 4.0)
    end_freq = next(x for x, val in enumerate(freq) if val >= 60.0)
    #print(start_freq, end_freq)

    start_index = np.where(freq >= 4.0)[0][0]
    end_index = np.where(freq >= 60.0)[0][0]

    plt.subplot(2, 2, 3)
    plt.plot(freq[start_index:end_index],
             power[start_index:end_index],
             label='Channel 1')
    plt.xlabel('Hz')
    plt.ylabel('Power')
    plt.legend()

    power2, freq2 = psd(x2, NFFT=win_size, Fs=samp_rate)

    start_freq = next(x for x, val in enumerate(freq2) if val >= 4.0)
    end_freq = next(x for x, val in enumerate(freq2) if val >= 60.0)

    start_index = np.where(freq2 >= 4.0)[0][0]
    end_index = np.where(freq2 >= 60.0)[0][0]

    plt.subplot(2, 2, 4)
    plt.plot(freq2[start_index:end_index],
             power2[start_index:end_index],
             color='red',
             label='Channel 2')
    plt.xlabel('Hz')
    plt.ylabel('Power')
    plt.legend()
コード例 #53
0
def profile(df, raw_dat_col = 0, drum_diam=3.17e-2, return_pos=False, \
            numbins = 500, fit_intensity=False, \
            intensity_func = gauss_intensity, guess = 3.0e-3, \
            plot_peaks = False):
    ''' Takes a DataFile instance, extracts digitized data from the ThorLabs
    WM100 beam profiler, computes the derivative to find the profile, then
    averages many profiles from a single time stream.
    
    INPUTS:  df, DataFile instance with profiles
             raw_dat_col, column in 'other_data' with raw WM100 monitor
             drum_diam, diameter of the optical head that rotates
             return_pos, boolean to specify if return in raw time or calibrated
                         drum position using the drum_diam argument

    OUTPUTS: all_t, all times associated with profiles, overlain/sorted
             all_prof, all profiles overlain and sorted
    '''

    raw_dat = df.other_data[raw_dat_col]

    numpoints = len(raw_dat)
    fsamp = df.fsamp
    dt = 1.0 / fsamp
    t = np.linspace(0, (numpoints - 1) * dt, numpoints)

    psd, freqs = mlab.psd(raw_dat, NFFT=len(raw_dat), Fs=fsamp)
    chopfreq = freqs[np.argmax(psd[5:]) + 5]

    if chopfreq > 15:
        chopfreq = 10.2

    grad = np.gradient(raw_dat)

    dt_chop = 1.0 / chopfreq
    numchops = int(t[-1] / dt_chop)
    twidth = (guess / (2.0 * np.pi * 10.0e-3)) * dt_chop

    peaks = pdet.peakdetect(grad, lookahead=50, delta=0.075)

    pos_peaks = peaks[0]
    neg_peaks = peaks[1]

    tot_prof = []
    tot_t = []

    if plot_peaks:
        for peakind, pos_peak in enumerate(pos_peaks):
            try:
                neg_peak = neg_peaks[peakind]
            except:
                continue
            plt.plot(t[pos_peak[0]], pos_peak[1], 'x', color='r')
            plt.plot(t[neg_peak[0]], neg_peak[1], 'x', color='b')
        plt.plot(t, grad)
        plt.show()

    # since the chopper and ADC aren't triggered together and don't
    # have the same timebase, need to make sure only have nice pairs
    # of peaks so we can look at forward going and backward going
    # separately. Since we know positive going should be first
    # this is quite easy to accomplish
    if neg_peaks[0][0] < pos_peaks[0][0]:
        neg_first = True
    elif neg_peaks[0][0] > pos_peaks[0][0]:
        neg_first = False
    else:
        print("Couldn't figure out positive or negative first...")

    if neg_first:
        pos_peaks = pos_peaks[:-1]
        neg_peaks = neg_peaks[1:]

    if len(pos_peaks) > len(neg_peaks):
        pos_peaks = pos_peaks[:-1]
    elif len(neg_peaks) > len(pos_peaks):
        neg_peaks = neg_peaks[1:]

    for ind, pos_peak in enumerate(pos_peaks):
        neg_peak = neg_peaks[ind]

        pos_peak_loc = pos_peak[0]
        neg_peak_loc = neg_peak[0]

        if pos_peak_loc < 250:
            continue

        fit_t = t[pos_peak_loc - 250:neg_peak_loc + 250]
        fit_prof = grad[pos_peak_loc - 250:neg_peak_loc + 250]

        # try:
        new_t, new_prof = fit_gauss_and_truncate(fit_t, fit_prof, \
                                                 twidth, numbins = numbins)

        if len(tot_t) == 0:
            tot_t = new_t
            tot_prof = new_prof
        else:
            tot_t = np.hstack((tot_t, new_t))
            tot_prof = np.hstack((tot_prof, new_prof))

        # except:
        #     plt.plot(fit_t, fit_prof)
        #     plt.show()
        #     input()
        #     print('Failed to fit and return result')

    sort_inds = tot_t.argsort()

    tot_d = 2 * np.pi * 10.2 * (drum_diam * 0.5) * tot_t

    new_t = tot_t[sort_inds]
    new_d = tot_d[sort_inds]
    new_prof = tot_prof[sort_inds]

    if fit_intensity:
        if return_pos:
            xvec = new_d
        else:
            xvec = new_t

        width = 0.2 * (np.max(xvec) - np.min(xvec))
        newguess = [np.max(new_prof), 0, width]

        try:
            popt, pcov = opti.curve_fit(intensity_func, xvec, \
                                        new_prof, p0 = newguess)
            return xvec, new_prof, popt
        except:
            print("Fit didn't work!")

    if return_pos:
        return new_d, new_prof
    else:
        return new_t, new_prof
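The key idea in `profile` is that the photodiode records the integrated power as the drum edge sweeps across the beam, so differentiating that record recovers the beam profile; a toy illustration with a synthetic Gaussian beam (all values illustrative):

import numpy as np
from scipy.special import erf

t = np.linspace(-1, 1, 2000)
power = 0.5 * (1 + erf(t / 0.2))     # integrated power: edge uncovering a Gaussian beam
profile_est = np.gradient(power, t)  # its derivative is the Gaussian profile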
コード例 #54
0
    sr = 100000
    nfft = 2**14
    params = {'NFFT': nfft, 'noverlap': nfft // 2}

    metadata, traces = load_traces_dat(folderpath,
                                       'transferfunction-traces.dat')

    t = traces[0]
    x = t[1, :]
    y = t[2, :]

    # subtract mean
    x = x - np.mean(x)
    y = y - np.mean(y)

    Pxx, _ = ml.psd(x, Fs=sr, **params)
    Pyy, _ = ml.psd(y, Fs=sr, **params)
    Pxy, f = ml.csd(x, y, Fs=sr, **params)

    fig = custom_fig('CSD illustration', (13, 15))
    ax1 = plt.subplot(2, 1, 1)
    ax1.fill_between([0, 20000], -0.1, 1.1, color='lightgray')
    ax1.plot(f, Pxx / max(Pxx), label='PSD(x)')
    ax1.plot(f, Pyy / max(Pyy), label='PSD(y)')
    ax1.set_ylabel('Auto spectral density')
    ax1.set_yticks([])
    ax1.set_ylim(-0.1, 1.1)
    ax1.set_xlim(-500, 30000)
    ax1.set_xticklabels([])
    ax1.legend()
コード例 #55
0
ds2 = simulateDSM(sig, ntf2)[0]

ndt = tplot2 - tplot0 + 1
dcval1 = np.sum(ds1[tplot0:tplot2]) / ndt
dcval2 = np.sum(ds2[tplot0:tplot2]) / ndt
print('Accuracy in the recovery of DC components')
print('recovered DC on-off', dcval1)
print('recovered DC low-dc-noise', dcval2)
print('original DC', np.sum(sig[tplot0:tplot2]) / ndt)

print("Computing spectra")
NFFT = int(2**(np.ceil(np.log(nfft_periods * 1. / fa * fphi) / np.log(2))))
# This is a bit rough... there may be some residual power at the
# signal frequencies. To obtain a good plot make c1, c2, c3 very small
(psd1, freqs) = mlab.psd((ds1 - sig)[tplot0:tplot2],
                         Fs=2 * fphi,
                         NFFT=NFFT,
                         noverlap=NFFT // 2)
(psd2, freqs) = mlab.psd((ds2 - sig)[tplot0:tplot2],
                         Fs=2 * fphi,
                         NFFT=NFFT,
                         noverlap=NFFT // 2)
minfreq = 1
df = freqs[1] - freqs[0]
minidx = int(np.ceil(minfreq / df))

plt.figure()
plt.plot(freqs[minidx:], dbp(psd1[minidx:]), 'b', label='on-off')
plt.plot(freqs[minidx:], dbp(psd2[minidx:]), 'r', label='low-dc-noise')
plt.xlim(2, 128E3)
plt.xscale('log', basex=10)
plt.xlabel('$f$', x=1.)