Example #1
import numpy as np
#tP (the pulse-detection helper used below) is presumably imported elsewhere in the original file

def makeWienerNoiseSpectrum(data, peakIndices=(), numBefore=100, numAfter=700, noiseOffsetFromPeak=200, sampleRate=1e6, template=(), isVerbose=False, baselineSubtract=True):
    nFftPoints = numBefore + numAfter
    peakIndices = np.array(peakIndices).astype(int)
    
    #If no peaks, choose random indices to make spectrum 
    if len(peakIndices)==0:
        print('warning: makeWienerNoiseSpectrum was not passed any peakIndices. Generating random indices now')
        peakIndices=np.array([0])
        rate = len(data)/nFftPoints/10
        while peakIndices[-1]<(len(data)-1):
            prob=np.random.rand()
            currentIndex=peakIndices[-1]
            peakIndices=np.append(peakIndices,currentIndex+np.ceil(-np.log(prob)/rate).astype(int))
        peakIndices=peakIndices[:-2]      
    if len(peakIndices)==0:
        raise ValueError('makeWienerNoiseSpectrum: input data set is too short for the number of FFT points specified')
   
    #Baseline subtract noise data
    if(baselineSubtract):
        noiseStream = np.array([])
        for iPeak,peakIndex in enumerate(peakIndices):
            if peakIndex > nFftPoints+noiseOffsetFromPeak and peakIndex < len(data)-numAfter:
                noiseStream = np.append(noiseStream, data[peakIndex-nFftPoints-noiseOffsetFromPeak:peakIndex-noiseOffsetFromPeak])
        data = data - np.mean(noiseStream)
    
    #Calculate noise spectra for the defined area before each pulse
    noiseSpectra = np.zeros((len(peakIndices), nFftPoints))
    rejectInd = np.array([])
    for iPeak, peakIndex in enumerate(peakIndices):
        if peakIndex > nFftPoints+noiseOffsetFromPeak and peakIndex < len(data)-numAfter:
            noiseData = data[peakIndex-nFftPoints-noiseOffsetFromPeak:peakIndex-noiseOffsetFromPeak]
            noiseSpectra[iPeak] = np.abs(np.fft.fft(noiseData)/nFftPoints)**2
            if len(template) != 0:
                #reject traces that still contain pulses after correlating with the template
                filteredData = np.correlate(noiseData, template, mode='same')
                peakDict = tP.detectPulses(filteredData, nSigmaThreshold=3., negDerivLenience=1, bNegativePulses=False)
                if len(peakDict['peakIndices']) != 0:
                    rejectInd = np.append(rejectInd, iPeak)
        else:
            #peak too close to the edge of the data; its row was never filled and would bias the median
            rejectInd = np.append(rejectInd, iPeak)

    #Remove rows contaminated by pulses (found by correlating with the template, if provided)
    #along with rows that were never filled
    noiseSpectra = np.delete(noiseSpectra, rejectInd.astype(int), axis=0)
    noiseFreqs = np.fft.fftfreq(nFftPoints,1./sampleRate)    
    noiseSpectrum = np.median(noiseSpectra,axis=0)
    #noiseSpectrum[0] = 2.*noiseSpectrum[1] #look into this later 8/15/16
    if isVerbose:
        print(len(noiseSpectra[:, 0]), 'traces used to make noise spectrum,', len(rejectInd), 'cut for pulse contamination or proximity to the data edges')

    return {'noiseSpectrum':noiseSpectrum, 'noiseFreqs':noiseFreqs}
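
#Hypothetical usage sketch (illustration only, not part of the original module):
#fakeData and fakePeaks below are made up, just to show the call signature and
#the keys of the returned dictionary.
fakeData = np.random.randn(100000)                 #synthetic noise trace, arbitrary units
fakePeaks = [5000, 20000, 40000, 60000, 80000]     #made-up peak locations
wienerDict = makeWienerNoiseSpectrum(fakeData, peakIndices=fakePeaks, isVerbose=True)
print(wienerDict['noiseFreqs'].shape, wienerDict['noiseSpectrum'].shape)   #each has nFftPoints entries
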
def makeNoiseSpectrum(data,
                      peakIndices=(),
                      window=800,
                      noiseOffsetFromPeak=200,
                      sampleRate=1e6,
                      filt=(),
                      isVerbose=False,
                      baselineSubtract=True):
    '''
    Makes a one-sided noise power spectrum in units of V^2/Hz by averaging together Fourier transforms of
    noise traces taken just before each pulse. Potential pulse contamination is screened out using the
    provided peak indices and filter.

    INPUTS:
    data - raw data to calculate the noise from
    peakIndices - list of known peak indices; chosen randomly if not specified
    window - number of points in each individual transform used for averaging
    noiseOffsetFromPeak - the noise window ends this many points before each peak
    sampleRate - sample rate of the data in Hz
    filt - filter for detecting unspecified pulses in the data
    isVerbose - print extra info to the terminal
    baselineSubtract - subtract the baseline before computing spectra

    OUTPUTS:
    dictionary containing the frequencies, the noise spectrum, and the peak indices used to make it
    '''
    peakIndices = np.array(peakIndices).astype(int)

    #If no peaks, choose random indices to make spectrum
    if len(peakIndices) == 0:
        peakIndices = np.array([0])
        rate = len(data) / float(window) / 1000.
        while peakIndices[-1] < (len(data) - 1):
            prob = np.random.rand()
            currentIndex = peakIndices[-1]
            peakIndices = np.append(
                peakIndices, currentIndex +
                np.ceil(-np.log(prob) / rate * sampleRate).astype(int))
        peakIndices = peakIndices[:-2]
    if len(peakIndices) == 0:
        raise ValueError(
            'makeNoiseSpectrum: input data set is too short for the number of FFT points specified'
        )
    #Baseline subtract noise data
    if (baselineSubtract):
        noiseStream = np.array([])
        for iPeak, peakIndex in enumerate(peakIndices):
            if peakIndex > window + noiseOffsetFromPeak and peakIndex < len(
                    data) + noiseOffsetFromPeak:
                noiseStream = np.append(
                    noiseStream,
                    data[peakIndex - window - noiseOffsetFromPeak:peakIndex -
                         noiseOffsetFromPeak])
        data = data - np.mean(noiseStream)

    #Calculate noise spectra for the defined area before each pulse
    if len(peakIndices) > 2000:
        peakIndices = peakIndices[:2000]
    noiseSpectra = np.zeros((len(peakIndices), len(np.fft.rfftfreq(window))))
    rejectInd = np.array([])
    goodInd = np.array([])
    counter = 0
    for iPeak, peakIndex in enumerate(peakIndices):
        if peakIndex > window + noiseOffsetFromPeak and peakIndex < len(
                data) + noiseOffsetFromPeak:
            noiseData = data[peakIndex - window -
                             noiseOffsetFromPeak:peakIndex -
                             noiseOffsetFromPeak]
            if len(filt) != 0:
                #reject the trace if the filtered noise window still contains pulses
                filteredData = np.convolve(noiseData, filt, mode='same')
                peakDict = tP.detectPulses(filteredData,
                                           nSigmaThreshold=2.,
                                           negDerivLenience=1,
                                           bNegativePulses=True)
                if len(peakDict['peakIndices']) != 0:
                    rejectInd = np.append(rejectInd, int(iPeak))
                    continue
            noiseSpectra[counter] = 4 * window / sampleRate * np.abs(
                np.fft.rfft(noiseData))**2
            goodInd = np.append(goodInd, int(peakIndex))
            counter += 1
        if counter == 500:
            break
    #Keep only the rows that were filled with uncontaminated traces
    noiseSpectra = noiseSpectra[0:counter]
    noiseFreqs = np.fft.rfftfreq(window, 1. / sampleRate)
    if noiseSpectra.shape[0] < 5:
        raise ValueError('makeNoiseSpectrum: not enough spectra to average')

    noiseSpectrum = np.median(noiseSpectra, axis=0)
    noiseSpectrum[0] = noiseSpectrum[1]  #replace the DC bin, which baseline subtraction drives toward zero
    if not np.all(noiseSpectrum > 0):
        raise ValueError('makeNoiseSpectrum: not all noise data > 0')
    if isVerbose:
        print(len(noiseSpectra[:, 0]), 'traces used to make noise spectrum,',
              len(rejectInd), 'cut for pulse contamination')

    return {'spectrum': noiseSpectrum, 'freqs': noiseFreqs, 'indices': goodInd}
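
#Hypothetical usage sketch (illustration only, not from the original file): the
#data and peak positions below are synthetic, chosen just to exercise the call
#and show the returned keys.
fakeData = np.random.randn(200000)                 #synthetic noise trace
fakePeaks = np.arange(2000, 200000, 2000)          #made-up, evenly spaced peak locations
noiseDict = makeNoiseSpectrum(fakeData, peakIndices=fakePeaks, window=800, sampleRate=1e6, isVerbose=True)
#noiseDict['freqs'] and noiseDict['spectrum'] hold the one-sided spectrum;
#noiseDict['indices'] lists the peaks whose preceding traces were used.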
Example #3
    #noiseSpectrumDict['noiseSpectrum'][np.abs(noiseSpectrumDict['noiseFreqs'])>210000]=5e-5
   
    #make matched filter
    matchedFilter=mF.makeMatchedFilter(template, noiseSpectrumDict['noiseSpectrum'], nTaps=50, tempOffs=75)
    #fit an exponential to the middle of the template to estimate its fall time
    coef, _ = opt.curve_fit(lambda x, a, t0: a*np.exp(-x/t0), time[len(time)//5:len(time)*4//5], template[len(time)//5:len(time)*4//5], [-1, 30e-6])
    fallFit = coef[1]
    superMatchedFilter = mF.makeSuperMatchedFilter(template, noiseSpectrumDict['noiseSpectrum'], fallFit, nTaps=50, tempOffs=75)
    print("filters made")
    
    #convolve with filter
    filteredData=np.convolve(rawData,matchedFilter,mode='same') 
    superFilteredData=np.convolve(rawData,superMatchedFilter,mode='same')
    print "data filtered" 
    
    #find peak indices
    peakDict=tP.detectPulses(filteredData, nSigmaThreshold = 3., negDerivLenience = 1, bNegativePulses=False)
    superPeakDict=tP.detectPulses(superFilteredData, nSigmaThreshold = 3., negDerivLenience = 1, bNegativePulses=False)
    print "peaks found"
    
    #find peak amplitudes
    amps=filteredData[peakDict['peakIndices']]
    superAmps=superFilteredData[superPeakDict['peakIndices']]
    print "amplitudes extracted"
    
    #fig=plt.figure()
    #plt.plot(template)
    #plt.hist(amps,100,alpha=.7)
    #plt.hist(superAmps,100,alpha=.7)
    #plt.show()
    
##### Find expected energy resolution of different filters #####