import numpy as np

## `functions` (expReconvolve, fitPsp, pspFunc, pspMaxTime, zeroCrossingEvents, ...)
## and `Fitting` are assumed to be provided by the surrounding analysis package
## (acq4-style helpers); adjust these imports to match your tree.
#from acq4.util import functions
#from acq4.analysis.tools.Fitting import Fitting


def fitEvents(events, startEvent, stopEvent, opts):
    """Fit events[startEvent:stopEvent] with a bounded 'exppulse' model
    (yOffset, t0, riseTau, decayTau, amp, width) and return the fit results."""
    dt = opts['dt']
    origTau = opts['tau']
    multiFit = opts['multiFit']  ## unused here; kept for interface symmetry with processEventFits
    waveform = opts['waveform']
    tvals = opts['tvals']

    dtype = [(n, events[n].dtype) for n in events.dtype.names]
    output = np.empty(len(events), dtype=dtype + [
        ('fitAmplitude', float),
        ('fitTime', float),
        ('fitRiseTau', float),
        ('fitDecayTau', float),
        ('fitWidth', float),
        ('fitError', float),
        ('fitFractionalError', float),
    ])

    ## Not all input events will produce output events; offset keeps track of the difference.
    offset = 0

    outputState = {
        'guesses': [],
        'eventData': [],
        'indexes': [],
        'xVals': [],
        'yVals': [],
    }

    for i in range(startEvent, stopEvent):
        start = events[i]['time']
        sliceLen = events[i]['len']*dt + 100.*dt  ## Ca2+ events are much longer than 50 ms
        if i+1 < len(events):
            ## don't let the slice run into the next event
            nextStart = events[i+1]['time']
            sliceLen = min(sliceLen, nextStart - start)

        guessLen = events[i]['len']*dt
        tau = origTau
        if tau is not None:
            guessLen += tau*2.

        ## Figure out from where to pull waveform data that will be fitted
        startIndex = np.argwhere(tvals >= start)[0][0]
        stopIndex = startIndex + int(sliceLen/dt)
        startIndex -= 10  ## pull baseline data from before the event starts
        eventData = waveform[startIndex:stopIndex]
        times = tvals[startIndex:stopIndex]
        if len(times) < 4:  ## not enough points to fit; skip this one
            offset += 1
            continue

        ## Reconvolve this chunk of the signal if it was previously deconvolved
        if tau is not None:
            eventData = functions.expReconvolve(eventData, tau=tau, dt=dt)

        ## Make guesses as to the shape of the event
        mx = eventData.max()
        mn = eventData.min()
        guessAmp = (mx - mn) * 2  ## fit converges more reliably if we start too large
        guessRise = guessLen/4.
        guessDecay = guessLen/4.
        guessStart = times[10]
        guessWidth = guessLen * 0.75
        guessYOffset = eventData[0]

        ## Fitting to exponential rise * decay.
        ## `guess` (stored in outputState for display) keeps a (t0, yOffset, ...) ordering;
        ## `guessFit` follows the exppulse parameter order expected by the fitter.
        guess = [guessStart, guessYOffset, guessRise, guessDecay, guessAmp, guessWidth]
        guessFit = [guessYOffset, guessStart, guessRise, guessDecay, guessAmp, guessWidth]

        yVals = eventData.view(np.ndarray)

        ## Set bounds for the parameters.
        ## exppulse parameter order: yOffset, t0, tau1, tau2, amp, width
        bounds = [
            (-10, 10),                                                            ## loose bounds on yOffset
            (float(events[i]['time'] - 10*dt), float(events[i]['time'] + 5*dt)),  ## t0 must be near the start point found by event detection
            (0.010, float(opts['riseTauMax'])),                                   ## riseTau must be at least 10 ms
            (0.010, float(opts['decayTauMax'])),                                  ## ditto for decayTau
            (0., float(opts['ampMax'])),                                          ## amp must be positive
            (0, float(events[i]['len']*dt*2)),                                    ## width
        ]

        ## Use Paul's fitting algorithm so that we can put bounds/constraints on the fit params
        fitter = Fitting()
        fitResults = fitter.FitRegion(
            [1], 0, times, yVals, fitPars=guessFit, fitFunc='exppulse',
            bounds=bounds, method='SLSQP', dataType='xy')
        fitParams, xPts, yPts, names = fitResults
        yOffset, t0, tau1, tau2, amp, width = fitParams[0]

        computed = fitter.expPulse(fitParams[0], times)
        diff = yVals - computed
        err = (diff**2).sum()
        fracError = diff.std() / computed.std()

        output[i-offset] = tuple(events[i]) + (amp, t0, tau1, tau2, width) + (err, fracError)

        outputState['guesses'].append(guess)
        outputState['eventData'].append(eventData)
        outputState['indexes'].append(i)
        outputState['xVals'].append(times)
        outputState['yVals'].append(computed)

    if offset > 0:
        output = output[:-offset]

    outputState['output'] = output
    return outputState
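## ---------------------------------------------------------------------------
## Illustrative usage sketch (not part of the processing pipeline): shows the
## structured `events` array and the `opts` keys that fitEvents() reads, based
## on the fields accessed above. The helper name and all numeric values here are
## hypothetical placeholders, and running it still requires the project's
## `functions`/`Fitting` helpers to be importable.
def _example_fitEvents_usage():
    dt = 1e-4
    tvals = np.arange(10000) * dt

    ## a fake deconvolved trace with two bumps, just to exercise the code path
    waveform = np.zeros(len(tvals))
    waveform[2000:2050] += 1.0
    waveform[6000:6080] += 0.5

    ## fitEvents only requires the 'time' and 'len' fields of each event record
    events = np.array(
        [(2000*dt, 50), (6000*dt, 80)],
        dtype=[('time', float), ('len', int)])

    opts = {
        'dt': dt,
        'tau': 0.1,          ## deconvolution time constant; use None if the trace was never deconvolved
        'multiFit': False,
        'waveform': waveform,
        'tvals': tvals,
        'riseTauMax': 1.0,   ## upper bounds applied to the constrained fit
        'decayTauMax': 5.0,
        'ampMax': 100.0,
    }
    result = fitEvents(events, 0, len(events), opts)
    return result['output']  ## record array: original event fields plus the fit columns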
def processData(self, data):
    ## Node-level counterpart of the per-event reconvolution above: re-convolve a
    ## previously deconvolved trace before further processing.
    return functions.expReconvolve(data)
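## ---------------------------------------------------------------------------
## Minimal sketch of what exponential reconvolution does, for readers who don't
## have functions.expReconvolve at hand. It assumes the trace was deconvolved
## under the model d(t) = x(t) + tau * dx/dt; the real implementation may differ
## in integration scheme and normalization, so treat this as a reference only.
def _exp_reconvolve_sketch(deconvolved, tau, dt):
    out = np.empty(len(deconvolved))
    out[0] = deconvolved[0]
    for i in range(1, len(deconvolved)):
        ## integrate dx/dt = (d - x) / tau with a forward-Euler step
        out[i] = out[i-1] + dt * (deconvolved[i-1] - out[i-1]) / tau
    return out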
def processEventFits(events, startEvent, stopEvent, opts):
    """Do the processing work for EventFitter: fit each event in
    events[startEvent:stopEvent] with a PSP-shaped (exponential rise * decay) curve."""
    dt = opts['dt']
    origTau = opts['tau']
    multiFit = opts['multiFit']
    waveform = opts['waveform']
    tvals = opts['tvals']
    nFields = len(events.dtype.fields)  ## currently unused

    dtype = [(n, events[n].dtype) for n in events.dtype.names]
    output = np.empty(len(events), dtype=dtype + [
        ('fitAmplitude', float),
        ('fitTime', float),
        ('fitRiseTau', float),
        ('fitDecayTau', float),
        ('fitTimeToPeak', float),
        ('fitError', float),
        ('fitFractionalError', float),
        ('fitLengthOverDecay', float),
    ])

    ## Not all input events will produce output events; offset keeps track of the difference.
    offset = 0

    outputState = {
        'guesses': [],
        'eventData': [],
        'indexes': [],
        'xVals': [],
        'yVals': [],
    }

    for i in range(startEvent, stopEvent):
        start = events[i]['time']
        sliceLen = dt*300.  ## Ca2+ events are much longer than 50 ms
        if i+1 < len(events):
            nextStart = events[i+1]['time']
            sliceLen = min(sliceLen, nextStart - start)

        guessLen = events[i]['len']*dt
        tau = origTau
        if tau is not None:
            guessLen += tau*2.

        ## the guess length supersedes the default slice length above
        sliceLen = guessLen
        if i+1 < len(events):  ## cut slice back if there is another event coming up
            nextStart = events[i+1]['time']
            sliceLen = min(sliceLen, nextStart - start)

        ## Figure out from where to pull waveform data that will be fitted
        startIndex = np.argwhere(tvals >= start)[0][0]
        stopIndex = startIndex + int(sliceLen/dt)
        eventData = waveform[startIndex:stopIndex]
        times = tvals[startIndex:stopIndex]
        if len(times) < 4:  ## PSP fit requires at least 4 points; skip this one
            offset += 1
            continue

        ## Reconvolve this chunk of the signal if it was previously deconvolved
        if tau is not None:
            eventData = functions.expReconvolve(eventData, tau=tau, dt=dt)

        ## Make guesses as to the shape of the event
        mx = eventData.max()
        mn = eventData.min()
        if mx > -mn:
            peakVal = mx
        else:
            peakVal = mn
        guessAmp = peakVal * 2  ## fit converges more reliably if we start too large
        guessRise = guessLen/4.
        guessDecay = guessLen/2.
        guessStart = times[0]

        zc = functions.zeroCrossingEvents(eventData - (peakVal/3.))

        ## eliminate events going the wrong direction
        if len(zc) > 0:
            if guessAmp > 0:
                zc = zc[zc['peak'] > 0]
            else:
                zc = zc[zc['peak'] < 0]

        ## measure properties of the largest event within 10 ms of start
        zc = zc[zc['index'] < 10e-3/dt]
        if len(zc) > 0:
            if guessAmp > 0:
                zcInd = np.argmax(zc['sum'])  ## the largest event in this clip
            else:
                zcInd = np.argmin(zc['sum'])  ## the largest event in this clip
            zcEv = zc[zcInd]
            guessRise = .1e-3
            guessDecay = dt*zcEv['len'] * 0.8
            guessStart = times[0] + dt*zcEv['index'] - guessRise*3.

            ## cull down the data set if possible
            cullLen = zcEv['index'] + zcEv['len']*3
            if len(eventData) > cullLen:
                eventData = eventData[:cullLen]
                times = times[:cullLen]

        ## Fitting to exponential rise * decay;
        ## parameters are [amplitude, x-offset, rise tau, fall tau]
        guess = [guessAmp, guessStart, guessRise, guessDecay]
        bounds = [
            sorted((guessAmp * 0.1, guessAmp)),
            sorted((guessStart - min(guessRise, 0.01), guessStart + guessRise*2)),
            sorted((dt*0.5, guessDecay)),
            sorted((dt*0.5, guessDecay * 50.)),
        ]

        yVals = eventData.view(np.ndarray)
        fit = functions.fitPsp(times, yVals, guess=guess, bounds=bounds, multiFit=multiFit)
        computed = functions.pspFunc(fit, times)
        peakTime = functions.pspMaxTime(fit[2], fit[3])

        diff = yVals - computed
        err = (diff**2).sum()
        fracError = diff.std() / computed.std()
        lengthOverDecay = (times[-1] - fit[1]) / fit[3]  ## ratio of (length of data that was fit) : (decay constant)

        output[i-offset] = tuple(events[i]) + tuple(fit) + (peakTime, err, fracError, lengthOverDecay)

        outputState['guesses'].append(guess)
        outputState['eventData'].append(eventData)
        outputState['indexes'].append(i)
        outputState['xVals'].append(times)
        outputState['yVals'].append(computed)

    if offset > 0:
        output = output[:-offset]

    outputState['output'] = output
    return outputState
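## ---------------------------------------------------------------------------
## Illustrative sketch of the 'exponential rise * decay' PSP shape referenced in
## the guess/bounds comments above (parameters [amp, xOffset, riseTau, decayTau]).
## functions.fitPsp / functions.pspFunc may normalize the curve differently, so
## treat this as a shape reference rather than the exact function being fit.
def _psp_shape_sketch(params, t):
    amp, xOffset, riseTau, decayTau = params
    out = np.zeros(len(t))
    mask = t > xOffset
    tRel = t[mask] - xOffset
    out[mask] = amp * (1.0 - np.exp(-tRel / riseTau)) * np.exp(-tRel / decayTau)
    return out

## Under this shape the peak falls at xOffset + riseTau * log(1 + decayTau/riseTau),
## which is presumably what functions.pspMaxTime(fit[2], fit[3]) returns above.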