Example No. 1
	def run(self, event=None):
		#look up the selected spike detector by the name shown in the choice control
		i = self.chooseMode1.GetSelection()
		sdf = self.chooseMode1.GetString(i)
		sdf = evtdet.DETECTORS[sdf]
		#parse the detector parameters typed into the text field and normalize them to a tuple
		#(eval accepts any expression typed in the field; see the note after this example)
		pars = self.discPars1.GetValue()
		pars = eval(pars)
		if isinstance(pars, (float, int)):
			pars = (pars,)
		else:
			pars = tuple(pars)
		#run the detector on the trigger channel, replacing any earlier result at Dpath/spikes1
		sel = (None, [self.preferences["Trigger Channel"]], None)
		path = self.preferences["Dpath"]+'/spikes1'
		if self.dat.getSubData(path):
			self.dat.getSubData(path).sever()
		sdf(self.dat, sel, pars, path)
		#cut a window around each detected spike on the trigger channel...
		eventCondition(self.dat, dpath=path, select=sel, lead=self.preferences["Template Lead (ms)"], length=self.preferences["Template Length"], newpath=self.preferences["Dpath"]+'/evts1', milliseconds=True)
		#...and the same windows taken from the output channel
		sel = (None, [self.preferences["Output Channel"]], None)
		eventCondition(self.dat, dpath=path, select=sel, lead=self.preferences["Template Lead (ms)"], length=self.preferences["Template Length"], newpath=self.preferences["Dpath"]+'/evts2', milliseconds=True)
		#display both sets of conditioned events
		self.showEvts(self.graph1, self.preferences["Dpath"]+'/evts1')
		self.showPV(self.graph1, 0, self.preferences["Dpath"]+'/evts1')
		self.showEvts(self.graph2, self.preferences["Dpath"]+'/evts2')
		self.showPV(self.graph2, 1, self.preferences["Dpath"]+'/evts2')
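The eval call above will execute anything typed into the parameter field. Below is a minimal sketch of a stricter parser for the same normalize-to-tuple idea; it substitutes ast.literal_eval for the eval used in the example, and the helper name is hypothetical, not part of the original code.

import ast

def parse_detector_pars(text):
	#hypothetical helper: accept '0.5', '(0.5, 3)' or '[0.5, 3]' but refuse arbitrary expressions
	pars = ast.literal_eval(text)
	if isinstance(pars, (int, float)):
		return (pars,)
	return tuple(pars)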
Example No. 2
import gicspikesort.conditioning as gicc  #eventCondition lives here, per the docstring below

def conditionEach(ds, dpathFrom='/', dpathEventsToConditionWith='/evts', dpathStimulus='/avstims'):
	''' 
	Applies gicspikesort.conditioning.eventCondition to all timeseries and events in ds
	'''
	dod = ds.getHierarchy()
	#column 1 of the event data holds the unit number assigned to each spike
	numberNames = dod[dpathFrom].getData()[:,1]

	for k in range(int(max(numberNames))+1):
		#print(k)
	
		#separate each unit to its own channel
		mm = list(numberNames).index(k)
		tmpName = '/'+ds.getLabels()[k]
		hdr = {'SampleType':'events','SamplesPerSecond':5e4, 'Labels':ds.getLabels()[k]}
		dta = dod[dpathFrom].getData()[mm:mm+list(numberNames).count(k),0]
		ds.createSubData(tmpName,dta, head=hdr)	
	
		#event condition
		newName='/sorted_'+ds.getLabels()[k]
		gicc.eventCondition(ds, dpathEventsToConditionWith, tmpName, 5, 35, newName, False)
	
		#remove the new channel
		dod = ds.getHierarchy()
		dod[tmpName].sever()	
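A minimal usage sketch for conditionEach, assuming ds is an already-opened MIEN data element whose root holds sorted spike events and whose '/evts' subdata holds the conditioning events; how ds is loaded is not shown in these examples.

#hypothetical: ds obtained elsewhere (GUI, file reader, ...)
conditionEach(ds, dpathFrom='/', dpathEventsToConditionWith='/evts')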
Example No. 3
def tmpWork(ds, prefChans=None):
	'''
	Just the day's workflow; saves keystrokes.
	'''
	
	dataDict = ds.getHierarchy()	
	
	#set prefChans: default to the last four channels as the stimulus channels
	if not prefChans:
		prefChans = range(ds.shape()[1]-4,ds.shape()[1])
		print('Selecting stimulus channels')
		#print(prefChans)

	#get a marker of the base channel starting each stimulus set
	_getStims(ds,prefChans, .006,.003)

	#remove the last element from isolateSpikes WIP
	#events=dataDict['/tmpname'].getData()
	#events=events[0:len(events)/2-1,:]
	#dataDict['/tmpname'].setData(events)
	#print(dataDict['/tmpname'].shape())

	#trigger on these marks to get the average stimulus
	gicc.eventCondition(ds,'/evts', '/', 40.0, 80.0, '/avstim', milliseconds=True)
	
	#remove the channel with multiple marks per stimulus run	
	dataDict = ds.getHierarchy()

	repNumber=dataDict['/avstim'].header()['Reps']
	
	#print(repNumber)	
	
	#average just the spikes and the base and tip stim
	average = dataDict['/avstim'].getData() 
	newdata = average[:,0:repNumber-1].mean(1)
	johnsData=average[:,0:repNumber-1]

	#try to find the base and stimulus channels, or arbitrarily set base and stim channels
	labelNames = ds.getLabels()
	if 'nozzle 1 (No Comment)' in labelNames:
		basenum = labelNames.index('nozzle 1 (No Comment)')
		tipnum = labelNames.index('nozzle 2 (No Comment)')
	else:
		#base is the first stimulus channel, tip the second (see the dividebyStims assumptions)
		basenum = prefChans[0]
		tipnum = prefChans[1]

	for m in range(basenum,basenum+4):
		nd2 = average[:,repNumber*m:repNumber*(m+1)-1].mean(1)
		newdata = n.column_stack((newdata,nd2))

	#nd2=average[:,repNumber*basenum:repNumber*(basenum+1)-1].mean(1)
	#newdata=n.column_stack((newdata,nd2))
	#nd2=average[:,repNumber*(basenum+1):repNumber*(basenum+2)-1].mean(1)
	#newdata=n.column_stack((newdata,nd2))

	hdr = {'SampleType':'timeseries','SamplesPerSecond':ds.header()['SamplesPerSecond']}
	ds.datinit(newdata,hdr)
	dataDict['/avstim'].sever()
	ds.createSubData('/johns',johnsData, head=hdr)
	#dataDict['/'].sever()

	#remove the non stimulus channels from the conditioned data
	#evtsData = dataDict['/evts'].getData()
	#evtsNumber = len(evtsData)
	#evtsNumber=evtsNumber*10
	#resizeData=dataDict['/avstim'].getData()[:,evtsNumber:]
	#dataDict['/avstim'].sever()
	#hdr = dataDict['/avstim'].header()
	#ds.createSubData('/avstim',resizeData,hdr)
	   
	#save the new file
	newFName = ds.xpath(True)[0].fileinformation.get('filename','NoNameGiven')
	newFName = newFName[0:newFName.rfind('.')] + '.mat'
	IO.write(ds,newFName,newdoc=True)
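A minimal usage sketch for tmpWork, assuming ds is the day's recording already opened as a MIEN data element (hypothetical here); with no prefChans the function falls back to the last four channels.

tmpWork(ds)                              #stimulus channels picked automatically
tmpWork(ds, prefChans=[9, 10, 11, 12])   #or name the stimulus channels explicitly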
Example No. 4
#assumes the enclosing module's usual imports: numpy (as n and via 'from numpy import *'), os, gicc, and the MIEN helpers called below
def dividebyStims(ds, prefChans=[9,10,11,12], thresh1=.5, thresh2=.3, path=None, stimchantag=None, newbasename=''):
		'''
		Find all the stimuli and sort each run into a limited number of classes, then put each class together in a new file
		Assumptions:	1) On repeated stimuli we're going faster than 30Hz
						2) We have at least 200ms between stimuli repeats
						3) Recording doesn't start in the middle of a stimulus
						4) Base is the first stimulus channel, tip is second
		'''
		#stimoptions=array([-9.,-7.,-5.,-3.,-1.,0.,1.,3.,5.,7.,9.])
		#stimoptions=array([-60,-55,-50,-45,-40,40,45,50,55,60])
		#stimoptions=array([-115,-100,-85,-60,-45,-30,-15,15,30,45,60,85,100,115])
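		#winl/winh: window bounds in samples, 100 ms before and 200 ms after each stimulus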
		winl = 100*ds.header()['SamplesPerSecond']/1e3
		winh = 200*ds.header()['SamplesPerSecond']/1e3

		if stimchantag:
			jnk, prefChans = _stims_from_strng(ds,stimchantag)
		else:
			_findStims(ds,prefChans,thresh1,thresh2,pth=path)
		dataDict = ds.getHierarchy()
		tips = dataDict['/tmpname'].getData()
		#print tips, type(tips), shape(tips)

		#print len(tips), tips
		cheatvar = 0
		if not len(tips): #no stimuli detected on the requested channels; the rest of the pipeline still expects resized data (the resize happens somewhere inside this function and is still skipped in this case), so drop in a dummy stimulus table and bail out
			tips = array([[0.,0.],[0.,1.],[0.,2.],[0.,3.],[5.,0.],[5.,1.],[5.,2.],[5.,3.]])
			cheatvar = 1
			ds.getSubData('/tmpname').sever()
			ds.createSubData('/tmpname',data=tips,head={})
			return True
			#print tips, type(tips), shape(tips)
	#		if newbasename == '':
	#			newfname = ds.xpath(True)[0].fileinformation.get('filename','nonamegiven')
	#		else:
	#			newfname = newbasename
	#		newfname = os.path.splitext(newfname)[0] + 'pt1.mien'
	#		print 'No stimuli found, saving only {0}.'.format(newfname)
	#		save_wo_leak(ds, newfname)
	#		return
	
		#take care of uneven numbers of stims on various channels
		runlengths = [len(tips[tips[:,1]==m,0]) for m in list(set(tips[:,1]))]
		shortchan = argmin(runlengths)
		others = list(set(range(len(runlengths)))-set([shortchan]))
		longchan=others[0]
		if not runlengths[shortchan]==runlengths[longchan]:
			frontdif = abs(tips[tips[:,1]==shortchan,0][0] - tips[tips[:,1]==longchan,0][0])
			enddif = abs(tips[tips[:,1]==shortchan,0][-1] - tips[tips[:,1]==longchan,0][-1])
			badend = int(where(frontdif>enddif,0,-1))
			tt=list(tips)
			for m in range(len(others)-1,-1,-1):
				tt.pop(where(tips[:,1]==others[m])[0][badend])
			tips = array(tt)

		#stack the detected event times for each stimulus channel into the columns of 'stims'
		stims = tips[where(tips[:,1]==0)[0],0]
		print('bb', stims, range(1,len(prefChans)))
		for m in range(1,len(prefChans)):
			addons = tips[where(tips[:,1]==m)[0],0]
			if not addons.shape[0] in stims.shape: #so far only fixed when one of addons not equal to number 2 is the wrong size
				if addons.shape[0]>max(stims.shape):
					raise NameError('do not know how to do')
				else:
					while addons.shape[0] != max(stims.shape):
						if addons[0]-stims[0,0] > addons[-1]-stims[0,-1]: #the double part reference to stims eliminates the chance to catch addons number 2 wrong size
							addons = hstack((0,addons))	
						else:
							addons = hstack((addons,0))	
					#return stims, addons
			stims = vstack((stims, addons))	
		stims = transpose(stims)
		#print 'aa', stims
			#datadict = ds.gethierarchy()
			#tips = datadict['/tmpname1'].getdata()
			#bases = datadict['/tmpname0'].getdata()
			#ch3 = datadict['/tmpname2'].getdata()
			#tips2 = datadict['/tmpname3'].getdata()
			#stims = n.hstack((bases,tips,ch3,tips2))
		stims = _reformatted_stims(stims, ds.header()['SamplesPerSecond'])
		#print 'vv', shape(stims)
#		stimoptions = _unique_stims(stims)
#		stimoptions, newstim = _translate_stims(stimoptions)
		#print 'hello', stims, stims.shape, type(stims), '\n\n\n', fubaru
		stims, newstim = _translate_stims(stims)	
		#print 'cc', shape(stims), newstim
#		stimoptions=array(stimoptions)
		events = _unique_events_from_stims(ds)
		if min(events)[0] < 30/1e3*ds.header()['SamplesPerSecond']:
			print('this recording may be starting in the middle of a stimulus and subsequent analysis will get wonky - sep.dsp.dividebystims')
		stimlist = list(stims)
		if cheatvar:
			stimlist = [0]
		stimoptions=array(list(set(stimlist)))

		#clean up after myself  
		dataDict['/tmpname'].sever()
		#datadict['/tmpnametot'].sever()
		#datadict['/tmpname0'].sever()
		#datadict['/tmpname1'].sever()
		#datadict['/tmpname2'].sever()
		#datadict['/tmpname3'].sever()
		#datadict['/evts'].sever()

		#see if spikes are already sorted in this file and need to be distributed; this works without sorted spikes, but some way to copy '/hidden/stimulus' along with them is still needed
		tempsin=ds.getElements()
		tempsin=[t for t in tempsin if t.name().startswith("spikesort_")]
		nams=[]
		temps=[]
		for m in range(len(tempsin)):#for each unit...
				temps.append(tempsin[m].getElements(attribs={'SampleType' :'events'},depth=1)[0])
				nams.append(tempsin[m].name())
				temps[m]=temps[m].getData()#get the event times off all spikes for a given unit

		#pull out chunks for each repeat, paste them together, save new file
		ds2=ds.clone(False)
		channum=ds.shape()[1]
		hdr = ds.header()
		hd2 = {'SamplesPerSecond':50000.0,'StartTime':0.0,'SampleType':'events'} #header for the per-class event subdata written below
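		#for each distinct stimulus class: gather the matching data chunks and any sorted spikes, then write them to their own file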
		for m in range(len(stimoptions)):
				#print 'right here', stimoptions, m, events, stimlist, array(stimlist), stimoptions[m], len(events[array(stimlist)==stimoptions[m]])
				togets=events[array(stimlist)==stimoptions[m]]#get a list of all stimuli matching this stimoption
				data = n.zeros((1,channum))#preallocate
				locevts = []#preallocate 
				for r in range(len(tempsin)):
						locevts.append([])#preallocate
				for r in range(len(togets)):#for each matching stimulus event
						data=n.concatenate((data,dataDict['/'].getData()[int(togets[r]-winl):int(togets[r]+winh),:]))#append the chunk winl before the stimulus to winh after the stimulus to data
						for q in range(len(temps)):#for each unit...
								under=set(temps[q][temps[q]>togets[r]-winl])# find all the 'temps' in the appropriate within winl to winh of the event 
								over=set(temps[q][temps[q]<togets[r]+winh])
								ok=array(list(under.intersection(over)))
								for t in range(len(ok)):#for each individual spike of a certian unit...
										locevts[q].append(ok[t]-int(togets[r])+r*(winl+winh)+winl)#add each approved spike with the appropriate shift for how far along in the recording we are
				ds2.datinit(data,hdr)
				ds2.setAttrib('StartTime',0)
				for q in range(len(temps)):
						hd2['Labels']=nams[q]
						ds2.createSubData('/'+nams[q],data=locevts[q],head=hd2)
				if newbasename == '':
					newfname = ds.xpath(True)[0].fileinformation.get('filename','nonamegiven')
				else:
					newfname = newbasename
				if newfname is None:
					print('could not read filename')
					newfname = os.path.expanduser('~') + '/Desktop/tempfile.mien'
				newfname = os.path.splitext(newfname)[0] + 'pt' + str(m+1) +'.mien'
				#print newfname
				save_wo_leak(ds2,newfname)
				if len(temps): #put this in standard format if the sorted spikes are already there
						ds2.createSubData('/evts',data=array(range(len(togets)))*(winl+winh)+winl,head=hd2)

						#get the data I want
						gicc.eventCondition(ds2,'/evts', '/', 120.0, 160.0, '/avstim', milliseconds=True)
						dataDict2 = ds2.getHierarchy()
						repNumber = dataDict2['/avstim'].header()['Reps']
						average = dataDict2['/avstim'].getData()
						newdata = average[:,0:repNumber-1].mean(1)

						#also get raw traces of the full recording
						fulldata = average[:,0:repNumber-1]
						for q in range(1,dataDict2['/'].shape()[1]):
								fulldata = n.column_stack((fulldata,average[:,repNumber*q:repNumber*(q+1)-1]))

						#get the stimulus channels I want
						ms2pts=ds.header()['SamplesPerSecond']*1e-3
						for q in range(len(prefChans)):
								upper=int(togets[0]-120*ms2pts)
								lower=int(togets[0]+40*ms2pts)
								nd2 = getSelection(ds,(path,prefChans[q],None))[upper:lower]
								newdata=n.column_stack((newdata,nd2))

						#write over ds2 so it doesn't have needless data as well
						avstim=dataDict2['/avstim'].getData()
						hd3=dataDict2['/avstim'].header()
						ds2.clearAll()
						hdr = {'SampleType':'timeseries','SamplesPerSecond':ds.header()['SamplesPerSecond']}
						ds2.datinit(newdata,hdr)
						ds2.createSubData('/evts',data=array(range(len(togets)))*(winl+winh)+winl,head=hd2)
						ds2.createSubData('/avstim',avstim,head=hd3)
						ds2.createSubData('/fullstim',fulldata,head=hd3)

						#add the spikes in a single piece of data
						locdata=zeros((2,1))
						for q in range(len(temps)):
								indata=vstack((locevts[q],q*ones((1,len(locevts[q])))))
								locdata=hstack((locdata,indata))
						locdata=locdata[:,1:]
						hd3=hd2.copy()
						hd3['SampleType']='labeledevents'
						hd3['Labels']=nams
						ds2.createSubData('/spks',locdata.transpose(),head=hd3)

						nFN2 = newfname[:-5]+'spks.mat' #strip '.mien' and tag the spikes-only version
						#print nFN2
						save_wo_leak(ds2,nFN2)
				ds2.clearAll()
		ds2.sever()
		ds.sever()
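A minimal usage sketch for dividebyStims, assuming ds is an open recording with the stimulus signals on channels 9-12 (the defaults above); 'session01' is a hypothetical base name, and each stimulus class is written to its own <newbasename>pt<N>.mien file. Note that the function severs ds when it finishes.

dividebyStims(ds, prefChans=[9, 10, 11, 12], thresh1=.5, thresh2=.3, newbasename='session01')
#writes session01pt1.mien, session01pt2.mien, ...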