Example #1
def procFile(fn, rot, nfn):
	f = open(fn, 'rb')
	h = getInfo(f)
	nframes=h['nframes']
	print "converting %s (%i frames)" % (fn, nframes)
	if nframes<1000:
		frames = arange(nframes)
	else:
		frames = None
	dat = chunkOFrames(fn, frames)
	me = dat.mean(3)[...,0]
	out = []
	for i in range(nframes):
		frame = readFrameN(f, h, i) - me
		if rot is not None:
			frame, junk = imrotate(frame, 1, rot)
		x = frame.sum(1)
		if options['w']>1:
			x=convolve(ones(options['w']), x, mode='same')
		out.append(argmax(x))
		if not i%500:
			print "... frame %i" % i
	out = array(out, float32)
	dat = mdat.newData(out, {'SampleType':'timeseries', 'SamplesPerSecond':1.0/h['timescale']})
	doc = nmpml.blankDocument()
	doc.newElement(dat)
	io.write(doc, nfn)	
Example #2
def sortByStimulus(doc):
	stim = doc.getElements('Data', {'SampleType':'timeseries'})
	stim = [s for s in stim if s.name().startswith('Call')]
	traces = doc.getElements('Data', {'SampleType':'labeledevents'})
	traces = [s for s in traces if s.name().startswith('Trace')]
	ndoc = nmp.blankDocument()
	for s in stim:
		withdrug = []
		without = []
		for t in traces:
			if t.getTypeRef('Data') and t.getTypeRef('Data')[0].target() == s:
				nt = t.clone()
				if nt.noData():
					nt.datinit(zeros((0,2)))
				nt.setAttrib('Name', '%s%s%s' % (t.container.container.name(), t.container.name(), t.name()))
				resample(nt, s.fs())
				nt.setAttrib('StartTime', s.start() - t.attrib('stim0_delay')/1000.0)
				if t.attrib('Drug'):
					withdrug.append(nt)
				else:
					without.append(nt)
		ns = s.clone()
		if withdrug:
			withdrug = _stackLE(withdrug)
			withdrug.setAttrib('Name', 'BIC')
			ns.newElement(withdrug)
		if without:
			without = _stackLE(without)
			without.setAttrib('Name', 'NoBIC')
			ns.newElement(without)
		ndoc.newElement(ns)
	return ndoc
Example #3
def sum_stack(fn,sw):
	'''Measures relative illumination between frames. Sums the values of each frame, divides by the number of pixels, and removes the mean.'''
	dat, fs = retrieveData(fn[0],sw)
	nframes = dat.shape[3]
	print('processing image stack: %ix%i, %i frames ...' % (dat.shape[0], dat.shape[1], nframes))
	s = zeros(nframes)
	for k in arange(nframes):
		s[k]=sum(dat[:,:,0,k])
	numpix = float32(dat.shape[0]*dat.shape[1])
	s = s/numpix
	s -= s.mean()
	if len(s.shape)==2:
		s=s.reshape(-1,)
	h = nmpdat.newHeader(fs=fs)
	nd = nmpdat.newData(s, h)
	doc = nmp.blankDocument()
	doc.newElement(nd)
	if sw.has_key('dir'):
		a = io.write(doc,sw['dir'] + '/' + sw['sum_file'])
	else:	
		a = io.write(doc,sw['sum_file'])
	if a:
		print "%s successfully written." % sw['sum_file']
	else:	
		print "%s failed to write." % sw['sum_file']
	
	return s, fs
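A minimal usage sketch for sum_stack, assuming it and retrieveData are importable from the module above; the stack file name, output directory, and switch values are illustrative, not taken from the source:

	# sum an image stack and save the per-frame illumination trace
	switches = {'dir': '/tmp/out', 'sum_file': 'illumination_sum.mdat'}
	s, fs = sum_stack(['stack_001.tif'], switches)
	print('mean-subtracted sums for %i frames at %g samples/sec' % (s.shape[0], fs))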
Example #4
def knit(dname):
	if os.path.isfile(os.path.join(dname, 'concat_ex.mdat')):
		os.unlink(os.path.join(dname, 'concat_ex.mdat'))
	if os.path.isfile(os.path.join(dname, 'concat.mdat')):
		os.unlink(os.path.join(dname, 'concat.mdat'))		
	print("=== processing directory %s ===" % dname)
	dat_all = []
	dat_ex = []
	mf_allch = []
	mdat = [f for f in os.listdir(dname) if f.endswith("_ts.mdat")]
	date = mdat[0][:10]
	if all([f[:10] == date for f in mdat]):
		if os.path.isfile(os.path.join(dname, date+'concat_ex.mdat')):
			os.unlink(os.path.join(dname, date+'concat_ex.mdat'))
		if os.path.isfile(os.path.join(dname, date+'concat.mdat')):
			os.unlink(os.path.join(dname, date+'concat.mdat'))		
	else:
		print "Multiple experiments present -- aborting. Put separate experiments in different folders."
		return None	
	bin = [f for f in os.listdir(dname) if f.endswith(".bin")]
	for f in mdat:
		ff1=os.path.join(dname, f)
		f2 = getMatch(f[:-8], bin)
		if not f2:
			print("can't match %s" % (f,))
			continue
		ff2 = os.path.join(dname, f2)
		print("adding file pair %s, %s" % (f, f2))
		dat1 = io.read(ff1).getElements("Data")[0]
		dat2 = io.read(ff2).getElements("Data")[0]
		dat2 = cropMicroflown(dat2)
		# crpd = dat2.getData()
		# ds = miendata.newData(crpd, {'SampleType':'timeseries', 'SamplesPerSecond':10000})
		# doc = nmpml.blankDocument()
		# doc.newElement(ds)
		# io.write(doc, os.path.join(dname, 'crpd.mdat'))
		resample(dat1, 1000)
		resample(dat2, 1000)
		dat1 = dat1.getData()
		dat2 = dat2.getData()[:,2]
		if dat1.shape[0] < dat2.shape[0]:
			dat2 = dat2[:dat1.shape[0]]
		elif dat1.shape[0] > dat2.shape[0]:
			dat1 = dat1[:dat2.shape[0]]
		dat1 -=  dat1.mean()
		dat2 -= dat2.mean()
		dd = column_stack([dat1, dat2])
		dat_all.append(dd)
		if not any([q in f.lower() for q in EXCLUDE]):
			dat_ex.append(dd)
	dat = row_stack(dat_all)
	ds = miendata.newData(dat, {'SampleType':'timeseries', 'SamplesPerSecond':1000})
	doc = nmpml.blankDocument()
	doc.newElement(ds)
	io.write(doc, os.path.join(dname, date+'concat.mdat'))
	if len(dat_ex) < len(dat_all):
		dat = row_stack(dat_ex)
		ds.datinit(dat)
		io.write(doc, os.path.join(dname, date+'concat_ex.mdat'))
Example #5
def writeFile(f,fname='TestMe',sps=None,start=0):
	ndoc = nmpml.blankDocument()
	if sps:
		tf = mdat.newData(f, {'Name':fname, 'SampleType':'timeseries', 'SamplesPerSecond':sps, "StartTime":start}) 
	else:			
		tf = mdat.newData(f, {'Name':fname, 'SampleType':'function'}) 
	ndoc.newElement(tf)
	io.write(ndoc, fname+".mdat")		
Example #6
def renameFids(lfn):
    """First argument should be a command "rn", "col" or "sep". These functions were used in creation of the standard fiducials. 
	rn calls "guessFiducialNames (from ccbcv align) to attempt to name the fiducial lines in a file as sagital, coronal, and transverse, rather than the arbitrary names they may have. col collects the fiducials in the list of arguments into a single "combined fiducials" file. sep opperates on a file generated by col (the argument list must have exactly one file name after the command sep), and splits this into files containing xhair, transverse, sagital, and coronal lines.
	"""
    import ccbcv.align as al
    import mien.parsers.nmpml as nmp

    if lfn[1] == "rn":
        for n in lfn[2:]:
            if "_renamed" in n:
                continue
            if "_fubar" in n:
                continue
            print (n)
            nn, ext = os.path.splitext(n)
            nn = nn + "_renamed" + ext
            doc = io.read(n)
            try:
                al.guessFiducialNames(doc)
                io.write(doc, nn)
            except:
                print ("failed")
                raise
    elif lfn[1] == "col":
        ndoc = nmp.blankDocument()
        for n in lfn[2:]:
            doc = io.read(n)
            els = []
            for e in doc.elements:
                if e.name() in ["xhair", "transverse", "sagital", "coronal"]:
                    ne = e.clone()
                    snum = aname.match(n)
                    nn = ne.name() + "_" + "_".join(snum.groups())
                    ne.setName(nn)
                    ndoc.newElement(ne)
        io.write(ndoc, "combined_fiducials.nmpml")
    elif lfn[1] == "sep":
        cf = io.read(lfn[2])
        for n in ["xhair", "transverse", "sagital", "coronal"]:
            els = [e for e in cf.elements if e.name().startswith(n)]
            ndoc = nmp.blankDocument()
            for e in els:
                ndoc.newElement(e)
            nn = n + "_fiducials.nmpml"
            io.write(ndoc, nn)
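A usage sketch for renameFids. It assumes the module's io object and the aname regular expression (used by the "col" branch to extract a specimen number from the file name) are defined, and that the file names shown, which are illustrative, match that expression:

	# rename fiducial lines in two files, then collect and re-split them
	renameFids(['script', 'rn', 'prep01.nmpml', 'prep02.nmpml'])
	renameFids(['script', 'col', 'prep01_renamed.nmpml', 'prep02_renamed.nmpml'])
	renameFids(['script', 'sep', 'combined_fiducials.nmpml'])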
Example #7
def save_wo_leak(ds,name):
	'''
	Saves data in MIEN formats without the memory leaks that result from calling the MIEN write function directly.
	'''
	import mien.parsers.fileIO as IO
	from mien.parsers.nmpml import blankDocument
	doc = blankDocument()
	doc.newElement(ds)
	IO.write(doc, name)
	ds.sever()
	doc.sever()
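A usage sketch for save_wo_leak, assuming numpy's zeros and the newData constructor used in the other examples (mdat.newData) are in scope; the attribute values are illustrative:

	# build a one-second, 1 kHz timeseries of zeros and write it to disk
	ds = mdat.newData(zeros(1000), {'Name': 'blank', 'SampleType': 'timeseries', 'SamplesPerSecond': 1000.0})
	save_wo_leak(ds, 'blank.mdat')
	# after the call both ds and the temporary document have been severed,
	# so no references to the written tree remain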
Example #8
def writeFile(f, sps, fname='CubicFunctions', start=0):
    ndoc = nmpml.blankDocument()
    tf = mdat.newData(
        f, {
            'Name': fname,
            'SampleType': 'timeseries',
            'SamplesPerSecond': sps,
            "StartTime": start
        })
    ndoc.newElement(tf)
    io.write(ndoc, fname + ".mdat")
Example #9
def save_wo_leak(ds,name):
    '''
    Saves data in MIEN formats without the memory leaks that result from calling the MIEN write function directly.
    '''
    [ds] = check_inputs([ds],[['Data','list','ndarray']])
    name = check_fname(name)
    from mien.parsers.fileIO import write
    from mien.parsers.nmpml import blankDocument
    doc = blankDocument()
    doc.newElement(ds)
    write(doc, name)
    ds.sever()
    doc.sever()
Example #10
def gmms2data(lfn):
    for fn in lfn:
        doc = io.read(fn)
        doc2 = blankDocument()
        gmms = doc.getElements("MienBlock", {"Function": "ccbcv.gmm.gmm"})
        for g in gmms:
            atr = g.getInheritedAttributes()
            pars = g.getArguments()
            weights = array(pars["weights"])
            means = reshape(array(pars["means"]), (weights.shape[0], -1))
            covs = reshape(array(pars["covs"]), (means.shape[0], means.shape[1], means.shape[1]))
            doc2.newElement(_gmm2dat(atr, weights, means, covs))
        io.write(doc2, fn + ".mat")
def allTFs(fnames):
	ndoc = nmpml.blankDocument()
	for fname in fnames:
		doc = io.read(fname)
		ds = doc.getElements("Data", depth=1)[0]
		bn=os.path.splitext(fname)[0]
		tf = tffmax(ds, False)
		if not tf:
			continue
		if fname.startswith("2009") or fname.startswith("2010"):
			tf.data[:,2]+=pi
			tf.data[:,2] = tf.data[:,2] -  ( 2*pi*(tf.data[:,2]>pi))
		tf.setName(bn)
		ndoc.newElement(tf)
	io.write(ndoc, "allTFs.mdat")
def constructTFs(fname):
	if fname.endswith("_tf.mdat"):
		return
	print("====%s====" % fname)
	doc = io.read(fname)
	ds = doc.getElements("Data", depth=1)[0]
	bn=os.path.splitext(fname)[0]
	ndoc = nmpml.blankDocument()
	for mn in tfmethods:
		tf = tfmethods[mn](ds)
		if not tf:
			continue
		tf.setName(mn)
		ndoc.newElement(tf)
	ofn = bn + "_tf.mdat"
	io.write(ndoc, ofn)
Example #13
def allTFsRs(fnames):
	ndoc = nmpml.blankDocument()
	for fname in fnames:
		doc = io.read(fname)
		ds = doc.getElements("Data", depth=1)[0]
		bn=os.path.splitext(fname)[0]
		tf = tffmax(ds, False)
		if not tf:
			continue
		tf = tf.getData()
		if fname.startswith("2009") or fname.startswith("2010"):
			tf[:,2]+=pi
			tf[:,2] = tf[:,2] -  ( 2*pi*(tf[:,2]>pi))
		tf = row_stack( [array([[0,0,0]]), tf, array([[250,tf[-1,1],tf[-1,2]]])])
		tf = uniformsample(tf, 1.0)
		tf = mdat.newData(tf, {'Name':bn, 'SampleType':'timeseries', 'SamplesPerSecond':1.0, "StartTime":0})
		ndoc.newElement(tf)
	io.write(ndoc, "allTFsResamp.mdat")		
Example #14
File: base.py Project: gic888/MIEN
	def newDoc(self, doc=None):
		if self.document and doc==self.document:
			return
		try:	
			self.document.sever()
			del(self.document)
		except:
			pass
		if doc:
			self.document = doc
		else:	
			self.document = blankDocument()
		self.document._owner=self
		self.document._guiinfo={}
		for e in self.document.getElements():
			e._guiinfo={}
		self.resolveElemRefs()
		self.onNewDoc()
Example #15
def getTests(dsheet=None):
	cells = getDsheet(dsheet)
	files = {}
	for c in cells:
		etrack = c[ mouseNoLab]
		mouse = numOrRan(etrack)[0]
		dfpath = os.path.join(basedir, pathprefix+str(mouse), pathprefix +etrack)
		pstpath = os.path.join(dfpath, pathprefix + etrack + '.pst')
		if os.path.isfile(pstpath):
			print "found data for cell %s" % c[cellIdLab]
			if not pstpath in files:
				files[pstpath] = []
			files[pstpath].append(c)
		else:
			print "No data for %s, %s" % (c[cellIdLab], pstpath)
	doc = nmp.blankDocument()		
	for fn in files:
		pstmeta, tests = parsePST.parse(fn)
		pstmeta[1] = 'Date: ' +  pstmeta[1]
		for c in files[fn]:
			dat = nmp.addPath(doc, "/Data:Cell%s" % c[cellIdLab])
			dat.setAttrib('SampleType', "group")
			for k in c:
				safek = k.replace('#','Num').replace(' ', '_').replace('+', 'and').replace('?', '')
				dat.setAttrib(safek, c[k])
			dat.setAttrib('ExperimentMetaData', pstmeta)
			dat.setAttrib('RawTraceFilename', fn[:-4] + '.raw')
			for i, t in enumerate(c[testLab]):
				tids = numOrRan(t)
				for tid in tids:
					test = tests[tid-1]
					drug = bool(c[conditionLab][i]=='yes')
					tdat = nmp.createElement('Data', {'Name':'Test%i' % tid,'SampleType':'group', 'Drug':drug})
					for k in test:
						if not k == 'traces':
							tdat.setAttrib(k, test[k])
					dat.newElement(tdat)
					addTraces(tdat, test['traces'], drug)
	return doc
Example #16
def compressMap(fname):
	'''Opens a document fname, which should contain appropriately meta-tagged reconstructions of cercal system afferents. Groups the included varicosities (sphere fiducials) so that each length/cercus/class group is represented by a single fiducial. Discards slide-number metadata and any elements that are not sphere or line fiducials.'''
	bfn, ext = os.path.splitext(fname)
	nfn = bfn+"_compressed.nmpml"
	doc = io.read(fname)
	d2 = nmpml.blankDocument()
	lfids = doc.getElements('Fiducial', {'Style':'line'})
	i = 1
	for f in lfids:
		nf = nmpml.createElement('Fiducial', {'Style':'line', 'color':[255,255,255], 'meta_slide_number': 123, 'Name':'standard_outline_123_at1p344scale_line%i' % i})
		i+=1
		nf.setPoints(f.getPoints())
		d2.newElement(nf)
	for side in ['left', 'right']:
		for length in ['long', 'medium', 'short']:
			for clas in range(1,14):
				fids = doc.getElements('Fiducial', {'Style':'spheres', 'meta_class':clas, 'meta_length':length, 'meta_cercus':side})
				if fids:
					nf = nmpml.createElement('Fiducial', {'Style':'spheres', 'color':fids[0].attrib('color'), 'meta_class': clas, 'meta_length':length, 'meta_cercus':side, 'Name':'class%i%s%s' % (clas, length, side)})
					for f in fids:
						nf.setPoints(f.getPoints(), True)
					d2.newElement(nf)
	io.write(d2, nfn)
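A usage sketch for compressMap; the input file name is illustrative, and the inspection loop assumes getPoints returns an array of point coordinates (consistent with the setPoints calls above):

	# writes afferent_map_compressed.nmpml next to the input, with one sphere
	# fiducial per length/cercus/class group
	compressMap('afferent_map.nmpml')
	d2 = io.read('afferent_map_compressed.nmpml')
	for f in d2.getElements('Fiducial', {'Style': 'spheres'}):
		print('%s: %i varicosities' % (f.name(), f.getPoints().shape[0]))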
Example #17
def select(doc, **kwargs):
	'''Interface to extract a subset of an nmpml document '''
	if len(doc.getElements())<2:
		return doc
	elements=[]
	gui=kwargs.get('gui', True)
	if gui is True:
		gui=None
	dlg=TreeBrowser(gui, {"multiple":True, 'doc':doc})
	dlg.CenterOnParent()
	val = dlg.ShowModal()
	if val == wx.ID_OK:
		elements=dlg.getElements()
	else:
		return doc
	dlg.Destroy()
	unique=[]
	
	for e in elements:
		par=e.xpath(True)[:-1]
		for pe in par:
			if pe in elements:
				break
		else:
			unique.append(e)
	if kwargs.get('prune'):
		doc.elements=[]		
		for e in unique:
			e.container=None
			doc.newElement(e)
	else:	
		from mien.parsers.nmpml import blankDocument
		doc = blankDocument()
		for e in unique:
			doc.newElement(e.clone())
	return doc
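A usage sketch for select. Because the default gui=True opens a TreeBrowser dialog, this assumes a running wx application; the file name is illustrative:

	# let the user pick elements and trim the document in place
	doc = io.read('experiment.nmpml')
	subset = select(doc, prune=True)
	print('kept %i top-level elements' % len(subset.elements))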
Example #18
def newFile(s,d=()):
	d=dict(d)
	nd = nmpdat.newData(s, d)
	doc = nmp.blankDocument()
	doc.newElement(nd)
	return doc
Example #19
def write(obj, fname, **kwargs):
	'''Writes an NmpmlObject to the named file. This function will attempt
to automatically identify the format of the file, using the "fileinformation"
member of the NmpmlObject and, failing that, the extension of the file name.
To override this behavior, use the keyword argument "format". This function returns False if it fails, and True if it succeeds.

Keyword arguments:

"format" - set this to a key of "filetypes" to force the output to a
	particular format.
"gui" - Set this to a mien.wx.base.BaseGui instance to use that GUI's methods
	for user interaction during the save. Set it to True to cause this
	function to make its own GUI for interaction (otherwise, you will get
	text-mode interaction).
"forceext" - set this to True to cause this function to alter the provided
	file name to guarantee that it has the extension that mien associates
	with the format it was written in.
"newdoc" - Write a copy of the object, placed in a new xml document container. The container will contain no other elements, but the copy is recursive, so the object's children will also be written.
'''
	if kwargs.get('newdoc'):
		from mien.parsers.nmpml import blankDocument
		doc=blankDocument()
		doc.newElement(obj.clone())
		obj=doc
	if hasattr(fname, 'write'):	
		fileobj=fname
		openme=False	
	else:	
		url=fname
		parts=parseurl(url)
		fname=parts[2]
		kwargs['parsed_url']=parts
		openme=True	
	format=kwargs.get('format')
	if not format:
		format=obj.fileinformation["type"]
	if not format or format=='guess':
		if openme:
			format=get_file_format(fname, kwargs.get('gui'))
		if format=="unknown xml":
			format = get_xml_dialect(obj)
		if not format:
			print "aborting write"
			return
	if not filetypes.has_key(format):
		fl=match_extension(format)
		if not fl:
			# no matching extension: fall back to the default nmpml format
			print format
			print "can't find format for file %s, using default (nmpml)" % str(fname)
			format='nmpml'
		else:
			format=fl[0]
	kwargs['format']=format
	ft=filetypes[format]
	if not ft["write"]:
		print "format %s is read only" % format
		return False
	if kwargs.get('forceext') and openme:
		ext=ft['extensions'][0]
		if not fname.endswith(ext):
			q=list(parseurl(url))
			q[2]=os.path.splitext(fname)[0]+ext
			url=urlunparse(tuple(q))
	#prep=checkSaveElements(obj, ft.get("elements", "any"), fname)
	#if not prep:
	#	print "format %s can't be used to write any of the objects in this document" % format
	#	return False
	if openme:
		fileobj, cleanup=openurlwrite(url)
		kwargs['wrotetourl']=url
	if ft.has_key('xml dialect'):
		writeGenericXML(fileobj, obj, **kwargs)
		obj.onSave(url)
	else:	
		ft["write"](fileobj, obj, **kwargs)
	if openme:	
		cleanup()	
	return True	
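A usage sketch for write, exercising the keyword arguments described in the docstring. It assumes numpy's zeros and the mdat.newData constructor from the other examples are in scope; the file name is illustrative:

	# wrap a single Data element in a fresh document and force nmpml output
	ds = mdat.newData(zeros((100, 2)), {'SampleType': 'timeseries', 'SamplesPerSecond': 1000.0})
	ok = write(ds, 'single_trace', format='nmpml', forceext=True, newdoc=True)
	if not ok:
		print('write failed')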
Example #20
def read(f, **kw):
    a = reshape(fromstring(f.read(), int16), (-1, 1))
    d = nmp.blankDocument()
    dat = nmp.addPath(d, "/Data:call")
    dat.datinit(a, {"SampleType": "timeseries", "SamplesPerSecond": 40000.0})
    return d
Example #21
File: base.py Project: gic888/MIEN
	def save(self, **kwargs):
		for k in self.iodefaults['save'].keys():
			if not kwargs.has_key(k):
				kwargs[k]=self.iodefaults['save'][k]
		ask=kwargs.get('ask', True) 
		fname=kwargs.get('fname')
		sev=False
		if kwargs.has_key('doc'):
			doc=kwargs['doc']
			del(kwargs['doc'])
		elif kwargs.has_key('subelement'):
			el=kwargs['subelement']
			del(kwargs['subelement'])
			el=el.clone()
			doc=blankDocument()
			doc.newElement(el)
			sev=el
		else:	
			doc=self.document
		if kwargs.has_key('fname'):
			del(kwargs['fname'])
		else:	
			try:
				fname=self.document.fileinformation["filename"]
			except:
				fname=None
		if not fname:
			ask=True
		if kwargs.get('select'):
			kwargs['gui']=self
			doc=mien.parsers.fileIO.select_elements(doc, **kwargs)	
		if ask:
			if kwargs.get('format'):
				f=kwargs['format']
				formats=[f]
				ext=mien.parsers.fileIO.filetypes[f]['extensions'][0]
				formatstring="%s |*%s" % (f, ext)
			else:
				formats=['guess']+mien.parsers.fileIO.legal_formats(doc)
				formatstring="guess from filename | * |"
				for f in formats[1:]:
					ext=mien.parsers.fileIO.filetypes[f]['extensions'][0]
					formatstring+=f+" |*"+ext+"|"
				formatstring=formatstring[:-1]	
			dir=self.fileinformation.get('save directory')
			if not dir:
				try:
					dir=self.document.fileinformation["filename"]
					dir=os.path.split(dir)[0]
				except:
					dir=os.getcwd()
			dlg=wx.FileDialog(self, message="Save to File", defaultDir=str(dir), style=wx.SAVE, wildcard=formatstring)
			dlg.SetFilterIndex(0)
			dlg.CenterOnParent()
			if dlg.ShowModal() == wx.ID_OK:
				fname=str(dlg.GetPath())
				format=formats[dlg.GetFilterIndex()]
			else:
				self.report("Canceled")
				return
			kwargs['format']=format
			kwargs['forceext']=True
			dir=os.path.split(fname)[0]	
			self.fileinformation['save directory']=dir
			doc.fileinformation["filename"]=fname
			doc.fileinformation["type"]=format
		check=mien.parsers.fileIO.write(doc, fname, **kwargs)
		if sev:
			el.sever()
			doc.sever()
		if check:
			try:
				fname=kwargs['wrotetourl']
			except:
				pass
			self.report("Wrote to %s" % fname)
		else:
			self.report('save failed')
Example #22
def comp_series(fn,sw):
	'''Compares the illumination of the movie images to the stimulus generator record that drives the LED.'''
	if 'n' in sw.keys():
		s, fs = retrieveData(fn[0],sw)
	else:
		s, fs = sum_stack(fn,sw)
	if len(s.shape)==2 and s.shape[1] == 1:
		s=s.reshape(-1,)
	b, fsb = retrieveData(fn[1],sw)
	if 'e' in sw.keys():
		es, esind = near_event_finder(s,fs,stdnum=5,numback=6)
		eb, ebind = near_event_finder(b[:,0],fsb,thresh=0.1)
	elif 'f' in sw.keys():
		es, esind = subfs_event_finder(s,fs)
		eb, ebind = subfs_event_finder(b[:,0],fsb,5)		
	else:
		es, esind = event_finder(s,fs)
		eb, ebind = event_finder(b[:,0],fsb,thresh=1)
	df = len(eb) - len(es)
	if df:
		z=zeros(df,s.dtype)
		es=hstack([es,z])
	tdiff = es-eb
	#Start: code to ensure all events are detected
	bb = b[:,0]
	z = zeros_like(bb)
	z[ebind] = 1
	btot = vstack([bb,z]).T
	hb = nmpdat.newHeader(fs=fsb, l=['RawStimulus','Events'])
	ndb = nmpdat.newData(btot, hb)
	docb = nmp.blankDocument()
	docb.newElement(ndb)
	if sw.has_key('dir'):
		a = io.write(docb,sw['dir'] + '/' + sw['stim_file'])
	else:	
		a = io.write(docb,sw['stim_file'])
	if a:
		print "%s successfully written." % sw['stim_file']
	else:	
		print "%s failed to write."  % sw['stim_file']
	zs = zeros_like(s)
	zs[esind] = 1
	stot = vstack([s,zs]).T
	hs = nmpdat.newHeader(fs=fs, l=['CameraSum','Events'])
	nds = nmpdat.newData(stot, hs)
	docs = nmp.blankDocument()
	docs.newElement(nds)
	if sw.has_key('dir'):
		a = io.write(docs,sw['dir'] + '/' + sw['cam_file'])
	else:	
		a = io.write(docs,sw['cam_file'])
	if a:
		print "%s successfully written." % sw['cam_file']
	else:	
		print "%s failed to write." % sw['cam_file']
	#End: code to ensure all events are detected
	tot = vstack([es,eb,tdiff]).T
	h = nmpdat.newHeader(fs=1.0, l=['VideoEvents','StimulusEvents','TimeDifference'])
	nd = nmpdat.newData(tot, h)
	doc = nmp.blankDocument()
	doc.newElement(nd)
	if sw.has_key('dir'):
		a = io.write(doc,sw['dir'] + '/' + sw['comp_file'])
	else:	
		a = io.write(doc,sw['comp_file'])
	if a:
		print "%s successfully written." % sw['comp_file']
	else:	
		print "%s failed to write." % sw['comp_file']
Example #23
import mien.parsers.nmpml as nmpml
from ccbcv.rest import PSDB
from mien.parsers.mzip import deserialize

URL = 'http://cercus.cns.montana.edu:8090'
PATH = '/CercalSystem/'
CERCDB = PSDB(URL, PATH)

def getVaric(iid):
	df = CERCDB.getFile(iid)
	doc = deserialize(df)
	var = doc.getElements('Fiducial', {"Style":"spheres"})
	return var

records =  CERCDB.get(PATH)
doc = nmpml.blankDocument()
for rec in records:
	if 'afferent' in rec['metadata']['anatomy_type']:
		iid = rec['id']
		print iid
		els = getVaric(iid)
		if not els:
			print("warning, no varicosities in record %s. Skipping record" % iid)
			continue
		elif len(els)>1:
			print("warning, duplicate varicosities in record %s. Using first element" % (iid,))
		el = els[0]
		el.sever()
		el.setName("v"+iid)
		metas = CERCDB.getInfo(iid)['metadata']
		for k in metas:
Example #24
		if "_renamed" in n:
			continue
		if "_fubar" in n:
			continue
		print(n)
		nn, ext = os.path.splitext(n)
		nn = nn+"_renamed"+ext
		doc = io.read(n)
		try:
			al.guessFiducialNames(doc)
			io.write(doc, nn)
		except:
			print("failed")
			raise
elif sys.argv[1]=="col":
	ndoc = nmp.blankDocument()
	for n in sys.argv[2:]:
		doc = io.read(n)
		els = []
		for e in doc.elements:
			if e.name() in ["xhair", "transverse", "sagital", "coronal"]:
				ne = e.clone()
				snum = aname.match(n)
				nn = ne.name()+"_"+"_".join(snum.groups())
				ne.setName(nn)
				ndoc.newElement(ne)
	io.write(ndoc, "combined_fiducials.nmpml")
elif sys.argv[1]=="sep":
	cf = io.read(sys.argv[2])
	for n in ["xhair", "transverse", "sagital", "coronal"]:
		els = [e for e in cf.elements if e.name().startswith(n)]