Example #1
def retrieveData(fn,sw=(), header=False):
	'''This function opens a file and returns the data members and sampling rate.'''
	sw=dict(sw)
	speclist=[(b,sw[b]) for b in ['xmin','xmax','ymin','ymax','nframes','startframe'] if sw.has_key(b)]
	if len(speclist) > 0:
		select=dict(speclist)
	else:
		select=None
	if select:
		f=io.read(fn,select=select)
	else:
		f=io.read(fn)
	d=f.getElements('Data')[0]
	dat = d.getData()
	if dd.isSampledType(d):
		fs=d.header()['SamplesPerSecond']
	elif d.stype()=='image':
		fs = 1./d.header()['StackSpacing']
	else:
		print "File does not have sampling rate. Setting sampling rate to None. May cause problems later..."
		fs = None
	if header:
		return dat, fs, d.header()
	else:
		return dat, fs
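
A rough sketch of how the switch tuple might be passed (illustration only; the file name and crop values below are hypothetical):
# Hypothetical call: crop the stack in x and also return the header.
dat, fs, hdr = retrieveData('example_stack.mdat', sw=(('xmin', 10), ('xmax', 200)), header=True)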
Example #2
def batchSort(tf, df):
    if type(tf) == str:
        tf = io.read(tf)
    bn = os.path.split(df)[-1]
    bn, ext = os.path.splitext(bn)
    bn = bn + "-" + ext[1:]
    sfn = bn + "_sorted_spikes.mdat"
    dfstr = df
    df = io.read(df)
    print (tf.getElements("Data", "spikesorter_setup", depth=2))
    setup = tf.getElements("Data", "spikesorter_setup", depth=2)[0]
    temps = tf.getElements("Data", depth=2)
    temps = [t for t in temps if t.name().startswith("spikesort_")]
    temps.sort(sortByOrder)
    dat = df.getElements("Data", depth=1)[0]
    writeShifts(dat, zeros(dat.shape()[1]))
    precondition(dat, setup)
    writeShifts(dat, zeros(dat.data.shape[1]))
    for t in temps:
        spi = detect(dat, t, bn)

    if TOSAVE:
        # newfname=sys.argv[1]
        # newfname, jnk=os.path.splitext(newfname)
        dfstr = os.path.basename(dfstr)
        dfstr, jnk = os.path.splitext(dfstr)
        newfname = "spikes" + dfstr + ".mdat"
        print (newfname)
        io.write(tf, newfname, newdoc=False)

    spikes = dat.getSubData("/spikes")
    io.write(spikes, sfn, newdoc=True, format=".mdat")
Example #3
def assemble(filepairs, dname):
	dat_all = []
	for fp in filepairs:
		bfn = os.path.join(dname, fp[0])
		afn = os.path.join(dname, fp[1])
		dbin = io.read(bfn).getElements("Data")[0]
		daos = io.read(afn).getElements("Data")[0]
		trig= dbin.getData()[:,TRIGCHAN]
		ind = argmax(trig[1:] - trig[:-1])+1
		mfd = dbin.getData()[ind:,MFCHAN]
		dbin.datinit(mfd, {"SampleType":"timeseries", "StartTime":0.0, 
			"Labels":["MicroFlownVoltage"], "SamplesPerSecond":dbin.fs()})
		tsd = smooth(daos.getData(),daos.attrib("SamplesPerSecond"))  #remove clicks BEFORE resampling
		daos.datinit(tsd, {"SampleType":"timeseries", "StartTime":0.0, 
			"Labels":["HairPosition"], "SamplesPerSecond":daos.fs()})
		resample(dbin, SIGFS)
		resample(daos, SIGFS)
		dat2 = dbin.getData()
		dat1 = daos.getData()
		if dat1.shape[0] < dat2.shape[0]:
			dat2 = dat2[:dat1.shape[0]]
		elif dat1.shape[0] > dat2.shape[0]:
			dat1 = dat1[:dat2.shape[0]]
		dat1 -=  dat1.mean()
		dat2 -= dat2.mean()
		dd = column_stack([dat1, dat2])
		dat_all.append(dd)
	dat = row_stack(dat_all)
	ds = miendata.newData(dat, {'SampleType':'timeseries', 'SamplesPerSecond':SIGFS,
		'StartTime':0.0, "Labels":['HairPosition', 'MicroFlownVoltage']})
	return ds
Example #4
def knit(dname):
	if os.path.isfile(os.path.join(dname, 'concat_ex.mdat')):
		os.unlink(os.path.join(dname, 'concat_ex.mdat'))
	if os.path.isfile(os.path.join(dname, 'concat.mdat')):
		os.unlink(os.path.join(dname, 'concat.mdat'))		
	print("=== processing directory %s ===" % dname)
	dat_all = []
	dat_ex = []
	mf_allch = []
	mdat = [f for f in os.listdir(dname) if f.endswith("_ts.mdat")]
	date = mdat[0][:10]
	if all([f[:10] == date for f in mdat]):
		if os.path.isfile(os.path.join(dname, date+'concat_ex.mdat')):
			os.unlink(os.path.join(dname, date+'concat_ex.mdat'))
		if os.path.isfile(os.path.join(dname, date+'concat.mdat')):
			os.unlink(os.path.join(dname, date+'concat.mdat'))		
	else:
		print "Multiple experiments present -- aborting. Put separate experiments in different folders."
		return None	
	bin = [f for f in os.listdir(dname) if f.endswith(".bin")]
	for f in mdat:
		ff1=os.path.join(dname, f)
		f2 = getMatch(f[:-8], bin)
		if not f2:
			print("can't match %s" % (f,))
			continue
		ff2 = os.path.join(dname, f2)
		print("adding file pair %s, %s" % (f, f2))
		dat1 = io.read(ff1).getElements("Data")[0]
		dat2 = io.read(ff2).getElements("Data")[0]
		dat2 = cropMicroflown(dat2)
		# crpd = dat2.getData()
		# ds = miendata.newData(crpd, {'SampleType':'timeseries', 'SamplesPerSecond':10000})
		# doc = nmpml.blankDocument()
		# doc.newElement(ds)
		# io.write(doc, os.path.join(dname, 'crpd.mdat'))
		resample(dat1, 1000)
		resample(dat2, 1000)
		dat1 = dat1.getData()
		dat2 = dat2.getData()[:,2]
		if dat1.shape[0] < dat2.shape[0]:
			dat2 = dat2[:dat1.shape[0]]
		elif dat1.shape[0] > dat2.shape[0]:
			dat1 = dat1[:dat2.shape[0]]
		dat1 -=  dat1.mean()
		dat2 -= dat2.mean()
		dd = column_stack([dat1, dat2])
		dat_all.append(dd)
		if not any([q in f.lower() for q in EXCLUDE]):
			dat_ex.append(dd)
	dat = row_stack(dat_all)
	ds = miendata.newData(dat, {'SampleType':'timeseries', 'SamplesPerSecond':1000})
	doc = nmpml.blankDocument()
	doc.newElement(ds)
	io.write(doc, os.path.join(dname, date+'concat.mdat'))
	if len(dat_ex) < len(dat_all):
		dat = row_stack(dat_ex)
		ds.datinit(dat)
		io.write(doc, os.path.join(dname, date+'concat_ex.mdat'))
Example #5
def getAllSpheres(fname):
	'''Opens the document fname, and calls array2mm for every sphere fiducial in the document.''' 
	doc = io.read(fname)
	fids = doc.getElements('Fiducial', {'Style':'spheres'})
	for f in fids:
		array2mm(f.getPoints()[:,:3], f.name())
		break
Example #6
File: cli.py Project: gic888/MIEN
def bounce():
	'''Reload the modules in the "mods" list and the current document'''
	mods=["mien.nmpml.%s" % mn for mn in mien.nmpml.__all__]+['mien.nmpml',  'mien.optimizers.base','mien.optimizers.brute','mien.optimizers.ga','mien.nmpml.optimizer', 'mien.parsers.nmpml','mien.parsers.fileIO']
	for m in mods:
		exec("import %s" %m)
		exec("reload(%s)" % m)
	io=mien.parsers.fileIO
	try:
		l=globals()
		doc=l['doc']
		els=l['els']
		fn=doc.fileinformation.get('filename', 'miendoc.nmpml')
		doc=io.read(fn)
		l['doc']=doc
		l['io']=io
	except:
		print "unable to reload document"
		raise
		return None
	for i,e  in enumerate(els):
		if type(e)!=str:
			e=e.upath()
		try:
			ne=doc.getInstance(e)
		except:
			ne=None
			print "Can't reference element at %s" % e
		els[i]=ne	
	l['els']=els
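
bounce() assumes the interactive MIEN namespace already holds globals named doc and els; a minimal sketch of that precondition (the file name and element query are hypothetical):
# Hypothetical session state: a loaded document and the elements to re-resolve.
doc = io.read('miendoc.nmpml')
els = doc.getElements('Data')
bounce()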
Example #7
def defaultMetas(lfn):
    """ sets length:long, class:0, cercus:left, instar:10, slide_number:-1, directional_tuning:0, cercal_distance:10. If any of these attributes already have values, this function will not overwrite them. It is mainly useful to prevent functions that crash if these basic metas are undefined from crashing. If you want to change the defaults, you may pass arguments that are of the form name:value, as well as file names. For example meta_cercus:right fn1 fn2  will operate on files fn1 and fn2, but will set cercus:right as well as the other defaults listed above."""
    defaults = {
        "meta_length": "long",
        "meta_class": 0,
        "meta_cercus": "left",
        "meta_instar": 10,
        "meta_slide_number": -1,
        "meta_directional_tuning": 0,
        "meta_cercal_distance": 10,
    }

    files = []
    for s in lfn:
        if ":" in s:
            n, v = s.split(":")
            defaults[n] = v
        else:
            files.append(s)
    for fn in files:
        doc = io.read(fn)
        for e in doc.getElements(["Cell", "Fiducial", "SpatialField"]):
            for a in defaults:
                if not a in e.attributes:
                    e.setAttrib(a, defaults[a])
                elif a == "meta_directional_tuning":
                    e.setAttrib("meta_directional_tuning", e.attrib("meta_directional_tuning") % 360)
        io.write(doc, fn)
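
The usage pattern described in the docstring, written out as a call (a sketch; the file names are placeholders):
# Override one default and apply the rest to two hypothetical files.
defaultMetas(["meta_cercus:right", "fn1.nmpml", "fn2.nmpml"])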
Example #8
def makeDBGroup(lfn):
    """For each file in the list, make a database group containing the metadata for these objects. Also, remove these metadata from the objects contained within the group. Attempt's to guess a database ID ane assign it as the name of the group as well"""

    PATH = "/CercalSystem/"
    for fn in lfn:
        doc = io.read(fn)
        m = db_getmetas(doc, False)
        if "meta_dbid" in m:
            name = m["meta_dbid"]
            del (m["meta_dbid"])
        else:
            try:
                name = "%s_%s_%s" % (m["meta_length"][0].upper(), str(m["meta_class"]), str(m["meta_slide_number"]))
            except:
                name = "DBGroup"
        print "setting group %s in %s" % (name, fn)
        m["Name"] = name
        m["DBrecord"] = PATH
        m["DBurl"] = URL
        group = createElement("Group", m)
        els = doc.elements[:]
        doc.newElement(group)
        for e in els:
            e.move(group)
        del (m["Name"])
        for e in group.getElements():
            for k in m:
                if k in e.attributes:
                    del (e.attributes[k])
            for k in ["Color", "DisplayGroup"]:
                if k in e.attributes:
                    del (e.attributes[k])
        io.write(doc, fn)
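
A minimal usage sketch (file names are hypothetical); when no meta_dbid is present, the group name falls back to the length/class/slide-number pattern shown above:
# Wrap the top-level elements of each hypothetical file in a database Group.
makeDBGroup(["cell_L_3_12.nmpml", "cell_M_7_04.nmpml"])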
Example #9
def convertMetas(lfn):
    """set some values for the metadata tags cercal_distance, class, cercus, and directional_tuning. Use these to set color. Sets cercal_distance to 10 (proximal). Uses the dircolors module to infer class and thus direction and color. This module assumes the data are stored in a file name of origin, using "." as the field separator."""
    for fn in lfn:
        print fn
        doc = io.read(fn)
        els = doc.getElements(["Cell", "Fiducial", "SpatialField"], depth=1)
        for e in els:
            e.setAttrib("meta_cercal_distance", 10)
            aclass = e.attrib("meta_class")
            if aclass == None:
                try:
                    dc._getclass(e)
                    aclass = e.attrib("meta_class")
                except:
                    print "failed to get class for %s" % fn
                    continue
            cerc = e.attrib("meta_cercus").lower()[0]
            try:
                d = dc.DIRECTIONS[aclass]
            except:
                print "no directional tuning for  %s" % (str(aclass),)
                continue
            if cerc == "r":
                d = 360 - d
            e.setAttrib("meta_directional_tuning", d)
            d = d % 360
            ang = d * pi / 180
            c = dc._getAngleColor(ang)
            pycol = dc.convertColor(c, "py")
            if "Color" in e.attributes:
                del (e.attributes["Color"])
            e.setAttrib("color", pycol)
        io.write(doc, fn)
Example #10
	def makeMasks(self, event):
		#updated
		c = self.cell
		points = reshape(c.get_drawing_coords(), (-1, 8))
		diams = (points[:,3]+points[:,7])/2
		#filt = diams < 8.5
		#get abstract model data file
		d = self.gui.askParam([{"Name":"Density Model File",
 							 "Value":'classModelsOnly.mien',
 							 "Browser":FileBrowse}])
		if not d:
			return
		md = io.read(d[0])	
		masks = md.getElements("AbstractModel", depth=1)
		#for each model, construct masks 
		for mask in masks:
			gmm = mask.getElements('MienBlock', {'Function':'ccbcv.gmm.gmm'})
			if not gmm:
				continue
			gmm = gmm[0]
			dat = maskPoints(mask, points)
			#dat = dat*filt[:,newaxis]*mask.attrib('total_weight')
			dat = dat*mask.attrib('total_weight')
			a=  {'Name':mask.name().rstrip('gm'),'SampleType':'mask'}
			for atr in mask.attributes:
				if atr.startswith('meta_'):
					a[atr] = mask.attrib(atr)
			d = newData(dat,  a)
			c.newElement(d)
		self.gui.update_all(object=c, event="Rebuild")
		self.gui.report("finished masking")
Example #11
def get_velocity(fn, sw, pfs):
	print "loading microflown data and converting to velocity..."
	mff=os.path.join(os.path.split(__file__)[0], 'MicroflownCalib.ncl')
	mff, ffs = read_ncl(mff)
	mff=mff[:,0]
	doc = io.read(fn)
	vdat = doc.getElements('Data')[0]
	vfs = vdat.fs()
	vdat = vdat.getData()
	#vdat, vfs = read_streamer(fn)
	vdat=vdat[:,sw['m']]
	#linear regression
	a=arange(float(len(vdat)))
	o=ones(vdat.shape[0])
	A=column_stack([a,o])
	l, resid, rank, s =linalg.lstsq(A,vdat)
	if l[0] > .001*(max(vdat)-min(vdat)):
		print('Warning: possible linear trend in velocity data. Slope of linear regression = %.3f, peak-to-peak velocity difference = %.3f.' % (l[0], max(vdat)-min(vdat)))
	#write_txt('test.txt', ffs, 'data 	linear 	final', column_stack([vdat,line,vdat-line]))
	if vfs!=ffs:
		mff= filterResample(mff, ffs, vfs)
	vdat=numfilter(vdat, mff)
	# write_txt('test.txt', ffs, 'filtered', vdat[:,newaxis])
	if vfs!=pfs:
		vdat=resample(vdat, vfs, pfs)
	if sw.get('startframe'):
		vdat=vdat[sw['startframe']:]
	vdat-=vdat.mean()
	return vdat	
Example #12
def getAofT(fn,  sw):
	if fn.endswith('raw'):
		dat, fs = read_raw(fn, sw)
	else:
		doc=io.read(fn)
		d=doc.getElements('Data')[0]
		h=d.header()
		dat=d.getData()
		fs = 1.0/h['StackSpacing']
		dat=dat[:,:,0,:]
	print('processing image stack: %ix%i, %i frames ...' % (dat.shape[0], dat.shape[1], dat.shape[2]))
	if sw.get('rotate'):
		dat=imrotate(dat, sw['rotate'])
	pts=[]
	if sw.has_key('subtract_mean'):
		dat=dat.astype(float32)-dat.mean(2)[:,:,newaxis]
		for i in range(dat.shape[2]):
			dat[:,:,i]-=dat[:,:,i].min()
			dat[:,:,i]/=dat[:,:,i].max()
	for i in range(dat.shape[2]):
		x=dat[:,:,i].sum(1)
		if sw['w']>1:
			x=convolve(ones(sw['w']), x, mode='same')
		x=argmax(x)
		pts.append(x)
	pts=array(pts).astype(float32 )
	L=sw['l']*AOS_PixPerDiv/NIKON_PixPerDiv	
	pts-=pts.mean()
	#small angle aprox
	saa=pts/L
	doc.sever()	
	return (saa, fs)
Example #13
def process(fn, sw):
	#gc.set_debug()
	print('loading image stack ...')
	f=io.read(fn)
	d=f.getElements('Data')[0]
	dat = d.getData()
	h = d.header()
	print('processing image stack: %ix%i, %i frames ...' % (dat.shape[0], dat.shape[1], dat.shape[3]))
	if sw.get('xmin'):
		xm=sw['xmin']
		dat=dat[xm:,:,:]
	else:
		xm=0
	if sw.get('ymin'):
		ym=sw['ymin']
		dat=dat[:, ym:,:,:]
	else:
		ym=0
	if sw.get('xmax', -1)!=-1:
		xx=sw['xmax']
		xr=xx-sw.get('xmin', 0)
		dat=dat[:xr, :,:,:]
	else:
		xx=dat.shape[0]
	if sw.get('ymax', -1)!=-1:
		yy=sw['ymax']
		yr=yy-sw.get('ymin', 0)
		dat=dat[:, :yr,:,:]
	else:
		yy=dat.shape[1]
	print 'cropped to %ix%i' % (xr,yr)
	h['OriginalDims'] = (dat.shape[0],dat.shape[1])
	h['XDims'] = (xm,xx)
	h['YDims'] = (ym,yy)
	if sw.has_key('subtract_mean'):
		print "calculating mean ..."
		dat=dat.astype(float32)-dat.mean(3)[:,:,:,newaxis]
		dat-=dat.min()
		dat/=dat.max()
		h['SubtractMean']=True
	else:
		h['SubtractMean']=False
	if sw.get('rotate'):
		print "rotating ..."
		dat=imrotate(dat, sw['rotate'])
		h['Rotated'] = sw['rotate']
	else:
		h['Rotated'] = 0
	print('writing image stack ...')
	f=newFile(dat,h)
	if sw.has_key('AOS_dir'):
		fname=sw['AOS_dir']+'/'+sw['AOS_file']
	else:
		fname=sw['AOS_file']
	a1=io.write(f,fname)	
	if a1:
		print "Wrote %s." % fname
	else:
		print "Failed to write %s." % fname
Example #14
def read_mien(f):
	doc=io.read(f)
	dat=doc.getElements('Data')[0]
	h=dat.header()
	d=dat.getData()
	fs = 1.0/h['StackSpacing']
	d=d[:,:,0,:]
	return (d, fs)
Example #15
def renameFids(lfn):
    """First argument should be a command "rn", "col" or "sep". These functions were used in creation of the standard fiducials. 
	rn calls "guessFiducialNames (from ccbcv align) to attempt to name the fiducial lines in a file as sagital, coronal, and transverse, rather than the arbitrary names they may have. col collects the fiducials in the list of arguments into a single "combined fiducials" file. sep opperates on a file generated by col (the argument list must have exactly one file name after the command sep), and splits this into files containing xhair, transverse, sagital, and coronal lines.
	"""
    import ccbcv.align as al
    import mien.parsers.nmpml as nmp

    if lfn[1] == "rn":
        for n in lfn[2:]:
            if "_renamed" in n:
                continue
            if "_fubar" in n:
                continue
            print (n)
            nn, ext = os.path.splitext(n)
            nn = nn + "_renamed" + ext
            doc = io.read(n)
            try:
                al.guessFiducialNames(doc)
                io.write(doc, nn)
            except:
                print ("failed")
                raise
    elif lfn[1] == "col":
        ndoc = nmp.blankDocument()
        for n in lfn[2:]:
            doc = io.read(n)
            els = []
            for e in doc.elements:
                if e.name() in ["xhair", "transverse", "sagital", "coronal"]:
                    ne = e.clone()
                    snum = aname.match(n)
                    nn = ne.name() + "_" + "_".join(snum.groups())
                    ne.setName(nn)
                    ndoc.newElement(ne)
        io.write(ndoc, "combined_fiducials.nmpml")
    elif lfn[1] == "sep":
        cf = io.read(lfn[2])
        for n in ["xhair", "transverse", "sagital", "coronal"]:
            els = [e for e in cf.elements if e.name().startswith(n)]
            ndoc = nmp.blankDocument()
            for e in els:
                ndoc.newElement(e)
            nn = n + "_fiducials.nmpml"
            io.write(ndoc, nn)
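
Note that the command is read from lfn[1] and the file names from lfn[2:], so the list is expected to carry a leading entry (for example, the invoking command name). A sketch under that assumption, with placeholder file names (the col branch additionally expects names that match the module-level aname pattern):
# Rename fiducials in two hypothetical files, then collect and re-split them.
renameFids(["fiducial", "rn", "slide01.nmpml", "slide02.nmpml"])
renameFids(["fiducial", "col", "slide01_renamed.nmpml", "slide02_renamed.nmpml"])
renameFids(["fiducial", "sep", "combined_fiducials.nmpml"])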
Example #16
def makeSubgroups(lfn):
    """Takes the name of an attribute as the first argument, followed by a list of files. For all group element in the files, if at least two, and less than all, of the children  of that group have the same value for the named attribute, a subgroup is created to contain them."""
    attr = lfn[0]
    for fn in lfn[1:]:
        doc = io.read(fn)
        for e in doc.elements:
            if e.__tag__ == "Group":
                _makeSubGroup(e, attr)
        io.write(doc, fn)
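
Usage sketch (attribute name first, then files; all names are placeholders):
# Collect children that share a meta_class value into subgroups, per file.
makeSubgroups(["meta_class", "map_left.nmpml", "map_right.nmpml"])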
Example #17
def setmeta(lfn):
    """the first argument should be a metadata tag (without the leading meta_), and the second should be a value. Sets this tag to this value in all the listed files. If the value is DEL, removes the tag."""
    md = "meta_" + lfn[0]
    mv = lfn[1]
    for fn in lfn[2:]:
        doc = io.read(fn)
        for e in doc.getElements(["Cell", "Fiducial"]):
            e.setAttrib(md, mv)
        print "set %s in %s" % (md, fn)
        io.write(doc, fn)
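
The argument convention spelled out as a call (placeholder file names):
# Set meta_instar to 9 in two hypothetical files.
setmeta(["instar", "9", "cell01.nmpml", "cell02.nmpml"])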
Example #18
def datDirToTF(ds, newpath="/", dir="mf04dat"):
	files = os.listdir(dir)
	calib = []
	for f in files:
		if not os.path.splitext(f)[-1][1:].isdigit():
			print "ignoring %s" % f
			continue
		ffn = os.path.join(dir, f)
		try:
			doc = io.read(ffn)
			dat = doc.getElements("Data")[0]
		except:
			print "read failure %s" % ffn
			continue
		fs = dat.fs()
		dat = dat.getData()
		accel = dat[:,1] - dat[:,1].mean()
		accel*=10.01
		vel = dat[:,2] - dat[:,2].mean()
		aa, af, ap = _cosineModel(accel, fs)
		va, vf, vp = _cosineModel(vel, fs)
		if abs(af - vf)/min(af, vf) > .1*min(af, vf):
			print "Warning: %s: estimated different frequncies for input (%.4g) and output (%.4g). Skipping this data point" % (ffn, af, vf)
			continue
		freq = (af+vf)/2.0
		print ffn
		print aa, af, ap
		print va, vf, vp
		ap-=90
		aa=aa/(2*pi*af)
		while ap<vp:
			ap+=360
		while ap>vp:
			ap-=360
		gain = aa/va
		phase = (ap-vp) 
		print gain, phase
		phase= (phase*pi/180)
		phase = phase % (2*pi)
		if phase>pi:
			phase-=2*pi
		calib.append([freq, gain, phase])
	calib = array(calib)
	ind = calib[:,0].argsort()
	calib = calib[ind, :]
	gain = uniformsample(calib[:,[0,1]], 1.0, True)
	phase = uniformsample(calib[:,[0,2]], 1.0, True)
	head = {"SampleType":"timeseries", "SamplesPerSecond":1.0, "StartTime":calib[0, 0]}
	d = ds.getSubData(newpath)
	if d:
		d.datinit(column_stack([gain, phase]), head)
	else:
		ds.createSubData(newpath, column_stack([gain, phase]), head=head)
	pass
Example #19
def sinScanToTF(ds, fname="breeMFdata.mdat", newpath="/", start=.5, accSens = 0.0999, stride=5.5, dur=3.0, nscans=62):
	doc = io.read(fname)
	dat = doc.getElements("Data")[0]
	fs = dat.fs()
	dat = dat.getData()
	calib=[]
	offset = int(round(start*fs))
	dur = int(round(dur*fs))
	stride = int(round(stride*fs))
	lfreq = 0
	for i in range(nscans):
		accel = dat[offset:offset+dur, 1]
		mf = dat[offset:offset+dur, 0]
		offset+=stride
		accel -= accel.mean()
		mf-=mf.mean()
		accel/=accSens
		#print max(accel), max(mf)
		aa, af, ap = _cosineModel(accel, fs)
		va, vf, vp = _cosineModel(mf, fs)
		#print aa, va
		if abs(af - vf)/min(af, vf) > .1*min(af, vf):
			print "Warning: %s: estimated different frequncies for input (%.4g) and output (%.4g). Skipping this data point" % (ffn, af, vf)
			continue
		freq = (af+vf)/2.0
		if freq < lfreq:
			print "Warning: frequency isn't increasing at %i (last:%.4g, this %.4g). Skipping point" % (offset, lfreq, freq)
			continue
		lfreq=freq
		#print offset
		print aa, af, ap
		print va, vf, vp
		ap-=90
		aa=aa/(2*pi*af)
		while ap<vp:
			ap+=360
		while ap>vp:
			ap-=360
		gain = aa/va
		phase = (ap-vp) 
		phase= (phase*pi/180)
		phase = phase % (2*pi)
		if phase>pi:
			phase-=2*pi
		calib.append([freq, gain, phase])
	calib = array(calib)
	ind = calib[:,0].argsort()
	calib = calib[ind, :]
	for i in range(calib.shape[0]):
		print calib[i]
	gain = uniformsample(calib[:,[0,1]], 1.0, True)
	phase = uniformsample(calib[:,[0,2]], 1.0, True)
	head = {"SampleType":"timeseries", "SamplesPerSecond":1.0, "StartTime":calib[0, 0]}
	ds.createSubData(newpath, column_stack([gain, phase]), head=head, delete=True)	
Example #20
File: abstract.py Project: gic888/MIEN
	def getExternalFile(self):
		ef=self.attrib('FileReference')
		if not ef:
			return None
		from mien.parsers.fileIO import read
		doc=read(ef)
		ep=self.attrib('FileUPath')
		if ep:
			return doc.getInstance(ep)
		else: 
			return doc.getElements('AbstractModel')[0]
Example #21
def make_map(dir):
	doc = io.read(os.path.join(dir, 'standard_outline.123.nmpml'))
	invert = { 'Scale_x':-1.0}
	for fn in os.listdir(dir):
		if not fn[0] in ['L', 'M', 'S']:
			continue
		if not fn.endswith('nmpml'):
			continue
		d2=io.read(os.path.join(dir, fn))
		varic = d2.getElements('Fiducial', {"Style":'spheres'})[0]
		for s in ["color", "Color", "Direction"]:
			if s in varic.attributes:
				del(varic.attributes[s])		
		right_varic = varic.clone()
		right_varic.setAttrib('meta_cercus', 'right')
		right_varic.setName(varic.name()+"_right")
		alignObject(right_varic, invert) 
		doc.newElement(varic)
		doc.newElement(right_varic)
	io.write(doc, 'full_map.nmpml', format='nmpml')
Example #22
def mienstruct_from_name(fname):
    '''
    Takes a filename assumed to be a mien readable file and retuns the associated structure in python  
    '''
    from mien.parsers.fileIO import read
    fname = check_fname(fname)
    ff = read(fname)
    ds = ff.getElements()[0]
    ds.sever()
    ff.sever()
    return ds
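
A sketch of the intended call (the file name is a placeholder); the returned element is severed from its parent document, so it can be used on its own:
# Read the first element of a MIEN-readable file as a standalone structure.
ds = mienstruct_from_name("recording.mdat")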
Example #23
def getTF(vidfn, mffn):
	mff=os.path.join(os.path.split(ftt.__file__)[0], 'MicroflownCalib.ncl')
	doc = io.read(mffn)
	mfds = doc.getElements('Data')[0]
	mfds.delChans([z for z in range(mfds.data.shape[1]) if not z==MFCHAN])
	fd = io.read(mff)
	fd = fd.getElements('Data')[0]
	mfds.newElement(fd)
	fd.setName('filter')
	cal.applyFilterToSignal(mfds, dpathSig='/', dpathFilt="/filter", channel=0, newpath='velocity')
	vdoc = io.read(vidfn)
	vds = vdoc.getElements('Data')[0]
	vds.newElement(mfds.getSubData('velocity'))
	combine(vds, '/', '/velocity', False)
	ftt.scanForTFValues(vds)
	tfv = vds.getSubData('/ftvals')
	tf = _cleanupTFV(tfv.getData())
	f=open('trans_func.txt', 'w')
	for i in range(tf.shape[0]):
		f.write(" ".join(map(str, tf[i]))+"\n")
	f.close()
Example #24
File: optRun.py Project: gic888/MIEN
def getOptFromFile(fname):
	from mien.parsers.nmpml import tagClasses
	ocl=tagClasses()['Optimizer']
	doc=io.read(fname)
	opts=doc.getElements(ocl)
	if not opts:
		return None
	o=opts[0]
	d=o.getElementOrRef('Distributer')
	if not d:
		return None
	return (o, d)
Example #25
def gmms2data(lfn):
    for fn in lfn:
        doc = io.read(fn)
        doc2 = blankDocument()
        gmms = doc.getElements("MienBlock", {"Function": "ccbcv.gmm.gmm"})
        for g in gmms:
            atr = g.getInheritedAttributes()
            pars = g.getArguments()
            weights = array(pars["weights"])
            means = reshape(array(pars["means"]), (weights.shape[0], -1))
            covs = reshape(array(pars["covs"]), (means.shape[0], means.shape[1], means.shape[1]))
            doc2.newElement(_gmm2dat(atr, weights, means, covs))
        io.write(doc2, fn + ".mat")
Example #26
def scale134(lfn):
    """scale the elements in the listed files by a factor of 1.34 in x y and z (but not d)"""
    from mien.spatial.alignment import alignObject

    for n in lfn:
        doc = io.read(n)
        nn, ext = os.path.splitext(n)
        nn = nn + "_scaledup" + ext
        factor = 1.34
        scale = {"Scale_x": factor, "Scale_y": factor, "Scale_z": factor}
        els = doc.getElements(["Cell", "Fiducial", "SpatialField"])
        for e in els:
            alignObject(e, scale)
        io.write(doc, nn)
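
Usage sketch (placeholder file names); each input is written back as <name>_scaledup<ext> rather than overwritten:
# Scale the spatial elements of two hypothetical files by 1.34 in x, y, and z.
scale134(["cell_small.nmpml", "map_small.nmpml"])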
Example #27
def retrieveData(fn,sw):
	'''This function opens a file and returns the data members and sampling rate.'''
	speclist=[(b,sw[b]) for b in ['xmin','xmax','ymin','ymax','nframes'] if sw.has_key(b)]
	if len(speclist) > 0:
		select=dict(speclist)
	else:
		select=None
	if select:
		f=io.read(fn,select=select)
	else:
		f=io.read(fn)
	d=f.getElements('Data')[0]
	p=re.compile("((\w+/\w+)|(\w+))\.raw")
	if p.match(fn):
		fs = 1./d.header()['StackSpacing']
	else:
		try:
			fs=d.header()['SamplesPerSecond']
		except:
			print "File is not a timeseries or image stack. Setting sampling rate to None. May cause problems later..."
			fs = None
	dat = d.getData()
	return dat, fs
Example #28
def allTFs(fnames):
	ndoc = nmpml.blankDocument()
	for fname in fnames:
		doc = io.read(fname)
		ds = doc.getElements("Data", depth=1)[0]
		bn=os.path.splitext(fname)[0]
		tf = tffmax(ds, False)
		if not tf:
			continue
		if fname.startswith("2009") or fname.startswith("2010"):
			tf.data[:,2]+=pi
			tf.data[:,2] = tf.data[:,2] -  ( 2*pi*(tf.data[:,2]>pi))
		tf.setName(bn)
		ndoc.newElement(tf)
	io.write(ndoc, "allTFs.mdat")
Example #29
def checkstate(lfn):
    """for all the listed files, verify presence of a cell, varicosities, at least 3 fiducial lines, and the meta tags:
	meta_cercus
	meta_class
	meta_cercal_distance
	meta_length
	meta_slide_number
	meta_cercal_distance
	meta_instar
	meta_directional_tuning
	"""
    need_metas = [
        "meta_cercus",
        "meta_class",
        "meta_cercal_distance",
        "meta_length",
        "meta_slide_number",
        "meta_cercal_distance",
        "meta_instar",
        "meta_directional_tuning",
    ]
    for fn in lfn:
        doc = io.read(fn)
        md = {}
        count = {"cell": 0, "sphere": 0, "line": 0}
        els = doc.getElements(["Cell", "Fiducial"])
        for e in els:
            if e.__tag__ == "Cell":
                count["cell"] += 1
            elif e.attrib("Style") == "spheres":
                count["sphere"] += 1
            else:
                count["line"] += 1
            for atr in e.attributes:
                if atr.startswith("meta_"):
                    md[atr] = e.attrib(atr)
        if count["cell"] < 1:
            print "%s missing cell" % fn
        if count["sphere"] < 1:
            print "%s missing varricosities" % fn
        if count["line"] < 3:
            print "%s has only %i line fiducials" % (fn, count["line"])
        for mt in need_metas:
            if not mt in md:
                print "%s is missing meta tag %s" % (fn, mt)
        sn = md.get("meta_slide_number")
        if sn in [-1, "-1"]:
            print "%s has bogus slide number" % fn
Example #30
def setAttrs(lfn):
    """Takes a list of "name:value" pairs and a list of filenames, and assigns all the attributes in the name:value pairs to all the toplevel elements in the files. Any input not containing a ":" is assumed to be a file name"""
    attrs = {}
    files = []
    for e in lfn:
        if ":" in e:
            n, v = e.split(":")
            attrs[n] = v
        else:
            files.append(e)
    for fn in files:
        doc = io.read(fn)
        for e in doc.elements:
            for n in attrs:
                e.setAttrib(n, attrs[n])
        io.write(doc, fn)
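
A final sketch of the name:value convention (all names are placeholders):
# Tag every top-level element of a hypothetical file with two attributes.
setAttrs(["meta_source:confocal", "meta_cercus:left", "cell03.nmpml"])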