def execute(self, namespace):
    """Load speckle positions from a file and turn them into localization-style traces.

    Reads speckles from ``self.speckleFilename`` (after template substitution with
    file-derived tokens), generates traces with lead/follow frames clipped to the
    sensor edge-rejection region, and publishes the result into ``namespace`` under
    ``self.outputName`` as a tabular mapping with x/y converted from pixels to nm.
    """
    from PYME.IO.FileUtils import readSpeckle
    from PYME.IO import MetaDataHandler
    import os

    # tokens available for substitution into the speckle filename template
    fileInfo = {'SEP': os.sep}

    # default series length when no input image is supplied — effectively "unbounded"
    seriesLength = 100000

    mdh = MetaDataHandler.NestedClassMDHandler()
    mdh['voxelsize.x'] = .001  # default pixel size - FIXME
    mdh['voxelsize.y'] = .001

    # use a default sensor size of 512
    # this gets over-ridden below if we supply an image
    clip_region = [self.edgeRejectionPixels, self.edgeRejectionPixels,
                   512 - self.edgeRejectionPixels,
                   512 - self.edgeRejectionPixels]

    if self.inputImage != '':
        inp = namespace[self.inputImage]
        mdh.update(inp.mdh)
        seriesLength = inp.data.shape[2]

        # clip to the actual image dimensions rather than the 512 default
        clip_region = [self.edgeRejectionPixels, self.edgeRejectionPixels,
                       inp.data.shape[0] - self.edgeRejectionPixels,
                       inp.data.shape[1] - self.edgeRejectionPixels]

        try:
            fileInfo['DIRNAME'], fileInfo['IMAGENAME'] = os.path.split(inp.filename)
            fileInfo['IMAGESTUB'] = fileInfo['IMAGENAME'].split('MM')[0]
        except Exception:
            # filename-derived tokens are optional (only used for template
            # substitution below) — proceed without them if the image has no
            # usable filename. NOTE(review): was a bare `except:`; narrowed so
            # KeyboardInterrupt/SystemExit are no longer swallowed.
            pass

    speckleFN = self.speckleFilename.format(**fileInfo)

    specks = readSpeckle.readSpeckles(speckleFN)
    traces = readSpeckle.gen_traces_from_speckles(specks,
                                                  leadFrames=self.leadFrames,
                                                  followFrames=self.followFrames,
                                                  seriesLength=seriesLength,
                                                  clipRegion=clip_region)

    # turn this into an inputFilter object
    inp = tabular.RecArraySource(traces)

    # create a mapping to convert the co-ordinates in pixels to co-ordinates in nm
    # (renamed from `map` to avoid shadowing the builtin)
    vs = mdh.voxelsize_nm
    mapped = tabular.MappingFilter(inp,
                                   x='x_pixels*%3.2f' % vs.x,
                                   y='y_pixels*%3.2f' % vs.y)
    mapped.mdh = mdh

    namespace[self.outputName] = mapped
def selectAndPlotEvents(pipeline, outputdir='/Users/david/FusionAnalysis', speckleFile=None):
    """Select candidate fusion-event tracks from *pipeline* and save plots/data for each.

    If *speckleFile* is given, tracks are kept when their start position lies within
    1 um of an event in the speckle file; otherwise tracks are filtered on mean
    intensity ('Ag' > 2000) and mean sigma (> 300). For each selected track a plot,
    intensity fits, a CSV of the track data and a JSON of the fit results are written
    to a per-series subdirectory of *outputdir*.

    Returns the list of selected clumps.
    """
    import os
    import pandas as pd
    from PYME.IO.FileUtils.readSpeckle import readSpeckles

    # now iterate through our clumps
    clumpIndices = list(set(pipeline['clumpIndex']))
    clumps = [pipeline.clumps[i] for i in clumpIndices]

    if speckleFile is not None:
        # use speckle file to determine which tracks correspond to fusion events
        vs = pipeline.mdh.voxelsize_nm.x
        speckles = readSpeckles(speckleFile)
        # first frame position of each speckle; columns presumably (row, col, t) in
        # pixels/frames — TODO confirm against readSpeckles
        sp = np.array([s[0, :] for s in speckles])

        # find those clumps which are near (< 1um) to events identified in
        # Joergs speckle file (positions converted from pixels to nm via vs)
        filteredClumps = [c for c in clumps
                          if (((c['x'][0] - vs * sp[:, 1]) ** 2 +
                               (c['y'][0] - vs * sp[:, 0]) ** 2).min() < 1000 ** 2)]
    else:
        # do another level of filtering - fusion events expand, so we're looking for
        # larger than normal sigma in the lipid channel. We can also add a constraint
        # on the mean intensity as proper docking and fusion events are brighter than
        # a lot of the point-like rubbish.
        # We do this here, so we can filter on the aggregate behaviour of a track and
        # be more resilient against noise.
        filteredClumps = [c for c in clumps
                          if (c['Ag'].mean() > 2000) and (c['fitResults_sigma'].mean() > 300)]

    seriesName = os.path.split(pipeline.filename)[1]
    outputDir = os.path.join(outputdir, seriesName)
    if not os.path.exists(outputDir):
        os.makedirs(outputDir)

    for c in filteredClumps:
        # hoist the track id — it was recomputed for every output file
        trackID = c['clumpIndex'][0]

        plotEvent(c, pipeline, plotRaw=True)
        pl.savefig('%s/track%d.pdf' % (outputDir, trackID))

        r = fitIntensities(c)
        pl.savefig('%s/track%d_fits.pdf' % (outputDir, trackID))

        r['filename'] = seriesName
        r['clumpIndex'] = trackID

        # dump the raw track data as CSV
        d = {}
        d.update(c)
        pd.DataFrame(d).to_csv('%s/track%d.csv' % (outputDir, trackID))

        # fit results go to JSON (not CSV) as they are scalar-valued
        with open('%s/track%d_fitResults.json' % (outputDir, trackID), 'w') as f:
            json.dump(r, f)

    return filteredClumps