def getStatsChan(pipeline, chanName, file):
    """Compute per-channel event statistics for an open pipeline and save them as an EventStats record."""
    p = pipeline
    p.colourFilter.setColour(chanName)

    if chanName == 'Everything':
        label = 'Everything'
    else:
        label = p.fluorSpeciesDyes[chanName]

    # convert frame numbers to seconds, using whichever timing entry the metadata provides
    if 'Camera.CycleTime' in p.mdh.getEntryNames():
        t = p.colourFilter['t'].astype('f') * p.mdh.getEntry('Camera.CycleTime')
    else:
        t = p.colourFilter['t'].astype('f') * p.mdh.getEntry('Camera.IntegrationTime')

    nEvents = t.size
    tMax = t.max()
    tMedian = np.median(t)
    meanPhotons = getPhotonNums(p.colourFilter, p.mdh).mean()

    sts = models.EventStats(fileID=file, label=label, nEvents=nEvents, tMax=tMax,
                            tMedian=tMedian, meanPhotons=meanPhotons)
    sts.save()

    return sts
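# Hedged usage sketch (not part of the original source): collect stats for the unfiltered
# data plus every colour channel of an already-opened pipeline. The names `pipe` and
# `file_record` are hypothetical stand-ins for an open Pipeline and a saved file model instance.
def _example_channel_stats(pipe, file_record):
    stats = [getStatsChan(pipe, 'Everything', file_record)]
    for chan in pipe.colourFilter.getColourChans():
        stats.append(getStatsChan(pipe, chan, file_record))
    return stats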
def gphotons(pipeline):
    """Return the mean photon number and mean x localisation error, per colour channel."""
    colourFilter = pipeline.colourFilter
    metadata = pipeline.mdh
    chans = colourFilter.getColourChans()

    channame = ''

    if len(chans) == 0:
        # no colour channels defined - report statistics for everything
        nph = kinModels.getPhotonNums(colourFilter, metadata)
        merr = colourFilter['error_x']
        return [channame, nph.mean(), merr.mean()]

    ret = []
    curcol = colourFilter.currentColour
    for chan in chans:
        channame = pipeline.fluorSpeciesDyes[chan]
        colourFilter.setColour(chan)
        nph = kinModels.getPhotonNums(colourFilter, metadata)
        merr = colourFilter['error_x']
        ret.append([channame, nph.mean(), merr.mean()])

    # restore the previously selected channel
    colourFilter.setColour(curcol)
    return ret
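# Hedged usage sketch (assumption, not from the original source): print a short
# photons/precision summary for each channel of an open pipeline `pipe`.
def _example_print_photon_summary(pipe):
    res = gphotons(pipe)
    # gphotons returns a single [name, photons, error] triple when there are no
    # colour channels, and a list of such triples otherwise
    rows = [res] if res and not isinstance(res[0], list) else res
    for channame, mean_photons, mean_err in rows:
        print('%s: %.0f photons, %.1f nm mean x error' % (channame, mean_photons, mean_err))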
def analyseFile(filename):
    """Run the standard photophysics analyses on a single series and log the results via PL."""
    print(filename)

    seriesName = os.path.splitext(os.path.split(filename)[-1])[0]
    PL.ExtendContext({'seriesName': seriesName})

    try:
        pipe = Pipeline(filename)
    except RuntimeError:
        print('Error opening %s' % filename)
        PL.PopContext()
        return

    #only look at first 7k frames
    pipe.filterKeys['t'] = (0, 7000)
    pipe.Rebuild()

    trackUtils.findTracks(pipe, 'error_x', 2, 20)
    pipe.Rebuild()

    extraParams = {}
    extraParams['cycleTime'] = pipe.mdh['Camera.CycleTime']

    nPhot = kinModels.getPhotonNums(pipe.colourFilter, pipe.mdh)
    extraParams['MedianPhotons'] = np.median(nPhot)
    extraParams['MeanPhotons'] = np.mean(nPhot)
    extraParams['NEvents'] = len(nPhot)
    extraParams['MeanBackground'] = pipe['fitResults_background'].mean() - pipe.mdh['Camera.ADOffset']
    extraParams['MedianBackground'] = np.median(pipe['fitResults_background']) - pipe.mdh['Camera.ADOffset']
    extraParams['MeanClumpSize'] = pipe['clumpSize'].mean()
    extraParams['MeanClumpPhotons'] = (pipe['clumpSize'] * nPhot).mean()

    PL.AddRecord('/Photophysics/ExtraParams', dictToRecarray(extraParams))

    kinModels.fitDecay(pipe)
    kinModels.fitFluorBrightness(pipe)
    #kinModels.fitFluorBrightnessT(pipe)

    #max_off_ts = [3,5,10,20,40]
    #max_off_ts = [20]
    #for ot in max_off_ts:
    #    PL.ExtendContext({'otMax':ot})

    #find molecules appearing across multiple frames
    kinModels.fitOnTimes(pipe)
    #    PL.PopContext()

    pipe.CloseFiles()
    PL.PopContext()
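# Hedged usage sketch (assumption, not from the original source): run the analysis over
# every .h5r series in a directory. The directory argument is hypothetical.
def _example_batch_analyse(data_dir):
    import glob
    for fn in sorted(glob.glob(os.path.join(data_dir, '*.h5r'))):
        analyseFile(fn)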
def plotphotons(pipeline, color='red'):
    nph = km.getPhotonNums(pipeline.colourFilter, pipeline.mdh)

    ph_range = 6 * nph.mean()
    n, bins = np.histogram(nph, np.linspace(0, ph_range, 100))
    plt.bar(bins[:-1], n, width=bins[1] - bins[0], alpha=0.4, color=color)

    return nph
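# Hedged usage sketch (assumption, not from the original source): overlay photon-number
# histograms for each colour channel of an open pipeline `pipe`; the channel-to-colour
# assignment below is hypothetical.
def _example_plot_channel_photons(pipe, colours=('green', 'red', 'blue')):
    cur = pipe.colourFilter.currentColour
    for chan, col in zip(pipe.colourFilter.getColourChans(), colours):
        pipe.colourFilter.setColour(chan)
        plotphotons(pipe, color=col)
    pipe.colourFilter.setColour(cur)  # restore the previous channel selection
    plt.xlabel('photons / event')
    plt.ylabel('events')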
def OpenFile(self, filename='', ds=None, **kwargs):
    """Open a file - accepts optional keyword arguments for use with files
    saved as .txt and .mat. These are:

    FieldNames: a list of names for the fields in the text file or matlab variable.
    VarName: the name of the variable in the .mat file which contains the data.
    SkipRows: Number of header rows to skip for txt file data
    PixelSize: Pixel size if not in nm
    """
    #close any files we had open previously
    while len(self.filesToClose) > 0:
        self.filesToClose.pop().close()

    #clear our state
    self.dataSources.clear()
    if 'zm' in dir(self):
        del self.zm
    self.filter = None
    self.mapping = None
    self.colourFilter = None
    self.events = None

    self.mdh = MetaDataHandler.NestedClassMDHandler()

    self.filename = filename

    if ds is None:
        #load from file
        ds = self._ds_from_file(filename, **kwargs)

    #wrap the data source with a mapping so we can fiddle with things
    #e.g. combining z position and focus
    mapped_ds = tabular.MappingFilter(ds)

    if 'PixelSize' in kwargs.keys():
        mapped_ds.addVariable('pixelSize', kwargs['PixelSize'])
        mapped_ds.setMapping('x', 'x*pixelSize')
        mapped_ds.setMapping('y', 'y*pixelSize')

    #extract information from any events
    self.ev_mappings, self.eventCharts = _processEvents(mapped_ds, self.events, self.mdh)

    #Fit module specific filter settings
    if 'Analysis.FitModule' in self.mdh.getEntryNames():
        fitModule = self.mdh['Analysis.FitModule']

        #print 'fitModule = %s' % fitModule

        if 'Interp' in fitModule:
            self.filterKeys['A'] = (5, 100000)

        if 'LatGaussFitFR' in fitModule:
            mapped_ds.addColumn('nPhotons', getPhotonNums(mapped_ds, self.mdh))

        if 'SplitterFitFNR' in fitModule:
            mapped_ds.addColumn('nPhotonsg', getPhotonNums({'A': mapped_ds['fitResults_Ag'],
                                                            'sig': mapped_ds['fitResults_sigma']}, self.mdh))
            mapped_ds.addColumn('nPhotonsr', getPhotonNums({'A': mapped_ds['fitResults_Ar'],
                                                            'sig': mapped_ds['fitResults_sigma']}, self.mdh))
            mapped_ds.setMapping('nPhotons', 'nPhotonsg+nPhotonsr')

        if fitModule == 'SplitterShiftEstFR':
            self.filterKeys['fitError_dx'] = (0, 10)
            self.filterKeys['fitError_dy'] = (0, 10)

    #self._get_dye_ratios_from_metadata()

    self.addDataSource('Localizations', mapped_ds)

    # Retrieve or estimate image bounds
    if False:  # 'imgBounds' in kwargs.keys():
        self.imageBounds = kwargs['imgBounds']
    elif (not ('scanx' in mapped_ds.keys() or 'scany' in mapped_ds.keys())) \
            and 'Camera.ROIWidth' in self.mdh.getEntryNames():
        self.imageBounds = ImageBounds.extractFromMetadata(self.mdh)
    else:
        self.imageBounds = ImageBounds.estimateFromSource(mapped_ds)

    from PYME.recipes.localisations import ProcessColour
    from PYME.recipes.tablefilters import FilterTable

    #we keep a copy of this so that the colour panel can find it.
    self.colour_mapper = ProcessColour(self.recipe, input='Localizations', output='colour_mapped')
    self.recipe.add_module(self.colour_mapper)
    self.recipe.add_module(FilterTable(self.recipe, inputName='colour_mapped',
                                       outputName='filtered_localizations',
                                       filters={k: list(v) for k, v in self.filterKeys.items()
                                                if k in mapped_ds.keys()}))
    self.recipe.execute()
    self.filterKeys = {}
    self.selectDataSource('filtered_localizations')  #NB - this rebuilds the pipeline
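# Hedged usage sketch (assumption, not from the original source): the keyword arguments
# documented in the docstring above might be used like this when opening a delimited text
# file that carries no metadata. The file name, field names, and pixel size are hypothetical.
def _example_open_csv(pipe):
    pipe.OpenFile('localisations.csv',
                  FieldNames=['x', 'y', 't', 'error_x'],
                  SkipRows=1,
                  PixelSize=70.)  # nm per pixel; only needed if x/y are stored in pixels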
def OpenFile(self, filename='', ds=None, **kwargs):
    '''Open a file - accepts optional keyword arguments for use with files
    saved as .txt and .mat. These are:

    FieldNames: a list of names for the fields in the text file or matlab variable.
    VarName: the name of the variable in the .mat file which contains the data.
    SkipRows: Number of header rows to skip for txt file data
    PixelSize: Pixel size if not in nm
    '''
    #close any files we had open previously
    while len(self.filesToClose) > 0:
        self.filesToClose.pop().close()

    #clear our state
    self.dataSources = []
    if 'zm' in dir(self):
        del self.zm
    self.filter = None
    self.mapping = None
    self.colourFilter = None
    self.events = None

    self.mdh = MetaDataHandler.NestedClassMDHandler()

    self.filename = filename

    if ds is not None:
        self.selectedDataSource = ds
        self.dataSources.append(ds)
    elif os.path.splitext(filename)[1] == '.h5r':
        try:
            self.selectedDataSource = inpFilt.h5rSource(filename)
            self.dataSources.append(self.selectedDataSource)

            self.filesToClose.append(self.selectedDataSource.h5f)

            if 'DriftResults' in self.selectedDataSource.h5f.root:
                self.dataSources.append(inpFilt.h5rDSource(self.selectedDataSource.h5f))

                if len(self.selectedDataSource['x']) == 0:
                    self.selectedDataSource = self.dataSources[-1]
        except:
            #fallback to catch series that only have drift data
            self.selectedDataSource = inpFilt.h5rDSource(filename)
            self.dataSources.append(self.selectedDataSource)

            self.filesToClose.append(self.selectedDataSource.h5f)

        #catch really old files which don't have any metadata
        if 'MetaData' in self.selectedDataSource.h5f.root:
            self.mdh = MetaDataHandler.HDFMDHandler(self.selectedDataSource.h5f)

        if ('Events' in self.selectedDataSource.h5f.root) and ('StartTime' in self.mdh.keys()):
            self.events = self.selectedDataSource.h5f.root.Events[:]

    elif os.path.splitext(filename)[1] == '.mat':
        #matlab file
        ds = inpFilt.matfileSource(filename, kwargs['FieldNames'], kwargs['VarName'])
        self.selectedDataSource = ds
        self.dataSources.append(ds)

    elif os.path.splitext(filename)[1] == '.csv':
        #special case for csv files - tell np.loadtxt to use a comma rather than whitespace as a delimiter
        if 'SkipRows' in kwargs.keys():
            ds = inpFilt.textfileSource(filename, kwargs['FieldNames'], delimiter=',',
                                        skiprows=kwargs['SkipRows'])
        else:
            ds = inpFilt.textfileSource(filename, kwargs['FieldNames'], delimiter=',')
        self.selectedDataSource = ds
        self.dataSources.append(ds)

    else:
        #assume it's a tab (or other whitespace) delimited text file
        if 'SkipRows' in kwargs.keys():
            ds = inpFilt.textfileSource(filename, kwargs['FieldNames'], skiprows=kwargs['SkipRows'])
        else:
            ds = inpFilt.textfileSource(filename, kwargs['FieldNames'])
        self.selectedDataSource = ds
        self.dataSources.append(ds)

    #wrap the data source with a mapping so we can fiddle with things
    #e.g. combining z position and focus
    self.inputMapping = inpFilt.mappingFilter(self.selectedDataSource)
    self.selectedDataSource = self.inputMapping
    self.dataSources.append(self.inputMapping)

    if 'PixelSize' in kwargs.keys():
        self.selectedDataSource.pixelSize = kwargs['PixelSize']
        self.selectedDataSource.setMapping('x', 'x*pixelSize')
        self.selectedDataSource.setMapping('y', 'y*pixelSize')

    #Retrieve or estimate image bounds
    if 'Camera.ROIWidth' in self.mdh.getEntryNames():
        x0 = 0
        y0 = 0

        x1 = self.mdh.getEntry('Camera.ROIWidth') * 1e3 * self.mdh.getEntry('voxelsize.x')
        y1 = self.mdh.getEntry('Camera.ROIHeight') * 1e3 * self.mdh.getEntry('voxelsize.y')

        if 'Splitter' in self.mdh.getEntry('Analysis.FitModule'):
            if 'Splitter.Channel0ROI' in self.mdh.getEntryNames():
                rx0, ry0, rw, rh = self.mdh['Splitter.Channel0ROI']
                x1 = rw * 1e3 * self.mdh.getEntry('voxelsize.x')
                y1 = rh * 1e3 * self.mdh.getEntry('voxelsize.y')
            else:
                y1 = y1 / 2

        self.imageBounds = ImageBounds(x0, y0, x1, y1)
    else:
        self.imageBounds = ImageBounds.estimateFromSource(self.selectedDataSource)

    #extract information from any events
    self._processEvents()

    #handle special cases which get detected by looking for the presence or
    #absence of certain variables in the data.
    if 'fitResults_Ag' in self.selectedDataSource.keys():
        #if we used the splitter set up a number of mappings e.g. total amplitude and ratio
        self._processSplitter()

    if 'fitResults_ratio' in self.selectedDataSource.keys():
        #if we used the splitter set up a number of mappings e.g. total amplitude and ratio
        self._processPriSplit()

    if 'fitResults_sigxl' in self.selectedDataSource.keys():
        #fast, quickpalm like astigmatic fitting
        self.selectedDataSource.setMapping('sig', 'fitResults_sigxl + fitResults_sigyu')
        self.selectedDataSource.setMapping('sig_d', 'fitResults_sigxl - fitResults_sigyu')

        self.selectedDataSource.dsigd_dz = -30.
        self.selectedDataSource.setMapping('fitResults_z0', 'dsigd_dz*sig_d')

    if not 'y' in self.selectedDataSource.keys():
        self.selectedDataSource.setMapping('y', '10*t')

    #set up correction for foreshortening and z focus stepping
    if not 'foreShort' in dir(self.selectedDataSource):
        self.selectedDataSource.foreShort = 1.

    if not 'focus' in self.selectedDataSource.mappings.keys():
        self.selectedDataSource.focus = np.zeros(self.selectedDataSource['x'].shape)

    if 'fitResults_z0' in self.selectedDataSource.keys():
        self.selectedDataSource.setMapping('z', 'fitResults_z0 + foreShort*focus')
    elif not 'z' in self.selectedDataSource.keys():
        self.selectedDataSource.setMapping('z', 'foreShort*focus')

    #Fit module specific filter settings
    if 'Analysis.FitModule' in self.mdh.getEntryNames():
        fitModule = self.mdh['Analysis.FitModule']

        print('fitModule = %s' % fitModule)

        if 'Interp' in fitModule:
            self.filterKeys['A'] = (5, 100000)

        if 'LatGaussFitFR' in fitModule:
            self.selectedDataSource.nPhot = getPhotonNums(self.selectedDataSource, self.mdh)
            self.selectedDataSource.setMapping('nPhotons', 'nPhot')

        if fitModule == 'SplitterShiftEstFR':
            self.filterKeys['fitError_dx'] = (0, 10)
            self.filterKeys['fitError_dy'] = (0, 10)

    #remove any keys from the filter which are not present in the data
    for k in list(self.filterKeys.keys()):
        if not k in self.selectedDataSource.keys():
            self.filterKeys.pop(k)

    self.Rebuild()

    if 'Sample.Labelling' in self.mdh.getEntryNames() and 'gFrac' in self.selectedDataSource.keys():
        self.SpecFromMetadata()
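# Hedged usage sketch (assumption, not from the original source): opening a MATLAB results
# file with this older OpenFile requires FieldNames and VarName; the file name, field names,
# variable name, and pixel size below are all hypothetical.
def _example_open_mat(pipe):
    pipe.OpenFile('results.mat',
                  FieldNames=['x', 'y', 't', 'A', 'sig'],
                  VarName='fitResults',
                  PixelSize=70.)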