    def execute(self, namespace):
        from PYME.IO.image import ImageBounds
        inp = namespace[self.inputLocalizations]
        if not isinstance(inp, tabular.ColourFilter):
            cf = tabular.ColourFilter(inp, None)

            print('Created colour filter with chans: %s' % cf.getColourChans())
            cf.mdh = inp.mdh
        else:
            cf = inp

        # default to taking the min and max localization coordinates as the image bounds
        imb = ImageBounds.estimateFromSource(inp)

        if self.zBoundsMode == 'min-max':
            self.zBounds[0], self.zBounds[1] = float(imb.z0), float(imb.z1)

        if self.xyBoundsMode == 'inherit' and getattr(inp, 'imageBounds', None) is not None:
            imb = inp.imageBounds
        elif self.xyBoundsMode == 'metadata':
            imb = ImageBounds.extractFromMetadata(inp.mdh)
        elif self.xyBoundsMode == 'manual':
            imb.x0, imb.y0, imb.x1, imb.y1 = self.manualXYBounds

        cf.imageBounds = imb

        # look up the renderer class by name and instantiate it on the colour-filtered data
        renderer = renderers.RENDERERS[str(self.renderingModule)](None, cf)

        namespace[self.outputImage] = renderer.Generate(self.trait_get())
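
A minimal usage sketch of the module above, assuming it is a PYME recipe module class (called RenderImage here purely for illustration) exposing the traits referenced in execute(); the datasource and trait values are placeholders:

    namespace = {'localizations': my_tabular_datasource}  # hypothetical tabular input
    mod = RenderImage(inputLocalizations='localizations',
                      renderingModule='Gaussian',  # assumed key into renderers.RENDERERS
                      xyBoundsMode='metadata',
                      outputImage='rendered')
    mod.execute(namespace)
    rendered = namespace['rendered']  # image returned by renderer.Generate()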
Example 2
    def OpenFile(self, filename='', ds=None, clobber_recipe=True, **kwargs):
        """Open a file - accepts optional keyword arguments for use with files
        saved as .txt and .mat. These are:
            
            FieldNames: a list of names for the fields in the text file or
                        matlab variable.
            VarName:    the name of the variable in the .mat file which 
                        contains the data.
            SkipRows:   Number of header rows to skip for txt file data
            
            PixelSize:  Pixel size if not in nm
            
        """

        #close any files we had open previously
        while len(self.filesToClose) > 0:
            self.filesToClose.pop().close()

        # clear our state
        # nb - equivalent to clearing recipe namespace
        self.dataSources.clear()

        if clobber_recipe:
            # clear any processing modules from the pipeline
            # call with clobber_recipe = False in a 'Open a new file with the processing pipeline I've set up' use case
            # TODO: Add an "File-->Open [preserving recipe]" menu option or similar
            self.recipe.modules = []

        if hasattr(self, 'zm'):
            del self.zm
        self.filter = None
        self.mapping = None
        self.colourFilter = None
        self.events = None
        self.mdh = MetaDataHandler.NestedClassMDHandler()

        self.filename = filename

        if ds is None:
            from PYME.IO import unifiedIO  # TODO - what is the launch time penalty here for importing clusterUI and finding a nameserver?

            # load from file(/cluster, downloading a copy of the file if needed)
            with unifiedIO.local_or_temp_filename(filename) as fn:
                # TODO - check that loading isn't lazy (i.e. we need to make a copy of data in memory whilst in the
                # context manager in order to be safe with unifiedIO and cluster data). From a quick look, it would seem
                # that _ds_from_file() copies the data, but potentially keeps the file open which could be problematic.
                # This won't affect local file loading even if loading is lazy (i.e. shouldn't cause a regression)
                ds = self._ds_from_file(fn, **kwargs)
                self.events = getattr(ds, 'events', None)
                self.mdh.copyEntriesFrom(ds.mdh)

        # skip the MappingFilter wrapping, etc. in self.addDataSource and add this datasource as-is
        self.dataSources['FitResults'] = ds

        # Fit module specific filter settings
        # TODO - put all the defaults here and use a local variable rather than in __init__ (self.filterKeys is largely an artifact of pre-recipe based pipeline)
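        # NB: filterKeys values are (min, max) ranges; those matching columns in the
        # datasource are converted into FilterTable filters further down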
        if 'Analysis.FitModule' in self.mdh.getEntryNames():
            fitModule = self.mdh['Analysis.FitModule']
            if 'Interp' in fitModule:
                self.filterKeys['A'] = (5, 100000)
            if fitModule == 'SplitterShiftEstFR':
                self.filterKeys['fitError_dx'] = (0, 10)
                self.filterKeys['fitError_dy'] = (0, 10)

        if clobber_recipe:
            from PYME.recipes.localisations import ProcessColour, Pipelineify
            from PYME.recipes.tablefilters import FilterTable

            add_pipeline_variables = Pipelineify(
                self.recipe,
                inputFitResults='FitResults',
                pixelSizeNM=kwargs.get('PixelSize', 1.),
                outputLocalizations='Localizations')
            self.recipe.add_module(add_pipeline_variables)

            #self._get_dye_ratios_from_metadata()

            colour_mapper = ProcessColour(self.recipe,
                                          input='Localizations',
                                          output='colour_mapped')
            self.recipe.add_module(colour_mapper)
            self.recipe.add_module(
                FilterTable(self.recipe,
                            inputName='colour_mapped',
                            outputName='filtered_localizations',
                            filters={
                                k: list(v)
                                for k, v in self.filterKeys.items()
                                if k in ds.keys()
                            }))
        else:
            logger.warning(
                'Opening file without clobbering recipe; filter and ratiometric colour settings might not be handled properly'
            )
            # FIXME - should we update filter keys and/or make the filter more robust
            # FIXME - do we need to do anything about colour settings?

        self.recipe.execute()
        self.filterKeys = {}
        if 'filtered_localizations' in self.dataSources.keys():
            self.selectDataSource(
                'filtered_localizations')  #NB - this rebuilds the pipeline
        else:
            # TODO - replace / remove this fallback with something better. This is currently required
            # when we use/abuse the pipeline in dh5view, but that should ideally be replaced with
            # something cleaner. This (and case above) should probably also be conditional on `clobber_recipe`
            # as if opening with an existing recipe we would likely want to keep selectedDataSource constant as well.
            self.selectDataSource('FitResults')

        # FIXME - we do this already in pipelinify, maybe we can avoid doubling up?
        self.ev_mappings, self.eventCharts = _processEvents(
            ds, self.events, self.mdh)  # extract information from any events
        # Retrieve or estimate image bounds
        if False:  # 'imgBounds' in kwargs.keys():
            # TODO - why is this disabled? Current usage would appear to be when opening from LMAnalysis
            # during real-time localization, to force image bounds to match raw data, but also potentially useful
            # for other scenarios where metadata is not fully present.
            self.imageBounds = kwargs['imgBounds']
        elif ('scanx' not in self.selectedDataSource.keys()
              or 'scany' not in self.selectedDataSource.keys()
              ) and 'Camera.ROIWidth' in self.mdh.getEntryNames():
            self.imageBounds = ImageBounds.extractFromMetadata(self.mdh)
        else:
            self.imageBounds = ImageBounds.estimateFromSource(
                self.selectedDataSource)
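
A minimal usage sketch, assuming `pipeline` is an instance of the pipeline class this method belongs to; filenames and keyword values are placeholders:

    pipeline.OpenFile('results.h5r')                    # typical fit-results file
    pipeline.OpenFile('locs.txt',
                      FieldNames=['x', 'y', 't', 'A'],  # column names for the text file
                      SkipRows=1,                       # skip one header row
                      PixelSize=110.)                   # nm per pixel, if x/y are in pixels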
Example 3
    def OpenFile(self, filename='', ds=None, **kwargs):
        """Open a file - accepts optional keyword arguments for use with files
        saved as .txt and .mat. These are:
            
            FieldNames: a list of names for the fields in the text file or
                        matlab variable.
            VarName:    the name of the variable in the .mat file which 
                        contains the data.
            SkipRows:   Number of header rows to skip for txt file data
            
            PixelSize:  Pixel size if not in nm
            
        """
        
        #close any files we had open previously
        while len(self.filesToClose) > 0:
            self.filesToClose.pop().close()
        
        #clear our state
        self.dataSources.clear()
        if hasattr(self, 'zm'):
            del self.zm
        self.filter = None
        self.mapping = None
        self.colourFilter = None
        self.events = None
        self.mdh = MetaDataHandler.NestedClassMDHandler()
        
        self.filename = filename
        
        if ds is None:
            #load from file
            ds = self._ds_from_file(filename, **kwargs)

        #wrap the data source with a mapping so we can fiddle with things,
        #e.g. combining z position and focus
        mapped_ds = tabular.MappingFilter(ds)
        if 'PixelSize' in kwargs:
            mapped_ds.addVariable('pixelSize', kwargs['PixelSize'])
            mapped_ds.setMapping('x', 'x*pixelSize')
            mapped_ds.setMapping('y', 'y*pixelSize')

        #extract information from any events
        self.ev_mappings, self.eventCharts = _processEvents(mapped_ds, self.events, self.mdh)

        #Fit module specific filter settings
        if 'Analysis.FitModule' in self.mdh.getEntryNames():
            fitModule = self.mdh['Analysis.FitModule']
            
            #print 'fitModule = %s' % fitModule
            
            if 'Interp' in fitModule:
                self.filterKeys['A'] = (5, 100000)
            
            if 'LatGaussFitFR' in fitModule:
                mapped_ds.addColumn('nPhotons', getPhotonNums(mapped_ds, self.mdh))

            if 'SplitterFitFNR' in fitModule:
                mapped_ds.addColumn('nPhotonsg', getPhotonNums({'A': mapped_ds['fitResults_Ag'], 'sig': mapped_ds['fitResults_sigma']}, self.mdh))
                mapped_ds.addColumn('nPhotonsr', getPhotonNums({'A': mapped_ds['fitResults_Ar'], 'sig': mapped_ds['fitResults_sigma']}, self.mdh))
                mapped_ds.setMapping('nPhotons', 'nPhotonsg+nPhotonsr')

            if fitModule == 'SplitterShiftEstFR':
                self.filterKeys['fitError_dx'] = (0,10)
                self.filterKeys['fitError_dy'] = (0,10)

        #self._get_dye_ratios_from_metadata()

        self.addDataSource('Localizations', mapped_ds)

        # Retrieve or estimate image bounds
        if False:  # 'imgBounds' in kwargs.keys():
            self.imageBounds = kwargs['imgBounds']
        elif ('scanx' not in mapped_ds.keys()
              and 'scany' not in mapped_ds.keys()) and 'Camera.ROIWidth' in self.mdh.getEntryNames():
            self.imageBounds = ImageBounds.extractFromMetadata(self.mdh)
        else:
            self.imageBounds = ImageBounds.estimateFromSource(mapped_ds)

        from PYME.recipes.localisations import ProcessColour
        from PYME.recipes.tablefilters import FilterTable
        
        self.colour_mapper = ProcessColour(self.recipe, input='Localizations', output='colour_mapped')
        #we keep a copy of this so that the colour panel can find it.
        self.recipe.add_module(self.colour_mapper)
        self.recipe.add_module(
            FilterTable(self.recipe,
                        inputName='colour_mapped',
                        outputName='filtered_localizations',
                        filters={
                            k: list(v)
                            for k, v in self.filterKeys.items()
                            if k in mapped_ds.keys()
                        }))
        self.recipe.execute()
        self.filterKeys = {}
        self.selectDataSource('filtered_localizations') #NB - this rebuilds the pipeline
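
To illustrate the MappingFilter rescaling used above in isolation - a minimal sketch, where raw_ds and the 110 nm pixel size are placeholders:

    from PYME.IO import tabular

    mapped = tabular.MappingFilter(raw_ds)  # wrap any tabular datasource
    mapped.addVariable('pixelSize', 110.)   # hypothetical nm-per-pixel value
    mapped.setMapping('x', 'x*pixelSize')   # x and y now evaluate in nm
    mapped.setMapping('y', 'y*pixelSize')

The mappings are applied when the columns are read, so downstream filters and renderers see coordinates in nm without the original datasource being modified.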