# Exemplo n.º 1 (score: 0)
def drift_correct(pipeline):
    """Correct sample drift using fiducial beads.

    Builds and executes a recipe chain that: filters candidate fiducial
    localisations, clusters them with DBSCAN to assign per-bead fiducial
    IDs, keeps only valid (id > 0) fiducials, and applies a fiducial-based
    drift correction to the pipeline's selected data source. On success the
    pipeline is switched to the 'corrected_localizations' output.

    Parameters
    ----------
    pipeline : PYME pipeline-like object exposing ``keys()``, ``mdh``,
        ``recipe``, ``selectedDataSourceKey`` and ``selectDataSource``.

    Notes
    -----
    Stores the ID-selection module in the module-level ``id_filter`` so
    other code can adjust the selected fiducials afterwards. Requires
    ``wx``, ``FilterTable``, ``DBSCANClustering``, ``FilterTableByIDs``
    and ``FiducialCorrection`` to already be in scope at module level.
    """
    global id_filter
    import numpy as np
    import matplotlib.pyplot as plt
    #pipeline=visgui.pipeline

    # Only keep well-localised points (small x error) as fiducial candidates.
    filters = {'error_x': [0, 10]}
    if 'sig' in pipeline.keys():
        # Ask the user for the bead diameter so we can predict the apparent
        # PSF width of a fiducial and gate the 'sig' column on it.
        dialog = wx.TextEntryDialog(None, 'Diameter (nm): ', 'Enter Fiducial Size', str(pipeline.mdh.getOrDefault('Analysis.FiducialSize', 1000)))

        # Fallback sigma window (nm) used if the dialog is cancelled.
        sig = [330., 370.]

        if dialog.ShowModal() == wx.ID_OK:
            size = float(dialog.GetValue())
            sigE = float(np.sqrt((size/(np.sqrt(2)*2.35))**2 + 135.**2))  # Expected std of the bead + expected std of psf
            # Accept +/- 5% around the expected width.
            sig = [0.95*sigE, 1.05*sigE]

        filters['sig'] = sig

    if 'fitError_z0' in pipeline.keys():
        # 3D data: additionally gate on axial fit error (nm).
        filters['fitError_z0'] = [0,30]

    recipe = pipeline.recipe
    filt_fiducials = FilterTable(recipe, inputName='Fiducials',
                                  outputName='filtered_fiducials', filters=filters)
    
    # Let the user review/tweak the filter thresholds before running.
    filt_fiducials.configure_traits(kind='modal')
    #print('Adding fiducial filter module')
    #recipe.add_module(filt_fiducials)
    
    # Group localisations into individual beads; each cluster gets a fiducialID.
    dbs = DBSCANClustering(recipe,inputName='filtered_fiducials', outputName='clumped_fiducials', columns=['x', 'y'],
                                       searchRadius=500, minClumpSize=10, clumpColumnName='fiducialID')
    recipe.add_modules_and_execute([filt_fiducials, dbs])
    
    
    fids = recipe.namespace['clumped_fiducials']
    
    # id <= 0 marks noise / unclustered points; keep only real beads.
    fid_ids = [int(id) for id in set(fids['fiducialID']) if id > 0]
        
    id_filter = FilterTableByIDs(recipe, inputName='clumped_fiducials', outputName='selected_fiducials',
                      idColumnName='fiducialID', ids = fid_ids)
    
    #recipe.add_module(id_filter)
    
    fc = FiducialCorrection(recipe, inputLocalizations=pipeline.selectedDataSourceKey,
                                         inputFiducials='selected_fiducials',
                                         outputName='corrected_localizations', outputFiducials='corrected_fiducials')
    
    recipe.add_modules_and_execute([id_filter, fc])
    pipeline.selectDataSource('corrected_localizations')
    def OnFold(self, event=None):
        """Fold localisations back onto a single field of view.

        See multiview.foldX; at this point x is assumed to originate at the
        corner of the concatenated frame. A probe key is mapped into the data
        source to designate the colour channel, although the channel only
        becomes selectable in the GUI once mergeClumps has run (so that this
        module can also be used with ratio-metric data).

        Parameters
        ----------
        event : optional
            GUI event (unused); metadata is required.
        """

        from PYME.recipes.multiview import Fold
        from PYME.recipes.tablefilters import FilterTable

        pipe = self.pipeline
        recipe = pipe.recipe

        #TODO - move me to building the pipeline
        # Pre-filter on lateral localisation error before folding.
        error_filters = {'error_x': [0, 30.], 'error_y': [0, 30.]}
        error_filter = FilterTable(recipe,
                                   inputName=pipe.selectedDataSourceKey,
                                   outputName='filtered_input',
                                   filters=error_filters)
        recipe.add_module(error_filter)

        fold_mod = Fold(recipe, input_name='filtered_input',
                        output_name='folded')
        recipe.add_module(fold_mod)

        recipe.execute()
        pipe.selectDataSource('folded')
# Exemplo n.º 3 (score: 0)
    def OnSelROIFT(self, event):
        """Filter the pipeline down to the ROI currently selected on the GL canvas."""
        canvas = self.visFr.glCanvas
        try:
            # old glcanvas API
            start = canvas.selectionStart
            finish = canvas.selectionFinish
        except AttributeError:
            # new glcanvas API
            start = canvas.selectionSettings.start
            finish = canvas.selectionSettings.finish

        (x0, y0), (x1, y1) = start, finish

        # must ensure all values are eventually scalars to avoid issue with recipe yaml output
        roi_filters = {
            'x': [float(min(x0, x1)), float(max(x0, x1))],
            'y': [float(min(y0, y1)), float(max(y0, y1))],
        }

        pipeline = self.visFr.pipeline
        recipe = pipeline.recipe
        roi_table = FilterTable(recipe, inputName=pipeline.selectedDataSourceKey,
                                outputName='selectedROI', filters=roi_filters)

        # Let the user confirm/adjust the bounds; bail out on cancel.
        if not roi_table.configure_traits(kind='modal'):
            return

        recipe.add_module(roi_table)
        recipe.execute()
    def OnTrackMolecules(self, event=None):
        """Link localisations into molecular tracks and display them.

        Prompts the user to configure a FindClumps tracking module, chains a
        track-length filter onto its output, selects the filtered result as
        the active data source, and adds a track render layer. If a tracking
        module from a previous call is still in the recipe, its parameters
        are edited instead of tracking again.
        """
        import PYME.Analysis.points.DeClump.deClumpGUI as deClumpGUI
        import PYME.Analysis.Tracking.trackUtils as trackUtils
        from PYME.LMVis.layers.tracks import TrackRenderLayer

        from PYME.recipes import tracking
        from PYME.recipes.tablefilters import FilterTable

        visFr = self.visFr
        pipeline = visFr.pipeline # type: PYME.LMVis.pipeline.Pipeline
        recipe = pipeline.recipe

        if hasattr(self, '_mol_tracking_module') and (self._mol_tracking_module in recipe.modules):
            # Already tracked - re-open the existing module's dialog instead.
            wx.MessageBox('This dataset has already been tracked, edit parameters of existing tracking rather than starting again', 'Error', wx.OK|wx.ICON_ERROR, visFr)
            self._mol_tracking_module.configure_traits(kind='modal')
            return

        output_name = 'with_tracks'
        if output_name in recipe.namespace:
            # e.g. feature-based tracking elsewhere in the recipe already
            # produced a 'with_tracks' output - pick a fresh name.
            output_name = pipeline.new_ds_name('with_tracks')
            wx.MessageBox("Another module has already created a 'with_tracks' output, using the nonstandard name '%s' instead" % output_name, 'Warning', wx.OK|wx.ICON_WARNING, visFr)

        tracker = tracking.FindClumps(recipe,
                                      inputName=pipeline.selectedDataSourceKey,
                                      outputName=output_name,
                                      timeWindow=5,
                                      clumpRadiusVariable='1.0',
                                      clumpRadiusScale=250.,
                                      minClumpSize=50)

        if not tracker.configure_traits(kind='modal'):
            return

        self._mol_tracking_module = tracker
        filtered_name = 'filtered_{}'.format(tracker.outputName)
        # Dynamic filtering on track length, etc.
        length_filter = FilterTable(recipe,
                                    inputName=tracker.outputName,
                                    outputName=filtered_name,
                                    filters={'clumpSize': [tracker.minClumpSize, 1e6]})
        recipe.add_modules_and_execute([tracker, length_filter])

        pipeline.selectDataSource(filtered_name)
        visFr.add_layer(TrackRenderLayer(pipeline, dsname=filtered_name, method='tracks'))
# Exemplo n.º 5 (score: 0)
    def OpenFile(self, filename='', ds=None, clobber_recipe=True, **kwargs):
        """Open a file - accepts optional keyword arguments for use with files
        saved as .txt and .mat. These are:
            
            FieldNames: a list of names for the fields in the text file or
                        matlab variable.
            VarName:    the name of the variable in the .mat file which 
                        contains the data.
            SkipRows:   Number of header rows to skip for txt file data
            
            PixelSize:  Pixel size if not in nm

        ds may be passed to supply an already-loaded data source instead of
        reading from `filename`. clobber_recipe=False preserves processing
        modules already present in the recipe (see note inline).
        """

        #close any files we had open previously
        while len(self.filesToClose) > 0:
            self.filesToClose.pop().close()

        # clear our state
        # nb - equivalent to clearing recipe namespace
        self.dataSources.clear()

        if clobber_recipe:
            # clear any processing modules from the pipeline
            # call with clobber_recipe = False in a 'Open a new file with the processing pipeline I've set up' use case
            # TODO: Add an "File-->Open [preserving recipe]" menu option or similar
            self.recipe.modules = []

        if 'zm' in dir(self):
            del self.zm
        self.filter = None
        self.mapping = None
        self.colourFilter = None
        self.events = None
        self.mdh = MetaDataHandler.NestedClassMDHandler()

        self.filename = filename

        if ds is None:
            from PYME.IO import unifiedIO  # TODO - what is the launch time penalty here for importing clusterUI and finding a nameserver?

            # load from file(/cluster, downloading a copy of the file if needed)
            with unifiedIO.local_or_temp_filename(filename) as fn:
                # TODO - check that loading isn't lazy (i.e. we need to make a copy of data in memory whilst in the
                # context manager in order to be safe with unifiedIO and cluster data). From a quick look, it would seem
                # that _ds_from_file() copies the data, but potentially keeps the file open which could be problematic.
                # This won't effect local file loading even if loading is lazy (i.e. shouldn't cause a regression)
                ds = self._ds_from_file(fn, **kwargs)
                self.events = getattr(ds, 'events', None)
                self.mdh.copyEntriesFrom(ds.mdh)

        # skip the MappingFilter wrapping, etc. in self.addDataSource and add this datasource as-is
        self.dataSources['FitResults'] = ds

        # Fit module specific filter settings
        # TODO - put all the defaults here and use a local variable rather than in __init__ (self.filterKeys is largely an artifact of pre-recipe based pipeline)
        if 'Analysis.FitModule' in self.mdh.getEntryNames():
            fitModule = self.mdh['Analysis.FitModule']
            if 'Interp' in fitModule:
                # interpolation-based fits: gate on amplitude
                self.filterKeys['A'] = (5, 100000)
            if fitModule == 'SplitterShiftEstFR':
                # shift estimation: gate on shift fit errors
                self.filterKeys['fitError_dx'] = (0, 10)
                self.filterKeys['fitError_dy'] = (0, 10)

        if clobber_recipe:
            from PYME.recipes.localisations import ProcessColour, Pipelineify
            from PYME.recipes.tablefilters import FilterTable

            # convert raw fit results into standard localisation columns
            add_pipeline_variables = Pipelineify(
                self.recipe,
                inputFitResults='FitResults',
                pixelSizeNM=kwargs.get('PixelSize', 1.),
                outputLocalizations='Localizations')
            self.recipe.add_module(add_pipeline_variables)

            #self._get_dye_ratios_from_metadata()

            colour_mapper = ProcessColour(self.recipe,
                                          input='Localizations',
                                          output='colour_mapped')
            self.recipe.add_module(colour_mapper)
            # apply the fit-module specific filters collected above; only
            # keys actually present in the data source are used
            self.recipe.add_module(
                FilterTable(self.recipe,
                            inputName='colour_mapped',
                            outputName='filtered_localizations',
                            filters={
                                k: list(v)
                                for k, v in self.filterKeys.items()
                                if k in ds.keys()
                            }))
        else:
            logger.warn(
                'Opening file without clobbering recipe, filter and ratiometric colour settings might not be handled properly'
            )
            # FIXME - should we update filter keys and/or make the filter more robust
            # FIXME - do we need to do anything about colour settings?

        self.recipe.execute()
        # filterKeys have been baked into the recipe (or deliberately
        # skipped); reset so they are not applied twice
        self.filterKeys = {}
        if 'filtered_localizations' in self.dataSources.keys():
            self.selectDataSource(
                'filtered_localizations')  #NB - this rebuilds the pipeline
        else:
            # TODO - replace / remove this fallback with something better. This is currently required
            # when we use/abuse the pipeline in dh5view, but that should ideally be replaced with
            # something cleaner. This (and case above) should probably also be conditional on `clobber_recipe`
            # as if opening with an existing recipe we would likely want to keep selectedDataSource constant as well.
            self.selectDataSource('FitResults')

        # FIXME - we do this already in pipelinify, maybe we can avoid doubling up?
        self.ev_mappings, self.eventCharts = _processEvents(
            ds, self.events, self.mdh)  # extract information from any events
        # Retrieve or estimate image bounds
        if False:  # 'imgBounds' in kwargs.keys():
            # TODO - why is this disabled? Current usage would appear to be when opening from LMAnalysis
            # during real-time localization, to force image bounds to match raw data, but also potentially useful
            # for other scenarios where metadata is not fully present.
            self.imageBounds = kwargs['imgBounds']
        elif ('scanx' not in self.selectedDataSource.keys()
              or 'scany' not in self.selectedDataSource.keys()
              ) and 'Camera.ROIWidth' in self.mdh.getEntryNames():
            # not a scanned acquisition and camera ROI known: use metadata
            self.imageBounds = ImageBounds.extractFromMetadata(self.mdh)
        else:
            # fall back to estimating bounds from the point data itself
            self.imageBounds = ImageBounds.estimateFromSource(
                self.selectedDataSource)
    def dye_kinetics(self, event):
        """Build and run the dye-kinetics analysis recipe.

        Presents a DyeKineticsInit configuration dialog and, if confirmed,
        chains the full analysis: clump detection, clump-size filtering,
        clump merging, spatial clustering (HDBSCAN when available, DBSCAN
        otherwise), blink on/off duration extraction, kinetics filtering and
        switching-rate fitting. On completion the pipeline is switched to
        the 'filtered_kinetics' data source and the elapsed wall time is
        printed.

        Parameters
        ----------
        event : GUI event (unused).
        """
        import sys
        from time import time

        t0 = time()

        self.pipeline = self.visFr.pipeline

        recipe = self.visFr.pipeline.recipe

        script_init = kin_rcps.DyeKineticsInit(
            recipe,
            inputName=self.pipeline.selectedDataSourceKey,
            outputName='localisations',
            clusteringRadius=40.0,
            blinkRadius=20.0,
            clusterColumnName='clusterID',
            fitHistograms=True,
            blinkGapTolerance=0,
            onTimesColumnName='on_times',
            offTimesColumnName='off_times',
            minimumClusterSize=2,
            minimumKeptBlinkSize=2,
            # NOTE(review): parameter is named 'minimumOffTimeInSecondsFit' here
            # but read back below as 'minimumOffTimeInSecondsToFit' - verify
            # against the DyeKineticsInit trait definitions.
            minimumOffTimeInSecondsFit=0,
            maximumOffTimeInSecondsToFit=10000.0,
            offTimeFitBinWidthInSeconds=1)

        if script_init.configure_traits(kind='modal'):

            # Defer execution until the whole chain is assembled.
            recipe.trait_set(execute_on_invalidation=False)

            recipe.add_module(script_init)

            # Group localisations into temporal blinks.
            recipe.add_module(
                kin_rcps.FindClumps_DK(
                    recipe,
                    inputName='localisations',
                    outputName='with_clumps',
                    timeWindow=script_init.blinkGapTolerance,
                    clumpRadiusVariable=u'1.0',
                    clumpRadiusScale=script_init.blinkRadius,
                    minClumpSize=script_init.minimumKeptBlinkSize))

            # Use an inclusive lower bound one below the configured minimum.
            minblink = script_init.minimumKeptBlinkSize

            minblink -= 1

            recipe.add_module(
                FilterTable(recipe,
                            inputName='with_clumps',
                            outputName='good_clumps',
                            filters={'clumpSize': [minblink, sys.maxsize]}))

            recipe.add_module(
                kin_rcps.MergeClumpsDyeKinetics(recipe,
                                                inputName='good_clumps',
                                                outputName='coalesced',
                                                # NOTE(review): 'lebelKey' looks
                                                # like a typo for 'labelKey' -
                                                # confirm against the module API
                                                # before renaming.
                                                lebelKey='clumpIndex'))

            recipe.add_module(
                FilterTable(recipe,
                            inputName='coalesced',
                            outputName='filtered_coalesced',
                            filters={'clumpSize': [minblink, sys.maxsize]}))

            # Prefer HDBSCAN when installed; fall back to plain DBSCAN.
            try:
                import hdbscan
            except ImportError:  # was a bare except; only ImportError is expected here
                print('HDBScan not installed, defaulting to regular DBScan')
                clusterer = dbscan(
                    recipe,
                    inputName='filtered_coalesced',
                    searchRadius=script_init.clusteringRadius,
                    minClumpSize=script_init.minimumClusterSize,
                    clumpColumnName=script_init.clusterColumnName,
                    outputName='clusters')

            else:
                print('HDBScan installed, clustering with that')
                clusterer = kin_rcps.HDBSCANClustering(
                    recipe,
                    input_name='filtered_coalesced',
                    search_radius=script_init.clusteringRadius,
                    min_clump_size=script_init.minimumClusterSize,
                    clump_column_name=script_init.clusterColumnName,
                    output_name='clusters')

            recipe.add_module(clusterer)

            # Discard noise points (cluster id < 0).
            recipe.add_module(
                FilterTable(recipe,
                            inputName='clusters',
                            outputName='filt_clusters',
                            filters={
                                script_init.clusterColumnName:
                                [0.0, sys.maxsize]
                            }))

            recipe.add_module(
                kin_rcps.FindBlinkStateDurations(
                    recipe,
                    inputName='filt_clusters',
                    outputName='unfiltered_kinetics',
                    labelKey=script_init.clusterColumnName,
                    onTimesColName=script_init.onTimesColumnName,
                    offTimesColName=script_init.offTimesColumnName))

            recipe.add_module(
                FilterTable(recipe,
                            inputName='unfiltered_kinetics',
                            outputName='filtered_kinetics',
                            filters={
                                script_init.offTimesColumnName:
                                [0.0, sys.maxsize],
                                script_init.onTimesColumnName:
                                [minblink, sys.maxsize]
                            }))

            recipe.add_module(
                kin_rcps.FitSwitchingRates(
                    recipe,
                    inputName='filtered_kinetics',
                    outputName='kinetics_out',
                    onTimesColName=script_init.onTimesColumnName,
                    offTimesColName=script_init.offTimesColumnName,
                    minOffTimeSecondsToFit=script_init.
                    minimumOffTimeInSecondsToFit,
                    maxOffTimeSecondsToFit=script_init.
                    maximumOffTimeInSecondsToFit,
                    offTimesFitBinSizeInSeconds=script_init.
                    offTimeFitBinWidthInSeconds))
            recipe.execute()
            self.visFr.pipeline.selectDataSource('filtered_kinetics')

        # Bug fix: was a Python 2 print statement ("print 'RUNTIME OF ' ..."),
        # a SyntaxError under Python 3 (the rest of this function already uses
        # print() calls).
        print('RUNTIME OF ' + str(time() - t0))
# Exemplo n.º 7 (score: 0)
    def OpenFile(self, filename= '', ds = None, **kwargs):
        """Open a file - accepts optional keyword arguments for use with files
        saved as .txt and .mat. These are:
            
            FieldNames: a list of names for the fields in the text file or
                        matlab variable.
            VarName:    the name of the variable in the .mat file which 
                        contains the data.
            SkipRows:   Number of header rows to skip for txt file data
            
            PixelSize:  Pixel size if not in nm

        ds may be passed to supply an already-loaded data source instead of
        reading from `filename`.
        """
        
        #close any files we had open previously
        while len(self.filesToClose) > 0:
            self.filesToClose.pop().close()
        
        #clear our state
        self.dataSources.clear()
        if 'zm' in dir(self):
            del self.zm
        self.filter = None
        self.mapping = None
        self.colourFilter = None
        self.events = None
        self.mdh = MetaDataHandler.NestedClassMDHandler()
        
        self.filename = filename
        
        if ds is None:
            #load from file
            ds = self._ds_from_file(filename, **kwargs)

            
        #wrap the data source with a mapping so we can fiddle with things
        #e.g. combining z position and focus 
        mapped_ds = tabular.MappingFilter(ds)

        
        if 'PixelSize' in kwargs.keys():
            # input positions are in pixels: scale x/y to nm via a mapping
            mapped_ds.addVariable('pixelSize', kwargs['PixelSize'])
            mapped_ds.setMapping('x', 'x*pixelSize')
            mapped_ds.setMapping('y', 'y*pixelSize')

        #extract information from any events
        self.ev_mappings, self.eventCharts = _processEvents(mapped_ds, self.events, self.mdh)



        #Fit module specific filter settings        
        if 'Analysis.FitModule' in self.mdh.getEntryNames():
            fitModule = self.mdh['Analysis.FitModule']
            
            #print 'fitModule = %s' % fitModule
            
            if 'Interp' in fitModule:
                # interpolation-based fits: gate on amplitude
                self.filterKeys['A'] = (5, 100000)
            
            if 'LatGaussFitFR' in fitModule:
                # derive photon counts from the Gaussian fit results
                mapped_ds.addColumn('nPhotons', getPhotonNums(mapped_ds, self.mdh))

            if 'SplitterFitFNR' in fitModule:
                # ratiometric splitter fits: per-channel photon counts, summed
                mapped_ds.addColumn('nPhotonsg', getPhotonNums({'A': mapped_ds['fitResults_Ag'], 'sig': mapped_ds['fitResults_sigma']}, self.mdh))
                mapped_ds.addColumn('nPhotonsr', getPhotonNums({'A': mapped_ds['fitResults_Ar'], 'sig': mapped_ds['fitResults_sigma']}, self.mdh))
                mapped_ds.setMapping('nPhotons', 'nPhotonsg+nPhotonsr')

            if fitModule == 'SplitterShiftEstFR':
                # shift estimation: gate on shift fit errors
                self.filterKeys['fitError_dx'] = (0,10)
                self.filterKeys['fitError_dy'] = (0,10)

        #self._get_dye_ratios_from_metadata()

        self.addDataSource('Localizations', mapped_ds)

        # Retrieve or estimate image bounds
        if False:  # 'imgBounds' in kwargs.keys():
            self.imageBounds = kwargs['imgBounds']
        elif (not (
                'scanx' in mapped_ds.keys() or 'scany' in mapped_ds.keys())) and 'Camera.ROIWidth' in self.mdh.getEntryNames():
            # not a scanned acquisition and camera ROI known: use metadata
            self.imageBounds = ImageBounds.extractFromMetadata(self.mdh)
        else:
            # fall back to estimating bounds from the point data itself
            self.imageBounds = ImageBounds.estimateFromSource(mapped_ds)

        from PYME.recipes.localisations import ProcessColour
        from PYME.recipes.tablefilters import FilterTable
        
        self.colour_mapper = ProcessColour(self.recipe, input='Localizations', output='colour_mapped')
        #we keep a copy of this so that the colour panel can find it.
        self.recipe.add_module(self.colour_mapper)
        # apply the fit-module specific filters collected above; only keys
        # actually present in the data source are used
        self.recipe.add_module(FilterTable(self.recipe, inputName='colour_mapped', outputName='filtered_localizations', filters={k:list(v) for k, v in self.filterKeys.items() if k in mapped_ds.keys()}))
        self.recipe.execute()
        # filterKeys are now baked into the recipe; reset to avoid re-applying
        self.filterKeys = {}
        self.selectDataSource('filtered_localizations') #NB - this rebuilds the pipeline