# imports assumed for this standalone snippet (PYME and scipy)
import numpy as np
from scipy.spatial import KDTree

from PYME.IO import tabular
from PYME.IO.MetaDataHandler import NestedClassMDHandler
from PYME.recipes import base, measurement, tablefilters


def test_IdentifyOverlappingROIs():
    mdh = NestedClassMDHandler()
    mdh['voxelsize.x'] = 0.115
    roi_size = 256
    roi_size_um = roi_size * mdh['voxelsize.x']
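    # two square ROIs of side roi_size_um can only overlap if their centres are closer
    # than the ROI diagonal, sqrt(2) * roi_size_um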
    max_distance = np.sqrt(2) * roi_size_um
    points = tabular.RandomSource(100 * roi_size_um, 100 * roi_size_um,
                                  int(1e3))
    points = tabular.MappingFilter(points, **{
        'x_um': 'x',
        'y_um': 'y'
    })  # pretend we defined points in um
    points.mdh = mdh

    recipe = base.ModuleCollection()
    recipe.add_module(
        measurement.IdentifyOverlappingROIs(roi_size_pixels=roi_size,
                                            output='mapped'))
    recipe.add_module(
        tablefilters.FilterTable(inputName='mapped',
                                 filters={'rejected': [-0.5, 0.5]},
                                 outputName='output'))
    recipe.namespace['input'] = points
    filtered = recipe.execute()

    positions = np.stack([filtered['x'], filtered['y']], axis=1)

    kdt = KDTree(positions)
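    # k=2: the first neighbour returned for each point is the point itself,
    # the second is the nearest other point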
    distances, indices = kdt.query(positions, k=2, p=2)
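    # every surviving point's nearest neighbour must be farther than the ROI diagonal,
    # i.e. no two remaining ROIs can overlap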
    assert (distances[:, 1] > max_distance).all()
Example #2
    def OnGetIDsfromImage(self, event, img=None):
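        """
        Label localisations with object IDs taken from a segmented image: each point is mapped
        to a pixel of the chosen image and given that pixel's value as 'objectID', and points
        that fall on the background (ID 0) are then filtered out.
        """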
        from PYME.DSView import dsviewer

        pipeline = self.pipeline

        if img is None:
            selection = selectWithDialog(dsviewer.openViewers.keys())
            if selection is not None:
                img = dsviewer.openViewers[selection].image

        if img is not None:
            with_ids = unique_name('with_ids', pipeline.dataSources.keys())
            valid_ids = unique_name('valid_ids', pipeline.dataSources.keys())
            recipe = pipeline.recipe
            mapp = tablefilters.Mapping(recipe, inputName=pipeline.selectedDataSourceKey,
                                        outputName=with_ids)
            recipe.add_module(mapp)
            recipe.execute()

            withIDs = recipe.namespace[with_ids]
           
            # convert localisation coordinates to pixel indices in the label image
            pixX = np.round((pipeline['x'] - img.imgBounds.x0) / img.pixelSize).astype('i')
            pixY = np.round((pipeline['y'] - img.imgBounds.y0) / img.pixelSize).astype('i')

            # keep only localisations that fall inside the image bounds
            ind = (pixX < img.data.shape[0]) * (pixY < img.data.shape[1]) * (pixX >= 0) * (pixY >= 0)

            ids = np.zeros_like(pixX)
            #assume there is only one channel
            ids[ind] = img.data[:,:,:,0].squeeze()[pixX[ind], pixY[ind]].astype('i')

            numPerObject, b = np.histogram(ids, np.arange(ids.max() + 1.5) + .5)

            withIDs.addColumn('objectID', ids)
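            # numPerObject[k - 1] is the localisation count for objectID k; rows with
            # objectID 0 wrap to the last bin here, but are removed by the filter below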
            withIDs.addColumn('NEvents', numPerObject[ids-1])

            recipe.add_module(tablefilters.FilterTable(recipe, inputName=with_ids, outputName=valid_ids,
                                                       filters={'objectID': [.5, sys.maxsize]}))
            recipe.execute()

            pipeline.selectDataSource(valid_ids)
            pipeline.Rebuild()
Example #3
    def OnFindMixedClusters(self, event=None):
        """
        FindMixedClusters first uses DBSCAN clustering on two color channels separately for denoising purposes, then
        after having removed noisy points, DBSCAN is run again on both channels combined, and the fraction of clumps
        containing both colors is determined.
        """
        from PYME.recipes import tablefilters, localisations
        from PYME.recipes.base import ModuleCollection
        import wx

        chans = self.pipeline.colourFilter.getColourChans()
        nchan = len(chans)
        if nchan < 2:
            raise RuntimeError(
                'FindMixedClusters requires at least two color channels')
        else:
            selectedChans = [0, 1]

        #rad_dlg = wx.NumberEntryDialog(None, 'Search Radius For Core Points', 'rad [nm]', 'rad [nm]', 125, 0, 9e9)
        #rad_dlg.ShowModal()
        searchRadius = 125.0  #rad_dlg.GetValue()
        #minPt_dlg = wx.NumberEntryDialog(None, 'Minimum Points To Be Core Point', 'min pts', 'min pts', 3, 0, 9e9)
        #minPt_dlg.ShowModal()
        minClumpSize = 3  #minPt_dlg.GetValue()

        #build a recipe programmatically
        rec = ModuleCollection()
        #split input according to colour channels
        rec.add_module(
            localisations.ExtractTableChannel(rec,
                                              inputName='input',
                                              outputName='chan0',
                                              channel=chans[0]))
        rec.add_module(
            localisations.ExtractTableChannel(rec,
                                              inputName='input',
                                              outputName='chan1',
                                              channel=chans[1]))

        #clump each channel
        rec.add_module(
            localisations.DBSCANClustering(rec,
                                           inputName='chan0',
                                           outputName='chan0_clumped',
                                           searchRadius=searchRadius,
                                           minClumpSize=minClumpSize))
        rec.add_module(
            localisations.DBSCANClustering(rec,
                                           inputName='chan1',
                                           outputName='chan1_clumped',
                                           searchRadius=searchRadius,
                                           minClumpSize=minClumpSize))

        #filter unclumped points
        rec.add_module(
            tablefilters.FilterTable(
                rec,
                inputName='chan0_clumped',
                outputName='chan0_cleaned',
                filters={'dbscanClumpID': [.5, sys.maxsize]}))
        rec.add_module(
            tablefilters.FilterTable(
                rec,
                inputName='chan1_clumped',
                outputName='chan1_cleaned',
                filters={'dbscanClumpID': [.5, sys.maxsize]}))

        #rejoin cleaned datasets
        rec.add_module(
            tablefilters.ConcatenateTables(rec,
                                           inputName0='chan0_cleaned',
                                           inputName1='chan1_cleaned',
                                           outputName='joined'))
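        # the 'concatSource' column added by ConcatenateTables records which input each row
        # came from (< 0.5 for chan0_cleaned, > 0.5 for chan1_cleaned) and is used below to
        # split clump IDs by channel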

        #clump on cleaned and rejoined data
        rec.add_module(
            localisations.DBSCANClustering(rec,
                                           inputName='joined',
                                           outputName='output',
                                           searchRadius=searchRadius,
                                           minClumpSize=minClumpSize))

        # populate the input before configuring so that the channel names are already available
        rec.namespace['input'] = self.pipeline.output
        if not rec.configure_traits(view=rec.pipeline_view, kind='modal'):
            return  #handle cancel

        #run recipe
        joined_clumps = rec.execute()

        joined_clump_IDs = np.unique(joined_clumps['dbscanClumpID'])
        joined_clump_IDs = joined_clump_IDs[joined_clump_IDs > .5]  # reject unclumped points

        chan0_clump_IDs = np.unique(
            joined_clumps['dbscanClumpID'][joined_clumps['concatSource'] < .5])
        chan0_clump_IDs = chan0_clump_IDs[chan0_clump_IDs > .5]

        chan1_clump_IDs = np.unique(
            joined_clumps['dbscanClumpID'][joined_clumps['concatSource'] > .5])
        chan1_clump_IDs = chan1_clump_IDs[chan1_clump_IDs > .5]

        # clump IDs present in both channels correspond to mixed clumps
        both_chans_IDS = [c for c in chan0_clump_IDs if c in chan1_clump_IDs]

        n_total_clumps = len(joined_clump_IDs)

        print('Total clumps: %i' % n_total_clumps)
        c0Ratio = float(len(chan0_clump_IDs)) / n_total_clumps
        print('fraction clumps with channel %i present: %f' %
              (selectedChans[0], c0Ratio))
        self.colocalizationRatios['Channel%iin%i%i' %
                                  (selectedChans[0], selectedChans[0],
                                   selectedChans[1])] = c0Ratio

        c1Ratio = float(len(chan1_clump_IDs)) / n_total_clumps
        print('fraction clumps with channel %i present: %f' %
              (selectedChans[1], c1Ratio))
        self.colocalizationRatios['Channel%iin%i%i' %
                                  (selectedChans[1], selectedChans[0],
                                   selectedChans[1])] = c1Ratio

        bothChanRatio = float(len(both_chans_IDS)) / n_total_clumps
        print('fraction of clumps with both channel %i and %i present: %f' %
              (selectedChans[0], selectedChans[1], bothChanRatio))
        self.colocalizationRatios['mixedClumps%i%i' %
                                  tuple(selectedChans)] = bothChanRatio

        self._rec = rec
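
# Supplementary sketch (not part of the plugin): a minimal, hypothetical illustration of how
# the mixed-clump fraction above follows from the per-channel clump ID sets.
import numpy as np

chan0_clump_IDs = np.array([1, 2, 3, 5])  # clumps containing channel 0 points (made-up IDs)
chan1_clump_IDs = np.array([2, 3, 4, 6])  # clumps containing channel 1 points (made-up IDs)
joined_clump_IDs = np.union1d(chan0_clump_IDs, chan1_clump_IDs)  # every clump has at least one channel

mixed = np.intersect1d(chan0_clump_IDs, chan1_clump_IDs)  # clumps containing both colours
mixed_fraction = float(len(mixed)) / len(joined_clump_IDs)
print('fraction of mixed clumps: %f' % mixed_fraction)  # 2 of 6 clumps -> 0.333333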