Example #1
    def execute(self, namespace):
        import numpy as np
        from PYME.IO import tabular
        from PYME.Analysis.points import fiducials

        locs = namespace[self.inputLocalizations]
        fids = namespace[self.inputFiducials]
        
        t_fid, fid_trajectory, clump_index = fiducials.extractAverageTrajectory(fids, clumpRadiusVar=self.clumpRadiusVar,
                                                        clumpRadiusMultiplier=float(self.clumpRadiusMultiplier),
                                                        timeWindow=int(self.timeWindow),
                                                        filter=self.temporalFilter, filterScale=float(self.temporalFilterScale))
        
        out = tabular.MappingFilter(locs)
        t_out = out['t']

        out_f = tabular.MappingFilter(fids)
        out_f.addColumn('clumpIndex', clump_index)
        t_out_f = out_f['t']

        for dim in fid_trajectory.keys():
            print(dim)
            out.addColumn('fiducial_{0}'.format(dim), np.interp(t_out, t_fid, fid_trajectory[dim]))
            out.setMapping(dim, '{0} - fiducial_{0}'.format(dim))

            out_f.addColumn('fiducial_{0}'.format(dim), np.interp(t_out_f, t_fid, fid_trajectory[dim]))
            out_f.setMapping(dim, '{0} - fiducial_{0}'.format(dim))

        # propagate metadata, if present
        try:
            out.mdh = locs.mdh
        except AttributeError:
            pass

        namespace[self.outputName] = out
        namespace[self.outputFiducials] = out_f
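The pattern above is the core MappingFilter idiom: add the interpolated trajectory as a real column, then redefine the coordinate as a lazily-evaluated string expression over existing columns. A minimal, self-contained sketch of that idiom with toy values (assuming a PYME installation):

from PYME.IO import tabular
import numpy as np

ds = tabular.MappingFilter({'t': np.arange(5.), 'x': np.linspace(0, 100, 5)})
ds.addColumn('fiducial_x', np.full(5, 10.0))  # stand-in drift trajectory
ds.setMapping('x', 'x - fiducial_x')          # 'x' now evaluates to corrected values
print(ds['x'])                                # original x shifted down by 10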
Example #2
    def execute(self, namespace):
        import numpy as np
        from PYME.IO import tabular

        if self.lowerMinPtsPerCluster > self.higherMinPtsPerCluster:
            print('Swapping low and high MinPtsPerCluster - input was reversed')
            self.lowerMinPtsPerCluster, self.higherMinPtsPerCluster = \
                self.higherMinPtsPerCluster, self.lowerMinPtsPerCluster

        # integer division: iters sizes the arrays below and feeds range()
        iters = int(np.max(namespace[self.inputName]['t'])) // int(self.stepSize) + 2

        # other counts
        lowDensMinPtsClumps = np.empty(iters)
        lowDensMinPtsClumps[0] = 0
        hiDensMinPtsClumps = np.empty(iters)
        hiDensMinPtsClumps[0] = 0
        t = np.empty(iters)
        t[0] = 0

        inp = tabular.MappingFilter(namespace[self.inputName])

        # start from 1 since t=[0,0] will yield no clumps
        for ind in range(1, iters):
            # filter time
            inc = tabular.ResultsFilter(inp, t=[0, self.stepSize * ind])
            t[ind] = np.max(inc['t'])

            cid, counts = np.unique(inc[self.labelsKey], return_counts=True)
            # cmask = np.in1d(inc['DBSCAN_allFrames'], cid)

            cidL = cid[counts >= self.lowerMinPtsPerCluster]
            lowDensMinPtsClumps[ind] = np.sum(cidL != -1)  # ignore unclumped in count
            cid = cid[counts >= self.higherMinPtsPerCluster]
            hiDensMinPtsClumps[ind] = np.sum(cid != -1)  # ignore unclumped in count

        res = tabular.MappingFilter({
            't': t,
            'N_labelsWithLowMinPoints': lowDensMinPtsClumps,
            'N_labelsWithHighMinPoints': hiDensMinPtsClumps
        })

        # propagate metadata, if present
        try:
            res.mdh = namespace[self.inputName].mdh
        except AttributeError:
            pass

        namespace[self.outputName] = res
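The per-timestep counting relies on np.unique with return_counts=True, thresholding the counts and excluding the -1 noise label. A toy illustration of that trick:

import numpy as np

labels = np.array([-1, -1, 0, 0, 0, 1, 1, 2])
cid, counts = np.unique(labels, return_counts=True)
n_clusters = np.sum(cid[counts >= 2] != -1)  # clusters with >= 2 points, noise (-1) excluded
print(n_clusters)                            # -> 2 (labels 0 and 1)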
Example #3
def saveDataFrame(output, filename):
    """Saves a pandas dataframe, inferring the destination type based on extension"""
    warnings.warn('saveDataFrame is deprecated, use output modules instead', DeprecationWarning)
    if filename.endswith('.csv'):
        output.to_csv(filename)
    elif filename.endswith('.xlsx') or filename.endswith('.xls'):
        output.to_excel(filename)
    elif filename.endswith('.hdf'):
        tabular.MappingFilter(output).to_hdf(filename)
    else:
        tabular.MappingFilter(output).to_hdf(filename + '.hdf', 'Data')
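Hypothetical usage: the writer is chosen purely from the filename extension, and anything unrecognised falls through to HDF with a '.hdf' suffix appended.

import pandas as pd

df = pd.DataFrame({'x': [1.0, 2.0], 'y': [3.0, 4.0]})
saveDataFrame(df, 'points.csv')   # pandas to_csv
saveDataFrame(df, 'points.xlsx')  # pandas to_excel
saveDataFrame(df, 'points')       # no known extension -> writes points.hdf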
Example #4
def find_clumps(datasource,
                gap_tolerance,
                radius_scale,
                radius_offset,
                inject=False):
    from PYME.Analysis.points.DeClump import findClumps
    from PYME.IO import tabular
    import numpy as np
    t = datasource['t']  #OK as int
    clumps = np.zeros(len(t), 'i')
    I = np.argsort(t)
    t = t[I].astype('i')
    x = datasource['x'][I].astype('f4')
    y = datasource['y'][I].astype('f4')

    deltaX = (radius_scale * datasource['error_x'][I] +
              radius_offset).astype('f4')

    assigned = findClumps(t, x, y, deltaX, gap_tolerance)
    clumps[I] = assigned

    if not inject:
        datasource = tabular.MappingFilter(datasource)

    datasource.addColumn('clumpIndex', clumps)

    return datasource
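A minimal sketch of calling find_clumps on a dict-backed source; the toy column names t/x/y/error_x are exactly what the function reads (assumes a PYME installation):

from PYME.IO import tabular
import numpy as np

n = 50
ds = tabular.MappingFilter({'t': np.repeat(np.arange(10), 5),
                            'x': np.random.uniform(0, 1e3, n),
                            'y': np.random.uniform(0, 1e3, n),
                            'error_x': np.full(n, 10.0)})
linked = find_clumps(ds, gap_tolerance=1, radius_scale=2.0, radius_offset=0.0)
print(linked['clumpIndex'][:10])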
Example #5
    def execute(self, namespace):
        from PYME.IO import tabular
        import numpy as np

        input = namespace[self.input]
        mdh = input.mdh

        output = tabular.MappingFilter(input)
        output.mdh = mdh

        output.addColumn('block_id',
                         np.mod((output['t'] / self.block_size).astype('int'), 2))

        channel_names = [k for k in input.keys() if k.startswith('p_')]

        print(channel_names)
        print(input.keys())

        if len(channel_names) == 0:
            #single channel data - no channels defined.
            output.setMapping('ColourNorm', '1.0 + 0*t')
            output.setMapping('p_block0', '1.0*block_id')
            output.setMapping('p_block1', '1.0 - block_id')
        else:
            #have colour channels - subdivide them
            for k in channel_names:
                output.setMapping('%s_block0' % k, '%s*block_id' % k)
                output.setMapping('%s_block1' % k, '%s*(1.0 - block_id)' % k)

            #hide original channel names
            output.hidden_columns.extend(channel_names)

        namespace[self.output] = output
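How the block_id column behaves, as a standalone numpy sketch (block_size assumed to be 100): frames alternate between block 0 and block 1 every 100 frames.

import numpy as np

t = np.arange(400)
block_id = np.mod((t / 100).astype('int'), 2)
print(block_id[0], block_id[150], block_id[250])  # -> 0 1 0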
Example #6
    def execute(self, namespace):
        from PYME.Analysis.points import spherical_harmonics
        from PYME.IO import tabular, MetaDataHandler

        inp = namespace[self.input_localizations]
        points = tabular.MappingFilter(inp)
        shell = namespace[self.input_shell]
        if isinstance(shell, tabular.TabularBase):
            shell = spherical_harmonics.ScaledShell.from_tabular(shell)

        # map points to scaled spherical coordinates
        azimuth, zenith, r = shell.shell_coordinates(
            (points['x'], points['y'], points['z']))
        # lookup shell radius at those angles
        r_shell = spherical_harmonics.reconstruct_shell(
            shell.modes, shell.coefficients, azimuth, zenith)

        points.addColumn(self.name_scaled_azimuth, azimuth)
        points.addColumn(self.name_scaled_zenith, zenith)
        points.addColumn(self.name_scaled_radius, r)
        points.addColumn(self.name_normalized_radius, r / r_shell)

        try:
            points.mdh = MetaDataHandler.DictMDHandler(inp.mdh)
        except AttributeError:
            pass
        namespace[self.output_mapped] = points
Example #7
    def OpenChannel(self, filename='', ds=None, channel_name='', **kwargs):
        """Open a file - accepts optional keyword arguments for use with files
        saved as .txt and .mat. These are:

            FieldNames: a list of names for the fields in the text file or
                        matlab variable.
            VarName:    the name of the variable in the .mat file which
                        contains the data.
            SkipRows:   Number of header rows to skip for txt file data

            PixelSize:  Pixel size if not in nm

        """
        if channel_name == '' or channel_name is None:
            #select a channel name automatically
            channel_name = 'Channel%d' % self._extra_chan_num
            self._extra_chan_num += 1

        if ds is None:
            #load from file
            ds = self._ds_from_file(filename, **kwargs)

        #wrap the data source with a mapping so we can fiddle with things
        #e.g. combining z position and focus
        mapped_ds = tabular.MappingFilter(ds)

        if 'PixelSize' in kwargs.keys():
            mapped_ds.addVariable('pixelSize', kwargs['PixelSize'])
            mapped_ds.setMapping('x', 'x*pixelSize')
            mapped_ds.setMapping('y', 'y*pixelSize')

        self.addDataSource(channel_name, mapped_ds)
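The PixelSize keyword re-scales pixel coordinates via a shared variable; a stripped-down sketch of that mapping (values assumed):

from PYME.IO import tabular
import numpy as np

ds = tabular.MappingFilter({'x': np.array([1., 2.]), 'y': np.array([3., 4.])})
ds.addVariable('pixelSize', 110.0)  # nm per pixel, assumed
ds.setMapping('x', 'x*pixelSize')
ds.setMapping('y', 'y*pixelSize')
print(ds['x'])                      # -> [110. 220.]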
Example #8
    def execute(self, namespace):
        import numpy as np
        from PYME.IO import tabular

        inp = namespace[self.input]

        if not isinstance(inp, Tesselation):
            raise RuntimeError(
                'expected a Tesselation object (as output by the DelaunayTesselation module)'
            )

        if inp._3d:
            x, y, z = inp.circumcentres().T

            if self.append_original_locs:
                x = np.hstack([x, inp['x']])
                y = np.hstack([y, inp['y']])
                z = np.hstack([z, inp['z']])

            pts = {'x': x, 'y': y, 'z': z}
        else:
            x, y = inp.circumcentres().T

            if self.append_original_locs:
                x = np.hstack([x, inp['x']])
                y = np.hstack([y, inp['y']])

            pts = {'x': x, 'y': y, 'z': 0 * x}

        out = tabular.MappingFilter(pts)
        try:
            out.mdh = inp.mdh
        except AttributeError:
            pass

        namespace[self.output] = out
Example #9
    def execute(self, namespace):
        from PYME.LMVis import pipeline
        from PYME.IO import tabular
        fitResults = namespace[self.inputFitResults]
        mdh = fitResults.mdh

        mapped_ds = tabular.MappingFilter(fitResults)

        if self.pixelSizeNM != 1:  # TODO - check close instead?
            mapped_ds.addVariable('pixelSize', self.pixelSizeNM)
            mapped_ds.setMapping('x', 'x*pixelSize')
            mapped_ds.setMapping('y', 'y*pixelSize')

        #extract information from any events
        events = namespace.get(self.inputEvents, None)
        if isinstance(events, tabular.TabularBase):
            events = events.to_recarray()

        ev_maps, ev_charts = pipeline._processEvents(mapped_ds, events, mdh)
        pipeline._add_missing_ds_keys(mapped_ds, ev_maps)

        #Fit module specific filter settings
        if 'Analysis.FitModule' in mdh.getEntryNames():
            fitModule = mdh['Analysis.FitModule']

            if 'LatGaussFitFR' in fitModule:
                mapped_ds.addColumn('nPhotons',
                                    pipeline.getPhotonNums(mapped_ds, mdh))

        mapped_ds.mdh = mdh

        namespace[self.outputLocalizations] = mapped_ds
Example #10
    def execute(self, namespace):
        from PYME.Analysis.points.astigmatism import astigTools
        from PYME.IO import tabular, unifiedIO
        import json

        inp = namespace[self.input_name]

        if 'mdh' not in dir(inp):
            raise RuntimeError('MapAstigZ needs metadata')

        if self.astigmatism_calibration_location == '':  # grab calibration from the metadata
            calibration_location = inp.mdh['Analysis.AstigmatismMapID']
        else:
            calibration_location = self.astigmatism_calibration_location

        s = unifiedIO.read(calibration_location)

        astig_calibrations = json.loads(s)

        mapped = tabular.MappingFilter(inp)

        z, zerr = astigTools.lookup_astig_z(mapped,
                                            astig_calibrations,
                                            self.rough_knot_spacing,
                                            plot=False)

        mapped.addColumn('astigmatic_z', z)
        mapped.addColumn('astigmatic_z_lookup_error', zerr)
        mapped.setMapping('z', 'astigmatic_z + z')

        mapped.mdh = inp.mdh
        mapped.mdh['Analysis.astigmatism_calibration_used'] = calibration_location

        namespace[self.output_name] = mapped
Example #11
    def execute(self, namespace):
        from PYME.IO import tabular

        meas = namespace[self.input_measurements]
        img = namespace[self.input_supertile]

        out = tabular.MappingFilter(meas)

        x_frame_um, y_frame_um = img.data.tile_coords_um[meas['t']].T
        x_frame_px, y_frame_px = img.data.tile_coords[meas['t']].T

        if self.measurement_units == 'um':
            x_to_micron, y_to_micron = 1, 1
            x_to_pixels = 1 / meas.mdh['voxelsize.x']
            y_to_pixels = 1 / meas.mdh['voxelsize.y']
        elif self.measurement_units == 'nm':
            x_to_micron, y_to_micron = 1e-3, 1e-3
            x_to_pixels = 1 / (1e3 * meas.mdh['voxelsize.x'])
            y_to_pixels = 1 / (1e3 * meas.mdh['voxelsize.y'])
        elif self.measurement_units == 'px':
            x_to_micron = meas.mdh['voxelsize.x']
            y_to_micron = meas.mdh['voxelsize.y']
            x_to_pixels, y_to_pixels = 1, 1
        else:
            raise RuntimeError("Supported units include 'um', 'nm', and 'px'")

        out.addColumn('x_um', x_frame_um + meas['x'] * x_to_micron)
        out.addColumn('y_um', y_frame_um + meas['y'] * y_to_micron)
        out.addColumn('x_px', x_frame_px + meas['x'] * x_to_pixels)
        out.addColumn('y_px', y_frame_px + meas['y'] * y_to_pixels)

        out.mdh = meas.mdh

        namespace[self.output_name] = out
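A quick numeric check of the conversion factors above, assuming a voxel size of 0.1 um/pixel and measurements in nm:

vx = 0.1                  # um per pixel, assumed
x_nm = 250.0              # a measurement in nm
print(x_nm * 1e-3)        # -> 0.25 um
print(x_nm / (1e3 * vx))  # -> 2.5 pixels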
Example #12
def test_TravelingSalesperson():
    # these names come from module-level imports in the original test file
    import numpy as np
    from PYME.IO import tabular
    from PYME.recipes import base, measurement

    r = 10
    theta = np.linspace(0, 2 * np.pi, 5)
    dt = theta[1] - theta[0]
    x, y = r * np.cos(theta), r * np.sin(theta)
    x = np.concatenate([x, r * np.cos(theta + 0.5 * dt)])
    y = np.concatenate([y, r * np.sin(theta + 0.5 * dt)])

    points = tabular.MappingFilter({
        'x_um': np.concatenate([x, 1.1 * r * np.cos(theta)]),
        'y_um': np.concatenate([y, 1.1 * r * np.sin(theta)])
    })

    recipe = base.ModuleCollection()
    recipe.add_module(
        measurement.TravelingSalesperson(output='output', epsilon=0.001))
    recipe.namespace['input'] = points

    ordered = recipe.execute()

    # should be not too much more than the rough circumference.
    assert ordered.mdh['TravelingSalesperson.Distance'] < 1.25 * (2 * np.pi * r)
Example #13
    def execute(self, namespace):
        from PYME.IO.FileUtils import readSpeckle
        from PYME.IO import MetaDataHandler, tabular
        import os

        fileInfo = {'SEP': os.sep}

        seriesLength = 100000

        mdh = MetaDataHandler.NestedClassMDHandler()
        mdh['voxelsize.x'] = .001  # default pixel size - FIXME
        mdh['voxelsize.y'] = .001

        #use a default sensor size of 512
        #this gets over-ridden below if we supply an image
        clip_region = [
            self.edgeRejectionPixels, self.edgeRejectionPixels,
            512 - self.edgeRejectionPixels, 512 - self.edgeRejectionPixels
        ]

        if self.inputImage != '':
            inp = namespace[self.inputImage]
            mdh.update(inp.mdh)
            seriesLength = inp.data.shape[2]

            clip_region = [
                self.edgeRejectionPixels, self.edgeRejectionPixels,
                inp.data.shape[0] - self.edgeRejectionPixels,
                inp.data.shape[1] - self.edgeRejectionPixels
            ]

            try:
                fileInfo['DIRNAME'], fileInfo['IMAGENAME'] = os.path.split(inp.filename)
                fileInfo['IMAGESTUB'] = fileInfo['IMAGENAME'].split('MM')[0]
            except Exception:  # input image has no usable filename info
                pass

        speckleFN = self.speckleFilename.format(**fileInfo)

        specks = readSpeckle.readSpeckles(speckleFN)
        traces = readSpeckle.gen_traces_from_speckles(
            specks,
            leadFrames=self.leadFrames,
            followFrames=self.followFrames,
            seriesLength=seriesLength,
            clipRegion=clip_region)

        #turn this into an inputFilter object
        inp = tabular.RecArraySource(traces)

        #create a mapping to convert the coordinates in pixels to coordinates in nm
        vs = mdh.voxelsize_nm
        mapped = tabular.MappingFilter(inp,
                                       x='x_pixels*%3.2f' % vs.x,
                                       y='y_pixels*%3.2f' % vs.y)

        mapped.mdh = mdh

        namespace[self.outputName] = mapped
Example #14
    def execute(self, namespace):
        from PYME.IO import tabular
        import numpy as np
        locs = namespace[self.inputName]

        t_shift, shifts = self.calcCorrDrift(locs['x'], locs['y'], locs['t'])
        shx = shifts[:, 0]
        shy = shifts[:, 1]

        out = tabular.MappingFilter(locs)
        t_out = out['t']
        dx = np.interp(t_out, t_shift, shx)
        dy = np.interp(t_out, t_shift, shy)

        out.addColumn('dx', dx)
        out.addColumn('dy', dy)
        out.setMapping('x', 'x + dx')
        out.setMapping('y', 'y + dy')

        # propagate metadata, if present
        try:
            out.mdh = locs.mdh
        except AttributeError:
            pass

        namespace[self.outputName] = out
Example #15
def test_IdentifyOverlappingROIs():
    # these names come from module-level imports in the original test file
    import numpy as np
    from scipy.spatial import KDTree
    from PYME.IO import tabular
    from PYME.IO.MetaDataHandler import NestedClassMDHandler
    from PYME.recipes import base, measurement, tablefilters

    mdh = NestedClassMDHandler()
    mdh['voxelsize.x'] = 0.115
    roi_size = 256
    roi_size_um = roi_size * mdh['voxelsize.x']
    max_distance = np.sqrt(2) * roi_size_um
    points = tabular.RandomSource(100 * roi_size_um, 100 * roi_size_um, int(1e3))
    # pretend we defined points in um
    points = tabular.MappingFilter(points, **{'x_um': 'x', 'y_um': 'y'})
    points.mdh = mdh

    recipe = base.ModuleCollection()
    recipe.add_module(
        measurement.IdentifyOverlappingROIs(roi_size_pixels=roi_size,
                                            output='mapped'))
    recipe.add_module(
        tablefilters.FilterTable(inputName='mapped',
                                 filters={'rejected': [-0.5, 0.5]},
                                 outputName='output'))
    recipe.namespace['input'] = points
    filtered = recipe.execute()

    positions = np.stack([filtered['x'], filtered['y']], axis=1)

    kdt = KDTree(positions)
    distances, indices = kdt.query(positions, k=2, p=2)
    assert (distances[:, 1] > max_distance).all()
Example #16
    def execute(self, namespace):
        from sklearn.cluster import dbscan
        from PYME.IO import tabular
        import numpy as np

        inp = namespace[self.inputName]
        mapped = tabular.MappingFilter(inp)

        # Note that sklearn gives unclustered points label of -1, and first value starts at 0.
        X = np.vstack([inp[k] for k in self.columns]).T
        if self.multithreaded:
            core_samp, dbLabels = dbscan(X, eps=self.searchRadius,
                                         min_samples=self.minClumpSize,
                                         n_jobs=self.numberOfJobs)
        else:
            # NB try-catch from Christian's multithreaded example removed as I think we should see failure here
            core_samp, dbLabels = dbscan(X, eps=self.searchRadius,
                                         min_samples=self.minClumpSize)

        # shift dbscan labels up by one to match existing convention that a clumpID of 0 corresponds to unclumped
        mapped.addColumn(str(self.clumpColumnName), dbLabels + 1)

        # propagate metadata, if present
        try:
            mapped.mdh = inp.mdh
        except AttributeError:
            pass

        namespace[self.outputName] = mapped
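A self-contained sketch of the sklearn call plus the +1 shift: sklearn labels noise points -1, while the PYME convention uses 0 for unclumped.

from sklearn.cluster import dbscan
import numpy as np

X = np.random.uniform(0, 100, (200, 2))
core, labels = dbscan(X, eps=5.0, min_samples=3)
clump_ids = labels + 1  # 0 = unclumped, clusters start at 1
print(np.unique(clump_ids))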
Example #17
    def _process_features(self, data, features):
        from PYME.IO import tabular
        out = tabular.MappingFilter(data)
        out.mdh = getattr(data, 'mdh', None)

        if self.normalise:
            features = features - features.mean(0)[None, :]
            features = features / features.std(0)[None, :]

        if self.PCA:
            from sklearn.decomposition import PCA

            pca = PCA(n_components=(
                self.PCA_components if self.PCA_components > 0 else None
            )).fit(features)
            features = pca.transform(features)

            out.pca = pca  #save the pca object just in case we want to look at what the principal components are (this is hacky)

        out.addColumn(self.outputColumnName, features)

        if self.columnForEachFeature:
            for i in range(features.shape[1]):
                out.addColumn('feat_%d' % i, features[:, i])

        return out
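The normalise-then-PCA steps in isolation, sketched on random features:

import numpy as np
from sklearn.decomposition import PCA

features = np.random.rand(200, 8)
features = (features - features.mean(0)[None, :]) / features.std(0)[None, :]
pca = PCA(n_components=3).fit(features)
reduced = pca.transform(features)
print(reduced.shape, pca.explained_variance_ratio_)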
Example #18
    def addDataSource(self, dskey, ds, add_missing_vars=True):
        """
        Add a new data source

        Parameters
        ----------
        dskey : str
            The name of the new data source
        ds : a tabular.inputFilter-derived class
            The new data source

        """
        #check that we have a suitable object - note that this could potentially be relaxed
        assert isinstance(ds, tabular.TabularBase)

        if not isinstance(ds, tabular.MappingFilter):
            #wrap with a mapping filter
            ds = tabular.MappingFilter(ds)

        #add keys which might not already be defined
        if add_missing_vars:
            _add_missing_ds_keys(ds, self.ev_mappings)

        if getattr(ds, 'mdh', None) is None:
            try:
                ds.mdh = self.mdh
            except AttributeError:
                logger.error('No metadata defined in pipeline')
                pass

        self.dataSources[dskey] = ds
Example #19
    def execute(self, namespace):
        import numpy as np
        from PYME.localization import traveling_salesperson
        from PYME.IO import tabular, MetaDataHandler
        from scipy.spatial import distance_matrix

        points = namespace[self.input]

        try:
            positions = np.stack([points['x_um'], points['y_um']], axis=1)
        except KeyError:
            # units don't matter for these calculations, but we want to preserve them on the other side
            positions = np.stack([points['x'], points['y']], axis=1) / 1e3

        distances = distance_matrix(positions, positions)

        route, best_distance, og_distance = traveling_salesperson.two_opt(
            distances, self.epsilon)

        # plot_path(positions, route)
        out = tabular.MappingFilter({
            'x_um': positions[:, 0][route],
            'y_um': positions[:, 1][route]
        })
        out.mdh = MetaDataHandler.NestedClassMDHandler()
        try:
            out.mdh.copyEntriesFrom(points.mdh)
        except AttributeError:
            pass
        out.mdh['TravelingSalesperson.Distance'] = best_distance
        out.mdh['TravelingSalesperson.OriginalDistance'] = og_distance

        namespace[self.output] = out
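The two_opt call in isolation on a toy square, using the same epsilon tolerance argument as above (assumes a PYME installation):

import numpy as np
from scipy.spatial import distance_matrix
from PYME.localization import traveling_salesperson

positions = np.array([[0., 0.], [1., 0.], [1., 1.], [0., 1.]])
route, best_distance, og_distance = traveling_salesperson.two_opt(
    distance_matrix(positions, positions), 0.01)
print(route, best_distance)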
Example #20
    def execute(self, namespace):
        from PYME.LMVis import pipeline
        from PYME.IO import tabular
        fitResults = namespace[self.inputFitResults]
        mdh = fitResults.mdh

        mapped_ds = tabular.MappingFilter(fitResults)

        if self.pixelSizeNM != 1:  # TODO - check close instead?
            mapped_ds.addVariable('pixelSize', self.pixelSizeNM)
            mapped_ds.setMapping('x', 'x*pixelSize')
            mapped_ds.setMapping('y', 'y*pixelSize')

        #extract information from any events
        if self.inputEvents != '':
            # Use the specified events table if given (otherwise look for a `.events` attribute on the input data)
            # TODO: resolve how best to handle events (i.e. should they be a separate table, or should they be attached to data tables)
            events = namespace.get(self.inputEvents, None)
        else:
            try:
                events = fitResults.events
            except AttributeError:
                logger.debug('no events found')
                events = None

        if isinstance(events, tabular.TabularBase):
            events = events.to_recarray()

        ev_maps, ev_charts = pipeline._processEvents(mapped_ds, events, mdh)
        pipeline._add_missing_ds_keys(mapped_ds, ev_maps)

        #Fit module specific filter settings
        if 'Analysis.FitModule' in mdh.getEntryNames():
            fitModule = mdh['Analysis.FitModule']

            if 'LatGaussFitFR' in fitModule:
                # TODO - move getPhotonNums() out of pipeline
                mapped_ds.addColumn('nPhotons',
                                    pipeline.getPhotonNums(mapped_ds, mdh))

            if 'SplitterFitFNR' in fitModule:
                mapped_ds.addColumn(
                    'nPhotonsg',
                    pipeline.getPhotonNums(
                        {
                            'A': mapped_ds['fitResults_Ag'],
                            'sig': mapped_ds['fitResults_sigma']
                        }, mdh))
                mapped_ds.addColumn(
                    'nPhotonsr',
                    pipeline.getPhotonNums(
                        {
                            'A': mapped_ds['fitResults_Ar'],
                            'sig': mapped_ds['fitResults_sigma']
                        }, mdh))
                mapped_ds.setMapping('nPhotons', 'nPhotonsg+nPhotonsr')

        mapped_ds.mdh = mdh

        namespace[self.outputLocalizations] = mapped_ds
Example #21
    def OnGenEvents(self, event):
        from PYME.simulation import locify
        #from PYME.Acquire.Hardware.Simulator import wormlike2
        from PYME.IO import tabular
        from PYME.IO.image import ImageBounds
        import pylab

        #wc = wormlike2.wormlikeChain(100)

        pipeline = self.visFr.pipeline
        pipeline.filename = 'Simulation'

        pylab.figure()
        pylab.plot(self.xp, self.yp, 'x')  #, lw=2)
        if isinstance(self.source, WormlikeSource):
            pylab.plot(self.xp, self.yp, lw=2)

        res = locify.eventify(self.xp,
                              self.yp,
                              self.meanIntensity,
                              self.meanDuration,
                              self.backgroundIntensity,
                              self.meanEventNumber,
                              self.scaleFactor,
                              self.meanTime,
                              z=self.zp)

        pylab.plot(res['fitResults']['x0'], res['fitResults']['y0'], '+')

        ds = tabular.MappingFilter(tabular.FitResultsSource(res))

        if isinstance(self.source, ImageSource):
            pipeline.imageBounds = image.openImages[
                self.source.image].imgBounds
        else:
            pipeline.imageBounds = ImageBounds.estimateFromSource(ds)

        pipeline.addDataSource('Generated Points', ds)
        pipeline.selectDataSource('Generated Points')

        from PYME.IO.MetaDataHandler import NestedClassMDHandler
        pipeline.mdh = NestedClassMDHandler()
        pipeline.mdh['Camera.ElectronsPerCount'] = 1
        pipeline.mdh['Camera.TrueEMGain'] = 1
        pipeline.mdh['Camera.CycleTime'] = 1
        pipeline.mdh['voxelsize.x'] = .110

        try:
            pipeline.filterKeys.pop('sig')
        except KeyError:
            pass

        pipeline.Rebuild()
        if len(self.visFr.layers) < 1:
            #TODO - move this logic so that the layer is added automatically when a datasource is added?
            self.visFr.add_pointcloud_layer()
        #self.visFr.CreateFoldPanel()
        self.visFr.SetFit()
Example #22
    def execute(self, namespace):
        from PYME.IO import tabular

        inp = namespace[self.inputName]

        mapped = tabular.MappingFilter(inp, **self.mappings)

        if 'mdh' in dir(inp):
            mapped.mdh = inp.mdh

        namespace[self.outputName] = mapped
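Mappings can also be supplied as keyword expressions at construction time, which is all this module does. A minimal sketch, assuming dict sources combine with keyword mappings as in the examples above:

from PYME.IO import tabular
import numpy as np

ds = tabular.MappingFilter({'x_pixels': np.array([1., 2., 3.])},
                           x='x_pixels*110.0')  # pixel size assumed, result in nm
print(ds['x'])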
Example #23
def find_clumps_within_channel(datasource,
                               gap_tolerance,
                               radius_scale,
                               radius_offset,
                               inject=False):
    """

    Args:
        datasource: PYME datasource object - dictionary-like object with addColumn method
        gap_tolerance: number of frames acceptable for a molecule to go MIA and still be called the same molecule when
            it returns
        radius_scale: multiplicative factor applied to the error_x term in deciding search radius for pairing
        radius_offset: term added to radius_scale*error_x to set search radius
        inject: if False (the default), wrap the datasource in a MappingFilter before adding the clumpIndex column

    Returns:
        The datasource with a clumpIndex column added

    FIXME: This function should probably not exist, as channel handling should ideally live in only one place in the
    code base. A preferred solution would be to split using a colour filter, clump each channel separately, and then
    merge the channels.

    """
    from PYME.Analysis.points.DeClump import findClumps
    from PYME.IO import tabular
    import numpy as np
    t = datasource['t']  #OK as int
    clumps = np.zeros(len(t), 'i')
    I = np.argsort(t)
    t = t[I].astype('i')
    x = datasource['x'][I].astype('f4')
    y = datasource['y'][I].astype('f4')

    deltaX = (radius_scale * datasource['error_x'][I] +
              radius_offset).astype('f4')

    # extract color channel information
    uprobe = np.unique(datasource['probe'])
    probe = datasource['probe'][I]

    # only clump within color channels
    assigned = np.zeros_like(clumps)
    startAt = 0
    for pi in uprobe:
        pmask = probe == pi
        # note that the search radii must be masked to this channel as well
        pClumps = findClumps(t[pmask], x[pmask], y[pmask], deltaX[pmask],
                             gap_tolerance) + startAt
        # throw all unclumped into the 0th clumpID, and preserve pClumps[-1] of the last iteration
        pClumps[pClumps == startAt] = 0
        # patch in assignments for this color channel
        assigned[pmask] = pClumps
        startAt = np.max(assigned)
    clumps[I] = assigned

    if not inject:
        datasource = tabular.MappingFilter(datasource)

    datasource.addColumn('clumpIndex', clumps)

    return datasource
Example #24
def foldX(datasource, mdh, inject=False, chroma_mappings=False):
    """

    At this point the origin of x should be the corner of the concatenated frame

    Args:
        datasource:

    Returns:
        The datasource with folded x-coordinates and channel assignments added

    """
    from PYME.IO import tabular
    import numpy as np
    if not inject:
        datasource = tabular.MappingFilter(datasource)

    roiSizeNM = (mdh['Multiview.ROISize'][1]*mdh['voxelsize.x']*1000)  # voxelsize is in um

    numChans = mdh.getOrDefault('Multiview.NumROIs', 1)
    color_chans = np.array(mdh.getOrDefault('Multiview.ChannelColor', np.zeros(numChans, 'i'))).astype('i')

    datasource.addVariable('roiSizeNM', roiSizeNM)
    datasource.addVariable('numChannels', numChans)

    #FIXME - cast to int should probably happen when we use multiViewChannel, not here (because we might have saved and reloaded in between)
    datasource.setMapping('multiviewChannel', 'clip(floor(x/roiSizeNM), 0, numChannels - 1).astype(int)')
    if chroma_mappings:
        datasource.addColumn('chromadx', 0 * datasource['x'])
        datasource.addColumn('chromady', 0 * datasource['y'])

        datasource.setMapping('x', 'x%roiSizeNM + chromadx')
        datasource.setMapping('y', 'y + chromady')
    else:
        datasource.setMapping('x', 'x%roiSizeNM')

    probe = color_chans[datasource['multiviewChannel']] #should be better performance
    datasource.addColumn('probe', probe)

    # add separate sigmaxy columns for each plane
    for chan in range(numChans):
        chan_mask = datasource['multiviewChannel'] == chan
        datasource.addColumn('chan%d' % chan, chan_mask)

        #mappings are cheap if we don't evaluate them
        datasource.setMapping('sigmax%d' % chan, 'chan%d*fitResults_sigmax' % chan)
        datasource.setMapping('sigmay%d' % chan, 'chan%d*fitResults_sigmay' % chan)
        datasource.setMapping('error_sigmax%d' % chan,
                                               'chan%(chan)d*fitError_sigmax - 1e4*(1-chan%(chan)d)' % {'chan': chan})
        datasource.setMapping('error_sigmay%d' % chan,
                                               'chan%(chan)d*fitError_sigmay - 1e4*(1-chan%(chan)d)' % {'chan': chan})

        #lets add some more that might be useful
        #datasource.setMapping('A%d' % chan, 'chan%d*A' % chan)

    return datasource
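The fold itself is a modulo on x plus an integer-divide channel assignment; in plain numpy, with the geometry assumed:

import numpy as np

roiSizeNM, numChans = 25600.0, 4  # assumed ROI width and channel count
x = np.random.uniform(0, roiSizeNM * numChans, 10)
channel = np.clip(np.floor(x / roiSizeNM), 0, numChans - 1).astype(int)
x_folded = x % roiSizeNM
print(channel, bool(x_folded.max() < roiSizeNM))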
Example #25
    def Rebuild(self, **kwargs):
        """
        Rebuild the pipeline. Called when the selected data source is changed/modified and/or the filter is changed.

        """
        for s in self.dataSources.values():
            if 'setMapping' in dir(s):
                #keep raw measurements available
                s.setMapping('x_raw', 'x')
                s.setMapping('y_raw', 'y')

                if 'z' in s.keys():
                    s.setMapping('z_raw', 'z')

        if self.selectedDataSource is not None:
            if self.mapping is not None:
                # copy any mapping we might have made across to the new mapping filter (should fix drift correction)
                # TODO - make drift correction a recipe module so that we don't need this code. Long term we should be
                # ditching the mapping filter here.
                old_mapping = self.mapping
                self.mapping = tabular.MappingFilter(self.selectedDataSource)
                self.mapping.mappings.update(old_mapping.mappings)
            else:
                self.mapping = tabular.MappingFilter(self.selectedDataSource)

            #the filter, however needs to be re-generated with new keys and or data source
            self.filter = tabular.ResultsFilter(self.mapping,
                                                **self.filterKeys)

            #we can also recycle the colour filter
            if self.colourFilter is None:
                self.colourFilter = tabular.ColourFilter(self.filter)
            else:
                self.colourFilter.resultsSource = self.filter

            #self._process_colour()

            self.ready = True

        self.ClearGenerated()
Example #26
def findTracks2(datasource,
                rad_var='error_x',
                multiplier=2.0,  # numeric: a string default would fail in the arithmetic below
                nFrames=20,
                minClumpSize=0):
    import PYME.Analysis.points.DeClump as deClump
    from PYME.IO import tabular
    import numpy as np

    with_clumps = tabular.MappingFilter(datasource)

    if rad_var == '1.0':
        delta_x = 0 * datasource['x'] + multiplier
    else:
        delta_x = multiplier * datasource[rad_var]

    t = datasource['t'].astype('i')
    x = datasource['x'].astype('f4')
    y = datasource['y'].astype('f4')
    delta_x = delta_x.astype('f4')

    I = np.argsort(t)

    clumpIndices = np.zeros(len(x), dtype='i')
    clumpIndices[I] = deClump.findClumps(t[I], x[I], y[I], delta_x[I], nFrames)

    numPerClump, b = np.histogram(clumpIndices,
                                  np.arange(clumpIndices.max() + 1.5) + .5)

    trackVelocities = 0 * x
    trackVelocities[I] = calcTrackVelocity(x[I], y[I], clumpIndices[I],
                                           t.astype('f')[I])
    #print b

    with_clumps.addColumn('clumpIndex', clumpIndices)
    with_clumps.addColumn('clumpSize', numPerClump[clumpIndices - 1])
    with_clumps.addColumn('trackVelocity', trackVelocities)

    if minClumpSize > 0:
        filt = tabular.ResultsFilter(with_clumps,
                                     clumpSize=[minClumpSize, 1e6])
    else:
        filt = with_clumps

    try:
        filt.mdh = datasource.mdh
    except AttributeError:
        pass

    return with_clumps, ClumpManager(filt)
Example #27
    def OnLoadHarmonicRepresentation(self, wx_event):
        import wx
        from PYME.IO import tabular, FileUtils
        from PYME.Analysis.points.spherical_harmonics import scaled_shell_from_hdf
        import PYME.experimental._triangle_mesh as triangle_mesh
        from PYME.LMVis.layers.mesh import TriangleRenderLayer

        fdialog = wx.FileDialog(
            None,
            'Load Spherical Harmonic Representation',
            wildcard='Harmonic shell (*.hdf)|*.hdf',
            style=wx.FD_OPEN,
            defaultDir=FileUtils.nameUtils.genShiftFieldDirectoryPath())
        succ = fdialog.ShowModal()
        if (succ == wx.ID_OK):
            path = fdialog.GetPath()
            fdialog.Destroy()
        else:
            fdialog.Destroy()
            return

        shell = scaled_shell_from_hdf(path)

        points = tabular.MappingFilter(self.pipeline.selectedDataSource)
        separations, closest_points = shell.distance_to_shell(
            (points['x'], points['y'], points['z']), d_angles=self.d_angle)

        self._shells.append(shell)
        shell_number = len(self._shells)
        points.addColumn('distance_to_loaded_shell%d' % shell_number,
                         separations)
        points.addColumn(
            'inside_shell%d' % shell_number,
            shell.check_inside(points['x'], points['y'], points['z']))

        self.pipeline.addDataSource('shell%d_mapped' % shell_number, points)
        self.pipeline.selectDataSource('shell%d_mapped' % shell_number)

        v, f = shell.get_mesh_vertices_faces(self.d_angle)
        surf = triangle_mesh.TriangleMesh(v, f)
        self.pipeline.dataSources['shell_surface'] = surf

        layer = TriangleRenderLayer(self.pipeline,
                                    dsname='shell_surface',
                                    method='shaded',
                                    cmap='C')
        self.vis_frame.add_layer(layer)

        self.vis_frame.RefreshView()
Example #28
    def execute(self, namespace):
        from PYME.Analysis.points import multiview
        from PYME.IO import tabular, unifiedIO
        from PYME.IO.MetaDataHandler import HDFMDHandler
        import tables
        import json

        inp = namespace[self.input_name]

        if 'mdh' not in dir(inp):
            raise RuntimeError('ShiftCorrect needs metadata')

        if self.shift_map_path == '':  # grab shftmap from the metadata
            loc = inp.mdh['Shiftmap']
        else:
            loc = self.shift_map_path

        try:  # try loading shift map as hdf file
            with unifiedIO.local_or_temp_filename(loc) as f:
                t = tables.open_file(f)
                shift_map_source = tabular.HDFSource(t, 'shift_map')  # todo - is there a cleaner way to do this?
                shift_map_source.mdh = HDFMDHandler(t)

            # build dict of dicts so we can easily rebuild shiftfield objects in multiview.calc_shifts_for_points
            shift_map = {
                'shiftModel': shift_map_source.mdh['Multiview.shift_map.model']
            }
            legend = shift_map_source.mdh['Multiview.shift_map.legend']
            for l in legend.keys():
                keys = shift_map_source.keys()
                shift_map[l] = dict(
                    zip(keys, [shift_map_source[k][legend[l]] for k in keys]))

            t.close()
        except tables.HDF5ExtError:  # file is probably saved as json (legacy)
            s = unifiedIO.read(loc)  # use the resolved location rather than the raw trait
            shift_map = json.loads(s)

        mapped = tabular.MappingFilter(inp)

        multiview.apply_shifts_to_points(mapped, shift_map)
        # propagate metadata
        mapped.mdh = inp.mdh
        mapped.mdh['Multiview.shift_map.location'] = loc

        namespace[self.output_name] = mapped
Example #29
def saveOutput(output, filename):
    """Save an output variable, inferring type from the file extension"""
    warnings.warn('saveOutput is deprecated, use output modules instead', DeprecationWarning)
    if isinstance(output, ImageStack):
        try:
            output.Save(filename)
        except RuntimeError:
            output.Save(filename + '.tif')
    elif isinstance(output, tabular.TabularBase):
        saveTabular(output, filename)
    elif isinstance(output, pd.DataFrame):
        saveDataFrame(output, filename)
    elif isinstance(output, matplotlib.figure.Figure):
        output.savefig(filename)
    else: #hope we can convert to a tabular format
        saveTabular(tabular.MappingFilter(output), filename)
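Hypothetical usage: dispatch is on the runtime type of the output, and extension handling is then delegated (a DataFrame, for example, goes through saveDataFrame above):

import pandas as pd

df = pd.DataFrame({'x': [1.0], 'y': [2.0]})
saveOutput(df, 'points.csv')  # pd.DataFrame -> saveDataFrame -> to_csv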
Example #30
    def RegenFilter(self):
        if self.selectedDataSource is not None:
            self.filter = inpFilt.ResultsFilter(self.selectedDataSource,
                                                **self.filterKeys)
            if self.mapping:
                self.mapping.resultsSource = self.filter
            else:
                self.mapping = inpFilt.MappingFilter(self.filter)

            if not self.colourFilter:
                self.colourFilter = inpFilt.ColourFilter(self.mapping, self)

        self.edb = None
        self.objects = None

        self.GeneratedMeasures = {}