Example #1
def findTracks(pipeline, rad_var='error_x', multiplier='2.0', nFrames=20):
    import numpy as np
    import PYME.Analysis.points.DeClump.deClump as deClump
    import warnings
    
    warnings.warn('deprecated, use findTracks2 instead', DeprecationWarning)
    
    # multiplier may arrive as a string (e.g. from a GUI text field); coerce it
    # so the arithmetic below works
    multiplier = float(multiplier)
    
    if rad_var == '1.0':
        # fixed search radius: a constant array the same length as the data
        delta_x = 0*pipeline['x'] + multiplier
    else:
        # per-event search radius scaled from the localisation error column
        delta_x = multiplier*pipeline[rad_var]
        
    t = pipeline['t'].astype('i')
    x = pipeline['x'].astype('f4')
    y = pipeline['y'].astype('f4')
    delta_x = delta_x.astype('f4')
    
    # findClumpsN expects time-ordered input, so sort by frame number and
    # scatter the clump assignments back into the original event order
    I = np.argsort(t)

    clumpIndices = np.zeros(len(x), dtype='i')
    clumpIndices[I] = deClump.findClumpsN(t[I], x[I], y[I], delta_x[I], nFrames)
    
    # count the number of events falling in each clump
    numPerClump, b = np.histogram(clumpIndices, np.arange(clumpIndices.max() + 1.5) + .5)

    # calcTrackVelocity (defined alongside these examples in PYME) gives each
    # event the velocity of its parent track
    trackVelocities = 0*x
    trackVelocities[I] = calcTrackVelocity(x[I], y[I], clumpIndices[I], t.astype('f')[I])
    #print b

    pipeline.addColumn('clumpIndex', clumpIndices, -1)
    pipeline.addColumn('clumpSize', numPerClump[clumpIndices - 1])
    pipeline.addColumn('trackVelocity', trackVelocities)
    
    pipeline.clumps = ClumpManager(pipeline)
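
A minimal usage sketch (hypothetical, not part of the original source); it assumes pipeline is a PYME tabular pipeline exposing 'x', 'y', 't' and 'error_x' columns plus an addColumn method:

# Hypothetical usage sketch -- column names below match the defaults assumed
# by findTracks; any pipeline-like object with these columns should work.
findTracks(pipeline, rad_var='error_x', multiplier='2.0', nFrames=20)
print(pipeline['clumpIndex'][:5])     # per-event track assignment
print(pipeline['trackVelocity'][:5])  # per-event velocity along its track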
Example #2
def find_clumps_within_channel(datasource, gap_tolerance, radius_scale, radius_offset, inject=False):
    """

    Args:
        datasource: PYME datasource object - dictionary-like object with addColumn method
        gap_tolerance: number of frames acceptable for a molecule to go MIA and still be called the same molecule when
            it returns
        radius_scale: multiplicative factor applied to the error_x term in deciding search radius for pairing
        radius_offset: term added to radius_scale*error_x to set search radius

    Returns:
        Nothing, but adds clumpIndex column to datasource input
        
    FIXME: This function should probably not exist as channel handling should ideally only be in one place within the code base. A prefered solution would be to split using a colour filter, clump
    each channel separately, and then merge channels.

    """
    import numpy as np
    from PYME.Analysis.points.DeClump import deClump
    from PYME.IO import tabular
    t = datasource['t'] #OK as int
    clumps = np.zeros(len(t), 'i')
    I = np.argsort(t)
    t = t[I].astype('i')
    x = datasource['x'][I].astype('f4')
    y = datasource['y'][I].astype('f4')

    deltaX = (radius_scale*datasource['error_x'][I] + radius_offset).astype('f4')

    # extract color channel information
    uprobe = np.unique(datasource['probe'])
    probe = datasource['probe'][I]


    # only clump within color channels
    assigned = np.zeros_like(clumps)
    startAt = 0
    for pi in uprobe:
        pmask = probe == pi
        # deltaX must be masked to the same events as t, x and y
        pClumps = deClump.findClumpsN(t[pmask], x[pmask], y[pmask], deltaX[pmask], gap_tolerance) + startAt
        # throw all unclumped into the 0th clumpID, and preserve pClumps[-1] of the last iteration
        pClumps[pClumps == startAt] = 0
        # patch in assignments for this color channel
        assigned[pmask] = pClumps
        startAt = np.max(assigned)
    clumps[I] = assigned

    if not inject:
        datasource = tabular.MappingFilter(datasource)

    datasource.addColumn('clumpIndex', clumps)

    return datasource
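
A sketch of a call with plausible (hypothetical) parameter values, assuming ds carries 'x', 'y', 't', 'error_x' and 'probe' columns:

# Hypothetical usage -- gap_tolerance is in frames; radius_scale/radius_offset
# combine with error_x (nanometres in typical PYME data) to set the search radius.
clumped = find_clumps_within_channel(ds, gap_tolerance=3,
                                     radius_scale=2.0, radius_offset=15.0)
n_unclumped = (clumped['clumpIndex'] == 0).sum()  # events left in the 0th bucket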
Example #3
def findTracks2(datasource, rad_var='error_x', multiplier='2.0', nFrames=20, minClumpSize=0):
    import numpy as np
    import PYME.Analysis.points.DeClump.deClump as deClump
    from PYME.IO import tabular
    
    with_clumps = tabular.MappingFilter(datasource)
    
    # multiplier may arrive as a string (e.g. from a GUI text field); coerce it
    # so the arithmetic below works
    multiplier = float(multiplier)
    
    if rad_var == '1.0':
        delta_x = 0 * datasource['x'] + multiplier
    else:
        delta_x = multiplier * datasource[rad_var]
    
    t = datasource['t'].astype('i')
    x = datasource['x'].astype('f4')
    y = datasource['y'].astype('f4')
    delta_x = delta_x.astype('f4')
    
    I = np.argsort(t)
    
    clumpIndices = np.zeros(len(x), dtype='i')
    clumpIndices[I] = deClump.findClumpsN(t[I], x[I], y[I], delta_x[I], nFrames)
    
    numPerClump, b = np.histogram(clumpIndices, np.arange(clumpIndices.max() + 1.5) + .5)
    
    trackVelocities = 0 * x
    trackVelocities[I] = calcTrackVelocity(x[I], y[I], clumpIndices[I], t.astype('f')[I])
    #print b
    
    with_clumps.addColumn('clumpIndex', clumpIndices)
    with_clumps.addColumn('clumpSize', numPerClump[clumpIndices - 1])
    with_clumps.addColumn('trackVelocity', trackVelocities)
    
    if minClumpSize > 0:
        filt = tabular.ResultsFilter(with_clumps, clumpSize=[minClumpSize, 1e6])
    else:
        filt = with_clumps

    try:
        filt.mdh = datasource.mdh
    except AttributeError:
        pass
    
    return with_clumps, ClumpManager(filt)
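
A usage sketch (hypothetical), showing the two return values:

# Hypothetical usage -- the first return value is the clump-annotated
# datasource, the second a ClumpManager over the (optionally size-filtered)
# tracks.
with_clumps, tracks = findTracks2(ds, rad_var='error_x', multiplier='2.0',
                                  nFrames=20, minClumpSize=5)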
Example #4
def find_clumps(datasource, gap_tolerance, radius_scale, radius_offset, inject=False):
    """Assign a clumpIndex to each localisation, ignoring colour channels.
    Parameters mirror those of find_clumps_within_channel above."""
    import numpy as np
    from PYME.Analysis.points.DeClump import deClump
    from PYME.IO import tabular
    t = datasource['t'] #OK as int
    clumps = np.zeros(len(t), 'i')
    I = np.argsort(t)
    t = t[I].astype('i')
    x = datasource['x'][I].astype('f4')
    y = datasource['y'][I].astype('f4')

    deltaX = (radius_scale*datasource['error_x'][I] + radius_offset).astype('f4')

    assigned = deClump.findClumpsN(t, x, y, deltaX, gap_tolerance)
    clumps[I] = assigned

    if not inject:
        datasource = tabular.MappingFilter(datasource)

    datasource.addColumn('clumpIndex', clumps)

    return datasource
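
All of these examples rely on the same sort-then-scatter indexing trick: the clump finder requires time-ordered input, and its output is written back in the original event order. A standalone sketch of just that trick:

# Standalone illustration of the scatter-back indexing used above.
import numpy as np
t = np.array([7, 2, 9, 2])
I = np.argsort(t)                  # order that sorts the events by time
result_sorted = np.arange(len(t))  # stand-in for a sorted-input routine's output
result = np.zeros(len(t), dtype=int)
result[I] = result_sorted          # undo the sort: result[i] belongs to event i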
Example #5
def _extractAverageTrajectory(pipeline,
                              clumpRadiusVar='error_x',
                              clumpRadiusMultiplier=5.0,
                              timeWindow=25,
                              filter='Gaussian',
                              filterScale=10.0):

    #import PYME.Analysis.trackUtils as trackUtils
    import numpy as np
    import PYME.Analysis.points.DeClump.deClump as deClump
    from scipy.optimize import fmin
    #track beads through frames
    if clumpRadiusVar == '1.0':
        delta_x = 0 * pipeline['x'] + clumpRadiusMultiplier
    else:
        delta_x = clumpRadiusMultiplier * pipeline[clumpRadiusVar]

    t = pipeline['t'].astype('i')
    x = pipeline['x'].astype('f4')
    y = pipeline['y'].astype('f4')
    delta_x = delta_x.astype('f4')

    I = np.argsort(t)

    clumpIndex = np.zeros(len(x), dtype='i')
    clumpIndex[I] = deClump.findClumpsN(t[I], x[I], y[I], delta_x[I],
                                        timeWindow)
    #trackUtils.findTracks(pipeline, clumpRadiusVar,clumpRadiusMultiplier, timeWindow)

    #longTracks = pipeline['clumpSize'] > 50

    #x = x[longTracks].copy()
    #y = pipeline['y_raw'][longTracks].copy()
    #t = pipeline['t'][longTracks].copy() #.astype('i')
    #clumpIndex = pipeline['clumpIndex'][longTracks].copy()

    tMax = t.max()

    clumpIndices = list(set(clumpIndex))

    x_f = []
    y_f = []
    clump_sizes = []

    t_f = np.arange(0, tMax + 1, dtype='i')

    #loop over all our clumps and extract trajectories
    for ci in clumpIndices:
        if ci > 0:
            clump_mask = (clumpIndex == ci)
            x_i = x[clump_mask]
            clump_size = len(x_i)

            if clump_size > 50:
                y_i = y[clump_mask]
                t_i = t[clump_mask].astype('i')

                x_i_f = np.nan * np.ones_like(t_f)
                x_i_f[t_i] = x_i - x_i.mean()

                y_i_f = np.nan * np.ones_like(t_f)
                y_i_f[t_i] = y_i - y_i.mean()

                #clumps.append((x_i_f, y_i_f))
                x_f.append(x_i_f)
                y_f.append(y_i_f)
                clump_sizes.append(len(x_i))

    #re-order to start with the largest clump
    clumpOrder = np.argsort(clump_sizes)[::-1]
    x_f = np.array(x_f)[clumpOrder, :]
    y_f = np.array(y_f)[clumpOrder, :]

    # objective measuring residual variance after applying per-trajectory
    # offsets; apparently intended for fmin, but the iterative _align below is
    # what is actually used
    def _mf(p, meas):
        '''calculate the offset between trajectories'''
        m_adj = meas + np.hstack([[0], p])[:, None]

        return np.nansum(np.nanvar(m_adj, axis=0))

    #print x_f.shape, np.hstack([[0], np.random.randn(x_f.shape[0]-1)]).shape

    def _align(meas, tol=.1):
        n_iters = 0

        dm_old = 5e12
        dm = 4e12

        mm = np.nanmean(meas, 0)

        while ((dm_old - dm) > tol) and (n_iters < 50):
            dm_old = dm
            mm = np.nanmean(meas, 0)
            d = np.nanmean(meas - mm, 1)
            dm = sum(d**2)
            meas = meas - d[:, None]
            n_iters += 1
            print(n_iters, dm)

        mm = np.nanmean(meas, 0)
        print('Finished:', n_iters, dm)
        return mm

    x_corr = _align(x_f)
    y_corr = _align(y_f)

    # FILTER_FUNCS is a module-level mapping from filter name (e.g. 'Gaussian')
    # to a smoothing function taking (t, data_dict, scale)
    filtered_corr = FILTER_FUNCS[filter](t_f, {
        'x': x_corr,
        'y': y_corr
    }, filterScale)

    return t_f, filtered_corr
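
The _align loop above estimates a constant per-trajectory offset by repeatedly subtracting each trajectory's mean deviation from the column-wise mean. A toy, self-contained version of the same iteration (not the original function):

# Toy sketch: two copies of one drift trace, shifted by constants and with a
# gap, converge onto a common baseline under the _align-style iteration.
import numpy as np
drift = np.cumsum(0.1 * np.random.randn(200))
meas = np.vstack([drift + 5.0, drift - 3.0])
meas[0, 50:80] = np.nan                            # simulate a gap in one track
for _ in range(50):
    d = np.nanmean(meas - np.nanmean(meas, 0), 1)  # per-track offset estimate
    meas = meas - d[:, None]
avg_drift = np.nanmean(meas, 0)                    # averaged drift trajectory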
Example #6
def extractTrajectoriesClump(ds,
                             clumpRadiusVar='error_x',
                             clumpRadiusMultiplier=5.0,
                             timeWindow=25,
                             clumpMinSize=50,
                             align=True):

    import numpy as np
    import PYME.Analysis.points.DeClump.deClump as deClump
    #track beads through frames
    if clumpRadiusVar == '1.0':
        delta_x = 0 * ds['x'] + clumpRadiusMultiplier
    else:
        delta_x = clumpRadiusMultiplier * ds[clumpRadiusVar]

    t = ds['t'].astype('i')
    x = ds['x'].astype('f4')
    y = ds['y'].astype('f4')
    z = ds['z'].astype('f4')
    delta_x = delta_x.astype('f4')

    I = np.argsort(t)

    clumpIndex = np.zeros(len(x), dtype='i')
    isFiducial = np.zeros(len(x), dtype='i')
    clumpIndex[I] = deClump.findClumpsN(t[I], x[I], y[I], delta_x[I],
                                        timeWindow)

    tMax = t.max()

    clumpIndices = list(set(clumpIndex))

    x_f = []
    y_f = []
    z_f = []
    clump_sizes = []

    t_f = np.arange(0, tMax + 1, dtype='i')

    #loop over all our clumps and extract trajectories
    for ci in clumpIndices:
        if ci > 0:
            clump_mask = (clumpIndex == ci)
            x_i = x[clump_mask]
            clump_size = len(x_i)

            if clump_size > clumpMinSize:
                y_i = y[clump_mask]
                z_i = z[clump_mask]
                t_i = t[clump_mask].astype('i')
                isFiducial[clump_mask] = 1  # mark these events as belonging to a fiducial

                x_i_f = np.nan * np.ones_like(t_f)
                if align:
                    x_i_f[t_i] = x_i - x_i.mean()
                else:
                    x_i_f[t_i] = x_i

                y_i_f = np.nan * np.ones_like(t_f)
                if align:
                    y_i_f[t_i] = y_i - y_i.mean()
                else:
                    y_i_f[t_i] = y_i

                z_i_f = np.nan * np.ones_like(t_f)
                if align:
                    z_i_f[t_i] = z_i - z_i.mean()
                else:
                    z_i_f[t_i] = z_i

                #clumps.append((x_i_f, y_i_f))
                x_f.append(x_i_f)
                y_f.append(y_i_f)
                z_f.append(z_i_f)
                clump_sizes.append(len(x_i))

    #re-order to start with the largest clump
    clumpOrder = np.argsort(clump_sizes)[::-1]
    x_f = np.array(x_f)[clumpOrder, :]
    y_f = np.array(y_f)[clumpOrder, :]
    z_f = np.array(z_f)[clumpOrder, :]

    return (t_f, x_f, y_f, z_f, isFiducial)
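
A usage sketch (hypothetical; ds is assumed to provide 'x', 'y', 'z' and 't' columns):

# Hypothetical usage -- extract bead/fiducial trajectories without
# mean-centring, then inspect the largest track; NaNs mark frames in which
# the bead was not detected.
t_f, x_f, y_f, z_f, isFiducial = extractTrajectoriesClump(ds, clumpMinSize=50,
                                                          align=False)
largest_track_x = x_f[0]  # trajectories are ordered largest-first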