Example #1
def melt_analysis(data):
    mdata = initialize_mdata(data)

    frames, mframes = helpy.splitter((data, mdata), 'f')
    shells = assign_shell(frames[0]['xy'], frames[0]['t'],
                          maxt=data['t'].max())
    mdata['sh'] = shells[mdata['t']]

    dens_method = 'dist'
    # compute per-frame density and order parameters (dens, psi, phi)
    for frame, melt in it.izip(frames, mframes):
        # outermost shell particles get 3 neighbors, others 4
        # (nshells is a module-level global in the source)
        nn = np.where(melt['sh'] == nshells-1, 3, 4)
        neigh_args = {'size': (nn,)*2}

        dens, psi, phi = melting_stats(frame, dens_method, neigh_args)
        melt['dens'] = dens
        melt['psi'] = psi
        melt['phi'] = phi
    return mdata
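For orientation, here is a minimal sketch of the grouping the `helpy.splitter(..., 'f')` calls above appear to perform: splitting a structured array on its frame field. This is an illustrative stand-in, not `helpy`'s actual implementation, and it assumes a numpy structured array with an integer 'f' field:

import numpy as np

def split_by_field(data, field='f'):
    # stable sort on the field, then cut at boundaries between distinct values
    order = np.argsort(data[field], kind='mergesort')
    data = data[order]
    edges = np.nonzero(np.diff(data[field]))[0] + 1
    return np.split(data, edges)

With `ret_dict=True`, as in the later examples, the real helper appears to return a mapping from field value to slice instead of a list.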
Example #2
def split_shells(mdata, zero_to=0, do_mean=True):
    """Split melting data into dict of slices for each shell.

    parameters
    ----------
    mdata:      melting data with 'sh' field.
    zero_to:    shell with which to merge shell zero, e.g., `zero_to=1`
                includes the center particle in the first shell.
    do_mean:    if True, add an extra entry (keyed by `nshells`) that merges
                all shells.

    returns
    -------
    shells:     dict from integers to mdata slices.
    """
    splindex = np.where(mdata['sh'], mdata['sh'], zero_to) if zero_to else 'sh'
    shells = helpy.splitter(mdata, splindex, noncontiguous=True, ret_dict=True)
    if do_mean:
        shells[nshells] = mdata[mdata['sh'] >= 0]
    return shells
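A short usage sketch for `split_shells`; `mdata` with its 'sh' field and the module-level `nshells` come from the surrounding examples, and the loop is illustrative only:

shells = split_shells(mdata, zero_to=1, do_mean=True)
for sh in sorted(shells):
    print sh, len(shells[sh])  # shell index and number of rows in its slice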
Example #3
def t0avg(trackset, tracklen, tau):
    """ Averages the squared displacement of a track for a certain value of tau
        over all valid values of t0 (such that t0 + tau < tracklen)

        That is, for a single particle i and a fixed time shift tau,
        average over all valid t0:
            <[r_i(t0 + tau) - r_i(t0)]^2>

        parameters
        ----------
        trackset : a subset of data for all points in the given track
        tracklen : the length (duration) of the track
        tau : the time separation for the displacement: r(tau) - r(0)

        returns
        -------
        the described mean, a scalar
    """
    totsqdisp = 0.0
    nt0s = 0.0
    tfsets = helpy.splitter(trackset, trackset['f'], ret_dict=True)
    for t0 in np.arange(1, tracklen - tau - 1, dt0):  # step t0 by dt0, a module-level global
        olddot = tfsets.get(t0)
        newdot = tfsets.get(t0 + tau)
        if (olddot is None or newdot is None
                or len(olddot) != 1 or len(newdot) != 1):
            continue
        sqdisp  = (newdot['x'] - olddot['x'])**2 \
                + (newdot['y'] - olddot['y'])**2
        # sqdisp may be a length-1 array, or nested one level deeper
        if len(sqdisp) == 1:
            if verbose > 1: print 'unflattened'
            totsqdisp += sqdisp
        elif len(sqdisp[0]) == 1:
            if verbose: print 'flattened once'
            totsqdisp += sqdisp[0]
        else:
            if verbose: print "fail"
            continue
        nt0s += 1.0
    return totsqdisp/nt0s if nt0s else None
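One plausible way to turn `t0avg` into a full MSD curve is to sweep the time separation; `dt0` and `verbose` are module-level globals in this snippet, and `dtau` below is a hypothetical sampling step chosen only for illustration:

dtau = 10  # hypothetical spacing between sampled time separations
taus = np.arange(dtau, tracklen, dtau)
msd = [t0avg(trackset, tracklen, tau) for tau in taus]
msd = np.array([np.nan if m is None else float(m) for m in msd])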
Example #4
def remove_duplicates(trackids=None, data=None, tracksets=None,
                      target='', inplace=False, verbose=False):
    if tracksets is None:
        target = target or 'trackids'
        tracksets = helpy.load_tracksets(data, trackids, min_length=0)
    elif trackids is None:
        target = target or 'tracksets'
    else:
        target = target or 'trackids'
    rejects = defaultdict(dict)
    for t, tset in tracksets.iteritems():
        fs = tset['f']
        count = np.bincount(fs)
        dup_fs = np.where(count > 1)[0]
        if not len(dup_fs):
            continue
        ftsets = helpy.splitter(tset, fs, ret_dict=True)
        for f in dup_fs:
            prv = fs[np.searchsorted(fs, f, 'left') - 1] if f > fs[0] else None
            nxt = fs[np.searchsorted(fs, f, 'right')] if f < fs[-1] else None
            if nxt is not None and nxt in dup_fs:
                nxt = (fs[np.searchsorted(fs, nxt, 'right')]
                       if nxt < fs[-1] else None)
                if nxt is not None and nxt in dup_fs:
                    nxt = None
                    assert prv is not None, (
                        "Duplicate track particles in too many frames in a "
                        "row at frame {} for track {}".format(f, t))
            seps = np.zeros(count[f])
            for neigh in (prv, nxt):
                if neigh is None: continue
                if count[neigh] > 1 and neigh in rejects[t]:
                    isreject = np.in1d(ftsets[neigh]['id'], rejects[t][neigh],
                                       assume_unique=True)
                    ftsets[neigh] = ftsets[neigh][~isreject]
                sepx = ftsets[f]['x'] - ftsets[neigh]['x']
                sepy = ftsets[f]['y'] - ftsets[neigh]['y']
                seps += sepx*sepx + sepy*sepy
            rejects[t][f] = ftsets[f][seps > seps.min()]['id']
    if not rejects:
        return None if inplace else (trackids if target == 'trackids'
                                     else tracksets)
    if target == 'tracksets':
        if not inplace:
            tracksets = tracksets.copy()
        for t, tr in rejects.iteritems():
            rejected_ids = np.concatenate(tr.values())
            keep = np.in1d(tracksets[t]['id'], rejected_ids,
                           assume_unique=True, invert=True)
            # assign unconditionally: when not inplace this updates the copy
            tracksets[t] = tracksets[t][keep]
        return None if inplace else tracksets
    elif target == 'trackids':
        if not inplace:
            trackids = trackids.copy()
        rejects = np.concatenate([tfr for tr in rejects.itervalues()
                                for tfr in tr.itervalues()])
        if data is None:
            data_from_tracksets = np.concatenate(tracksets.values())
            if len(data_from_tracksets) != len(trackids):
                raise ValueError(
                    "You must provide data to return/modify trackids")
            ids = data_from_tracksets['id']
            ids.sort()
        else:
            ids = data['id']
        rejects = np.searchsorted(ids, rejects)
        trackids[rejects] = -1
        return None if inplace else trackids
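A minimal usage sketch for `remove_duplicates`, matching the call that appears in the next example; `pdata` and `tracksets` are assumed to be loaded as elsewhere in these snippets:

# clean a trackids array, using data to map rejected point ids to rows
trackids = remove_duplicates(trackids, data=pdata)
# or prune tracksets in place (returns None, modifies the dict entries)
remove_duplicates(tracksets=tracksets, inplace=True)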
Example #5
        helpy.txt_to_npz(datapath, verbose=True, compress=True)
        if args.orient or args.track:
            print 'NOTICE: not tracking, only converting file from txt to npz'
            print '        please run again without `-l` to track/orient'
        sys.exit()

    if args.track or args.orient:
        from scipy.spatial import cKDTree as KDTree
        if args.track != args.orient and helpy.bool_input("Would you like to "
                "simultaneously track and find orientations? (It's faster)\n"):
            args.track = args.orient = True
        if args.orient:
            pdata, cdata = helpy.load_data(absprefix, 'position corner')
        else:
            pdata = helpy.load_data(absprefix, 'position')
        pfsets = helpy.splitter(pdata, ret_dict=True)
        pftrees = {f: KDTree(np.column_stack([pfset['x'], pfset['y']]),
                             leafsize=50)
                   for f, pfset in pfsets.iteritems()}
    if args.track:
        meta.update(track_sidelength=args.side, track_maxdist=args.maxdist,
                track_maxtime=args.giveup, track_stub=args.stub,
                track_cut=args.cut)
        trackids = find_tracks(pdata, maxdist=args.maxdist, giveup=args.giveup,
                               n=args.number, cut=args.cut, stub=args.stub)
        trackids = remove_duplicates(trackids, data=pdata)
    else:
        trackids = None
    if args.orient:
        from orientation import get_angles_loop
        cfsets = helpy.splitter(cdata, ret_dict=True)
        cftrees = { f: KDTree(np.column_stack([cfset['x'], cfset['y']]), leafsize=50)
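The snippet is cut off above, but its per-frame KDTree pattern (split by frame with `helpy.splitter`, then build one tree per frame) stands on its own; the final query here is a hypothetical illustration:

from scipy.spatial import cKDTree as KDTree

pfsets = helpy.splitter(pdata, ret_dict=True)  # frame number -> rows
pftrees = {f: KDTree(np.column_stack([pfset['x'], pfset['y']]), leafsize=50)
           for f, pfset in pfsets.iteritems()}
dist, i = pftrees[0].query([100.0, 200.0])  # nearest particle to a point in frame 0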
Example #6
        save = locdir + prefix + "_MS{}D.pdf".format('A' if ang else '')
    if save:
        print "saving to", save
        pl.savefig(save)
    if show: pl.show()
    return ([fig] + fig.get_axes() + [taus] + [msd, msd_err]
            if errorbars else [msd])

if __name__ == '__main__':
    if args.load:
        datapath = locdir+prefix+dotfix+'_POSITIONS.txt'
        data = gen_data(datapath)
        if verbose: print "\t...loaded"
    if args.track:
        if not args.load:
            data = np.load(locdir+prefix+'_POSITIONS.npz')['data']
        fsets = helpy.splitter(data, ret_dict=True)
#        from scipy.spatial.kdtree import KDTree
#        ftrees = { f: KDTree(np.column_stack([fset['x'], fset['y']]), leafsize=50)
#                   for f, fset in fsets.iteritems() }
        trackids = find_tracks(maxdist=args.maxdist, giveup=args.giveup,
                               n=args.number, cut=args.cut, stub=args.stub)
        # save the data record array and the trackids array
        print "saving track data to",
        print locdir+prefix+dotfix+"_TRACKS"
        np.savez(locdir+prefix+dotfix+"_TRACKS",
                data=data, trackids=trackids)

    elif args.load:
        print "saving " + dotfix.strip('_').lower() + " data (no tracks) to",
        print prefix + dotfix + "_POSITIONS.npz"
        np.savez(locdir+prefix+dotfix+"_POSITIONS",
Example #7
        save = locdir + prefix + "_MS{}D.pdf".format('A' if ang else '')
    if save:
        print "saving to", save
        pl.savefig(save)
    if show: pl.show()
    return ([fig] + fig.get_axes() + [taus] + [msd, msd_err]
            if errorbars else [msd])

if __name__ == '__main__':
    if gendata:
        datapath = locdir+prefix+dotfix+'_POSITIONS.txt'
        data = gen_data(datapath)
        print "\t...loaded"
    if findtracks:
        if not gendata:
            data = np.load(locdir+prefix+'_POSITIONS.npz')['data']
        fsets = helpy.splitter(data, ret_dict=True)
#        from scipy.spatial.kdtree import KDTree
#        ftrees = { f: KDTree(np.column_stack([fset['x'], fset['y']]), leafsize=50)
#                   for f, fset in fsets.iteritems() }
        trackids = find_tracks(n=args.number, maxdist=args.maxdist,
                               giveup=args.giveup, cut=args.cut)
        np.savez(locdir+prefix+dotfix+"_TRACKS",
                data=data, trackids=trackids)

    elif gendata:
        print "saving data only (no tracks) to "+prefix+dotfix+"_POSITIONS.npz"
        np.savez(locdir+prefix+dotfix+"_POSITIONS",
                data=data)
        print '\t...saved'
    else:
        # assume existing tracks.npz
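        # a sketch of that branch (assumption: the _TRACKS.npz saved above
        # exists); np.savez stored 'data' and 'trackids', so load them back
        tracksnpz = np.load(locdir + prefix + dotfix + "_TRACKS.npz")
        data = tracksnpz['data']
        trackids = tracksnpz['trackids']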