Example #1
def compile_noise(prefixes, vs, width=3, side=1, fps=1, cat=True,
                  do_orientation=True, do_translation=True, subtract=True,
                  minlen=10, torient=True, interp=True, dupes=False, **ignored):
    if np.isscalar(prefixes):
        prefixes = [prefixes]
    for prefix in prefixes:
        if args.verbose:
            print "Loading data for", prefix
        data = helpy.load_data(prefix, 'tracks')
        if dupes:
            data['t'] = tracks.remove_duplicates(data['t'], data)
        tracksets = helpy.load_tracksets(data, min_length=minlen,
                run_track_orient=torient, run_fill_gaps=interp)
        for track in tracksets:
            tdata = tracksets[track]
            velocities = noise_derivatives(tdata, width=width,
                    side=side, fps=fps, do_orientation=do_orientation,
                    do_translation=do_translation, subtract=subtract)
            for v in velocities:
                vs[v].append(velocities[v])
    if cat:
        for v in vs:
            vs[v] = np.concatenate(vs[v], -1)
    return len(tracksets)
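A minimal usage sketch for compile_noise, assuming the surrounding script defines the module-level args namespace and that helpy, tracks, and tracked data for the given prefixes are available; the prefixes and the side/fps values below are hypothetical. vs must map each velocity component to a list, e.g. a collections.defaultdict(list):

from collections import defaultdict

vs = defaultdict(list)          # one list of velocity arrays per component
ntracks = compile_noise(['path/to/run_a', 'path/to/run_b'], vs,
                        width=3, side=17, fps=2.5, cat=True)
# with cat=True, each entry of vs is a single concatenated array
for v in vs:
    print v, vs[v].shape, vs[v].std()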
Example #2
def remove_duplicates(trackids=None, data=None, tracksets=None,
                      target='', inplace=False, verbose=False):
    if tracksets is None:
        target = target or 'trackids'
        tracksets = helpy.load_tracksets(data, trackids, min_length=0)
    elif trackids is None:
        target = target or 'tracksets'
    else:
        target = target or 'trackids'
    rejects = defaultdict(dict)
    for t, tset in tracksets.iteritems():
        fs = tset['f']
        count = np.bincount(fs)
        dup_fs = np.where(count>1)[0]
        if not len(dup_fs):
            continue
        ftsets = helpy.splitter(tset, fs, ret_dict=True)
        for f in dup_fs:
            prv = fs[np.searchsorted(fs, f, 'left') - 1] if f > fs[0] else None
            nxt = fs[np.searchsorted(fs, f, 'right')] if f < fs[-1] else None
            if nxt is not None and nxt in dup_fs:
                nxt = fs[np.searchsorted(fs, nxt, 'right')] if nxt < fs[-1] else None
                if nxt is not None and nxt in dup_fs:
                    nxt = None
                    assert prv is not None, ("Duplicate track particles in too many "
                            "frames in a row at frame {} for track {}".format(f, t))
            seps = np.zeros(count[f])
            for neigh in (prv, nxt):
                if neigh is None: continue
                if count[neigh] > 1 and neigh in rejects[t]:
                    isreject = np.in1d(ftsets[neigh]['id'], rejects[t][neigh], assume_unique=True)
                    ftsets[neigh] = ftsets[neigh][~isreject]
                sepx = ftsets[f]['x'] - ftsets[neigh]['x']
                sepy = ftsets[f]['y'] - ftsets[neigh]['y']
                seps += sepx*sepx + sepy*sepy
            rejects[t][f] = ftsets[f][seps > seps.min()]['id']
    if not rejects:
        return None if inplace else trackids if target=='trackids' else tracksets
    if target=='tracksets':
        if not inplace:
            tracksets = tracksets.copy()
        for t, tr in rejects.iteritems():
            trs = np.concatenate(tr.values())
            keep = np.in1d(tracksets[t]['id'], trs,
                           assume_unique=True, invert=True)
            # assign the filtered track back; when not inplace this updates the copy
            tracksets[t] = tracksets[t][keep]
        return None if inplace else tracksets
    elif target=='trackids':
        if not inplace:
            trackids = trackids.copy()
        rejects = np.concatenate([tfr for tr in rejects.itervalues()
                                for tfr in tr.itervalues()])
        if data is None:
            data_from_tracksets = np.concatenate(tracksets.values())
            if len(data_from_tracksets)!=len(trackids):
                raise ValueError("You must provide data to return/modify trackids")
            ids = data_from_tracksets['id']
            ids.sort()
        else:
            ids = data['id']
        rejects = np.searchsorted(ids, rejects)
        trackids[rejects] = -1
        return None if inplace else trackids
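A usage sketch for remove_duplicates, mirroring the call in Example #1; it assumes data is the structured array returned by helpy.load_data, with 'id', 'f', 't', 'x' and 'y' fields:

# rewrite the track ids, marking rejected duplicate detections with -1
data['t'] = remove_duplicates(data['t'], data=data)

# or filter already-built tracksets, modifying the dict in place
tracksets = helpy.load_tracksets(data, data['t'], min_length=0)
remove_duplicates(tracksets=tracksets, inplace=True)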
Example #3
            msg = 'No tifs found at the following pattern, please fix it\n{}\n'
            pattern = raw_input(msg.format(pattern))
            imfiles = glob(pattern)
        meta['path_to_tiffs'] = pattern

        frange = raw_input("Number or range (as slice: 'start:end') of frames to view? "
                           "({} available) ".format(len(imfiles)))
        fslice = slice(*[int(s) if s else None for s in frange.split(':')])
        imstack = map(plt.imread, sorted(imfiles)[fslice])
        datas = helpy.load_data(absprefix, 't c o')
        fsets = map(lambda d: helpy.splitter(d, datas[0]['f']), datas)
        animate_detection(imstack, *fsets, rc=args.rcorner, side=args.side,
                          verbose=args.verbose)

    if args.msd or args.nn or args.rn:
        tracksets = helpy.load_tracksets(data, min_length=args.stub,
                            run_fill_gaps=True, verbose=args.verbose)

    if args.msd:
        msds, msdids = find_msds(tracksets, dt0, dtau, min_length=args.stub)
        if args.save:
            save = absprefix+"_MSD.npz"
            print "saving msd data to", save
            np.savez(save,
                     msds=np.asarray(msds),
                     msdids=np.asarray(msdids),
                     dt0=np.asarray(dt0),
                     dtau=np.asarray(dtau))
    elif args.plotmsd or args.rr:
        if verbose: print "loading msd data from npz files"
        datapath = absprefix+"_MSD.npz"
        msdnpz = np.load(datapath)
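For reference, the frange reply above is turned into a slice object by the slice(*...) expression; a standalone illustration with a few hypothetical replies:

for reply in ['100', '10:50', ':200', '300:']:
    fslice = slice(*[int(s) if s else None for s in reply.split(':')])
    print repr(reply), '->', fslice
# '100'   -> slice(None, 100, None)   (a bare number gives the first 100 frames)
# '10:50' -> slice(10, 50, None)
# ':200'  -> slice(None, 200, None)
# '300:'  -> slice(300, None, None)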
Example #4
    if W is None:
        data = helpy.load_data(args.prefix)
        N, W = square_size(helpy.mode(data['f'][data['t'] >= 0], count=True))
        meta['crystal_width'] = W
    N = W*W
    nshells = (W+1)//2
    print args.prefix
    print "Crystal size {W}x{W} = {N} ({s} shells)".format(W=W, N=N, s=nshells)

    if args.save:
        helpy.save_meta(args.prefix, meta)

    if args.melt:
        print 'calculating'
        data = helpy.load_data(args.prefix)
        tsets = helpy.load_tracksets(data, run_repair='interp',
                                     run_track_orient=True)
        # to get the benefits of tracksets (interpolation, stub filtering):
        data = np.concatenate(tsets.values())
        data.sort(order=['f', 't'])
        if not args.start:
            args.start = find_start_frame(data, plot=args.plot)
        mdata = melt_analysis(data)
        if args.save:
            np.savez_compressed(args.prefix + '_MELT.npz', data=mdata)
            helpy.save_meta(args.prefix, meta, start_frame=args.start)
    else:
        mdata = np.load(args.prefix + '_MELT.npz')['data']

    if args.plot:
        stats = ['dens', 'psi', 'phi']
        plot_args = make_plot_args(nshells, args)
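As a quick sanity check of the size and shell arithmetic above (N = W*W, nshells = (W+1)//2), here are a few hypothetical crystal widths:

for W in (4, 7, 8):
    print "Crystal size {W}x{W} = {N} ({s} shells)".format(W=W, N=W*W, s=(W+1)//2)
# 4x4 = 16 (2 shells), 7x7 = 49 (4 shells), 8x8 = 64 (4 shells)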
Example #5
                bgimage = pl.imread(locdir+prefix+'_001.tif')
            except IOError:
                bgimage = None
        if args.singletracks:
            mask = np.in1d(trackids, args.singletracks)
        else:
            mask = None
        plot_tracks(data, trackids, bgimage, mask=mask,
                    save=args.save, show=args.show)

if __name__=='__main__' and args.nn:
    # Calculate the <nn> correlation for all the tracks in a given dataset
    # TODO: fix this to combine multiple datasets (more than one prefix)

    data, trackids, odata, omask = helpy.load_data(prefix, True, False)
    tracksets, otracksets = helpy.load_tracksets(data, trackids, odata, omask,
                                                 min_length=args.stub)

    coscorrs = [ corr.autocorr(np.cos(otrackset), cumulant=False, norm=False)
                for otrackset in otracksets.values() ]
    sincorrs = [ corr.autocorr(np.sin(otrackset), cumulant=False, norm=False)
                for otrackset in otracksets.values() ]

    # Gather all the track correlations and average them
    allcorr = coscorrs + sincorrs
    allcorr = helpy.pad_uneven(allcorr, np.nan)
    tcorr = np.arange(allcorr.shape[1])/fps
    meancorr = np.nanmean(allcorr, 0)
    added = np.sum(np.isfinite(allcorr), 0)
    errcorr = np.nanstd(allcorr, 0)/np.sqrt(added - 1)
    sigma = errcorr + 1e-5*errcorr.std() # add something small to prevent 0
    if verbose:
Example #6
            bgimage = Im.open(extdir+prefix+'_0001.tif')
        except IOError:
            try:
                bgimage = Im.open(locdir+prefix+'_001.tif')
            except IOError:
                bgimage = None
        if singletracks:
            mask = np.in1d(trackids, singletracks)
        else:
            mask = None  # avoid an undefined mask when no tracks are singled out
        plot_tracks(data, trackids, bgimage, mask=mask)

if __name__=='__main__' and args.nn:
    # Calculate the <nn> correlation for all the tracks in a given dataset
    # TODO: fix this to combine multiple datasets (more than one prefix)

    data, trackids, odata, omask = helpy.load_data(prefix, True, False)
    tracksets, otracksets = helpy.load_tracksets(data, trackids, odata, omask,
                                                 min_length=args.stub)

    coscorrs = [ corr.autocorr(np.cos(otrackset), cumulant=False, norm=False)
                for otrackset in otracksets.values() ]
    sincorrs = [ corr.autocorr(np.sin(otrackset), cumulant=False, norm=False)
                for otrackset in otracksets.values() ]

    # Gather all the track correlations and average them
    allcorr = coscorrs + sincorrs
    allcorr = helpy.pad_uneven(allcorr, np.nan)
    tcorr = np.arange(allcorr.shape[1])/fps
    meancorr = np.nanmean(allcorr, 0)
    errcorr = np.nanstd(allcorr, 0)/np.sqrt(len(allcorr))
    if verbose:
        print "Merged nn corrs"
Example #7
                         ['side', 'fps'], ['sidelength', 'fps'], [1, 1])
    compile_args = dict(args.__dict__)
    if args.prefix == 'simulate':
        import simulate as sim
        spar = {'DR': 1/21., 'v0': 0.3678, 'DT': 0.01,
                'fps': args.fps, 'side': args.side, 'size': 1000}
        print spar
        sdata = [sim.SimTrack(num=i, **spar)
                 for i in xrange(1, 1001)]
        data = np.concatenate([sdatum.track for sdatum in sdata])
        data['id'] = np.arange(len(data))
        data = {'simulate': data}
    else:
        data = find_data(args)
    tsets = {prefix: helpy.load_tracksets(
                data[prefix], min_length=args.stub, verbose=args.verbose,
                run_remove_dupes=args.dupes, run_repair=args.gaps,
                run_track_orient=args.torient)
             for prefix in data}

    label = {'o': r'$\xi$', 'par': r'$v_\parallel$', 'perp': r'$v_\perp$',
             'etapar': r'$\eta_\parallel$', 'x': '$v_x$', 'y': '$v_y$'}
    ls = {'o': '-', 'x': '-.', 'y': ':',
          'par': '-.', 'perp': ':', 'etapar': '--'}
    cs = {'mean': 'r', 'var': 'g', 'std': 'b'}

if __name__ == '__main__':
    if 'widths' in args.command:
        command_widths(tsets, compile_args, args)
    if 'autocorr' in args.command:
        command_autocorr(tsets, args)
    if 'hist' in args.command: